diff --git a/.flake8 b/.flake8
index 8e4424874f40d99fdf4d6361f1c0928be59217b4..699c5146226534fbb165b134f12d2ac5b59fc3e2 100644
--- a/.flake8
+++ b/.flake8
@@ -1,6 +1,7 @@
 [flake8]
 ignore = E114,E115,E116,E121,E123,E126,E133,E2,E704,W503,F403,F405,F999
 exclude =
+    .git,
     buildconfig,
     docs,
     Framework/Algorithms/test,
diff --git a/Framework/API/CMakeLists.txt b/Framework/API/CMakeLists.txt
index 13eb185b3bccc18714a76e94e6eb7221db26cbbe..58083f8d9796b99005d9f85f0b56923a52eb0dd0 100644
--- a/Framework/API/CMakeLists.txt
+++ b/Framework/API/CMakeLists.txt
@@ -29,6 +29,7 @@ set ( SRC_FILES
 	src/DataProcessorAlgorithm.cpp
 	src/DeprecatedAlgorithm.cpp
 	src/DetectorInfo.cpp
+        src/DetectorSearcher.cpp
 	src/DomainCreatorFactory.cpp
 	src/EnabledWhenWorkspaceIsType.cpp
 	src/EqualBinSizesValidator.cpp
@@ -45,6 +46,7 @@ set ( SRC_FILES
 	src/FunctionDomainGeneral.cpp
 	src/FunctionDomainMD.cpp
 	src/FunctionFactory.cpp
+	src/FunctionGenerator.cpp
 	src/FunctionParameterDecorator.cpp
 	src/FunctionProperty.cpp
 	src/FunctionValues.cpp
@@ -103,8 +105,8 @@ set ( SRC_FILES
 	src/MultiPeriodGroupWorker.cpp
 	src/MultipleExperimentInfos.cpp
 	src/MultipleFileProperty.cpp
-	src/NearestNeighbourInfo.cpp
-	src/NearestNeighbours.cpp
+        src/WorkspaceNearestNeighbourInfo.cpp
+        src/WorkspaceNearestNeighbours.cpp
 	src/NotebookBuilder.cpp
 	src/NotebookWriter.cpp
 	src/NullCoordTransform.cpp
@@ -189,6 +191,7 @@ set ( INC_FILES
 	inc/MantidAPI/DeclareUserAlg.h
 	inc/MantidAPI/DeprecatedAlgorithm.h
 	inc/MantidAPI/DetectorInfo.h
+        inc/MantidAPI/DetectorSearcher.h
 	inc/MantidAPI/DllConfig.h
 	inc/MantidAPI/DomainCreatorFactory.h
 	inc/MantidAPI/EnabledWhenWorkspaceIsType.h
@@ -207,6 +210,7 @@ set ( INC_FILES
 	inc/MantidAPI/FunctionDomainGeneral.h
 	inc/MantidAPI/FunctionDomainMD.h
 	inc/MantidAPI/FunctionFactory.h
+	inc/MantidAPI/FunctionGenerator.h
 	inc/MantidAPI/FunctionParameterDecorator.h
 	inc/MantidAPI/FunctionProperty.h
 	inc/MantidAPI/FunctionValues.h
@@ -295,8 +299,8 @@ set ( INC_FILES
 	inc/MantidAPI/MultiPeriodGroupWorker.h
 	inc/MantidAPI/MultipleExperimentInfos.h
 	inc/MantidAPI/MultipleFileProperty.h
-	inc/MantidAPI/NearestNeighbourInfo.h
-	inc/MantidAPI/NearestNeighbours.h
+  inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
+  inc/MantidAPI/WorkspaceNearestNeighbours.h
 	inc/MantidAPI/NotebookBuilder.h
 	inc/MantidAPI/NotebookWriter.h
 	inc/MantidAPI/NullCoordTransform.h
@@ -364,6 +368,7 @@ set ( TEST_FILES
 	CostFunctionFactoryTest.h
 	DataProcessorAlgorithmTest.h
 	DetectorInfoTest.h
+        DetectorSearcherTest.h
 	EnabledWhenWorkspaceIsTypeTest.h
 	EqualBinSizesValidatorTest.h
 	ExperimentInfoTest.h
@@ -414,8 +419,8 @@ set ( TEST_FILES
 	MultiPeriodGroupWorkerTest.h
 	MultipleExperimentInfosTest.h
 	MultipleFilePropertyTest.h
-	NearestNeighbourInfoTest.h
-	NearestNeighboursTest.h
+        WorkspaceNearestNeighbourInfoTest.h
+        WorkspaceNearestNeighboursTest.h
 	NotebookBuilderTest.h
 	NotebookWriterTest.h
 	NumericAxisTest.h
diff --git a/Framework/API/inc/MantidAPI/Algorithm.h b/Framework/API/inc/MantidAPI/Algorithm.h
index faae3348a666d201aaa39ab2c05482d7c246c5a9..6fd4d6441f3f30c7bb9a83c09c61b99be72380c5 100644
--- a/Framework/API/inc/MantidAPI/Algorithm.h
+++ b/Framework/API/inc/MantidAPI/Algorithm.h
@@ -200,6 +200,11 @@ public:
   /// is provided
   const std::string alias() const override { return ""; }
 
+  /// function to return URL for algorithm documentation; A default
+  /// implementation is provided.
+  /// Override if the algorithm is not part of the Mantid distribution.
+  const std::string helpURL() const override { return ""; }
+
   const std::string workspaceMethodName() const override;
   const std::vector<std::string> workspaceMethodOn() const override;
   const std::string workspaceMethodInputProperty() const override;
diff --git a/Framework/API/inc/MantidAPI/AlgorithmProxy.h b/Framework/API/inc/MantidAPI/AlgorithmProxy.h
index 827d16e14545becd538ade8fc773eb9575565c9a..23e2d3350ac1b497a1596c46c5370e9e7e61162c 100644
--- a/Framework/API/inc/MantidAPI/AlgorithmProxy.h
+++ b/Framework/API/inc/MantidAPI/AlgorithmProxy.h
@@ -86,6 +86,8 @@ public:
   }
   /// Aliases to the algorithm
   const std::string alias() const override { return m_alias; }
+  /// Optional documentation URL for the real algorithm
+  const std::string helpURL() const override { return m_helpURL; }
   /// function returns a summary message that will be displayed in the default
   /// GUI, and in the help.
   const std::string summary() const override { return m_summary; }
@@ -176,6 +178,7 @@ private:
   const std::string
       m_categorySeparator;     ///< category seperator of the real algorithm
   const std::string m_alias;   ///< alias to the algorithm
+  const std::string m_helpURL; ///< Optional documentation URL
   const std::string m_summary; ///<Message to display in GUI and help.
   const int m_version;         ///< version of the real algorithm
 
diff --git a/Framework/API/inc/MantidAPI/CompositeFunction.h b/Framework/API/inc/MantidAPI/CompositeFunction.h
index 93ae8af41907522ef83c4435bbf7b83b599a7fd1..3e84c101ab35ddf8a9e3b6d8e0a0faeece97502e 100644
--- a/Framework/API/inc/MantidAPI/CompositeFunction.h
+++ b/Framework/API/inc/MantidAPI/CompositeFunction.h
@@ -111,13 +111,6 @@ public:
   /// Set the fitting error for a parameter
   void setError(size_t i, double err) override;
 
-  /// Check if a parameter is active
-  bool isFixed(size_t i) const override;
-  /// Removes a parameter from the list of active
-  void fix(size_t i) override;
-  /// Restores a declared parameter i to the active status
-  void unfix(size_t i) override;
-
   /// Value of i-th active parameter. Override this method to make fitted
   /// parameters different from the declared
   double activeParameter(size_t i) const override;
@@ -130,8 +123,6 @@ public:
   std::string nameOfActive(size_t i) const override;
   /// Returns the name of active parameter i
   std::string descriptionOfActive(size_t i) const override;
-  /// Check if an active parameter i is actually active
-  bool isActive(size_t i) const override;
 
   /// Return parameter index from a parameter reference.
   size_t getParameterIndex(const ParameterReference &ref) const override;
@@ -149,11 +140,7 @@ public:
   bool removeTie(size_t i) override;
   /// Get the tie of i-th parameter
   ParameterTie *getTie(size_t i) const override;
-  /// Add a new tie
-  void addTie(std::unique_ptr<ParameterTie> tie) override;
 
-  /// Overwrite IFunction methods
-  void addConstraint(std::unique_ptr<IConstraint> ic) override;
   /// Get constraint of i-th parameter
   IConstraint *getConstraint(size_t i) const override;
   /// Prepare function for a fit
@@ -229,6 +216,10 @@ protected:
   /// Declare a new parameter
   void declareParameter(const std::string &name, double initValue = 0,
                         const std::string &description = "") override;
+  /// Change status of parameter
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  /// Get status of parameter
+  ParameterStatus getParameterStatus(size_t i) const override;
 
   size_t paramOffset(size_t i) const { return m_paramOffsets[i]; }
 
diff --git a/Framework/API/inc/MantidAPI/DetectorSearcher.h b/Framework/API/inc/MantidAPI/DetectorSearcher.h
new file mode 100644
index 0000000000000000000000000000000000000000..3024479af9b50dbcb01fed9a693bc68d3608d571
--- /dev/null
+++ b/Framework/API/inc/MantidAPI/DetectorSearcher.h
@@ -0,0 +1,107 @@
+#ifndef MANTID_DETECTOR_SEARCHER_H_
+#define MANTID_DETECTOR_SEARCHER_H_
+
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/DllConfig.h"
+#include "MantidGeometry/Instrument.h"
+#include "MantidGeometry/Objects/InstrumentRayTracer.h"
+#include "MantidKernel/NearestNeighbours.h"
+#include "MantidKernel/V3D.h"
+
+#include <Eigen/Core>
+
+/**
+  DetectorSearcher is a helper class to find a specific detector within
+  the instrument geometry.
+
+  This class solves the problem of finding a detector given a Qlab vector. Two
+  search strategies are used depending on the instrument's geometry.
+
+  1) For rectangular detector geometries the InstrumentRayTracer class is used
+  to recursively search the instrument tree.
+
+  2) For geometries which do not use rectangular detectors ray tracing to every
+  component is very expensive. In this case it is quicker to use a
+  NearestNeighbours search to find likely detector positions.
+
+  @author Samuel Jackson
+  @date 2017
+
+  Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+
+namespace Mantid {
+namespace API {
+
+class MANTID_API_DLL DetectorSearcher {
+public:
+  /// Search result type representing whether a detector was found and if so
+  /// which detector index it was.
+  typedef std::tuple<bool, size_t> DetectorSearchResult;
+
+  /// Create a new DetectorSearcher with the given instrument & detectors
+  DetectorSearcher(Geometry::Instrument_const_sptr instrument,
+                   const DetectorInfo &detInfo);
+  /// Find a detector that intsects with the given Qlab vector
+  DetectorSearchResult findDetectorIndex(const Kernel::V3D &q);
+
+private:
+  /// Attempt to find a detector using a full instrument ray tracing strategy
+  DetectorSearchResult searchUsingInstrumentRayTracing(const Kernel::V3D &q);
+  /// Attempt to find a detector using a nearest neighbours search strategy
+  DetectorSearchResult searchUsingNearestNeighbours(const Kernel::V3D &q);
+  /// Check whether the given direction in detector space intercepts with a
+  /// detector
+  std::tuple<bool, size_t> checkInteceptWithNeighbours(
+      const Kernel::V3D &direction,
+      const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours)
+      const;
+  /// Helper function to build the nearest neighbour tree
+  void createDetectorCache();
+  /// Helper function to convert a Qlab vector to a direction in detector space
+  Kernel::V3D convertQtoDirection(const Kernel::V3D &q) const;
+  /// Helper function to handle the tube gap parameter in tube instruments
+  DetectorSearchResult handleTubeGap(
+      const Kernel::V3D &detectorDir,
+      const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours);
+
+  // Instance variables
+
+  /// flag for whether to use InstrumentRayTracer or NearestNeighbours
+  const bool m_usingFullRayTrace;
+  /// flag for whether the crystallography convention is to be used
+  const double m_crystallography_convention;
+  /// detector info for the instrument
+  const DetectorInfo &m_detInfo;
+  /// handle to the instrument to search for detectors in
+  Geometry::Instrument_const_sptr m_instrument;
+  /// vector of detector indicies used in the search
+  std::vector<size_t> m_indexMap;
+  /// Detector search cache for fast look-up of detectors
+  std::unique_ptr<Kernel::NearestNeighbours<3>> m_detectorCacheSearch;
+  /// instrument ray tracer object for searching in rectangular detectors
+  std::unique_ptr<Geometry::InstrumentRayTracer> m_rayTracer;
+};
+}
+}
+
+#endif
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/FunctionGenerator.h b/Framework/API/inc/MantidAPI/FunctionGenerator.h
similarity index 72%
rename from Framework/CurveFitting/inc/MantidCurveFitting/Functions/FunctionGenerator.h
rename to Framework/API/inc/MantidAPI/FunctionGenerator.h
index b3d391c2038feb72eb1e6d0a705138b853c22132..40c5fe59051834a8a5cb1f6ff0761c0d014a6951 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/FunctionGenerator.h
+++ b/Framework/API/inc/MantidAPI/FunctionGenerator.h
@@ -1,11 +1,10 @@
-#ifndef MANTID_CURVEFITTING_FUNCTIONGENERATOR_H_
-#define MANTID_CURVEFITTING_FUNCTIONGENERATOR_H_
+#ifndef MANTID_API_FUNCTIONGENERATOR_H_
+#define MANTID_API_FUNCTIONGENERATOR_H_
 
 #include "MantidAPI/IFunction.h"
 
 namespace Mantid {
-namespace CurveFitting {
-namespace Functions {
+namespace API {
 /**
 FunctionGenerator is a partial implementation of IFunction that defines a
 function consisting of two parts: the source and the target. The source
@@ -45,10 +44,10 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 File change history is stored at: <https://github.com/mantidproject/mantid>
 Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class DLLExport FunctionGenerator : public API::IFunction {
+class DLLExport FunctionGenerator : public IFunction {
 public:
   /// Constructor
-  FunctionGenerator(API::IFunction_sptr source);
+  FunctionGenerator(IFunction_sptr source);
 
   /// @name Overrides implementing composition of two functions:
   /// m_source and m_target.
@@ -83,38 +82,14 @@ public:
   /// Set the fitting error for a parameter
   void setError(size_t i, double err) override;
 
-  /// Check if a declared parameter i is fixed
-  bool isFixed(size_t i) const override;
-  /// Removes a declared parameter i from the list of active
-  void fix(size_t i) override;
-  /// Restores a declared parameter i to the active status
-  void unfix(size_t i) override;
-
   /// Return parameter index from a parameter reference.
-  size_t getParameterIndex(const API::ParameterReference &ref) const override;
-  /// Tie a parameter to other parameters (or a constant)
-  void tie(const std::string &parName, const std::string &expr,
-           bool isDefault = false) override;
-  /// Apply the ties
-  void applyTies() override;
-  /// Remove all ties
-  void clearTies() override;
-  // Unhide base class function: removeTie(string).
-  using IFunction::removeTie;
-  /// Removes i-th parameter's tie
-  bool removeTie(size_t i) override;
-  /// Get the tie of i-th parameter
-  API::ParameterTie *getTie(size_t i) const override;
-
-  /// Add a constraint to function
-  void addConstraint(std::unique_ptr<API::IConstraint> ic) override;
-  /// Get constraint of i-th parameter
-  API::IConstraint *getConstraint(size_t i) const override;
-  /// Remove a constraint
-  void removeConstraint(const std::string &parName) override;
-
+  size_t getParameterIndex(const ParameterReference &ref) const override;
   /// Set up the function for a fit.
   void setUpForFit() override;
+  /// Get the tie for i-th parameter
+  ParameterTie *getTie(size_t i) const override;
+  /// Get the i-th constraint
+  IConstraint *getConstraint(size_t i) const override;
 
   /// Build target function.
   virtual void buildTargetFunction() const = 0;
@@ -123,9 +98,10 @@ protected:
   /// Declare a new parameter
   void declareParameter(const std::string &name, double initValue = 0,
                         const std::string &description = "") override;
-
-  /// Add a new tie. Derived classes must provide storage for ties
-  void addTie(std::unique_ptr<API::ParameterTie> tie) override;
+  /// Change status of parameter
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  /// Get status of parameter
+  ParameterStatus getParameterStatus(size_t i) const override;
   //@}
 
 public:
@@ -144,8 +120,8 @@ public:
   //@}
 
   /// Evaluate the function
-  void function(const API::FunctionDomain &domain,
-                API::FunctionValues &values) const override;
+  void function(const FunctionDomain &domain,
+                FunctionValues &values) const override;
 
 protected:
   /// overwrite IFunction base class method, which declare function parameters
@@ -158,17 +134,16 @@ protected:
   /// Update target function if necessary.
   void checkTargetFunction() const;
   /// Function that calculates parameters of the target function.
-  API::IFunction_sptr m_source;
+  IFunction_sptr m_source;
   /// Function that actually calculates the output.
-  mutable API::IFunction_sptr m_target;
+  mutable IFunction_sptr m_target;
   /// Cached number of parameters in m_source.
   size_t m_nOwnParams;
   /// Flag indicating that updateTargetFunction() is required.
   mutable bool m_dirty;
 };
 
-} // namespace Functions
-} // namespace CurveFitting
+} // namespace API
 } // namespace Mantid
 
-#endif /*MANTID_CURVEFITTING_FUNCTIONGENERATOR_H_*/
+#endif /*MANTID_API_FUNCTIONGENERATOR_H_*/
diff --git a/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h b/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h
index 2ad22fbcc8956be035e59af2402f91f92f6dbe83..e4d9c5b860c2bd7d4dec59cf5973a171bbae98da 100644
--- a/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h
+++ b/Framework/API/inc/MantidAPI/FunctionParameterDecorator.h
@@ -91,15 +91,6 @@ public:
   /// Set the fitting error for a parameter of decorated function.
   void setError(size_t i, double err) override;
 
-  /// Check if a declared parameter i of decorated function is active.
-  bool isFixed(size_t i) const override;
-  /// Removes a declared parameter i of decorated function from the list of
-  /// active.
-  void fix(size_t i) override;
-  /// Restores a declared parameter i of decorated function to the active
-  /// status.
-  void unfix(size_t i) override;
-
   /// Return parameter index of decorated function from a parameter reference.
   /// Usefull for constraints and ties in composite functions.
   size_t getParameterIndex(const ParameterReference &ref) const override;
@@ -148,6 +139,8 @@ protected:
                         const std::string &description) override;
 
   void addTie(std::unique_ptr<ParameterTie>) override;
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  ParameterStatus getParameterStatus(size_t i) const override;
 
   virtual void beforeDecoratedFunctionSet(const IFunction_sptr &fn);
   void setDecoratedFunctionPrivate(const IFunction_sptr &fn);
diff --git a/Framework/API/inc/MantidAPI/IAlgorithm.h b/Framework/API/inc/MantidAPI/IAlgorithm.h
index 071e13f90f7377e98841c7a2011049e3ec43b29a..d22b7e91ac615efd7f397139af69d74c32d07803 100644
--- a/Framework/API/inc/MantidAPI/IAlgorithm.h
+++ b/Framework/API/inc/MantidAPI/IAlgorithm.h
@@ -80,6 +80,10 @@ public:
   /// function to return any aliases of the algorithm.
   virtual const std::string alias() const = 0;
 
+  /// function to return an optional URL for documentation.
+  /// Override if the algorithm is not part of the Mantid distribution
+  virtual const std::string helpURL() const = 0;
+
   /** @name Algorithms As Methods */
   ///@{
   /// Returns a name that will be used when attached as a workspace method.
diff --git a/Framework/API/inc/MantidAPI/IConstraint.h b/Framework/API/inc/MantidAPI/IConstraint.h
index e443a2e1db538d21ebea0217f77a7dfd8914a72f..154b2bcc63c5dc4f12518dbe0ffd9e85884532fb 100644
--- a/Framework/API/inc/MantidAPI/IConstraint.h
+++ b/Framework/API/inc/MantidAPI/IConstraint.h
@@ -4,8 +4,8 @@
 //----------------------------------------------------------------------
 // Includes
 //----------------------------------------------------------------------
-#include "MantidAPI/IFunction.h"
 #include "MantidAPI/ParameterReference.h"
+#include <string>
 
 namespace Mantid {
 namespace API {
diff --git a/Framework/API/inc/MantidAPI/IFunction.h b/Framework/API/inc/MantidAPI/IFunction.h
index 04e9342df3d8a219fb57886a1be9ff95fbf99fce..a61d19233ec64f70c74a8e81cc42d5c9ca7b5285 100644
--- a/Framework/API/inc/MantidAPI/IFunction.h
+++ b/Framework/API/inc/MantidAPI/IFunction.h
@@ -7,7 +7,9 @@
 #include "MantidAPI/DllConfig.h"
 #include "MantidAPI/FunctionDomain.h"
 #include "MantidAPI/FunctionValues.h"
+#include "MantidAPI/IConstraint.h"
 #include "MantidAPI/Jacobian.h"
+#include "MantidAPI/ParameterTie.h"
 #include "MantidKernel/Matrix.h"
 #include "MantidKernel/Unit.h"
 
@@ -33,9 +35,6 @@ class ProgressBase;
 namespace API {
 class Workspace;
 class MatrixWorkspace;
-class ParameterTie;
-class IConstraint;
-class ParameterReference;
 class FunctionHandler;
 
 /** This is an interface to a fitting function - a semi-abstarct class.
@@ -256,6 +255,8 @@ public:
     /// Create vector attribute
     explicit Attribute(const std::vector<double> &v)
         : m_data(v), m_quoteValue(false) {}
+    /// Copy assignment
+    Attribute &operator=(const Attribute &attr);
 
     /// Apply an attribute visitor
     template <typename T> T apply(AttributeVisitor<T> &v) {
@@ -312,9 +313,7 @@ public:
   //---------------------------------------------------------//
 
   /// Constructor
-  IFunction()
-      : m_isParallel(false), m_handler(nullptr), m_progReporter(nullptr),
-        m_chiSquared(0.0) {}
+  IFunction() : m_isParallel(false), m_handler(nullptr), m_chiSquared(0.0) {}
   /// Virtual destructor
   virtual ~IFunction();
   /// No copying
@@ -344,7 +343,7 @@ public:
   virtual int64_t estimateNoProgressCalls() const { return 1; }
 
   /// Attach a progress reporter
-  void setProgressReporter(Kernel::ProgressBase *reporter);
+  void setProgressReporter(boost::shared_ptr<Kernel::ProgressBase> reporter);
   /// Reports progress with an optional message
   void reportProgress(const std::string &msg = "") const;
   /// Returns true if a progress reporter is set & evalaution has been requested
@@ -412,20 +411,26 @@ public:
   /// Set the fitting error for a parameter
   virtual void setError(size_t i, double err) = 0;
 
-  /// Check if a declared parameter i is fixed
-  virtual bool isFixed(size_t i) const = 0;
-  /// Removes a declared parameter i from the list of active
-  virtual void fix(size_t i) = 0;
+  /// Check if a parameter i is fixed
+  bool isFixed(size_t i) const;
+  /// Check if a parameter i is fixed by default (not by user).
+  bool isFixedByDefault(size_t i) const;
+  /// Removes a parameter i from the list of active
+  void fix(size_t i, bool isDefault = false);
   /// Restores a declared parameter i to the active status
-  virtual void unfix(size_t i) = 0;
+  void unfix(size_t i);
   /// Fix a parameter
-  void fixParameter(const std::string &name);
+  void fixParameter(const std::string &name, bool isDefault = false);
   /// Free a parameter
   void unfixParameter(const std::string &name);
   /// Fix all parameters
-  void fixAll();
+  void fixAll(bool isDefault = false);
   /// Free all parameters
   void unfixAll();
+  /// Free all parameters fixed by default
+  void unfixAllDefault();
+  /// Fix all active parameters
+  void fixAllActive(bool isDefault = false);
 
   /// Return parameter index from a parameter reference. Usefull for constraints
   /// and ties in composite functions
@@ -447,7 +452,7 @@ public:
   /// Returns the name of active parameter i
   virtual std::string descriptionOfActive(size_t i) const;
   /// Check if an active parameter i is actually active
-  virtual bool isActive(size_t i) const { return !isFixed(i); }
+  bool isActive(size_t i) const;
   //@}
 
   /** @name Ties */
@@ -458,17 +463,17 @@ public:
   /// Add several ties
   virtual void addTies(const std::string &ties, bool isDefault = false);
   /// Apply the ties
-  virtual void applyTies() = 0;
+  virtual void applyTies();
   /// Removes the tie off a parameter
   virtual void removeTie(const std::string &parName);
   /// Remove all ties
-  virtual void clearTies() = 0;
+  virtual void clearTies();
   /// Removes i-th parameter's tie
-  virtual bool removeTie(size_t i) = 0;
+  virtual bool removeTie(size_t i);
   /// Get the tie of i-th parameter
-  virtual ParameterTie *getTie(size_t i) const = 0;
-  /// Add a new tie. Derived classes must provide storage for ties
-  virtual void addTie(std::unique_ptr<ParameterTie> tie) = 0;
+  virtual ParameterTie *getTie(size_t i) const;
+  /// Write a parameter tie to a string
+  std::string writeTies() const;
   //@}
 
   /** @name Constraints */
@@ -476,11 +481,15 @@ public:
   /// Add a list of conatraints from a string
   virtual void addConstraints(const std::string &str, bool isDefault = false);
   /// Add a constraint to function
-  virtual void addConstraint(std::unique_ptr<IConstraint> ic) = 0;
+  virtual void addConstraint(std::unique_ptr<IConstraint> ic);
   /// Get constraint of i-th parameter
-  virtual IConstraint *getConstraint(size_t i) const = 0;
+  virtual IConstraint *getConstraint(size_t i) const;
   /// Remove a constraint
-  virtual void removeConstraint(const std::string &parName) = 0;
+  virtual void removeConstraint(const std::string &parName);
+  /// Write a parameter constraint to a string
+  std::string writeConstraints() const;
+  /// Remove all constraints.
+  virtual void clearConstraints();
   //@}
 
   /** @name Attributes */
@@ -505,7 +514,7 @@ public:
   //@}
 
   /// Set up the function for a fit.
-  virtual void setUpForFit() = 0;
+  virtual void setUpForFit();
   /// Get number of values for a given domain.
   virtual size_t getValuesSize(const FunctionDomain &domain) const;
   /// Get number of domains required by this function
@@ -536,6 +545,18 @@ public:
   /// Return the handler
   FunctionHandler *getHandler() const { return m_handler; }
 
+  /// Describe parameter status in relation to fitting:
+  /// Active: Fit varies such parameter directly.
+  /// Fixed:  Value doesn't change during fit.
+  /// FixedByDefault:  Fixed by default, don't show in ties of
+  ///         the output string.
+  /// Tied:   Value depends on values of other parameters.
+  enum ParameterStatus { Active, Fixed, FixedByDefault, Tied };
+  /// Change status of parameter
+  virtual void setParameterStatus(size_t i, ParameterStatus status) = 0;
+  /// Get status of parameter
+  virtual ParameterStatus getParameterStatus(size_t i) const = 0;
+
 protected:
   /// Function initialization. Declare function parameters in this method.
   virtual void init();
@@ -566,15 +587,13 @@ protected:
   /// A read-only ("mutable") attribute can be stored in a const method
   void storeReadOnlyAttribute(const std::string &name,
                               const API::IFunction::Attribute &value) const;
-
-  /// Write a parameter tie to a string
-  virtual std::string writeTie(size_t iParam) const;
-  /// Write a parameter constraint to a string
-  virtual std::string writeConstraint(size_t iParam) const;
+  /// Add a new tie. Derived classes must provide storage for ties
+  virtual void addTie(std::unique_ptr<ParameterTie> tie);
 
   friend class ParameterTie;
   friend class CompositeFunction;
   friend class FunctionParameterDecorator;
+  friend class FunctionGenerator;
 
   /// Flag to hint that the function is being used in parallel computations
   bool m_isParallel;
@@ -583,7 +602,7 @@ protected:
   FunctionHandler *m_handler;
 
   /// Pointer to the progress handler
-  Kernel::ProgressBase *m_progReporter;
+  boost::shared_ptr<Kernel::ProgressBase> m_progReporter;
 
 private:
   /// The declared attributes
@@ -592,6 +611,10 @@ private:
   boost::shared_ptr<Kernel::Matrix<double>> m_covar;
   /// The chi-squared of the last fit
   double m_chiSquared;
+  /// Holds parameter ties as <parameter index,tie pointer>
+  std::vector<std::unique_ptr<ParameterTie>> m_ties;
+  /// Holds the constraints added to function
+  std::vector<std::unique_ptr<IConstraint>> m_constraints;
 };
 
 /// shared pointer to the function base class
diff --git a/Framework/API/inc/MantidAPI/IFunctionWithLocation.h b/Framework/API/inc/MantidAPI/IFunctionWithLocation.h
index 4582738c2b42d66cec85c0530d22dd3374fce128..624e0647a04c516c641dd3952f744106f9624a82 100644
--- a/Framework/API/inc/MantidAPI/IFunctionWithLocation.h
+++ b/Framework/API/inc/MantidAPI/IFunctionWithLocation.h
@@ -67,7 +67,10 @@ public:
 
   /// Fix a parameter or set up a tie such that value returned
   /// by centre() is constant during fitting.
-  virtual void fixCentre() {
+  /// @param isDefault :: If true fix centre by default:
+  ///    don't show it in ties
+  virtual void fixCentre(bool isDefault = false) {
+    UNUSED_ARG(isDefault);
     throw std::runtime_error(
         "Generic centre fixing isn't implemented for this function.");
   }
diff --git a/Framework/API/inc/MantidAPI/IPeakFunction.h b/Framework/API/inc/MantidAPI/IPeakFunction.h
index 3636919b41686ed772c00792659cea37e199d881..16984f0a6c180b63fe788bfec68c5ebd191cf4fb 100644
--- a/Framework/API/inc/MantidAPI/IPeakFunction.h
+++ b/Framework/API/inc/MantidAPI/IPeakFunction.h
@@ -79,7 +79,10 @@ public:
 
   /// Fix a parameter or set up a tie such that value returned
   /// by intensity() is constant during fitting.
-  virtual void fixIntensity() {
+  /// @param isDefault :: If true fix intensity by default:
+  ///    don't show it in ties
+  virtual void fixIntensity(bool isDefault = false) {
+    UNUSED_ARG(isDefault);
     throw std::runtime_error(
         "Generic intensity fixing isn't implemented for this function.");
   }
diff --git a/Framework/API/inc/MantidAPI/ParamFunction.h b/Framework/API/inc/MantidAPI/ParamFunction.h
index 21fe0a87fb8b3f753f55a79454b3a8a975640bb9..c6c3783ffcc8b75217c6c243fd870ce4a7133cc8 100644
--- a/Framework/API/inc/MantidAPI/ParamFunction.h
+++ b/Framework/API/inc/MantidAPI/ParamFunction.h
@@ -50,8 +50,6 @@ class MANTID_API_DLL ParamFunction : public virtual IFunction {
 public:
   /// Default constructor
   ParamFunction() {}
-  /// Virtual destructor
-  ~ParamFunction() override;
 
   /// Set i-th parameter
   void setParameter(size_t, const double &value,
@@ -83,13 +81,6 @@ public:
   /// Set the fitting error for a parameter
   void setError(size_t i, double err) override;
 
-  /// Check if a declared parameter i is active
-  bool isFixed(size_t i) const override;
-  /// Removes a declared parameter i from the list of active
-  void fix(size_t i) override;
-  /// Restores a declared parameter i to the active status
-  void unfix(size_t i) override;
-
   /// Return parameter index from a parameter reference. Usefull for constraints
   /// and ties in composite functions
   size_t getParameterIndex(const ParameterReference &ref) const override;
@@ -98,53 +89,37 @@ public:
   /// Get the containing function
   IFunction_sptr getContainingFunction(IFunction_sptr fun);
 
-  /// Apply the ties
-  void applyTies() override;
-  /// Remove all ties
-  void clearTies() override;
-  void removeTie(const std::string &parName) override {
-    IFunction::removeTie(parName);
-  }
-  /// Removes i-th parameter's tie
-  bool removeTie(size_t i) override;
-  /// Get the tie of i-th parameter
-  ParameterTie *getTie(size_t i) const override;
-  /// Add a new tie
-  void addTie(std::unique_ptr<ParameterTie> tie) override;
-
-  /// Add a constraint to function
-  void addConstraint(std::unique_ptr<IConstraint> ic) override;
-  /// Get constraint of i-th parameter
-  IConstraint *getConstraint(size_t i) const override;
-  /// Remove a constraint
-  void removeConstraint(const std::string &parName) override;
-  /// Set parameters to satisfy constraints
-  void setUpForFit() override;
-
 protected:
   /// Declare a new parameter
   void declareParameter(const std::string &name, double initValue = 0,
                         const std::string &description = "") override;
-
   /// Get the address of the parameter. For use in UserFunction with mu::Parser
   virtual double *getParameterAddress(size_t i);
-
   /// Nonvirtual member which removes all declared parameters
   void clearAllParameters();
+  /// Change status of parameter
+  void setParameterStatus(size_t i, ParameterStatus status) override;
+  /// Get status of parameter
+  ParameterStatus getParameterStatus(size_t i) const override;
 
 private:
-  /// The index map. m_indexMap[i] gives the total index for active parameter i
-  std::vector<bool> m_isFixed;
+  /// Check that a parameter index is in a valid range.
+  /// @param i :: Index to check.
+  inline void checkParameterIndex(size_t i) const {
+    if (i >= nParams()) {
+      throw std::out_of_range("ParamFunction parameter index " +
+                              std::to_string(i) + " out of range " +
+                              std::to_string(nParams()));
+    }
+  }
+  /// Keeps status for each parameter.
+  std::vector<ParameterStatus> m_parameterStatus;
   /// Keeps parameter names
   std::vector<std::string> m_parameterNames;
   /// Keeps parameter values
   std::vector<double> m_parameters;
   /// Keeps parameter errors
   std::vector<double> m_errors;
-  /// Holds parameter ties as <parameter index,tie pointer>
-  std::vector<std::unique_ptr<ParameterTie>> m_ties;
-  /// Holds the constraints added to function
-  std::vector<std::unique_ptr<IConstraint>> m_constraints;
   /// Flags of explicitly set parameters
   std::vector<bool> m_explicitlySet;
   /// parameter descriptions
diff --git a/Framework/API/inc/MantidAPI/ParameterReference.h b/Framework/API/inc/MantidAPI/ParameterReference.h
index 12b77ea8a31a329b4cc721687985c5ada679561c..d8a431574c2c96d2639565712a06c196158a218f 100644
--- a/Framework/API/inc/MantidAPI/ParameterReference.h
+++ b/Framework/API/inc/MantidAPI/ParameterReference.h
@@ -5,14 +5,11 @@
 // Includes
 //----------------------------------------------------------------------
 #include "MantidAPI/DllConfig.h"
-#include "MantidAPI/IFunction.h"
-
-namespace mu {
-class Parser;
-}
+#include <string>
 
 namespace Mantid {
 namespace API {
+class IFunction;
 /**
     A reference to a parameter in a function. To uniquely identify a parameter
     in a composite function
@@ -45,20 +42,30 @@ class MANTID_API_DLL ParameterReference {
 public:
   ParameterReference();
   ParameterReference(IFunction *fun, std::size_t index, bool isDefault = false);
-  std::size_t getIndex() const;
-  void reset(IFunction *fun, std::size_t index, bool isDefault = false);
-  void setParameter(const double &value);
+  void setParameter(const double &value, bool isExplicitlySet = true);
   double getParameter() const;
-  IFunction *getFunction() const;
   bool isDefault() const;
+  bool isParameterOf(const IFunction *fun) const;
   virtual ~ParameterReference() = default;
+  IFunction *getLocalFunction() const;
+  std::size_t getLocalIndex() const;
+  std::size_t parameterIndex() const;
+  std::string parameterName() const;
+
+protected:
+  void reset(IFunction *fun, std::size_t index, bool isDefault = false);
 
 private:
-  IFunction *m_function; ///< pointer to the function
-  std::size_t m_index;   ///< parameter index
+  /// Function-owner of this reference. parameterName() and parameterIndex()
+  /// return values relative to this function.
+  IFunction *m_owner;
+  /// Function that together with m_index uniquely identify the parameter.
+  IFunction *m_function;
+  /// Index of the parameter in m_function. It is assumed that this index
+  /// uniquely identifies the parameter withing m_function
+  std::size_t m_index;
   /// Flag to mark as default the value of an object associated with this
-  /// reference:
-  /// a tie or a constraint.
+  /// reference: a tie or a constraint.
   bool m_isDefault;
 };
 
diff --git a/Framework/API/inc/MantidAPI/ParameterTie.h b/Framework/API/inc/MantidAPI/ParameterTie.h
index 6525e08764aeeb99236c60a5db6287b34869e972..3c3586b0cbd8b21adafc61a0694bcb6fea4b33ab 100644
--- a/Framework/API/inc/MantidAPI/ParameterTie.h
+++ b/Framework/API/inc/MantidAPI/ParameterTie.h
@@ -5,8 +5,8 @@
 // Includes
 //----------------------------------------------------------------------
 #include "MantidAPI/DllConfig.h"
-#include "MantidAPI/IFunction.h"
 #include "MantidAPI/ParameterReference.h"
+#include <map>
 
 namespace mu {
 class Parser;
diff --git a/Framework/API/inc/MantidAPI/NearestNeighbourInfo.h b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
similarity index 75%
rename from Framework/API/inc/MantidAPI/NearestNeighbourInfo.h
rename to Framework/API/inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
index 894c8c43e130d3a1b806f36936245fb558966757..a4fe24169d07fd1187f39274ca88d33d1e5273be 100644
--- a/Framework/API/inc/MantidAPI/NearestNeighbourInfo.h
+++ b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbourInfo.h
@@ -15,10 +15,10 @@ class IDetector;
 namespace API {
 
 class MatrixWorkspace;
-class NearestNeighbours;
+class WorkspaceNearestNeighbours;
 
-/** NearestNeighbourInfo provides easy access to nearest-neighbour information
-  for a workspace.
+/** WorkspaceNearestNeighbourInfo provides easy access to nearest-neighbour
+  information for a workspace.
 
   Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
   National Laboratory & European Spallation Source
@@ -41,12 +41,12 @@ class NearestNeighbours;
   File change history is stored at: <https://github.com/mantidproject/mantid>
   Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class MANTID_API_DLL NearestNeighbourInfo {
+class MANTID_API_DLL WorkspaceNearestNeighbourInfo {
 public:
-  NearestNeighbourInfo(const MatrixWorkspace &workspace,
-                       const bool ignoreMaskedDetectors,
-                       const int nNeighbours = 8);
-  ~NearestNeighbourInfo();
+  WorkspaceNearestNeighbourInfo(const MatrixWorkspace &workspace,
+                                const bool ignoreMaskedDetectors,
+                                const int nNeighbours = 8);
+  ~WorkspaceNearestNeighbourInfo();
 
   std::map<specnum_t, Kernel::V3D>
   getNeighbours(const Geometry::IDetector *comp,
@@ -57,10 +57,10 @@ public:
 
 private:
   const MatrixWorkspace &m_workspace;
-  std::unique_ptr<NearestNeighbours> m_nearestNeighbours;
+  std::unique_ptr<WorkspaceNearestNeighbours> m_nearestNeighbours;
 };
 
 } // namespace API
 } // namespace Mantid
 
-#endif /* MANTID_API_NEARESTNEIGHBOURINFO_H_ */
+#endif /* MANTID_API_WORKSPACENEARESTNEIGHBOURINFO_H_ */
diff --git a/Framework/API/inc/MantidAPI/NearestNeighbours.h b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbours.h
similarity index 92%
rename from Framework/API/inc/MantidAPI/NearestNeighbours.h
rename to Framework/API/inc/MantidAPI/WorkspaceNearestNeighbours.h
index cbe5525b5624ae76218e498894b212283735bacf..218c656f74827930156b2eae93168776e8dfd46a 100644
--- a/Framework/API/inc/MantidAPI/NearestNeighbours.h
+++ b/Framework/API/inc/MantidAPI/WorkspaceNearestNeighbours.h
@@ -19,7 +19,8 @@ class IDetector;
 namespace API {
 class SpectrumInfo;
 /**
- * This class is not intended for direct use. Use NearestNeighbourInfo instead!
+ * This class is not intended for direct use. Use WorkspaceNearestNeighbourInfo
+ * instead!
  *
  * This class is used to find the nearest neighbours of a detector in the
  * instrument geometry. This class can be queried through calls to the
@@ -58,11 +59,11 @@ class SpectrumInfo;
  *  File change history is stored at: <https://github.com/mantidproject/mantid>
  *  Code Documentation is available at: <http://doxygen.mantidproject.org>
  */
-class MANTID_API_DLL NearestNeighbours {
+class MANTID_API_DLL WorkspaceNearestNeighbours {
 public:
-  NearestNeighbours(int nNeighbours, const SpectrumInfo &spectrumInfo,
-                    std::vector<specnum_t> spectrumNumbers,
-                    bool ignoreMaskedDetectors = false);
+  WorkspaceNearestNeighbours(int nNeighbours, const SpectrumInfo &spectrumInfo,
+                             std::vector<specnum_t> spectrumNumbers,
+                             bool ignoreMaskedDetectors = false);
 
   // Neighbouring spectra by radius
   std::map<specnum_t, Mantid::Kernel::V3D>
diff --git a/Framework/API/src/CompositeFunction.cpp b/Framework/API/src/CompositeFunction.cpp
index 0608adeec8aca3097b0a1c20da4e9db03046f940..0111d46458d5ecb801bda6f1aee4090aeb6043af 100644
--- a/Framework/API/src/CompositeFunction.cpp
+++ b/Framework/API/src/CompositeFunction.cpp
@@ -7,6 +7,7 @@
 #include "MantidAPI/FunctionFactory.h"
 #include "MantidKernel/Exception.h"
 #include "MantidKernel/Logger.h"
+#include "MantidKernel/Strings.h"
 
 #include <boost/lexical_cast.hpp>
 #include <boost/shared_array.hpp>
@@ -89,26 +90,21 @@ std::string CompositeFunction::asString() const {
       ostr << ';';
     }
   }
-  std::string ties;
-  for (size_t i = 0; i < nParams(); i++) {
-    const ParameterTie *tie = getTie(i);
-    if (tie) {
-      IFunction_sptr fun = getFunction(functionIndex(i));
-      std::string tmp = tie->asString(fun.get());
-      if (tmp.empty()) {
-        tmp = tie->asString(this);
-        if (!tmp.empty()) {
-          if (!ties.empty()) {
-            ties += ",";
-          }
-          ties += tmp;
-        }
-      }
-    }
+
+  // collect non-default constraints
+  std::string constraints = writeConstraints();
+  // print constraints
+  if (!constraints.empty()) {
+    ostr << ";constraints=(" << constraints << ")";
   }
+
+  // collect the non-default ties
+  std::string ties = writeTies();
+  // print the ties
   if (!ties.empty()) {
     ostr << ";ties=(" << ties << ")";
   }
+
   return ostr.str();
 }
 
@@ -329,40 +325,18 @@ std::string CompositeFunction::descriptionOfActive(size_t i) const {
   return ostr.str();
 }
 
-/**
- * query to see in the function is active
- * @param i :: The index of a declared parameter
- * @return true if parameter i is active
- */
-bool CompositeFunction::isActive(size_t i) const {
-  size_t iFun = functionIndex(i);
-  return m_functions[iFun]->isActive(i - m_paramOffsets[iFun]);
-}
-
-/**
- * query to see in the function is active
- * @param i :: The index of a declared parameter
- * @return true if parameter i is active
- */
-bool CompositeFunction::isFixed(size_t i) const {
+/// Change status of parameter
+void CompositeFunction::setParameterStatus(size_t i,
+                                           IFunction::ParameterStatus status) {
   size_t iFun = functionIndex(i);
-  return m_functions[iFun]->isFixed(i - m_paramOffsets[iFun]);
+  m_functions[iFun]->setParameterStatus(i - m_paramOffsets[iFun], status);
 }
 
-/**
- * @param i :: A declared parameter index to be removed from active
- */
-void CompositeFunction::fix(size_t i) {
-  size_t iFun = functionIndex(i);
-  m_functions[iFun]->fix(i - m_paramOffsets[iFun]);
-}
-
-/** Makes a parameter active again. It doesn't change the parameter's tie.
- * @param i :: A declared parameter index to be restored to active
- */
-void CompositeFunction::unfix(size_t i) {
+/// Get status of parameter
+IFunction::ParameterStatus
+CompositeFunction::getParameterStatus(size_t i) const {
   size_t iFun = functionIndex(i);
-  m_functions[iFun]->unfix(i - m_paramOffsets[iFun]);
+  return m_functions[iFun]->getParameterStatus(i - m_paramOffsets[iFun]);
 }
 
 /** Makes sure that the function is consistent.
@@ -423,7 +397,7 @@ void CompositeFunction::removeFunction(size_t i) {
   }
 
   IFunction_sptr fun = getFunction(i);
-
+  // Reduction in parameters
   size_t dnp = fun->nParams();
 
   for (size_t j = 0; j < nParams();) {
@@ -620,12 +594,14 @@ void CompositeFunction::applyTies() {
   for (size_t i = 0; i < nFunctions(); i++) {
     getFunction(i)->applyTies();
   }
+  IFunction::applyTies();
 }
 
 /**
  * Clear the ties.
  */
 void CompositeFunction::clearTies() {
+  IFunction::clearTies();
   for (size_t i = 0; i < nFunctions(); i++) {
     getFunction(i)->clearTies();
   }
@@ -636,9 +612,13 @@ void CompositeFunction::clearTies() {
  * @return True if successfull
  */
 bool CompositeFunction::removeTie(size_t i) {
-  size_t iFun = functionIndex(i);
-  bool res = m_functions[iFun]->removeTie(i - m_paramOffsets[iFun]);
-  return res;
+  bool foundAndRemovedTie = IFunction::removeTie(i);
+  if (!foundAndRemovedTie) {
+    size_t iFun = functionIndex(i);
+    bool res = m_functions[iFun]->removeTie(i - m_paramOffsets[iFun]);
+    return res;
+  }
+  return foundAndRemovedTie;
 }
 
 /** Get the tie of i-th parameter
@@ -646,18 +626,12 @@ bool CompositeFunction::removeTie(size_t i) {
  * @return A pointer to the tie.
  */
 ParameterTie *CompositeFunction::getTie(size_t i) const {
-  size_t iFun = functionIndex(i);
-  return m_functions[iFun]->getTie(i - m_paramOffsets[iFun]);
-}
-
-/**
- * Attaches a tie to this function. The attached tie is owned by the function.
- * @param tie :: A pointer to a new tie
- */
-void CompositeFunction::addTie(std::unique_ptr<ParameterTie> tie) {
-  size_t i = getParameterIndex(*tie);
-  size_t iFun = functionIndex(i);
-  m_functions[iFun]->addTie(std::move(tie));
+  auto tie = IFunction::getTie(i);
+  if (tie == nullptr) {
+    size_t iFun = functionIndex(i);
+    tie = m_functions[iFun]->getTie(i - m_paramOffsets[iFun]);
+  }
+  return tie;
 }
 
 /**
@@ -676,19 +650,11 @@ void CompositeFunction::declareParameter(const std::string &name,
       "CompositeFunction cannot not have its own parameters.");
 }
 
-/** Add a constraint
- *  @param ic :: Pointer to a constraint.
- */
-void CompositeFunction::addConstraint(std::unique_ptr<IConstraint> ic) {
-  size_t i = getParameterIndex(*ic);
-  size_t iFun = functionIndex(i);
-  getFunction(iFun)->addConstraint(std::move(ic));
-}
-
 /**
  * Prepare the function for a fit.
  */
 void CompositeFunction::setUpForFit() {
+  IFunction::setUpForFit();
   // set up the member functions
   for (size_t i = 0; i < nFunctions(); i++) {
     getFunction(i)->setUpForFit();
@@ -726,17 +692,27 @@ void CompositeFunction::setUpForFit() {
 /// @param i :: the index
 /// @return A pointer to the constraint
 IConstraint *CompositeFunction::getConstraint(size_t i) const {
-  size_t iFun = functionIndex(i);
-  return m_functions[iFun]->getConstraint(i - m_paramOffsets[iFun]);
+  auto constraint = IFunction::getConstraint(i);
+  if (constraint == nullptr) {
+    size_t iFun = functionIndex(i);
+    constraint = m_functions[iFun]->getConstraint(i - m_paramOffsets[iFun]);
+  }
+  return constraint;
 }
 
 /** Remove a constraint
  * @param parName :: The name of a parameter which constarint to remove.
  */
 void CompositeFunction::removeConstraint(const std::string &parName) {
-  size_t iPar = parameterIndex(parName);
-  size_t iFun = functionIndex(iPar);
-  getFunction(iFun)->removeConstraint(parameterLocalName(iPar));
+  auto i = parameterIndex(parName);
+  auto constraint = IFunction::getConstraint(i);
+  if (constraint != nullptr) {
+    IFunction::removeConstraint(parName);
+  } else {
+    size_t iPar = parameterIndex(parName);
+    size_t iFun = functionIndex(iPar);
+    getFunction(iFun)->removeConstraint(parameterLocalName(iPar));
+  }
 }
 
 /** Checks if a constraint has been explicitly set
@@ -756,8 +732,8 @@ bool CompositeFunction::isExplicitlySet(size_t i) const {
  */
 size_t
 CompositeFunction::getParameterIndex(const ParameterReference &ref) const {
-  if (ref.getFunction() == this && ref.getIndex() < nParams()) {
-    return ref.getIndex();
+  if (ref.getLocalFunction() == this && ref.getLocalIndex() < nParams()) {
+    return ref.getLocalIndex();
   }
   for (size_t iFun = 0; iFun < nFunctions(); iFun++) {
     IFunction_sptr fun = getFunction(iFun);
diff --git a/Framework/API/src/DetectorSearcher.cpp b/Framework/API/src/DetectorSearcher.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f95258958694bd84316a044a7a054943f0ad052a
--- /dev/null
+++ b/Framework/API/src/DetectorSearcher.cpp
@@ -0,0 +1,256 @@
+#include "MantidAPI/DetectorSearcher.h"
+#include "MantidGeometry/Instrument/ReferenceFrame.h"
+#include "MantidKernel/ConfigService.h"
+#include "MantidKernel/NearestNeighbours.h"
+
+#include <tuple>
+
+using Mantid::Kernel::V3D;
+using Mantid::Geometry::InstrumentRayTracer;
+using Mantid::Geometry::IDetector;
+using Mantid::Geometry::ReferenceFrame;
+using namespace Mantid;
+using namespace Mantid::API;
+
+double getQSign() {
+  const auto convention =
+      Kernel::ConfigService::Instance().getString("Q.convention");
+  return (convention == "Crystallography") ? -1.0 : 1.0;
+}
+
+/** Create a new DetectorSearcher for the given instrument
+ *
+ * The search strategy will be determined in the constructor based on the
+ * given instrument geometry
+ *
+ * @param instrument :: the instrument to find detectors in
+ * @param detInfo :: the API::DetectorInfo object for this instrument
+ */
+DetectorSearcher::DetectorSearcher(Geometry::Instrument_const_sptr instrument,
+                                   const API::DetectorInfo &detInfo)
+    : m_usingFullRayTrace(instrument->containsRectDetectors() ==
+                          Geometry::Instrument::ContainsState::Full),
+      m_crystallography_convention(getQSign()), m_detInfo(detInfo),
+      m_instrument(instrument) {
+
+  /* Choose the search strategy to use
+   * If the instrument uses rectangular detectors (e.g. TOPAZ) then it is faster
+   * to run a full ray trace starting from the top of the instrument. This is
+   * due to the speed up of looking up a single pixel in the rectangular
+   * detector.
+   *
+   * If the instrument does not use rectangular detectors (e.g. WISH, CORELLI)
+   * then it is faster to use a nearest neighbour search to find the closest
+   * pixels, then check them for intersection.
+   * */
+  if (!m_usingFullRayTrace) {
+    createDetectorCache();
+  } else {
+    m_rayTracer = Kernel::make_unique<InstrumentRayTracer>(instrument);
+  }
+}
+
+/** Create a NearestNeighbours search tree for the current instrument
+ */
+void DetectorSearcher::createDetectorCache() {
+  std::vector<Eigen::Vector3d> points;
+  points.reserve(m_detInfo.size());
+  m_indexMap.reserve(m_detInfo.size());
+
+  const auto frame = m_instrument->getReferenceFrame();
+  auto beam = frame->vecPointingAlongBeam();
+  auto up = frame->vecPointingUp();
+  beam.normalize();
+
+  for (size_t pointNo = 0; pointNo < m_detInfo.size(); ++pointNo) {
+    if (m_detInfo.isMonitor(pointNo) || m_detInfo.isMasked(pointNo))
+      continue; // detector is a monitor or masked so don't use
+
+    // Calculate a unit Q vector for each detector
+    // This follows a method similar to that used in IntegrateEllipsoids
+    auto pos = m_detInfo.position(pointNo);
+    pos.normalize();
+    auto E1 = (pos - beam) * -m_crystallography_convention;
+    E1.normalize();
+
+    Eigen::Vector3d point(E1[0], E1[1], E1[2]);
+
+    // Ignore nonsensical points
+    if (point.hasNaN() || up.coLinear(beam, pos))
+      continue;
+
+    points.push_back(point);
+    m_indexMap.push_back(pointNo);
+  }
+
+  // create KDtree of cached detector Q vectors
+  m_detectorCacheSearch =
+      Kernel::make_unique<Kernel::NearestNeighbours<3>>(points);
+}
+
+/** Find the index of a detector given a vector in Qlab space
+ *
+ * If no detector is found the first parameter of the returned tuple is false
+ *
+ * @param q :: the Qlab vector to find a detector for
+ * @return tuple with data <detector found, detector index>
+ */
+DetectorSearcher::DetectorSearchResult
+DetectorSearcher::findDetectorIndex(const V3D &q) {
+  // quick check to see if this Q is valid
+  if (q.nullVector())
+    return std::make_tuple(false, 0);
+
+  // search using best strategy for current instrument
+  if (m_usingFullRayTrace) {
+    return searchUsingInstrumentRayTracing(q);
+  } else {
+    return searchUsingNearestNeighbours(q);
+  }
+}
+
+/** Find the index of a detector given a vector in Qlab space using a ray
+ * tracing search strategy
+ *
+ * If no detector is found the first parameter of the returned tuple is false
+ *
+ * @param q :: the Qlab vector to find a detector for
+ * @return tuple with data <detector found, detector index>
+ */
+DetectorSearcher::DetectorSearchResult
+DetectorSearcher::searchUsingInstrumentRayTracing(const V3D &q) {
+  const auto direction = convertQtoDirection(q);
+  m_rayTracer->traceFromSample(direction);
+  const auto det = m_rayTracer->getDetectorResult();
+
+  if (!det)
+    return std::make_tuple(false, 0);
+
+  const auto detIndex = m_detInfo.indexOf(det->getID());
+
+  if (m_detInfo.isMasked(detIndex) || m_detInfo.isMonitor(detIndex))
+    return std::make_tuple(false, 0);
+
+  return std::make_tuple(true, detIndex);
+}
+
+/** Find the index of a detector given a vector in Qlab space using a nearest
+ * neighbours search strategy
+ *
+ * If no detector is found the first parameter of the returned tuple is false
+ *
+ * @param q :: the Qlab vector to find a detector for
+ * @return tuple with data <detector found, detector index>
+ */
+DetectorSearcher::DetectorSearchResult
+DetectorSearcher::searchUsingNearestNeighbours(const V3D &q) {
+  const auto detectorDir = convertQtoDirection(q);
+  // find where this Q vector should intersect with "extended" space
+  const auto neighbours =
+      m_detectorCacheSearch->findNearest(Eigen::Vector3d(q[0], q[1], q[2]), 5);
+  if (neighbours.size() == 0)
+    return std::make_tuple(false, 0);
+
+  const auto result = checkInteceptWithNeighbours(detectorDir, neighbours);
+  const auto hitDetector = std::get<0>(result);
+  const auto index = std::get<1>(result);
+
+  if (hitDetector)
+    return std::make_tuple(true, m_indexMap[index]);
+
+  // Tube Gap Parameter specifically applies to tube instruments
+  if (!hitDetector && m_instrument->hasParameter("tube-gap")) {
+    return handleTubeGap(detectorDir, neighbours);
+  }
+
+  return std::make_tuple(false, 0);
+}
+
+/** Handle the tube-gap parameter in tube based instruments.
+ *
+ * This will check for interceptions with the nearest neighbours by "wiggling"
+ * the predicted detector direction slightly.
+ *
+ * @param detectorDir :: the predicted direction towards a detector
+ * @param neighbours :: the NearestNeighbour results to check interception with
+ * @return a detector search result with whether a detector was hit
+ */
+DetectorSearcher::DetectorSearchResult DetectorSearcher::handleTubeGap(
+    const V3D &detectorDir,
+    const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours) {
+  std::vector<double> gaps = m_instrument->getNumberParameter("tube-gap", true);
+  if (!gaps.empty()) {
+    const auto gap = static_cast<double>(gaps.front());
+    // try adding and subtracting tube-gap in 3 q dimensions to see if you can
+    // find detectors on each side of tube gap
+    for (int i = 0; i < 3; i++) {
+      auto gapDir = V3D(0., 0., 0.);
+      gapDir[i] = gap;
+
+      auto beam1 = detectorDir + gapDir;
+      const auto result1 = checkInteceptWithNeighbours(beam1, neighbours);
+      const auto hit1 = std::get<0>(result1);
+
+      auto beam2 = detectorDir - gapDir;
+      const auto result2 = checkInteceptWithNeighbours(beam2, neighbours);
+      const auto hit2 = std::get<0>(result2);
+
+      if (hit1 && hit2) {
+        // Set the detector to one of the neighboring pixels
+        return std::make_tuple(true, m_indexMap[std::get<1>(result1)]);
+      }
+    }
+  }
+
+  return std::make_tuple(false, 0);
+}
+
+/** Check whether the given direction in real space intersects with any of the
+ * k nearest neighbours
+ *
+ * @param direction :: real space direction vector
+ * @param neighbours :: vector of nearest neighbours to check
+ * @return tuple of <detector hit, index of correct index in m_IndexMap>
+ */
+std::tuple<bool, size_t> DetectorSearcher::checkInteceptWithNeighbours(
+    const V3D &direction,
+    const Kernel::NearestNeighbours<3>::NearestNeighbourResults &neighbours)
+    const {
+  Geometry::Track track(m_detInfo.samplePosition(), direction);
+  // Find which of the neighbours we actually intersect with
+  for (const auto &neighbour : neighbours) {
+    const auto index = std::get<1>(neighbour);
+    const auto &det = m_detInfo.detector(m_indexMap[index]);
+
+    Mantid::Geometry::BoundingBox bb;
+    if (!bb.doesLineIntersect(track))
+      continue;
+
+    const auto hitDetector = det.interceptSurface(track) > 0;
+    if (hitDetector)
+      return std::make_tuple(hitDetector, index);
+
+    track.reset(m_detInfo.samplePosition(), direction);
+  }
+
+  return std::make_tuple(false, 0);
+}
+
+/** Helper method to convert a vector in Qlab to a direction in detector space
+ *
+ * @param q :: a Qlab vector
+ * @return a direction in detector space
+ */
+V3D DetectorSearcher::convertQtoDirection(const V3D &q) const {
+  const auto norm_q = q.norm();
+  const auto refFrame = m_instrument->getReferenceFrame();
+  const V3D refBeamDir = refFrame->vecPointingAlongBeam();
+
+  const double qBeam = q.scalar_prod(refBeamDir) * m_crystallography_convention;
+  double one_over_wl = (norm_q * norm_q) / (2.0 * qBeam);
+
+  auto detectorDir = q * -m_crystallography_convention;
+  detectorDir[refFrame->pointingAlongBeam()] = one_over_wl - qBeam;
+  detectorDir.normalize();
+  return detectorDir;
+}
diff --git a/Framework/CurveFitting/src/Functions/FunctionGenerator.cpp b/Framework/API/src/FunctionGenerator.cpp
similarity index 69%
rename from Framework/CurveFitting/src/Functions/FunctionGenerator.cpp
rename to Framework/API/src/FunctionGenerator.cpp
index f8f4458efcac912f39009b8177e3991bec29fca7..3d3851fa74a82aebc9fc384af3a3c1984d641bb0 100644
--- a/Framework/CurveFitting/src/Functions/FunctionGenerator.cpp
+++ b/Framework/API/src/FunctionGenerator.cpp
@@ -1,19 +1,14 @@
-#include "MantidCurveFitting/Functions/FunctionGenerator.h"
+#include "MantidAPI/FunctionGenerator.h"
 #include "MantidAPI/IConstraint.h"
 #include "MantidAPI/ParameterTie.h"
 
 namespace Mantid {
-namespace CurveFitting {
-namespace Functions {
-
-using namespace CurveFitting;
+namespace API {
 
 using namespace Kernel;
 
-using namespace API;
-
 /// Constructor
-FunctionGenerator::FunctionGenerator(API::IFunction_sptr source)
+FunctionGenerator::FunctionGenerator(IFunction_sptr source)
     : m_source(source), m_nOwnParams(source->nParams()), m_dirty(true) {
   if (!m_source) {
     throw std::logic_error(
@@ -128,38 +123,33 @@ void FunctionGenerator::setError(size_t i, double err) {
   }
 }
 
-/// Check if a declared parameter i is fixed
-bool FunctionGenerator::isFixed(size_t i) const {
-  checkTargetFunction();
-  return i < m_nOwnParams ? m_source->isFixed(i)
-                          : m_target->isFixed(i - m_nOwnParams);
-}
-
-/// Removes a declared parameter i from the list of active
-void FunctionGenerator::fix(size_t i) {
+/// Change status of parameter
+void FunctionGenerator::setParameterStatus(size_t i,
+                                           IFunction::ParameterStatus status) {
   if (i < m_nOwnParams) {
-    m_source->fix(i);
+    m_source->setParameterStatus(i, status);
   } else {
     checkTargetFunction();
-    m_target->fix(i - m_nOwnParams);
+    m_target->setParameterStatus(i - m_nOwnParams, status);
   }
 }
 
-/// Restores a declared parameter i to the active status
-void FunctionGenerator::unfix(size_t i) {
+/// Get status of parameter
+IFunction::ParameterStatus
+FunctionGenerator::getParameterStatus(size_t i) const {
   if (i < m_nOwnParams) {
-    m_source->unfix(i);
+    return m_source->getParameterStatus(i);
   } else {
     checkTargetFunction();
-    m_target->unfix(i - m_nOwnParams);
+    return m_target->getParameterStatus(i - m_nOwnParams);
   }
 }
 
 /// Return parameter index from a parameter reference.
 size_t
 FunctionGenerator::getParameterIndex(const ParameterReference &ref) const {
-  if (ref.getFunction() == this) {
-    auto index = ref.getIndex();
+  if (ref.getLocalFunction() == this) {
+    auto index = ref.getLocalIndex();
     auto np = nParams();
     if (index < np) {
       return index;
@@ -170,90 +160,12 @@ FunctionGenerator::getParameterIndex(const ParameterReference &ref) const {
   return m_target->getParameterIndex(ref) + m_nOwnParams;
 }
 
-/// Tie a parameter to other parameters (or a constant)
-void FunctionGenerator::tie(const std::string &parName, const std::string &expr,
-                            bool isDefault) {
-  if (isSourceName(parName)) {
-    m_source->tie(parName, expr, isDefault);
-  } else {
-    checkTargetFunction();
-    m_target->tie(parName, expr, isDefault);
-  }
-}
-
-/// Apply the ties
-void FunctionGenerator::applyTies() {
-  m_source->applyTies();
+/// Set up the function for a fit.
+void FunctionGenerator::setUpForFit() {
   updateTargetFunction();
-  if (m_target) {
-    m_target->applyTies();
-  }
-}
-
-/// Remove all ties
-void FunctionGenerator::clearTies() {
-  m_source->clearTies();
-  if (m_target) {
-    m_target->clearTies();
-  }
-}
-
-/// Removes i-th parameter's tie
-bool FunctionGenerator::removeTie(size_t i) {
-  if (i < m_nOwnParams) {
-    return m_source->removeTie(i);
-  } else {
-    checkTargetFunction();
-    return m_target->removeTie(i - m_nOwnParams);
-  }
-}
-
-/// Get the tie of i-th parameter
-ParameterTie *FunctionGenerator::getTie(size_t i) const {
-  if (i < m_nOwnParams) {
-    return m_source->getTie(i);
-  } else {
-    checkTargetFunction();
-    return m_target->getTie(i - m_nOwnParams);
-  }
-}
-
-/// Add a constraint to function
-void FunctionGenerator::addConstraint(std::unique_ptr<API::IConstraint> ic) {
-  auto i = ic->getIndex();
-  if (i < m_nOwnParams) {
-    ic->reset(m_source.get(), i);
-    m_source->addConstraint(std::move(ic));
-  } else {
-    checkTargetFunction();
-    ic->reset(m_target.get(), i - m_nOwnParams);
-    m_target->addConstraint(std::move(ic));
-  }
+  IFunction::setUpForFit();
 }
 
-/// Get constraint of i-th parameter
-IConstraint *FunctionGenerator::getConstraint(size_t i) const {
-  if (i < m_nOwnParams) {
-    return m_source->getConstraint(i);
-  } else {
-    checkTargetFunction();
-    return m_target->getConstraint(i - m_nOwnParams);
-  }
-}
-
-/// Remove a constraint
-void FunctionGenerator::removeConstraint(const std::string &parName) {
-  if (isSourceName(parName)) {
-    m_source->removeConstraint(parName);
-  } else {
-    checkTargetFunction();
-    m_target->removeConstraint(parName);
-  }
-}
-
-/// Set up the function for a fit.
-void FunctionGenerator::setUpForFit() { updateTargetFunction(); }
-
 /// Declare a new parameter
 void FunctionGenerator::declareParameter(const std::string &, double,
                                          const std::string &) {
@@ -261,19 +173,6 @@ void FunctionGenerator::declareParameter(const std::string &, double,
       "FunctionGenerator cannot not have its own parameters.");
 }
 
-/// Add a new tie. Derived classes must provide storage for ties
-void FunctionGenerator::addTie(std::unique_ptr<API::ParameterTie> tie) {
-  size_t i = getParameterIndex(*tie);
-  if (i < m_nOwnParams) {
-    m_source->addTie(std::move(tie));
-  } else {
-    checkTargetFunction();
-    tie->reset(m_target.get(), tie->getIndex() - m_nOwnParams,
-               tie->isDefault());
-    m_target->addTie(std::move(tie));
-  }
-}
-
 /// Returns the number of attributes associated with the function
 size_t FunctionGenerator::nAttributes() const {
   checkTargetFunction();
@@ -293,7 +192,7 @@ std::vector<std::string> FunctionGenerator::getAttributeNames() const {
 }
 
 /// Return a value of attribute attName
-API::IFunction::Attribute
+IFunction::Attribute
 FunctionGenerator::getAttribute(const std::string &attName) const {
   if (IFunction::hasAttribute(attName)) {
     return IFunction::getAttribute(attName);
@@ -335,8 +234,8 @@ bool FunctionGenerator::hasAttribute(const std::string &attName) const {
 }
 
 // Evaluates the function
-void FunctionGenerator::function(const API::FunctionDomain &domain,
-                                 API::FunctionValues &values) const {
+void FunctionGenerator::function(const FunctionDomain &domain,
+                                 FunctionValues &values) const {
   updateTargetFunction();
   if (!m_target) {
     throw std::logic_error(
@@ -366,6 +265,34 @@ void FunctionGenerator::checkTargetFunction() const {
   }
 }
 
-} // namespace Functions
-} // namespace CurveFitting
+/// Get the tie for i-th parameter
+ParameterTie *FunctionGenerator::getTie(size_t i) const {
+  auto tie = IFunction::getTie(i);
+  if (!tie) {
+    return nullptr;
+  }
+  if (i < m_nOwnParams) {
+    tie = m_source->getTie(i);
+  } else {
+    checkTargetFunction();
+    tie = m_target->getTie(i - m_nOwnParams);
+  }
+  return tie;
+}
+
+/// Get the i-th constraint
+IConstraint *FunctionGenerator::getConstraint(size_t i) const {
+  auto constraint = IFunction::getConstraint(i);
+  if (constraint == nullptr) {
+    if (i < m_nOwnParams) {
+      constraint = m_source->getConstraint(i);
+    } else {
+      checkTargetFunction();
+      constraint = m_target->getConstraint(i - m_nOwnParams);
+    }
+  }
+  return constraint;
+}
+
+} // namespace API
 } // namespace Mantid
diff --git a/Framework/API/src/FunctionParameterDecorator.cpp b/Framework/API/src/FunctionParameterDecorator.cpp
index 4d37ae9545ca6951a86614a2588899f684077ecf..e3ccfc81e13c7137e9d76c88ba8fa8da6bdc1100 100644
--- a/Framework/API/src/FunctionParameterDecorator.cpp
+++ b/Framework/API/src/FunctionParameterDecorator.cpp
@@ -154,24 +154,6 @@ void FunctionParameterDecorator::setError(size_t i, double err) {
   return m_wrappedFunction->setError(i, err);
 }
 
-bool FunctionParameterDecorator::isFixed(size_t i) const {
-  throwIfNoFunctionSet();
-
-  return m_wrappedFunction->isFixed(i);
-}
-
-void FunctionParameterDecorator::fix(size_t i) {
-  throwIfNoFunctionSet();
-
-  m_wrappedFunction->fix(i);
-}
-
-void FunctionParameterDecorator::unfix(size_t i) {
-  throwIfNoFunctionSet();
-
-  m_wrappedFunction->unfix(i);
-}
-
 size_t FunctionParameterDecorator::getParameterIndex(
     const ParameterReference &ref) const {
   throwIfNoFunctionSet();
@@ -180,8 +162,8 @@ size_t FunctionParameterDecorator::getParameterIndex(
     return m_wrappedFunction->getParameterIndex(ref);
   }
 
-  if (ref.getFunction() == this && ref.getIndex() < nParams()) {
-    return ref.getIndex();
+  if (ref.getLocalFunction() == this && ref.getLocalIndex() < nParams()) {
+    return ref.getLocalIndex();
   }
 
   return nParams();
@@ -222,11 +204,16 @@ bool FunctionParameterDecorator::hasAttribute(
   return m_wrappedFunction->hasAttribute(attName);
 }
 
-void FunctionParameterDecorator::tie(const std::string &parName,
-                                     const std::string &expr, bool isDefault) {
+void FunctionParameterDecorator::setParameterStatus(
+    size_t i, IFunction::ParameterStatus status) {
   throwIfNoFunctionSet();
+  m_wrappedFunction->setParameterStatus(i, status);
+}
 
-  m_wrappedFunction->tie(parName, expr, isDefault);
+IFunction::ParameterStatus
+FunctionParameterDecorator::getParameterStatus(size_t i) const {
+  throwIfNoFunctionSet();
+  return m_wrappedFunction->getParameterStatus(i);
 }
 
 void FunctionParameterDecorator::applyTies() {
@@ -299,6 +286,12 @@ void FunctionParameterDecorator::declareParameter(
   UNUSED_ARG(description);
 }
 
+void FunctionParameterDecorator::tie(const std::string &parName,
+                                     const std::string &expr, bool isDefault) {
+  throwIfNoFunctionSet();
+  m_wrappedFunction->tie(parName, expr, isDefault);
+}
+
 /// Forwads addTie-call to the decorated function.
 void FunctionParameterDecorator::addTie(std::unique_ptr<ParameterTie> tie) {
   throwIfNoFunctionSet();
diff --git a/Framework/API/src/IFunction.cpp b/Framework/API/src/IFunction.cpp
index 36d32e4b4f84c7f8a70837fddadc1d2cfb7091dd..8885f0684a5d269dd32c312e656caafdeefe553e 100644
--- a/Framework/API/src/IFunction.cpp
+++ b/Framework/API/src/IFunction.cpp
@@ -66,7 +66,8 @@ boost::shared_ptr<IFunction> IFunction::clone() const {
  * @param reporter :: A pointer to a progress reporter that can be called during
  * function evaluation
  */
-void IFunction::setProgressReporter(Kernel::ProgressBase *reporter) {
+void IFunction::setProgressReporter(
+    boost::shared_ptr<Kernel::ProgressBase> reporter) {
   m_progReporter = reporter;
   m_progReporter->setNotifyStep(0.01);
 }
@@ -77,7 +78,7 @@ void IFunction::setProgressReporter(Kernel::ProgressBase *reporter) {
  */
 void IFunction::reportProgress(const std::string &msg) const {
   if (m_progReporter) {
-    const_cast<Kernel::ProgressBase *>(m_progReporter)->report(msg);
+    const_cast<Kernel::ProgressBase *>(m_progReporter.get())->report(msg);
   }
 }
 
@@ -103,6 +104,59 @@ void IFunction::functionDeriv(const FunctionDomain &domain,
   calNumericalDeriv(domain, jacobian);
 }
 
+/** Check if an active parameter i is actually active
+ * @param i :: Index of a parameter.
+ */
+bool IFunction::isActive(size_t i) const {
+  return getParameterStatus(i) == Active;
+}
+
+/**
+ * Query if the parameter is fixed
+ * @param i :: The index of a declared parameter
+ * @return true if parameter i is fixed
+ */
+bool IFunction::isFixed(size_t i) const {
+  auto status = getParameterStatus(i);
+  return status == Fixed || status == FixedByDefault;
+}
+
+/// Check if a parameter i is fixed by default (not by user).
+/// @param i :: The index of a parameter
+/// @return true if parameter i is fixed by default
+bool IFunction::isFixedByDefault(size_t i) const {
+  return getParameterStatus(i) == FixedByDefault;
+}
+
+/// This method doesn't create a tie
+/// @param i :: A declared parameter index to be fixed
+/// @param isDefault :: If true fix it by default
+///
+void IFunction::fix(size_t i, bool isDefault) {
+  auto status = getParameterStatus(i);
+  if (status == Tied) {
+    throw std::runtime_error("Cannot fix parameter " + std::to_string(i) +
+                             " (" + parameterName(i) + "): it has a tie.");
+  }
+  if (isDefault) {
+    setParameterStatus(i, FixedByDefault);
+  } else {
+    setParameterStatus(i, Fixed);
+  }
+}
+
+/** Makes a parameter active again. It doesn't change the parameter's tie.
+ * @param i :: A declared parameter index to be restored to active
+ */
+void IFunction::unfix(size_t i) {
+  auto status = getParameterStatus(i);
+  if (status == Tied) {
+    throw std::runtime_error("Cannot unfix parameter " + std::to_string(i) +
+                             " (" + parameterName(i) + "): it has a tie.");
+  }
+  setParameterStatus(i, Active);
+}
+
 /**
  * Ties a parameter to other parameters
  * @param parName :: The name of the parameter to tie.
@@ -114,13 +168,12 @@ void IFunction::functionDeriv(const FunctionDomain &domain,
 void IFunction::tie(const std::string &parName, const std::string &expr,
                     bool isDefault) {
   auto ti = Kernel::make_unique<ParameterTie>(this, parName, expr, isDefault);
-  this->fix(getParameterIndex(*ti));
   if (!isDefault && ti->isConstant()) {
     setParameter(parName, ti->eval());
+    fix(getParameterIndex(*ti));
   } else {
     addTie(std::move(ti));
   }
-  //  return ti.get();
 }
 
 /**
@@ -158,34 +211,187 @@ void IFunction::removeTie(const std::string &parName) {
   this->removeTie(i);
 }
 
-/// Write a parameter tie to a string
-/// @param iParam :: An index of a parameter.
+/// Write all parameter ties owned by this function to a string
 /// @return A tie string for the parameter.
-std::string IFunction::writeTie(size_t iParam) const {
+std::string IFunction::writeTies() const {
   std::ostringstream tieStream;
-  const ParameterTie *tie = getTie(iParam);
-  if (tie) {
-    if (!tie->isDefault()) {
-      tieStream << tie->asString(this);
+  bool first = true;
+  for (auto &tie : m_ties) {
+    if (tie->isDefault())
+      continue;
+    if (!first) {
+      tieStream << ',';
+    } else {
+      first = false;
     }
-  } else if (isFixed(iParam)) {
-    tieStream << parameterName(iParam) << "=" << getParameter(iParam);
+    tieStream << tie->asString(this);
   }
   return tieStream.str();
 }
 
-/// Write a parameter constraint to a string
-/// @param iParam :: An index of a parameter.
+/**
+ * Attaches a tie to this ParamFunction. The attached tie is owned by the
+ * ParamFunction.
+ * @param tie :: A pointer to a new tie
+ */
+void IFunction::addTie(std::unique_ptr<ParameterTie> tie) {
+
+  auto iPar = getParameterIndex(*tie);
+  bool found = false;
+  for (auto &m_tie : m_ties) {
+    auto mPar = getParameterIndex(*m_tie);
+    if (mPar == iPar) {
+      found = true;
+      m_tie = std::move(tie);
+      break;
+    }
+  }
+  if (!found) {
+    m_ties.push_back(std::move(tie));
+    setParameterStatus(iPar, Tied);
+  }
+}
+
+/**
+ * Apply the ties.
+ */
+void IFunction::applyTies() {
+  for (auto &m_tie : m_ties) {
+    m_tie->eval();
+  }
+}
+
+/**
+ * Used to find ParameterTie for a parameter i
+ */
+class ReferenceEqual {
+  /// The function that has the tie
+  const IFunction &m_fun;
+  /// index to find
+  const size_t m_i;
+
+public:
+  /// Constructor
+  explicit ReferenceEqual(const IFunction &fun, size_t i)
+      : m_fun(fun), m_i(i) {}
+  /// Bracket operator
+  /// @param p :: the element you are looking for
+  /// @return True if found
+  template <class T> bool operator()(const std::unique_ptr<T> &p) {
+    return m_fun.getParameterIndex(*p) == m_i;
+  }
+};
+
+/** Removes i-th parameter's tie if it is tied or does nothing.
+ * @param i :: The index of the tied parameter.
+ * @return True if successfull
+ */
+bool IFunction::removeTie(size_t i) {
+  if (i >= nParams()) {
+    throw std::out_of_range("Function parameter index out of range.");
+  }
+  auto it =
+      std::find_if(m_ties.begin(), m_ties.end(), ReferenceEqual(*this, i));
+  if (it != m_ties.end()) {
+    m_ties.erase(it);
+    setParameterStatus(i, Active);
+    return true;
+  }
+  unfix(i);
+  return false;
+}
+
+/** Get tie of parameter number i
+ * @param i :: The index of a declared parameter.
+ * @return A pointer to the tie
+ */
+ParameterTie *IFunction::getTie(size_t i) const {
+  auto it =
+      std::find_if(m_ties.cbegin(), m_ties.cend(), ReferenceEqual(*this, i));
+  if (it != m_ties.cend()) {
+    return it->get();
+  }
+  return nullptr;
+}
+
+/** Remove all ties
+ */
+void IFunction::clearTies() {
+  for (size_t i = 0; i < nParams(); ++i) {
+    setParameterStatus(i, Active);
+  }
+  m_ties.clear();
+}
+
+/** Add a constraint
+ *  @param ic :: Pointer to a constraint.
+ */
+void IFunction::addConstraint(std::unique_ptr<IConstraint> ic) {
+  size_t iPar = ic->parameterIndex();
+  bool found = false;
+  for (auto &constraint : m_constraints) {
+    if (constraint->parameterIndex() == iPar) {
+      found = true;
+      constraint = std::move(ic);
+      break;
+    }
+  }
+  if (!found) {
+    m_constraints.push_back(std::move(ic));
+  }
+}
+
+/** Get constraint of parameter number i
+ * @param i :: The index of a declared parameter.
+ * @return A pointer to the constraint or NULL
+ */
+IConstraint *IFunction::getConstraint(size_t i) const {
+  auto it = std::find_if(m_constraints.cbegin(), m_constraints.cend(),
+                         ReferenceEqual(*this, i));
+  if (it != m_constraints.cend()) {
+    return it->get();
+  }
+  return nullptr;
+}
+
+/** Remove a constraint
+ * @param parName :: The name of a parameter which constarint to remove.
+ */
+void IFunction::removeConstraint(const std::string &parName) {
+  size_t iPar = parameterIndex(parName);
+  for (auto it = m_constraints.begin(); it != m_constraints.end(); ++it) {
+    if (iPar == (**it).getLocalIndex()) {
+      m_constraints.erase(it);
+      break;
+    }
+  }
+}
+
+/// Remove all constraints.
+void IFunction::clearConstraints() { m_constraints.clear(); }
+
+void IFunction::setUpForFit() {
+  for (auto &constraint : m_constraints) {
+    constraint->setParamToSatisfyConstraint();
+  }
+}
+
+/// Write all parameter constraints owned by this function to a string
 /// @return A constraint string for the parameter.
-std::string IFunction::writeConstraint(size_t iParam) const {
-  const IConstraint *c = getConstraint(iParam);
-  if (c && !c->isDefault()) {
-    std::string constraint = c->asString();
-    if (!constraint.empty()) {
-      return constraint;
+std::string IFunction::writeConstraints() const {
+  std::ostringstream stream;
+  bool first = true;
+  for (auto &constrint : m_constraints) {
+    if (constrint->isDefault())
+      continue;
+    if (!first) {
+      stream << ',';
+    } else {
+      first = false;
     }
+    stream << constrint->asString();
   }
-  return "";
+  return stream.str();
 }
 
 /**
@@ -204,33 +410,29 @@ std::string IFunction::asString() const {
       ostr << ',' << attName << '=' << attValue;
     }
   }
+  std::vector<std::string> ties;
   // print the parameters
   for (size_t i = 0; i < nParams(); i++) {
-    ostr << ',' << parameterName(i) << '=' << getParameter(i);
+    std::ostringstream paramOut;
+    paramOut << parameterName(i) << '=' << getParameter(i);
+    ostr << ',' << paramOut.str();
+    // Output non-default ties only.
+    if (getParameterStatus(i) == Fixed) {
+      ties.push_back(paramOut.str());
+    }
   }
 
   // collect non-default constraints
-  std::vector<std::string> constraints;
-  for (size_t i = 0; i < nParams(); i++) {
-    auto constraint = writeConstraint(i);
-    if (!constraint.empty()) {
-      constraints.push_back(constraint);
-    }
-  }
+  std::string constraints = writeConstraints();
   // print constraints
   if (!constraints.empty()) {
-    ostr << ",constraints=("
-         << Kernel::Strings::join(constraints.begin(), constraints.end(), ",")
-         << ")";
+    ostr << ",constraints=(" << constraints << ")";
   }
 
   // collect the non-default ties
-  std::vector<std::string> ties;
-  for (size_t i = 0; i < nParams(); i++) {
-    auto tie = writeTie(i);
-    if (!tie.empty()) {
-      ties.push_back(tie);
-    }
+  auto tiesString = writeTies();
+  if (!tiesString.empty()) {
+    ties.push_back(tiesString);
   }
   // print the ties
   if (!ties.empty()) {
@@ -370,6 +572,13 @@ private:
 };
 }
 
+/// Copy assignment. Do not copy m_quoteValue flag.
+/// @param attr :: The attribute to copy from.
+IFunction::Attribute &IFunction::Attribute::operator=(const Attribute &attr) {
+  m_data = attr.m_data;
+  return *this;
+}
+
 std::string IFunction::Attribute::value() const {
   AttValue tmp(m_quoteValue);
   return apply(tmp);
@@ -1149,9 +1358,10 @@ size_t IFunction::getValuesSize(const FunctionDomain &domain) const {
 
 /// Fix a parameter
 /// @param name :: A name of a parameter to fix
-void IFunction::fixParameter(const std::string &name) {
+/// @param isDefault :: If true fix it by default
+void IFunction::fixParameter(const std::string &name, bool isDefault) {
   auto i = parameterIndex(name);
-  fix(i);
+  fix(i, isDefault);
 }
 
 /// Free a parameter
@@ -1162,14 +1372,41 @@ void IFunction::unfixParameter(const std::string &name) {
 }
 
 /// Fix all parameters
-void IFunction::fixAll() {
+/// @param isDefault :: If true fix them by default
+void IFunction::fixAll(bool isDefault) {
   for (size_t i = 0; i < nParams(); ++i) {
-    fix(i);
+    fix(i, isDefault);
   }
 }
 
 /// Free all parameters
-void IFunction::unfixAll() { clearTies(); }
+void IFunction::unfixAll() {
+  for (size_t i = 0; i < nParams(); ++i) {
+    unfix(i);
+  }
+}
+
+/// Free all parameters fixed by default
+void IFunction::unfixAllDefault() {
+  for (size_t i = 0; i < nParams(); ++i) {
+    if (getParameterStatus(i) == FixedByDefault) {
+      unfix(i);
+    }
+  }
+}
+
+/// Fix all active parameters. This method doesn't change
+/// status of a fixed parameter, eg if one was fixed by default
+/// prior to calling this method it will remain default regardless
+/// the value of isDefault argument.
+/// @param isDefault :: If true fix them by default.
+void IFunction::fixAllActive(bool isDefault) {
+  for (size_t i = 0; i < nParams(); ++i) {
+    if (getParameterStatus(i) == Active) {
+      fix(i, isDefault);
+    }
+  }
+}
 
 /// Get number of domains required by this function.
 /// If it returns a number greater than 1 then the domain
diff --git a/Framework/API/src/ParamFunction.cpp b/Framework/API/src/ParamFunction.cpp
index 3c7c628e78eb5b0adc88b326f61cfdeee31e5760..70bdd35973503e3f8a18f474b9e82dae232b33dc 100644
--- a/Framework/API/src/ParamFunction.cpp
+++ b/Framework/API/src/ParamFunction.cpp
@@ -15,12 +15,6 @@ namespace {
 Kernel::Logger g_log("ParamFunction");
 }
 
-/// Destructor
-ParamFunction::~ParamFunction() {
-  m_ties.clear();
-  m_constraints.clear();
-}
-
 /** Sets a new value to the i-th parameter.
  *  @param i :: The parameter index
  *  @param value :: The new value
@@ -43,9 +37,7 @@ void ParamFunction::setParameter(size_t i, const double &value,
     g_log.warning(errmsg.str());
   }
 
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   if (explicitlySet && value != m_parameters[i]) {
     m_explicitlySet[i] = true;
   }
@@ -58,9 +50,7 @@ void ParamFunction::setParameter(size_t i, const double &value,
  */
 void ParamFunction::setParameterDescription(size_t i,
                                             const std::string &description) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   m_parameterDescriptions[i] = description;
 }
 
@@ -69,9 +59,7 @@ void ParamFunction::setParameterDescription(size_t i,
  *  @return the value of the requested parameter
  */
 double ParamFunction::getParameter(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_parameters[i];
 }
 
@@ -170,9 +158,7 @@ size_t ParamFunction::parameterIndex(const std::string &name) const {
  * @return the name of the parameter at the requested index
  */
 std::string ParamFunction::parameterName(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_parameterNames[i];
 }
 
@@ -181,9 +167,7 @@ std::string ParamFunction::parameterName(size_t i) const {
  * @return the description of the parameter at the requested index
  */
 std::string ParamFunction::parameterDescription(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_parameterDescriptions[i];
 }
 
@@ -193,9 +177,7 @@ std::string ParamFunction::parameterDescription(size_t i) const {
  * @return :: the error
  */
 double ParamFunction::getError(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_errors[i];
 }
 
@@ -205,9 +187,7 @@ double ParamFunction::getError(size_t i) const {
  * @param err :: The error value to set
  */
 void ParamFunction::setError(size_t i, double err) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   m_errors[i] = err;
 }
 
@@ -226,7 +206,7 @@ void ParamFunction::declareParameter(const std::string &name, double initValue,
     throw std::invalid_argument(msg.str());
   }
 
-  m_isFixed.push_back(false);
+  m_parameterStatus.push_back(Active);
   m_parameterNames.push_back(name);
   m_parameterDescriptions.push_back(description);
   m_parameters.push_back(initValue);
@@ -234,202 +214,43 @@ void ParamFunction::declareParameter(const std::string &name, double initValue,
   m_explicitlySet.push_back(false);
 }
 
-/**
- * query if the parameter is fixed
- * @param i :: The index of a declared parameter
- * @return true if parameter i is active
- */
-bool ParamFunction::isFixed(size_t i) const {
-  if (i >= nParams())
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  return m_isFixed[i];
-}
-
-/** This method doesn't create a tie
- * @param i :: A declared parameter index to be fixed
- */
-void ParamFunction::fix(size_t i) {
-  if (isFixed(i))
-    return;
-  m_isFixed[i] = true;
-}
-
-/** Makes a parameter active again. It doesn't change the parameter's tie.
- * @param i :: A declared parameter index to be restored to active
- */
-void ParamFunction::unfix(size_t i) {
-  if (!isFixed(i))
-    return;
-  m_isFixed[i] = false;
-}
-
-/**
- * Attaches a tie to this ParamFunction. The attached tie is owned by the
- * ParamFunction.
- * @param tie :: A pointer to a new tie
- */
-void ParamFunction::addTie(std::unique_ptr<ParameterTie> tie) {
-  size_t iPar = tie->getIndex();
-  bool found = false;
-  for (auto &m_tie : m_ties) {
-    if (m_tie->getIndex() == iPar) {
-      found = true;
-      m_tie = std::move(tie);
-      break;
-    }
-  }
-  if (!found) {
-    m_ties.push_back(std::move(tie));
-  }
-}
-
-/**
- * Apply the ties.
- */
-void ParamFunction::applyTies() {
-  for (auto &m_tie : m_ties) {
-    m_tie->eval();
-  }
-}
-
-/**
- * Used to find ParameterTie for a parameter i
- */
-class ReferenceEqual {
-  /// index to find
-  const size_t m_i;
-
-public:
-  /// Constructor
-  explicit ReferenceEqual(size_t i) : m_i(i) {}
-  /// Bracket operator
-  /// @param p :: the element you are looking for
-  /// @return True if found
-  template <class T> bool operator()(const std::unique_ptr<T> &p) {
-    return p->getIndex() == m_i;
-  }
-};
-
-/** Removes i-th parameter's tie if it is tied or does nothing.
- * @param i :: The index of the tied parameter.
- * @return True if successfull
- */
-bool ParamFunction::removeTie(size_t i) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
-  auto it = std::find_if(m_ties.begin(), m_ties.end(), ReferenceEqual(i));
-  if (it != m_ties.end()) {
-    m_ties.erase(it);
-    unfix(i);
-    return true;
-  }
-  unfix(i);
-  return false;
-}
-
-/** Get tie of parameter number i
- * @param i :: The index of a declared parameter.
- * @return A pointer to the tie
- */
-ParameterTie *ParamFunction::getTie(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
-  auto it = std::find_if(m_ties.cbegin(), m_ties.cend(), ReferenceEqual(i));
-  if (it != m_ties.cend()) {
-    return it->get();
-  }
-  return nullptr;
-}
-
-/** Remove all ties
- */
-void ParamFunction::clearTies() {
-  for (size_t i = 0; i < nParams(); ++i) {
-    unfix(i);
-  }
-  m_ties.clear();
-}
-
-/** Add a constraint
- *  @param ic :: Pointer to a constraint.
- */
-void ParamFunction::addConstraint(std::unique_ptr<IConstraint> ic) {
-  size_t iPar = ic->getIndex();
-  bool found = false;
-  for (auto &constraint : m_constraints) {
-    if (constraint->getIndex() == iPar) {
-      found = true;
-      constraint = std::move(ic);
-      break;
-    }
-  }
-  if (!found) {
-    m_constraints.push_back(std::move(ic));
-  }
-}
-
-/** Get constraint of parameter number i
- * @param i :: The index of a declared parameter.
- * @return A pointer to the constraint or NULL
- */
-IConstraint *ParamFunction::getConstraint(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
-  auto it = std::find_if(m_constraints.cbegin(), m_constraints.cend(),
-                         ReferenceEqual(i));
-  if (it != m_constraints.cend()) {
-    return it->get();
-  }
-  return nullptr;
-}
-
-/** Remove a constraint
- * @param parName :: The name of a parameter which constarint to remove.
- */
-void ParamFunction::removeConstraint(const std::string &parName) {
-  size_t iPar = parameterIndex(parName);
-  for (auto it = m_constraints.begin(); it != m_constraints.end(); ++it) {
-    if (iPar == (**it).getIndex()) {
-      m_constraints.erase(it);
-      break;
-    }
-  }
-}
-
-void ParamFunction::setUpForFit() {
-  for (auto &constraint : m_constraints) {
-    constraint->setParamToSatisfyConstraint();
-  }
-}
-
 /// Nonvirtual member which removes all declared parameters
 void ParamFunction::clearAllParameters() {
-  m_ties.clear();
-  m_constraints.clear();
+  clearTies();
+  clearConstraints();
   m_parameters.clear();
   m_parameterNames.clear();
   m_parameterDescriptions.clear();
-  m_isFixed.clear();
+  m_parameterStatus.clear();
+}
+
+/// Change status of parameter
+/// @param i :: Index of a parameter.
+/// @param status :: New parameter status.
+void ParamFunction::setParameterStatus(size_t i, ParameterStatus status) {
+  checkParameterIndex(i);
+  m_parameterStatus[i] = status;
+}
+
+/// Get status of parameter
+/// @param i :: Index of a parameter.
+/// @return Parameter status.
+IFunction::ParameterStatus ParamFunction::getParameterStatus(size_t i) const {
+  checkParameterIndex(i);
+  return m_parameterStatus[i];
 }
 
 /// Get the address of the parameter
 /// @param i :: the index of the parameter required
 /// @returns the address of the parameter
 double *ParamFunction::getParameterAddress(size_t i) {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return &m_parameters[i];
 }
 
 /// Checks if a parameter has been set explicitly
 bool ParamFunction::isExplicitlySet(size_t i) const {
-  if (i >= nParams()) {
-    throw std::out_of_range("ParamFunction parameter index out of range.");
-  }
+  checkParameterIndex(i);
   return m_explicitlySet[i];
 }
 
@@ -439,8 +260,8 @@ bool ParamFunction::isExplicitlySet(size_t i) const {
  * @return Parameter index or number of nParams() if parameter not found
  */
 size_t ParamFunction::getParameterIndex(const ParameterReference &ref) const {
-  if (ref.getFunction() == this && ref.getIndex() < nParams()) {
-    return ref.getIndex();
+  if (ref.getLocalFunction() == this && ref.getLocalIndex() < nParams()) {
+    return ref.getLocalIndex();
   }
   return nParams();
 }
diff --git a/Framework/API/src/ParameterReference.cpp b/Framework/API/src/ParameterReference.cpp
index d0b8cc52d5c45c3e387235ffba83cd936c9a9755..979289f8a31dba2bd663761d71f987bba9b53fd5 100644
--- a/Framework/API/src/ParameterReference.cpp
+++ b/Framework/API/src/ParameterReference.cpp
@@ -6,7 +6,7 @@ namespace API {
 
 /// Default constructor
 ParameterReference::ParameterReference()
-    : m_function(), m_index(0), m_isDefault(false) {}
+    : m_owner(), m_function(), m_index(0), m_isDefault(false) {}
 
 /**
  * Constructor.
@@ -17,15 +17,26 @@ ParameterReference::ParameterReference()
  *  a tie or a constraint.
  */
 ParameterReference::ParameterReference(IFunction *fun, std::size_t index,
-                                       bool isDefault) {
+                                       bool isDefault)
+    : m_owner(fun), m_function(fun), m_index(index), m_isDefault(isDefault) {
   reset(fun, index, isDefault);
 }
 
-/// Return pointer to the function
-IFunction *ParameterReference::getFunction() const { return m_function; }
+/// Return pointer to the local function
+IFunction *ParameterReference::getLocalFunction() const { return m_function; }
 
-/// Return parameter index in that function
-std::size_t ParameterReference::getIndex() const { return m_index; }
+/// Return parameter index in the local function
+std::size_t ParameterReference::getLocalIndex() const { return m_index; }
+
+/// Return parameter index in the owning function
+std::size_t ParameterReference::parameterIndex() const {
+  return m_owner->getParameterIndex(*this);
+}
+
+/// Return parameter name in the owning function
+std::string ParameterReference::parameterName() const {
+  return m_owner->parameterName(parameterIndex());
+}
 
 /**
  * Reset the reference
@@ -37,6 +48,7 @@ std::size_t ParameterReference::getIndex() const { return m_index; }
  */
 void ParameterReference::reset(IFunction *fun, std::size_t index,
                                bool isDefault) {
+  m_owner = fun;
   IFunction *fLocal = fun;
   size_t iLocal = index;
   CompositeFunction *cf = dynamic_cast<CompositeFunction *>(fun);
@@ -56,9 +68,12 @@ void ParameterReference::reset(IFunction *fun, std::size_t index,
 /**
  * Set the parameter
  * @param value :: A value to set.
+ * @param isExplicitlySet :: Flag that user explicitly set this
+ * parameter.
  */
-void ParameterReference::setParameter(const double &value) {
-  m_function->setParameter(m_index, value);
+void ParameterReference::setParameter(const double &value,
+                                      bool isExplicitlySet) {
+  m_function->setParameter(m_index, value, isExplicitlySet);
 }
 
 /// Get the value of the parameter
@@ -69,5 +84,27 @@ double ParameterReference::getParameter() const {
 /// Returns the default value flag
 bool ParameterReference::isDefault() const { return m_isDefault; }
 
+/// Find out if this refers to a parameter of a function: direct
+/// or via composite function member.
+/// @param fun :: A function to check.
+bool ParameterReference::isParameterOf(const IFunction *fun) const {
+  if (fun == m_function) {
+    return true;
+  }
+  auto fLocal = m_function;
+  size_t iLocal = m_index;
+  auto cf = dynamic_cast<const CompositeFunction *>(m_function);
+  while (cf) {
+    size_t iFun = cf->functionIndex(iLocal);
+    fLocal = cf->getFunction(iFun).get();
+    if (fLocal == fun) {
+      return true;
+    }
+    iLocal = fLocal->parameterIndex(cf->parameterLocalName(iLocal));
+    cf = dynamic_cast<CompositeFunction *>(fLocal);
+  }
+  return false;
+}
+
 } // namespace API
 } // namespace Mantid
diff --git a/Framework/API/src/ParameterTie.cpp b/Framework/API/src/ParameterTie.cpp
index bbe6b9e8dcc8c0aa8eac601325644cc7b52f4d8f..9717ee767b2cc021c699b1458f7d77f45da076a8 100644
--- a/Framework/API/src/ParameterTie.cpp
+++ b/Framework/API/src/ParameterTie.cpp
@@ -115,8 +115,8 @@ double ParameterTie::eval() {
       *(it->first) = it->second.getParameter();
     }
     res = m_parser->Eval();
-  } catch (...) {
-    throw std::runtime_error("Error in expresseion");
+  } catch (mu::ParserError &e) {
+    throw std::runtime_error("Error in expression: " + e.GetMsg());
   }
 
   setParameter(res);
@@ -177,11 +177,10 @@ std::string ParameterTie::asString(const IFunction *fun) const {
  */
 bool ParameterTie::findParametersOf(const IFunction *fun) const {
   for (const auto &varPair : m_varMap) {
-    if (varPair.second.getFunction() == fun) {
+    if (varPair.second.isParameterOf(fun)) {
       return true;
     }
   }
-
   return false;
 }
 
diff --git a/Framework/API/src/NearestNeighbourInfo.cpp b/Framework/API/src/WorkspaceNearestNeighbourInfo.cpp
similarity index 66%
rename from Framework/API/src/NearestNeighbourInfo.cpp
rename to Framework/API/src/WorkspaceNearestNeighbourInfo.cpp
index 7612bddd1e6a909f9af5e9dfe101c98d240b3f6a..2dca965546adb83e31c535b7ac7f189dfef1fd85 100644
--- a/Framework/API/src/NearestNeighbourInfo.cpp
+++ b/Framework/API/src/WorkspaceNearestNeighbourInfo.cpp
@@ -1,35 +1,35 @@
-#include "MantidAPI/NearestNeighbourInfo.h"
-#include "MantidAPI/NearestNeighbours.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbours.h"
 #include "MantidAPI/MatrixWorkspace.h"
 #include "MantidKernel/make_unique.h"
 
 namespace Mantid {
 namespace API {
 
-/** Creates NearestNeighbourInfo.
+/** Creates WorkspaceNearestNeighbourInfo.
 *
 * @param workspace :: Reference to workspace providing instrument and
 * spectrum-detector mapping
 * @param ignoreMaskedDetectors :: if true, masked detectors are ignored
 * @param nNeighbours :: number of neighbours to include
 */
-NearestNeighbourInfo::NearestNeighbourInfo(const MatrixWorkspace &workspace,
-                                           const bool ignoreMaskedDetectors,
-                                           const int nNeighbours)
+WorkspaceNearestNeighbourInfo::WorkspaceNearestNeighbourInfo(
+    const MatrixWorkspace &workspace, const bool ignoreMaskedDetectors,
+    const int nNeighbours)
     : m_workspace(workspace) {
   std::vector<specnum_t> spectrumNumbers;
   for (size_t i = 0; i < m_workspace.getNumberHistograms(); ++i)
     spectrumNumbers.push_back(m_workspace.getSpectrum(i).getSpectrumNo());
 
-  m_nearestNeighbours = Kernel::make_unique<NearestNeighbours>(
+  m_nearestNeighbours = Kernel::make_unique<WorkspaceNearestNeighbours>(
       nNeighbours, workspace.spectrumInfo(), std::move(spectrumNumbers),
       ignoreMaskedDetectors);
 }
 
 // Defined as default in source for forward declaration with std::unique_ptr.
-NearestNeighbourInfo::~NearestNeighbourInfo() = default;
+WorkspaceNearestNeighbourInfo::~WorkspaceNearestNeighbourInfo() = default;
 
-/** Queries the NearestNeighbours object for the selected detector.
+/** Queries the WorkspaceNearestNeighbours object for the selected detector.
 * NOTE! getNeighbours(spectrumNumber, radius) is MUCH faster.
 *
 * @param comp :: pointer to the querying detector
@@ -37,8 +37,8 @@ NearestNeighbourInfo::~NearestNeighbourInfo() = default;
 * @return map of DetectorID to distance for the nearest neighbours
 */
 std::map<specnum_t, Kernel::V3D>
-NearestNeighbourInfo::getNeighbours(const Geometry::IDetector *comp,
-                                    const double radius) const {
+WorkspaceNearestNeighbourInfo::getNeighbours(const Geometry::IDetector *comp,
+                                             const double radius) const {
   // Find the spectrum number
   std::vector<specnum_t> spectra = m_workspace.getSpectraFromDetectorIDs(
       std::vector<detid_t>(1, comp->getID()));
@@ -51,24 +51,27 @@ NearestNeighbourInfo::getNeighbours(const Geometry::IDetector *comp,
   return m_nearestNeighbours->neighboursInRadius(spectra[0], radius);
 }
 
-/** Queries the NearestNeighbours object for the selected spectrum number.
+/** Queries the WorkspaceNearestNeighbours object for the selected spectrum
+* number.
 *
 * @param spec :: spectrum number of the detector you are looking at
 * @param radius :: distance from detector on which to filter results
 * @return map of DetectorID to distance for the nearest neighbours
 */
 std::map<specnum_t, Kernel::V3D>
-NearestNeighbourInfo::getNeighbours(specnum_t spec, const double radius) const {
+WorkspaceNearestNeighbourInfo::getNeighbours(specnum_t spec,
+                                             const double radius) const {
   return m_nearestNeighbours->neighboursInRadius(spec, radius);
 }
 
-/** Queries the NearestNeighbours object for the selected spectrum number.
+/** Queries the WorkspaceNearestNeighbours object for the selected spectrum
+* number.
 *
 * @param spec :: spectrum number of the detector you are looking at
 * @return map of DetectorID to distance for the nearest neighbours
 */
 std::map<specnum_t, Kernel::V3D>
-NearestNeighbourInfo::getNeighboursExact(specnum_t spec) const {
+WorkspaceNearestNeighbourInfo::getNeighboursExact(specnum_t spec) const {
   return m_nearestNeighbours->neighbours(spec);
 }
 
diff --git a/Framework/API/src/NearestNeighbours.cpp b/Framework/API/src/WorkspaceNearestNeighbours.cpp
similarity index 90%
rename from Framework/API/src/NearestNeighbours.cpp
rename to Framework/API/src/WorkspaceNearestNeighbours.cpp
index 9f094269ade399b5944cac26ba3f19f0ada12663..de2b5ad4d0c0be602ffb268ce0a5c535826e188f 100644
--- a/Framework/API/src/NearestNeighbours.cpp
+++ b/Framework/API/src/WorkspaceNearestNeighbours.cpp
@@ -1,4 +1,4 @@
-#include "MantidAPI/NearestNeighbours.h"
+#include "MantidAPI/WorkspaceNearestNeighbours.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidGeometry/Instrument.h"
 #include "MantidGeometry/Instrument/DetectorGroup.h"
@@ -24,10 +24,9 @@ using Kernel::V3D;
  * @param ignoreMaskedDetectors :: flag indicating that masked detectors should
  * be ignored.
  */
-NearestNeighbours::NearestNeighbours(int nNeighbours,
-                                     const SpectrumInfo &spectrumInfo,
-                                     std::vector<specnum_t> spectrumNumbers,
-                                     bool ignoreMaskedDetectors)
+WorkspaceNearestNeighbours::WorkspaceNearestNeighbours(
+    int nNeighbours, const SpectrumInfo &spectrumInfo,
+    std::vector<specnum_t> spectrumNumbers, bool ignoreMaskedDetectors)
     : m_spectrumInfo(spectrumInfo),
       m_spectrumNumbers(std::move(spectrumNumbers)),
       m_noNeighbours(nNeighbours), m_cutoff(-DBL_MAX), m_radius(0),
@@ -42,7 +41,7 @@ NearestNeighbours::NearestNeighbours(int nNeighbours,
  * @return map of Detector ID's to distance
  */
 std::map<specnum_t, V3D>
-NearestNeighbours::neighbours(const specnum_t spectrum) const {
+WorkspaceNearestNeighbours::neighbours(const specnum_t spectrum) const {
   return defaultNeighbours(spectrum);
 }
 
@@ -55,8 +54,8 @@ NearestNeighbours::neighbours(const specnum_t spectrum) const {
  * @throw NotFoundError if component is not recognised as a detector
  */
 std::map<specnum_t, V3D>
-NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
-                                      const double radius) const {
+WorkspaceNearestNeighbours::neighboursInRadius(const specnum_t spectrum,
+                                               const double radius) const {
   // If the radius is stupid then don't let it continue as well be stuck forever
   if (radius < 0.0 || radius > 10.0) {
     throw std::invalid_argument(
@@ -71,7 +70,7 @@ NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
       // moment mean that
       // it is necessary.
       // Cast is necessary as the user should see this as a const member
-      const_cast<NearestNeighbours *>(this)->build(eightNearest);
+      const_cast<WorkspaceNearestNeighbours *>(this)->build(eightNearest);
     }
     result = defaultNeighbours(spectrum);
   } else if (radius > m_cutoff && m_radius != radius) {
@@ -79,7 +78,7 @@ NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
     int neighbours = m_noNeighbours + 1;
     while (true) {
       try {
-        const_cast<NearestNeighbours *>(this)->build(neighbours);
+        const_cast<WorkspaceNearestNeighbours *>(this)->build(neighbours);
       } catch (std::invalid_argument &) {
         break;
       }
@@ -109,7 +108,7 @@ NearestNeighbours::neighboursInRadius(const specnum_t spectrum,
  * @param noNeighbours :: The number of nearest neighbours to use to build
  * the graph
  */
-void NearestNeighbours::build(const int noNeighbours) {
+void WorkspaceNearestNeighbours::build(const int noNeighbours) {
   const auto indices = getSpectraDetectors();
   if (indices.empty()) {
     throw std::runtime_error(
@@ -201,7 +200,7 @@ void NearestNeighbours::build(const int noNeighbours) {
  * @throw NotFoundError if detector ID is not recognised
  */
 std::map<specnum_t, V3D>
-NearestNeighbours::defaultNeighbours(const specnum_t spectrum) const {
+WorkspaceNearestNeighbours::defaultNeighbours(const specnum_t spectrum) const {
   auto vertex = m_specToVertex.find(spectrum);
 
   if (vertex != m_specToVertex.end()) {
@@ -224,7 +223,7 @@ NearestNeighbours::defaultNeighbours(const specnum_t spectrum) const {
 }
 
 /// Returns the list of valid spectrum indices
-std::vector<size_t> NearestNeighbours::getSpectraDetectors() {
+std::vector<size_t> WorkspaceNearestNeighbours::getSpectraDetectors() {
   std::vector<size_t> indices;
   for (size_t i = 0; i < m_spectrumNumbers.size(); ++i) {
     // Always ignore monitors and ignore masked detectors if requested.
diff --git a/Framework/API/test/CompositeFunctionTest.h b/Framework/API/test/CompositeFunctionTest.h
index c4cf053d64db1298315186dba12c542302e6fd56..43824cf55ce45a806331233d936d0bce1bffe989 100644
--- a/Framework/API/test/CompositeFunctionTest.h
+++ b/Framework/API/test/CompositeFunctionTest.h
@@ -603,18 +603,18 @@ public:
 
     TS_ASSERT_EQUALS(mfun->nParams(), 12);
 
-    TS_ASSERT_EQUALS(mfun->getParameter(0), 154);
-    TS_ASSERT_EQUALS(mfun->getParameter(1), 77);
-    TS_ASSERT_EQUALS(mfun->getParameter(2), 1.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(3), 1.2);
-    TS_ASSERT_EQUALS(mfun->getParameter(4), 1.65);
-    TS_ASSERT_EQUALS(mfun->getParameter(5), 2.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(6), 2.4 * 2.4);
-    TS_ASSERT_EQUALS(mfun->getParameter(7), sqrt(2.4));
-    TS_ASSERT_EQUALS(mfun->getParameter(8), 2.4);
-    TS_ASSERT_EQUALS(mfun->getParameter(9), 3.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(10), 79.1);
-    TS_ASSERT_EQUALS(mfun->getParameter(11), 3.3);
+    TS_ASSERT_EQUALS(mfun->getParameter("f0.a"), 154);
+    TS_ASSERT_EQUALS(mfun->getParameter("f0.b"), 77);
+    TS_ASSERT_EQUALS(mfun->getParameter("f1.c"), 1.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f1.h"), 1.2);
+    TS_ASSERT_EQUALS(mfun->getParameter("f1.s"), 1.65);
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c0"), 2.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c1"), 2.4 * 2.4);
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c2"), sqrt(2.4));
+    TS_ASSERT_EQUALS(mfun->getParameter("f2.c3"), 2.4);
+    TS_ASSERT_EQUALS(mfun->getParameter("f3.c"), 3.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f3.h"), 79.1);
+    TS_ASSERT_EQUALS(mfun->getParameter("f3.s"), 3.3);
 
     delete mfun;
   }
@@ -1073,11 +1073,11 @@ public:
 
     TS_ASSERT_EQUALS(mfun->nParams(), 5);
 
-    TS_ASSERT(!mfun->isFixed(0));
-    TS_ASSERT(mfun->isFixed(1));
-    TS_ASSERT(!mfun->isFixed(2));
-    TS_ASSERT(mfun->isFixed(3));
-    TS_ASSERT(mfun->isFixed(4));
+    TS_ASSERT(mfun->isActive(0));  // f0.a
+    TS_ASSERT(!mfun->isActive(1)); // f0.b
+    TS_ASSERT(mfun->isActive(2));  // f1.c
+    TS_ASSERT(!mfun->isActive(3)); // f1.h
+    TS_ASSERT(mfun->isFixed(4));   // f1.s
 
     mfun->applyTies();
 
@@ -1114,9 +1114,9 @@ public:
 
     TS_ASSERT_EQUALS(mfun->nParams(), 3);
 
-    TS_ASSERT(!mfun->isFixed(0));
-    TS_ASSERT(!mfun->isFixed(1));
-    TS_ASSERT(mfun->isFixed(2));
+    TS_ASSERT(mfun->isActive(0));
+    TS_ASSERT(mfun->isActive(1));
+    TS_ASSERT(!mfun->isActive(2));
 
     mfun->applyTies();
 
diff --git a/Framework/API/test/DetectorSearcherTest.h b/Framework/API/test/DetectorSearcherTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..aae46f50b23b19b171dbcb4d6ac8a374349bdfb3
--- /dev/null
+++ b/Framework/API/test/DetectorSearcherTest.h
@@ -0,0 +1,235 @@
+#ifndef MANTID_API_DETECTORSEARCHERTEST_H_
+#define MANTID_API_DETECTORSEARCHERTEST_H_
+
+#include "MantidAPI/DetectorSearcher.h"
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/ExperimentInfo.h"
+#include "MantidBeamline/DetectorInfo.h"
+#include "MantidTestHelpers/ComponentCreationHelper.h"
+#include "MantidKernel/V3D.h"
+
+#include <cmath>
+#include <cxxtest/TestSuite.h>
+
+using Mantid::Kernel::V3D;
+using namespace Mantid;
+using namespace Mantid::Geometry;
+using namespace Mantid::API;
+
+class DetectorSearcherTest : public CxxTest::TestSuite {
+public:
+  void test_init() {
+    auto inst1 = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+    auto inst2 =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+
+    ExperimentInfo expInfo1;
+    expInfo1.setInstrument(inst1);
+    ExperimentInfo expInfo2;
+    expInfo2.setInstrument(inst2);
+
+    TS_ASSERT_THROWS_NOTHING(
+        DetectorSearcher searcher(inst1, expInfo1.detectorInfo()))
+    TS_ASSERT_THROWS_NOTHING(
+        DetectorSearcher searcher(inst2, expInfo2.detectorInfo()))
+  }
+
+  void test_search_cylindrical() {
+    auto inst = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+
+    DetectorSearcher searcher(inst, expInfo.detectorInfo());
+    const auto checkResult = [&searcher](const V3D &q, size_t index) {
+      const auto result = searcher.findDetectorIndex(q);
+      TS_ASSERT(std::get<0>(result))
+      TS_ASSERT_EQUALS(std::get<1>(result), index)
+    };
+
+    checkResult(V3D(0.913156, 0.285361, 0.291059), 0);
+    checkResult(V3D(-6.09343e-17, 0.995133, 0.0985376), 1);
+    checkResult(V3D(-0.913156, 0.285361, 0.291059), 2);
+    checkResult(V3D(0.959758, -1.17536e-16, 0.280828), 3);
+
+    checkResult(V3D(-0.959758, -0, 0.280828), 5);
+    checkResult(V3D(0.913156, -0.285361, 0.291059), 6);
+    checkResult(V3D(-6.09343e-17, -0.995133, 0.0985376), 7);
+    checkResult(V3D(-0.913156, -0.285361, 0.291059), 8);
+    checkResult(V3D(0.942022, 0.294382, 0.161038), 9);
+    checkResult(V3D(-6.11563e-17, 0.998759, 0.0498137), 10);
+    checkResult(V3D(-0.942022, 0.294382, 0.161038), 11);
+    checkResult(V3D(0.988034, -1.20999e-16, 0.154233), 12);
+
+    checkResult(V3D(-0.988034, -0, 0.154233), 14);
+    checkResult(V3D(0.942022, -0.294382, 0.161038), 15);
+    checkResult(V3D(-6.11563e-17, -0.998759, 0.0498137), 16);
+    checkResult(V3D(-0.942022, -0.294382, 0.161038), 17);
+    checkResult(V3D(0.948717, 0.296474, 0.109725), 18);
+    checkResult(V3D(-6.11984e-17, 0.999446, 0.0332779), 19);
+    checkResult(V3D(-0.948717, 0.296474, 0.109725), 20);
+    checkResult(V3D(0.994483, -1.21789e-16, 0.104898), 21);
+
+    checkResult(V3D(-0.994483, -0, 0.104898), 23);
+    checkResult(V3D(0.948717, -0.296474, 0.109725), 24);
+    checkResult(V3D(-6.11984e-17, -0.999446, 0.0332779), 25);
+    checkResult(V3D(-0.948717, -0.296474, 0.109725), 26);
+  }
+
+  void test_invalid_rectangular() {
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+    const auto resultNull = searcher.findDetectorIndex(V3D(0, 0, 0));
+    TS_ASSERT(!std::get<0>(resultNull))
+
+    const auto resultNaN = searcher.findDetectorIndex(V3D(NAN, NAN, NAN));
+    TS_ASSERT(!std::get<0>(resultNaN))
+  }
+
+  void test_invalid_cylindrical() {
+    auto inst = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+    const auto resultNull = searcher.findDetectorIndex(V3D(0, 0, 0));
+    TS_ASSERT(!std::get<0>(resultNull))
+
+    const auto resultNaN = searcher.findDetectorIndex(V3D(NAN, NAN, NAN));
+    TS_ASSERT(!std::get<0>(resultNaN))
+  }
+
+  void test_search_rectangular() {
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+    const auto checkResult = [&searcher](V3D q, size_t index) {
+      const auto result = searcher.findDetectorIndex(q);
+      TS_ASSERT(std::get<0>(result))
+      TS_ASSERT_EQUALS(std::get<1>(result), index)
+    };
+
+    for (size_t pointNo = 0; pointNo < info.size(); ++pointNo) {
+      const auto &det = info.detector(pointNo);
+      const auto q = convertDetectorPositionToQ(det);
+      checkResult(q, pointNo);
+    }
+  }
+
+  V3D convertDetectorPositionToQ(const IDetector &det) {
+    const auto tt1 = det.getTwoTheta(V3D(0, 0, 0), V3D(0, 0, 1)); // two theta
+    const auto ph1 = det.getPhi();                                // phi
+    auto E1 =
+        V3D(-std::sin(tt1) * std::cos(ph1), -std::sin(tt1) * std::sin(ph1),
+            1. - std::cos(tt1));  // end of trajectory
+    return E1 * (1. / E1.norm()); // normalize
+  }
+};
+
+class DetectorSearcherTestPerformance : public CxxTest::TestSuite {
+public:
+  void test_rectangular() {
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+
+    std::vector<double> xDirections(100);
+    std::vector<double> yDirections(100);
+    std::vector<double> zDirections(50);
+
+    // create x values of the range -1 to 1
+    int index = 0;
+    double startValue = -1;
+    std::generate(
+        xDirections.begin(), xDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    // create z values of the range 0.1 to 1
+    // ignore negative z values as these are not physical!
+    index = 0;
+    startValue = 0.1;
+    std::generate(
+        zDirections.begin(), zDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    yDirections = xDirections;
+
+    size_t hitCount = 0;
+    for (auto &x : xDirections) {
+      for (auto &y : yDirections) {
+        for (auto &z : zDirections) {
+          const auto result = searcher.findDetectorIndex(V3D(x, y, z));
+          if (std::get<0>(result))
+            ++hitCount;
+        }
+      }
+    }
+
+    TS_ASSERT_EQUALS(hitCount, 246)
+  }
+
+  void test_cylindrical() {
+    auto inst = ComponentCreationHelper::createTestInstrumentCylindrical(
+        3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+
+    ExperimentInfo expInfo;
+    expInfo.setInstrument(inst);
+    const auto &info = expInfo.detectorInfo();
+
+    DetectorSearcher searcher(inst, info);
+
+    std::vector<double> xDirections(50);
+    std::vector<double> yDirections(50);
+    std::vector<double> zDirections(50);
+
+    // create x values of the range -1 to 1
+    int index = 0;
+    double startValue = -1;
+    std::generate(
+        xDirections.begin(), xDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    // create z values of the range 0.1 to 1
+    // ignore negative z values as these are not physical!
+    index = 0;
+    startValue = 0.1;
+    std::generate(
+        zDirections.begin(), zDirections.end(),
+        [&index, &startValue]() { return startValue + index++ * 0.1; });
+
+    yDirections = xDirections;
+
+    size_t hitCount = 0;
+    for (auto &x : xDirections) {
+      for (auto &y : yDirections) {
+        for (auto &z : zDirections) {
+          const auto result = searcher.findDetectorIndex(V3D(x, y, z));
+          if (std::get<0>(result))
+            ++hitCount;
+        }
+      }
+    }
+
+    TS_ASSERT_EQUALS(hitCount, 16235)
+  }
+};
+
+#endif
diff --git a/Framework/API/test/FunctionTest.h b/Framework/API/test/FunctionTest.h
index 58c89921caf70680c5b55cda7df1e1cbd0d6a195..e37da29efceb22330079dc8592e81c002811217d 100644
--- a/Framework/API/test/FunctionTest.h
+++ b/Framework/API/test/FunctionTest.h
@@ -269,7 +269,8 @@ public:
     TS_ASSERT(!f.isFixed(0));
     TS_ASSERT(f.isFixed(1));
     TS_ASSERT(!f.isFixed(2));
-    TS_ASSERT(f.isFixed(3));
+    TS_ASSERT(!f.isFixed(3));
+    TS_ASSERT(!f.isActive(3));
 
     TS_ASSERT(f.isActive(0));
     TS_ASSERT(!f.isActive(1));
@@ -334,9 +335,12 @@ public:
     TS_ASSERT_EQUALS(f.getParameter("c3"), 3.3);
 
     TS_ASSERT(!f.isFixed(0));
-    TS_ASSERT(f.isFixed(1));
+    TS_ASSERT(!f.isFixed(1));
+    TS_ASSERT(!f.isActive(1));
     TS_ASSERT(!f.isFixed(2));
+    TS_ASSERT(f.isActive(2));
     TS_ASSERT(!f.isFixed(3));
+    TS_ASSERT(f.isActive(3));
 
     TS_ASSERT(!f.getTie(0));
     TS_ASSERT(f.getTie(1) && !f.getTie(1)->isDefault());
diff --git a/Framework/API/test/ImmutableCompositeFunctionTest.h b/Framework/API/test/ImmutableCompositeFunctionTest.h
index e15872611464696244eb74df62828001e803c68d..d97de5ba69c74d0cbe143b212995aa5181ec9637 100644
--- a/Framework/API/test/ImmutableCompositeFunctionTest.h
+++ b/Framework/API/test/ImmutableCompositeFunctionTest.h
@@ -279,9 +279,13 @@ public:
     icf.addTies("b2=b1,a2=a1/5");
     icf.applyTies();
 
-    TS_ASSERT_EQUALS(icf.asString(), "name=ImmutableCompositeFunctionTest_"
-                                     "Function,NumDeriv=false,a1=11,b1=12,a2=2."
-                                     "2,b2=12,ties=(a2=a1/5,b2=b1)");
+    auto icfString = icf.asString();
+    TS_ASSERT_EQUALS(icfString.substr(0, 91),
+                     "name=ImmutableCompositeFunctionTest_"
+                     "Function,NumDeriv=false,a1=11,b1=12,a2=2.2,b2=12,ties=(");
+    auto icfTies = icfString.substr(91);
+    TS_ASSERT(icfTies.find("a2=a1/5") != std::string::npos)
+    TS_ASSERT(icfTies.find("b2=b1") != std::string::npos)
 
     auto fun = FunctionFactory::Instance().createInitialized(icf.asString());
     TS_ASSERT(fun);
diff --git a/Framework/API/test/ParameterReferenceTest.h b/Framework/API/test/ParameterReferenceTest.h
index c7574f73382d077f72434582b4f891cbf3c7cb73..188dfefd7ba04759a6d8d23b76371389016b575d 100644
--- a/Framework/API/test/ParameterReferenceTest.h
+++ b/Framework/API/test/ParameterReferenceTest.h
@@ -86,37 +86,37 @@ public:
     TS_ASSERT_EQUALS(f1->getContainingFunction(r12), f1_2);
     TS_ASSERT_EQUALS(f1_2->getContainingFunction(r12), f1_2_1);
 
-    TS_ASSERT_EQUALS(r0.getFunction(), f0.get());
-    TS_ASSERT_EQUALS(r1.getFunction(), f0.get());
-    TS_ASSERT_EQUALS(r2.getFunction(), f0.get());
+    TS_ASSERT_EQUALS(r0.getLocalFunction(), f0.get());
+    TS_ASSERT_EQUALS(r1.getLocalFunction(), f0.get());
+    TS_ASSERT_EQUALS(r2.getLocalFunction(), f0.get());
 
-    TS_ASSERT_EQUALS(r0.getIndex(), 0);
-    TS_ASSERT_EQUALS(r1.getIndex(), 1);
-    TS_ASSERT_EQUALS(r2.getIndex(), 2);
+    TS_ASSERT_EQUALS(r0.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r1.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r2.getLocalIndex(), 2);
 
-    TS_ASSERT_EQUALS(r3.getFunction(), f1_0.get());
-    TS_ASSERT_EQUALS(r4.getFunction(), f1_0.get());
-    TS_ASSERT_EQUALS(r5.getFunction(), f1_0.get());
+    TS_ASSERT_EQUALS(r3.getLocalFunction(), f1_0.get());
+    TS_ASSERT_EQUALS(r4.getLocalFunction(), f1_0.get());
+    TS_ASSERT_EQUALS(r5.getLocalFunction(), f1_0.get());
 
-    TS_ASSERT_EQUALS(r3.getIndex(), 0);
-    TS_ASSERT_EQUALS(r4.getIndex(), 1);
-    TS_ASSERT_EQUALS(r5.getIndex(), 2);
+    TS_ASSERT_EQUALS(r3.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r4.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r5.getLocalIndex(), 2);
 
-    TS_ASSERT_EQUALS(r6.getFunction(), f1_1.get());
-    TS_ASSERT_EQUALS(r7.getFunction(), f1_1.get());
-    TS_ASSERT_EQUALS(r8.getFunction(), f1_1.get());
+    TS_ASSERT_EQUALS(r6.getLocalFunction(), f1_1.get());
+    TS_ASSERT_EQUALS(r7.getLocalFunction(), f1_1.get());
+    TS_ASSERT_EQUALS(r8.getLocalFunction(), f1_1.get());
 
-    TS_ASSERT_EQUALS(r6.getIndex(), 0);
-    TS_ASSERT_EQUALS(r7.getIndex(), 1);
-    TS_ASSERT_EQUALS(r8.getIndex(), 2);
+    TS_ASSERT_EQUALS(r6.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r7.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r8.getLocalIndex(), 2);
 
-    TS_ASSERT_EQUALS(r9.getFunction(), f1_2_0.get());
-    TS_ASSERT_EQUALS(r10.getFunction(), f1_2_0.get());
-    TS_ASSERT_EQUALS(r11.getFunction(), f1_2_0.get());
+    TS_ASSERT_EQUALS(r9.getLocalFunction(), f1_2_0.get());
+    TS_ASSERT_EQUALS(r10.getLocalFunction(), f1_2_0.get());
+    TS_ASSERT_EQUALS(r11.getLocalFunction(), f1_2_0.get());
 
-    TS_ASSERT_EQUALS(r9.getIndex(), 0);
-    TS_ASSERT_EQUALS(r10.getIndex(), 1);
-    TS_ASSERT_EQUALS(r11.getIndex(), 2);
+    TS_ASSERT_EQUALS(r9.getLocalIndex(), 0);
+    TS_ASSERT_EQUALS(r10.getLocalIndex(), 1);
+    TS_ASSERT_EQUALS(r11.getLocalIndex(), 2);
 
     delete cf;
   }
diff --git a/Framework/API/test/ParameterTieTest.h b/Framework/API/test/ParameterTieTest.h
index 15544970263f2b58c982d2a215c928c6f4434a17..1cb68a7645206f0dc5b8dbb53cd7dd401931380d 100644
--- a/Framework/API/test/ParameterTieTest.h
+++ b/Framework/API/test/ParameterTieTest.h
@@ -118,8 +118,8 @@ public:
     TS_ASSERT_EQUALS(tie.asString(&mfun), "f1.sig=f2.sig^2+f0.a+1");
 
     TS_ASSERT_DELTA(tie.eval(), 5.8, 0.00001);
-    TS_ASSERT_EQUALS(tie.getFunction(), g1.get());
-    TS_ASSERT_EQUALS(tie.getIndex(), 2);
+    TS_ASSERT_EQUALS(tie.getLocalFunction(), g1.get());
+    TS_ASSERT_EQUALS(tie.getLocalIndex(), 2);
 
     TS_ASSERT_THROWS(mustThrow1(&mfun), std::invalid_argument);
     TS_ASSERT_THROWS(mustThrow2(&mfun), std::invalid_argument);
@@ -144,8 +144,8 @@ public:
     TS_ASSERT_EQUALS(tie.asString(&mfun), "f0.b=f3.sig^2+f1.a+1");
 
     TS_ASSERT_DELTA(tie.eval(), 2, 0.00001);
-    TS_ASSERT_EQUALS(tie.getFunction(), bk1.get());
-    TS_ASSERT_EQUALS(tie.getIndex(), 1);
+    TS_ASSERT_EQUALS(tie.getLocalFunction(), bk1.get());
+    TS_ASSERT_EQUALS(tie.getLocalIndex(), 1);
 
     mfun.removeFunction(2);
     TS_ASSERT_EQUALS(tie.asString(&mfun), "f0.b=f2.sig^2+f1.a+1");
@@ -213,7 +213,7 @@ public:
 
     ParameterTie tie(&bk, "b", "2*a-1");
 
-    TS_ASSERT_EQUALS(tie.getIndex(), 1);
+    TS_ASSERT_EQUALS(tie.getLocalIndex(), 1);
     TS_ASSERT_DELTA(tie.eval(), 0.6, 0.00001);
     TS_ASSERT_THROWS(mustThrow4(&bk), std::invalid_argument);
     TS_ASSERT_THROWS(mustThrow5(&bk), std::invalid_argument);
diff --git a/Framework/API/test/NearestNeighbourInfoTest.h b/Framework/API/test/WorkspaceNearestNeighbourInfoTest.h
similarity index 68%
rename from Framework/API/test/NearestNeighbourInfoTest.h
rename to Framework/API/test/WorkspaceNearestNeighbourInfoTest.h
index f8fa58c3e4d72ae8275216a845528f2a378fe4d9..f74e380af23ee0955910c731cf2d5bc03f315562 100644
--- a/Framework/API/test/NearestNeighbourInfoTest.h
+++ b/Framework/API/test/WorkspaceNearestNeighbourInfoTest.h
@@ -5,21 +5,23 @@
 
 #include "MantidTestHelpers/FakeObjects.h"
 #include "MantidTestHelpers/InstrumentCreationHelper.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidAPI/SpectrumInfo.h"
 
-using Mantid::API::NearestNeighbourInfo;
+using Mantid::API::WorkspaceNearestNeighbourInfo;
 
-class NearestNeighbourInfoTest : public CxxTest::TestSuite {
+class WorkspaceNearestNeighbourInfoTest : public CxxTest::TestSuite {
 public:
   // This pair of boilerplate methods prevent the suite being created statically
   // This means the constructor isn't called when running other tests
-  static NearestNeighbourInfoTest *createSuite() {
-    return new NearestNeighbourInfoTest();
+  static WorkspaceNearestNeighbourInfoTest *createSuite() {
+    return new WorkspaceNearestNeighbourInfoTest();
+  }
+  static void destroySuite(WorkspaceNearestNeighbourInfoTest *suite) {
+    delete suite;
   }
-  static void destroySuite(NearestNeighbourInfoTest *suite) { delete suite; }
 
-  NearestNeighbourInfoTest() {
+  WorkspaceNearestNeighbourInfoTest() {
     workspace.initialize(100, 1, 1);
     InstrumentCreationHelper::addFullInstrumentToWorkspace(workspace, false,
                                                            false, "");
@@ -29,15 +31,15 @@ public:
   }
 
   void test_construct() {
-    TS_ASSERT_THROWS_NOTHING(NearestNeighbourInfo(workspace, false));
+    TS_ASSERT_THROWS_NOTHING(WorkspaceNearestNeighbourInfo(workspace, false));
   }
 
   void test_neighbourCount() {
     // No detailed test, just checking if parameters are passed on to
     // NearestNeighbours correctly.
-    NearestNeighbourInfo nn2(workspace, false, 2);
+    WorkspaceNearestNeighbourInfo nn2(workspace, false, 2);
     TS_ASSERT_EQUALS(nn2.getNeighboursExact(3).size(), 2);
-    NearestNeighbourInfo nn4(workspace, false, 4);
+    WorkspaceNearestNeighbourInfo nn4(workspace, false, 4);
     const auto neighbours = nn4.getNeighboursExact(3);
     TS_ASSERT_EQUALS(neighbours.size(), 4);
     TS_ASSERT_EQUALS(neighbours.count(1), 1);
@@ -46,9 +48,9 @@ public:
   void test_neighbourCount_ignoreMasked() {
     // No detailed test, just checking if parameters are passed on to
     // NearestNeighbours correctly.
-    NearestNeighbourInfo nn2(workspace, true, 2);
+    WorkspaceNearestNeighbourInfo nn2(workspace, true, 2);
     TS_ASSERT_EQUALS(nn2.getNeighboursExact(3).size(), 2);
-    NearestNeighbourInfo nn4(workspace, true, 4);
+    WorkspaceNearestNeighbourInfo nn4(workspace, true, 4);
     const auto neighbours = nn4.getNeighboursExact(3);
     TS_ASSERT_EQUALS(neighbours.size(), 4);
     TS_ASSERT_EQUALS(neighbours.count(1), 0);
diff --git a/Framework/API/test/NearestNeighboursTest.h b/Framework/API/test/WorkspaceNearestNeighboursTest.h
similarity index 89%
rename from Framework/API/test/NearestNeighboursTest.h
rename to Framework/API/test/WorkspaceNearestNeighboursTest.h
index 0369aaacabbf069c5fff98582b8e0df241163e7e..22320d7276a1bb874c3d0c06da4bae845f63bd0d 100644
--- a/Framework/API/test/NearestNeighboursTest.h
+++ b/Framework/API/test/WorkspaceNearestNeighboursTest.h
@@ -1,7 +1,7 @@
 #ifndef MANTID_TEST_GEOMETRY_NEARESTNEIGHBOURS
 #define MANTID_TEST_GEOMETRY_NEARESTNEIGHBOURS
 
-#include "MantidAPI/NearestNeighbours.h"
+#include "MantidAPI/WorkspaceNearestNeighbours.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidGeometry/IDetector.h"
 #include "MantidGeometry/Instrument/Detector.h"
@@ -46,20 +46,22 @@ std::vector<specnum_t> getSpectrumNumbers(const MatrixWorkspace &workspace) {
 //=====================================================================================
 // Functional tests
 //=====================================================================================
-class NearestNeighboursTest : public CxxTest::TestSuite {
+class WorkspaceNearestNeighboursTest : public CxxTest::TestSuite {
 private:
   /// Helper type giving access to protected methods. Makes testing of NN
   /// internals possible.
-  class ExposedNearestNeighbours : public Mantid::API::NearestNeighbours {
+  class ExposedNearestNeighbours
+      : public Mantid::API::WorkspaceNearestNeighbours {
   public:
     ExposedNearestNeighbours(const SpectrumInfo &spectrumInfo,
                              const std::vector<specnum_t> spectrumNumbers,
                              bool ignoreMasked = false)
-        : NearestNeighbours(8, spectrumInfo, spectrumNumbers, ignoreMasked) {}
+        : WorkspaceNearestNeighbours(8, spectrumInfo, spectrumNumbers,
+                                     ignoreMasked) {}
 
     // Direct access to intermdiate spectra detectors
     std::vector<size_t> getSpectraDetectors() {
-      return NearestNeighbours::getSpectraDetectors();
+      return WorkspaceNearestNeighbours::getSpectraDetectors();
     }
   };
 
@@ -71,8 +73,8 @@ public:
         ComponentCreationHelper::createTestInstrumentCylindrical(2));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(actualNeighboursNumber, ws->spectrumInfo(),
-                         getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(actualNeighboursNumber, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
 
     // Check distances calculated in NearestNeighbours compare with those using
     // getDistance on component
@@ -88,7 +90,8 @@ public:
         ComponentCreationHelper::createTestInstrumentCylindrical(2));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(8, ws->spectrumInfo(), getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(8, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
 
     detid2det_map m_detectors;
     ws->getInstrument()->getDetectors(m_detectors);
@@ -144,7 +147,8 @@ public:
         ComponentCreationHelper::createTestInstrumentRectangular(2, 16));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(8, ws->spectrumInfo(), getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(8, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
 
     const auto &m_instrument = ws->getInstrument();
     // Correct # of detectors
@@ -209,7 +213,8 @@ public:
         ComponentCreationHelper::createTestInstrumentCylindrical(2));
 
     // Create the NearestNeighbours object directly.
-    NearestNeighbours nn(8, ws->spectrumInfo(), getSpectrumNumbers(*ws));
+    WorkspaceNearestNeighbours nn(8, ws->spectrumInfo(),
+                                  getSpectrumNumbers(*ws));
     for (size_t i = 0; i < 2000; i++) {
       nn.neighboursInRadius(1, 5.0);
     }
@@ -224,7 +229,7 @@ public:
     const auto &spectrumInfo = ws->spectrumInfo();
     const auto spectrumNumbers = getSpectrumNumbers(*ws);
     for (size_t i = 0; i < 2000; i++) {
-      NearestNeighbours nn(8, spectrumInfo, spectrumNumbers);
+      WorkspaceNearestNeighbours nn(8, spectrumInfo, spectrumNumbers);
       nn.neighbours(1);
     }
   }
diff --git a/Framework/Algorithms/CMakeLists.txt b/Framework/Algorithms/CMakeLists.txt
index 21ad795708b779ff06db54f13a9a728c1dec5e9b..852f6deb51336c832cd89005ef76e2844e5dfe63 100644
--- a/Framework/Algorithms/CMakeLists.txt
+++ b/Framework/Algorithms/CMakeLists.txt
@@ -20,11 +20,11 @@ set ( SRC_FILES
 	src/BinaryOperation.cpp
 	src/CalMuonDeadTime.cpp
 	src/CalMuonDetectorPhases.cpp
-	src/CalculateMuonAsymmetry.cpp
 	src/CalculateCountRate.cpp
 	src/CalculateDIFC.cpp
 	src/CalculateEfficiency.cpp
 	src/CalculateFlatBackground.cpp
+	src/CalculateMuonAsymmetry.cpp
 	src/CalculateResolution.cpp
 	src/CalculateSlits.cpp
 	src/CalculateTransmission.cpp
@@ -131,6 +131,7 @@ set ( SRC_FILES
 	src/FindCenterOfMassPosition2.cpp
 	src/FindDeadDetectors.cpp
 	src/FindDetectorsOutsideLimits.cpp
+	src/FindEPP.cpp
 	src/FindPeakBackground.cpp
 	src/FindPeaks.cpp
 	src/FitPeak.cpp
@@ -160,6 +161,7 @@ set ( SRC_FILES
 	src/InterpolatingRebin.cpp
 	src/InterpolationOption.cpp
 	src/InvertMask.cpp
+	src/LineProfile.cpp
 	src/Logarithm.cpp
 	src/LorentzCorrection.cpp
 	src/MagFormFactorCorrection.cpp
@@ -316,7 +318,7 @@ set ( C_SRC_FILES
 	src/Tomography/tomopy/utils.c
 )
 
-set ( INC_FILES	
+set ( INC_FILES
 	inc/MantidAlgorithms/AbsorptionCorrection.h
 	inc/MantidAlgorithms/AddLogDerivative.h
 	inc/MantidAlgorithms/AddNote.h
@@ -339,11 +341,11 @@ set ( INC_FILES
 	inc/MantidAlgorithms/BoostOptionalToAlgorithmProperty.h
 	inc/MantidAlgorithms/CalMuonDeadTime.h
 	inc/MantidAlgorithms/CalMuonDetectorPhases.h
-	inc/MantidAlgorithms/CalculateMuonAsymmetry.h
 	inc/MantidAlgorithms/CalculateCountRate.h
 	inc/MantidAlgorithms/CalculateDIFC.h
 	inc/MantidAlgorithms/CalculateEfficiency.h
 	inc/MantidAlgorithms/CalculateFlatBackground.h
+	inc/MantidAlgorithms/CalculateMuonAsymmetry.h
 	inc/MantidAlgorithms/CalculateResolution.h
 	inc/MantidAlgorithms/CalculateSlits.h
 	inc/MantidAlgorithms/CalculateTransmission.h
@@ -450,6 +452,7 @@ set ( INC_FILES
 	inc/MantidAlgorithms/FindCenterOfMassPosition2.h
 	inc/MantidAlgorithms/FindDeadDetectors.h
 	inc/MantidAlgorithms/FindDetectorsOutsideLimits.h
+	inc/MantidAlgorithms/FindEPP.h
 	inc/MantidAlgorithms/FindPeakBackground.h
 	inc/MantidAlgorithms/FindPeaks.h
 	inc/MantidAlgorithms/FitPeak.h
@@ -480,6 +483,7 @@ set ( INC_FILES
 	inc/MantidAlgorithms/InterpolatingRebin.h
 	inc/MantidAlgorithms/InterpolationOption.h
 	inc/MantidAlgorithms/InvertMask.h
+	inc/MantidAlgorithms/LineProfile.h
 	inc/MantidAlgorithms/Logarithm.h
 	inc/MantidAlgorithms/LorentzCorrection.h
 	inc/MantidAlgorithms/MagFormFactorCorrection.h
@@ -670,11 +674,11 @@ set ( TEST_FILES
 	BinaryOperationTest.h
 	CalMuonDeadTimeTest.h
 	CalMuonDetectorPhasesTest.h
-	CalculateMuonAsymmetryTest.h
 	CalculateCountRateTest.h
 	CalculateDIFCTest.h
 	CalculateEfficiencyTest.h
 	CalculateFlatBackgroundTest.h
+	CalculateMuonAsymmetryTest.h
 	CalculateResolutionTest.h
 	CalculateSlitsTest.h
 	CalculateTransmissionBeamSpreaderTest.h
@@ -777,6 +781,7 @@ set ( TEST_FILES
 	FindCenterOfMassPositionTest.h
 	FindDeadDetectorsTest.h
 	FindDetectorsOutsideLimitsTest.h
+	FindEPPTest.h
 	FindPeakBackgroundTest.h
 	FindPeaksTest.h
 	FitPeakTest.h
@@ -805,6 +810,7 @@ set ( TEST_FILES
 	InterpolatingRebinTest.h
 	InterpolationOptionTest.h
 	InvertMaskTest.h
+	LineProfileTest.h
 	LogarithmTest.h
 	LorentzCorrectionTest.h
 	MCAbsorptionStrategyTest.h
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/CreateSampleWorkspace.h b/Framework/Algorithms/inc/MantidAlgorithms/CreateSampleWorkspace.h
index 76d15beaefbdd695e1403c0ccf967a75c6e406a5..d5e7ca0d274add0fd3d6e95df2a953182763535d 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/CreateSampleWorkspace.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/CreateSampleWorkspace.h
@@ -63,6 +63,9 @@ private:
                            double x0, double binDelta,
                            Geometry::Instrument_sptr inst,
                            const std::string &functionString, bool isRandom);
+  API::MatrixWorkspace_sptr createScanningWorkspace(
+      int numBins, double x0, double binDelta, Geometry::Instrument_sptr inst,
+      const std::string &functionString, bool isRandom, int numScanPoints);
   Geometry::Instrument_sptr createTestInstrumentRectangular(
       API::Progress &progress, int numBanks, int numMonitors, int pixels,
       double pixelSpacing, const double bankDistanceFromSample,
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/FindEPP.h b/Framework/Algorithms/inc/MantidAlgorithms/FindEPP.h
new file mode 100644
index 0000000000000000000000000000000000000000..61ba47b88b95c0e1d38432c0232996ae85def098
--- /dev/null
+++ b/Framework/Algorithms/inc/MantidAlgorithms/FindEPP.h
@@ -0,0 +1,57 @@
+#ifndef MANTID_ALGORITHMS_FINDEPP_H_
+#define MANTID_ALGORITHMS_FINDEPP_H_
+
+#include "MantidAPI/Algorithm.h"
+#include "MantidAPI/ITableWorkspace.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAlgorithms/DllConfig.h"
+
+namespace Mantid {
+namespace Algorithms {
+
+/** Performs Gaussian fits over each spectrum to find the Elastic Peak
+ Position (EPP).
+
+  Copyright &copy; 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+class MANTID_ALGORITHMS_DLL FindEPP : public API::Algorithm {
+public:
+  const std::string name() const override;
+  int version() const override;
+  const std::string category() const override;
+  const std::string summary() const override;
+
+private:
+  void init() override;
+  void exec() override;
+  void fitGaussian(int64_t);
+  void initWorkspace();
+
+  Mantid::API::MatrixWorkspace_sptr m_inWS;
+  Mantid::API::ITableWorkspace_sptr m_outWS;
+  std::unique_ptr<Mantid::API::Progress> m_progress;
+};
+
+} // namespace Algorithms
+} // namespace Mantid
+
+#endif /* MANTID_ALGORITHMS_FINDEPP_H_ */
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/LineProfile.h b/Framework/Algorithms/inc/MantidAlgorithms/LineProfile.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2da673079df0d129e3cd05df8c44e6658d2a895
--- /dev/null
+++ b/Framework/Algorithms/inc/MantidAlgorithms/LineProfile.h
@@ -0,0 +1,50 @@
+#ifndef MANTID_ALGORITHMS_LINEPROFILE_H_
+#define MANTID_ALGORITHMS_LINEPROFILE_H_
+
+#include "MantidAlgorithms/DllConfig.h"
+#include "MantidAPI/Algorithm.h"
+
+namespace Mantid {
+namespace Algorithms {
+
+/** LineProfile : Calculates a horizontal or vertical line profile over
+  a MatrixWorkspace.
+
+  Copyright &copy; 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+class MANTID_ALGORITHMS_DLL LineProfile : public API::Algorithm {
+public:
+  const std::string name() const override;
+  int version() const override;
+  const std::string category() const override;
+  const std::string summary() const override;
+  std::map<std::string, std::string> validateInputs() override;
+
+private:
+  void init() override;
+  void exec() override;
+};
+
+} // namespace Algorithms
+} // namespace Mantid
+
+#endif /* MANTID_ALGORITHMS_LINEPROFILE_H_ */
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h
index 34a4d1461b09fb6c5315cb9bd7b088c87f966838..4ac333833b4e591184f0fc62006b08802284fa2d 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryReductionOne2.h
@@ -4,6 +4,15 @@
 #include "MantidAlgorithms/ReflectometryWorkflowBase2.h"
 
 namespace Mantid {
+// Forward declaration
+namespace API {
+class SpectrumInfo;
+}
+namespace HistogramData {
+class HistogramX;
+class HistogramY;
+class HistogramE;
+}
 namespace Algorithms {
 
 /** ReflectometryReductionOne2 : Reflectometry reduction of a single input TOF
@@ -59,15 +68,86 @@ private:
   // Create a direct beam workspace from input workspace in wavelength
   Mantid::API::MatrixWorkspace_sptr
   makeDirectBeamWS(Mantid::API::MatrixWorkspace_sptr inputWS);
+  // Performs direct beam correction
+  Mantid::API::MatrixWorkspace_sptr
+  directBeamCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
+  // Performs transmission or algorithm correction
+  Mantid::API::MatrixWorkspace_sptr
+  transOrAlgCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS,
+                       const bool detectorWSReduced);
   // Performs transmission corrections
   Mantid::API::MatrixWorkspace_sptr
-  transmissionCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
+  transmissionCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS,
+                         const bool detectorWSReduced);
   // Performs transmission corrections using alternative correction algorithms
   Mantid::API::MatrixWorkspace_sptr
   algorithmicCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
+  // Performs monitor corrections
+  Mantid::API::MatrixWorkspace_sptr
+  monitorCorrection(Mantid::API::MatrixWorkspace_sptr detectorWS);
   // convert to momentum transfer
   Mantid::API::MatrixWorkspace_sptr
   convertToQ(Mantid::API::MatrixWorkspace_sptr inputWS);
+  // Create the output workspace in wavelength
+  Mantid::API::MatrixWorkspace_sptr makeIvsLam();
+  // Do the reduction by summation in Q
+  Mantid::API::MatrixWorkspace_sptr
+  sumInQ(API::MatrixWorkspace_sptr detectorWS);
+  // Do the summation in Q for a single input value
+  void sumInQProcessValue(const int inputIdx, const double twoTheta,
+                          const double bTwoTheta,
+                          const HistogramData::HistogramX &inputX,
+                          const HistogramData::HistogramY &inputY,
+                          const HistogramData::HistogramE &inputE,
+                          const std::vector<size_t> &detectors,
+                          const size_t outSpecIdx,
+                          API::MatrixWorkspace_sptr IvsLam,
+                          std::vector<double> &outputE);
+  // Share counts to a projected value for summation in Q
+  void sumInQShareCounts(const double inputCounts, const double inputErr,
+                         const double bLambda, const double lambdaMin,
+                         const double lambdaMax, const size_t outSpecIdx,
+                         API::MatrixWorkspace_sptr IvsLam,
+                         std::vector<double> &outputE);
+  // Construct the output workspace
+  void findIvsLamRange(API::MatrixWorkspace_sptr detectorWS,
+                       const std::vector<size_t> &detectors, double &xMin,
+                       double &xMax);
+  // Construct the output workspace
+  Mantid::API::MatrixWorkspace_sptr
+  constructIvsLamWS(API::MatrixWorkspace_sptr detectorWS);
+  // Whether summation should be done in Q or the default lambda
+  bool summingInQ();
+  // Get projected coordinates onto twoThetaR
+  void getProjectedLambdaRange(const double lambda, const double twoTheta,
+                               const double bLambda, const double bTwoTheta,
+                               const std::vector<size_t> &detectors,
+                               double &lambdaTop, double &lambdaBot);
+  // Check whether two spectrum maps match
+  void verifySpectrumMaps(API::MatrixWorkspace_const_sptr ws1,
+                          API::MatrixWorkspace_const_sptr ws2,
+                          const bool severe);
+
+  // Find and cache constants
+  void findDetectorGroups();
+  void findTheta0();
+  // Accessors for detectors and theta and lambda values
+  const std::vector<std::vector<size_t>> &detectorGroups() const {
+    return m_detectorGroups;
+  };
+  double theta0() { return m_theta0; }
+  double twoThetaR(const std::vector<size_t> &detectors);
+  size_t twoThetaRDetectorIdx(const std::vector<size_t> &detectors);
+
+  API::MatrixWorkspace_sptr m_runWS;
+  const API::SpectrumInfo *m_spectrumInfo;
+  bool m_convertUnits;          // convert the input workspace to lambda
+  bool m_normaliseMonitors;     // normalise by monitors and direct beam
+  bool m_normaliseTransmission; // transmission or algorithmic correction
+  bool m_sum;                   // whether to do summation
+  double m_theta0;              // horizon angle
+  // groups of spectrum indices of the detectors of interest
+  std::vector<std::vector<size_t>> m_detectorGroups;
 };
 
 } // namespace Algorithms
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h
index 2556f1b72978f9ab65ddffe090bfc106c34daf54..a1823aa32a0fa0ee94f05dc38a1477a6fbe8a5e5 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/ReflectometryWorkflowBase2.h
@@ -35,6 +35,8 @@ namespace Algorithms {
 class DLLExport ReflectometryWorkflowBase2
     : public API::DataProcessorAlgorithm {
 protected:
+  /// Initialize reduction-type properties
+  void initReductionProperties();
   /// Initialize monitor properties
   void initMonitorProperties();
   /// Initialize direct beam properties
@@ -47,6 +49,8 @@ protected:
   void initAlgorithmicProperties(bool autodetect = false);
   /// Initialize momentum transfer properties
   void initMomentumTransferProperties();
+  /// Validate reduction-type properties
+  std::map<std::string, std::string> validateReductionProperties() const;
   /// Validate direct beam properties
   std::map<std::string, std::string> validateDirectBeamProperties() const;
   /// Validate transmission properties
@@ -61,7 +65,8 @@ protected:
   cropWavelength(Mantid::API::MatrixWorkspace_sptr inputWS);
   // Create a detector workspace from input workspace in wavelength
   Mantid::API::MatrixWorkspace_sptr
-  makeDetectorWS(Mantid::API::MatrixWorkspace_sptr inputWS);
+  makeDetectorWS(Mantid::API::MatrixWorkspace_sptr inputWS,
+                 const bool convert = true);
   // Create a monitor workspace from input workspace in wavelength
   Mantid::API::MatrixWorkspace_sptr
   makeMonitorWS(Mantid::API::MatrixWorkspace_sptr inputWS,
diff --git a/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h b/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h
index a435ab9ec6e035d55ba9fde4d43b0bcfa232e5dc..3171b7d784d40c6246a05de736583e709108b42f 100644
--- a/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h
+++ b/Framework/Algorithms/inc/MantidAlgorithms/SpatialGrouping.h
@@ -2,7 +2,7 @@
 #define MANTID_ALGORITHMS_SPATIAL_GROUPING_H_
 
 #include "MantidAPI/Algorithm.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidGeometry/IDTypes.h"
 
 namespace Mantid {
@@ -91,7 +91,7 @@ private:
   std::vector<std::vector<int>> m_groups;
 
   /// NearestNeighbourInfo used by expandNet()
-  std::unique_ptr<API::NearestNeighbourInfo> m_neighbourInfo;
+  std::unique_ptr<API::WorkspaceNearestNeighbourInfo> m_neighbourInfo;
 };
 
 } // namespace Algorithms
diff --git a/Framework/Algorithms/src/AnnularRingAbsorption.cpp b/Framework/Algorithms/src/AnnularRingAbsorption.cpp
index ae6300113a296876ba2f4b0f5094eb3f0c0bed78..8715fb3127e2df0cd07fe7a6d50eb0d01c0f8b6d 100644
--- a/Framework/Algorithms/src/AnnularRingAbsorption.cpp
+++ b/Framework/Algorithms/src/AnnularRingAbsorption.cpp
@@ -178,13 +178,15 @@ AnnularRingAbsorption::createSampleShapeXML(const V3D &upAxis) const {
   const double lowRadiusMtr = (wallMidPtCM - 0.5 * sampleThickCM) / 100.;
   const double uppRadiusMtr = (wallMidPtCM + 0.5 * sampleThickCM) / 100.;
 
-  // Cylinders oriented along Y, with origin at centre of bottom base
+  // Cylinders oriented along Y, with origin at the centre as expected by
+  // the MonteCarloAbsorption algorithm.
+  const V3D bottomCentre{0.0, -sampleHeightCM / 2.0 / 100.0, 0.0}; // in metres.
   const std::string innerCylID = std::string("inner-cyl");
-  const std::string innerCyl = cylinderXML(innerCylID, V3D(), lowRadiusMtr,
-                                           upAxis, sampleHeightCM / 100.0);
+  const std::string innerCyl = cylinderXML(
+      innerCylID, bottomCentre, lowRadiusMtr, upAxis, sampleHeightCM / 100.0);
   const std::string outerCylID = std::string("outer-cyl");
-  const std::string outerCyl = cylinderXML(outerCylID, V3D(), uppRadiusMtr,
-                                           upAxis, sampleHeightCM / 100.0);
+  const std::string outerCyl = cylinderXML(
+      outerCylID, bottomCentre, uppRadiusMtr, upAxis, sampleHeightCM / 100.0);
 
   // Combine shapes
   boost::format algebra("<algebra val=\"(%1% (# %2%))\" />");
diff --git a/Framework/Algorithms/src/CreateSampleWorkspace.cpp b/Framework/Algorithms/src/CreateSampleWorkspace.cpp
index 96799afe7398cd619713e7827ac08a1f75b1d9c0..33645ad7b9b1da56a2f347d31938c1e298a7067e 100644
--- a/Framework/Algorithms/src/CreateSampleWorkspace.cpp
+++ b/Framework/Algorithms/src/CreateSampleWorkspace.cpp
@@ -6,6 +6,7 @@
 #include "MantidAPI/FunctionProperty.h"
 #include "MantidAPI/Run.h"
 #include "MantidAPI/WorkspaceFactory.h"
+#include "MantidDataObjects/ScanningWorkspaceBuilder.h"
 #include "MantidDataObjects/Workspace2D.h"
 #include "MantidDataObjects/EventWorkspace.h"
 #include "MantidDataObjects/WorkspaceCreation.h"
@@ -159,6 +160,13 @@ void CreateSampleWorkspace::init() {
                   boost::make_shared<BoundedValidator<double>>(0, 1000, true),
                   "The distance along the beam direction from the source to "
                   "the sample in M (default:10.0)");
+  declareProperty("NumScanPoints", 1,
+                  boost::make_shared<BoundedValidator<int>>(0, 360, true),
+                  "Add a number of time indexed detector scan points to the "
+                  "instrument. The detectors are rotated in 1 degree "
+                  "increments around the the sample position in the x-z plane. "
+                  "Minimum (default) is 1 scan point, which gives a "
+                  "non-scanning workspace.");
 }
 
 //----------------------------------------------------------------------------------------------
@@ -180,6 +188,7 @@ void CreateSampleWorkspace::exec() {
   const double pixelSpacing = getProperty("PixelSpacing");
   const double bankDistanceFromSample = getProperty("BankDistanceFromSample");
   const double sourceSampleDistance = getProperty("SourceDistanceFromSample");
+  const int numScanPoints = getProperty("NumScanPoints");
 
   if (xMax <= xMin) {
     throw std::invalid_argument("XMax must be larger than XMin");
@@ -226,6 +235,9 @@ void CreateSampleWorkspace::exec() {
   if (wsType == "Event") {
     ws = createEventWorkspace(numPixels, numBins, numMonitors, numEvents, xMin,
                               binWidth, inst, functionString, isRandom);
+  } else if (numScanPoints > 1) {
+    ws = createScanningWorkspace(numBins, xMin, binWidth, inst, functionString,
+                                 isRandom, numScanPoints);
   } else {
     ws = createHistogramWorkspace(numPixels, numBins, numMonitors, xMin,
                                   binWidth, inst, functionString, isRandom);
@@ -309,6 +321,34 @@ MatrixWorkspace_sptr CreateSampleWorkspace::createHistogramWorkspace(
   return create<Workspace2D>(inst, indexInfo, Histogram(x, y));
 }
 
+/** Create scanning histogram workspace
+ */
+MatrixWorkspace_sptr CreateSampleWorkspace::createScanningWorkspace(
+    int numBins, double x0, double binDelta, Geometry::Instrument_sptr inst,
+    const std::string &functionString, bool isRandom, int numScanPoints) {
+  auto builder = ScanningWorkspaceBuilder(inst, numScanPoints, numBins);
+
+  auto angles = std::vector<double>();
+  auto timeRanges = std::vector<double>();
+  for (int i = 0; i < numScanPoints; ++i) {
+    angles.push_back(double(i));
+    timeRanges.push_back(double(i + 1));
+  }
+
+  builder.setTimeRanges(Kernel::DateAndTime(0), timeRanges);
+  builder.setRelativeRotationsForScans(angles, inst->getSample()->getPos(),
+                                       V3D(0, 1, 0));
+
+  BinEdges x(numBins + 1, LinearGenerator(x0, binDelta));
+
+  std::vector<double> xValues(cbegin(x), cend(x) - 1);
+  Counts y(evalFunction(functionString, xValues, isRandom ? 1 : 0));
+
+  builder.setHistogram(Histogram(x, y));
+
+  return builder.buildWorkspace();
+}
+
 /** Create event workspace
  */
 EventWorkspace_sptr CreateSampleWorkspace::createEventWorkspace(
diff --git a/Framework/Algorithms/src/FindEPP.cpp b/Framework/Algorithms/src/FindEPP.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..628913042603d63cf7ed8da9a500ab8ba32a5006
--- /dev/null
+++ b/Framework/Algorithms/src/FindEPP.cpp
@@ -0,0 +1,208 @@
+#include "MantidAlgorithms/FindEPP.h"
+#include "MantidAPI/TableRow.h"
+#include "MantidAPI/WorkspaceFactory.h"
+#include "MantidKernel/make_unique.h"
+
+#include <cmath>
+#include <sstream>
+
+namespace Mantid {
+namespace Algorithms {
+
+using namespace Mantid::Kernel;
+using namespace Mantid::API;
+
+// Register the algorithm into the AlgorithmFactory
+DECLARE_ALGORITHM(FindEPP)
+
+//----------------------------------------------------------------------------------------------
+
+/// Algorithms name for identification. @see Algorithm::name
+const std::string FindEPP::name() const { return "FindEPP"; }
+
+/// Algorithm's version for identification. @see Algorithm::version
+int FindEPP::version() const { return 2; }
+
+/// Algorithm's category for identification. @see Algorithm::category
+const std::string FindEPP::category() const {
+  return "Workflow\\MLZ\\TOFTOF;Utility";
+}
+
+/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
+const std::string FindEPP::summary() const {
+  return "Performs Gaussian fits over each spectrum to find the Elastic Peak "
+         "Position (EPP).";
+}
+
+//----------------------------------------------------------------------------------------------
+/** Initialize the algorithm's properties.
+ */
+void FindEPP::init() {
+  declareProperty(Kernel::make_unique<WorkspaceProperty<API::MatrixWorkspace>>(
+                      "InputWorkspace", "", Direction::Input),
+                  "An input workspace.");
+  declareProperty(Kernel::make_unique<WorkspaceProperty<API::ITableWorkspace>>(
+                      "OutputWorkspace", "", Direction::Output),
+                  "An output workspace.");
+}
+
+//----------------------------------------------------------------------------------------------
+/** Execute the algorithm.
+ */
+void FindEPP::exec() {
+  m_inWS = getProperty("InputWorkspace");
+
+  initWorkspace();
+
+  int64_t numberspectra = static_cast<int64_t>(m_inWS->getNumberHistograms());
+
+  // Loop over spectra
+  PARALLEL_FOR_IF(threadSafe(*m_inWS, *m_outWS))
+  for (int64_t index = 0; index < numberspectra; ++index) {
+    PARALLEL_START_INTERUPT_REGION
+    fitGaussian(index);
+    PARALLEL_END_INTERUPT_REGION
+  }
+  PARALLEL_CHECK_INTERUPT_REGION
+
+  setProperty("OutputWorkspace", m_outWS);
+}
+
+/* Call Fit as child algorithm for each spectra
+ * @param index : the workspace index
+ */
+void FindEPP::fitGaussian(int64_t index) {
+  size_t spectrum = static_cast<size_t>(index);
+  m_outWS->cell<int>(spectrum, 0) = static_cast<int>(spectrum);
+
+  const auto &x = m_inWS->x(spectrum).rawData();
+  const auto &y = m_inWS->y(spectrum).rawData();
+  const auto &e = m_inWS->e(spectrum).rawData();
+
+  // Find the maximum value and it's index
+  const auto maxIt = std::max_element(y.begin(), y.end());
+  const double height = *maxIt;
+  size_t maxIndex = static_cast<size_t>(std::distance(y.begin(), maxIt));
+
+  if (height > 0) {
+    // Find how many bins are around maximum, that are above half-maximum
+    // Initialize the distances of the half-maxima bins from maximum
+    size_t leftHalf = maxIndex, rightHalf = x.size() - maxIndex - 1;
+
+    // Find the first bin on the right side of maximum, that drops below
+    // half-maximum
+    for (auto it = maxIt; it != y.end(); ++it) {
+      if (*it < 0.5 * height) {
+        rightHalf = it - maxIt - 1;
+        break;
+      }
+    }
+
+    // Find the first bin on the left side of maximum, that drops below
+    // half-maximum
+    for (auto it = maxIt; it != y.begin(); --it) {
+      if (*it < 0.5 * height) {
+        leftHalf = maxIt - it - 1;
+        break;
+      }
+    }
+    g_log.debug() << "Peak in spectrum #" << spectrum
+                  << " has last bins above 0.5*max at " << leftHalf << "\t"
+                  << rightHalf << "\n";
+
+    // We want to fit only if there are at least 3 bins (including the maximum
+    // itself) above half-maximum
+    if (rightHalf + leftHalf >= 2) {
+
+      // Prepare the initial parameters for the fit
+      double fwhm = x[maxIndex + rightHalf] - x[maxIndex - leftHalf];
+      double sigma = fwhm / (2. * sqrt(2. * log(2.)));
+      double center = x[maxIndex];
+      double start = center - 3. * fwhm;
+      double end = center + 3. * fwhm;
+
+      std::stringstream function;
+      function << "name=Gaussian,PeakCentre=";
+      function << center << ",Height=" << height << ",Sigma=" << sigma;
+
+      g_log.debug() << "Fitting spectrum #" << spectrum
+                    << " with: " << function.str() << "\n";
+
+      IAlgorithm_sptr fitAlg = createChildAlgorithm("Fit", 0., 0., false);
+      fitAlg->setProperty("Function", function.str());
+      fitAlg->setProperty("InputWorkspace", m_inWS);
+      fitAlg->setProperty("WorkspaceIndex", static_cast<int>(spectrum));
+      fitAlg->setProperty("StartX", start);
+      fitAlg->setProperty("EndX", end);
+      fitAlg->setProperty("CreateOutput", true);
+      fitAlg->setProperty("OutputParametersOnly", true);
+      fitAlg->executeAsChildAlg();
+
+      const std::string status = fitAlg->getProperty("OutputStatus");
+      ITableWorkspace_sptr fitResult = fitAlg->getProperty("OutputParameters");
+
+      if (status == "success") {
+        m_outWS->cell<double>(spectrum, 1) = fitResult->cell<double>(1, 1);
+        m_outWS->cell<double>(spectrum, 2) = fitResult->cell<double>(1, 2);
+        m_outWS->cell<double>(spectrum, 3) = fitResult->cell<double>(2, 1);
+        m_outWS->cell<double>(spectrum, 4) = fitResult->cell<double>(2, 2);
+        m_outWS->cell<double>(spectrum, 5) = fitResult->cell<double>(0, 1);
+        m_outWS->cell<double>(spectrum, 6) = fitResult->cell<double>(0, 2);
+        m_outWS->cell<double>(spectrum, 7) = fitResult->cell<double>(3, 1);
+        m_outWS->cell<std::string>(spectrum, 8) = status;
+      } else {
+        g_log.debug() << "Fit failed in spectrum #" << spectrum
+                      << ". \nReason :" << status
+                      << ". \nSetting the maximum.\n";
+        m_outWS->cell<std::string>(spectrum, 8) = "fitFailed";
+        m_outWS->cell<double>(spectrum, 1) = x[maxIndex];
+        m_outWS->cell<double>(spectrum, 2) = 0.;
+        m_outWS->cell<double>(spectrum, 5) = height;
+        m_outWS->cell<double>(spectrum, 6) = e[maxIndex];
+      }
+
+    } else {
+      g_log.information() << "Found <=3 bins above half maximum in spectrum #"
+                          << index << ". Not fitting.\n";
+      m_outWS->cell<std::string>(spectrum, 8) = "narrowPeak";
+      m_outWS->cell<double>(spectrum, 1) = x[maxIndex];
+      m_outWS->cell<double>(spectrum, 2) = 0.;
+      m_outWS->cell<double>(spectrum, 5) = height;
+      m_outWS->cell<double>(spectrum, 6) = e[maxIndex];
+    }
+  } else {
+    g_log.notice() << "Negative maximum in spectrum #" << spectrum
+                   << ". Skipping.\n";
+    m_outWS->cell<std::string>(spectrum, 8) = "negativeMaximum";
+  }
+  m_progress->report();
+}
+
+/**
+ * Initializes the output workspace
+ */
+void FindEPP::initWorkspace() {
+
+  m_outWS = WorkspaceFactory::Instance().createTable("TableWorkspace");
+
+  const std::vector<std::string> columns = {
+      "PeakCentre", "PeakCentreError", "Sigma", "SigmaError",
+      "Height",     "HeightError",     "chiSq"};
+
+  m_outWS->addColumn("int", "WorkspaceIndex");
+  m_outWS->getColumn(0)->setPlotType(1);
+  for (const auto &column : columns) {
+    m_outWS->addColumn("double", column);
+  }
+  m_outWS->addColumn("str", "FitStatus");
+
+  const size_t numberSpectra = m_inWS->getNumberHistograms();
+  m_progress = make_unique<Progress>(this, 0, 1, numberSpectra);
+
+  for (size_t i = 0; i < numberSpectra; ++i) {
+    m_outWS->appendRow();
+  }
+}
+
+} // namespace Algorithms
+} // namespace Mantid
diff --git a/Framework/Algorithms/src/LineProfile.cpp b/Framework/Algorithms/src/LineProfile.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1f89fa8c6bb75b3dcf21e09aae71b8fe8b216fa3
--- /dev/null
+++ b/Framework/Algorithms/src/LineProfile.cpp
@@ -0,0 +1,442 @@
+#include "MantidAlgorithms/LineProfile.h"
+
+#include "MantidAPI/BinEdgeAxis.h"
+#include "MantidAPI/CommonBinsValidator.h"
+#include "MantidAPI/IncreasingAxisValidator.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidDataObjects/Workspace2D.h"
+#include "MantidDataObjects/WorkspaceCreation.h"
+#include "MantidKernel/BoundedValidator.h"
+#include "MantidKernel/CompositeValidator.h"
+#include "MantidKernel/ListValidator.h"
+#include "MantidKernel/MandatoryValidator.h"
+#include "MantidKernel/Unit.h"
+
+#include <algorithm>
+#include "boost/make_shared.hpp"
+
+namespace Mantid {
+namespace Algorithms {
+
+using Mantid::API::Axis;
+using Mantid::API::BinEdgeAxis;
+using Mantid::API::CommonBinsValidator;
+using Mantid::API::IncreasingAxisValidator;
+using Mantid::API::MatrixWorkspace;
+using Mantid::API::MatrixWorkspace_const_sptr;
+using Mantid::API::MatrixWorkspace_sptr;
+using Mantid::API::WorkspaceProperty;
+using Mantid::DataObjects::create;
+using Mantid::DataObjects::Workspace2D;
+using Mantid::DataObjects::Workspace2D_sptr;
+using Mantid::HistogramData::BinEdges;
+using Mantid::HistogramData::Counts;
+using Mantid::HistogramData::CountStandardDeviations;
+using Mantid::HistogramData::Histogram;
+using Mantid::HistogramData::Points;
+using Mantid::Kernel::BoundedValidator;
+using Mantid::Kernel::CompositeValidator;
+using Mantid::Kernel::Direction;
+using Mantid::Kernel::ListValidator;
+using Mantid::Kernel::make_unique;
+using Mantid::Kernel::MandatoryValidator;
+using Mantid::Kernel::Unit;
+
+namespace {
+/// An enum specifying a line profile orientation.
+enum class LineDirection { horizontal, vertical };
+
+/// A private namespace for the options for the Direction property.
+namespace DirectionChoices {
+const static std::string HORIZONTAL{"Horizontal"};
+const static std::string VERTICAL{"Vertical"};
+}
+
+/// A private namespace for the mode options.
+namespace ModeChoices {
+const static std::string AVERAGE{"Average"};
+const static std::string SUM{"Sum"};
+}
+
+/// A private namespace for property names.
+namespace PropertyNames {
+const static std::string CENTRE{"Centre"};
+const static std::string DIRECTION{"Direction"};
+const static std::string END{"End"};
+const static std::string INPUT_WORKSPACE{"InputWorkspace"};
+const static std::string HALF_WIDTH{"HalfWidth"};
+const static std::string IGNORE_INFS{"IgnoreInfs"};
+const static std::string IGNORE_NANS{"IgnoreNans"};
+const static std::string MODE{"Mode"};
+const static std::string OUTPUT_WORKSPACE{"OutputWorkspace"};
+const static std::string START{"Start"};
+}
+
+/// A convenience struct for rectangular constraints.
+struct Box {
+  double top;
+  double bottom;
+  double left;
+  double right;
+};
+
+/// Profile constraints as array indices.
+struct IndexLimits {
+  size_t lineStart;
+  size_t lineEnd;
+  size_t widthStart;
+  size_t widthEnd;
+};
+
+/**
+ * Set correct units and vertical axis binning.
+ * @param outWS A single-histogram workspace whose axes to modify.
+ * @param ws A workspace to copy units from.
+ * @param box Line profile constraints.
+ * @param dir Line profile orientation.
+ */
+void setAxesAndUnits(Workspace2D &outWS, const MatrixWorkspace &ws,
+                     const Box &box, const LineDirection dir) {
+  // Y units.
+  outWS.setYUnit(ws.YUnit());
+  outWS.setYUnitLabel(ws.YUnitLabel());
+  // Horizontal axis.
+  auto axisIndex = dir == LineDirection::horizontal ? 0 : 1;
+  if (ws.getAxis(axisIndex)->isSpectra()) {
+    outWS.getAxis(axisIndex)->setUnit("Empty");
+  } else {
+    outWS.getAxis(0)->setUnit(ws.getAxis(axisIndex)->unit()->unitID());
+  }
+  // Vertical axis. We'll use bin edges set to Centre +/- HalfWidth.
+  std::vector<double> vertBins(2);
+  vertBins.front() = dir == LineDirection::horizontal ? box.top : box.left;
+  vertBins.back() = dir == LineDirection::horizontal ? box.bottom : box.right;
+  auto outVertAxis = make_unique<BinEdgeAxis>(vertBins);
+  axisIndex = dir == LineDirection::horizontal ? 1 : 0;
+  if (ws.getAxis(axisIndex)->isSpectra()) {
+    outVertAxis->setUnit("Empty");
+  } else {
+    outVertAxis->setUnit(ws.getAxis(axisIndex)->unit()->unitID());
+  }
+  outWS.replaceAxis(1, outVertAxis.release());
+}
+
+/**
+ * Find the start and end indices for a line profile.
+ * @param bins Binning in a std::vector like container.
+ * @param isBinEdges Whether bins contains edges or points.
+ * @param lowerLimit A lower constraint.
+ * @param upperLImit An upper constraint.
+ * @return The interval as pair.
+ * @throw std::runtime_error if given constraints don't make sense.
+ */
+template <typename Container>
+std::pair<size_t, size_t>
+startAndEnd(const Container &bins, const bool isBinEdges,
+            const double lowerLimit, const double upperLimit) {
+  auto lowerIt = std::upper_bound(bins.cbegin(), bins.cend(), lowerLimit);
+  if (lowerIt == bins.cend()) {
+    throw std::runtime_error("Profile completely outside input workspace.");
+  }
+  if (lowerIt != bins.cbegin()) {
+    --lowerIt;
+  }
+  auto upperIt = std::upper_bound(lowerIt, bins.cend(), upperLimit);
+  if (upperIt == bins.cbegin()) {
+    throw std::runtime_error("Profile completely outside input workspace.");
+  }
+  if (isBinEdges && upperIt == bins.cend()) {
+    --upperIt;
+  }
+  const auto start = std::distance(bins.cbegin(), lowerIt);
+  const auto end = std::distance(bins.cbegin(), upperIt);
+  return std::pair<size_t, size_t>{start, end};
+}
+
+/**
+ * Extract values (binning) from (vertical) axis as vector. For
+ * spectrum axis, spectrum numbers are returned.
+ * @param axis An axis.
+ * @param numberHistograms The actual number of histograms.
+ * @return Axis bins.
+ */
+std::vector<double> extractVerticalBins(const Axis &axis,
+                                        const size_t numberHistograms) {
+  if (axis.isSpectra()) {
+    std::vector<double> spectrumNumbers(numberHistograms);
+    std::iota(spectrumNumbers.begin(), spectrumNumbers.end(), 1.0);
+    return spectrumNumbers;
+  }
+  std::vector<double> bins(axis.length());
+  for (size_t i = 0; i < bins.size(); ++i) {
+    bins[i] = axis.getValue(i);
+  }
+  return bins;
+}
+
+/**
+ * Calculate a line profile.
+ * @param Xs Output for line profile histogram's X data.
+ * @param Ys Output for line profile histogram's Y data.
+ * @param Es Output for line profile histogram's E data.
+ * @param ws A workspace where to extract a profile from.
+ * @param dir Line orientation.
+ * @param limits Line dimensions.
+ * @param lineBins Bins in line's direction.
+ * @param isBinEdges Whether lineBins represent edges or points.
+ * @param modeFunction A function performing the final calculation.
+ * @param ignoreNans Whether NaN values should be ignored or not.
+ * @param ignoreInfs Whether infinities should be ignored or not.
+ */
+template <typename Container, typename Function>
+void profile(std::vector<double> &Xs, std::vector<double> &Ys,
+             std::vector<double> &Es, const MatrixWorkspace &ws,
+             const LineDirection dir, const IndexLimits &limits,
+             const Container &lineBins, const bool isBinEdges,
+             Function modeFunction, const bool ignoreNans,
+             const bool ignoreInfs) {
+  const auto lineSize = limits.lineEnd - limits.lineStart;
+  Xs.resize(lineSize + (isBinEdges ? 1 : 0));
+  Ys.resize(lineSize);
+  Es.resize(lineSize);
+  for (size_t i = limits.lineStart; i < limits.lineEnd; ++i) {
+    Xs[i - limits.lineStart] = lineBins[i];
+    double ySum = 0;
+    double eSqSum = 0;
+    int n = 0;
+    for (size_t j = limits.widthStart; j < limits.widthEnd; ++j) {
+      const size_t iHor = dir == LineDirection::horizontal ? i : j;
+      const size_t iVert = dir == LineDirection::horizontal ? j : i;
+      const double y = ws.y(iVert)[iHor];
+      if ((ignoreNans && std::isnan(y)) || (ignoreInfs && std::isinf(y))) {
+        continue;
+      }
+      ySum += y;
+      const double e = ws.e(iVert)[iHor];
+      eSqSum += e * e;
+      ++n;
+    }
+    const size_t nTotal = limits.widthEnd - limits.widthStart;
+    Ys[i - limits.lineStart] =
+        n == 0 ? std::nan("") : modeFunction(ySum, n, nTotal);
+    const double e = modeFunction(std::sqrt(eSqSum), n, nTotal);
+    Es[i - limits.lineStart] = std::isnan(e) ? 0 : e;
+  }
+  if (isBinEdges) {
+    Xs.back() = lineBins[limits.lineEnd];
+  }
+}
+
+/**
+ * A mode function for averaging.
+ * @param sum A sum of data point.
+ * @param n Number of summed points.
+ * @return The average.
+ */
+double averageMode(const double sum, const size_t n, const size_t) noexcept {
+  return sum / static_cast<double>(n);
+}
+
+/**
+ * A mode function for weigthed summing. The weight is inversely proportional
+ * to the number of data points in the sum.
+ * @param sum A sum of data points.
+ * @param n Number of summed points.
+ * @param nTot Total number of possible points, including NaNs and infs.
+ * @return The weigthed sum.
+ */
+double sumMode(const double sum, const size_t n, const size_t nTot) noexcept {
+  return static_cast<double>(nTot) / static_cast<double>(n) * sum;
+}
+
+/**
+ * Return a suitable function to calculate the profile over its width.
+ * @param modeName The name of the calculation mode.
+ */
+auto createMode(const std::string &modeName) noexcept {
+  if (modeName == ModeChoices::AVERAGE) {
+    return averageMode;
+  }
+  return sumMode;
+}
+}
+
+// Register the algorithm into the AlgorithmFactory
+DECLARE_ALGORITHM(LineProfile)
+
+//----------------------------------------------------------------------------------------------
+
+/// Algorithms name for identification. @see Algorithm::name
+const std::string LineProfile::name() const { return "LineProfile"; }
+
+/// Algorithm's version for identification. @see Algorithm::version
+int LineProfile::version() const { return 1; }
+
+/// Algorithm's category for identification. @see Algorithm::category
+const std::string LineProfile::category() const { return "Utility"; }
+
+/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
+const std::string LineProfile::summary() const {
+  return "Calculates a line profile over a MatrixWorkspace.";
+}
+
+//----------------------------------------------------------------------------------------------
+/** Initialize the algorithm's properties.
+ */
+void LineProfile::init() {
+  const auto mandatoryDouble = boost::make_shared<MandatoryValidator<double>>();
+  const auto positiveDouble = boost::make_shared<BoundedValidator<double>>();
+  positiveDouble->setLower(0.0);
+  positiveDouble->setLowerExclusive(true);
+  const auto mandatoryPositiveDouble = boost::make_shared<CompositeValidator>();
+  mandatoryPositiveDouble->add(mandatoryDouble);
+  mandatoryPositiveDouble->add(positiveDouble);
+  const auto inputWorkspaceValidator = boost::make_shared<CompositeValidator>();
+  inputWorkspaceValidator->add(boost::make_shared<CommonBinsValidator>());
+  inputWorkspaceValidator->add(boost::make_shared<IncreasingAxisValidator>());
+  declareProperty(Kernel::make_unique<WorkspaceProperty<MatrixWorkspace>>(
+                      PropertyNames::INPUT_WORKSPACE, "", Direction::Input,
+                      inputWorkspaceValidator),
+                  "An input workspace.");
+  declareProperty(Kernel::make_unique<WorkspaceProperty<Workspace2D>>(
+                      PropertyNames::OUTPUT_WORKSPACE, "", Direction::Output),
+                  "A single histogram workspace containing the profile.");
+  declareProperty(PropertyNames::CENTRE, EMPTY_DBL(), mandatoryDouble,
+                  "Centre of the line.");
+  declareProperty(PropertyNames::HALF_WIDTH, EMPTY_DBL(),
+                  mandatoryPositiveDouble,
+                  "Half of the width over which to calcualte the profile.");
+  const std::set<std::string> directions{DirectionChoices::HORIZONTAL,
+                                         DirectionChoices::VERTICAL};
+  declareProperty(PropertyNames::DIRECTION, DirectionChoices::HORIZONTAL,
+                  boost::make_shared<ListValidator<std::string>>(directions),
+                  "Orientation of the profile line.");
+  declareProperty(PropertyNames::START, EMPTY_DBL(),
+                  "Starting point of the line.");
+  declareProperty(PropertyNames::END, EMPTY_DBL(), "End point of the line.");
+  const std::set<std::string> modes{ModeChoices::AVERAGE, ModeChoices::SUM};
+  declareProperty(PropertyNames::MODE, ModeChoices::AVERAGE,
+                  boost::make_shared<ListValidator<std::string>>(modes),
+                  "How the profile is calculated over the line width.");
+  declareProperty(PropertyNames::IGNORE_INFS, false,
+                  "If true, ignore infinities when calculating the profile.");
+  declareProperty(
+      PropertyNames::IGNORE_NANS, true,
+      "If true, ignore not-a-numbers when calculating the profile.");
+}
+
+//----------------------------------------------------------------------------------------------
+/** Execute the algorithm.
+ */
+void LineProfile::exec() {
+  // Extract properties.
+  MatrixWorkspace_const_sptr ws = getProperty(PropertyNames::INPUT_WORKSPACE);
+  const bool ignoreNans = getProperty(PropertyNames::IGNORE_NANS);
+  const bool ignoreInfs = getProperty(PropertyNames::IGNORE_INFS);
+  const auto &horizontalBins = ws->x(0);
+  const auto horizontalIsBinEdges = ws->isHistogramData();
+  const auto vertAxis = ws->getAxis(1);
+  // It is easier if the vertical axis values are in a vector.
+  const auto verticalBins =
+      extractVerticalBins(*vertAxis, ws->getNumberHistograms());
+  const auto verticalIsBinEdges =
+      verticalBins.size() > ws->getNumberHistograms();
+  const std::string directionString = getProperty(PropertyNames::DIRECTION);
+  LineDirection dir{LineDirection::horizontal};
+  if (directionString == DirectionChoices::VERTICAL) {
+    dir = LineDirection::vertical;
+  }
+  const double centre = getProperty(PropertyNames::CENTRE);
+  const double halfWidth = getProperty(PropertyNames::HALF_WIDTH);
+  double start = getProperty(PropertyNames::START);
+  if (start == EMPTY_DBL()) {
+    start = std::numeric_limits<double>::lowest();
+  }
+  double end = getProperty(PropertyNames::END);
+  if (end == EMPTY_DBL()) {
+    end = std::numeric_limits<double>::max();
+  }
+  // Define a box in workspace's units to have a standard representation
+  // of the profile's dimensions.
+  Box bounds;
+  if (dir == LineDirection::horizontal) {
+    bounds.top = centre - halfWidth;
+    bounds.bottom = centre + halfWidth;
+    bounds.left = start;
+    bounds.right = end;
+  } else {
+    bounds.top = start;
+    bounds.bottom = end;
+    bounds.left = centre - halfWidth;
+    bounds.right = centre + halfWidth;
+  }
+  // Convert the bounds from workspace units to indices.
+  const auto vertInterval =
+      startAndEnd(verticalBins, verticalIsBinEdges, bounds.top, bounds.bottom);
+  const auto horInterval = startAndEnd(horizontalBins, horizontalIsBinEdges,
+                                       bounds.left, bounds.right);
+  // Choose mode.
+  auto mode = createMode(getProperty(PropertyNames::MODE));
+  // Build the actual profile.
+  std::vector<double> profileYs;
+  std::vector<double> profileEs;
+  std::vector<double> Xs;
+  if (dir == LineDirection::horizontal) {
+    IndexLimits limits;
+    limits.lineStart = horInterval.first;
+    limits.lineEnd = horInterval.second;
+    limits.widthStart = vertInterval.first;
+    limits.widthEnd = vertInterval.second;
+    profile(Xs, profileYs, profileEs, *ws, dir, limits, horizontalBins,
+            horizontalIsBinEdges, mode, ignoreNans, ignoreInfs);
+  } else {
+    IndexLimits limits;
+    limits.lineStart = vertInterval.first;
+    limits.lineEnd = vertInterval.second;
+    limits.widthStart = horInterval.first;
+    limits.widthEnd = horInterval.second;
+    profile(Xs, profileYs, profileEs, *ws, dir, limits, verticalBins,
+            verticalIsBinEdges, mode, ignoreNans, ignoreInfs);
+  }
+  // Prepare and set output.
+  Workspace2D_sptr outWS;
+  if (Xs.size() > profileYs.size()) {
+    outWS =
+        create<Workspace2D>(1, Histogram(BinEdges(Xs), Counts(profileYs),
+                                         CountStandardDeviations(profileEs)));
+  } else {
+    outWS =
+        create<Workspace2D>(1, Histogram(Points(Xs), Counts(profileYs),
+                                         CountStandardDeviations(profileEs)));
+  }
+  // The actual profile might be of different size than what user
+  // specified.
+  Box actualBounds;
+  actualBounds.top = verticalBins[vertInterval.first];
+  actualBounds.bottom = verticalBins[vertInterval.second];
+  actualBounds.left = horizontalBins[horInterval.first];
+  actualBounds.right = horizontalBins[horInterval.second];
+  setAxesAndUnits(*outWS, *ws, actualBounds, dir);
+  setProperty(PropertyNames::OUTPUT_WORKSPACE, outWS);
+}
+
+/** Validate the algorithm's inputs.
+ */
+std::map<std::string, std::string> LineProfile::validateInputs() {
+  std::map<std::string, std::string> issues;
+  const double start = getProperty(PropertyNames::START);
+  const double end = getProperty(PropertyNames::END);
+  if (start > end) {
+    issues[PropertyNames::START] =
+        PropertyNames::START + " greater than " + PropertyNames::END + ".";
+  }
+  MatrixWorkspace_const_sptr ws = getProperty(PropertyNames::INPUT_WORKSPACE);
+  if (ws->getAxis(1)->isText()) {
+    issues[PropertyNames::INPUT_WORKSPACE] =
+        "The vertical axis in " + PropertyNames::INPUT_WORKSPACE + " is text.";
+  }
+  return issues;
+}
+
+} // namespace Algorithms
+} // namespace Mantid
diff --git a/Framework/Algorithms/src/ReflectometryReductionOne2.cpp b/Framework/Algorithms/src/ReflectometryReductionOne2.cpp
index 714701a0695069935f85ef1f0d1b91bd9ac3bbdc..b4ad895419486aa55c6c6d8f13e99022076d1cc6 100644
--- a/Framework/Algorithms/src/ReflectometryReductionOne2.cpp
+++ b/Framework/Algorithms/src/ReflectometryReductionOne2.cpp
@@ -1,11 +1,21 @@
 #include "MantidAlgorithms/ReflectometryReductionOne2.h"
 #include "MantidAPI/Axis.h"
+#include "MantidAPI/SpectrumInfo.h"
 #include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAPI/WorkspaceFactory.h"
+#include "MantidHistogramData/LinearGenerator.h"
+#include "MantidIndexing/IndexInfo.h"
 #include "MantidKernel/MandatoryValidator.h"
+#include "MantidKernel/StringTokenizer.h"
 #include "MantidKernel/Unit.h"
 
+#include <algorithm>
+#include <boost/lexical_cast.hpp>
+
 using namespace Mantid::Kernel;
 using namespace Mantid::API;
+using namespace Mantid::HistogramData;
+using namespace Mantid::Indexing;
 
 namespace Mantid {
 namespace Algorithms {
@@ -13,6 +23,230 @@ namespace Algorithms {
 /*Anonomous namespace */
 namespace {
 
+/** Get the twoTheta angle for the centre of the detector associated with the
+* given spectrum
+*
+* @param spectrumInfo : the spectrum info
+* @param spectrumIdx : the workspace index of the spectrum
+* @return : the twoTheta angle in radians
+*/
+double getDetectorTwoTheta(const SpectrumInfo *spectrumInfo,
+                           const size_t spectrumIdx) {
+  return spectrumInfo->signedTwoTheta(spectrumIdx);
+}
+
+/** Get the twoTheta angle range for the top/bottom of the detector associated
+* with the given spectrum
+*
+* @param spectrumInfo : the spectrum info
+* @param spectrumIdx : the workspace index of the spectrum
+* @return : the twoTheta angle in radians
+*/
+double getDetectorTwoThetaRange(const SpectrumInfo *spectrumInfo,
+                                const size_t spectrumIdx) {
+  // Assume the range covered by this pixel is the diff between this
+  // pixel's twoTheta and the next/prev pixel)
+  double twoTheta = getDetectorTwoTheta(spectrumInfo, spectrumIdx);
+  double bTwoTheta = 0;
+
+  if (spectrumIdx + 1 < spectrumInfo->size()) {
+    bTwoTheta = getDetectorTwoTheta(spectrumInfo, spectrumIdx + 1) - twoTheta;
+  }
+
+  return bTwoTheta;
+}
+
+/** Get the start/end of the lambda range for the detector associated
+* with the given spectrum
+*
+* @return : the lambda range
+*/
+double getLambdaRange(const HistogramX &xValues, const int xIdx) {
+  // The lambda range is the bin width from the given index to the next.
+  if (xIdx < 0 || xIdx + 1 >= static_cast<int>(xValues.size())) {
+    throw std::runtime_error("Error accessing X values out of range (index=" +
+                             std::to_string(xIdx + 1) + ", size=" +
+                             std::to_string(xValues.size()));
+  }
+
+  double result = xValues[xIdx + 1] - xValues[xIdx];
+  return result;
+}
+
+/** Get the lambda value at the centre of the detector associated
+* with the given spectrum
+*
+* @return : the lambda range
+*/
+double getLambda(const HistogramX &xValues, const int xIdx) {
+  if (xIdx < 0 || xIdx >= static_cast<int>(xValues.size())) {
+    throw std::runtime_error("Error accessing X values out of range (index=" +
+                             std::to_string(xIdx) + ", size=" +
+                             std::to_string(xValues.size()));
+  }
+
+  // The centre of the bin is the lower bin edge plus half the width
+  return xValues[xIdx] + getLambdaRange(xValues, xIdx) / 2.0;
+}
+
+/** @todo The following translation functions are duplicates of code in
+* GroupDetectors2.cpp. Longer term, we should move them to a common location if
+* possible */
+
+/* The following functions are used to translate single operators into
+* groups, just like the ones this algorithm loads from .map files.
+*
+* Each function takes a string, such as "3+4", or "6:10" and then adds
+* the resulting groups of spectra to outGroups.
+*/
+
+// An add operation, i.e. "3+4" -> [3+4]
+void translateAdd(const std::string &instructions,
+                  std::vector<std::vector<size_t>> &outGroups) {
+  auto spectra = Kernel::StringTokenizer(
+      instructions, "+", Kernel::StringTokenizer::TOK_TRIM |
+                             Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
+
+  std::vector<size_t> outSpectra;
+  outSpectra.reserve(spectra.count());
+  for (auto spectrum : spectra) {
+    // add this spectrum to the group we're about to add
+    outSpectra.push_back(boost::lexical_cast<size_t>(spectrum));
+  }
+  outGroups.push_back(std::move(outSpectra));
+}
+
+// A range summation, i.e. "3-6" -> [3+4+5+6]
+void translateSumRange(const std::string &instructions,
+                       std::vector<std::vector<size_t>> &outGroups) {
+  // add a group with the sum of the spectra in the range
+  auto spectra = Kernel::StringTokenizer(instructions, "-");
+  if (spectra.count() != 2)
+    throw std::runtime_error("Malformed range (-) operation.");
+  // fetch the start and stop spectra
+  size_t first = boost::lexical_cast<size_t>(spectra[0]);
+  size_t last = boost::lexical_cast<size_t>(spectra[1]);
+  // swap if they're back to front
+  if (first > last)
+    std::swap(first, last);
+
+  // add all the spectra in the range to the output group
+  std::vector<size_t> outSpectra;
+  outSpectra.reserve(last - first + 1);
+  for (size_t i = first; i <= last; ++i)
+    outSpectra.push_back(i);
+  if (!outSpectra.empty())
+    outGroups.push_back(std::move(outSpectra));
+}
+
+// A range insertion, i.e. "3:6" -> [3,4,5,6]
+void translateRange(const std::string &instructions,
+                    std::vector<std::vector<size_t>> &outGroups) {
+  // add a group per spectra
+  auto spectra = Kernel::StringTokenizer(
+      instructions, ":", Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
+  if (spectra.count() != 2)
+    throw std::runtime_error("Malformed range (:) operation.");
+  // fetch the start and stop spectra
+  size_t first = boost::lexical_cast<size_t>(spectra[0]);
+  size_t last = boost::lexical_cast<size_t>(spectra[1]);
+  // swap if they're back to front
+  if (first > last)
+    std::swap(first, last);
+
+  // add all the spectra in the range to separate output groups
+  for (size_t i = first; i <= last; ++i) {
+    // create group of size 1 with the spectrum and add it to output
+    outGroups.emplace_back(1, i);
+  }
+}
+
+/**
+* Translate the processing instructions into a vector of groups of indices
+*
+* @param instructions : Instructions to translate
+* @return : A vector of groups, each group being a vector of its 0-based
+* spectrum indices
+*/
+std::vector<std::vector<size_t>>
+translateInstructions(const std::string &instructions) {
+  std::vector<std::vector<size_t>> outGroups;
+
+  try {
+    // split into comma separated groups, each group potentially containing
+    // an operation (+-:) that produces even more groups.
+    auto groups = Kernel::StringTokenizer(
+        instructions, ",",
+        StringTokenizer::TOK_TRIM | StringTokenizer::TOK_IGNORE_EMPTY);
+    for (const auto &groupStr : groups) {
+      // Look for the various operators in the string. If one is found then
+      // do the necessary translation into groupings.
+      if (groupStr.find('+') != std::string::npos) {
+        // add a group with the given spectra
+        translateAdd(groupStr, outGroups);
+      } else if (groupStr.find('-') != std::string::npos) {
+        translateSumRange(groupStr, outGroups);
+      } else if (groupStr.find(':') != std::string::npos) {
+        translateRange(groupStr, outGroups);
+      } else if (!groupStr.empty()) {
+        // contains no instructions, just add this spectrum as a new group
+        // create group of size 1 with the spectrum in it and add it to output
+        outGroups.emplace_back(1, boost::lexical_cast<size_t>(groupStr));
+      }
+    }
+  } catch (boost::bad_lexical_cast &) {
+    throw std::runtime_error("Invalid processing instructions: " +
+                             instructions);
+  }
+
+  return outGroups;
+}
+
+/**
+* Map a spectrum index from the given map to the given workspace
+* @param originWS : the original workspace
+* @param mapIdx : the index in the original workspace
+* @param destWS : the destination workspace
+* @return : the index in the destination workspace
+*/
+size_t mapSpectrumIndexToWorkspace(MatrixWorkspace_const_sptr originWS,
+                                   const size_t originIdx,
+                                   MatrixWorkspace_const_sptr destWS) {
+
+  SpectrumNumber specId = originWS->indexInfo().spectrumNumber(originIdx);
+  size_t wsIdx =
+      destWS->getIndexFromSpectrumNumber(static_cast<specnum_t>(specId));
+  return wsIdx;
+}
+
+/**
+* @param originWS : Origin workspace, which provides the original workspace
+* index to spectrum number mapping.
+* @param hostWS : Workspace onto which the resulting workspace indexes will be
+* hosted
+* @throws :: If the specId are not found to exist on the host end-point
+*workspace.
+* @return :: Remapped workspace indexes applicable for the host workspace,
+*as a vector of groups of vectors of spectrum indices
+*/
+std::vector<std::vector<size_t>> mapSpectrumIndicesToWorkspace(
+    MatrixWorkspace_const_sptr originWS, MatrixWorkspace_const_sptr hostWS,
+    const std::vector<std::vector<size_t>> &detectorGroups) {
+
+  std::vector<std::vector<size_t>> hostGroups;
+
+  for (auto group : detectorGroups) {
+    std::vector<size_t> hostDetectors;
+    for (auto i : group) {
+      const size_t hostIdx = mapSpectrumIndexToWorkspace(originWS, i, hostWS);
+      hostDetectors.push_back(hostIdx);
+    }
+    hostGroups.push_back(hostDetectors);
+  }
+
+  return hostGroups;
+}
+
 /**
 * Translate all the workspace indexes in an origin workspace into workspace
 * indexes of a host end-point workspace. This is done using spectrum numbers as
@@ -24,43 +258,73 @@ namespace {
 * hosted
 * @throws :: If the specId are not found to exist on the host end-point
 *workspace.
-* @return :: Remapped workspace indexes applicable for the host workspace.
-*results
+* @return :: Remapped workspace indexes applicable for the host workspace,
 *as comma separated string.
 */
-std::string
-createProcessingCommandsFromDetectorWS(MatrixWorkspace_const_sptr originWS,
-                                       MatrixWorkspace_const_sptr hostWS) {
-  auto spectrumMap = originWS->getSpectrumToWorkspaceIndexMap();
-  auto it = spectrumMap.begin();
-  std::stringstream result;
-  specnum_t specId = (*it).first;
-  result << static_cast<int>(hostWS->getIndexFromSpectrumNumber(specId));
-  ++it;
-  for (; it != spectrumMap.end(); ++it) {
-    specId = (*it).first;
-    result << ","
-           << static_cast<int>(hostWS->getIndexFromSpectrumNumber(specId));
-  }
-  return result.str();
-}
+std::string createProcessingCommandsFromDetectorWS(
+    MatrixWorkspace_const_sptr originWS, MatrixWorkspace_const_sptr hostWS,
+    const std::vector<std::vector<size_t>> &detectorGroups) {
 
-/**
-@param ws1 : First workspace to compare
-@param ws2 : Second workspace to compare against
-@param severe: True to indicate that failure to verify should result in an
-exception. Otherwise a warning is generated.
-@return : true if spectrum maps match. False otherwise
-*/
-bool verifySpectrumMaps(MatrixWorkspace_const_sptr ws1,
-                        MatrixWorkspace_const_sptr ws2) {
-  auto map1 = ws1->getSpectrumToWorkspaceIndexMap();
-  auto map2 = ws2->getSpectrumToWorkspaceIndexMap();
-  if (map1 != map2) {
-    return false;
-  } else {
-    return true;
+  std::string result;
+
+  // Map the original indices to the host workspace
+  std::vector<std::vector<size_t>> hostGroups =
+      mapSpectrumIndicesToWorkspace(originWS, hostWS, detectorGroups);
+
+  // Add each group to the output, separated by ','
+
+  /// @todo Low priority: Add support to separate contiguous groups by ':' to
+  /// avoid having long lists of spectrum indices in the processing
+  /// instructions. This would not make any functional difference but would be
+  /// a cosmetic improvement when you view the history.
+  for (auto groupIt = hostGroups.begin(); groupIt != hostGroups.end();
+       ++groupIt) {
+    const auto &hostDetectors = *groupIt;
+
+    // Add each detector index to the output string separated by '+' to indicate
+    // that all detectors in this group will be summed. We also check for
+    // contiguous ranges so we output e.g. 3-5 instead of 3+4+5
+    bool contiguous = false;
+    size_t contiguousStart = 0;
+
+    for (auto it = hostDetectors.begin(); it != hostDetectors.end(); ++it) {
+      // Check if the next iterator is a contiguous increment from this one
+      auto nextIt = it + 1;
+      if (nextIt != hostDetectors.end() && *nextIt == *it + 1) {
+        // If this is a start of a new contiguous region, remember the start
+        // index
+        if (!contiguous) {
+          contiguousStart = *it;
+          contiguous = true;
+        }
+        // Continue to find the end of the contiguous region
+        continue;
+      }
+
+      if (contiguous) {
+        // Output the contiguous range, then reset the flag
+        result.append(std::to_string(contiguousStart))
+            .append("-")
+            .append(std::to_string(*it));
+        contiguousStart = 0;
+        contiguous = false;
+      } else {
+        // Just output the value
+        result.append(std::to_string(*it));
+      }
+
+      // Add a separator ready for the next value/range
+      if (nextIt != hostDetectors.end()) {
+        result.append("+");
+      }
+    }
+
+    if (groupIt + 1 != hostGroups.end()) {
+      result.append(",");
+    }
   }
+
+  return result;
 }
 }
 
@@ -77,6 +341,13 @@ void ReflectometryReductionOne2::init() {
                       "InputWorkspace", "", Direction::Input),
                   "Run to reduce.");
 
+  initReductionProperties();
+
+  // ThetaIn
+  declareProperty(make_unique<PropertyWithValue<double>>(
+                      "ThetaIn", Mantid::EMPTY_DBL(), Direction::Input),
+                  "Angle in degrees");
+
   // Processing instructions
   declareProperty(Kernel::make_unique<PropertyWithValue<std::string>>(
                       "ProcessingInstructions", "",
@@ -131,6 +402,9 @@ ReflectometryReductionOne2::validateInputs() {
 
   std::map<std::string, std::string> results;
 
+  const auto reduction = validateReductionProperties();
+  results.insert(reduction.begin(), reduction.end());
+
   const auto wavelength = validateWavelengthRanges();
   results.insert(wavelength.begin(), wavelength.end());
 
@@ -146,63 +420,36 @@ ReflectometryReductionOne2::validateInputs() {
 /** Execute the algorithm.
 */
 void ReflectometryReductionOne2::exec() {
-  MatrixWorkspace_sptr runWS = getProperty("InputWorkspace");
-
-  const auto xUnitID = runWS->getAxis(0)->unit()->unitID();
+  // Get input properties
+  m_runWS = getProperty("InputWorkspace");
+  const auto xUnitID = m_runWS->getAxis(0)->unit()->unitID();
 
   // Neither TOF or Lambda? Abort.
   if ((xUnitID != "Wavelength") && (xUnitID != "TOF"))
     throw std::invalid_argument(
         "InputWorkspace must have units of TOF or Wavelength");
 
-  // Output workspace in wavelength
-  MatrixWorkspace_sptr IvsLam;
+  m_spectrumInfo = &m_runWS->spectrumInfo();
 
-  if (xUnitID == "Wavelength") {
-    IvsLam = runWS;
-  } else {
-    // xUnitID == "TOF"
-
-    // Detector workspace
-    auto detectorWS = makeDetectorWS(runWS);
-
-    // Normalization by direct beam (optional)
-    Property *directBeamProperty = getProperty("RegionOfDirectBeam");
-    if (!directBeamProperty->isDefault()) {
-      const auto directBeam = makeDirectBeamWS(runWS);
-      detectorWS = divide(detectorWS, directBeam);
-    }
+  // Find and cache detector groups and theta0
+  findDetectorGroups();
+  findTheta0();
 
-    // Monitor workspace (only if I0MonitorIndex, MonitorBackgroundWavelengthMin
-    // and MonitorBackgroundWavelengthMax have been given)
-    Property *monProperty = getProperty("I0MonitorIndex");
-    Property *backgroundMinProperty =
-        getProperty("MonitorBackgroundWavelengthMin");
-    Property *backgroundMaxProperty =
-        getProperty("MonitorBackgroundWavelengthMin");
-    if (!monProperty->isDefault() && !backgroundMinProperty->isDefault() &&
-        !backgroundMaxProperty->isDefault()) {
-      const bool integratedMonitors =
-          getProperty("NormalizeByIntegratedMonitors");
-      const auto monitorWS = makeMonitorWS(runWS, integratedMonitors);
-      if (!integratedMonitors)
-        detectorWS = rebinDetectorsToMonitors(detectorWS, monitorWS);
-      IvsLam = divide(detectorWS, monitorWS);
-    } else {
-      IvsLam = detectorWS;
-    }
-
-    // Crop to wavelength limits
-    IvsLam = cropWavelength(IvsLam);
+  // Check whether conversion, normalisation, summation etc. need to be done
+  m_convertUnits = true;
+  m_normaliseMonitors = true;
+  m_normaliseTransmission = true;
+  m_sum = true;
+  if (xUnitID == "Wavelength") {
+    // Already converted converted to wavelength
+    m_convertUnits = false;
+    // Assume it's also already been normalised by monitors and summed
+    m_normaliseMonitors = false;
+    m_sum = false;
   }
 
-  // Transmission correction
-  MatrixWorkspace_sptr transRun = getProperty("FirstTransmissionRun");
-  if (transRun) {
-    IvsLam = transmissionCorrection(IvsLam);
-  } else if (getPropertyValue("CorrectionAlgorithm") != "None") {
-    IvsLam = algorithmicCorrection(IvsLam);
-  }
+  // Create the output workspace in wavelength
+  MatrixWorkspace_sptr IvsLam = makeIvsLam();
 
   // Convert to Q
   auto IvsQ = convertToQ(IvsLam);
@@ -211,6 +458,88 @@ void ReflectometryReductionOne2::exec() {
   setProperty("OutputWorkspace", IvsQ);
 }
 
+/**
+* Creates the output 1D array in wavelength from an input 2D workspace in
+* TOF. Summation is done over lambda or over lines of constant Q depending on
+* the type of reduction. For the latter, the output is projected to "virtual
+* lambda" at a reference angle twoThetaR.
+*
+* @return :: the output workspace in wavelength
+*/
+MatrixWorkspace_sptr ReflectometryReductionOne2::makeIvsLam() {
+  MatrixWorkspace_sptr result = m_runWS;
+
+  if (summingInQ()) {
+    if (m_convertUnits) {
+      g_log.debug("Converting input workspace to wavelength\n");
+      result = convertToWavelength(result);
+    }
+    if (m_normaliseMonitors) {
+      g_log.debug("Normalising input workspace by monitors\n");
+      result = directBeamCorrection(result);
+      result = monitorCorrection(result);
+    }
+    if (m_normaliseTransmission) {
+      g_log.debug("Normalising input workspace by transmission run\n");
+      result = transOrAlgCorrection(result, false);
+    }
+    if (m_sum) {
+      g_log.debug("Summing in Q\n");
+      result = sumInQ(result);
+    }
+  } else {
+    if (m_sum) {
+      g_log.debug("Summing in wavelength\n");
+      result = makeDetectorWS(result, m_convertUnits);
+    }
+    if (m_normaliseMonitors) {
+      g_log.debug("Normalising output workspace by monitors\n");
+      result = directBeamCorrection(result);
+      result = monitorCorrection(result);
+    }
+    if (m_normaliseTransmission) {
+      g_log.debug("Normalising output workspace by transmission run\n");
+      result = transOrAlgCorrection(result, true);
+    }
+  }
+
+  // Crop to wavelength limits
+  g_log.debug("Cropping output workspace\n");
+  result = cropWavelength(result);
+
+  return result;
+}
+
+/**
+* Normalize by monitors (only if I0MonitorIndex, MonitorBackgroundWavelengthMin
+* and MonitorBackgroundWavelengthMax have been given)
+*
+* @param detectorWS :: the detector workspace to normalise, in lambda
+* @return :: the normalized workspace in lambda
+*/
+MatrixWorkspace_sptr
+ReflectometryReductionOne2::monitorCorrection(MatrixWorkspace_sptr detectorWS) {
+  MatrixWorkspace_sptr IvsLam;
+  Property *monProperty = getProperty("I0MonitorIndex");
+  Property *backgroundMinProperty =
+      getProperty("MonitorBackgroundWavelengthMin");
+  Property *backgroundMaxProperty =
+      getProperty("MonitorBackgroundWavelengthMin");
+  if (!monProperty->isDefault() && !backgroundMinProperty->isDefault() &&
+      !backgroundMaxProperty->isDefault()) {
+    const bool integratedMonitors =
+        getProperty("NormalizeByIntegratedMonitors");
+    const auto monitorWS = makeMonitorWS(m_runWS, integratedMonitors);
+    if (!integratedMonitors)
+      detectorWS = rebinDetectorsToMonitors(detectorWS, monitorWS);
+    IvsLam = divide(detectorWS, monitorWS);
+  } else {
+    IvsLam = detectorWS;
+  }
+
+  return IvsLam;
+}
+
 /** Creates a direct beam workspace in wavelength from an input workspace in
 * TOF. This method should only be called if RegionOfDirectBeam is provided.
 *
@@ -239,30 +568,90 @@ ReflectometryReductionOne2::makeDirectBeamWS(MatrixWorkspace_sptr inputWS) {
   return directBeamWS;
 }
 
+/**
+* Normalize the workspace by the direct beam (optional)
+*
+* @param detectorWS : workspace in wavelength which is to be normalized
+* @return : corrected workspace
+*/
+MatrixWorkspace_sptr ReflectometryReductionOne2::directBeamCorrection(
+    MatrixWorkspace_sptr detectorWS) {
+
+  MatrixWorkspace_sptr normalized = detectorWS;
+  Property *directBeamProperty = getProperty("RegionOfDirectBeam");
+  if (!directBeamProperty->isDefault()) {
+    auto directBeam = makeDirectBeamWS(m_runWS);
+
+    // Rebin the direct beam workspace to be the same as the input.
+    auto rebinToWorkspaceAlg = this->createChildAlgorithm("RebinToWorkspace");
+    rebinToWorkspaceAlg->initialize();
+    rebinToWorkspaceAlg->setProperty("WorkspaceToMatch", detectorWS);
+    rebinToWorkspaceAlg->setProperty("WorkspaceToRebin", directBeam);
+    rebinToWorkspaceAlg->execute();
+    directBeam = rebinToWorkspaceAlg->getProperty("OutputWorkspace");
+
+    normalized = divide(detectorWS, directBeam);
+  }
+
+  return normalized;
+}
+
+/**
+* Perform either transmission or algorithmic correction according to the
+* settings.
+* @param detectorWS : workspace in wavelength which is to be normalized
+* @param detectorWSReduced:: whether the input detector workspace has been
+* reduced
+* @return : corrected workspace
+*/
+MatrixWorkspace_sptr ReflectometryReductionOne2::transOrAlgCorrection(
+    MatrixWorkspace_sptr detectorWS, const bool detectorWSReduced) {
+
+  MatrixWorkspace_sptr normalized;
+  MatrixWorkspace_sptr transRun = getProperty("FirstTransmissionRun");
+  if (transRun) {
+    normalized = transmissionCorrection(detectorWS, detectorWSReduced);
+  } else if (getPropertyValue("CorrectionAlgorithm") != "None") {
+    normalized = algorithmicCorrection(detectorWS);
+  } else {
+    normalized = detectorWS;
+  }
+
+  return normalized;
+}
+
 /** Perform transmission correction by running 'CreateTransmissionWorkspace' on
 * the input workspace
 * @param detectorWS :: the input workspace
+* @param detectorWSReduced:: whether the input detector workspace has been
+* reduced
 * @return :: the input workspace normalized by transmission
 */
 MatrixWorkspace_sptr ReflectometryReductionOne2::transmissionCorrection(
-    MatrixWorkspace_sptr detectorWS) {
+    MatrixWorkspace_sptr detectorWS, const bool detectorWSReduced) {
 
   const bool strictSpectrumChecking = getProperty("StrictSpectrumChecking");
-
   MatrixWorkspace_sptr transmissionWS = getProperty("FirstTransmissionRun");
-  Unit_const_sptr xUnit = transmissionWS->getAxis(0)->unit();
 
+  // Reduce the transmission workspace, if not already done (assume that if
+  // the workspace is in wavelength then it has already been reduced)
+  Unit_const_sptr xUnit = transmissionWS->getAxis(0)->unit();
   if (xUnit->unitID() == "TOF") {
 
-    // Processing instructions for transmission workspace
+    // Processing instructions for transmission workspace. If strict spectrum
+    // checking is not enabled then just use the same processing instructions
+    // that were passed in.
     std::string transmissionCommands = getProperty("ProcessingInstructions");
     if (strictSpectrumChecking) {
-      // If we have strict spectrum checking, the processing commands need to be
-      // made from the
-      // numerator workspace AND the transmission workspace based on matching
-      // spectrum numbers.
-      transmissionCommands =
-          createProcessingCommandsFromDetectorWS(detectorWS, transmissionWS);
+      // If we have strict spectrum checking, we should have the same
+      // spectrum numbers in both workspaces, but not necessarily with the
+      // same workspace indices. Therefore, map the processing instructions
+      // from the original workspace to the correct indices in the
+      // transmission workspace. Note that we use the run workspace here
+      // because the detectorWS may already have been reduced and may not
+      // contain the original spectra.
+      transmissionCommands = createProcessingCommandsFromDetectorWS(
+          m_runWS, transmissionWS, detectorGroups());
     }
 
     MatrixWorkspace_sptr secondTransmissionWS =
@@ -298,15 +687,10 @@ MatrixWorkspace_sptr ReflectometryReductionOne2::transmissionCorrection(
   rebinToWorkspaceAlg->execute();
   transmissionWS = rebinToWorkspaceAlg->getProperty("OutputWorkspace");
 
-  const bool match = verifySpectrumMaps(detectorWS, transmissionWS);
-  if (!match) {
-    const std::string message =
-        "Spectrum maps between workspaces do NOT match up.";
-    if (strictSpectrumChecking) {
-      throw std::invalid_argument(message);
-    } else {
-      g_log.warning(message);
-    }
+  // If the detector workspace has been reduced then the spectrum maps
+  // should match AFTER reducing the transmission workspace
+  if (detectorWSReduced) {
+    verifySpectrumMaps(detectorWS, transmissionWS, strictSpectrumChecking);
   }
 
   MatrixWorkspace_sptr normalized = divide(detectorWS, transmissionWS);
@@ -362,5 +746,464 @@ ReflectometryReductionOne2::convertToQ(MatrixWorkspace_sptr inputWS) {
   return IvsQ;
 }
 
+/**
+* Determine whether the reduction should sum along lines of constant
+* Q or in the default lambda.
+*
+* @return : true if the reduction should sum in Q; false otherwise
+*/
+bool ReflectometryReductionOne2::summingInQ() {
+  bool result = false;
+  const std::string summationType = getProperty("SummationType");
+
+  if (summationType == "SumInQ") {
+    result = true;
+  }
+
+  return result;
+}
+
+/**
+* Find and cache the indicies of the detectors of interest
+*/
+void ReflectometryReductionOne2::findDetectorGroups() {
+  std::string instructions = getPropertyValue("ProcessingInstructions");
+
+  m_detectorGroups = translateInstructions(instructions);
+
+  // Sort the groups by the first spectrum number in the group (to give the same
+  // output order as GroupDetectors)
+  std::sort(m_detectorGroups.begin(), m_detectorGroups.end(),
+            [](const std::vector<size_t> a, const std::vector<size_t> b) {
+              return a.front() < b.front();
+            });
+
+  if (m_detectorGroups.size() == 0) {
+    throw std::runtime_error("Invalid processing instructions");
+  }
+}
+
+/**
+* Find and cache the angle theta0 from which lines of constant Q emanate
+*/
+void ReflectometryReductionOne2::findTheta0() {
+  // Only requried if summing in Q
+  if (!summingInQ()) {
+    return;
+  }
+
+  const std::string reductionType = getProperty("ReductionType");
+
+  // For the non-flat sample case theta0 is 0
+  m_theta0 = 0.0;
+
+  if (reductionType == "DivergentBeam") {
+    // theta0 is the horizon angle, which is half the twoTheta angle of the
+    // detector position. This is the angle the detector has been rotated
+    // to, which we can get from ThetaIn
+    Property *thetaIn = getProperty("ThetaIn");
+    if (!thetaIn->isDefault()) {
+      m_theta0 = getProperty("ThetaIn");
+    } else {
+      /// @todo Currently, ThetaIn must be provided via a property. We could
+      /// calculate its value instead using
+      /// ReflectometryReductionOneAuto2::calculateTheta, which could be moved
+      /// to the base class (ReflectometryWorkflowBase2). Users normally use
+      /// ReflectometryReductionOneAuto2 though, so at the moment it isn't a
+      /// high priority to be able to calculate it here.
+      throw std::runtime_error(
+          "The ThetaIn property is required for the DivergentBeam case");
+    }
+  }
+
+  g_log.debug("theta0: " + std::to_string(theta0()) + " degrees\n");
+
+  // Convert to radians
+  m_theta0 *= M_PI / 180.0;
+}
+
+/**
+* Get the (arbitrary) reference angle twoThetaR for use for summation
+* in Q
+*
+* @return : the angle twoThetaR in radians
+* @throws : if the angle could not be found
+*/
+double
+ReflectometryReductionOne2::twoThetaR(const std::vector<size_t> &detectors) {
+  return getDetectorTwoTheta(m_spectrumInfo, twoThetaRDetectorIdx(detectors));
+}
+
+/**
+* Get the spectrum index which defines the twoThetaR reference angle
+* @return : the spectrum index
+*/
+size_t ReflectometryReductionOne2::twoThetaRDetectorIdx(
+    const std::vector<size_t> &detectors) {
+  // Get the mid-point of the area of interest
+  return detectors.front() + (detectors.back() - detectors.front()) / 2;
+}
+
+/**
+* Find the range of the projected lambda range when summing in Q
+*
+* @param detectorWS [in] : the workspace containing the values to project
+* @param detectors [in] : the workspace indices of the detectors of interest
+* @param xMin [out] : the start of the projected lambda range
+* @param xMax [out] : the end of the projected lambda range
+*/
+void ReflectometryReductionOne2::findIvsLamRange(
+    MatrixWorkspace_sptr detectorWS, const std::vector<size_t> &detectors,
+    double &xMin, double &xMax) {
+
+  // Get the max/min wavelength of region of interest
+  const double lambdaMin = getProperty("WavelengthMin");
+  const double lambdaMax = getProperty("WavelengthMax");
+
+  // Get the new max and min X values of the projected (virtual) lambda range
+  double dummy = 0.0;
+
+  const size_t spIdxMin = detectors.front();
+  const double twoThetaMin = getDetectorTwoTheta(m_spectrumInfo, spIdxMin);
+  const double bTwoThetaMin =
+      getDetectorTwoThetaRange(m_spectrumInfo, spIdxMin);
+  // For bLambda, use the average bin size for this spectrum
+  auto xValues = detectorWS->x(spIdxMin);
+  double bLambda = (xValues[xValues.size() - 1] - xValues[0]) /
+                   static_cast<int>(xValues.size());
+  getProjectedLambdaRange(lambdaMax, twoThetaMin, bLambda, bTwoThetaMin,
+                          detectors, dummy, xMax);
+
+  const size_t spIdxMax = detectors.back();
+  const double twoThetaMax = getDetectorTwoTheta(m_spectrumInfo, spIdxMax);
+  const double bTwoThetaMax =
+      getDetectorTwoThetaRange(m_spectrumInfo, spIdxMax);
+  xValues = detectorWS->x(spIdxMax);
+  bLambda = (xValues[xValues.size() - 1] - xValues[0]) /
+            static_cast<int>(xValues.size());
+  getProjectedLambdaRange(lambdaMin, twoThetaMax, bLambda, bTwoThetaMax,
+                          detectors, xMin, dummy);
+
+  if (xMin > xMax) {
+    throw std::runtime_error(
+        "Error projecting lambda range to reference line at twoTheta=" +
+        std::to_string(twoThetaR(detectors)) + "; projected range (" +
+        std::to_string(xMin) + "," + std::to_string(xMax) + ") is negative.");
+  }
+}
+
+/**
+* Construct an "empty" output workspace in virtual-lambda for summation in Q.
+* The workspace will have the same x values as the input workspace but the y
+* values will all be zero.
+*
+* @return : a 1D workspace where y values are all zero
+*/
+MatrixWorkspace_sptr
+ReflectometryReductionOne2::constructIvsLamWS(MatrixWorkspace_sptr detectorWS) {
+
+  // There is one output spectrum for each detector group
+  MatrixWorkspace_sptr outputWS =
+      WorkspaceFactory::Instance().create(detectorWS, detectorGroups().size());
+
+  const size_t numGroups = detectorGroups().size();
+  const size_t numHist = outputWS->getNumberHistograms();
+  if (numHist != numGroups) {
+    throw std::runtime_error(
+        "Error constructing IvsLam: number of output histograms " +
+        std::to_string(numHist) +
+        " does not equal the number of input detector groups " +
+        std::to_string(numGroups));
+  }
+
+  // Loop through each detector group in the input
+  for (size_t groupIdx = 0; groupIdx < numGroups; ++groupIdx) {
+    // Get the detectors in this group
+    auto &detectors = detectorGroups()[groupIdx];
+
+    // Find the X values. These are the projected lambda values for this
+    // detector group
+    double xMin = 0.0;
+    double xMax = 0.0;
+    findIvsLamRange(detectorWS, detectors, xMin, xMax);
+    // Use the same number of bins as the input
+    const int numBins = static_cast<int>(detectorWS->blocksize());
+    const double binWidth = (xMax - xMin) / (numBins + 1);
+    // Construct the histogram with these X values. Y and E values are zero.
+    const BinEdges xValues(numBins + 1, LinearGenerator(xMin, binWidth));
+    outputWS->setBinEdges(groupIdx, xValues);
+
+    // Set the detector ID from the twoThetaR detector.
+    const size_t twoThetaRIdx = twoThetaRDetectorIdx(detectors);
+    auto &outSpec = outputWS->getSpectrum(groupIdx);
+    const detid_t twoThetaRDetID =
+        m_spectrumInfo->detector(twoThetaRIdx).getID();
+    outSpec.clearDetectorIDs();
+    outSpec.addDetectorID(twoThetaRDetID);
+    // Set the spectrum number from the twoThetaR detector
+    SpectrumNumber specNum =
+        detectorWS->indexInfo().spectrumNumber(twoThetaRIdx);
+    auto indexInf = outputWS->indexInfo();
+    indexInf.setSpectrumNumbers(specNum, specNum);
+    outputWS->setIndexInfo(indexInf);
+  }
+
+  return outputWS;
+}
+
+/**
+* Sum counts from the input workspace in lambda along lines of constant Q by
+* projecting to "virtual lambda" at a reference angle twoThetaR.
+*
+* @param detectorWS [in] :: the input workspace in wavelength
+* @return :: the output workspace in wavelength
+*/
+MatrixWorkspace_sptr
+ReflectometryReductionOne2::sumInQ(MatrixWorkspace_sptr detectorWS) {
+
+  // Construct the output array in virtual lambda
+  MatrixWorkspace_sptr IvsLam = constructIvsLamWS(detectorWS);
+
+  // Loop through each input group (and corresponding output spectrum)
+  const size_t numGroups = detectorGroups().size();
+  for (size_t groupIdx = 0; groupIdx < numGroups; ++groupIdx) {
+    auto &detectors = detectorGroups()[groupIdx];
+    auto &outputE = IvsLam->dataE(groupIdx);
+
+    // Loop through each spectrum in the detector group
+    for (auto spIdx : detectors) {
+      // Get the angle of this detector and its size in twoTheta
+      const double twoTheta = getDetectorTwoTheta(m_spectrumInfo, spIdx);
+      const double bTwoTheta = getDetectorTwoThetaRange(m_spectrumInfo, spIdx);
+
+      // Check X length is Y length + 1
+      const auto &inputX = detectorWS->x(spIdx);
+      const auto &inputY = detectorWS->y(spIdx);
+      const auto &inputE = detectorWS->e(spIdx);
+      if (inputX.size() != inputY.size() + 1) {
+        throw std::runtime_error(
+            "Expected input workspace to be histogram data (got X len=" +
+            std::to_string(inputX.size()) + ", Y len=" +
+            std::to_string(inputY.size()) + ")");
+      }
+
+      // Create a vector for the projected errors for this spectrum.
+      // (Output Y values can simply be accumulated directly into the output
+      // workspace, but for error values we need to create a separate error
+      // vector for the projected errors from each input spectrum and then
+      // do an overall sum in quadrature.)
+      std::vector<double> projectedE(outputE.size(), 0.0);
+
+      // Process each value in the spectrum
+      const int ySize = static_cast<int>(inputY.size());
+      for (int inputIdx = 0; inputIdx < ySize; ++inputIdx) {
+        // Do the summation in Q
+        sumInQProcessValue(inputIdx, twoTheta, bTwoTheta, inputX, inputY,
+                           inputE, detectors, groupIdx, IvsLam, projectedE);
+      }
+
+      // Sum errors in quadrature
+      const int eSize = static_cast<int>(inputE.size());
+      for (int outIdx = 0; outIdx < eSize; ++outIdx) {
+        outputE[outIdx] += projectedE[outIdx] * projectedE[outIdx];
+      }
+    }
+
+    // Take the square root of all the accumulated squared errors for this
+    // detector group. Assumes Gaussian errors
+    double (*rs)(double) = std::sqrt;
+    std::transform(outputE.begin(), outputE.end(), outputE.begin(), rs);
+  }
+
+  return IvsLam;
+}
+
+/**
+* Share counts from an input value onto the projected output in virtual-lambda
+*
+* @param inputIdx [in] :: the index into the input arrays
+* @param twoTheta [in] :: the value of twotTheta for this spectrum
+* @param bTwoTheta [in] :: the size of the pixel in twoTheta
+* @param inputX [in] :: the input spectrum X values
+* @param inputY [in] :: the input spectrum Y values
+* @param inputE [in] :: the input spectrum E values
+* @param detectors [in] :: spectrum indices of the detectors of interest
+* @param outSpecIdx [in] :: the output spectrum index
+* @param IvsLam [in,out] :: the output workspace
+* @param outputE [in,out] :: the projected E values
+*/
+void ReflectometryReductionOne2::sumInQProcessValue(
+    const int inputIdx, const double twoTheta, const double bTwoTheta,
+    const HistogramX &inputX, const HistogramY &inputY,
+    const HistogramE &inputE, const std::vector<size_t> &detectors,
+    const size_t outSpecIdx, MatrixWorkspace_sptr IvsLam,
+    std::vector<double> &outputE) {
+
+  // Check whether there are any counts (if not, nothing to share)
+  const double inputCounts = inputY[inputIdx];
+  if (inputCounts <= 0.0 || std::isnan(inputCounts) ||
+      std::isinf(inputCounts)) {
+    return;
+  }
+  // Get the bin width and the bin centre
+  const double bLambda = getLambdaRange(inputX, inputIdx);
+  const double lambda = getLambda(inputX, inputIdx);
+  // Project these coordinates onto the virtual-lambda output (at twoThetaR)
+  double lambdaVMin = 0.0;
+  double lambdaVMax = 0.0;
+  getProjectedLambdaRange(lambda, twoTheta, bLambda, bTwoTheta, detectors,
+                          lambdaVMin, lambdaVMax);
+  // Share the input counts into the output array
+  sumInQShareCounts(inputCounts, inputE[inputIdx], bLambda, lambdaVMin,
+                    lambdaVMax, outSpecIdx, IvsLam, outputE);
+}
+
+/**
+ * Share the given input counts into the output array bins proportionally
+ * according to how much the bins overlap the given lambda range.
+ * outputX.size() must equal outputY.size() + 1
+ *
+ * @param inputCounts [in] :: the input counts to share out
+ * @param inputErr [in] :: the input errors to share out
+ * @param bLambda [in] :: the bin width in lambda
+ * @param lambdaMin [in] :: the start of the range to share counts to
+ * @param lambdaMax [in] :: the end of the range to share counts to
+ * @param outSpecIdx [in] :: the spectrum index to be updated in the output
+ * workspace
+ * @param IvsLam [in,out] :: the output workspace
+ * @param outputE [in,out] :: the projected E values
+ */
+void ReflectometryReductionOne2::sumInQShareCounts(
+    const double inputCounts, const double inputErr, const double bLambda,
+    const double lambdaMin, const double lambdaMax, const size_t outSpecIdx,
+    MatrixWorkspace_sptr IvsLam, std::vector<double> &outputE) {
+  // Check that we have histogram data
+  const auto &outputX = IvsLam->dataX(outSpecIdx);
+  auto &outputY = IvsLam->dataY(outSpecIdx);
+  if (outputX.size() != outputY.size() + 1) {
+    throw std::runtime_error(
+        "Expected output array to be histogram data (got X len=" +
+        std::to_string(outputX.size()) + ", Y len=" +
+        std::to_string(outputY.size()) + ")");
+  }
+
+  const double totalWidth = lambdaMax - lambdaMin;
+
+  // Get the first bin edge in the output X array that is within range.
+  // There will probably be some overlap, so start from the bin edge before
+  // this (unless we're already at the first bin edge).
+  auto startIter = std::lower_bound(outputX.begin(), outputX.end(), lambdaMin);
+  if (startIter != outputX.begin()) {
+    --startIter;
+  }
+
+  // Loop through all overlapping output bins. Convert the iterator to an
+  // index because we need to index both the X and Y arrays.
+  const int xSize = static_cast<int>(outputX.size());
+  for (auto outIdx = startIter - outputX.begin(); outIdx < xSize - 1;
+       ++outIdx) {
+    const double binStart = outputX[outIdx];
+    const double binEnd = outputX[outIdx + 1];
+    if (binStart > lambdaMax) {
+      // No longer in the overlap region so we're finished
+      break;
+    }
+    // Add a share of the input counts to this bin based on the proportion of
+    // overlap.
+    const double overlapWidth =
+        std::min({bLambda, lambdaMax - binStart, binEnd - lambdaMin});
+    const double fraction = overlapWidth / totalWidth;
+    outputY[outIdx] += inputCounts * fraction;
+    outputE[outIdx] += inputErr * fraction;
+  }
+}
+
+/**
+* Project an input pixel onto an arbitrary reference line at twoThetaR. The
+* projection is done along lines of constant Q, which emanate from theta0. The
+* top-left and bottom-right corners of the pixel are projected, resulting in an
+* output range in "virtual" lambda (lambdaV).
+*
+* For a description of this projection, see:
+*   R. Cubitt, T. Saerbeck, R.A. Campbell, R. Barker, P. Gutfreund
+*   J. Appl. Crystallogr., 48 (6) (2015)
+*
+* @param lambda [in] :: the lambda coord of the centre of the pixel to project
+* @param twoTheta [in] :: the twoTheta coord of the centre of the pixel to
+*project
+* @param bLambda [in] :: the pixel size in lambda
+* @param bTwoTheta [in] :: the pixel size in twoTheta
+* @param detectors [in] :: spectrum indices of the detectors of interest
+* @param lambdaVMin [out] :: the projected range start
+* @param lambdaVMax [out] :: the projected range end
+*/
+void ReflectometryReductionOne2::getProjectedLambdaRange(
+    const double lambda, const double twoTheta, const double bLambda,
+    const double bTwoTheta, const std::vector<size_t> &detectors,
+    double &lambdaVMin, double &lambdaVMax) {
+
+  // Get the angle from twoThetaR to this detector
+  const double twoThetaRVal = twoThetaR(detectors);
+  // Get the distance from the pixel to twoThetaR
+  const double gamma = twoTheta - twoThetaRVal;
+  // Get the angle from the horizon to the reference angle
+  const double horizonThetaR = twoThetaRVal - theta0();
+
+  // Calculate the projected wavelength range
+  try {
+    const double lambdaTop = std::sin(horizonThetaR) *
+                             (lambda + bLambda / 2.0) /
+                             std::sin(horizonThetaR + gamma - bTwoTheta / 2.0);
+    const double lambdaBot = std::sin(horizonThetaR) *
+                             (lambda - bLambda / 2.0) /
+                             std::sin(horizonThetaR + gamma + bTwoTheta / 2.0);
+    lambdaVMin = std::min(lambdaTop, lambdaBot);
+    lambdaVMax = std::max(lambdaTop, lambdaBot);
+  } catch (std::exception &ex) {
+    throw std::runtime_error(
+        "Failed to project (lambda, twoTheta) = (" + std::to_string(lambda) +
+        "," + std::to_string(twoTheta * 180.0 / M_PI) + ") onto twoThetaR = " +
+        std::to_string(twoThetaRVal) + ": " + ex.what());
+  }
+}
+
+/**
+Check whether the spectra for the given workspaces are the same.
+
+@param ws1 : First workspace to compare
+@param ws2 : Second workspace to compare against
+@param severe: True to indicate that failure to verify should result in an
+exception. Otherwise a warning is generated.
+*/
+void ReflectometryReductionOne2::verifySpectrumMaps(
+    MatrixWorkspace_const_sptr ws1, MatrixWorkspace_const_sptr ws2,
+    const bool severe) {
+
+  bool mismatch = false;
+  // Check that the number of histograms is the same
+  if (ws1->getNumberHistograms() != ws2->getNumberHistograms()) {
+    mismatch = true;
+  }
+  // Check that the spectrum numbers match for each histogram
+  if (!mismatch) {
+    for (size_t i = 0; i < ws1->getNumberHistograms(); ++i) {
+      if (ws1->indexInfo().spectrumNumber(i) !=
+          ws2->indexInfo().spectrumNumber(i)) {
+        mismatch = true;
+        break;
+      }
+    }
+  }
+  // Handle if error
+  if (mismatch) {
+    const std::string message =
+        "Spectrum maps between workspaces do NOT match up.";
+    if (severe) {
+      throw std::invalid_argument(message);
+    } else {
+      g_log.warning(message);
+    }
+  }
+}
 } // namespace Algorithms
 } // namespace Mantid
diff --git a/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp b/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp
index e5e8ebb3f421abe0807a01868c05b5c125c7d694..ddc0e55ed42e8198efee8b3a3c0401ed893c1f03 100644
--- a/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp
+++ b/Framework/Algorithms/src/ReflectometryReductionOneAuto2.cpp
@@ -112,6 +112,9 @@ void ReflectometryReductionOneAuto2::init() {
           "InputWorkspace", "", Direction::Input, PropertyMode::Mandatory),
       "Input run in TOF or wavelength");
 
+  // Reduction type
+  initReductionProperties();
+
   // Analysis mode
   const std::vector<std::string> analysisMode{"PointDetectorAnalysis",
                                               "MultiDetectorAnalysis"};
@@ -228,7 +231,8 @@ void ReflectometryReductionOneAuto2::exec() {
   alg->initialize();
 
   // Mandatory properties
-
+  alg->setProperty("SummationType", getPropertyValue("SummationType"));
+  alg->setProperty("ReductionType", getPropertyValue("ReductionType"));
   double wavMin = checkForMandatoryInstrumentDefault<double>(
       this, "WavelengthMin", instrument, "LambdaMin");
   alg->setProperty("WavelengthMin", wavMin);
@@ -250,6 +254,7 @@ void ReflectometryReductionOneAuto2::exec() {
     // Calculate theta
     theta = calculateTheta(instructions, inputWS);
   }
+  alg->setProperty("ThetaIn", theta);
 
   // Optional properties
 
@@ -294,7 +299,10 @@ void ReflectometryReductionOneAuto2::exec() {
     setProperty("ScaleFactor", 1.0);
 }
 
-/** Returns the detectors of interest, specified via processing instructions
+/** Returns the detectors of interest, specified via processing instructions.
+* Note that this returns the names of the parent detectors of the first and
+* last spectrum indices in the processing instructions. It is assumed that all
+* the interim detectors have the same parent.
 *
 * @param instructions :: processing instructions defining detectors of interest
 * @param inputWS :: the input workspace
@@ -304,24 +312,30 @@ std::vector<std::string> ReflectometryReductionOneAuto2::getDetectorNames(
     const std::string &instructions, MatrixWorkspace_sptr inputWS) {
 
   std::vector<std::string> wsIndices;
-  boost::split(wsIndices, instructions, boost::is_any_of(":,-"));
+  boost::split(wsIndices, instructions, boost::is_any_of(":,-+"));
   // vector of comopnents
   std::vector<std::string> detectors;
 
-  for (const auto wsIndex : wsIndices) {
+  try {
+    for (const auto wsIndex : wsIndices) {
 
-    size_t index = boost::lexical_cast<size_t>(wsIndex);
+      size_t index = boost::lexical_cast<size_t>(wsIndex);
 
-    auto detector = inputWS->getDetector(index);
-    auto parent = detector->getParent();
+      auto detector = inputWS->getDetector(index);
+      auto parent = detector->getParent();
 
-    if (parent) {
-      auto parentType = parent->type();
-      auto detectorName = (parentType == "Instrument") ? detector->getName()
-                                                       : parent->getName();
-      detectors.push_back(detectorName);
+      if (parent) {
+        auto parentType = parent->type();
+        auto detectorName = (parentType == "Instrument") ? detector->getName()
+                                                         : parent->getName();
+        detectors.push_back(detectorName);
+      }
     }
+  } catch (boost::bad_lexical_cast &) {
+    throw std::runtime_error("Invalid processing instructions: " +
+                             instructions);
   }
+
   return detectors;
 }
 
diff --git a/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp b/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp
index 7499ff6d92a8191970397a7d1d148dfc0361df69..9be7cfe924e54088b4b4566b55e619b4b08094cd 100644
--- a/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp
+++ b/Framework/Algorithms/src/ReflectometryWorkflowBase2.cpp
@@ -4,7 +4,9 @@
 #include "MantidAPI/WorkspaceUnitValidator.h"
 #include "MantidGeometry/Instrument.h"
 #include "MantidKernel/ArrayProperty.h"
+#include "MantidKernel/CompositeValidator.h"
 #include "MantidKernel/ListValidator.h"
+#include "MantidKernel/MandatoryValidator.h"
 #include "MantidKernel/RebinParamsValidator.h"
 #include "MantidKernel/Unit.h"
 
@@ -15,6 +17,23 @@ using namespace Mantid::Geometry;
 namespace Mantid {
 namespace Algorithms {
 
+/** Initialize properties related to the type of reduction
+*/
+void ReflectometryWorkflowBase2::initReductionProperties() {
+  // Summation type
+  std::vector<std::string> summationTypes = {"SumInLambda", "SumInQ"};
+  declareProperty("SummationType", "SumInLambda",
+                  boost::make_shared<StringListValidator>(summationTypes),
+                  "The type of summation to perform.", Direction::Input);
+
+  // Reduction type
+  std::vector<std::string> reductionTypes = {"Normal", "DivergentBeam",
+                                             "NonFlatSample"};
+  declareProperty("ReductionType", "Normal",
+                  boost::make_shared<StringListValidator>(reductionTypes),
+                  "The type of reduction to perform.", Direction::Input);
+}
+
 /** Initialize properties related to direct beam normalization
 */
 void ReflectometryWorkflowBase2::initDirectBeamProperties() {
@@ -178,6 +197,33 @@ void ReflectometryWorkflowBase2::initMomentumTransferProperties() {
                   "Factor you wish to scale Q workspace by.", Direction::Input);
 }
 
+/** Validate reduction properties, if given
+*
+* @return :: A map with results of validation
+*/
+std::map<std::string, std::string>
+ReflectometryWorkflowBase2::validateReductionProperties() const {
+
+  std::map<std::string, std::string> results;
+
+  // If summing in Q, then reduction type must be given
+  const std::string summationType = getProperty("SummationType");
+  const std::string reductionType = getProperty("ReductionType");
+  if (summationType == "SumInQ") {
+    if (reductionType == "Normal") {
+      results["ReductionType"] =
+          "ReductionType must be set if SummationType is SumInQ";
+    }
+  } else {
+    if (reductionType != "Normal") {
+      results["ReductionType"] =
+          "ReductionType should not be set unless SummationType is SumInQ";
+    }
+  }
+
+  return results;
+}
+
 /** Validate direct beam if given
 *
 * @return :: A map with results of validation
@@ -320,10 +366,12 @@ ReflectometryWorkflowBase2::cropWavelength(MatrixWorkspace_sptr inputWS) {
 /** Process an input workspace in TOF according to specified processing commands
 * to get a detector workspace in wavelength.
 * @param inputWS :: the input workspace in TOF
+* @param convert :: whether the result should be converted to wavelength
 * @return :: the detector workspace in wavelength
 */
 MatrixWorkspace_sptr
-ReflectometryWorkflowBase2::makeDetectorWS(MatrixWorkspace_sptr inputWS) {
+ReflectometryWorkflowBase2::makeDetectorWS(MatrixWorkspace_sptr inputWS,
+                                           const bool convert) {
 
   const std::string processingCommands =
       getPropertyValue("ProcessingInstructions");
@@ -334,7 +382,9 @@ ReflectometryWorkflowBase2::makeDetectorWS(MatrixWorkspace_sptr inputWS) {
   groupAlg->execute();
   MatrixWorkspace_sptr detectorWS = groupAlg->getProperty("OutputWorkspace");
 
-  detectorWS = convertToWavelength(detectorWS);
+  if (convert) {
+    detectorWS = convertToWavelength(detectorWS);
+  }
 
   return detectorWS;
 }
diff --git a/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp b/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp
index b3aa0b2168d826c9b6c507eb66a2176a538befa2..5503e62b37bc6c196efa3b28c478ed43d302d248 100644
--- a/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp
+++ b/Framework/Algorithms/src/SampleCorrections/RectangularBeamProfile.cpp
@@ -90,10 +90,10 @@ RectangularBeamProfile::defineActiveRegion(const API::Sample &sample) const {
   const auto &sampleMin(sampleBox.minPoint());
   const auto &sampleMax(sampleBox.maxPoint());
   V3D minPoint, maxPoint;
-  minPoint[m_horIdx] = m_min[m_horIdx];
-  maxPoint[m_horIdx] = m_min[m_horIdx] + m_width;
-  minPoint[m_upIdx] = m_min[m_upIdx];
-  maxPoint[m_upIdx] = m_min[m_upIdx] + m_height;
+  minPoint[m_horIdx] = std::max(sampleMin[m_horIdx], m_min[m_horIdx]);
+  maxPoint[m_horIdx] = std::min(sampleMax[m_horIdx], m_min[m_horIdx] + m_width);
+  minPoint[m_upIdx] = std::max(sampleMin[m_upIdx], m_min[m_upIdx]);
+  maxPoint[m_upIdx] = std::min(sampleMax[m_upIdx], m_min[m_upIdx] + m_height);
   minPoint[m_beamIdx] = sampleMin[m_beamIdx];
   maxPoint[m_beamIdx] = sampleMax[m_beamIdx];
 
diff --git a/Framework/Algorithms/src/SmoothNeighbours.cpp b/Framework/Algorithms/src/SmoothNeighbours.cpp
index f611844678ece60a66e0ff007acaf072f14c5e3c..c49e29e6d6c6884d3fe7bbfcc7f3cc1c059e4ad7 100644
--- a/Framework/Algorithms/src/SmoothNeighbours.cpp
+++ b/Framework/Algorithms/src/SmoothNeighbours.cpp
@@ -1,7 +1,7 @@
 #include "MantidAlgorithms/SmoothNeighbours.h"
 #include "MantidAPI/DetectorInfo.h"
 #include "MantidAPI/InstrumentValidator.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidAPI/WorkspaceFactory.h"
 #include "MantidDataObjects/EventList.h"
@@ -329,7 +329,8 @@ void SmoothNeighbours::findNeighboursUbiqutious() {
   m_neighbours.resize(inWS->getNumberHistograms());
 
   bool ignoreMaskedDetectors = getProperty("IgnoreMaskedDetectors");
-  NearestNeighbourInfo neighbourInfo(*inWS, ignoreMaskedDetectors, nNeighbours);
+  WorkspaceNearestNeighbourInfo neighbourInfo(*inWS, ignoreMaskedDetectors,
+                                              nNeighbours);
 
   // Cull by radius
   RadiusFilter radiusFilter(Radius);
diff --git a/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp b/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp
index cf79fb878c1e0b106a7751805aefd53d5260ca7d..fcb3b55fb15fd25e088fee3cb2308ceaa1791546 100644
--- a/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp
+++ b/Framework/Algorithms/src/SofQWNormalisedPolygon.cpp
@@ -1,7 +1,7 @@
 #include "MantidAlgorithms/SofQWNormalisedPolygon.h"
 #include "MantidAlgorithms/SofQW.h"
 #include "MantidAPI/BinEdgeAxis.h"
-#include "MantidAPI/NearestNeighbourInfo.h"
+#include "MantidAPI/WorkspaceNearestNeighbourInfo.h"
 #include "MantidAPI/SpectrumDetectorMapping.h"
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidAPI/WorkspaceFactory.h"
@@ -334,7 +334,8 @@ void SofQWNormalisedPolygon::initAngularCachesPSD(
 
   bool ignoreMasked = true;
   const int numNeighbours = 4;
-  NearestNeighbourInfo neighbourInfo(*workspace, ignoreMasked, numNeighbours);
+  WorkspaceNearestNeighbourInfo neighbourInfo(*workspace, ignoreMasked,
+                                              numNeighbours);
 
   this->m_theta = std::vector<double>(nHistos);
   this->m_thetaWidths = std::vector<double>(nHistos);
diff --git a/Framework/Algorithms/src/SpatialGrouping.cpp b/Framework/Algorithms/src/SpatialGrouping.cpp
index d41ff40179f70db563b0822b38d3b78853d6f23c..19d779c3a8f40b202efe9b4dc93bebc6643f7ffc 100644
--- a/Framework/Algorithms/src/SpatialGrouping.cpp
+++ b/Framework/Algorithms/src/SpatialGrouping.cpp
@@ -82,7 +82,7 @@ void SpatialGrouping::exec() {
   Mantid::API::Progress prog(this, 0.0, 1.0, m_positions.size());
 
   bool ignoreMaskedDetectors = false;
-  m_neighbourInfo = Kernel::make_unique<API::NearestNeighbourInfo>(
+  m_neighbourInfo = Kernel::make_unique<API::WorkspaceNearestNeighbourInfo>(
       *inputWorkspace, ignoreMaskedDetectors);
 
   for (size_t i = 0; i < inputWorkspace->getNumberHistograms(); ++i) {
diff --git a/Framework/Algorithms/test/AnnularRingAbsorptionTest.h b/Framework/Algorithms/test/AnnularRingAbsorptionTest.h
index c3131a7605dfc62b27e0a04b6682aa016d4bf9a6..368464508ac3f10825c77ffae089bb965b12668d 100644
--- a/Framework/Algorithms/test/AnnularRingAbsorptionTest.h
+++ b/Framework/Algorithms/test/AnnularRingAbsorptionTest.h
@@ -43,11 +43,11 @@ public:
     MatrixWorkspace_sptr outWS = alg->getProperty("OutputWorkspace");
     TS_ASSERT(outWS);
 
-    const double delta(1e-08);
+    const double delta(1e-04);
     const size_t middle_index = 4;
-    TS_ASSERT_DELTA(0.96859812, outWS->readY(0).front(), delta);
-    TS_ASSERT_DELTA(0.79254304, outWS->readY(0)[middle_index], delta);
-    TS_ASSERT_DELTA(0.67064972, outWS->readY(0).back(), delta);
+    TS_ASSERT_DELTA(0.9694, outWS->readY(0).front(), delta);
+    TS_ASSERT_DELTA(0.8035, outWS->readY(0)[middle_index], delta);
+    TS_ASSERT_DELTA(0.6530, outWS->readY(0).back(), delta);
   }
 
   //-------------------- Failure cases --------------------------------
diff --git a/Framework/Algorithms/test/ConvertUnitsTest.h b/Framework/Algorithms/test/ConvertUnitsTest.h
index 1b493a0c46d4b31381e56de29052160fcf4d19d7..95460c9c0f873323e301e441b8ee8a705e57c947 100644
--- a/Framework/Algorithms/test/ConvertUnitsTest.h
+++ b/Framework/Algorithms/test/ConvertUnitsTest.h
@@ -608,6 +608,23 @@ public:
     // Check EMode has been set
     TS_ASSERT_EQUALS(Mantid::Kernel::DeltaEMode::Direct, output->getEMode());
 
+    ConvertUnits conv4;
+    conv4.initialize();
+    conv4.setProperty("InputWorkspace", ws);
+    conv4.setPropertyValue("OutputWorkspace", outputSpace);
+    conv4.setPropertyValue("Target", "dSpacingPerpendicular");
+    conv4.setPropertyValue("Emode", "Direct");
+    conv4.execute();
+
+    TS_ASSERT_THROWS_NOTHING(
+        output = AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(
+            outputSpace));
+    TS_ASSERT_EQUALS(output->getAxis(0)->unit()->unitID(),
+                     "dSpacingPerpendicular");
+    TS_ASSERT_EQUALS(output->blocksize(), 2663);
+    // Check EMode has been set
+    TS_ASSERT_EQUALS(Mantid::Kernel::DeltaEMode::Direct, output->getEMode());
+
     AnalysisDataService::Instance().remove(outputSpace);
   }
 
diff --git a/Framework/Algorithms/test/CreateSampleWorkspaceTest.h b/Framework/Algorithms/test/CreateSampleWorkspaceTest.h
index ea15dc7a27829e9e863f4fac7fbad5ecd97a3267..fcc9c159c7fb858a12a0f976ecb91c499fa2d6a9 100644
--- a/Framework/Algorithms/test/CreateSampleWorkspaceTest.h
+++ b/Framework/Algorithms/test/CreateSampleWorkspaceTest.h
@@ -4,6 +4,7 @@
 #include <cxxtest/TestSuite.h>
 
 #include "MantidAPI/AnalysisDataService.h"
+#include "MantidAPI/DetectorInfo.h"
 #include "MantidAPI/FrameworkManager.h"
 #include "MantidGeometry/Instrument.h"
 #include "MantidGeometry/IComponent.h"
@@ -43,7 +44,8 @@ public:
       std::string outWSName, std::string wsType = "", std::string function = "",
       std::string userFunction = "", int numBanks = 2, int bankPixelWidth = 10,
       int numEvents = 1000, bool isRandom = false, std::string xUnit = "TOF",
-      double xMin = 0.0, double xMax = 20000.0, double binWidth = 200.0) {
+      double xMin = 0.0, double xMax = 20000.0, double binWidth = 200.0,
+      int numScanPoints = 1) {
 
     CreateSampleWorkspace alg;
     TS_ASSERT_THROWS_NOTHING(alg.initialize());
@@ -72,6 +74,8 @@ public:
       TS_ASSERT_THROWS_NOTHING(alg.setProperty("XMax", xMax));
     if (binWidth != 200.0)
       TS_ASSERT_THROWS_NOTHING(alg.setProperty("BinWidth", binWidth));
+    if (numScanPoints != 1)
+      TS_ASSERT_THROWS_NOTHING(alg.setProperty("NumScanPoints", numScanPoints))
 
     TS_ASSERT_THROWS_NOTHING(alg.execute(););
     TS_ASSERT(alg.isExecuted());
@@ -87,7 +91,7 @@ public:
     // check the basics
     int numBins = static_cast<int>((xMax - xMin) / binWidth);
     int numHist = numBanks * bankPixelWidth * bankPixelWidth;
-    TS_ASSERT_EQUALS(ws->getNumberHistograms(), numHist);
+    TS_ASSERT_EQUALS(ws->getNumberHistograms(), numHist * numScanPoints);
     TS_ASSERT_EQUALS(ws->blocksize(), numBins);
 
     TS_ASSERT_EQUALS(ws->getAxis(0)->unit()->unitID(), xUnit);
@@ -512,6 +516,42 @@ public:
     // Remove workspace from the data service.
     AnalysisDataService::Instance().remove("outWS");
   }
+
+  void test_ScanningWorkspace_defaults() {
+    // Name of the output workspace.
+    std::string outWSName("scanning_workspace");
+
+    const int numBanks = 2;
+    const int bankPixelWidth = 10;
+    const int numScanPoints = 10;
+
+    MatrixWorkspace_sptr ws = createSampleWorkspace(
+        outWSName, "", "", "", numBanks, bankPixelWidth, 1000, false, "TOF",
+        0.0, 20000.0, 200.0, numScanPoints);
+
+    TS_ASSERT_EQUALS(ws->getNumberHistograms(), numBanks * bankPixelWidth *
+                                                    bankPixelWidth *
+                                                    numScanPoints);
+
+    const auto &detectorInfo = ws->detectorInfo();
+    TS_ASSERT(detectorInfo.isScanning());
+
+    const auto centreDetector = numBanks * bankPixelWidth * bankPixelWidth / 2;
+    const auto radiansToDegrees = 180.0 / M_PI;
+
+    // The centre pixel should go from 0 -> 10 degrees, all at the same l2
+    for (size_t j = 0; j < detectorInfo.scanCount(centreDetector); ++j) {
+      const auto index = std::pair<size_t, size_t>(centreDetector, j);
+      TS_ASSERT_DELTA(10.0, detectorInfo.l2(index), 1e-10);
+      TS_ASSERT_DELTA(j, detectorInfo.twoTheta(index) * radiansToDegrees,
+                      1e-10);
+      TS_ASSERT_DELTA(j, detectorInfo.rotation(index).getEulerAngles("XYZ")[1],
+                      1e-10);
+    }
+
+    // Remove workspace from the data service.
+    AnalysisDataService::Instance().remove(outWSName);
+  }
 };
 
 #endif /* MANTID_ALGORITHMS_CREATESAMPLEWORKSPACETEST_H_ */
diff --git a/Framework/Algorithms/test/FindEPPTest.h b/Framework/Algorithms/test/FindEPPTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..cc12ab85b14d20e3fa71264cfb2750a839499cba
--- /dev/null
+++ b/Framework/Algorithms/test/FindEPPTest.h
@@ -0,0 +1,257 @@
+#ifndef MANTID_ALGORITHMS_FINDEPPTEST_H_
+#define MANTID_ALGORITHMS_FINDEPPTEST_H_
+
+#include <cxxtest/TestSuite.h>
+
+#include "MantidAlgorithms/FindEPP.h"
+#include "MantidAlgorithms/CreateSampleWorkspace.h"
+
+#include "MantidAPI/AnalysisDataService.h"
+#include "MantidAPI/FrameworkManager.h"
+#include "MantidAPI/ITableWorkspace.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAPI/WorkspaceFactory.h"
+
+using namespace Mantid::Algorithms;
+using namespace Mantid::API;
+
+namespace {
+enum WorkspaceType : size_t {
+  NegativeMaximum = 0,
+  NarrowPeak = 1,
+  FitFailed = 2,
+  Success = 3,
+  Performance = 4
+};
+
+MatrixWorkspace_sptr _create_test_workspace(WorkspaceType type) {
+
+  CreateSampleWorkspace createAlg;
+
+  if (type != NegativeMaximum) {
+    createAlg.initialize();
+    createAlg.setProperty("BankPixelWidth", 1);
+    createAlg.setPropertyValue("OutputWorkspace", "__ws");
+    createAlg.setLogging(false);
+    createAlg.setChild(true);
+  }
+
+  switch (type) {
+
+  case NegativeMaximum: {
+    size_t nBins = 5;
+    MatrixWorkspace_sptr result =
+        WorkspaceFactory::Instance().create("Workspace2D", 1, nBins, nBins);
+    for (size_t bin = 0; bin < nBins; ++bin) {
+      result->mutableY(0)[bin] = -1.;
+      result->mutableX(0)[bin] = double(bin);
+    }
+    return result;
+  }
+
+  case NarrowPeak: {
+    createAlg.setPropertyValue("Function", "User Defined");
+    createAlg.setPropertyValue(
+        "UserDefinedFunction",
+        "name=Gaussian, PeakCentre=5, Height=1, Sigma=0.05");
+    createAlg.setProperty("XMin", 0.);
+    createAlg.setProperty("XMax", 10.);
+    createAlg.setProperty("BinWidth", 0.1);
+    createAlg.setProperty("NumBanks", 1);
+    break;
+  }
+
+  case FitFailed: {
+    createAlg.setPropertyValue("Function", "Exp Decay");
+    createAlg.setProperty("XMin", 0.);
+    createAlg.setProperty("XMax", 100.);
+    createAlg.setProperty("BinWidth", 1.);
+    createAlg.setProperty("NumBanks", 1);
+    break;
+  }
+
+  case Success: {
+    createAlg.setPropertyValue("Function", "User Defined");
+    createAlg.setPropertyValue("UserDefinedFunction",
+                               "name=LinearBackground,A0=0.3;"
+                               "name=Gaussian,"
+                               "PeakCentre=6000, Height=5, Sigma=75");
+    createAlg.setProperty("XMin", 4005.75);
+    createAlg.setProperty("XMax", 7995.75);
+    createAlg.setProperty("BinWidth", 10.5);
+    createAlg.setProperty("NumBanks", 2);
+    break;
+  }
+
+  case Performance: {
+    createAlg.setPropertyValue("Function", "User Defined");
+    createAlg.setPropertyValue("UserDefinedFunction",
+                               "name=LinearBackground,A0=0.3,A1=0.001;"
+                               "name=Gaussian,"
+                               "PeakCentre=6000, Height=5, Sigma=75");
+    createAlg.setProperty("XMin", 4005.75);
+    createAlg.setProperty("XMax", 7995.75);
+    createAlg.setProperty("BinWidth", 5.01);
+    createAlg.setProperty("NumBanks", 100);
+    createAlg.setProperty("BankPixelWidth", 10);
+    createAlg.setProperty("Random", true);
+    break;
+  }
+  }
+
+  createAlg.execute();
+  return createAlg.getProperty("OutputWorkspace");
+}
+}
+
+class FindEPPTest : public CxxTest::TestSuite {
+public:
+  // This pair of boilerplate methods prevent the suite being created statically
+  // This means the constructor isn't called when running other tests
+  static FindEPPTest *createSuite() { return new FindEPPTest(); }
+  static void destroySuite(FindEPPTest *suite) { delete suite; }
+
+  FindEPPTest()
+      : m_columnNames({"WorkspaceIndex", "PeakCentre", "PeakCentreError",
+                       "Sigma", "SigmaError", "Height", "HeightError", "chiSq",
+                       "FitStatus"}),
+        m_delta(1E-4) {
+    FrameworkManager::Instance();
+  }
+
+  void test_init() {
+    FindEPP alg;
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT(alg.isInitialized());
+  }
+
+  void test_success() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(Success);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 2);
+
+    for (size_t row = 0; row < 2; ++row) {
+      TS_ASSERT_EQUALS(outputWS->cell<std::string>(row, 8), "success");
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 1), 6005.25, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 2), 8.817, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 3), 89.3248, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 4), 7.2306, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 5), 4.8384, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 6), 0.6161, m_delta);
+      TS_ASSERT_DELTA(outputWS->cell<double>(row, 7), 0.1643, m_delta);
+    }
+  }
+
+  void test_negativeMaximum() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(NegativeMaximum);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 1);
+
+    TS_ASSERT_EQUALS(outputWS->cell<std::string>(0, 8), "negativeMaximum");
+    TS_ASSERT_DELTA(outputWS->cell<double>(0, 1), 0., m_delta);
+  }
+
+  void test_narrowPeak() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(NarrowPeak);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 1);
+
+    TS_ASSERT_EQUALS(outputWS->cell<std::string>(0, 8), "narrowPeak");
+    TS_ASSERT_DELTA(outputWS->cell<double>(0, 1), 5., m_delta);
+  }
+
+  void test_fitFailed() {
+    MatrixWorkspace_sptr inputWS = _create_test_workspace(FitFailed);
+
+    FindEPP alg;
+    alg.setChild(true);
+    alg.setLogging(false);
+
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "__unused_for_child"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+    TS_ASSERT(alg.isExecuted());
+
+    ITableWorkspace_sptr outputWS = alg.getProperty("OutputWorkspace");
+    _check_table(outputWS, 1);
+
+    TS_ASSERT_EQUALS(outputWS->cell<std::string>(0, 8), "fitFailed");
+    TS_ASSERT_DELTA(outputWS->cell<double>(0, 1), 0., m_delta);
+  }
+
+private:
+  void _check_table(ITableWorkspace_sptr ws, size_t nSpectra) {
+    TS_ASSERT_EQUALS(ws->rowCount(), nSpectra);
+    TS_ASSERT_EQUALS(ws->columnCount(), 9);
+    TS_ASSERT_EQUALS(ws->getColumnNames(), m_columnNames);
+  }
+  std::vector<std::string> m_columnNames;
+  double m_delta;
+};
+
+class FindEPPTestPerformance : public CxxTest::TestSuite {
+public:
+  static FindEPPTestPerformance *createSuite() {
+    return new FindEPPTestPerformance();
+  }
+  static void destroySuite(FindEPPTestPerformance *suite) { delete suite; }
+
+  FindEPPTestPerformance() {}
+
+  void setUp() override {
+    FrameworkManager::Instance();
+    MatrixWorkspace_sptr in = _create_test_workspace(Performance);
+    m_alg.initialize();
+    m_alg.setProperty("InputWorkspace", in);
+    m_alg.setProperty("OutputWorkspace", "__out_ws");
+  }
+
+  void tearDown() override {
+    AnalysisDataService::Instance().remove("__out_ws");
+  }
+
+  void test_performance() { m_alg.execute(); }
+
+private:
+  FindEPP m_alg;
+};
+
+#endif /* MANTID_ALGORITHMS_FINDEPPTEST_H_ */
diff --git a/Framework/Algorithms/test/LineProfileTest.h b/Framework/Algorithms/test/LineProfileTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..29aabefeb41a3861c80cf3db11a47736277a13f7
--- /dev/null
+++ b/Framework/Algorithms/test/LineProfileTest.h
@@ -0,0 +1,387 @@
+#ifndef MANTID_ALGORITHMS_LINEPROFILETEST_H_
+#define MANTID_ALGORITHMS_LINEPROFILETEST_H_
+
+#include <cxxtest/TestSuite.h>
+
+#include "MantidAlgorithms/LineProfile.h"
+
+#include "MantidAlgorithms/CompareWorkspaces.h"
+#include "MantidAPI/Axis.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidTestHelpers/WorkspaceCreationHelper.h"
+
+using Mantid::Algorithms::CompareWorkspaces;
+using Mantid::Algorithms::LineProfile;
+using namespace Mantid::API;
+using namespace Mantid::DataObjects;
+using namespace Mantid::HistogramData;
+using namespace Mantid::Kernel;
+using namespace WorkspaceCreationHelper;
+
+class LineProfileTest : public CxxTest::TestSuite {
+public:
+  // This pair of boilerplate methods prevent the suite being created statically
+  // This means the constructor isn't called when running other tests
+  static LineProfileTest *createSuite() { return new LineProfileTest(); }
+  static void destroySuite(LineProfileTest *suite) { delete suite; }
+
+  void test_Init() {
+    LineProfile alg;
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+  }
+
+  void test_averaging_profile_of_single_horizontal_spectrum() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+    const auto inputXMode = inputWS->histogram(0).xMode();
+
+    const int start = 2;
+    const int end = nBins - 2;
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Direction", "Horizontal"))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("Centre", static_cast<double>(nHist) / 2))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 0.49))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("Start", static_cast<double>(start)))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("End", static_cast<double>(end)))
+    TS_ASSERT_THROWS_NOTHING(alg.execute())
+    TS_ASSERT(alg.isExecuted())
+
+    Workspace2D_sptr outputWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(outputWS);
+    TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 1)
+    const auto hist = outputWS->histogram(0);
+    TS_ASSERT_EQUALS(hist.xMode(), inputXMode)
+    for (size_t i = 0; i < hist.x().size(); ++i) {
+      TS_ASSERT_EQUALS(hist.x()[i], i + start)
+    }
+    for (const auto y : hist.y()) {
+      TS_ASSERT_EQUALS(y, inputWS->y(0)[0])
+    }
+    for (const auto e : hist.e()) {
+      TS_ASSERT_EQUALS(e, inputWS->e(0)[0])
+    }
+    const auto vertAxis = outputWS->getAxis(1);
+    TS_ASSERT_EQUALS(vertAxis->getValue(0),
+                     static_cast<double>(nHist) / 2 - 0.5)
+    TS_ASSERT_EQUALS(vertAxis->getValue(1),
+                     static_cast<double>(nHist) / 2 + 0.5)
+  }
+
+  void test_summing_profile() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+    for (size_t i = 0; i < nBins; ++i) {
+      inputWS->mutableY(nHist / 2)[i] = std::nan("");
+    }
+    const int start = 2;
+    const int end = nBins - 2;
+    Workspace2D_sptr outputWS =
+        profileOverTwoSpectra(inputWS, start, end, "Sum");
+    TS_ASSERT(outputWS);
+    TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 1)
+    const auto hist = outputWS->histogram(0);
+    for (size_t i = 0; i < hist.x().size(); ++i) {
+      TS_ASSERT_EQUALS(hist.x()[i], i + start)
+    }
+    for (const auto y : hist.y()) {
+      TS_ASSERT_EQUALS(y, 2 * inputWS->y(0)[0])
+    }
+    for (const auto e : hist.e()) {
+      TS_ASSERT_EQUALS(e, 2 * inputWS->e(0)[0])
+    }
+    const auto vertAxis = outputWS->getAxis(1);
+    TS_ASSERT_EQUALS(vertAxis->getValue(0),
+                     static_cast<double>(nHist) / 2 - 0.5)
+    TS_ASSERT_EQUALS(vertAxis->getValue(1),
+                     static_cast<double>(nHist) / 2 + 1.5)
+  }
+
+  void test_horizontal_profile_linewidth_outside_workspace() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+    const auto inputXMode = inputWS->histogram(0).xMode();
+
+    const int start = 2;
+    const int end = nBins - 2;
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Direction", "Horizontal"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Centre", 1.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 3.0))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("Start", static_cast<double>(start)))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("End", static_cast<double>(end)))
+    TS_ASSERT_THROWS_NOTHING(alg.execute())
+    TS_ASSERT(alg.isExecuted())
+
+    Workspace2D_sptr outputWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(outputWS);
+    TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 1)
+    const auto hist = outputWS->histogram(0);
+    TS_ASSERT_EQUALS(hist.xMode(), inputXMode)
+    for (size_t i = 0; i < hist.x().size(); ++i) {
+      TS_ASSERT_EQUALS(hist.x()[i], i + start)
+    }
+    for (const auto y : hist.y()) {
+      TS_ASSERT_EQUALS(y, inputWS->y(0)[0])
+    }
+    for (const auto e : hist.e()) {
+      TS_ASSERT_EQUALS(e,
+                       std::sqrt(4 * inputWS->e(0)[0] * inputWS->e(0)[0]) / 4)
+    }
+    const auto vertAxis = outputWS->getAxis(1);
+    TS_ASSERT_EQUALS(vertAxis->getValue(0), 1.0)
+    TS_ASSERT_EQUALS(vertAxis->getValue(1), 5.0)
+  }
+
+  void test_vertical_profile() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+
+    const int start = 2;
+    const int end = nHist - 2;
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Direction", "Vertical"))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("Centre", static_cast<double>(nBins) / 2))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 3.0))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("Start", static_cast<double>(start)))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("End", static_cast<double>(end)))
+    TS_ASSERT_THROWS_NOTHING(alg.execute())
+    TS_ASSERT(alg.isExecuted())
+
+    Workspace2D_sptr outputWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(outputWS);
+    TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 1)
+    const auto hist = outputWS->histogram(0);
+    TS_ASSERT_EQUALS(hist.xMode(), Histogram::XMode::Points)
+    for (size_t i = 0; i < hist.x().size(); ++i) {
+      TS_ASSERT_EQUALS(hist.x()[i], i + start)
+    }
+    for (const auto y : hist.y()) {
+      TS_ASSERT_EQUALS(y, inputWS->y(0)[0])
+    }
+    for (const auto e : hist.e()) {
+      TS_ASSERT_EQUALS(e,
+                       std::sqrt(7 * inputWS->e(0)[0] * inputWS->e(0)[0]) / 7)
+    }
+    const auto vertAxis = outputWS->getAxis(1);
+    TS_ASSERT_EQUALS(vertAxis->getValue(0),
+                     static_cast<double>(nBins) / 2 - 3.5)
+    TS_ASSERT_EQUALS(vertAxis->getValue(1),
+                     static_cast<double>(nBins) / 2 + 3.5)
+  }
+
+  void test_vertical_profile_over_entire_workspace() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Direction", "Vertical"))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("Centre", static_cast<double>(nBins) / 2))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 3.0))
+    TS_ASSERT_THROWS_NOTHING(alg.execute())
+    TS_ASSERT(alg.isExecuted())
+
+    Workspace2D_sptr outputWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(outputWS);
+    TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 1)
+    const auto hist = outputWS->histogram(0);
+    TS_ASSERT_EQUALS(hist.xMode(), Histogram::XMode::Points)
+    for (size_t i = 0; i < hist.x().size(); ++i) {
+      TS_ASSERT_EQUALS(hist.x()[i], i + 1)
+    }
+    for (const auto y : hist.y()) {
+      TS_ASSERT_EQUALS(y, inputWS->y(0)[0])
+    }
+    for (const auto e : hist.e()) {
+      TS_ASSERT_EQUALS(e,
+                       std::sqrt(7 * inputWS->e(0)[0] * inputWS->e(0)[0]) / 7)
+    }
+    const auto vertAxis = outputWS->getAxis(1);
+    TS_ASSERT_EQUALS(vertAxis->getValue(0),
+                     static_cast<double>(nBins) / 2 - 3.5)
+    TS_ASSERT_EQUALS(vertAxis->getValue(1),
+                     static_cast<double>(nBins) / 2 + 3.5)
+  }
+
+  void test_failure_when_profile_outside_workspace() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Direction", "Horizontal"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Centre", -10.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 1.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Start", 2.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("End", 9.0))
+    TS_ASSERT_THROWS_ANYTHING(alg.execute())
+    TS_ASSERT(!alg.isExecuted())
+  }
+
+  void test_failure_with_non_positive_width() {
+    LineProfile alg;
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("HalfWidth", std::numeric_limits<double>::min()))
+    TS_ASSERT_THROWS_ANYTHING(alg.setProperty("HalfWidth", 0.0))
+    TS_ASSERT_THROWS_ANYTHING(alg.setProperty("HalfWidth", -1.0))
+  }
+
+  void test_failure_start_smaller_than_end() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Direction", "Horizontal"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Centre", -10.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 1.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Start", 9.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("End", 2.0))
+    const auto issues = alg.validateInputs();
+    const auto it = issues.find("Start");
+    TS_ASSERT_DIFFERS(it, issues.end())
+  }
+
+  void test_ignore_special_values() {
+    const size_t nHist = 13;
+    const size_t nBins = 23;
+    MatrixWorkspace_sptr inputWS = create2DWorkspace154(nHist, nBins);
+    inputWS->mutableY(2)[6] = std::numeric_limits<double>::quiet_NaN();
+    inputWS->mutableY(3)[13] = std::numeric_limits<double>::infinity();
+    const auto inputXMode = inputWS->histogram(0).xMode();
+
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Direction", "Horizontal"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Centre", 3.5))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 0.5))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Start", 0.0))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("End", static_cast<double>(nBins)))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("IgnoreNans", true))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("IgnoreInfs", true))
+    TS_ASSERT_THROWS_NOTHING(alg.execute())
+    TS_ASSERT(alg.isExecuted())
+
+    Workspace2D_sptr outputWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(outputWS);
+    TS_ASSERT_EQUALS(outputWS->getNumberHistograms(), 1)
+    const auto hist = outputWS->histogram(0);
+    TS_ASSERT_EQUALS(hist.xMode(), inputXMode)
+    for (size_t i = 0; i < hist.x().size(); ++i) {
+      TS_ASSERT_EQUALS(hist.x()[i], i + 1)
+    }
+    for (const auto y : hist.y()) {
+      TS_ASSERT_EQUALS(y, inputWS->y(0)[0])
+    }
+    for (size_t i = 0; i < hist.e().size(); ++i) {
+      if (i == 6 || i == 13) {
+        TS_ASSERT_EQUALS(hist.e()[i], inputWS->e(0)[0])
+        continue;
+      }
+      TS_ASSERT_EQUALS(hist.e()[i],
+                       std::sqrt(2 * inputWS->e(0)[0] * inputWS->e(0)[0]) / 2)
+    }
+    const auto vertAxis = outputWS->getAxis(1);
+    TS_ASSERT_EQUALS(vertAxis->getValue(0), 3)
+    TS_ASSERT_EQUALS(vertAxis->getValue(1), 5)
+  }
+
+private:
+  Workspace2D_sptr profileOverTwoSpectra(MatrixWorkspace_sptr inputWS,
+                                         const int start, const int end,
+                                         const std::string &mode) {
+    LineProfile alg;
+    // Don't put output in ADS by default
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("InputWorkspace", inputWS))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", "_unused_for_child"))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty(
+        "Centre", static_cast<double>(inputWS->getNumberHistograms()) / 2))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("HalfWidth", 0.5))
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("Start", static_cast<double>(start)))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("End", static_cast<double>(end)))
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("Mode", mode))
+    TS_ASSERT_THROWS_NOTHING(alg.execute())
+    TS_ASSERT(alg.isExecuted())
+
+    Workspace2D_sptr outputWS = alg.getProperty("OutputWorkspace");
+    return outputWS;
+  }
+};
+
+#endif /* MANTID_ALGORITHMS_LINEPROFILETEST_H_ */
diff --git a/Framework/Algorithms/test/RectangularBeamProfileTest.h b/Framework/Algorithms/test/RectangularBeamProfileTest.h
index 973a622ca00d9708c449a1db986fc6b036147363..32b043458930dc56387350a7714a069d0ab64981 100644
--- a/Framework/Algorithms/test/RectangularBeamProfileTest.h
+++ b/Framework/Algorithms/test/RectangularBeamProfileTest.h
@@ -82,7 +82,22 @@ public:
     TS_ASSERT_EQUALS(V3D(1.0, 0, 0), ray.unitDir);
   }
 
-  void test_DefineActiveRegion() {
+  void test_DefineActiveRegion_beam_larger_than_sample() {
+    using Mantid::API::Sample;
+    using Mantid::Kernel::V3D;
+    const double width(3.3), height(6.9);
+    const V3D center;
+    RectangularBeamProfile profile(createTestFrame(), center, width, height);
+    Sample testSample;
+    testSample.setShape(*ComponentCreationHelper::createSphere(0.5));
+
+    auto region = profile.defineActiveRegion(testSample);
+    TS_ASSERT(region.isNonNull());
+    TS_ASSERT_EQUALS(V3D(-0.5, -0.5, -0.5), region.minPoint());
+    TS_ASSERT_EQUALS(V3D(0.5, 0.5, 0.5), region.maxPoint());
+  }
+
+  void test_DefineActiveRegion_beam_smaller_than_sample() {
     using Mantid::API::Sample;
     using Mantid::Kernel::V3D;
     const double width(0.1), height(0.2);
diff --git a/Framework/Algorithms/test/ReflectometryReductionOne2Test.h b/Framework/Algorithms/test/ReflectometryReductionOne2Test.h
index c1cb58710a78e1e39f6d2d211f5b132b72a725c2..e4264b1458229f2be965a0a3821933257d707609 100644
--- a/Framework/Algorithms/test/ReflectometryReductionOne2Test.h
+++ b/Framework/Algorithms/test/ReflectometryReductionOne2Test.h
@@ -19,7 +19,7 @@ using namespace WorkspaceCreationHelper;
 class ReflectometryReductionOne2Test : public CxxTest::TestSuite {
 private:
   MatrixWorkspace_sptr m_multiDetectorWS;
-  MatrixWorkspace_sptr m_wavelengthWS;
+  MatrixWorkspace_sptr m_transmissionWS;
 
 public:
   // This pair of boilerplate methods prevent the suite being created statically
@@ -35,11 +35,21 @@ public:
     FrameworkManager::Instance();
     // A multi detector ws
     m_multiDetectorWS =
-        create2DWorkspaceWithReflectometryInstrumentMultiDetector();
-    // A workspace in wavelength
-    m_wavelengthWS =
-        create2DWorkspaceWithReflectometryInstrumentMultiDetector();
-    m_wavelengthWS->getAxis(0)->setUnit("Wavelength");
+        create2DWorkspaceWithReflectometryInstrumentMultiDetector(0, 0.1);
+    // A transmission ws with different spectrum numbers to the run
+    m_transmissionWS =
+        create2DWorkspaceWithReflectometryInstrumentMultiDetector(0, 0.1);
+    m_transmissionWS->getSpectrum(0).setSpectrumNo(2);
+    m_transmissionWS->getSpectrum(1).setSpectrumNo(3);
+    m_transmissionWS->getSpectrum(2).setSpectrumNo(4);
+    m_transmissionWS->getSpectrum(3).setSpectrumNo(5);
+    // Set different values in each spectrum so that we can check the correct
+    // spectra were used for the transmission correction
+    using namespace Mantid::HistogramData;
+    m_transmissionWS->setCounts(0, Counts(m_transmissionWS->y(0).size(), 10));
+    m_transmissionWS->setCounts(1, Counts(m_transmissionWS->y(1).size(), 20));
+    m_transmissionWS->setCounts(2, Counts(m_transmissionWS->y(2).size(), 30));
+    m_transmissionWS->setCounts(3, Counts(m_transmissionWS->y(3).size(), 40));
   }
 
   void test_IvsLam() {
@@ -47,22 +57,10 @@ public:
     // No monitor normalization
     // No direct beam normalization
     // No transmission correction
-
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT(outLam);
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     TS_ASSERT_DELTA(outLam->y(0)[0], 2.0000, 0.0001);
@@ -77,19 +75,9 @@ public:
     // Processing instructions : 1+2
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1+2");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithm(alg, 1.5, 15.0, "1+2");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Y counts, should be 2.0000 * 2
@@ -105,19 +93,9 @@ public:
     // Processing instructions : 1-3
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1-3");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithm(alg, 1.5, 15.0, "1-3");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Y counts, should be 2.0000 * 3
@@ -125,20 +103,72 @@ public:
     TS_ASSERT_DELTA(outLam->y(0)[7], 6.0000, 0.0001);
   }
 
+  void test_IvsLam_multiple_detector_groups() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2,1+3 (two separate groups)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2,1+3");
+    // Run the algorithm. There should be 2 output histograms, one for each
+    // input group. Note that the group order is swapped from the input order
+    // because they are sorted by the first spectrum number in the group,
+    // i.e. as if the input was "1+3,2"
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 14, 2);
+
+    TS_ASSERT(outLam->x(0)[0] >= 1.5);
+    TS_ASSERT(outLam->x(0)[7] <= 15.0);
+    TS_ASSERT(outLam->x(1)[0] >= 1.5);
+    TS_ASSERT(outLam->x(1)[7] <= 15.0);
+    // Y counts, should be 2.0000 * 2 for first group, 2.0000 * 1 for second.
+    TS_ASSERT_DELTA(outLam->y(0)[0], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(1)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(1)[7], 2.0000, 0.0001);
+  }
+
   void test_bad_processing_instructions() {
     // Processing instructions : 5+6
 
-    auto alg = AlgorithmManager::Instance().create("ReflectometryReductionOne");
-    alg->setChild(true);
-    alg->initialize();
-    alg->setProperty("InputWorkspace", m_multiDetectorWS);
-    alg->setProperty("WavelengthMin", 1.5);
-    alg->setProperty("WavelengthMax", 15.0);
-    alg->setPropertyValue("OutputWorkspace", "IvsQ");
-    alg->setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg->setPropertyValue("ProcessingInstructions", "5+6");
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "5+6");
     // Must throw as spectrum 2 is not defined
-    TS_ASSERT_THROWS_ANYTHING(alg->execute());
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
+  }
+
+  void test_sum_in_lambda() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInLambda (same as default)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInLambda");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
+
+    TS_ASSERT(outLam->x(0)[0] >= 1.5);
+    TS_ASSERT(outLam->x(0)[7] <= 15.0);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 2.0000, 0.0001);
+  }
+
+  void test_sum_in_lambda_with_bad_reduction_type() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInLambda (same as default)
+    // ReductionType : DivergentBeam (invalid)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInLambda");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
   }
 
   void test_IvsLam_direct_beam() {
@@ -146,41 +176,22 @@ public:
     // No monitor normalization
     // Direct beam normalization: 2-3
     // No transmission correction
-    // Processing instructions : 1
+    // Processing instructions : 2
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1");
+    setupAlgorithm(alg, 1.5, 15.0, "2");
     alg.setPropertyValue("RegionOfDirectBeam", "2-3");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
-    // Y counts, should be 0.5 = 1 (from detector ws) / 2 (from direct beam)
-    TS_ASSERT_DELTA(outLam->y(0)[0], 0.5, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.4991, 0.0001);
   }
 
   void test_bad_direct_beam() {
     // Direct beam : 4-5
-
-    auto alg = AlgorithmManager::Instance().create("ReflectometryReductionOne");
-    alg->setChild(true);
-    alg->initialize();
-    alg->setProperty("InputWorkspace", m_multiDetectorWS);
-    alg->setProperty("WavelengthMin", 1.5);
-    alg->setProperty("WavelengthMax", 15.0);
-    alg->setPropertyValue("ProcessingInstructions", "1");
-    alg->setPropertyValue("OutputWorkspace", "IvsQ");
-    alg->setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg->setPropertyValue("RegionOfDirectBeam", "4-5");
-    TS_ASSERT_THROWS_ANYTHING(alg->execute());
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setPropertyValue("RegionOfDirectBeam", "4-5");
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
   }
 
   void test_IvsLam_no_monitors() {
@@ -195,20 +206,10 @@ public:
     // MonitorBackgroundWavelengthMax : Not given
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
+    setupAlgorithm(alg, 1.5, 15.0, "1");
     alg.setProperty("I0MonitorIndex", "0");
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT(outLam->x(0)[0] >= 1.5);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // No monitors considered because MonitorBackgroundWavelengthMin
@@ -223,7 +224,7 @@ public:
     // Monitor normalization
     // No direct beam normalization
     // No transmission correction
-    // Processing instructions : 1
+    // Processing instructions : 2
 
     // I0MonitorIndex: 0
     // MonitorBackgroundWavelengthMin : 0.5
@@ -233,27 +234,13 @@ public:
     // Modify counts in monitor (only for this test)
     // Modify counts only for range that will be fitted
     auto inputWS = m_multiDetectorWS;
-    auto &Y = inputWS->mutableY(0);
+    auto &Y = m_multiDetectorWS->mutableY(0);
     std::fill(Y.begin(), Y.begin() + 2, 1.0);
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", inputWS);
-    alg.setProperty("WavelengthMin", 0.0);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("I0MonitorIndex", "0");
-    alg.setProperty("MonitorBackgroundWavelengthMin", 0.5);
-    alg.setProperty("MonitorBackgroundWavelengthMax", 3.0);
-    alg.setProperty("NormalizeByIntegratedMonitors", "0");
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmMonitorCorrection(alg, 0.0, 15.0, "2", inputWS, false);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 10);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 10);
     TS_ASSERT(outLam->x(0)[0] >= 0.0);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Expected values are 2.4996 = 3.15301 (detectors) / 1.26139 (monitors)
@@ -281,25 +268,9 @@ public:
     std::fill(Y.begin(), Y.begin() + 2, 1.0);
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", inputWS);
-    alg.setProperty("WavelengthMin", 0.0);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("I0MonitorIndex", "0");
-    alg.setProperty("MonitorBackgroundWavelengthMin", 0.5);
-    alg.setProperty("MonitorBackgroundWavelengthMax", 3.0);
-    alg.setProperty("NormalizeByIntegratedMonitors", "1");
-    alg.setProperty("MonitorIntegrationWavelengthMin", 1.5);
-    alg.setProperty("MonitorIntegrationWavelengthMax", 15.0);
-    alg.setPropertyValue("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmMonitorCorrection(alg, 0.0, 15.0, "1", inputWS, true);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 16);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 16);
     TS_ASSERT(outLam->x(0)[0] >= 0.0);
     TS_ASSERT(outLam->x(0)[7] <= 15.0);
     // Expected values are 0.1981 = 2.0000 (detectors) / (1.26139*8) (monitors)
@@ -311,20 +282,10 @@ public:
     // Transmission run is the same as input run
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("FirstTransmissionRun", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "1", m_multiDetectorWS,
+                                         false);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     // Expected values are 1 = m_wavelength / m_wavelength
     TS_ASSERT_DELTA(outLam->y(0)[0], 1.0000, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 1.0000, 0.0001);
@@ -334,49 +295,67 @@ public:
     // Transmission run is the same as input run
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("FirstTransmissionRun", m_multiDetectorWS);
-    alg.setProperty("SecondTransmissionRun", m_multiDetectorWS);
-    alg.setProperty("StartOverlap", 2.5);
-    alg.setProperty("EndOverlap", 3.0);
-    alg.setProperty("Params", "0.1");
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "1", m_multiDetectorWS,
+                                         true);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     // Expected values are 1 = m_wavelength / m_wavelength
     TS_ASSERT_DELTA(outLam->y(0)[0], 1.0000, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 1.0000, 0.0001);
   }
 
+  void test_transmission_correction_with_mapped_spectra() {
+    // Run workspace spectrum numbers are 1,2,3,4.
+    // Transmission workspace has spectrum numbers 2,3,4,5.
+    // Processing instructions 2-3 in the run workspace map to
+    // spectra 3-4, which map to indices 1-2 in the transmission
+    // workspace.
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "2-3",
+                                         m_transmissionWS, true);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.0807, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 0.0802, 0.0001);
+  }
+
+  void test_transmission_correction_with_bad_mapped_spectra() {
+    // Run workspace spectrum numbers are 1,2,3,4.
+    // Transmission workspace has spectrum numbers 2,3,4,5.
+    // Processing instructions 0 in the run workspace maps to
+    // spectrum 1, which doesn't exist in the transmission
+    // workspace.
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "0", m_transmissionWS,
+                                         true);
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
+  }
+
+  void test_transmission_correction_with_different_spectra() {
+    // Run workspace spectrum numbers are 1,2,3,4.  Transmission workspace has
+    // spectrum numbers 2,3,4,5.  Processing instructions 2,3 are used in the
+    // run and transmission workspaces without any mapping i.e. spectra 3-4 in
+    // the run and spectra 4-5 in the transmission workspace are used.
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "2-3",
+                                         m_transmissionWS, true);
+    alg.setProperty("StrictSpectrumChecking", "0");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.0571, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 0.0571, 0.0001);
+  }
+
   void test_exponential_correction() {
     // CorrectionAlgorithm: ExponentialCorrection
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
+    setupAlgorithm(alg, 1.5, 15.0, "2");
     alg.setProperty("CorrectionAlgorithm", "ExponentialCorrection");
     alg.setProperty("C0", 0.2);
     alg.setProperty("C1", 0.1);
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT_DELTA(outLam->y(0)[0], 12.5113, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 23.4290, 0.0001);
   }
@@ -385,44 +364,311 @@ public:
     // CorrectionAlgorithm: PolynomialCorrection
 
     ReflectometryReductionOne2 alg;
-    alg.setChild(true);
-    alg.initialize();
-    alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
+    setupAlgorithm(alg, 1.5, 15.0, "2");
     alg.setProperty("CorrectionAlgorithm", "PolynomialCorrection");
     alg.setProperty("Polynomial", "0.1,0.3,0.5");
-    alg.setPropertyValue("OutputWorkspace", "IvsQ");
-    alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
-    alg.execute();
-    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg);
 
-    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outLam->blocksize(), 14);
     TS_ASSERT_DELTA(outLam->y(0)[0], 0.6093, 0.0001);
     TS_ASSERT_DELTA(outLam->y(0)[7], 0.0514, 0.0001);
   }
 
   void test_IvsQ() {
+    // Test IvsQ workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    MatrixWorkspace_sptr outQ = runAlgorithmQ(alg);
+
+    // X range in outQ
+    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3353, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5962, 0.0001);
+    // Y counts
+    TS_ASSERT_DELTA(outQ->y(0)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(0)[7], 2.0000, 0.0001);
+  }
+
+  void test_IvsQ_multiple_detector_groups() {
+    // Test IvsQ workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2,1+3 (two separate groups)
 
     ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2,1+3");
+    // Run the algorithm. There should be 2 output histograms, one for each
+    // input group. Note that the group order is swapped from the input order
+    // because they are sorted by the first spectrum number in the group,
+    // i.e. as if the input was "1+3,2"
+    MatrixWorkspace_sptr outQ = runAlgorithmQ(alg, 14, 2);
+
+    // X range in outQ
+    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3353, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5961, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(1)[0], 0.3353, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(1)[7], 0.5962, 0.0001);
+    // Y counts, should be 2.0000 * 2 for first group, 2.0000 * 1 for second.
+    TS_ASSERT_DELTA(outQ->y(0)[0], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(0)[7], 4.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(1)[0], 2.0000, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(1)[7], 2.0000, 0.0001);
+  }
+
+  void test_sum_in_q_with_bad_reduction_type() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInQ
+    // ReductionType : not set (invalid)
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInQ");
+    TS_ASSERT_THROWS_ANYTHING(alg.execute());
+  }
+
+  void test_sum_in_q_divergent_beam() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInQ
+    // ReductionType : DivergentBeam
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->x(0)[0], 1.5338, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[7], 6.5622, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[10], 8.7173, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[17], 13.7457, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 1.8323, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 1.7985, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[10], 2.0212, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[17], 1.9430, 0.0001);
+  }
+
+  void test_sum_in_q_non_flat_sample() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // SummationType : SumInQ
+    // ReductionType : NonFlatSample
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "1");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "NonFlatSample");
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->x(0)[0], 1.5339, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[7], 6.5110, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[10], 8.6440, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[17], 13.6211, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 1.8386, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 1.6622, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[10], 1.9205, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[17], 1.7303, 0.0001);
+  }
+
+  void test_sum_in_q_direct_beam() {
+    // Test IvsLam workspace
+    // No monitor normalization
+    // Direct beam normalization: 2-3
+    // No transmission correction
+    // Processing instructions : 2
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    alg.setPropertyValue("RegionOfDirectBeam", "2-3");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.2911, 0.0001);
+  }
+
+  void test_sum_in_q_monitor_normalization() {
+    // Test IvsLam workspace
+    // Monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2
+    // SummationType : SumInQ
+    // ReductionType : DivergentBeam
+
+    // I0MonitorIndex: 0
+    // MonitorBackgroundWavelengthMin : 0.5
+    // MonitorBackgroundWavelengthMax : 3.0
+    // Normalize by integrated monitors : No
+
+    // Modify counts in monitor (only for this test)
+    // Modify counts only for range that will be fitted
+    auto inputWS = m_multiDetectorWS;
+    auto &Y = m_multiDetectorWS->mutableY(0);
+    std::fill(Y.begin(), Y.begin() + 2, 1.0);
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmMonitorCorrection(alg, 0.0, 15.0, "2", inputWS, false);
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->x(0)[0], 0.1244, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[7], 5.6420, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[10], 8.0067, 0.0001);
+    TS_ASSERT_DELTA(outLam->x(0)[17], 13.5243, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[0], 7.6861, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 1.4879, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[10], 1.5523, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[17], 1.6371, 0.0001);
+  }
+
+  void test_sum_in_q_transmission_correction_run() {
+    // Transmission run is the same as input run
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithmTransmissionCorrection(alg, 1.5, 15.0, "1", m_multiDetectorWS,
+                                         false);
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 0.8015, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 0.5722, 0.0001);
+  }
+
+  void test_sum_in_q_exponential_correction() {
+    // CorrectionAlgorithm: ExponentialCorrection
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    alg.setProperty("CorrectionAlgorithm", "ExponentialCorrection");
+    alg.setProperty("C0", 0.2);
+    alg.setProperty("C1", 0.1);
+    MatrixWorkspace_sptr outLam = runAlgorithmLam(alg, 18);
+
+    TS_ASSERT_DELTA(outLam->y(0)[0], 11.3636, 0.0001);
+    TS_ASSERT_DELTA(outLam->y(0)[7], 17.7963, 0.0001);
+  }
+
+  void test_sum_in_q_IvsQ() {
+    // Test IvsQ workspace
+    // No monitor normalization
+    // No direct beam normalization
+    // No transmission correction
+    // Processing instructions : 2
+
+    ReflectometryReductionOne2 alg;
+    setupAlgorithm(alg, 1.5, 15.0, "2");
+    alg.setProperty("SummationType", "SumInQ");
+    alg.setProperty("ReductionType", "DivergentBeam");
+    alg.setProperty("ThetaIn", 25.0);
+    MatrixWorkspace_sptr outQ = runAlgorithmQ(alg, 18);
+
+    // X range in outQ
+    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3327, 0.0001);
+    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5100, 0.0001);
+    // Y counts
+    TS_ASSERT_DELTA(outQ->y(0)[0], 1.9348, 0.0001);
+    TS_ASSERT_DELTA(outQ->y(0)[7], 2.0204, 0.0001);
+  }
+
+private:
+  // Do standard algorithm setup
+  void setupAlgorithm(ReflectometryReductionOne2 &alg,
+                      const double wavelengthMin, const double wavelengthMax,
+                      const std::string &procInstr) {
     alg.setChild(true);
     alg.initialize();
     alg.setProperty("InputWorkspace", m_multiDetectorWS);
-    alg.setProperty("WavelengthMin", 1.5);
-    alg.setProperty("WavelengthMax", 15.0);
-    alg.setProperty("ProcessingInstructions", "1");
+    alg.setProperty("WavelengthMin", wavelengthMin);
+    alg.setProperty("WavelengthMax", wavelengthMax);
+    alg.setPropertyValue("ProcessingInstructions", procInstr);
     alg.setPropertyValue("OutputWorkspace", "IvsQ");
     alg.setPropertyValue("OutputWorkspaceWavelength", "IvsLam");
+  }
+
+  // Do standard algorithm setup for transmission correction
+  void setupAlgorithmTransmissionCorrection(ReflectometryReductionOne2 &alg,
+                                            const double wavelengthMin,
+                                            const double wavelengthMax,
+                                            const std::string &procInstr,
+                                            MatrixWorkspace_sptr transWS,
+                                            const bool multiple_runs) {
+    setupAlgorithm(alg, wavelengthMin, wavelengthMax, procInstr);
+    alg.setProperty("FirstTransmissionRun", transWS);
+    if (multiple_runs) {
+      alg.setProperty("SecondTransmissionRun", transWS);
+      alg.setProperty("StartOverlap", 2.5);
+      alg.setProperty("EndOverlap", 3.0);
+      alg.setProperty("Params", "0.1");
+    }
+  }
+
+  // Do standard algorithm setup for monitor correction
+  void setupAlgorithmMonitorCorrection(ReflectometryReductionOne2 &alg,
+                                       const double wavelengthMin,
+                                       const double wavelengthMax,
+                                       const std::string &procInstr,
+                                       MatrixWorkspace_sptr inputWS,
+                                       const bool integrate) {
+    setupAlgorithm(alg, wavelengthMin, wavelengthMax, procInstr);
+    alg.setProperty("InputWorkspace", inputWS);
+    alg.setProperty("I0MonitorIndex", "0");
+    alg.setProperty("MonitorBackgroundWavelengthMin", 0.5);
+    alg.setProperty("MonitorBackgroundWavelengthMax", 3.0);
+    if (integrate) {
+      alg.setProperty("NormalizeByIntegratedMonitors", "1");
+      alg.setProperty("MonitorIntegrationWavelengthMin", 1.5);
+      alg.setProperty("MonitorIntegrationWavelengthMax", 15.0);
+    } else {
+      alg.setProperty("NormalizeByIntegratedMonitors", "0");
+    }
+  }
+
+  // Do standard algorithm execution and checks and return IvsLam
+  MatrixWorkspace_sptr runAlgorithmLam(ReflectometryReductionOne2 &alg,
+                                       const size_t blocksize = 14,
+                                       const size_t nHist = 1) {
     alg.execute();
+
+    MatrixWorkspace_sptr outLam = alg.getProperty("OutputWorkspaceWavelength");
+    TS_ASSERT(outLam);
+    TS_ASSERT_EQUALS(outLam->getNumberHistograms(), nHist);
+    TS_ASSERT_EQUALS(outLam->blocksize(), blocksize);
+
+    return outLam;
+  }
+
+  // Do standard algorithm execution and checks and return IvsQ
+  MatrixWorkspace_sptr runAlgorithmQ(ReflectometryReductionOne2 &alg,
+                                     const size_t blocksize = 14,
+                                     const size_t nHist = 1) {
+    alg.execute();
+
     MatrixWorkspace_sptr outQ = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(outQ);
+    TS_ASSERT_EQUALS(outQ->getNumberHistograms(), nHist);
+    TS_ASSERT_EQUALS(outQ->blocksize(), blocksize);
 
-    TS_ASSERT_EQUALS(outQ->getNumberHistograms(), 1);
-    TS_ASSERT_EQUALS(outQ->blocksize(), 14);
-    // X range in outQ
-    TS_ASSERT_DELTA(outQ->x(0)[0], 0.3353, 0.0001);
-    TS_ASSERT_DELTA(outQ->x(0)[7], 0.5962, 0.0001);
+    return outQ;
   }
 };
 
diff --git a/Framework/Crystal/CMakeLists.txt b/Framework/Crystal/CMakeLists.txt
index 6b15b1a513e137ea7be9535421f29014551583a4..47d587b5026f0afea3c7690abb30d8b2c47b535f 100644
--- a/Framework/Crystal/CMakeLists.txt
+++ b/Framework/Crystal/CMakeLists.txt
@@ -12,6 +12,7 @@ set ( SRC_FILES
 	src/CombinePeaksWorkspaces.cpp
 	src/CompositeCluster.cpp
 	src/ConnectedComponentLabeling.cpp
+	src/CountReflections.cpp
 	src/DiffPeaksWorkspaces.cpp
 	src/DisjointElement.cpp
 	src/FilterPeaks.cpp
@@ -43,6 +44,7 @@ set ( SRC_FILES
 	src/PeakHKLErrors.cpp
 	src/PeakIntegration.cpp
 	src/PeakIntensityVsRadius.cpp
+	src/PeakStatisticsTools.cpp
 	src/PeaksInRegion.cpp
 	src/PeaksIntersection.cpp
 	src/PeaksOnSurface.cpp
@@ -84,6 +86,7 @@ set ( INC_FILES
 	inc/MantidCrystal/CombinePeaksWorkspaces.h
 	inc/MantidCrystal/CompositeCluster.h
 	inc/MantidCrystal/ConnectedComponentLabeling.h
+	inc/MantidCrystal/CountReflections.h
 	inc/MantidCrystal/DiffPeaksWorkspaces.h
 	inc/MantidCrystal/DisjointElement.h
 	inc/MantidCrystal/FilterPeaks.h
@@ -117,6 +120,7 @@ set ( INC_FILES
 	inc/MantidCrystal/PeakHKLErrors.h
 	inc/MantidCrystal/PeakIntegration.h
 	inc/MantidCrystal/PeakIntensityVsRadius.h
+	inc/MantidCrystal/PeakStatisticsTools.h
 	inc/MantidCrystal/PeaksInRegion.h
 	inc/MantidCrystal/PeaksIntersection.h
 	inc/MantidCrystal/PeaksOnSurface.h
@@ -156,7 +160,7 @@ set ( TEST_FILES
 	CombinePeaksWorkspacesTest.h
 	CompositeClusterTest.h
 	ConnectedComponentLabelingTest.h
-	DiffPeaksWorkspacesTest.h
+        DiffPeaksWorkspacesTest.h
 	DisjointElementTest.h
 	FilterPeaksTest.h
 	FindClusterFacesTest.h
@@ -186,6 +190,7 @@ set ( TEST_FILES
 	PeakHKLErrorsTest.h
 	PeakIntegrationTest.h
 	PeakIntensityVsRadiusTest.h
+	PeakStatisticsToolsTest.h
 	PeaksInRegionTest.h
 	PeaksOnSurfaceTest.h
 	PredictFractionalPeaksTest.h
diff --git a/Framework/Crystal/inc/MantidCrystal/CountReflections.h b/Framework/Crystal/inc/MantidCrystal/CountReflections.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf1a72a7f0a77e00faf7172980ead02471873238
--- /dev/null
+++ b/Framework/Crystal/inc/MantidCrystal/CountReflections.h
@@ -0,0 +1,60 @@
+#ifndef MANTID_CRYSTAL_COUNTREFLECTIONS_H_
+#define MANTID_CRYSTAL_COUNTREFLECTIONS_H_
+
+#include "MantidAPI/Algorithm.h"
+#include "MantidCrystal/PeakStatisticsTools.h"
+#include "MantidDataObjects/PeaksWorkspace.h"
+#include "MantidGeometry/Crystal/PointGroup.h"
+#include "MantidKernel/V3D.h"
+
+namespace Mantid {
+namespace Crystal {
+
+/** CountReflections
+
+  This algorithm takes a PeaksWorkspace and calculates statistics that are
+  based on point group symmetry and do not depend on intensities. For those
+  statistics look at SortHKL.
+
+  Copyright &copy; 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+class DLLExport CountReflections : public API::Algorithm {
+public:
+  const std::string name() const override;
+  int version() const override;
+  const std::string category() const override;
+  const std::string summary() const override;
+
+private:
+  void init() override;
+  void exec() override;
+
+  API::IPeaksWorkspace_sptr getPeaksWorkspace(
+      const DataObjects::PeaksWorkspace_sptr &templateWorkspace,
+      const PeakStatisticsTools::UniqueReflectionCollection &reflections,
+      const Geometry::PointGroup_sptr &pointGroup) const;
+};
+
+} // namespace Crystal
+} // namespace Mantid
+
+#endif /* MANTID_CRYSTAL_COUNTREFLECTIONS_H_ */
diff --git a/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h b/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h
index fd1907f08b2330c90b9ef81c5194bf9e49834aaa..3a874af400b8fb7b55489f0e9b82fbff5acf1a14 100644
--- a/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h
+++ b/Framework/Crystal/inc/MantidCrystal/FindSXPeaks.h
@@ -9,6 +9,9 @@
 #include "MantidAPI/SpectrumInfo.h"
 #include "MantidDataObjects/PeaksWorkspace.h"
 
+#include <unordered_map>
+#include <vector>
+
 namespace Mantid {
 namespace Crystal {
 
@@ -233,6 +236,10 @@ private:
   void init() override;
   //
   void exec() override;
+  // Calculates/returns the average phi value of the detector(s)
+  double calculatePhi(
+      const std::unordered_map<size_t, std::vector<detid_t>> &detectorMapping,
+      const API::SpectrumInfo &spectrumInfo, size_t wsIndex);
   //
   void reducePeakList(const peakvector &);
   /// The value in X to start the search from
@@ -240,9 +247,9 @@ private:
   /// The value in X to finish the search at
   double m_MaxRange;
   /// The spectrum to start the integration from
-  size_t m_MinSpec;
+  size_t m_MinWsIndex;
   /// The spectrum to finish the integration at
-  size_t m_MaxSpec;
+  size_t m_MaxWsIndex;
   // The peaks workspace that contains the peaks information.
   Mantid::DataObjects::PeaksWorkspace_sptr m_peaks;
 };
diff --git a/Framework/Crystal/inc/MantidCrystal/PeakStatisticsTools.h b/Framework/Crystal/inc/MantidCrystal/PeakStatisticsTools.h
new file mode 100644
index 0000000000000000000000000000000000000000..29e46581d4ca976a058354f0bdb0f5446da117a1
--- /dev/null
+++ b/Framework/Crystal/inc/MantidCrystal/PeakStatisticsTools.h
@@ -0,0 +1,169 @@
+#ifndef MANTID_CRYSTAL_PEAKSTATISTICSTOOLS_H_
+#define MANTID_CRYSTAL_PEAKSTATISTICSTOOLS_H_
+
+#include "MantidDataObjects/Peak.h"
+
+#include "MantidGeometry/Crystal/PointGroup.h"
+#include "MantidGeometry/Crystal/ReflectionCondition.h"
+#include "MantidGeometry/Crystal/UnitCell.h"
+
+#include "MantidKernel/V3D.h"
+
+namespace Mantid {
+namespace Crystal {
+namespace PeakStatisticsTools {
+/**
+ * \class UniqueReflection
+ *
+ * This class is a small helper for SortHKL to hold Peak-objects that
+ * belong to the same family of reflections.
+ *
+ * It has methods to return the intensities and sigmas of the contained
+ * Peak-objects as vectors. Furthermore there is a function that removes
+ * outliers based on the intensities/sigmas.
+ *
+ *
+ */
+class DLLExport UniqueReflection {
+public:
+  UniqueReflection(const Kernel::V3D &hkl) : m_hkl(hkl), m_peaks() {}
+
+  const Kernel::V3D &getHKL() const { return m_hkl; }
+
+  void addPeak(const DataObjects::Peak &peak) { m_peaks.push_back(peak); }
+  const std::vector<DataObjects::Peak> &getPeaks() const { return m_peaks; }
+  size_t count() const { return m_peaks.size(); }
+
+  std::vector<double> getIntensities() const;
+  std::vector<double> getSigmas() const;
+
+  UniqueReflection removeOutliers(double sigmaCritical = 3.0) const;
+  void setPeaksIntensityAndSigma(double intensity, double sigma);
+
+private:
+  Kernel::V3D m_hkl;
+  std::vector<DataObjects::Peak> m_peaks;
+};
+
+/**
+ * \class UniqueReflectionCollection
+ *
+ * This class computes all possible unique reflections within the
+ * specified d-limits, given a certain unit cell, lattice centering
+ * and point group. The cost of this computation depends directly
+ * on the size of the unit cell (larger cells result in more
+ * reflections) and to some extent also on the symmetry (higher symmetry
+ * results in more matrix operations).
+ *
+ * After adding observations using addObservations, various reflection-
+ * counts can be obtained, for example to calculate redundancy or
+ * completeness of the observations.
+ *
+ */
+class DLLExport UniqueReflectionCollection {
+public:
+  UniqueReflectionCollection(
+      const Geometry::UnitCell &cell, const std::pair<double, double> &dLimits,
+      const Geometry::PointGroup_sptr &pointGroup,
+      const Geometry::ReflectionCondition_sptr &centering);
+
+  ~UniqueReflectionCollection() = default;
+
+  void addObservations(const std::vector<DataObjects::Peak> &peaks);
+  UniqueReflection getReflection(const Kernel::V3D &hkl) const;
+
+  size_t getUniqueReflectionCount() const;
+  size_t getObservedUniqueReflectionCount(size_t moreThan = 0) const;
+  std::vector<Kernel::V3D> getUnobservedUniqueReflections() const;
+
+  size_t getObservedReflectionCount() const;
+
+  const std::map<Kernel::V3D, UniqueReflection> &getReflections() const;
+
+protected:
+  /// Alternative constructor for testing purposes, no validation is performed.
+  UniqueReflectionCollection(
+      const std::map<Kernel::V3D, UniqueReflection> &reflections,
+      const Geometry::PointGroup_sptr &pointGroup)
+      : m_reflections(reflections), m_pointgroup(pointGroup) {}
+
+private:
+  std::map<Kernel::V3D, UniqueReflection> m_reflections;
+  Geometry::PointGroup_sptr m_pointgroup;
+};
+
+/**
+ * \class PeaksStatistics
+ *
+ * The PeaksStatistics class is a small helper class that is used
+ * in SortHKL. It takes a UniqueReflectionCollection and calculates
+ * a few data set quality indicators such as Rmerge and Rpim.
+ *
+ * Do not rely on this class to exist forever, parts of it may change
+ * or the entire class may disappear over time.
+ */
+class DLLExport PeaksStatistics {
+public:
+  explicit PeaksStatistics(const UniqueReflectionCollection &reflections)
+      : m_measuredReflections(0), m_uniqueReflections(0), m_completeness(0.0),
+        m_redundancy(0.0), m_rMerge(0.0), m_rPim(0.0), m_meanIOverSigma(0.0),
+        m_dspacingMin(0.0), m_dspacingMax(0.0), m_chiSquared(0.0), m_peaks() {
+    m_peaks.reserve(reflections.getObservedReflectionCount());
+    calculatePeaksStatistics(reflections.getReflections());
+  }
+
+  /// Total number of observed reflections - no symmetry is taken into
+  /// account for this.
+  int m_measuredReflections;
+
+  /// Number of unique reflections. This counts each reflection family once,
+  /// according to the point group.
+  int m_uniqueReflections;
+
+  /// Fraction of observed unique reflections in the resolution range defined
+  /// by d_min and d_max.
+  double m_completeness;
+
+  /// Average number of observations for a unique reflection.
+  double m_redundancy;
+
+  /// Merging R-factor, R_merge, sometimes also called R_sym. This is a basic
+  /// measure for how well the intensities of symmetry equivalent reflections
+  /// agree with each other.
+  double m_rMerge;
+
+  /// Precision indicating R-factor (R_{p.i.m}). Also a measurement of agreement
+  /// between equivalent reflections, but without some of the weeknesses of
+  /// R_merge.
+  double m_rPim;
+
+  /// Average signal to noise ratio in the reflections.
+  double m_meanIOverSigma;
+
+  /// Lower d-spacing limit in the data set, sometimes referred to as upper
+  /// resolution limit.
+  double m_dspacingMin;
+
+  /// Upper d-spacing limit in the data set.
+  double m_dspacingMax;
+
+  double m_chiSquared;
+  std::vector<DataObjects::Peak> m_peaks;
+
+private:
+  void calculatePeaksStatistics(
+      const std::map<Kernel::V3D, UniqueReflection> &uniqueReflections);
+
+  double getIOverSigmaSum(const std::vector<double> &sigmas,
+                          const std::vector<double> &intensities) const;
+  double getRMS(const std::vector<double> &data) const;
+
+  std::pair<double, double>
+  getDSpacingLimits(const std::vector<DataObjects::Peak> &peaks) const;
+};
+
+} // namespace PeakStatisticsTools
+} // namespace Crystal
+} // namespace Mantid
+
+#endif /* MANTID_CRYSTAL_PEAKSTATISTICSTOOLS_H_ */
diff --git a/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h b/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h
index 8ad8a6f555f5e52c2a5b579246a1a4dab057544f..922a6ad9ab78a4b6f2b7365920979482134d4df2 100644
--- a/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h
+++ b/Framework/Crystal/inc/MantidCrystal/PredictPeaks.h
@@ -2,13 +2,17 @@
 #define MANTID_CRYSTAL_PREDICTPEAKS_H_
 
 #include "MantidAPI/Algorithm.h"
+#include "MantidAPI/DetectorSearcher.h"
 #include "MantidDataObjects/PeaksWorkspace.h"
 #include "MantidGeometry/Crystal/ReflectionCondition.h"
 #include "MantidKernel/System.h"
+#include "MantidKernel/NearestNeighbours.h"
 #include <MantidGeometry/Crystal/OrientedLattice.h>
 #include <MantidGeometry/Crystal/StructureFactorCalculator.h>
 #include "MantidKernel/Matrix.h"
 
+#include <tuple>
+
 namespace Mantid {
 namespace Crystal {
 
@@ -63,6 +67,11 @@ private:
                                 const Kernel::DblMatrix &goniometerMatrix);
 
 private:
+  /// Get the predicted detector direction from Q
+  std::tuple<Kernel::V3D, double>
+  getPeakParametersFromQ(const Kernel::V3D &q) const;
+  /// Cache the reference frame and beam direction from the instrument
+  void setReferenceFrameAndBeamDirection();
   void logNumberOfPeaksFound(size_t allowedPeakCount) const;
 
   /// Number of edge pixels with no peaks
@@ -70,11 +79,16 @@ private:
 
   /// Reflection conditions possible
   std::vector<Mantid::Geometry::ReflectionCondition_sptr> m_refConds;
-
+  /// Detector search cache for fast look-up of detectors
+  std::unique_ptr<API::DetectorSearcher> m_detectorCacheSearch;
   /// Run number of input workspace
   int m_runNumber;
   /// Instrument reference
   Geometry::Instrument_const_sptr m_inst;
+  /// Reference frame for the instrument
+  boost::shared_ptr<const Geometry::ReferenceFrame> m_refFrame;
+  /// Direction of the beam for this instrument
+  Kernel::V3D m_refBeamDir;
   /// Output peaks workspace
   Mantid::DataObjects::PeaksWorkspace_sptr m_pw;
   Geometry::StructureFactorCalculator_sptr m_sfCalculator;
diff --git a/Framework/Crystal/inc/MantidCrystal/SortHKL.h b/Framework/Crystal/inc/MantidCrystal/SortHKL.h
index 4f39388cd25228d174b690ea04597dfb1feb954f..f2a72e51f2a684dfc686d3f140d3be196c3d979d 100644
--- a/Framework/Crystal/inc/MantidCrystal/SortHKL.h
+++ b/Framework/Crystal/inc/MantidCrystal/SortHKL.h
@@ -7,6 +7,8 @@
 #include "MantidAPI/IPeaksWorkspace_fwd.h"
 #include "MantidAPI/ITableWorkspace_fwd.h"
 
+#include "MantidCrystal/PeakStatisticsTools.h"
+
 #include "MantidDataObjects/Peak.h"
 #include "MantidDataObjects/PeaksWorkspace.h"
 
@@ -19,88 +21,6 @@
 namespace Mantid {
 namespace Crystal {
 
-/**
- * \class UniqueReflection
- *
- * This class is a small helper for SortHKL to hold Peak-objects that
- * belong to the same family of reflections.
- *
- * It has methods to return the intensities and sigmas of the contained
- * Peak-objects as vectors. Furthermore there is a function that removes
- * outliers based on the intensities/sigmas.
- *
- *
- */
-class DLLExport UniqueReflection {
-public:
-  UniqueReflection(const Kernel::V3D &hkl) : m_hkl(hkl), m_peaks() {}
-
-  const Kernel::V3D &getHKL() const { return m_hkl; }
-
-  void addPeak(const DataObjects::Peak &peak) { m_peaks.push_back(peak); }
-  const std::vector<DataObjects::Peak> &getPeaks() const { return m_peaks; }
-  size_t count() const { return m_peaks.size(); }
-
-  std::vector<double> getIntensities() const;
-  std::vector<double> getSigmas() const;
-
-  void removeOutliers(double sigmaCritical = 3.0);
-  void setPeaksIntensityAndSigma(double intensity, double sigma);
-
-private:
-  Kernel::V3D m_hkl;
-  std::vector<DataObjects::Peak> m_peaks;
-};
-
-/**
- * \class PeaksStatistics
- *
- * The PeaksStatistics class is a small helper class for SortHKL.
- *
- * During construction, a number of statistical indicators is calculated,
- * using the map passed to the constructor.
- *
- * Please note that the map is modified during the calculation and becomes
- * essentially unusable after that, but that is not a problem since the map
- * is currently not meant to be stored anywhere. This class may eventually
- * disappear and might end up being re-implemented in a more general scope.
- */
-class DLLExport PeaksStatistics {
-public:
-  PeaksStatistics(std::map<Kernel::V3D, UniqueReflection> &uniqueReflections,
-                  size_t totalReflectionCount)
-      : m_measuredReflections(0), m_uniqueReflections(0), m_completeness(0.0),
-        m_redundancy(0.0), m_rMerge(0.0), m_rPim(0.0), m_meanIOverSigma(0.0),
-        m_dspacingMin(0.0), m_dspacingMax(0.0), m_chiSquared(0.0), m_peaks() {
-    m_peaks.reserve(totalReflectionCount);
-    calculatePeaksStatistics(uniqueReflections);
-  }
-
-  int m_measuredReflections;
-  int m_uniqueReflections;
-  double m_completeness;
-  double m_redundancy;
-  double m_rMerge;
-  double m_rPim;
-  double m_meanIOverSigma;
-  double m_dspacingMin;
-  double m_dspacingMax;
-  double m_chiSquared;
-
-  std::vector<DataObjects::Peak> m_peaks;
-
-private:
-  void calculatePeaksStatistics(
-      std::map<Kernel::V3D, UniqueReflection> &uniqueReflections);
-
-  double getIOverSigmaSum(const std::vector<double> &sigmas,
-                          const std::vector<double> &intensities) const;
-  double getRMS(const std::vector<double> &data) const;
-
-  std::pair<double, double>
-  getDSpacingLimits(const std::vector<DataObjects::Peak> &peaks) const;
-};
-
 /** Save a PeaksWorkspace to a Gsas-style ASCII .hkl file.
  *
  * @author Vickie Lynch, SNS
@@ -133,7 +53,7 @@ private:
   std::vector<DataObjects::Peak>
   getNonZeroPeaks(const std::vector<DataObjects::Peak> &inputPeaks) const;
 
-  std::map<Kernel::V3D, UniqueReflection>
+  PeakStatisticsTools::UniqueReflectionCollection
   getUniqueReflections(const std::vector<DataObjects::Peak> &peaks,
                        const Geometry::UnitCell &cell) const;
 
@@ -144,14 +64,10 @@ private:
   getDLimits(const std::vector<DataObjects::Peak> &peaks,
              const Geometry::UnitCell &cell) const;
 
-  std::map<Kernel::V3D, UniqueReflection> getPossibleUniqueReflections(
-      const Geometry::UnitCell &cell, const std::pair<double, double> &dLimits,
-      const Geometry::PointGroup_sptr &pointGroup,
-      const Geometry::ReflectionCondition_sptr &centering) const;
-
   API::ITableWorkspace_sptr getStatisticsTable(const std::string &name) const;
-  void insertStatisticsIntoTable(const API::ITableWorkspace_sptr &table,
-                                 const PeaksStatistics &statistics) const;
+  void insertStatisticsIntoTable(
+      const API::ITableWorkspace_sptr &table,
+      const PeakStatisticsTools::PeaksStatistics &statistics) const;
 
   DataObjects::PeaksWorkspace_sptr getOutputPeaksWorkspace(
       const DataObjects::PeaksWorkspace_sptr &inputPeaksWorkspace) const;
diff --git a/Framework/Crystal/src/CountReflections.cpp b/Framework/Crystal/src/CountReflections.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a65b6e4369e3e8392d5c5fbcba389d1f345961ac
--- /dev/null
+++ b/Framework/Crystal/src/CountReflections.cpp
@@ -0,0 +1,217 @@
+#include "MantidCrystal/CountReflections.h"
+#include "MantidCrystal/PeakStatisticsTools.h"
+
+#include "MantidAPI/Sample.h"
+#include "MantidAPI/WorkspaceProperty.h"
+
+#include "MantidDataObjects/PeaksWorkspace.h"
+
+#include "MantidGeometry/Crystal/PointGroupFactory.h"
+#include "MantidGeometry/Crystal/ReflectionCondition.h"
+#include "MantidGeometry/Crystal/OrientedLattice.h"
+
+#include "MantidKernel/make_unique.h"
+#include "MantidKernel/ListValidator.h"
+
+namespace Mantid {
+namespace Crystal {
+
+using Mantid::Kernel::Direction;
+
+using namespace Mantid::API;
+using namespace Mantid::DataObjects;
+using namespace Mantid::Geometry;
+using namespace Mantid::Kernel;
+
+// Register the algorithm into the AlgorithmFactory
+DECLARE_ALGORITHM(CountReflections)
+
+//----------------------------------------------------------------------------------------------
+
+/// Algorithms name for identification. @see Algorithm::name
+const std::string CountReflections::name() const { return "CountReflections"; }
+
+/// Algorithm's version for identification. @see Algorithm::version
+int CountReflections::version() const { return 1; }
+
+/// Algorithm's category for identification. @see Algorithm::category
+const std::string CountReflections::category() const {
+  return "Crystal\\Peaks";
+}
+
+/// Algorithm's summary for use in the GUI and help. @see Algorithm::summary
+const std::string CountReflections::summary() const {
+  return "Calculates statistics for a PeaksWorkspace based on symmetry and "
+         "counting reflections.";
+}
+
+//----------------------------------------------------------------------------------------------
+/** Initialize the algorithm's properties.
+ */
+void CountReflections::init() {
+  declareProperty(Kernel::make_unique<WorkspaceProperty<PeaksWorkspace>>(
+                      "InputWorkspace", "", Direction::Input),
+                  "A workspace with peaks to calculate statistics for. Sample "
+                  "with valid UB-matrix is required.");
+
+  auto centeringSymbols = getAllReflectionConditionSymbols();
+  declareProperty("LatticeCentering", centeringSymbols[0],
+                  boost::make_shared<StringListValidator>(centeringSymbols),
+                  "Lattice centering of the cell.");
+
+  auto pointGroups = PointGroupFactory::Instance().getAllPointGroupSymbols();
+  declareProperty(
+      "PointGroup", "1", boost::make_shared<StringListValidator>(pointGroups),
+      "Point group symmetry for completeness and redundancy calculations.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "MinDSpacing", 1.0, Direction::Input),
+                  "Minimum d-spacing for completeness calculation.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "MaxDSpacing", 100.0, Direction::Input),
+                  "Maximum d-spacing for completeness calculation.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<int>>(
+                      "UniqueReflections", 0, Direction::Output),
+                  "Number of unique reflections in data set.");
+
+  declareProperty(
+      Kernel::make_unique<PropertyWithValue<double>>("Completeness", 0.0,
+                                                     Direction::Output),
+      "Completeness of the data set as a fraction between 0 and 1.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "Redundancy", 0.0, Direction::Output),
+                  "Average redundancy in data set, depending on point group.");
+
+  declareProperty(Kernel::make_unique<PropertyWithValue<double>>(
+                      "MultiplyObserved", 0.0, Direction::Output),
+                  "Fraction of reflections with more than one observation.");
+
+  declareProperty(
+      Kernel::make_unique<WorkspaceProperty<IPeaksWorkspace>>(
+          "MissingReflectionsWorkspace", "", Direction::Output,
+          PropertyMode::Optional),
+      "Reflections in specified d-range that are missing in input workspace.");
+}
+
+//----------------------------------------------------------------------------------------------
+/** Execute the algorithm.
+ */
+void CountReflections::exec() {
+  double dMin = getProperty("MinDSpacing");
+  double dMax = getProperty("MaxDSpacing");
+
+  PointGroup_sptr pointGroup =
+      PointGroupFactory::Instance().createPointGroup(getProperty("PointGroup"));
+
+  ReflectionCondition_sptr centering =
+      getReflectionConditionBySymbol(getProperty("LatticeCentering"));
+
+  PeaksWorkspace_sptr inputPeaksWorkspace = getProperty("InputWorkspace");
+
+  UnitCell cell = inputPeaksWorkspace->sample().getOrientedLattice();
+
+  PeakStatisticsTools::UniqueReflectionCollection reflections(
+      cell, std::make_pair(dMin, dMax), pointGroup, centering);
+
+  auto peaks = inputPeaksWorkspace->getPeaks();
+  reflections.addObservations(peaks);
+
+  double possibleUniqueReflections =
+      static_cast<double>(reflections.getUniqueReflectionCount());
+
+  size_t observedUniqueReflections =
+      reflections.getObservedUniqueReflectionCount();
+
+  double observedUniqueReflectionsD =
+      static_cast<double>(observedUniqueReflections);
+
+  size_t totalReflections = reflections.getObservedReflectionCount();
+
+  if (peaks.size() > totalReflections) {
+    g_log.information() << "There are " << (peaks.size() - totalReflections)
+                        << " peaks in the input workspace that fall outside "
+                           "the resolution limit and are not considered for "
+                           "the calculations." << std::endl;
+  }
+
+  double multiplyObservedReflections =
+      static_cast<double>(reflections.getObservedUniqueReflectionCount(1));
+
+  setProperty("UniqueReflections", static_cast<int>(observedUniqueReflections));
+  setProperty("Completeness",
+              observedUniqueReflectionsD / possibleUniqueReflections);
+  setProperty("Redundancy", static_cast<double>(totalReflections) /
+                                observedUniqueReflectionsD);
+  setProperty("MultiplyObserved",
+              multiplyObservedReflections / observedUniqueReflectionsD);
+
+  IPeaksWorkspace_sptr outputWorkspace =
+      getPeaksWorkspace(inputPeaksWorkspace, reflections, pointGroup);
+
+  if (outputWorkspace) {
+    setProperty("MissingReflectionsWorkspace", outputWorkspace);
+  }
+}
+
+/**
+ * @brief CountReflections::getPeaksWorkspace
+ *
+ * This method expands the missing unique reflections to all reflections,
+ * so that for example (001) would yield (001) and (00-1) for point group -1.
+ *
+ * Then these reflections are translated into peaks and put into the output-
+ * workspace. This method could at some point probably move closer to (or into)
+ * UniqueReflectionCollection.
+ *
+ * @param templateWorkspace :: Input workspace to clone if necessary.
+ * @param reflections :: Vector of unique reflections.
+ * @param pointGroup :: Point group to expand unique reflections.
+ * @return :: PeaksWorkspace with missing reflections.
+ */
+IPeaksWorkspace_sptr CountReflections::getPeaksWorkspace(
+    const PeaksWorkspace_sptr &templateWorkspace,
+    const PeakStatisticsTools::UniqueReflectionCollection &reflections,
+    const PointGroup_sptr &pointGroup) const {
+  std::string outputWorkspaceName =
+      getPropertyValue("MissingReflectionsWorkspace");
+
+  if (outputWorkspaceName.empty()) {
+    return IPeaksWorkspace_sptr();
+  }
+
+  IPeaksWorkspace_sptr rawOutputPeaksWorkspace =
+      getProperty("MissingReflectionsWorkspace");
+
+  PeaksWorkspace_sptr outputPeaksWorkspace =
+      boost::dynamic_pointer_cast<PeaksWorkspace>(rawOutputPeaksWorkspace);
+
+  if (outputPeaksWorkspace != templateWorkspace) {
+    outputPeaksWorkspace = templateWorkspace->clone();
+  }
+
+  const auto &missingPeaks = reflections.getUnobservedUniqueReflections();
+
+  std::vector<Peak> peaks;
+  peaks.reserve(missingPeaks.size() * pointGroup->order());
+
+  for (const auto &reflection : missingPeaks) {
+    auto hkls = pointGroup->getEquivalents(reflection);
+
+    for (const auto &hkl : hkls) {
+      Peak peak;
+      peak.setHKL(hkl);
+
+      peaks.emplace_back(peak);
+    }
+  }
+
+  outputPeaksWorkspace->getPeaks().swap(peaks);
+
+  return boost::static_pointer_cast<IPeaksWorkspace>(outputPeaksWorkspace);
+}
+
+} // namespace Crystal
+} // namespace Mantid
diff --git a/Framework/Crystal/src/FindSXPeaks.cpp b/Framework/Crystal/src/FindSXPeaks.cpp
index 4178d8db8c8fa9fe78bf8c4eae9adcc66acd7abc..3b3e6573f716485537d342bbcfff2a04ea89547a 100644
--- a/Framework/Crystal/src/FindSXPeaks.cpp
+++ b/Framework/Crystal/src/FindSXPeaks.cpp
@@ -3,11 +3,51 @@
 //----------------------------------------------------------------------
 #include "MantidCrystal/FindSXPeaks.h"
 #include "MantidAPI/HistogramValidator.h"
-#include "MantidKernel/VectorHelper.h"
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/WorkspaceUnitValidator.h"
+#include "MantidGeometry/Instrument/DetectorGroup.h"
+#include "MantidIndexing/IndexInfo.h"
 #include "MantidKernel/BoundedValidator.h"
+#include "MantidKernel/CompositeValidator.h"
+
+#include <unordered_map>
+#include <vector>
 
 using namespace Mantid::DataObjects;
 
+namespace {
+// Anonymous namespace
+using namespace Mantid;
+using WsIndexToDetIds = std::unordered_map<size_t, std::vector<detid_t>>;
+
+WsIndexToDetIds mapDetectorsToWsIndexes(const API::DetectorInfo &detectorInfo,
+                                        const detid2index_map &mapping) {
+  const auto &detectorIds = detectorInfo.detectorIDs();
+  WsIndexToDetIds indexToDetMapping;
+
+  indexToDetMapping.reserve(detectorIds.size());
+  for (const auto detectorID : detectorIds) {
+    auto detMapEntry = mapping.find(detectorID);
+    if (detMapEntry == mapping.end()) {
+      throw std::runtime_error(
+          "Detector ID " + std::to_string(detectorID) +
+          " was not found in the workspace index mapping.");
+    }
+
+    const size_t wsIndex = detMapEntry->second;
+    auto indexMapEntry = indexToDetMapping.find(wsIndex);
+    if (indexMapEntry == indexToDetMapping.end()) {
+      // Create a new vector if one does not exist
+      indexToDetMapping[wsIndex] = std::vector<detid_t>{detectorID};
+    } else {
+      // Otherwise add the detector ID to the current list
+      indexToDetMapping[wsIndex].push_back(detectorID);
+    }
+  }
+  return indexToDetMapping;
+}
+}
+
 namespace Mantid {
 namespace Crystal {
 // Register the class into the algorithm factory
@@ -16,17 +56,23 @@ DECLARE_ALGORITHM(FindSXPeaks)
 using namespace Kernel;
 using namespace API;
 
+// Type def the index to detector mapping
+using WsIndexToDetIds = std::unordered_map<size_t, std::vector<detid_t>>;
+
 FindSXPeaks::FindSXPeaks()
-    : API::Algorithm(), m_MinRange(DBL_MAX), m_MaxRange(-DBL_MAX), m_MinSpec(0),
-      m_MaxSpec(0) {}
+    : API::Algorithm(), m_MinRange(DBL_MAX), m_MaxRange(-DBL_MAX),
+      m_MinWsIndex(0), m_MaxWsIndex(0) {}
 
 /** Initialisation method.
  *
  */
 void FindSXPeaks::init() {
+  auto wsValidation = boost::make_shared<CompositeValidator>();
+  wsValidation->add<HistogramValidator>();
+  wsValidation->add<WorkspaceUnitValidator>("TOF");
+
   declareProperty(make_unique<WorkspaceProperty<>>(
-                      "InputWorkspace", "", Direction::Input,
-                      boost::make_shared<HistogramValidator>()),
+                      "InputWorkspace", "", Direction::Input, wsValidation),
                   "The name of the Workspace2D to take as input");
   declareProperty("RangeLower", EMPTY_DBL(),
                   "The X value to search from (default 0)");
@@ -62,33 +108,33 @@ void FindSXPeaks::exec() {
   m_MaxRange = getProperty("RangeUpper");
 
   // the assignment below is intended and if removed will break the unit tests
-  m_MinSpec = static_cast<int>(getProperty("StartWorkspaceIndex"));
-  m_MaxSpec = static_cast<int>(getProperty("EndWorkspaceIndex"));
+  m_MinWsIndex = static_cast<int>(getProperty("StartWorkspaceIndex"));
+  m_MaxWsIndex = static_cast<int>(getProperty("EndWorkspaceIndex"));
   double SB = getProperty("SignalBackground");
 
   // Get the input workspace
   MatrixWorkspace_const_sptr localworkspace = getProperty("InputWorkspace");
 
-  // copy the instrument accross. Cannot generate peaks without doing this
+  // copy the instrument across. Cannot generate peaks without doing this
   // first.
   m_peaks->setInstrument(localworkspace->getInstrument());
 
   size_t numberOfSpectra = localworkspace->getNumberHistograms();
 
   // Check 'StartSpectrum' is in range 0-numberOfSpectra
-  if (m_MinSpec > numberOfSpectra) {
+  if (m_MinWsIndex > numberOfSpectra) {
     g_log.warning("StartSpectrum out of range! Set to 0.");
-    m_MinSpec = 0;
+    m_MinWsIndex = 0;
   }
-  if (m_MinSpec > m_MaxSpec) {
+  if (m_MinWsIndex > m_MaxWsIndex) {
     throw std::invalid_argument(
         "Cannot have StartWorkspaceIndex > EndWorkspaceIndex");
   }
-  if (isEmpty(m_MaxSpec))
-    m_MaxSpec = numberOfSpectra - 1;
-  if (m_MaxSpec > numberOfSpectra - 1 || m_MaxSpec < m_MinSpec) {
+  if (isEmpty(m_MaxWsIndex))
+    m_MaxWsIndex = numberOfSpectra - 1;
+  if (m_MaxWsIndex > numberOfSpectra - 1 || m_MaxWsIndex < m_MinWsIndex) {
     g_log.warning("EndSpectrum out of range! Set to max detector number");
-    m_MaxSpec = numberOfSpectra;
+    m_MaxWsIndex = numberOfSpectra;
   }
   if (m_MinRange > m_MaxRange) {
     g_log.warning("Range_upper is less than Range_lower. Will integrate up to "
@@ -96,23 +142,33 @@ void FindSXPeaks::exec() {
     m_MaxRange = 0.0;
   }
 
-  Progress progress(this, 0, 1, (m_MaxSpec - m_MinSpec + 1));
+  Progress progress(this, 0, 1, (m_MaxWsIndex - m_MinWsIndex + 1));
 
   // Calculate the primary flight path.
   const auto &spectrumInfo = localworkspace->spectrumInfo();
+  const auto &detectorInfo = localworkspace->detectorInfo();
+
+  const WsIndexToDetIds wsIndexToDetIdMap = mapDetectorsToWsIndexes(
+      detectorInfo, localworkspace->getDetectorIDToWorkspaceIndexMap());
 
   peakvector entries;
-  // Reserve 1000 peaks to make later push_back fast for first 1000 peaks, but
-  // unlikely to have more than this.
-  entries.reserve(1000);
-  // Count the peaks so that we can resize the peakvector at the end.
+  entries.reserve(m_MaxWsIndex - m_MinWsIndex);
+  // Count the peaks so that we can resize the peak vector at the end.
   PARALLEL_FOR_IF(Kernel::threadSafe(*localworkspace))
-  for (int i = static_cast<int>(m_MinSpec); i <= static_cast<int>(m_MaxSpec);
-       ++i) {
+  for (int wsIndex = static_cast<int>(m_MinWsIndex);
+       wsIndex <= static_cast<int>(m_MaxWsIndex); ++wsIndex) {
     PARALLEL_START_INTERUPT_REGION
+
+    // If no detector found / monitor, skip onto the next spectrum
+    const size_t wsIndexSize_t = static_cast<size_t>(wsIndex);
+    if (!spectrumInfo.hasDetectors(wsIndexSize_t) ||
+        spectrumInfo.isMonitor(wsIndexSize_t)) {
+      continue;
+    }
+
     // Retrieve the spectrum into a vector
-    const auto &X = localworkspace->x(i);
-    const auto &Y = localworkspace->y(i);
+    const auto &X = localworkspace->x(wsIndex);
+    const auto &Y = localworkspace->y(wsIndex);
 
     // Find the range [min,max]
     auto lowit = (m_MinRange == EMPTY_DBL())
@@ -152,26 +208,11 @@ void FindSXPeaks::exec() {
     double rightBinEdge = *std::next(leftBinPosition);
     double tof = 0.5 * (leftBinEdge + rightBinEdge);
 
-    // If no detector found, skip onto the next spectrum
-    if (!spectrumInfo.hasDetectors(static_cast<size_t>(i))) {
-      continue;
-    }
-    if (!spectrumInfo.hasUniqueDetector(i)) {
-      std::ostringstream sout;
-      sout << "Spectrum at workspace index " << i
-           << " has unsupported number of detectors.";
-      throw std::runtime_error(sout.str());
-    }
-    const auto &det = spectrumInfo.detector(static_cast<size_t>(i));
-
-    double phi = det.getPhi();
-    if (phi < 0) {
-      phi += 2.0 * M_PI;
-    }
-
-    std::vector<int> specs(1, i);
+    const double phi =
+        calculatePhi(wsIndexToDetIdMap, spectrumInfo, wsIndexSize_t);
 
-    SXPeak peak(tof, phi, *maxY, specs, i, spectrumInfo);
+    std::vector<int> specs(1, wsIndex);
+    SXPeak peak(tof, phi, *maxY, specs, wsIndex, spectrumInfo);
     PARALLEL_CRITICAL(entries) { entries.push_back(peak); }
     progress.report();
     PARALLEL_END_INTERUPT_REGION
@@ -185,6 +226,38 @@ void FindSXPeaks::exec() {
   progress.report();
 }
 
+/**
+  * Calculates the average phi value if the workspace contains
+  * multiple detectors per spectrum, or returns the value
+  * of phi if it is a single detector to spectrum mapping.
+  * @param detectorMapping :: The mapping of workspace index to detector id(s)
+  * @param spectrumInfo :: The spectrum info of this workspace
+  * @param wsIndex :: The index to return the phi value of
+  * @return :: The averaged or exact value of phi
+  */
+double FindSXPeaks::calculatePhi(const WsIndexToDetIds &detectorMapping,
+                                 const SpectrumInfo &spectrumInfo,
+                                 size_t wsIndex) {
+  double phi = std::numeric_limits<double>::infinity();
+  const size_t numDetectors = detectorMapping.at(wsIndex).size();
+  const auto &det = spectrumInfo.detector(wsIndex);
+  if (numDetectors == 1) {
+    phi = det.getPhi();
+  } else {
+    // Have to average the value for phi
+    auto detectorGroup = dynamic_cast<const Geometry::DetectorGroup *>(&det);
+    if (!detectorGroup) {
+      throw std::runtime_error("Could not cast to detector group");
+    }
+    detectorGroup->getPhi();
+  }
+
+  if (phi < 0) {
+    phi += 2.0 * M_PI;
+  }
+  return phi;
+}
+
 /**
 Reduce the peak list by removing duplicates
 then convert SXPeaks objects to PeakObjects and add them to the output workspace
diff --git a/Framework/Crystal/src/PeakStatisticsTools.cpp b/Framework/Crystal/src/PeakStatisticsTools.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..de0030952fa6d183812b3ee854bf18f9eddefe6d
--- /dev/null
+++ b/Framework/Crystal/src/PeakStatisticsTools.cpp
@@ -0,0 +1,330 @@
+#include "MantidCrystal/PeakStatisticsTools.h"
+
+#include "MantidGeometry/Crystal/BasicHKLFilters.h"
+#include "MantidGeometry/Crystal/HKLGenerator.h"
+
+#include "MantidKernel/Statistics.h"
+
+#include <boost/make_shared.hpp>
+#include <numeric>
+
+namespace Mantid {
+namespace Crystal {
+namespace PeakStatisticsTools {
+
+using namespace Mantid::DataObjects;
+using namespace Mantid::Geometry;
+using namespace Mantid::Kernel;
+
+/// Returns a vector with the intensities of the Peaks stored in this
+/// reflection.
+std::vector<double> UniqueReflection::getIntensities() const {
+  std::vector<double> intensities;
+  intensities.reserve(m_peaks.size());
+
+  std::transform(
+      m_peaks.begin(), m_peaks.end(), std::back_inserter(intensities),
+      [](const DataObjects::Peak &peak) { return peak.getIntensity(); });
+
+  return intensities;
+}
+
+/// Returns a vector with the intensity sigmas of the Peaks stored in this
+/// reflection.
+std::vector<double> UniqueReflection::getSigmas() const {
+  std::vector<double> sigmas;
+  sigmas.reserve(m_peaks.size());
+
+  std::transform(
+      m_peaks.begin(), m_peaks.end(), std::back_inserter(sigmas),
+      [](const DataObjects::Peak &peak) { return peak.getSigmaIntensity(); });
+
+  return sigmas;
+}
+
+/// Removes peaks whose intensity deviates more than sigmaCritical from the
+/// intensities' mean.
+UniqueReflection UniqueReflection::removeOutliers(double sigmaCritical) const {
+  if (sigmaCritical <= 0.0) {
+    throw std::invalid_argument(
+        "Critical sigma value has to be greater than 0.");
+  }
+
+  UniqueReflection newReflection(m_hkl);
+
+  if (m_peaks.size() > 2) {
+    auto intensities = getIntensities();
+    auto zScores = Kernel::getZscore(intensities);
+
+    for (size_t i = 0; i < zScores.size(); ++i) {
+      if (zScores[i] <= sigmaCritical) {
+        newReflection.addPeak(m_peaks[i]);
+      }
+    }
+  } else {
+    for (auto peak : m_peaks) {
+      newReflection.addPeak(peak);
+    }
+  }
+
+  return newReflection;
+}
+
+/// Sets the intensities and sigmas of all stored peaks to the supplied values.
+void UniqueReflection::setPeaksIntensityAndSigma(double intensity,
+                                                 double sigma) {
+  for (auto &peak : m_peaks) {
+    peak.setIntensity(intensity);
+    peak.setSigmaIntensity(sigma);
+  }
+}
+
+/**
+ * @brief UniqueReflectionCollection::UniqueReflectionCollection
+ *
+ * Takes the supplied parameters to calculate theoretically possible
+ * unique reflections and stores a UniqueReflection for each of those
+ * internally.
+ *
+ * @param cell :: UnitCell of the sample.
+ * @param dLimits :: Resolution limits for the generated reflections.
+ * @param pointGroup :: Point group of the sample.
+ * @param centering :: Lattice centering.
+ */
+UniqueReflectionCollection::UniqueReflectionCollection(
+    const UnitCell &cell, const std::pair<double, double> &dLimits,
+    const PointGroup_sptr &pointGroup,
+    const ReflectionCondition_sptr &centering)
+    : m_reflections(), m_pointgroup(pointGroup) {
+  HKLGenerator generator(cell, dLimits.first);
+  auto dFilter = boost::make_shared<const HKLFilterDRange>(cell, dLimits.first,
+                                                           dLimits.second);
+  auto centeringFilter =
+      boost::make_shared<const HKLFilterCentering>(centering);
+  auto filter = dFilter & centeringFilter;
+
+  // Generate map of UniqueReflection-objects with reflection family as key.
+  for (const auto &hkl : generator) {
+    if (filter->isAllowed(hkl)) {
+      V3D hklFamily = m_pointgroup->getReflectionFamily(hkl);
+      m_reflections.emplace(hklFamily, UniqueReflection(hklFamily));
+    }
+  }
+}
+
+/// Assigns the supplied peaks to the proper UniqueReflection. Peaks for which
+/// the reflection family can not be found are ignored.
+void UniqueReflectionCollection::addObservations(
+    const std::vector<Peak> &peaks) {
+  for (auto const &peak : peaks) {
+    V3D hkl = peak.getHKL();
+    hkl.round();
+
+    auto reflection =
+        m_reflections.find(m_pointgroup->getReflectionFamily(hkl));
+
+    if (reflection != m_reflections.end()) {
+      (*reflection).second.addPeak(peak);
+    }
+  }
+}
+
+/// Returns a copy of the UniqueReflection with the supplied HKL. Raises an
+/// exception if the reflection is not found.
+UniqueReflection
+UniqueReflectionCollection::getReflection(const V3D &hkl) const {
+  return m_reflections.at(m_pointgroup->getReflectionFamily(hkl));
+}
+
+/// Total number of unique reflections (theoretically possible).
+size_t UniqueReflectionCollection::getUniqueReflectionCount() const {
+  return m_reflections.size();
+}
+
+/// Number of unique reflections that have more observations than the supplied
+/// number (default is 0 - gives number of ).
+size_t UniqueReflectionCollection::getObservedUniqueReflectionCount(
+    size_t moreThan) const {
+  return std::count_if(
+      m_reflections.cbegin(), m_reflections.cend(),
+      [=](const std::pair<Kernel::V3D, UniqueReflection> &item) {
+        return item.second.count() > moreThan;
+      });
+}
+
+/// List of unobserved unique reflections in resolution range.
+std::vector<V3D>
+UniqueReflectionCollection::getUnobservedUniqueReflections() const {
+  std::vector<V3D> reflections;
+  reflections.reserve(m_reflections.size());
+
+  for (const auto &reflection : m_reflections) {
+    if (reflection.second.count() == 0) {
+      reflections.push_back(reflection.first);
+    }
+  }
+
+  return reflections;
+}
+
+/// Number of observed reflections.
+size_t UniqueReflectionCollection::getObservedReflectionCount() const {
+  return std::accumulate(
+      m_reflections.cbegin(), m_reflections.cend(), size_t(0),
+      [](size_t totalReflections,
+         const std::pair<Kernel::V3D, UniqueReflection> &item) {
+        return totalReflections + item.second.count();
+      });
+}
+
+/// Returns the internally stored reflection map. May disappear or change if
+/// implementation changes.
+const std::map<V3D, UniqueReflection> &
+UniqueReflectionCollection::getReflections() const {
+  return m_reflections;
+}
+
+/**
+ * @brief PeaksStatistics::calculatePeaksStatistics
+ *
+ * This function iterates through the unique reflections map and computes
+ * statistics for the reflections/peaks. It calls
+ * UniqueReflection::removeOutliers, so outliers are removed before the
+ * statistical quantities are calculated.
+ *
+ * Furthermore it sets the intensities of each peak to the mean of the
+ * group of equivalent reflections.
+ *
+ * @param uniqueReflections :: Map of unique reflections and peaks.
+ */
+void PeaksStatistics::calculatePeaksStatistics(
+    const std::map<V3D, UniqueReflection> &uniqueReflections) {
+  double rMergeNumerator = 0.0;
+  double rPimNumerator = 0.0;
+  double intensitySumRValues = 0.0;
+  double iOverSigmaSum = 0.0;
+
+  for (const auto &unique : uniqueReflections) {
+    /* Since all possible unique reflections are explored
+     * there may be 0 observations for some of them.
+     * In that case, nothing can be done.*/
+    if (unique.second.count() > 0) {
+      ++m_uniqueReflections;
+
+      // Possibly remove outliers.
+      auto outliersRemoved = unique.second.removeOutliers();
+
+      // I/sigma is calculated for all reflections, even if there is only one
+      // observation.
+      auto intensities = outliersRemoved.getIntensities();
+      auto sigmas = outliersRemoved.getSigmas();
+
+      // Accumulate the I/sigma's for current reflection into sum
+      iOverSigmaSum += getIOverSigmaSum(sigmas, intensities);
+
+      if (outliersRemoved.count() > 1) {
+        // Get mean, standard deviation for intensities
+        auto intensityStatistics = Kernel::getStatistics(
+            intensities, StatOptions::Mean | StatOptions::UncorrectedStdDev);
+
+        double meanIntensity = intensityStatistics.mean;
+
+        /* This was in the original algorithm, not entirely sure where it is
+         * used. It's basically the sum of all relative standard deviations.
+         * In a perfect data set with all equivalent reflections exactly
+         * equivalent that would be 0. */
+        m_chiSquared += intensityStatistics.standard_deviation / meanIntensity;
+
+        // For both RMerge and RPim sum(|I - <I>|) is required
+        double sumOfDeviationsFromMean =
+            std::accumulate(intensities.begin(), intensities.end(), 0.0,
+                            [meanIntensity](double sum, double intensity) {
+                              return sum + fabs(intensity - meanIntensity);
+                            });
+
+        // Accumulate into total sum for numerator of RMerge
+        rMergeNumerator += sumOfDeviationsFromMean;
+
+        // For Rpim, the sum is weighted by a factor depending on N
+        double rPimFactor =
+            sqrt(1.0 / (static_cast<double>(outliersRemoved.count()) - 1.0));
+        rPimNumerator += (rPimFactor * sumOfDeviationsFromMean);
+
+        // Collect sum of intensities for R-value calculation
+        intensitySumRValues +=
+            std::accumulate(intensities.begin(), intensities.end(), 0.0);
+
+        // The original algorithm sets the intensities and sigmas to the mean.
+        double sqrtOfMeanSqrSigma = getRMS(sigmas);
+        outliersRemoved.setPeaksIntensityAndSigma(meanIntensity,
+                                                  sqrtOfMeanSqrSigma);
+      }
+
+      const std::vector<Peak> &reflectionPeaks = outliersRemoved.getPeaks();
+      m_peaks.insert(m_peaks.end(), reflectionPeaks.begin(),
+                     reflectionPeaks.end());
+    }
+  }
+
+  m_measuredReflections = static_cast<int>(m_peaks.size());
+
+  if (m_uniqueReflections > 0) {
+    m_redundancy = static_cast<double>(m_measuredReflections) /
+                   static_cast<double>(m_uniqueReflections);
+  }
+
+  m_completeness = static_cast<double>(m_uniqueReflections) /
+                   static_cast<double>(uniqueReflections.size());
+
+  if (intensitySumRValues > 0.0) {
+    m_rMerge = rMergeNumerator / intensitySumRValues;
+    m_rPim = rPimNumerator / intensitySumRValues;
+  }
+
+  if (m_measuredReflections > 0) {
+    m_meanIOverSigma =
+        iOverSigmaSum / static_cast<double>(m_measuredReflections);
+
+    auto dspacingLimits = getDSpacingLimits(m_peaks);
+    m_dspacingMin = dspacingLimits.first;
+    m_dspacingMax = dspacingLimits.second;
+  }
+}
+
+/// Returns the sum of all I/sigma-ratios defined by the two vectors using
+/// std::inner_product.
+double PeaksStatistics::getIOverSigmaSum(
+    const std::vector<double> &sigmas,
+    const std::vector<double> &intensities) const {
+  return std::inner_product(intensities.begin(), intensities.end(),
+                            sigmas.begin(), 0.0, std::plus<double>(),
+                            std::divides<double>());
+}
+
+/// Returns the Root mean square of the supplied vector.
+double PeaksStatistics::getRMS(const std::vector<double> &data) const {
+  double sumOfSquares =
+      std::inner_product(data.begin(), data.end(), data.begin(), 0.0);
+
+  return sqrt(sumOfSquares / static_cast<double>(data.size()));
+}
+
+/// Returns the lowest and hights wavelength in the peak list.
+std::pair<double, double>
+PeaksStatistics::getDSpacingLimits(const std::vector<Peak> &peaks) const {
+  if (peaks.empty()) {
+    return std::make_pair(0.0, 0.0);
+  }
+
+  auto dspacingLimitIterators = std::minmax_element(
+      peaks.begin(), peaks.end(), [](const Peak &lhs, const Peak &rhs) {
+        return lhs.getDSpacing() < rhs.getDSpacing();
+      });
+
+  return std::make_pair((*(dspacingLimitIterators.first)).getDSpacing(),
+                        (*(dspacingLimitIterators.second)).getDSpacing());
+}
+
+} // namespace PeakStatisticsTools
+} // namespace Crystal
+} // namespace Mantid
diff --git a/Framework/Crystal/src/PredictPeaks.cpp b/Framework/Crystal/src/PredictPeaks.cpp
index 42e04a6635526de79e45acfdf64a98b416aff5b6..c88e1bfd59c5bdf559398601376c11ca400893d3 100644
--- a/Framework/Crystal/src/PredictPeaks.cpp
+++ b/Framework/Crystal/src/PredictPeaks.cpp
@@ -1,4 +1,5 @@
 #include "MantidCrystal/PredictPeaks.h"
+#include "MantidAPI/DetectorInfo.h"
 #include "MantidAPI/IMDEventWorkspace.h"
 #include "MantidAPI/MatrixWorkspace.h"
 #include "MantidAPI/Run.h"
@@ -8,12 +9,16 @@
 #include "MantidGeometry/Crystal/HKLGenerator.h"
 #include "MantidGeometry/Crystal/StructureFactorCalculatorSummation.h"
 #include "MantidGeometry/Objects/InstrumentRayTracer.h"
+#include "MantidGeometry/Objects/BoundingBox.h"
+#include "MantidGeometry/Instrument/ReferenceFrame.h"
+#include "MantidKernel/BoundedValidator.h"
 #include "MantidKernel/ListValidator.h"
 #include "MantidKernel/EnabledWhenProperty.h"
+#include "MantidKernel/make_unique.h"
 #include "MantidGeometry/Instrument/RectangularDetector.h"
-#include "MantidKernel/BoundedValidator.h"
 #include "MantidGeometry/Crystal/EdgePixel.h"
 
+#include <fstream>
 using Mantid::Kernel::EnabledWhenProperty;
 
 namespace Mantid {
@@ -219,7 +224,7 @@ void PredictPeaks::exec() {
 
   setInstrumentFromInputWorkspace(inputExperimentInfo);
   setRunNumberFromInputWorkspace(inputExperimentInfo);
-
+  setReferenceFrameAndBeamDirection();
   checkBeamDirection();
 
   // Create the output
@@ -257,6 +262,9 @@ void PredictPeaks::exec() {
   Progress prog(this, 0.0, 1.0, possibleHKLs.size() * gonioVec.size());
   prog.setNotifyStep(0.01);
 
+  m_detectorCacheSearch =
+      Kernel::make_unique<DetectorSearcher>(m_inst, m_pw->detectorInfo());
+
   for (auto &goniometerMatrix : gonioVec) {
     // Final transformation matrix (HKL to Q in lab frame)
     DblMatrix orientedUB = goniometerMatrix * ub;
@@ -470,40 +478,96 @@ void PredictPeaks::calculateQAndAddToOutput(const V3D &hkl,
   // The q-vector direction of the peak is = goniometer * ub * hkl_vector
   // This is in inelastic convention: momentum transfer of the LATTICE!
   // Also, q does have a 2pi factor = it is equal to 2pi/wavelength.
-  V3D q = orientedUB * hkl * (2.0 * M_PI * m_qConventionFactor);
+  const auto q = orientedUB * hkl * (2.0 * M_PI * m_qConventionFactor);
+  const auto params = getPeakParametersFromQ(q);
+  const auto detectorDir = std::get<0>(params);
+  const auto wl = std::get<1>(params);
+
+  const bool useExtendedDetectorSpace =
+      getProperty("PredictPeaksOutsideDetectors");
+  const auto result = m_detectorCacheSearch->findDetectorIndex(q);
+  const auto hitDetector = std::get<0>(result);
+  const auto index = std::get<1>(result);
 
-  // Create the peak using the Q in the lab framewith all its info:
-  Peak p(m_inst, q);
-  if (m_edge > 0) {
-    if (edgePixel(m_inst, p.getBankName(), p.getCol(), p.getRow(), m_edge))
+  if (!hitDetector && !useExtendedDetectorSpace) {
+    return;
+  }
+
+  const auto &detInfo = m_pw->detectorInfo();
+  const auto &det = detInfo.detector(index);
+  std::unique_ptr<Peak> peak;
+
+  if (hitDetector) {
+    // peak hit a detector to add it to the list
+    peak = Kernel::make_unique<Peak>(m_inst, det.getID(), wl);
+    if (!peak->getDetector())
       return;
+
+  } else if (useExtendedDetectorSpace) {
+    // use extended detector space to try and guess peak position
+    const auto returnedComponent =
+        m_inst->getComponentByName("extended-detector-space");
+    // Check that the component is valid
+    const auto component =
+        boost::dynamic_pointer_cast<const ObjComponent>(returnedComponent);
+    if (!component)
+      throw std::runtime_error("PredictPeaks: user requested use of a extended "
+                               "detector space to predict peaks but there is no"
+                               "definition in the IDF");
+
+    // find where this Q vector should intersect with "extended" space
+    Geometry::Track track(detInfo.samplePosition(), detectorDir);
+    if (!component->interceptSurface(track))
+      return;
+
+    // The exit point is the vector to the place that we hit a detector
+    const auto magnitude = track.back().exitPoint.norm();
+    peak = Kernel::make_unique<Peak>(m_inst, q,
+                                     boost::optional<double>(magnitude));
   }
-  /* The constructor calls setQLabFrame, which already calls findDetector, which
-     is expensive. It's not necessary to call it again, instead it's enough to
-     check whether a detector has already been set.
-
-     Peaks are added if they fall on a detector OR is the extended detector
-     space component is defined which can be used to approximate a peak's
-     position in detector space.
-     */
-  bool useExtendedDetectorSpace = getProperty("PredictPeaksOutsideDetectors");
-  if (!p.getDetector() &&
-      !(useExtendedDetectorSpace &&
-        m_inst->getComponentByName("extended-detector-space")))
+
+  if (m_edge > 0 && edgePixel(m_inst, peak->getBankName(), peak->getCol(),
+                              peak->getRow(), m_edge))
     return;
 
   // Only add peaks that hit the detector
-  p.setGoniometerMatrix(goniometerMatrix);
+  peak->setGoniometerMatrix(goniometerMatrix);
   // Save the run number found before.
-  p.setRunNumber(m_runNumber);
-  p.setHKL(hkl * m_qConventionFactor);
+  peak->setRunNumber(m_runNumber);
+  peak->setHKL(hkl * m_qConventionFactor);
 
   if (m_sfCalculator) {
-    p.setIntensity(m_sfCalculator->getFSquared(hkl));
+    peak->setIntensity(m_sfCalculator->getFSquared(hkl));
   }
 
   // Add it to the workspace
-  m_pw->addPeak(p);
+  m_pw->addPeak(*peak);
+}
+
+/** Get the detector direction and wavelength of a peak from it's QLab vector
+ *
+ * @param q :: the q lab vector for this peak
+ * @return a tuple containing the detector direction and the wavelength
+ */
+std::tuple<V3D, double>
+PredictPeaks::getPeakParametersFromQ(const V3D &q) const {
+  double norm_q = q.norm();
+  // Default for ki-kf has -q
+  const double qBeam = q.scalar_prod(m_refBeamDir) * m_qConventionFactor;
+  double one_over_wl = (norm_q * norm_q) / (2.0 * qBeam);
+  double wl = (2.0 * M_PI) / one_over_wl;
+  // Default for ki-kf has -q
+  V3D detectorDir = q * -m_qConventionFactor;
+  detectorDir[m_refFrame->pointingAlongBeam()] = one_over_wl - qBeam;
+  detectorDir.normalize();
+  return std::make_tuple(detectorDir, wl);
+}
+
+/** Cache the reference frame and beam direction using the instrument
+ */
+void PredictPeaks::setReferenceFrameAndBeamDirection() {
+  m_refFrame = m_inst->getReferenceFrame();
+  m_refBeamDir = m_refFrame->vecPointingAlongBeam();
 }
 
 } // namespace Mantid
diff --git a/Framework/Crystal/src/SortHKL.cpp b/Framework/Crystal/src/SortHKL.cpp
index 450084846821adc915a934e963639b84098ba9f5..23ae54e962ce3e2076cf15a81a6836a55c737469 100644
--- a/Framework/Crystal/src/SortHKL.cpp
+++ b/Framework/Crystal/src/SortHKL.cpp
@@ -9,12 +9,9 @@
 
 #include "MantidGeometry/Instrument/RectangularDetector.h"
 #include "MantidGeometry/Crystal/PointGroupFactory.h"
-#include "MantidGeometry/Crystal/HKLGenerator.h"
-#include "MantidGeometry/Crystal/BasicHKLFilters.h"
 #include "MantidGeometry/Crystal/OrientedLattice.h"
 
 #include "MantidKernel/ListValidator.h"
-#include "MantidKernel/Statistics.h"
 #include "MantidKernel/Utils.h"
 
 #include <cmath>
@@ -25,6 +22,7 @@ using namespace Mantid::Geometry;
 using namespace Mantid::DataObjects;
 using namespace Mantid::Kernel;
 using namespace Mantid::API;
+using namespace Mantid::Crystal::PeakStatisticsTools;
 
 namespace Mantid {
 namespace Crystal {
@@ -91,10 +89,10 @@ void SortHKL::exec() {
 
   UnitCell cell = inputPeaksWorkspace->sample().getOrientedLattice();
 
-  std::map<V3D, UniqueReflection> uniqueReflections =
+  UniqueReflectionCollection uniqueReflections =
       getUniqueReflections(peaks, cell);
 
-  PeaksStatistics peaksStatistics(uniqueReflections, peaks.size());
+  PeaksStatistics peaksStatistics(uniqueReflections);
 
   // Store the statistics for output.
   const std::string tableName = getProperty("StatisticsTable");
@@ -145,7 +143,7 @@ SortHKL::getNonZeroPeaks(const std::vector<Peak> &inputPeaks) const {
  * @param cell :: UnitCell to use for calculation of possible reflections.
  * @return Map of unique reflections.
  */
-std::map<V3D, UniqueReflection>
+UniqueReflectionCollection
 SortHKL::getUniqueReflections(const std::vector<Peak> &peaks,
                               const UnitCell &cell) const {
   ReflectionCondition_sptr centering = getCentering();
@@ -153,18 +151,10 @@ SortHKL::getUniqueReflections(const std::vector<Peak> &peaks,
 
   std::pair<double, double> dLimits = getDLimits(peaks, cell);
 
-  std::map<V3D, UniqueReflection> uniqueReflectionInRange =
-      getPossibleUniqueReflections(cell, dLimits, pointGroup, centering);
+  UniqueReflectionCollection reflections(cell, dLimits, pointGroup, centering);
+  reflections.addObservations(peaks);
 
-  for (auto const &peak : peaks) {
-    V3D hkl = peak.getHKL();
-    hkl.round();
-
-    uniqueReflectionInRange.at(pointGroup->getReflectionFamily(hkl))
-        .addPeak(peak);
-  }
-
-  return uniqueReflectionInRange;
+  return reflections;
 }
 
 /// Returns the centering extracted from the user-supplied property.
@@ -208,47 +198,6 @@ std::pair<double, double> SortHKL::getDLimits(const std::vector<Peak> &peaks,
                         cell.d((*dLimitIterators.second).getHKL()));
 }
 
-/**
- * @brief SortHKL::getPossibleUniqueReflections
- *
- * This method returns a map that contains UniqueReflection-objects, one
- * for each unique reflection in the given resolution range. It uses the
- * given cell, point group and centering to determine which reflections
- * are allowed and which ones are equivalent.
- *
- * @param cell :: UnitCell of the sample.
- * @param dLimits :: Resolution limits for the generated reflections.
- * @param pointGroup :: Point group of the sample.
- * @param centering :: Lattice centering (important for completeness
- * calculation).
- *
- * @return Map of UniqueReflection objects with HKL of the reflection family as
- * key
- */
-std::map<V3D, UniqueReflection> SortHKL::getPossibleUniqueReflections(
-    const UnitCell &cell, const std::pair<double, double> &dLimits,
-    const PointGroup_sptr &pointGroup,
-    const ReflectionCondition_sptr &centering) const {
-
-  HKLGenerator generator(cell, dLimits.first);
-  HKLFilter_const_sptr dFilter = boost::make_shared<const HKLFilterDRange>(
-      cell, dLimits.first, dLimits.second);
-  HKLFilter_const_sptr centeringFilter =
-      boost::make_shared<const HKLFilterCentering>(centering);
-  HKLFilter_const_sptr filter = dFilter & centeringFilter;
-
-  // Generate map of UniqueReflection-objects with reflection family as key.
-  std::map<V3D, UniqueReflection> uniqueHKLs;
-  for (const auto &hkl : generator) {
-    if (filter->isAllowed(hkl)) {
-      V3D hklFamily = pointGroup->getReflectionFamily(hkl);
-      uniqueHKLs.emplace(hklFamily, UniqueReflection(hklFamily));
-    }
-  }
-
-  return uniqueHKLs;
-}
-
 /// Create a TableWorkspace for the statistics with appropriate columns or get
 /// one from the ADS.
 ITableWorkspace_sptr
@@ -310,40 +259,6 @@ PeaksWorkspace_sptr SortHKL::getOutputPeaksWorkspace(
   return outputPeaksWorkspace;
 }
 
-/// Returns the sum of all I/sigma-ratios defined by the two vectors using
-/// std::inner_product.
-double PeaksStatistics::getIOverSigmaSum(
-    const std::vector<double> &sigmas,
-    const std::vector<double> &intensities) const {
-  return std::inner_product(intensities.begin(), intensities.end(),
-                            sigmas.begin(), 0.0, std::plus<double>(),
-                            std::divides<double>());
-}
-
-/// Returns the Root mean square of the supplied vector.
-double PeaksStatistics::getRMS(const std::vector<double> &data) const {
-  double sumOfSquares =
-      std::inner_product(data.begin(), data.end(), data.begin(), 0.0);
-
-  return sqrt(sumOfSquares / static_cast<double>(data.size()));
-}
-
-/// Returns the lowest and hights wavelength in the peak list.
-std::pair<double, double>
-PeaksStatistics::getDSpacingLimits(const std::vector<Peak> &peaks) const {
-  if (peaks.empty()) {
-    return std::make_pair(0.0, 0.0);
-  }
-
-  auto dspacingLimitIterators = std::minmax_element(
-      peaks.begin(), peaks.end(), [](const Peak &lhs, const Peak &rhs) {
-        return lhs.getDSpacing() < rhs.getDSpacing();
-      });
-
-  return std::make_pair((*(dspacingLimitIterators.first)).getDSpacing(),
-                        (*(dspacingLimitIterators.second)).getDSpacing());
-}
-
 /// Sorts the peaks in the workspace by H, K and L.
 void SortHKL::sortOutputPeaksByHKL(IPeaksWorkspace_sptr outputPeaksWorkspace) {
   // Sort by HKL
@@ -352,175 +267,5 @@ void SortHKL::sortOutputPeaksByHKL(IPeaksWorkspace_sptr outputPeaksWorkspace) {
   outputPeaksWorkspace->sort(criteria);
 }
 
-/**
- * @brief PeaksStatistics::calculatePeaksStatistics
- *
- * This function iterates through the unique reflections map and computes
- * statistics for the reflections/peaks. It calls
- * UniqueReflection::removeOutliers, so outliers are removed before the
- * statistical quantities are calculated.
- *
- * Furthermore it sets the intensities of each peak to the mean of the
- * group of equivalent reflections.
- *
- * @param uniqueReflections :: Map of unique reflections and peaks.
- */
-void PeaksStatistics::calculatePeaksStatistics(
-    std::map<V3D, UniqueReflection> &uniqueReflections) {
-  double rMergeNumerator = 0.0;
-  double rPimNumerator = 0.0;
-  double intensitySumRValues = 0.0;
-  double iOverSigmaSum = 0.0;
-
-  for (auto &unique : uniqueReflections) {
-    /* Since all possible unique reflections are explored
-     * there may be 0 observations for some of them.
-     * In that case, nothing can be done.*/
-    if (unique.second.count() > 0) {
-      ++m_uniqueReflections;
-
-      // Possibly remove outliers.
-      unique.second.removeOutliers();
-
-      // I/sigma is calculated for all reflections, even if there is only one
-      // observation.
-      const std::vector<double> &intensities = unique.second.getIntensities();
-      const std::vector<double> &sigmas = unique.second.getSigmas();
-
-      // Accumulate the I/sigma's for current reflection into sum
-      iOverSigmaSum += getIOverSigmaSum(sigmas, intensities);
-
-      if (unique.second.count() > 1) {
-        // Get mean, standard deviation for intensities
-        Statistics intensityStatistics = Kernel::getStatistics(
-            intensities, StatOptions::Mean | StatOptions::UncorrectedStdDev);
-
-        double meanIntensity = intensityStatistics.mean;
-
-        /* This was in the original algorithm, not entirely sure where it is
-         * used. It's basically the sum of all relative standard deviations.
-         * In a perfect data set with all equivalent reflections exactly
-         * equivalent that would be 0. */
-        m_chiSquared += intensityStatistics.standard_deviation / meanIntensity;
-
-        // For both RMerge and RPim sum(|I - <I>|) is required
-        double sumOfDeviationsFromMean =
-            std::accumulate(intensities.begin(), intensities.end(), 0.0,
-                            [meanIntensity](double sum, double intensity) {
-                              return sum + fabs(intensity - meanIntensity);
-                            });
-
-        // Accumulate into total sum for numerator of RMerge
-        rMergeNumerator += sumOfDeviationsFromMean;
-
-        // For Rpim, the sum is weighted by a factor depending on N
-        double rPimFactor =
-            sqrt(1.0 / (static_cast<double>(unique.second.count()) - 1.0));
-        rPimNumerator += (rPimFactor * sumOfDeviationsFromMean);
-
-        // Collect sum of intensities for R-value calculation
-        intensitySumRValues +=
-            std::accumulate(intensities.begin(), intensities.end(), 0.0);
-
-        // The original algorithm sets the intensities and sigmas to the mean.
-        double sqrtOfMeanSqrSigma = getRMS(sigmas);
-        unique.second.setPeaksIntensityAndSigma(meanIntensity,
-                                                sqrtOfMeanSqrSigma);
-      }
-
-      const std::vector<Peak> &reflectionPeaks = unique.second.getPeaks();
-      m_peaks.insert(m_peaks.end(), reflectionPeaks.begin(),
-                     reflectionPeaks.end());
-    }
-  }
-
-  m_measuredReflections = static_cast<int>(m_peaks.size());
-
-  if (m_uniqueReflections > 0) {
-    m_redundancy = static_cast<double>(m_measuredReflections) /
-                   static_cast<double>(m_uniqueReflections);
-  }
-
-  m_completeness = static_cast<double>(m_uniqueReflections) /
-                   static_cast<double>(uniqueReflections.size());
-
-  if (intensitySumRValues > 0.0) {
-    m_rMerge = rMergeNumerator / intensitySumRValues;
-    m_rPim = rPimNumerator / intensitySumRValues;
-  }
-
-  if (m_measuredReflections > 0) {
-    m_meanIOverSigma =
-        iOverSigmaSum / static_cast<double>(m_measuredReflections);
-
-    std::pair<double, double> dspacingLimits = getDSpacingLimits(m_peaks);
-    m_dspacingMin = dspacingLimits.first;
-    m_dspacingMax = dspacingLimits.second;
-  }
-}
-
-/// Returns a vector with the intensities of the Peaks stored in this
-/// reflection.
-std::vector<double> UniqueReflection::getIntensities() const {
-  std::vector<double> intensities;
-  intensities.reserve(m_peaks.size());
-
-  std::transform(
-      m_peaks.begin(), m_peaks.end(), std::back_inserter(intensities),
-      [](const DataObjects::Peak &peak) { return peak.getIntensity(); });
-
-  return intensities;
-}
-
-/// Returns a vector with the intensity sigmas of the Peaks stored in this
-/// reflection.
-std::vector<double> UniqueReflection::getSigmas() const {
-  std::vector<double> sigmas;
-  sigmas.reserve(m_peaks.size());
-
-  std::transform(
-      m_peaks.begin(), m_peaks.end(), std::back_inserter(sigmas),
-      [](const DataObjects::Peak &peak) { return peak.getSigmaIntensity(); });
-
-  return sigmas;
-}
-
-/// Removes peaks whose intensity deviates more than sigmaCritical from the
-/// intensities' mean.
-void UniqueReflection::removeOutliers(double sigmaCritical) {
-  if (sigmaCritical <= 0.0) {
-    throw std::invalid_argument(
-        "Critical sigma value has to be greater than 0.");
-  }
-
-  if (m_peaks.size() > 2) {
-    const std::vector<double> &intensities = getIntensities();
-    const std::vector<double> &zScores = Kernel::getZscore(intensities);
-
-    std::vector<size_t> outlierIndices;
-    for (size_t i = 0; i < zScores.size(); ++i) {
-      if (zScores[i] > sigmaCritical) {
-        outlierIndices.push_back(i);
-      }
-    }
-
-    if (!outlierIndices.empty()) {
-      for (auto it = outlierIndices.rbegin(); it != outlierIndices.rend();
-           ++it) {
-        m_peaks.erase(m_peaks.begin() + (*it));
-      }
-    }
-  }
-}
-
-/// Sets the intensities and sigmas of all stored peaks to the supplied values.
-void UniqueReflection::setPeaksIntensityAndSigma(double intensity,
-                                                 double sigma) {
-  for (auto &peak : m_peaks) {
-    peak.setIntensity(intensity);
-    peak.setSigmaIntensity(sigma);
-  }
-}
-
 } // namespace Mantid
 } // namespace Crystal
diff --git a/Framework/Crystal/test/PeakStatisticsToolsTest.h b/Framework/Crystal/test/PeakStatisticsToolsTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..779df462ee03d7bdd77fc92a82f02198f912161c
--- /dev/null
+++ b/Framework/Crystal/test/PeakStatisticsToolsTest.h
@@ -0,0 +1,360 @@
+#ifndef MANTID_CRYSTAL_PEAKSTATISTICSTOOLSTEST_H_
+#define MANTID_CRYSTAL_PEAKSTATISTICSTOOLSTEST_H_
+
+#include <cxxtest/TestSuite.h>
+
+#include "MantidCrystal/PeakStatisticsTools.h"
+#include "MantidGeometry/Crystal/PointGroupFactory.h"
+#include "MantidDataObjects/Peak.h"
+
+using namespace Mantid::Crystal;
+using namespace Mantid::Crystal::PeakStatisticsTools;
+using namespace Mantid::DataObjects;
+using namespace Mantid::Geometry;
+using namespace Mantid::Kernel;
+
+namespace {
+std::vector<Peak> getPeaksWithIandSigma(const std::vector<double> &intensity,
+                                        const std::vector<double> &sigma,
+                                        const V3D &hkl = V3D(0, 0, 1)) {
+  std::vector<Peak> peaks;
+  std::transform(intensity.begin(), intensity.end(), sigma.begin(),
+                 std::back_inserter(peaks),
+                 [hkl](double intensity, double sigma) {
+                   Peak peak;
+                   peak.setIntensity(intensity);
+                   peak.setSigmaIntensity(sigma);
+                   peak.setHKL(hkl);
+                   return peak;
+                 });
+
+  return peaks;
+}
+
+UniqueReflection getReflectionWithPeaks(const std::vector<double> &intensities,
+                                        const std::vector<double> &sigmas,
+                                        double wavelength = 0.0) {
+  std::vector<Peak> peaks = getPeaksWithIandSigma(intensities, sigmas);
+
+  if (wavelength > 0) {
+    for (auto &peak : peaks) {
+      peak.setWavelength(wavelength);
+    }
+  }
+
+  UniqueReflection reflection(V3D(2, 3, 4));
+  for (auto peak : peaks) {
+    reflection.addPeak(peak);
+  }
+
+  return reflection;
+}
+
+UniqueReflectionCollection
+getUniqueReflectionCollection(double a, const std::string &centering,
+                              const std::string &pointGroup, double dMin) {
+  UnitCell cell(a, a, a);
+  PointGroup_sptr pg =
+      PointGroupFactory::Instance().createPointGroup(pointGroup);
+  ReflectionCondition_sptr cent = getReflectionConditionBySymbol(centering);
+
+  return UniqueReflectionCollection(cell, std::make_pair(dMin, 100.0), pg,
+                                    cent);
+}
+
+class MockUniqueReflectionCollection : public UniqueReflectionCollection {
+public:
+  explicit MockUniqueReflectionCollection(
+      const std::map<V3D, UniqueReflection> &reflections,
+      const PointGroup_sptr &pointGroup =
+          PointGroupFactory::Instance().createPointGroup("1"))
+      : UniqueReflectionCollection(reflections, pointGroup) {}
+};
+}
+
+class PeakStatisticsToolsTest : public CxxTest::TestSuite {
+public:
+  // This pair of boilerplate methods prevent the suite being created statically
+  // This means the constructor isn't called when running other tests
+  static PeakStatisticsToolsTest *createSuite() {
+    return new PeakStatisticsToolsTest();
+  }
+  static void destroySuite(PeakStatisticsToolsTest *suite) { delete suite; }
+
+  void test_UniqueReflectionsConstructor() {
+    V3D hkl(1, 1, 1);
+    UniqueReflection reflection(hkl);
+
+    TSM_ASSERT_EQUALS("Constructed UniqueReflection does not have 0 peaks.",
+                      reflection.count(), 0);
+    TSM_ASSERT_EQUALS(
+        "HKL is not equal to constructor argument in UniqueReflection",
+        reflection.getHKL(), hkl);
+  }
+
+  void test_UniqueReflectionsPeaks() {
+    UniqueReflection reflection(V3D(2, 3, 4));
+
+    Peak peak;
+    TS_ASSERT_THROWS_NOTHING(reflection.addPeak(peak));
+    TSM_ASSERT_EQUALS("UniqueReflection count is not 1 after adding peak.",
+                      reflection.count(), 1);
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection peaks vector size is not 1 after adding peak.",
+        reflection.getPeaks().size(), 1);
+  }
+
+  void test_UniqueReflectionsGetIntensitiesAndSigmas() {
+    UniqueReflection reflection(V3D(2, 3, 4));
+
+    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
+    for (auto peak : peaks) {
+      reflection.addPeak(peak);
+    }
+
+    std::vector<double> intensities = reflection.getIntensities();
+    TSM_ASSERT_EQUALS("Intensity vector from UniqueReflection has wrong size.",
+                      intensities.size(), 2);
+    TS_ASSERT_EQUALS(intensities[0], 30.0);
+    TS_ASSERT_EQUALS(intensities[1], 34.0);
+
+    std::vector<double> sigmas = reflection.getSigmas();
+    TSM_ASSERT_EQUALS("Sigma vector from UniqueReflection has wrong size.",
+                      sigmas.size(), 2);
+    TS_ASSERT_EQUALS(sigmas[0], 4.5);
+    TS_ASSERT_EQUALS(sigmas[1], 6.5);
+  }
+
+  void test_UniqueReflectionRemoveOutliersSigmaCrit() {
+    UniqueReflection reflection(V3D(2, 3, 4));
+    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers(3.0));
+    TS_ASSERT_THROWS(reflection.removeOutliers(0.0), std::invalid_argument);
+    TS_ASSERT_THROWS(reflection.removeOutliers(-10.0), std::invalid_argument);
+  }
+
+  void test_UniqueReflectionRemoveOutliersFewPeaks() {
+    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
+
+    UniqueReflection reflection(V3D(2, 3, 4));
+    reflection.addPeak(peaks[0]);
+
+    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
+
+    auto outliersRemoved = reflection.removeOutliers();
+    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 1.",
+                      outliersRemoved.count(), 1);
+
+    reflection.addPeak(peaks[1]);
+
+    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
+
+    outliersRemoved = reflection.removeOutliers();
+    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 2.",
+                      outliersRemoved.count(), 2);
+  }
+
+  void test_UniqueReflectionRemoveOutliers() {
+    UniqueReflection reflection =
+        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
+
+    // standard deviation is 1.70782512765993
+    auto cleanReflection = reflection.removeOutliers();
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection removed outlier although it should not.",
+        cleanReflection.count(), 4);
+
+    cleanReflection = reflection.removeOutliers(2.0);
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection removed outlier although it should not.",
+        cleanReflection.count(), 4);
+
+    cleanReflection = reflection.removeOutliers(1.0);
+    TSM_ASSERT_EQUALS(
+        "UniqueReflection did not remove outliers although it should have.",
+        cleanReflection.count(), 2);
+
+    std::vector<double> cleanIntensities = cleanReflection.getIntensities();
+    TS_ASSERT_EQUALS(cleanIntensities[0], 32.0);
+    TS_ASSERT_EQUALS(cleanIntensities[1], 31.0);
+  }
+
+  void test_UniqueReflectionSetIntensityAndSigma() {
+    UniqueReflection reflection =
+        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
+
+    reflection.setPeaksIntensityAndSigma(10.0, 0.1);
+
+    for (auto peak : reflection.getPeaks()) {
+      TSM_ASSERT_EQUALS(
+          "Incorrect peak intensity after set in UniqueReflection.",
+          peak.getIntensity(), 10.0);
+      TSM_ASSERT_EQUALS("Incorrect peak sigma after set in UniqueReflection.",
+                        peak.getSigmaIntensity(), 0.1);
+    }
+  }
+
+  void test_UniqueReflectionCollectionEmpty() {
+    UniqueReflectionCollection reflections =
+        getUniqueReflectionCollection(3.0, "P", "m-3m", 1.5);
+
+    // There should be 4 reflections: 001, 011, 111, 002
+    TS_ASSERT_EQUALS(reflections.getUniqueReflectionCount(), 4);
+
+    // Uses point group to retrieve UniqueReflections
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 0, 1)));
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 0, -1)));
+
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 1, 1)));
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(1, 1, 1)));
+    TS_ASSERT_THROWS_NOTHING(reflections.getReflection(V3D(0, 0, 2)));
+
+    // Reflections that do not exist throw some exception
+    TS_ASSERT_THROWS_ANYTHING(reflections.getReflection(V3D(0, 0, 3)));
+    TS_ASSERT_THROWS_ANYTHING(reflections.getReflection(V3D(2, -1, 0)));
+
+    // No observations
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 0);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 0);
+  }
+
+  void test_UniqueReflectionCollectionAddObservations() {
+    UniqueReflectionCollection reflections =
+        getUniqueReflectionCollection(3.0, "P", "m-3m", 1.5);
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 0);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 0);
+    TS_ASSERT_EQUALS(reflections.getUnobservedUniqueReflections().size(), 4);
+
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0}, {2.0, 2.0}, V3D(1, 0, 0)));
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 2);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 1);
+    TS_ASSERT_EQUALS(reflections.getUnobservedUniqueReflections().size(), 3);
+
+    // out-of-range peaks are ignored, so the reflection counts do not change
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0}, {2.0, 2.0}, V3D(0, 5, 0)));
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 2);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 1);
+  }
+
+  void test_UniqueReflectionCollectionReflectionCounts() {
+    UniqueReflectionCollection reflections =
+        getUniqueReflectionCollection(3.0, "P", "m-3m", 1.5);
+
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0}, {2.0, 2.0}, V3D(1, 0, 0)));
+    reflections.addObservations(
+        getPeaksWithIandSigma({1.0, 1.0, 2.0}, {2.0, 2.0, 3.0}, V3D(1, 1, 0)));
+
+    TS_ASSERT_EQUALS(reflections.getObservedReflectionCount(), 5);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(), 2);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(2), 1);
+    TS_ASSERT_EQUALS(reflections.getObservedUniqueReflectionCount(3), 0);
+
+    TS_ASSERT_EQUALS(reflections.getUnobservedUniqueReflections().size(), 2);
+  }
+
+  void test_PeaksStatisticsNoObservation() {
+    std::map<V3D, UniqueReflection> uniques;
+    uniques.insert(
+        std::make_pair(V3D(1, 1, 1), UniqueReflection(V3D(1, 1, 1))));
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 0);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 0);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 0.0);
+  }
+
+  void test_PeaksStatisticsOneObservation() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
+  }
+
+  void test_PeaksStatisticsOneObservationTwoUnique() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)},
+        {{1, 1, 2}, UniqueReflection(V3D(1, 1, 2))}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 0.5);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
+  }
+
+  void test_PeaksStatisticsTwoObservationTwoUnique() {
+
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({10.0}, {1.0}, 1.0)},
+        {{1, 1, 2}, getReflectionWithPeaks({20.0}, {1.0}, 2.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 2);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 15.0);
+  }
+
+  void test_PeaksStatisticsTwoObservationOneUnique() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1}, getReflectionWithPeaks({10.0, 20.0}, {0.1, 0.1}, 1.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 2.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    // <I> = 15, sum(I) = 30, sum(|I - <I>|) = 10, rMerge = 10 / 30 = 0.33
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 3.0);
+    // For 2 observations this is the same since sqrt(1 / (2 - 1)) = 1
+    TS_ASSERT_EQUALS(statistics.m_rPim, 1.0 / 3.0);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
+  }
+
+  void test_PeaksStatisticsThreeObservationOneUnique() {
+    std::map<V3D, UniqueReflection> uniques{
+        {{1, 1, 1},
+         getReflectionWithPeaks({10.0, 20.0, 15.0}, {0.1, 0.1, 0.1}, 1.0)}};
+    MockUniqueReflectionCollection reflections(uniques);
+
+    PeaksStatistics statistics(reflections);
+    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 3);
+    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
+    TS_ASSERT_EQUALS(statistics.m_redundancy, 3.0);
+    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
+    // <I> = 15, sum(I) = 45, sum(|I - <I>|) = 10, rMerge = 10 / 45 = 0.222
+    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 4.5);
+    // For rpim the factor is  sqrt(1 / (3 - 1)) = sqrt(0.5)
+    TS_ASSERT_EQUALS(statistics.m_rPim, sqrt(0.5) / 4.5);
+    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
+  }
+};
+
+#endif /* MANTID_CRYSTAL_PEAKSTATISTICSTOOLSTEST_H_ */
diff --git a/Framework/Crystal/test/PredictPeaksTest.h b/Framework/Crystal/test/PredictPeaksTest.h
index 8e30cd5026e9abf68b7b15a1a7141bbf5964e403..ef297ab7108c36d69cc87f019564c751fb405c85 100644
--- a/Framework/Crystal/test/PredictPeaksTest.h
+++ b/Framework/Crystal/test/PredictPeaksTest.h
@@ -130,7 +130,38 @@ public:
   }
 
   void test_exec_withExtendedDetectorSpaceOptionCheckedNoDefinition() {
-    do_test_exec("Primitive", 10, std::vector<V3D>(), 1, true, false);
+    std::string outWSName("PredictPeaksTest_OutputWS");
+    // Make the fake input workspace
+    auto inWS = WorkspaceCreationHelper::create2DWorkspace(10000, 1);
+    auto inst =
+        ComponentCreationHelper::createTestInstrumentRectangular(1, 100);
+    inWS->setInstrument(inst);
+
+    // Set ub and Goniometer rotation
+    WorkspaceCreationHelper::setOrientedLattice(inWS, 12.0, 12.0, 12.0);
+    WorkspaceCreationHelper::setGoniometer(inWS, 0., 0., 0.);
+
+    PredictPeaks alg;
+    TS_ASSERT_THROWS_NOTHING(alg.initialize())
+    TS_ASSERT(alg.isInitialized())
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty(
+        "InputWorkspace", boost::dynamic_pointer_cast<Workspace>(inWS)));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("OutputWorkspace", outWSName));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("WavelengthMin", "0.1"));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("WavelengthMax", "10.0"));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("MinDSpacing", "1.0"));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setPropertyValue("ReflectionCondition", "Primitive"));
+    TS_ASSERT_THROWS_NOTHING(
+        alg.setProperty("PredictPeaksOutsideDetectors", true));
+    alg.execute();
+
+    // should fail to execute and throw a runtime error
+    TS_ASSERT(!alg.isExecuted());
+
+    // Remove workspace from the data service.
+    AnalysisDataService::Instance().remove(outWSName);
   }
 
   void test_exec_withInputHKLList() {
@@ -216,4 +247,54 @@ public:
   }
 };
 
+class PredictPeaksTestPerformance : public CxxTest::TestSuite {
+public:
+  void test_manyPeaksRectangular() {
+    MatrixWorkspace_sptr inWS =
+        WorkspaceCreationHelper::create2DWorkspace(10000, 1);
+    Instrument_sptr inst =
+        ComponentCreationHelper::createTestInstrumentRectangular2(1, 100);
+    inWS->setInstrument(inst);
+
+    // Set ub and Goniometer rotation
+    WorkspaceCreationHelper::setOrientedLattice(inWS, 12.0, 12.0, 12.0);
+    WorkspaceCreationHelper::setGoniometer(inWS, 0., 0., 0.);
+
+    PredictPeaks alg;
+    alg.initialize();
+    alg.setProperty("InputWorkspace",
+                    boost::dynamic_pointer_cast<Workspace>(inWS));
+    alg.setPropertyValue("OutputWorkspace", "predict_peaks_performance");
+    alg.setPropertyValue("WavelengthMin", ".5");
+    alg.setPropertyValue("WavelengthMax", "15.0");
+    alg.setPropertyValue("MinDSpacing", ".1");
+    alg.setPropertyValue("ReflectionCondition", "Primitive");
+    alg.execute();
+  }
+
+  void test_manyPeaks() {
+    MatrixWorkspace_sptr inWS =
+        WorkspaceCreationHelper::create2DWorkspace(10000, 1);
+    Instrument_sptr inst =
+        ComponentCreationHelper::createTestInstrumentCylindrical(
+            3, V3D(0, 0, -1), V3D(0, 0, 0), 1.6, 1.0);
+    inWS->setInstrument(inst);
+
+    // Set UB matrix and Goniometer rotation
+    WorkspaceCreationHelper::setOrientedLattice(inWS, 12.0, 12.0, 12.0);
+    WorkspaceCreationHelper::setGoniometer(inWS, 0., 0., 0.);
+
+    PredictPeaks alg;
+    alg.initialize();
+    alg.setProperty("InputWorkspace",
+                    boost::dynamic_pointer_cast<Workspace>(inWS));
+    alg.setPropertyValue("OutputWorkspace", "predict_peaks_performance");
+    alg.setPropertyValue("WavelengthMin", ".5");
+    alg.setPropertyValue("WavelengthMax", "15.0");
+    alg.setPropertyValue("MinDSpacing", ".1");
+    alg.setPropertyValue("ReflectionCondition", "Primitive");
+    alg.execute();
+  }
+};
+
 #endif /* MANTID_CRYSTAL_PREDICTPEAKSTEST_H_ */
diff --git a/Framework/Crystal/test/SortHKLTest.h b/Framework/Crystal/test/SortHKLTest.h
index db8210cfd56239f17121b21a750e2e0e1903e849..b16f1f18e2c9e430ea855216a368ca80362f50f4 100644
--- a/Framework/Crystal/test/SortHKLTest.h
+++ b/Framework/Crystal/test/SortHKLTest.h
@@ -27,207 +27,6 @@ using namespace Mantid::PhysicalConstants;
 
 class SortHKLTest : public CxxTest::TestSuite {
 public:
-  void test_UniqueReflectionsConstructor() {
-    V3D hkl(1, 1, 1);
-    UniqueReflection reflection(hkl);
-
-    TSM_ASSERT_EQUALS("Constructed UniqueReflection does not have 0 peaks.",
-                      reflection.count(), 0);
-    TSM_ASSERT_EQUALS(
-        "HKL is not equal to constructor argument in UniqueReflection",
-        reflection.getHKL(), hkl);
-  }
-
-  void test_UniqueReflectionsPeaks() {
-    UniqueReflection reflection(V3D(2, 3, 4));
-
-    Peak peak;
-    TS_ASSERT_THROWS_NOTHING(reflection.addPeak(peak));
-    TSM_ASSERT_EQUALS("UniqueReflection count is not 1 after adding peak.",
-                      reflection.count(), 1);
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection peaks vector size is not 1 after adding peak.",
-        reflection.getPeaks().size(), 1);
-  }
-
-  void test_UniqueReflectionsGetIntensitiesAndSigmas() {
-    UniqueReflection reflection(V3D(2, 3, 4));
-
-    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
-    for (auto peak : peaks) {
-      reflection.addPeak(peak);
-    }
-
-    std::vector<double> intensities = reflection.getIntensities();
-    TSM_ASSERT_EQUALS("Intensity vector from UniqueReflection has wrong size.",
-                      intensities.size(), 2);
-    TS_ASSERT_EQUALS(intensities[0], 30.0);
-    TS_ASSERT_EQUALS(intensities[1], 34.0);
-
-    std::vector<double> sigmas = reflection.getSigmas();
-    TSM_ASSERT_EQUALS("Sigma vector from UniqueReflection has wrong size.",
-                      sigmas.size(), 2);
-    TS_ASSERT_EQUALS(sigmas[0], 4.5);
-    TS_ASSERT_EQUALS(sigmas[1], 6.5);
-  }
-
-  void test_UniqueReflectionRemoveOutliersSigmaCrit() {
-    UniqueReflection reflection(V3D(2, 3, 4));
-    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers(3.0));
-    TS_ASSERT_THROWS(reflection.removeOutliers(0.0), std::invalid_argument);
-    TS_ASSERT_THROWS(reflection.removeOutliers(-10.0), std::invalid_argument);
-  }
-
-  void test_UniqueReflectionRemoveOutliersFewPeaks() {
-    std::vector<Peak> peaks = getPeaksWithIandSigma({30.0, 34.0}, {4.5, 6.5});
-
-    UniqueReflection reflection(V3D(2, 3, 4));
-    reflection.addPeak(peaks[0]);
-
-    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
-    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 1.",
-                      reflection.count(), 1);
-
-    reflection.addPeak(peaks[1]);
-
-    TS_ASSERT_THROWS_NOTHING(reflection.removeOutliers());
-    TSM_ASSERT_EQUALS("Peak was removed as outlier although there's only 2.",
-                      reflection.count(), 2);
-  }
-
-  void test_UniqueReflectionRemoveOutliers() {
-    UniqueReflection reflection =
-        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
-
-    // standard deviation is 1.70782512765993
-    reflection.removeOutliers();
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection removed outlier although it should not.",
-        reflection.count(), 4);
-
-    reflection.removeOutliers(2.0);
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection removed outlier although it should not.",
-        reflection.count(), 4);
-
-    reflection.removeOutliers(1.0);
-    TSM_ASSERT_EQUALS(
-        "UniqueReflection did not remove outliers although it should have.",
-        reflection.count(), 2);
-
-    std::vector<double> cleanIntensities = reflection.getIntensities();
-    TS_ASSERT_EQUALS(cleanIntensities[0], 32.0);
-    TS_ASSERT_EQUALS(cleanIntensities[1], 31.0);
-  }
-
-  void test_UniqueReflectionSetIntensityAndSigma() {
-    UniqueReflection reflection =
-        getReflectionWithPeaks({30.0, 34.0, 32.0, 31.0}, {4.5, 6.5, 10.0, 2.3});
-
-    reflection.setPeaksIntensityAndSigma(10.0, 0.1);
-
-    for (auto peak : reflection.getPeaks()) {
-      TSM_ASSERT_EQUALS(
-          "Incorrect peak intensity after set in UniqueReflection.",
-          peak.getIntensity(), 10.0);
-      TSM_ASSERT_EQUALS("Incorrect peak sigma after set in UniqueReflection.",
-                        peak.getSigmaIntensity(), 0.1);
-    }
-  }
-
-  void test_PeaksStatisticsNoObservation() {
-    std::map<V3D, UniqueReflection> uniques;
-    uniques.insert(
-        std::make_pair(V3D(1, 1, 1), UniqueReflection(V3D(1, 1, 1))));
-
-    PeaksStatistics statistics(uniques, 0);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 0);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 0);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 0.0);
-  }
-
-  void test_PeaksStatisticsOneObservation() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)}};
-
-    PeaksStatistics statistics(uniques, 1);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
-  }
-
-  void test_PeaksStatisticsOneObservationTwoUnique() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({56.0}, {4.5}, 1.0)},
-        {{1, 1, 2}, UniqueReflection(V3D(1, 1, 2))}};
-
-    PeaksStatistics statistics(uniques, 1);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 1);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 0.5);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 56.0 / 4.5);
-  }
-
-  void test_PeaksStatisticsTwoObservationTwoUnique() {
-
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({10.0}, {1.0}, 1.0)},
-        {{1, 1, 2}, getReflectionWithPeaks({20.0}, {1.0}, 2.0)}};
-
-    PeaksStatistics statistics(uniques, 2);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 2);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_rPim, 0.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 15.0);
-  }
-
-  void test_PeaksStatisticsTwoObservationOneUnique() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1}, getReflectionWithPeaks({10.0, 20.0}, {0.1, 0.1}, 1.0)}};
-
-    PeaksStatistics statistics(uniques, 2);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 2);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 2.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    // <I> = 15, sum(I) = 30, sum(|I - <I>|) = 10, rMerge = 10 / 30 = 0.33
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 3.0);
-    // For 2 observations this is the same since sqrt(1 / (2 - 1)) = 1
-    TS_ASSERT_EQUALS(statistics.m_rPim, 1.0 / 3.0);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
-  }
-
-  void test_PeaksStatisticsThreeObservationOneUnique() {
-    std::map<V3D, UniqueReflection> uniques{
-        {{1, 1, 1},
-         getReflectionWithPeaks({10.0, 20.0, 15.0}, {0.1, 0.1, 0.1}, 1.0)}};
-
-    PeaksStatistics statistics(uniques, 3);
-    TS_ASSERT_EQUALS(statistics.m_peaks.size(), 3);
-    TS_ASSERT_EQUALS(statistics.m_uniqueReflections, 1);
-    TS_ASSERT_EQUALS(statistics.m_redundancy, 3.0);
-    TS_ASSERT_EQUALS(statistics.m_completeness, 1.0);
-    // <I> = 15, sum(I) = 45, sum(|I - <I>|) = 10, rMerge = 10 / 45 = 0.222
-    TS_ASSERT_EQUALS(statistics.m_rMerge, 1.0 / 4.5);
-    // For rpim the factor is  sqrt(1 / (3 - 1)) = sqrt(0.5)
-    TS_ASSERT_EQUALS(statistics.m_rPim, sqrt(0.5) / 4.5);
-    TS_ASSERT_EQUALS(statistics.m_meanIOverSigma, 150.0);
-  }
-
   void test_Init() {
     SortHKL alg;
     TS_ASSERT_THROWS_NOTHING(alg.initialize());
@@ -327,43 +126,6 @@ public:
 
   /// Test with a few peaks
   void test_exec() { do_test(2, 4, 4); }
-
-private:
-  std::vector<Peak>
-  getPeaksWithIandSigma(const std::vector<double> &intensity,
-                        const std::vector<double> &sigma) const {
-    std::vector<Peak> peaks;
-    std::transform(intensity.begin(), intensity.end(), sigma.begin(),
-                   std::back_inserter(peaks),
-                   [](double intensity, double sigma) {
-                     Peak peak;
-                     peak.setIntensity(intensity);
-                     peak.setSigmaIntensity(sigma);
-                     return peak;
-                   });
-
-    return peaks;
-  }
-
-  UniqueReflection
-  getReflectionWithPeaks(const std::vector<double> &intensities,
-                         const std::vector<double> &sigmas,
-                         double wavelength = 0.0) const {
-    std::vector<Peak> peaks = getPeaksWithIandSigma(intensities, sigmas);
-
-    if (wavelength > 0) {
-      for (auto &peak : peaks) {
-        peak.setWavelength(wavelength);
-      }
-    }
-
-    UniqueReflection reflection(V3D(2, 3, 4));
-    for (auto peak : peaks) {
-      reflection.addPeak(peak);
-    }
-
-    return reflection;
-  }
 };
 
 #endif /* MANTID_CRYSTAL_SORTHKLTEST_H_ */
diff --git a/Framework/CurveFitting/CMakeLists.txt b/Framework/CurveFitting/CMakeLists.txt
index 6716713c08b5f92af31e8135cb424e5d160b208a..ebd31ccae7f6e20d6e58389c813ebb9a7b355bfb 100644
--- a/Framework/CurveFitting/CMakeLists.txt
+++ b/Framework/CurveFitting/CMakeLists.txt
@@ -81,7 +81,6 @@ set ( SRC_FILES
 	src/Functions/ExpDecayOsc.cpp
 	src/Functions/FlatBackground.cpp
 	src/Functions/FullprofPolynomial.cpp
-	src/Functions/FunctionGenerator.cpp
 	src/Functions/FunctionQDepends.cpp
 	src/Functions/GausDecay.cpp
 	src/Functions/GausOsc.cpp
@@ -238,7 +237,6 @@ set ( INC_FILES
 	inc/MantidCurveFitting/Functions/ExpDecayOsc.h
 	inc/MantidCurveFitting/Functions/FlatBackground.h
 	inc/MantidCurveFitting/Functions/FullprofPolynomial.h
-	inc/MantidCurveFitting/Functions/FunctionGenerator.h
 	inc/MantidCurveFitting/Functions/FunctionQDepends.h
 	inc/MantidCurveFitting/Functions/GausDecay.h
 	inc/MantidCurveFitting/Functions/GausOsc.h
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h b/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h
index 430d68a1a4505925459feb341ab81840bc6cf57e..d90df8c1efbc63334a0c55152df152da55bfe236 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Algorithms/Fit.h
@@ -98,21 +98,31 @@ Code Documentation is available at: <http://doxygen.mantidproject.org>
 class DLLExport Fit : public IFittingAlgorithm {
 public:
   /// Default constructor
-  Fit() : IFittingAlgorithm() {}
+  Fit();
   /// Algorithm's name for identification overriding a virtual method
   const std::string name() const override { return "Fit"; }
   /// Summary of algorithms purpose
   const std::string summary() const override {
     return "Fits a function to data in a Workspace";
   }
-
   /// Algorithm's version for identification overriding a virtual method
   int version() const override { return (1); }
 
-protected:
+private:
   void initConcrete() override;
   void execConcrete() override;
+  void readProperties();
+  void initializeMinimizer(size_t maxIterations);
+  size_t runMinimizer();
+  void finalizeMinimizer(size_t nIterations);
   void copyMinimizerOutput(const API::IFuncMinimizer &minimizer);
+  void createOutput();
+  /// The cost function
+  boost::shared_ptr<CostFunctions::CostFuncFitting> m_costFunction;
+  /// The minimizer
+  boost::shared_ptr<API::IFuncMinimizer> m_minimizer;
+  /// Max number of iterations
+  size_t m_maxIterations;
 };
 
 } // namespace Algorithms
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h b/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h
index 83fc5cc4d938188aba69b182d49b30bbc0f5673c..513e2a9e11fad166584000cbcfde41ecf9152023 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Constraints/BoundaryConstraint.h
@@ -44,15 +44,10 @@ Code Documentation is available at: <http://doxygen.mantidproject.org>
 class DLLExport BoundaryConstraint : public API::IConstraint {
 public:
   /// Default constructor
-  BoundaryConstraint()
-      : API::IConstraint(), m_penaltyFactor(1000.0), m_parameterName(""),
-        m_hasLowerBound(false), m_hasUpperBound(false), m_lowerBound(DBL_MAX),
-        m_upperBound(-DBL_MAX) {}
+  BoundaryConstraint();
 
   /// Constructor with no boundary arguments
-  BoundaryConstraint(const std::string &paramName)
-      : API::IConstraint(), m_penaltyFactor(1000.0), m_parameterName(paramName),
-        m_hasLowerBound(false), m_hasUpperBound(false) {}
+  BoundaryConstraint(const std::string &paramName);
 
   /// Constructor with boundary arguments
   BoundaryConstraint(API::IFunction *fun, const std::string paramName,
@@ -114,7 +109,7 @@ public:
   }
 
   /// Get parameter name
-  std::string getParameterName() const { return m_parameterName; }
+  //  std::string getParameterName() const { return m_parameterName; }
 
   /// overwrite IConstraint base class methods
   double check() override;
@@ -128,7 +123,7 @@ private:
   double m_penaltyFactor;
 
   /// name of parameter you want to constraint
-  std::string m_parameterName;
+  // std::string m_parameterName;
 
   /// has a lower bound set true/false
   bool m_hasLowerBound;
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h
index 328fb8ee636276bd702329613de90214c77c91db..b7d8e824708ddadb36d86965a81f50cf9494da85 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldMultiSpectrum.h
@@ -1,9 +1,9 @@
 #ifndef MANTID_CURVEFITTING_CRYSTALFIELDMULTISPECTRUM_H_
 #define MANTID_CURVEFITTING_CRYSTALFIELDMULTISPECTRUM_H_
 
+#include "MantidAPI/FunctionGenerator.h"
 #include "MantidAPI/FunctionValues.h"
 #include "MantidCurveFitting/FortranDefs.h"
-#include "MantidCurveFitting/Functions/FunctionGenerator.h"
 
 namespace Mantid {
 namespace CurveFitting {
@@ -32,7 +32,7 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 File change history is stored at: <https://github.com/mantidproject/mantid>
 Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class DLLExport CrystalFieldMultiSpectrum : public FunctionGenerator {
+class DLLExport CrystalFieldMultiSpectrum : public API::FunctionGenerator {
 public:
   CrystalFieldMultiSpectrum();
   std::string name() const override { return "CrystalFieldMultiSpectrum"; }
@@ -66,7 +66,7 @@ private:
                       const DoubleFortranVector &en,
                       const ComplexFortranMatrix &wf,
                       const ComplexFortranMatrix &ham, double temperature,
-                      size_t i) const;
+                      double fwhm, size_t i) const;
   /// Calculate excitations at given temperature
   void calcExcitations(int nre, const DoubleFortranVector &en,
                        const ComplexFortranMatrix &wf, double temperature,
@@ -78,6 +78,10 @@ private:
   /// Caches of the width functions
   mutable std::vector<std::vector<double>> m_fwhmX;
   mutable std::vector<std::vector<double>> m_fwhmY;
+  /// Cache the temperatures
+  mutable std::vector<double> m_temperatures;
+  /// Cache the default peak FWHMs
+  mutable std::vector<double> m_FWHMs;
 };
 
 } // namespace Functions
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h
index 395b0cd61b765164f5ec00dd5598ed532873a695..c35d797f4c07baef958295c2e28c0a9efb71380f 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldPeakUtils.h
@@ -46,11 +46,12 @@ size_t buildSpectrumFunction(API::CompositeFunction &spectrum,
                              double fwhmVariation, double defaultFWHM,
                              size_t nRequiredPeaks, bool fixAllPeaks);
 size_t updateSpectrumFunction(API::CompositeFunction &spectrum,
+                              const std::string &peakShape,
                               const API::FunctionValues &centresAndIntensities,
-                              size_t nOriginalPeaks, size_t iFirst,
-                              const std::vector<double> &xVec,
+                              size_t iFirst, const std::vector<double> &xVec,
                               const std::vector<double> &yVec,
-                              double fwhmVariation);
+                              double fwhmVariation, double defaultFWHM,
+                              bool fixAllPeaks);
 size_t calculateNPeaks(const API::FunctionValues &centresAndIntensities);
 size_t calculateMaxNPeaks(size_t nPeaks);
 
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h
index b502e5b7e97ce3b0d715e78fc8a7fb0ffc9b9a28..bf96865210adbc3b95acd740951f30e96456aec8 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/CrystalFieldSpectrum.h
@@ -1,7 +1,7 @@
 #ifndef MANTID_CURVEFITTING_CRYSTALFIELDSPECTRUM_H_
 #define MANTID_CURVEFITTING_CRYSTALFIELDSPECTRUM_H_
 
-#include "MantidCurveFitting/Functions/FunctionGenerator.h"
+#include "MantidAPI/FunctionGenerator.h"
 
 namespace Mantid {
 namespace CurveFitting {
@@ -30,7 +30,7 @@ along with this program.  If not, see <http://www.gnu.org/licenses/>.
 File change history is stored at: <https://github.com/mantidproject/mantid>
 Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
-class DLLExport CrystalFieldSpectrum : public FunctionGenerator {
+class DLLExport CrystalFieldSpectrum : public API::FunctionGenerator {
 public:
   CrystalFieldSpectrum();
   std::string name() const override { return "CrystalFieldSpectrum"; }
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h
index c09f2e9a7bb050bdca7ecf1518899d50236e2050..e4478f8b75f318c251af28f2bf57618db20f0d71 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Gaussian.h
@@ -61,9 +61,9 @@ public:
   void setFwhm(const double w) override;
   void setIntensity(const double i) override;
 
-  void fixCentre() override;
+  void fixCentre(bool isDefault = false) override;
   void unfixCentre() override;
-  void fixIntensity() override;
+  void fixIntensity(bool isDefault = false) override;
   void unfixIntensity() override;
 
   /// overwrite IFunction base class methods
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h
index 2a6d0e5975f729c3937b153f94de8a381e39ed8f..fdd10e2f6aa49fcbb2aa4a08273512121c932120 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Functions/Lorentzian.h
@@ -56,9 +56,9 @@ public:
   void setHeight(const double h) override;
   void setFwhm(const double w) override;
   void setIntensity(const double i) override { setParameter("Amplitude", i); }
-  void fixCentre() override;
+  void fixCentre(bool isDefault = false) override;
   void unfixCentre() override;
-  void fixIntensity() override;
+  void fixIntensity(bool isDefault = false) override;
   void unfixIntensity() override;
 
   /// overwrite IFunction base class methods
diff --git a/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h b/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h
index 9fbf274107936a7ad7fefadd7ccfa286911ee7d8..09730cd87e0c4896a2b704dbcaecdfc4599228ac 100644
--- a/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h
+++ b/Framework/CurveFitting/inc/MantidCurveFitting/Jacobian.h
@@ -2,6 +2,7 @@
 #define MANTID_CURVEFITTING_GSLFUNCTIONS_H_
 
 #include "MantidAPI/Jacobian.h"
+#include "MantidKernel/Exception.h"
 #include <gsl/gsl_matrix.h>
 
 #include <vector>
@@ -74,7 +75,7 @@ public:
       throw std::out_of_range("Data index in Jacobian is out of range");
     }
     if (iP >= m_np) {
-      throw std::out_of_range("Parameter index in Jacobian is out of range");
+      throw Kernel::Exception::FitSizeWarning(m_np);
     }
     m_data[iY * m_np + iP] = value;
   }
@@ -84,7 +85,7 @@ public:
       throw std::out_of_range("Data index in Jacobian is out of range");
     }
     if (iP >= m_np) {
-      throw std::out_of_range("Parameter index in Jacobian is out of range");
+      throw Kernel::Exception::FitSizeWarning(m_np);
     }
     return m_data[iY * m_np + iP];
   }
diff --git a/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp b/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp
index 8dcd276acc41e5a3ae014d50d2680278d43057cc..25a5738c573e6e8b77c74aae6a309e03785c9846 100644
--- a/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp
+++ b/Framework/CurveFitting/src/Algorithms/CalculateChiSquared.cpp
@@ -117,7 +117,7 @@ void CalculateChiSquared::execConcrete() {
   // Get the number of free fitting parameters
   size_t nParams = 0;
   for (size_t i = 0; i < m_function->nParams(); ++i) {
-    if (!m_function->isFixed(i))
+    if (m_function->isActive(i))
       nParams += 1;
   }
 
@@ -658,7 +658,7 @@ void CalculateChiSquared::estimateErrors() {
 /// Temporary unfix any fixed parameters.
 void CalculateChiSquared::unfixParameters() {
   for (size_t i = 0; i < m_function->nParams(); ++i) {
-    if (m_function->isFixed(i)) {
+    if (!m_function->isActive(i)) {
       m_function->unfix(i);
       m_fixedParameters.push_back(i);
     }
diff --git a/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp b/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp
index 680aa066e296164a201a07e47cdfa715bd91bb87..2485a0cc1882e1119827512e8f49dd2109663eb1 100644
--- a/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp
+++ b/Framework/CurveFitting/src/Algorithms/EstimateFitParameters.cpp
@@ -65,7 +65,7 @@ void fixBadParameters(CostFunctions::CostFuncFitting &costFunction,
   std::vector<double> P, A, D;
   auto &fun = *costFunction.getFittingFunction();
   for (size_t i = 0, j = 0; i < fun.nParams(); ++i) {
-    if (fun.isFixed(i)) {
+    if (!fun.isActive(i)) {
       continue;
     }
     auto lBound = ranges[j].first;
@@ -372,7 +372,7 @@ void EstimateFitParameters::execConcrete() {
   std::vector<std::pair<double, double>> ranges;
   ranges.reserve(costFunction->nParams());
   for (size_t i = 0; i < func->nParams(); ++i) {
-    if (func->isFixed(i)) {
+    if (!func->isActive(i)) {
       continue;
     }
     auto constraint = func->getConstraint(i);
@@ -427,7 +427,7 @@ void EstimateFitParameters::execConcrete() {
       }
 
       for (size_t i = 0, ia = 0; i < m_function->nParams(); ++i) {
-        if (!m_function->isFixed(i)) {
+        if (m_function->isActive(i)) {
           TableRow row = table->appendRow();
           row << m_function->parameterName(i);
           for (size_t j = 0; j < output.size(); ++j) {
diff --git a/Framework/CurveFitting/src/Algorithms/Fit.cpp b/Framework/CurveFitting/src/Algorithms/Fit.cpp
index f9341b161e864f711dbd3e75f82c1d7083e7452e..112646cdac41be804e415309eb3aaf1c95c4278c 100644
--- a/Framework/CurveFitting/src/Algorithms/Fit.cpp
+++ b/Framework/CurveFitting/src/Algorithms/Fit.cpp
@@ -4,6 +4,7 @@
 #include "MantidCurveFitting/Algorithms/Fit.h"
 #include "MantidCurveFitting/CostFunctions/CostFuncFitting.h"
 
+#include "MantidAPI/CompositeFunction.h"
 #include "MantidAPI/FuncMinimizerFactory.h"
 #include "MantidAPI/IFuncMinimizer.h"
 #include "MantidAPI/ITableWorkspace.h"
@@ -12,8 +13,11 @@
 #include "MantidAPI/WorkspaceFactory.h"
 
 #include "MantidKernel/BoundedValidator.h"
+#include "MantidKernel/Exception.h"
 #include "MantidKernel/StartsWithValidator.h"
 
+#include <boost/make_shared.hpp>
+
 namespace Mantid {
 namespace CurveFitting {
 namespace Algorithms {
@@ -21,6 +25,9 @@ namespace Algorithms {
 // Register the class into the algorithm factory
 DECLARE_ALGORITHM(Fit)
 
+/// Default constructor
+Fit::Fit() : IFittingAlgorithm(), m_maxIterations() {}
+
 /** Initialisation method
 */
 void Fit::initConcrete() {
@@ -97,6 +104,32 @@ void Fit::initConcrete() {
                   "Output is an empty string).");
 }
 
+/// Read in the properties specific to Fit.
+void Fit::readProperties() {
+  std::string ties = getPropertyValue("Ties");
+  if (!ties.empty()) {
+    m_function->addTies(ties);
+  }
+  std::string contstraints = getPropertyValue("Constraints");
+  if (!contstraints.empty()) {
+    m_function->addConstraints(contstraints);
+  }
+
+  // Try to retrieve optional properties
+  int intMaxIterations = getProperty("MaxIterations");
+  m_maxIterations = static_cast<size_t>(intMaxIterations);
+}
+
+/// Initialize the minimizer for this fit.
+/// @param maxIterations :: Maximum number of iterations.
+void Fit::initializeMinimizer(size_t maxIterations) {
+  m_costFunction = getCostFunctionInitialized();
+  std::string minimizerName = getPropertyValue("Minimizer");
+  m_minimizer =
+      API::FuncMinimizerFactory::Instance().createMinimizer(minimizerName);
+  m_minimizer->initialize(m_costFunction, maxIterations);
+}
+
 /**
   * Copy all output workspace properties from the minimizer to Fit algorithm.
   * @param minimizer :: The minimizer to copy from.
@@ -113,89 +146,88 @@ void Fit::copyMinimizerOutput(const API::IFuncMinimizer &minimizer) {
   }
 }
 
-/** Executes the algorithm
-*
-*  @throw runtime_error Thrown if algorithm cannot execute
-*/
-void Fit::execConcrete() {
-
-  std::string ties = getPropertyValue("Ties");
-  if (!ties.empty()) {
-    m_function->addTies(ties);
-  }
-  std::string contstraints = getPropertyValue("Constraints");
-  if (!contstraints.empty()) {
-    m_function->addConstraints(contstraints);
-  }
-
-  auto costFunc = getCostFunctionInitialized();
-
-  // Try to retrieve optional properties
-  int intMaxIterations = getProperty("MaxIterations");
-  const size_t maxIterations = static_cast<size_t>(intMaxIterations);
-
-  // get the minimizer
-  std::string minimizerName = getPropertyValue("Minimizer");
-  API::IFuncMinimizer_sptr minimizer =
-      API::FuncMinimizerFactory::Instance().createMinimizer(minimizerName);
-  minimizer->initialize(costFunc, maxIterations);
-
-  const int64_t nsteps = maxIterations * m_function->estimateNoProgressCalls();
-  API::Progress prog(this, 0.0, 1.0, nsteps);
-  m_function->setProgressReporter(&prog);
+/// Run the minimizer's iteration loop.
+/// @returns :: Number of actual iterations.
+size_t Fit::runMinimizer() {
+  const int64_t nsteps =
+      m_maxIterations * m_function->estimateNoProgressCalls();
+  auto prog = boost::make_shared<API::Progress>(this, 0.0, 1.0, nsteps);
+  m_function->setProgressReporter(prog);
 
   // do the fitting until success or iteration limit is reached
   size_t iter = 0;
-  bool success = false;
-  std::string errorString;
+  bool isFinished = false;
   g_log.debug("Starting minimizer iteration\n");
-  while (iter < maxIterations) {
+  while (iter < m_maxIterations) {
     g_log.debug() << "Starting iteration " << iter << "\n";
-    m_function->iterationStarting();
-    if (!minimizer->iterate(iter)) {
-      errorString = minimizer->getError();
-      g_log.debug() << "Iteration stopped. Minimizer status string="
-                    << errorString << "\n";
-
-      success = errorString.empty() || errorString == "success";
-      if (success) {
-        errorString = "success";
+    try {
+      // Perform a single iteration. isFinished is set when minimizer wants to
+      // quit.
+      m_function->iterationStarting();
+      isFinished = !m_minimizer->iterate(iter);
+      m_function->iterationFinished();
+    } catch (Kernel::Exception::FitSizeWarning &) {
+      // This is an attempt to recover after the function changes its number of
+      // parameters or ties during the iteration.
+      if (auto cf = dynamic_cast<API::CompositeFunction *>(m_function.get())) {
+        // Make sure the composite function is valid.
+        cf->checkFunction();
       }
+      // Re-create the cost function and minimizer.
+      initializeMinimizer(m_maxIterations - iter);
+    }
+
+    prog->report();
+
+    if (isFinished) {
+      // It was the last iteration. Break out of the loop and return the number
+      // of finished iterations.
       break;
     }
-    prog.report();
-    m_function->iterationFinished();
     ++iter;
   }
   g_log.debug() << "Number of minimizer iterations=" << iter << "\n";
+  return iter;
+}
+
+/// Finalize the minimizer.
+/// @param nIterations :: The actual number of iterations done by the minimizer.
+void Fit::finalizeMinimizer(size_t nIterations) {
+  m_minimizer->finalize();
 
-  minimizer->finalize();
+  auto errorString = m_minimizer->getError();
+  g_log.debug() << "Iteration stopped. Minimizer status string=" << errorString
+                << "\n";
 
-  if (iter >= maxIterations) {
+  bool success = errorString.empty() || errorString == "success";
+  if (success) {
+    errorString = "success";
+  }
+
+  if (nIterations >= m_maxIterations) {
     if (!errorString.empty()) {
       errorString += '\n';
     }
-    errorString += "Failed to converge after " + std::to_string(maxIterations) +
-                   " iterations.";
+    errorString += "Failed to converge after " +
+                   std::to_string(m_maxIterations) + " iterations.";
   }
 
   // return the status flag
   setPropertyValue("OutputStatus", errorString);
+}
+
+/// Create algorithm output worksapces.
+void Fit::createOutput() {
 
   // degrees of freedom
-  size_t dof = costFunc->getDomain()->size() - costFunc->nParams();
+  size_t dof = m_costFunction->getDomain()->size() - m_costFunction->nParams();
   if (dof == 0)
     dof = 1;
-  double rawcostfuncval = minimizer->costFunctionVal();
+  double rawcostfuncval = m_minimizer->costFunctionVal();
   double finalCostFuncVal = rawcostfuncval / double(dof);
 
   setProperty("OutputChi2overDoF", finalCostFuncVal);
 
-  // fit ended, creating output
-
-  // get the workspace
-  API::Workspace_const_sptr ws = getProperty("InputWorkspace");
-
   bool doCreateOutput = getProperty("CreateOutput");
   std::string baseName = getPropertyValue("Output");
   if (!baseName.empty()) {
@@ -205,19 +237,22 @@ void Fit::execConcrete() {
   if (doCreateOutput) {
     doCalcErrors = true;
   }
-  if (costFunc->nParams() == 0) {
+  if (m_costFunction->nParams() == 0) {
     doCalcErrors = false;
   }
 
   GSLMatrix covar;
   if (doCalcErrors) {
     // Calculate the covariance matrix and the errors.
-    costFunc->calCovarianceMatrix(covar);
-    costFunc->calFittingErrors(covar, rawcostfuncval);
+    m_costFunction->calCovarianceMatrix(covar);
+    m_costFunction->calFittingErrors(covar, rawcostfuncval);
   }
 
   if (doCreateOutput) {
-    copyMinimizerOutput(*minimizer);
+    copyMinimizerOutput(*m_minimizer);
+
+    // get the workspace
+    API::Workspace_const_sptr ws = getProperty("InputWorkspace");
 
     if (baseName.empty()) {
       baseName = ws->getName();
@@ -240,25 +275,22 @@ void Fit::execConcrete() {
     covariance->addColumn("str", "Name");
     // set plot type to Label = 6
     covariance->getColumn(covariance->columnCount() - 1)->setPlotType(6);
-    // std::vector<std::string> paramThatAreFitted; // used for populating 1st
-    // "name" column
     for (size_t i = 0; i < m_function->nParams(); i++) {
       if (m_function->isActive(i)) {
         covariance->addColumn("double", m_function->parameterName(i));
-        // paramThatAreFitted.push_back(m_function->parameterName(i));
       }
     }
 
     size_t np = m_function->nParams();
     size_t ia = 0;
     for (size_t i = 0; i < np; i++) {
-      if (m_function->isFixed(i))
+      if (!m_function->isActive(i))
         continue;
       Mantid::API::TableRow row = covariance->appendRow();
       row << m_function->parameterName(i);
       size_t ja = 0;
       for (size_t j = 0; j < np; j++) {
-        if (m_function->isFixed(j))
+        if (!m_function->isActive(j))
           continue;
         if (j == i)
           row << 100.0;
@@ -307,23 +339,14 @@ void Fit::execConcrete() {
     }
     // Add chi-squared value at the end of parameter table
     Mantid::API::TableRow row = result->appendRow();
-#if 1
+
     std::string costfuncname = getPropertyValue("CostFunction");
     if (costfuncname == "Rwp")
       row << "Cost function value" << rawcostfuncval;
     else
       row << "Cost function value" << finalCostFuncVal;
-    setProperty("OutputParameters", result);
-#else
-    row << "Cost function value" << finalCostFuncVal;
-    Mantid::API::TableRow row2 = result->appendRow();
-    std::string name(getPropertyValue("CostFunction"));
-    name += " value";
-    row2 << name << rawcostfuncval;
-#endif
 
     setProperty("OutputParameters", result);
-
     bool outputParametersOnly = getProperty("OutputParametersOnly");
 
     if (!outputParametersOnly) {
@@ -334,10 +357,33 @@ void Fit::execConcrete() {
       }
       m_domainCreator->separateCompositeMembersInOutput(unrollComposites,
                                                         convolveMembers);
-      m_domainCreator->createOutputWorkspace(
-          baseName, m_function, costFunc->getDomain(), costFunc->getValues());
+      m_domainCreator->createOutputWorkspace(baseName, m_function,
+                                             m_costFunction->getDomain(),
+                                             m_costFunction->getValues());
     }
   }
+}
+
+/** Executes the algorithm
+*
+*  @throw runtime_error Thrown if algorithm cannot execute
+*/
+void Fit::execConcrete() {
+
+  // Read Fit's own properties
+  readProperties();
+
+  // Get the minimizer
+  initializeMinimizer(m_maxIterations);
+
+  // Run the minimizer
+  auto nIterations = runMinimizer();
+
+  // Finilize the minimizer.
+  finalizeMinimizer(nIterations);
+
+  // fit ended, creating output
+  createOutput();
 
   progress(1.0);
 }
diff --git a/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp b/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp
index c4271f59cdfb37eb93ad3201f8ca5564561dfe23..7671a6f74f8202c9a4ffad876d660c30d76fb00c 100644
--- a/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp
+++ b/Framework/CurveFitting/src/Algorithms/FitPowderDiffPeaks.cpp
@@ -3102,7 +3102,7 @@ string getFunctionInfo(IFunction_sptr function) {
   outss << "Number of Parameters = " << numpars << '\n';
   for (size_t i = 0; i < numpars; ++i)
     outss << parnames[i] << " = " << function->getParameter(i)
-          << ", \t\tFitted = " << !function->isFixed(i) << '\n';
+          << ", \t\tFitted = " << function->isActive(i) << '\n';
 
   return outss.str();
 }
diff --git a/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp b/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp
index 9c9fe229f0f55b6617e23e5ad150fd7da6315ef7..ae0a4d084156beec33f401813a0fdc83445f44b0 100644
--- a/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp
+++ b/Framework/CurveFitting/src/Algorithms/RefinePowderInstrumentParameters3.cpp
@@ -822,7 +822,7 @@ double RefinePowderInstrumentParameters3::calculateFunctionError(
   vector<bool> vecFix(parnames.size(), false);
 
   for (size_t i = 0; i < parnames.size(); ++i) {
-    bool fixed = function->isFixed(i);
+    bool fixed = !function->isActive(i);
     vecFix[i] = fixed;
     if (!fixed)
       function->fix(i);
diff --git a/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp b/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp
index 86f3f52be2a7fe4549198c0a4926b5a0c3d1074a..823084b4ee9afccc8018d5e9dcf83d376c1d6ec8 100644
--- a/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp
+++ b/Framework/CurveFitting/src/Constraints/BoundaryConstraint.cpp
@@ -4,9 +4,11 @@
 #include "MantidCurveFitting/Constraints/BoundaryConstraint.h"
 #include "MantidAPI/Expression.h"
 #include "MantidAPI/ConstraintFactory.h"
+#include "MantidAPI/IFunction.h"
 #include "MantidKernel/Logger.h"
 #include <boost/lexical_cast.hpp>
 #include <sstream>
+#include <iostream>
 
 namespace Mantid {
 namespace CurveFitting {
@@ -21,6 +23,19 @@ DECLARE_CONSTRAINT(BoundaryConstraint)
 // using namespace Kernel;
 using namespace API;
 
+/// Default constructor
+BoundaryConstraint::BoundaryConstraint()
+    : API::IConstraint(), m_penaltyFactor(1000.0), m_hasLowerBound(false),
+      m_hasUpperBound(false), m_lowerBound(DBL_MAX), m_upperBound(-DBL_MAX) {}
+
+/// Constructor with no boundary arguments
+/// @param paramName :: The parameter name
+BoundaryConstraint::BoundaryConstraint(const std::string &paramName)
+    : API::IConstraint(), m_penaltyFactor(1000.0), m_hasLowerBound(false),
+      m_hasUpperBound(false) {
+  UNUSED_ARG(paramName);
+}
+
 /** Constructor with boundary arguments
  * @param fun :: The function
  * @param paramName :: The parameter name
@@ -34,18 +49,16 @@ BoundaryConstraint::BoundaryConstraint(API::IFunction *fun,
                                        const std::string paramName,
                                        const double lowerBound,
                                        const double upperBound, bool isDefault)
-    : m_penaltyFactor(1000.0), m_parameterName(paramName),
-      m_hasLowerBound(true), m_hasUpperBound(true), m_lowerBound(lowerBound),
-      m_upperBound(upperBound) {
+    : m_penaltyFactor(1000.0), m_hasLowerBound(true), m_hasUpperBound(true),
+      m_lowerBound(lowerBound), m_upperBound(upperBound) {
   reset(fun, fun->parameterIndex(paramName), isDefault);
 }
 
 BoundaryConstraint::BoundaryConstraint(API::IFunction *fun,
                                        const std::string paramName,
                                        const double lowerBound, bool isDefault)
-    : m_penaltyFactor(1000.0), m_parameterName(paramName),
-      m_hasLowerBound(true), m_hasUpperBound(false), m_lowerBound(lowerBound),
-      m_upperBound(-DBL_MAX) {
+    : m_penaltyFactor(1000.0), m_hasLowerBound(true), m_hasUpperBound(false),
+      m_lowerBound(lowerBound), m_upperBound(-DBL_MAX) {
   reset(fun, fun->parameterIndex(paramName), isDefault);
 }
 
@@ -113,7 +126,6 @@ void BoundaryConstraint::initialize(API::IFunction *fun,
   try {
     size_t i = fun->parameterIndex(parName);
     reset(fun, i, isDefault);
-    m_parameterName = parName;
   } catch (...) {
     g_log.error() << "Parameter " << parName << " not found in function "
                   << fun->name() << '\n';
@@ -146,31 +158,29 @@ void BoundaryConstraint::setParamToSatisfyConstraint() {
   if (!(m_hasLowerBound || m_hasUpperBound)) {
     g_log.warning()
         << "No bounds have been set on BoundaryConstraint for parameter "
-        << m_parameterName << ". Therefore"
+        << parameterName() << ". Therefore"
         << " this constraint serves no purpose!";
     return;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
-  if (m_hasLowerBound)
-    if (paramValue < m_lowerBound)
-      getFunction()->setParameter(getIndex(), m_lowerBound, false);
-  if (m_hasUpperBound)
-    if (paramValue > m_upperBound)
-      getFunction()->setParameter(getIndex(), m_upperBound, false);
+  if (m_hasLowerBound && paramValue < m_lowerBound)
+    setParameter(m_lowerBound, false);
+  if (m_hasUpperBound && paramValue > m_upperBound)
+    setParameter(m_upperBound, false);
 }
 
 double BoundaryConstraint::check() {
   if (!(m_hasLowerBound || m_hasUpperBound)) {
     g_log.warning()
         << "No bounds have been set on BoundaryConstraint for parameter "
-        << m_parameterName << ". Therefore"
+        << parameterName() << ". Therefore"
         << " this constraint serves no purpose!";
     return 0.0;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
   double penalty = 0.0;
 
@@ -198,7 +208,7 @@ double BoundaryConstraint::checkDeriv() {
     return penalty;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
   if (m_hasLowerBound)
     if (paramValue < m_lowerBound) {
@@ -224,7 +234,7 @@ double BoundaryConstraint::checkDeriv2() {
     return penalty;
   }
 
-  double paramValue = getFunction()->getParameter(getIndex());
+  double paramValue = getParameter();
 
   if (m_hasLowerBound)
     if (paramValue < m_lowerBound)
@@ -241,7 +251,7 @@ std::string BoundaryConstraint::asString() const {
   if (m_hasLowerBound) {
     ostr << m_lowerBound << '<';
   }
-  ostr << getFunction()->parameterName(getIndex());
+  ostr << parameterName();
   if (m_hasUpperBound) {
     ostr << '<' << m_upperBound;
   }
diff --git a/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp b/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp
index aaa5aef879b33277ea5059d91e9566aba6553c95..65423f52afde1892047b246890e9f20978767a95 100644
--- a/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp
+++ b/Framework/CurveFitting/src/CostFunctions/CostFuncFitting.cpp
@@ -4,6 +4,7 @@
 #include "MantidCurveFitting/CostFunctions/CostFuncFitting.h"
 #include "MantidCurveFitting/GSLJacobian.h"
 #include "MantidAPI/IConstraint.h"
+#include "MantidKernel/Exception.h"
 
 #include <gsl/gsl_multifit_nlin.h>
 #include <limits>
@@ -163,12 +164,12 @@ void CostFuncFitting::calFittingErrors(const GSLMatrix &covar, double chi2) {
       new Kernel::Matrix<double>(np, np));
   size_t ia = 0;
   for (size_t i = 0; i < np; ++i) {
-    if (m_function->isFixed(i)) {
+    if (!m_function->isActive(i)) {
       m_function->setError(i, 0);
     } else {
       size_t ja = 0;
       for (size_t j = 0; j < np; ++j) {
-        if (!m_function->isFixed(j)) {
+        if (m_function->isActive(j)) {
           (*covarMatrix)[i][j] = covar.get(ia, ja);
           ++ja;
         }
@@ -193,7 +194,7 @@ void CostFuncFitting::calTransformationMatrixNumerically(GSLMatrix &tm) {
   tm.resize(na, na);
   size_t ia = 0;
   for (size_t i = 0; i < np; ++i) {
-    if (m_function->isFixed(i))
+    if (!m_function->isActive(i))
       continue;
     double p0 = m_function->getParameter(i);
     for (size_t j = 0; j < na; ++j) {
@@ -236,11 +237,11 @@ void CostFuncFitting::reset() const {
  * @param params :: A vector to copy the parameters from
  */
 void CostFuncFitting::setParameters(const GSLVector &params) {
-  if (nParams() != params.size()) {
-    throw std::runtime_error(
-        "Parameter vector has wrong size in CostFuncLeastSquares.");
+  auto np = nParams();
+  if (np != params.size()) {
+    throw Kernel::Exception::FitSizeWarning(params.size(), np);
   }
-  for (size_t i = 0; i < nParams(); ++i) {
+  for (size_t i = 0; i < np; ++i) {
     setParameter(i, params.get(i));
   }
   m_function->applyTies();
@@ -251,10 +252,11 @@ void CostFuncFitting::setParameters(const GSLVector &params) {
  * @param params :: A vector to copy the parameters to
  */
 void CostFuncFitting::getParameters(GSLVector &params) const {
-  if (params.size() != nParams()) {
-    params.resize(nParams());
+  auto np = nParams();
+  if (params.size() != np) {
+    params.resize(np);
   }
-  for (size_t i = 0; i < nParams(); ++i) {
+  for (size_t i = 0; i < np; ++i) {
     params.set(i, getParameter(i));
   }
 }
diff --git a/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp b/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp
index 9e173b59b4e2e00bee7402d495b4473c2cd4d1d4..21f3835ec015550a0f10fe8d559310e1db8c77c7 100644
--- a/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp
+++ b/Framework/CurveFitting/src/FuncMinimizers/LevenbergMarquardtMDMinimizer.cpp
@@ -177,11 +177,13 @@ bool LevenbergMarquardtMDMinimizer::iterate(size_t) {
   // save previous state
   m_leastSquares->push();
   // Update the parameters of the cost function.
-  for (size_t i = 0; i < n; ++i) {
-    double d = m_leastSquares->getParameter(i) + dx.get(i);
-    m_leastSquares->setParameter(i, d);
-    if (debug) {
-      g_log.warning() << "Parameter(" << i << ")=" << d << '\n';
+  GSLVector parameters(n);
+  m_leastSquares->getParameters(parameters);
+  parameters += dx;
+  m_leastSquares->setParameters(parameters);
+  if (debug) {
+    for (size_t i = 0; i < n; ++i) {
+      g_log.warning() << "Parameter(" << i << ")=" << parameters[i] << '\n';
     }
   }
   m_leastSquares->getFittingFunction()->applyTies();
diff --git a/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp b/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp
index 665f81cffca2e0b9dc4ff1522bd8756815856911..c8c203e55af27f2cb21351c049469fde7b45138a 100644
--- a/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp
+++ b/Framework/CurveFitting/src/Functions/ComptonScatteringCountRate.cpp
@@ -312,7 +312,7 @@ void ComptonScatteringCountRate::cacheComptonProfile(
   auto fixedParams = profile->intensityParameterIndices();
   for (auto fixedParam : fixedParams) {
     const size_t indexOfFixed = paramsOffset + fixedParam;
-    this->fix(indexOfFixed);
+    this->setParameterStatus(indexOfFixed, Tied);
     m_fixedParamIndices.push_back(indexOfFixed);
   }
 }
@@ -330,12 +330,11 @@ void ComptonScatteringCountRate::cacheBackground(
     const size_t npars =
         static_cast<size_t>(m_bkgdPolyN + 1); // + constant term
     // we assume the parameters are at index 0->N on the background so we need
-    // to
-    // reverse them
+    // to reverse them
     for (size_t i = npars; i > 0; --i) // i = from npars->1
     {
       const size_t indexOfFixed = paramsOffset + (i - 1);
-      this->fix(indexOfFixed);
+      this->setParameterStatus(indexOfFixed, Tied);
       m_fixedParamIndices.push_back(indexOfFixed);
     }
   } else {
diff --git a/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp b/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp
index 62bda55ed9eb48d39d9e07a6d5b5ab4f66b1fbec..7dec77fd50fe295284bc82eff16b8b33c8432078 100644
--- a/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp
+++ b/Framework/CurveFitting/src/Functions/CrystalFieldMultiSpectrum.cpp
@@ -15,6 +15,7 @@
 #include "MantidAPI/ParameterTie.h"
 
 #include "MantidKernel/Exception.h"
+#include <iostream>
 
 namespace Mantid {
 namespace CurveFitting {
@@ -110,6 +111,7 @@ size_t CrystalFieldMultiSpectrum::getNumberDomains() const {
 
 std::vector<IFunction_sptr>
 CrystalFieldMultiSpectrum::createEquivalentFunctions() const {
+  checkTargetFunction();
   std::vector<IFunction_sptr> funs;
   auto &composite = dynamic_cast<CompositeFunction &>(*m_target);
   for (size_t i = 0; i < composite.nFunctions(); ++i) {
@@ -123,24 +125,24 @@ void CrystalFieldMultiSpectrum::setAttribute(const std::string &name,
                                              const Attribute &attr) {
   if (name == "Temperatures") {
     // Define (declare) the parameters for intensity scaling.
-    auto nSpec = attr.asVector().size();
+    const auto nSpec = attr.asVector().size();
     dynamic_cast<Peaks &>(*m_source).declareIntensityScaling(nSpec);
     m_nOwnParams = m_source->nParams();
     m_fwhmX.resize(nSpec);
     m_fwhmY.resize(nSpec);
     for (size_t iSpec = 0; iSpec < nSpec; ++iSpec) {
-      auto suffix = std::to_string(iSpec);
+      const auto suffix = std::to_string(iSpec);
       declareAttribute("FWHMX" + suffix, Attribute(m_fwhmX[iSpec]));
       declareAttribute("FWHMY" + suffix, Attribute(m_fwhmY[iSpec]));
     }
   }
   if (name == "PhysicalProperties") {
-    auto physpropId = attr.asVector();
-    auto nSpec = physpropId.size();
+    const auto physpropId = attr.asVector();
+    const auto nSpec = physpropId.size();
     auto &source = dynamic_cast<Peaks &>(*m_source);
     for (size_t iSpec = 0; iSpec < nSpec; ++iSpec) {
-      auto suffix = std::to_string(iSpec);
-      auto pptype = static_cast<int>(physpropId[iSpec]);
+      const auto suffix = std::to_string(iSpec);
+      const auto pptype = static_cast<int>(physpropId[iSpec]);
       switch (pptype) {
       case MagneticMoment: // Hmag, Hdir, inverse, Unit, powder
         declareAttribute("Hmag" + suffix, Attribute(1.0));
@@ -182,32 +184,34 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
   ham += hz;
 
   // Get the temperatures from the attribute
-  auto temperatures = getAttribute("Temperatures").asVector();
-  if (temperatures.empty()) {
+  m_temperatures = getAttribute("Temperatures").asVector();
+  if (m_temperatures.empty()) {
     throw std::runtime_error("Vector of temperatures cannot be empty.");
   }
   // Get the FWHMs from the attribute and check for consistency.
-  auto fwhms = getAttribute("FWHMs").asVector();
-  if (fwhms.size() != temperatures.size()) {
-    if (fwhms.empty()) {
+  m_FWHMs = getAttribute("FWHMs").asVector();
+  if (m_FWHMs.size() != m_temperatures.size()) {
+    if (m_FWHMs.empty()) {
       throw std::runtime_error("Vector of FWHMs cannot be empty.");
     }
-    if (fwhms.size() == 1) {
-      auto fwhm = fwhms.front();
-      fwhms.resize(temperatures.size(), fwhm);
+    if (m_FWHMs.size() == 1) {
+      auto fwhm = m_FWHMs.front();
+      m_FWHMs.resize(m_temperatures.size(), fwhm);
     } else {
       throw std::runtime_error("Vector of FWHMs must either have same size as "
-                               "Temperatures or have size 1.");
+                               "Temperatures (" +
+                               std::to_string(m_temperatures.size()) +
+                               ") or have size 1.");
     }
   }
-  auto nSpec = temperatures.size();
+  const auto nSpec = m_temperatures.size();
   // Get a list of "spectra" which corresponds to physical properties
-  auto physprops = getAttribute("PhysicalProperties").asVector();
+  const auto physprops = getAttribute("PhysicalProperties").asVector();
   if (physprops.empty()) {
     m_physprops.resize(nSpec, 0); // Assume no physical properties - just INS
   } else if (physprops.size() != nSpec) {
     if (physprops.size() == 1) {
-      int physprop = (int)physprops.front();
+      int physprop = static_cast<int>(physprops.front());
       m_physprops.resize(nSpec, physprop);
     } else {
       throw std::runtime_error("Vector of PhysicalProperties must have same "
@@ -216,7 +220,7 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
   } else {
     m_physprops.clear();
     for (auto elem : physprops) {
-      m_physprops.push_back((int)elem);
+      m_physprops.push_back(static_cast<int>(elem));
     }
   }
   // Create the single-spectrum functions.
@@ -228,7 +232,7 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
   for (size_t i = 0; i < nSpec; ++i) {
     if (m_physprops[i] > 0) {
       // This "spectrum" is actually a physical properties dataset.
-      fun->addFunction(buildPhysprop(nre, en, wf, ham, temperatures[i], i));
+      fun->addFunction(buildPhysprop(nre, en, wf, ham, m_temperatures[i], i));
     } else {
       if (m_fwhmX[i].empty()) {
         auto suffix = std::to_string(i);
@@ -236,7 +240,7 @@ void CrystalFieldMultiSpectrum::buildTargetFunction() const {
         m_fwhmY[i] = IFunction::getAttribute("FWHMY" + suffix).asVector();
       }
       fun->addFunction(
-          buildSpectrum(nre, en, wf, temperatures[i], fwhms[i], i));
+          buildSpectrum(nre, en, wf, m_temperatures[i], m_FWHMs[i], i));
     }
     fun->setDomainIndex(i, i);
   }
@@ -267,7 +271,7 @@ void CrystalFieldMultiSpectrum::calcExcitations(
   } else {
     intensityScaling = getParameter(source.m_IntensityScalingIdx[iSpec]);
   }
-  auto nPeaks = eExcitations.size();
+  const auto nPeaks = eExcitations.size();
   values.expand(2 * nPeaks);
   for (size_t i = 0; i < nPeaks; ++i) {
     values.setCalculated(i, eExcitations.get(i));
@@ -283,11 +287,11 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildSpectrum(
   calcExcitations(nre, en, wf, temperature, values, iSpec);
   m_nPeaks[iSpec] = CrystalFieldUtils::calculateNPeaks(values);
 
-  auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
-  auto peakShape = IFunction::getAttribute("PeakShape").asString();
+  const auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+  const auto peakShape = IFunction::getAttribute("PeakShape").asString();
   auto bkgdShape = IFunction::getAttribute("Background").asUnquotedString();
-  size_t nRequiredPeaks = IFunction::getAttribute("NPeaks").asInt();
-  bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
+  const size_t nRequiredPeaks = IFunction::getAttribute("NPeaks").asInt();
+  const bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
 
   if (!bkgdShape.empty() && bkgdShape.find("name=") != 0 &&
       bkgdShape.front() != '(') {
@@ -322,7 +326,7 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildPhysprop(
     IFunction_sptr retval = IFunction_sptr(new CrystalFieldSusceptibility);
     auto &spectrum = dynamic_cast<CrystalFieldSusceptibility &>(*retval);
     spectrum.setEigensystem(en, wf, nre);
-    auto suffix = std::to_string(iSpec);
+    const auto suffix = std::to_string(iSpec);
     spectrum.setAttribute("Hdir", getAttribute("Hdir" + suffix));
     spectrum.setAttribute("inverse", getAttribute("inverse" + suffix));
     spectrum.setAttribute("powder", getAttribute("powder" + suffix));
@@ -335,7 +339,7 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildPhysprop(
     auto &spectrum = dynamic_cast<CrystalFieldMagnetisation &>(*retval);
     spectrum.setHamiltonian(ham, nre);
     spectrum.setAttribute("Temperature", Attribute(temperature));
-    auto suffix = std::to_string(iSpec);
+    const auto suffix = std::to_string(iSpec);
     spectrum.setAttribute("Unit", getAttribute("Unit" + suffix));
     spectrum.setAttribute("Hdir", getAttribute("Hdir" + suffix));
     spectrum.setAttribute("powder", getAttribute("powder" + suffix));
@@ -345,7 +349,7 @@ API::IFunction_sptr CrystalFieldMultiSpectrum::buildPhysprop(
     IFunction_sptr retval = IFunction_sptr(new CrystalFieldMoment);
     auto &spectrum = dynamic_cast<CrystalFieldMoment &>(*retval);
     spectrum.setHamiltonian(ham, nre);
-    auto suffix = std::to_string(iSpec);
+    const auto suffix = std::to_string(iSpec);
     spectrum.setAttribute("Unit", getAttribute("Unit" + suffix));
     spectrum.setAttribute("Hdir", getAttribute("Hdir" + suffix));
     spectrum.setAttribute("Hmag", getAttribute("Hmag" + suffix));
@@ -374,11 +378,11 @@ void CrystalFieldMultiSpectrum::updateTargetFunction() const {
   peakCalculator.calculateEigenSystem(en, wf, ham, hz, nre);
   ham += hz;
 
-  auto temperatures = getAttribute("Temperatures").asVector();
   auto &fun = dynamic_cast<MultiDomainFunction &>(*m_target);
   try {
-    for (size_t i = 0; i < temperatures.size(); ++i) {
-      updateSpectrum(*fun.getFunction(i), nre, en, wf, ham, temperatures[i], i);
+    for (size_t i = 0; i < m_temperatures.size(); ++i) {
+      updateSpectrum(*fun.getFunction(i), nre, en, wf, ham, m_temperatures[i],
+                     m_FWHMs[i], i);
     }
   } catch (std::out_of_range &) {
     buildTargetFunction();
@@ -390,7 +394,7 @@ void CrystalFieldMultiSpectrum::updateTargetFunction() const {
 void CrystalFieldMultiSpectrum::updateSpectrum(
     API::IFunction &spectrum, int nre, const DoubleFortranVector &en,
     const ComplexFortranMatrix &wf, const ComplexFortranMatrix &ham,
-    double temperature, size_t iSpec) const {
+    double temperature, double fwhm, size_t iSpec) const {
   switch (m_physprops[iSpec]) {
   case HeatCapacity: {
     auto &heatcap = dynamic_cast<CrystalFieldHeatCapacity &>(spectrum);
@@ -416,13 +420,15 @@ void CrystalFieldMultiSpectrum::updateSpectrum(
     break;
   }
   default:
-    auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+    const auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+    const auto peakShape = IFunction::getAttribute("PeakShape").asString();
+    const bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
     FunctionValues values;
     calcExcitations(nre, en, wf, temperature, values, iSpec);
     auto &composite = dynamic_cast<API::CompositeFunction &>(spectrum);
     m_nPeaks[iSpec] = CrystalFieldUtils::updateSpectrumFunction(
-        composite, values, m_nPeaks[iSpec], 1, m_fwhmX[iSpec], m_fwhmY[iSpec],
-        fwhmVariation);
+        composite, peakShape, values, 1, m_fwhmX[iSpec], m_fwhmY[iSpec],
+        fwhmVariation, fwhm, fixAllPeaks);
   }
 }
 
diff --git a/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp b/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp
index 0831b47dff7622d6290b92f0477fe80761fae177..55606bca66b3444c41c3e70c5d6bfc5b3f495594 100644
--- a/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp
+++ b/Framework/CurveFitting/src/Functions/CrystalFieldPeakUtils.cpp
@@ -8,6 +8,7 @@
 
 #include <algorithm>
 #include <math.h>
+#include <iostream>
 
 namespace Mantid {
 namespace CurveFitting {
@@ -96,10 +97,77 @@ size_t calculateMaxNPeaks(size_t nPeaks) { return nPeaks + nPeaks / 2 + 1; }
 /// @param fwhm :: A width value to pass to the peak.
 inline void ignorePeak(API::IPeakFunction &peak, double fwhm) {
   peak.setHeight(0.0);
-  peak.fixAll();
+  peak.fixAll(true);
   peak.setFwhm(fwhm);
 }
 
+/// Set the width of a peak.
+/// @param peak :: A peak function to set width to.
+/// @param centre :: Peak centre.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param defaultFWHM :: A default value for the FWHM to use if xVec and yVec
+///        are empty.
+/// @param useDefaultFWHM :: If true always use defaultFWHM for the width.
+void setPeakWidth(API::IPeakFunction &peak, double centre,
+                  const std::vector<double> &xVec,
+                  const std::vector<double> &yVec, double fwhmVariation,
+                  double defaultFWHM, bool useDefaultFWHM) {
+  if (useDefaultFWHM) {
+    peak.setFwhm(defaultFWHM);
+  } else {
+    auto fwhm = calculateWidth(centre, xVec, yVec);
+    if (fwhm > 0.0) {
+      peak.setFwhm(fwhm);
+      setWidthConstraint(peak, fwhm, fwhmVariation);
+    } else {
+      ignorePeak(peak, defaultFWHM);
+    }
+  }
+}
+
+/// Create a single peak.
+/// @param peakShape :: A shape of the of created peak as a name of an
+/// IPeakFunction.
+/// @param centre :: Peak centre.
+/// @param intensity :: Integrated intensity of the peak.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param defaultFWHM :: A default value for the FWHM to use if xVec and yVec
+///        are empty.
+/// @param isGood :: If the peak good and may have free fitting parameters.
+/// @param fixAllPeaks :: If true all parameters should be fixed.
+API::IPeakFunction_sptr createPeak(const std::string &peakShape, double centre,
+                                   double intensity,
+                                   const std::vector<double> &xVec,
+                                   const std::vector<double> &yVec,
+                                   double fwhmVariation, double defaultFWHM,
+                                   bool isGood, bool fixAllPeaks) {
+  auto fun = API::FunctionFactory::Instance().createFunction(peakShape);
+  auto peak = boost::dynamic_pointer_cast<API::IPeakFunction>(fun);
+  if (!peak) {
+    throw std::runtime_error("A peak function is expected.");
+  }
+  bool useDefaultFWHM = xVec.empty();
+  const bool fixByDefault = true;
+  if (isGood) {
+    peak->setCentre(centre);
+    peak->setIntensity(intensity);
+    setPeakWidth(*peak, centre, xVec, yVec, fwhmVariation, defaultFWHM,
+                 useDefaultFWHM);
+    peak->fixCentre(fixByDefault);
+    peak->fixIntensity(fixByDefault);
+  } else {
+    ignorePeak(*peak, defaultFWHM);
+  }
+  if (fixAllPeaks) {
+    peak->fixAll(fixByDefault);
+  }
+  return peak;
+}
+
 /// Populates a spectrum with peaks of type given by peakShape argument.
 /// @param spectrum :: A composite function that is a collection of peaks.
 /// @param peakShape :: A shape of each peak as a name of an IPeakFunction.
@@ -125,97 +193,129 @@ size_t buildSpectrumFunction(API::CompositeFunction &spectrum,
     throw std::runtime_error("WidthX and WidthY must have the same size.");
   }
 
-  bool useDefaultFWHM = xVec.empty();
   auto nPeaks = calculateNPeaks(centresAndIntensities);
   auto maxNPeaks = calculateMaxNPeaks(nPeaks);
   if (nRequiredPeaks > maxNPeaks) {
     maxNPeaks = nRequiredPeaks;
   }
   for (size_t i = 0; i < maxNPeaks; ++i) {
-    auto fun = API::FunctionFactory::Instance().createFunction(peakShape);
-    auto peak = boost::dynamic_pointer_cast<API::IPeakFunction>(fun);
-    if (!peak) {
-      throw std::runtime_error("A peak function is expected.");
-    }
-    if (i < nPeaks) {
-      auto centre = centresAndIntensities.getCalculated(i);
-      peak->setCentre(centre);
-      peak->setIntensity(centresAndIntensities.getCalculated(i + nPeaks));
-      if (useDefaultFWHM) {
-        peak->setFwhm(defaultFWHM);
-      } else {
-        auto fwhm = calculateWidth(centre, xVec, yVec);
-        if (fwhm > 0.0) {
-          peak->setFwhm(fwhm);
-          setWidthConstraint(*peak, fwhm, fwhmVariation);
-        } else {
-          ignorePeak(*peak, defaultFWHM);
-        }
-      }
-      peak->fixCentre();
-      peak->fixIntensity();
-    } else {
-      ignorePeak(*peak, defaultFWHM);
+    const bool isGood = i < nPeaks;
+    const auto centre = isGood ? centresAndIntensities.getCalculated(i) : 0.0;
+    const auto intensity =
+        isGood ? centresAndIntensities.getCalculated(i + nPeaks) : 0.0;
+    auto peak = createPeak(peakShape, centre, intensity, xVec, yVec,
+                           fwhmVariation, defaultFWHM, isGood, fixAllPeaks);
+    spectrum.addFunction(peak);
+  }
+  return nPeaks;
+}
+
+/// Update width of a peak.
+/// @param peak :: A peak to update.
+/// @param centre :: Peak centre.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+void updatePeakWidth(API::IPeakFunction &peak, double centre,
+                     const std::vector<double> &xVec,
+                     const std::vector<double> &yVec, double fwhmVariation) {
+  bool mustUpdateWidth = !xVec.empty();
+  if (mustUpdateWidth) {
+    auto fwhm = peak.fwhm();
+    auto expectedFwhm = calculateWidth(centre, xVec, yVec);
+    if (expectedFwhm <= 0.0) {
+      ignorePeak(peak, fwhm);
+    } else if (fabs(fwhm - expectedFwhm) > fwhmVariation) {
+      peak.setFwhm(expectedFwhm);
+      setWidthConstraint(peak, expectedFwhm, fwhmVariation);
     }
+  }
+}
+
+/// Update a single peak.
+/// @param peak :: A peak to update.
+/// @param centre :: New peak centre.
+/// @param intensity :: New integrated intensity.
+/// @param xVec :: x-values of a tabulated width function.
+/// @param yVec :: y-values of a tabulated width function.
+/// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param isGood :: If the peak good and may have free fitting parameters.
+/// @param fixAllPeaks :: If true all parameters should be fixed.
+void updatePeak(API::IPeakFunction &peak, double centre, double intensity,
+                const std::vector<double> &xVec,
+                const std::vector<double> &yVec, double fwhmVariation,
+                bool isGood, bool fixAllPeaks) {
+  const bool fixByDefault = true;
+  if (isGood) {
+    peak.unfixAllDefault();
+    peak.setCentre(centre);
+    peak.setIntensity(intensity);
+    updatePeakWidth(peak, centre, xVec, yVec, fwhmVariation);
+    peak.unfixIntensity();
+    peak.fixIntensity(fixByDefault);
     if (fixAllPeaks) {
-      peak->fixAll();
+      peak.fixAll(fixByDefault);
     }
-    spectrum.addFunction(peak);
+  } else {
+    peak.setHeight(0.0);
+    peak.fixAllActive(fixByDefault);
   }
-  return nPeaks;
 }
 
 /// Update the peaks parameters after recalculationof the crystal field.
 /// @param spectrum :: A composite function containings the peaks to update.
 ///                    May contain other functions (background) fix indices
 ///                    < iFirst.
+/// @param peakShape :: A shape of each peak as a name of an IPeakFunction.
 /// @param centresAndIntensities :: A FunctionValues object containing centres
 ///        and intensities for the peaks. First nPeaks calculated values are the
 ///        centres and the following nPeaks values are the intensities.
-/// @param nOriginalPeaks :: Number of actual peaks the spectrum had before the
-///        update.This update can change the number of actual peaks.
 /// @param iFirst :: The first index in the composite function (spectrum) at
 ///        which the peaks begin.
 /// @param xVec :: x-values of a tabulated width function.
 /// @param yVec :: y-values of a tabulated width function.
 /// @param fwhmVariation :: A variation in the peak width allowed in a fit.
+/// @param defaultFWHM :: A default value for the FWHM to use if xVec and yVec
+///        are empty.
+/// @param fixAllPeaks :: If true fix all peak parameters
 /// @return :: The new number of fitted peaks.
 size_t updateSpectrumFunction(API::CompositeFunction &spectrum,
+                              const std::string &peakShape,
                               const FunctionValues &centresAndIntensities,
-                              size_t nOriginalPeaks, size_t iFirst,
-                              const std::vector<double> &xVec,
+                              size_t iFirst, const std::vector<double> &xVec,
                               const std::vector<double> &yVec,
-                              double fwhmVariation) {
+                              double fwhmVariation, double defaultFWHM,
+                              bool fixAllPeaks) {
   size_t nGoodPeaks = calculateNPeaks(centresAndIntensities);
   size_t maxNPeaks = calculateMaxNPeaks(nGoodPeaks);
-  bool mustUpdateWidth = !xVec.empty();
+  size_t nFunctions = spectrum.nFunctions();
 
   for (size_t i = 0; i < maxNPeaks; ++i) {
-    auto fun = spectrum.getFunction(i + iFirst);
-    auto &peak = dynamic_cast<API::IPeakFunction &>(*fun);
-    if (i < nGoodPeaks) {
-      auto centre = centresAndIntensities.getCalculated(i);
-      peak.setCentre(centre);
-      peak.setIntensity(centresAndIntensities.getCalculated(i + nGoodPeaks));
-      if (mustUpdateWidth) {
-        auto fwhm = peak.fwhm();
-        auto expectedFwhm = calculateWidth(centre, xVec, yVec);
-        if (expectedFwhm <= 0.0) {
-          ignorePeak(peak, fwhm);
-        } else if (fabs(fwhm - expectedFwhm) > fwhmVariation) {
-          peak.setFwhm(expectedFwhm);
-          setWidthConstraint(peak, expectedFwhm, fwhmVariation);
-        }
-      }
-      peak.unfixIntensity();
-      peak.fixIntensity();
+    const bool isGood = i < nGoodPeaks;
+    auto centre = isGood ? centresAndIntensities.getCalculated(i) : 0.0;
+    auto intensity =
+        isGood ? centresAndIntensities.getCalculated(i + nGoodPeaks) : 0.0;
+
+    if (i < nFunctions) {
+      auto fun = spectrum.getFunction(i + iFirst);
+      auto &peak = dynamic_cast<API::IPeakFunction &>(*fun);
+      updatePeak(peak, centre, intensity, xVec, yVec, fwhmVariation, isGood,
+                 fixAllPeaks);
     } else {
-      peak.setHeight(0.0);
-      if (i > nOriginalPeaks) {
-        peak.fixAll();
-      }
+      auto peakPtr =
+          createPeak(peakShape, centre, intensity, xVec, yVec, fwhmVariation,
+                     defaultFWHM, isGood, fixAllPeaks);
+      spectrum.addFunction(peakPtr);
     }
   }
+  // If there are any peaks above the maxNPeaks, ignore them
+  // but don't remove
+  for (size_t i = maxNPeaks; i < nFunctions - iFirst; ++i) {
+    auto fun = spectrum.getFunction(i + iFirst);
+    auto &peak = dynamic_cast<API::IPeakFunction &>(*fun);
+    const auto fwhm = peak.fwhm();
+    ignorePeak(peak, fwhm);
+  }
   return nGoodPeaks;
 }
 
diff --git a/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp b/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp
index 8c60bd38a19644861b01f5723b28eda4576461d4..5776320e823c0791e23d64d999a8b717ddec87d3 100644
--- a/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp
+++ b/Framework/CurveFitting/src/Functions/CrystalFieldSpectrum.cpp
@@ -78,22 +78,21 @@ void CrystalFieldSpectrum::updateTargetFunction() const {
     return;
   }
   m_dirty = false;
+  auto peakShape = getAttribute("PeakShape").asString();
   auto xVec = getAttribute("FWHMX").asVector();
   auto yVec = getAttribute("FWHMY").asVector();
   auto fwhmVariation = getAttribute("FWHMVariation").asDouble();
+  auto defaultFWHM = getAttribute("FWHM").asDouble();
+  bool fixAllPeaks = getAttribute("FixAllPeaks").asBool();
   FunctionDomainGeneral domain;
   FunctionValues values;
   m_source->function(domain, values);
   m_target->setAttribute("NumDeriv", this->getAttribute("NumDeriv"));
   auto &spectrum = dynamic_cast<CompositeFunction &>(*m_target);
   m_nPeaks = CrystalFieldUtils::calculateNPeaks(values);
-  auto maxNPeaks = CrystalFieldUtils::calculateMaxNPeaks(m_nPeaks);
-  if (maxNPeaks > spectrum.nFunctions()) {
-    buildTargetFunction();
-  } else {
-    CrystalFieldUtils::updateSpectrumFunction(spectrum, values, m_nPeaks, 0,
-                                              xVec, yVec, fwhmVariation);
-  }
+  CrystalFieldUtils::updateSpectrumFunction(spectrum, peakShape, values, 0,
+                                            xVec, yVec, fwhmVariation,
+                                            defaultFWHM, fixAllPeaks);
   storeReadOnlyAttribute("NPeaks", Attribute(static_cast<int>(m_nPeaks)));
 }
 
@@ -109,29 +108,15 @@ std::string CrystalFieldSpectrum::asString() const {
       ostr << ',' << attName << '=' << attValue;
     }
   }
-  // Print own parameters
-  for (size_t i = 0; i < m_nOwnParams; i++) {
-    const ParameterTie *tie = getTie(i);
-    if (!tie || !tie->isDefault()) {
-      ostr << ',' << parameterName(i) << '=' << getParameter(i);
-    }
-  }
-
-  // collect non-default constraints
-  std::vector<std::string> constraints;
-  for (size_t i = 0; i < m_nOwnParams; i++) {
-    auto constraint = writeConstraint(i);
-    if (!constraint.empty()) {
-      constraints.push_back(constraint);
-    }
-  }
-
-  // collect the non-default ties
   std::vector<std::string> ties;
+  // Print own parameters
   for (size_t i = 0; i < m_nOwnParams; i++) {
-    auto tie = writeTie(i);
-    if (!tie.empty()) {
-      ties.push_back(tie);
+    std::ostringstream paramOut;
+    paramOut << parameterName(i) << '=' << getParameter(i);
+    if (isActive(i)) {
+      ostr << ',' << paramOut.str();
+    } else if (isFixed(i)) {
+      ties.push_back(paramOut.str());
     }
   }
 
@@ -155,24 +140,21 @@ std::string CrystalFieldSpectrum::asString() const {
         ostr << ",f" << ip << "." << peak.parameterName(i) << '='
              << peak.getParameter(i);
       }
-      auto constraint = writeConstraint(i);
-      if (!constraint.empty()) {
-        constraints.push_back(constraint);
-      }
-      auto tieStr = writeTie(i);
-      if (!tieStr.empty()) {
-        ties.push_back(tieStr);
-      }
     }
   } // for peaks
 
+  // collect non-default constraints
+  std::string constraints = writeConstraints();
   // print constraints
   if (!constraints.empty()) {
-    ostr << ",constraints=("
-         << Kernel::Strings::join(constraints.begin(), constraints.end(), ",")
-         << ")";
+    ostr << ",constraints=(" << constraints << ")";
   }
 
+  // collect the non-default ties
+  auto tiesString = writeTies();
+  if (!tiesString.empty()) {
+    ties.push_back(tiesString);
+  }
   // print the ties
   if (!ties.empty()) {
     ostr << ",ties=(" << Kernel::Strings::join(ties.begin(), ties.end(), ",")
diff --git a/Framework/CurveFitting/src/Functions/Gaussian.cpp b/Framework/CurveFitting/src/Functions/Gaussian.cpp
index c3308e094e617165874b59e961e65550551b5e0f..48e567132d9e85f7f5e19cdf4a67b9030c3b13c0 100644
--- a/Framework/CurveFitting/src/Functions/Gaussian.cpp
+++ b/Framework/CurveFitting/src/Functions/Gaussian.cpp
@@ -107,14 +107,16 @@ void Gaussian::setIntensity(const double i) {
   }
 }
 
-void Gaussian::fixCentre() { fixParameter("PeakCentre"); }
+void Gaussian::fixCentre(bool isDefault) {
+  fixParameter("PeakCentre", isDefault);
+}
 
 void Gaussian::unfixCentre() { unfixParameter("PeakCentre"); }
 
-void Gaussian::fixIntensity() {
+void Gaussian::fixIntensity(bool isDefault) {
   std::string formula =
       std::to_string(intensity() / sqrt(2.0 * M_PI)) + "/Sigma";
-  tie("Height", formula, true);
+  tie("Height", formula, isDefault);
 }
 
 void Gaussian::unfixIntensity() { removeTie("Height"); }
diff --git a/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp b/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp
index 8054cfe6b052041f1d57b1897c349476638ea65b..deca9e25ada136615b2f854f0b6f926b2dcd29f5 100644
--- a/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp
+++ b/Framework/CurveFitting/src/Functions/GramCharlierComptonProfile.cpp
@@ -176,7 +176,7 @@ GramCharlierComptonProfile::intensityParameterIndices() const {
   }
   // Include Kfse if it is not fixed
   const size_t kIndex = this->parameterIndex(KFSE_NAME);
-  if (!isFixed(kIndex)) {
+  if (isActive(kIndex)) {
     indices.push_back(kIndex);
   }
 
@@ -375,7 +375,7 @@ void GramCharlierComptonProfile::cacheYSpaceValues(
   // Is FSE fixed at the moment?
   // The ComptonScatteringCountRate fixes it but we still need to know if the
   // user wanted it fixed
-  m_userFixedFSE = this->isFixed(this->parameterIndex(KFSE_NAME));
+  m_userFixedFSE = !this->isActive(this->parameterIndex(KFSE_NAME));
 
   const auto &yspace = ySpace();
   const auto &modq = modQ();
diff --git a/Framework/CurveFitting/src/Functions/Lorentzian.cpp b/Framework/CurveFitting/src/Functions/Lorentzian.cpp
index 321c971ae6f401ca9defe4a3a768ec03a5f2eedc..4b949cadbc1774ec370e4b3935419d66856341f0 100644
--- a/Framework/CurveFitting/src/Functions/Lorentzian.cpp
+++ b/Framework/CurveFitting/src/Functions/Lorentzian.cpp
@@ -56,11 +56,15 @@ void Lorentzian::setFwhm(const double w) {
   setParameter("FWHM", w);
 }
 
-void Lorentzian::fixCentre() { fixParameter("PeakCentre"); }
+void Lorentzian::fixCentre(bool isDefault) {
+  fixParameter("PeakCentre", isDefault);
+}
 
 void Lorentzian::unfixCentre() { unfixParameter("PeakCentre"); }
 
-void Lorentzian::fixIntensity() { fixParameter("Amplitude"); }
+void Lorentzian::fixIntensity(bool isDefault) {
+  fixParameter("Amplitude", isDefault);
+}
 
 void Lorentzian::unfixIntensity() { unfixParameter("Amplitude"); }
 
diff --git a/Framework/CurveFitting/src/GSLFunctions.cpp b/Framework/CurveFitting/src/GSLFunctions.cpp
index f34899b7133b45615d3fc4e5328680d631aacb6e..bb3276dc3d55b3283c0d1af16841775cde555c86 100644
--- a/Framework/CurveFitting/src/GSLFunctions.cpp
+++ b/Framework/CurveFitting/src/GSLFunctions.cpp
@@ -135,8 +135,6 @@ int gsl_df(const gsl_vector *x, void *params, gsl_matrix *J) {
   for (size_t iY = 0; iY < p->n; iY++)
     for (size_t iP = 0; iP < p->p; iP++) {
       J->data[iY * p->p + iP] *= values->getFitWeight(iY);
-      // std::cerr << iY << ' ' << iP << ' ' << J->data[iY*p->p + iP] <<
-      // '\n';
     }
 
   return GSL_SUCCESS;
diff --git a/Framework/CurveFitting/test/Algorithms/FitTest.h b/Framework/CurveFitting/test/Algorithms/FitTest.h
index 3ff7813dd922d863295ea6a5b31e19d371ed22ae..fe65df76444257751752a0d5e07af46c296d3a7e 100644
--- a/Framework/CurveFitting/test/Algorithms/FitTest.h
+++ b/Framework/CurveFitting/test/Algorithms/FitTest.h
@@ -17,6 +17,7 @@
 #include "MantidDataObjects/TableWorkspace.h"
 #include "MantidDataObjects/Workspace2D.h"
 
+#include "MantidTestHelpers/FunctionCreationHelper.h"
 #include "MantidTestHelpers/MultiDomainFunctionHelper.h"
 #include "MantidTestHelpers/WorkspaceCreationHelper.h"
 
@@ -2035,6 +2036,101 @@ public:
     AnalysisDataService::Instance().clear();
   }
 
+  void test_fit_size_change() {
+    auto ws = WorkspaceCreationHelper::create2DWorkspaceFromFunction(
+        [](double x, int) { return 2 * exp(-(5 * x + x * x - 3 * x * x * x)); },
+        1, 0, 1, 0.1);
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 1.9936, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(1), -9.4991, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(2), 19.1074, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(3), -17.8434, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(4), 6.3465, 0.1);
+    }
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      fit.setProperty("Minimizer", "Levenberg-MarquardtMD");
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 1.9936, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(1), -9.4991, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(2), 19.1074, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(3), -17.8434, 0.1);
+      TS_ASSERT_DELTA(fun->getParameter(4), 6.3465, 0.1);
+      std::string status = fit.getProperty("OutputStatus");
+      TS_ASSERT_EQUALS(status, "success");
+    }
+
+    AnalysisDataService::Instance().clear();
+  }
+
+  void test_fit_size_change_1() {
+    auto ws = WorkspaceCreationHelper::create2DWorkspaceFromFunction(
+        [](double x, int) { return 2 + x - 0.1 * x * x; }, 1, 0, 1, 0.1);
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 2.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(1), 1.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(2), -0.1, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(3), 0.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(4), 0.0, 0.0001);
+      std::string status = fit.getProperty("OutputStatus");
+      TS_ASSERT_EQUALS(status, "success");
+    }
+    {
+      API::IFunction_sptr fun =
+          boost::make_shared<TestHelpers::FunctionChangesNParams>();
+      TS_ASSERT_EQUALS(fun->nParams(), 1);
+
+      Fit fit;
+      fit.initialize();
+      fit.setRethrows(true);
+      fit.setProperty("Function", fun);
+      fit.setProperty("InputWorkspace", ws);
+      fit.setProperty("Minimizer", "Levenberg-MarquardtMD");
+      TS_ASSERT_THROWS_NOTHING(fit.execute());
+      TS_ASSERT_EQUALS(fun->nParams(), 5);
+      TS_ASSERT_DELTA(fun->getParameter(0), 2.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(1), 1.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(2), -0.1, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(3), 0.0, 0.0001);
+      TS_ASSERT_DELTA(fun->getParameter(4), 0.0, 0.0001);
+      std::string status = fit.getProperty("OutputStatus");
+      TS_ASSERT_EQUALS(status, "success");
+    }
+
+    AnalysisDataService::Instance().clear();
+  }
+
 private:
   /// build test input workspaces for the Pawley function Fit tests
   MatrixWorkspace_sptr getWorkspacePawley(const std::string &functionString,
diff --git a/Framework/CurveFitting/test/CMakeLists.txt b/Framework/CurveFitting/test/CMakeLists.txt
index 077bb0bbeda4ebacaf35ad1479cb26092b31d472..59c9204470fd1f3ad3f5d6d19185d9e1e3ca84a8 100644
--- a/Framework/CurveFitting/test/CMakeLists.txt
+++ b/Framework/CurveFitting/test/CMakeLists.txt
@@ -6,6 +6,7 @@ if ( CXXTEST_FOUND )
   # It will go out of scope at the end of this file so doesn't need un-setting
   set ( TESTHELPER_SRCS ../../TestHelpers/src/ComponentCreationHelper.cpp
                         ../../TestHelpers/src/InstrumentCreationHelper.cpp
+                        ../../TestHelpers/src/FunctionCreationHelper.cpp
                         ../../TestHelpers/src/MultiDomainFunctionHelper.cpp
                         ../../TestHelpers/src/StartFrameworkManager.cpp
                         ../../TestHelpers/src/TearDownWorld.cpp
diff --git a/Framework/CurveFitting/test/CompositeFunctionTest.h b/Framework/CurveFitting/test/CompositeFunctionTest.h
index 15433437e8e287e06da2459340885d0293c70ff7..f839e29a6edfc6de2104fbb24be80794607e3595 100644
--- a/Framework/CurveFitting/test/CompositeFunctionTest.h
+++ b/Framework/CurveFitting/test/CompositeFunctionTest.h
@@ -404,6 +404,87 @@ public:
     TS_ASSERT_DELTA(mfun->getParameter("f1.b"), 4.4, 0.01);
     TS_ASSERT_EQUALS(s.getError(), "success");
   }
+
+  void test_constraints_str() {
+    auto fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,constraints=(Height>0)");
+    TS_ASSERT_EQUALS(
+        fun->asString(),
+        "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,constraints=(0<Height)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,constraints=(Height>0);name=LinearBackground,"
+        "constraints=(A0<0)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0,constraints=(0<Height);name="
+                                      "LinearBackground,A0=0,A1=0,constraints=("
+                                      "A0<0)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian;name=LinearBackground;"
+        "constraints=(f0.Height>0, f1.A0<0)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0;name=LinearBackground,A0=0,A1=0;"
+                                      "constraints=(0<f0.Height,f1.A0<0)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,constraints=(Height>0);name=LinearBackground,"
+        "constraints=(A0<0);constraints=(f0.Sigma<0, f1.A1>10)");
+    TS_ASSERT_EQUALS(fun->asString(),
+                     "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,constraints=("
+                     "0<Height);name=LinearBackground,A0=0,A1=0,constraints=("
+                     "A0<0);constraints=(f0.Sigma<0,10<f1.A1)");
+  }
+
+  void test_ties_str() {
+    auto fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10)");
+    TS_ASSERT_EQUALS(
+        fun->asString(),
+        "name=Gaussian,Height=10,PeakCentre=0,Sigma=0,ties=(Height=10)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10*Sigma)");
+    TS_ASSERT_EQUALS(
+        fun->asString(),
+        "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,ties=(Height=10*Sigma)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10);name=LinearBackground,"
+        "ties=(A0=0)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=10,PeakCentre=0,"
+                                      "Sigma=0,ties=(Height=10);name="
+                                      "LinearBackground,A0=0,A1=0,ties=(A0=0)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=10*Sigma);name=LinearBackground,"
+        "ties=(A0=A1)");
+    TS_ASSERT_EQUALS(fun->asString(),
+                     "name=Gaussian,Height=0,PeakCentre=0,Sigma=0,ties=(Height="
+                     "10*Sigma);name=LinearBackground,A0=0,A1=0,ties=(A0=A1)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian;name=LinearBackground;"
+        "ties=(f0.Height=2, f1.A0=f1.A1)");
+    TS_ASSERT_EQUALS(fun->asString(),
+                     "name=Gaussian,Height=2,PeakCentre=0,Sigma=0,ties=(Height="
+                     "2);name=LinearBackground,A0=0,A1=0;ties=(f1.A0=f1.A1)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian;name=LinearBackground;"
+        "ties=(f0.Height=f1.A0=f1.A1)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0;name=LinearBackground,A0=0,A1=0;"
+                                      "ties=(f1.A0=f1.A1,f0.Height=f1.A1)");
+
+    fun = FunctionFactory::Instance().createInitialized(
+        "name=Gaussian,ties=(Height=0);name=LinearBackground,"
+        "ties=(A0=A1);ties=(f0.Sigma=f1.A1)");
+    TS_ASSERT_EQUALS(fun->asString(), "name=Gaussian,Height=0,PeakCentre=0,"
+                                      "Sigma=0,ties=(Height=0);name="
+                                      "LinearBackground,A0=0,A1=0,ties=(A0=A1);"
+                                      "ties=(f0.Sigma=f1.A1)");
+  }
 };
 
 #endif /*CURVEFITTING_COMPOSITEFUNCTIONTEST_H_*/
diff --git a/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h b/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h
index fb4e27623588072a569982443cc31493c2f98ba3..9d716aa452d709d3dbc16d046abc6cd0ce2e48b4 100644
--- a/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h
+++ b/Framework/CurveFitting/test/Constraints/BoundaryConstraintTest.h
@@ -22,40 +22,6 @@ using namespace Mantid::CurveFitting::Constraints;
 
 class BoundaryConstraintTest : public CxxTest::TestSuite {
 public:
-  void test1() {
-    // set up fitting function
-    Gaussian gaus;
-    gaus.initialize();
-    gaus.setCentre(11.2);
-    gaus.setHeight(100.7);
-    gaus.setParameter("Sigma", 1.1);
-
-    BoundaryConstraint bc;
-    bc.reset(&gaus, 2);
-
-    TS_ASSERT(!bc.hasLower());
-    TS_ASSERT(!bc.hasUpper());
-
-    bc.setLower(1.0);
-    bc.setUpper(2.0);
-
-    TS_ASSERT(bc.hasLower());
-    TS_ASSERT(bc.hasUpper());
-
-    BoundaryConstraint bc2;
-
-    bc2.reset(&gaus, 2);
-    bc2.setBounds(10, 20);
-
-    TS_ASSERT_DELTA(bc2.lower(), 10, 0.0001);
-    TS_ASSERT_DELTA(bc2.upper(), 20, 0.0001);
-
-    TS_ASSERT_DELTA(gaus.getParameter("Sigma"), 1.1, 0.0001);
-
-    bc2.setParamToSatisfyConstraint();
-    TS_ASSERT_DELTA(gaus.getParameter("Sigma"), 10.0, 0.0001);
-  }
-
   void testInitialize1() {
     Gaussian gaus;
     gaus.initialize();
@@ -64,7 +30,7 @@ public:
     expr.parse("10<Sigma<20");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.lower(), 10, 0.0001);
     TS_ASSERT_DELTA(bc.upper(), 20, 0.0001);
   }
@@ -77,7 +43,7 @@ public:
     expr.parse("20>Sigma>10");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.lower(), 10, 0.0001);
     TS_ASSERT_DELTA(bc.upper(), 20, 0.0001);
   }
@@ -90,7 +56,7 @@ public:
     expr.parse("10<Sigma");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.lower(), 10, 0.0001);
     TS_ASSERT(!bc.hasUpper());
   }
@@ -103,7 +69,7 @@ public:
     expr.parse("Sigma<20");
     bc.initialize(&gaus, expr, false);
 
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc.upper(), 20, 0.0001);
     TS_ASSERT(!bc.hasLower());
   }
@@ -134,8 +100,8 @@ public:
     TS_ASSERT(bc.hasLower());
     TS_ASSERT(!bc.hasUpper());
     TS_ASSERT_EQUALS(bc.lower(), 0.0);
-    TS_ASSERT_EQUALS(bc.getParameterName(), "Sigma");
-    TS_ASSERT_EQUALS(bc.getFunction(), &gaus);
+    TS_ASSERT_EQUALS(bc.parameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc.getLocalFunction(), &gaus);
   }
 
   void testAsString() {
@@ -147,7 +113,7 @@ public:
       expr.parse("Sigma<20");
       bc->initialize(&gaus, expr, false);
 
-      TS_ASSERT_EQUALS(bc->getParameterName(), "Sigma");
+      TS_ASSERT_EQUALS(bc->parameterName(), "Sigma");
       TS_ASSERT_DELTA(bc->upper(), 20, 0.0001);
       TS_ASSERT(!bc->hasLower());
       gaus.addConstraint(std::move(bc));
@@ -162,7 +128,7 @@ public:
     auto bc = dynamic_cast<BoundaryConstraint *>(c);
     TS_ASSERT(bc);
 
-    TS_ASSERT_EQUALS(bc->getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc->parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc->upper(), 20, 0.0001);
     TS_ASSERT(!bc->hasLower());
   }
@@ -192,7 +158,7 @@ public:
     BoundaryConstraint *bc = dynamic_cast<BoundaryConstraint *>(c);
     TS_ASSERT(bc);
 
-    TS_ASSERT_EQUALS(bc->getParameterName(), "Sigma");
+    TS_ASSERT_EQUALS(bc->parameterName(), "Sigma");
     TS_ASSERT_DELTA(bc->upper(), 20, 0.0001);
     TS_ASSERT(!bc->hasLower());
 
@@ -201,7 +167,7 @@ public:
     bc = dynamic_cast<BoundaryConstraint *>(c);
     TS_ASSERT(bc);
 
-    TS_ASSERT_EQUALS(bc->getParameterName(), "Height");
+    TS_ASSERT_EQUALS(bc->parameterName(), "Height");
     TS_ASSERT_DELTA(bc->lower(), 1.3, 0.0001);
     TS_ASSERT_DELTA(bc->upper(), 3.4, 0.0001);
   }
diff --git a/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h b/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h
index 1a2aba36b775eb8a2fff21c66474ec01cc68aab8..b4083d4e9b7d694db1cec6fc0dc7a9ad15836d0c 100644
--- a/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h
+++ b/Framework/CurveFitting/test/FunctionFactoryConstraintTest.h
@@ -349,8 +349,8 @@ public:
 
     TS_ASSERT(fun1->isFixed(0));
     TS_ASSERT(fun1->isFixed(1));
-    TS_ASSERT(fun1->isFixed(2));
-    TS_ASSERT(!fun1->isFixed(3));
+    TS_ASSERT(!fun1->isActive(2));
+    TS_ASSERT(fun1->isActive(3));
   }
 };
 
diff --git a/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h b/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h
index 5a8fff0339917e708fdec74900e29803172e996d..4656d63c7a60cc0013af393e219cae68645a27ad 100644
--- a/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h
+++ b/Framework/CurveFitting/test/Functions/CrystalFieldMultiSpectrumTest.h
@@ -94,6 +94,29 @@ public:
     TS_ASSERT_DELTA(fun.getParameter("f0.f3.FWHM"), 1.5, 1e-3);
   }
 
+  void test_evaluate_1() {
+    auto funStr = "name=CrystalFieldSpectrum,Ion=Ce,Temperature=44,"
+                  "ToleranceIntensity=0.001,B20=0.37737,B22=3.9770,"
+                  "B40=-0.031787,B42=-0.11611,B44=-0.12544,"
+                  "f0.FWHM=1.6,f1.FWHM=2.0,f2.FWHM=2.3";
+    auto ws = createWorkspace();
+    auto alg = AlgorithmFactory::Instance().create("EvaluateFunction", -1);
+    alg->initialize();
+    alg->setPropertyValue("Function", funStr);
+    alg->setProperty("InputWorkspace", ws);
+    alg->setProperty("OutputWorkspace", "out");
+    alg->execute();
+
+    auto out =
+        AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>("out");
+    TS_ASSERT(out);
+    TS_ASSERT_EQUALS(out->getNumberHistograms(), 3);
+    TS_ASSERT_DELTA(out->readY(1)[0], 1.094 * c_mbsr, 0.001 * c_mbsr);
+    TS_ASSERT_DELTA(out->readY(1)[1], 0.738 * c_mbsr, 0.001 * c_mbsr);
+    TS_ASSERT_DELTA(out->readY(1)[2], 0.373 * c_mbsr, 0.001 * c_mbsr);
+    AnalysisDataService::Instance().clear();
+  }
+
   void test_evaluate() {
     auto funStr = "name=CrystalFieldMultiSpectrum,Ion=Ce,Temperatures=(44, "
                   "50),ToleranceIntensity=0.001,B20=0.37737,B22=3.9770,"
diff --git a/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h b/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h
index a7473a8b8ab0f38cabb4d65ab170e1bea00720ea..bf887a888ca94658b6f14e13e9e2eb4e24971313 100644
--- a/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h
+++ b/Framework/CurveFitting/test/Functions/CrystalFieldPeaksTest.h
@@ -492,7 +492,7 @@ public:
     TS_ASSERT(isFixed(fun, "B43"));
     TS_ASSERT(isFixed(fun, "IB43"));
     auto i = fun.parameterIndex("B44");
-    TS_ASSERT(fun.isFixed(i));
+    TS_ASSERT(!fun.isActive(i));
     TS_ASSERT(isFixed(fun, "IB44"));
 
     TS_ASSERT(!isFixed(fun, "B60"));
@@ -503,7 +503,7 @@ public:
     TS_ASSERT(isFixed(fun, "B63"));
     TS_ASSERT(isFixed(fun, "IB63"));
     i = fun.parameterIndex("B64");
-    TS_ASSERT(fun.isFixed(i));
+    TS_ASSERT(!fun.isActive(i));
     TS_ASSERT(isFixed(fun, "IB64"));
     TS_ASSERT(isFixed(fun, "B65"));
     TS_ASSERT(isFixed(fun, "IB65"));
diff --git a/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h b/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h
index abd7a8ae4ca724728a458eb254a483aaa208ba94..69c276617c77e4ad2c9d5fdcb940ffb32a936fed 100644
--- a/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h
+++ b/Framework/CurveFitting/test/Functions/CrystalFieldSpectrumTest.h
@@ -214,15 +214,15 @@ public:
     auto constraint = fun->getConstraint(i);
     TS_ASSERT(constraint);
     if (constraint) {
-      TS_ASSERT_EQUALS(constraint->asString(), "1.3<FWHM");
-      TS_ASSERT_EQUALS(constraint->getIndex(), 2);
+      TS_ASSERT_EQUALS(constraint->asString(), "1.3<f1.FWHM");
+      TS_ASSERT_EQUALS(constraint->getLocalIndex(), 39);
     }
     i = fun->parameterIndex("B44");
     constraint = fun->getConstraint(i);
     TS_ASSERT(constraint);
     if (constraint) {
       TS_ASSERT_EQUALS(constraint->asString(), "0<B44<10");
-      TS_ASSERT_EQUALS(constraint->getIndex(), 13);
+      TS_ASSERT_EQUALS(constraint->getLocalIndex(), 13);
     }
   }
 
@@ -682,6 +682,165 @@ public:
     }
   }
 
+  void test_new_peaks() {
+    std::string funDef = "name=CrystalFieldSpectrum,Ion=Ce,Symmetry=C2v,"
+                         "Temperature=44.0,FWHM=1.1";
+    auto fun = FunctionFactory::Instance().createInitialized(funDef);
+    TS_ASSERT_EQUALS(fun->nParams(), 40);
+    TS_ASSERT_DELTA(fun->getParameter(34), 310.38, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(35), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(36), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(37), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(38), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(39), 1.10, 1e-2);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT_DELTA(fun->getParameter(34), 203.87, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(35), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(36), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(37), 86.29, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(38), 27.04, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(39), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(40), 20.08, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(41), 44.24, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(42), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(43), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(44), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(45), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(46), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(47), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(48), 1.1, 1e-2);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0);
+    fun->setParameter("B22", 0);
+    fun->setParameter("B40", 0);
+    fun->setParameter("B42", 0);
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT_DELTA(fun->getParameter(34), 310.38, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(35), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(36), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(37), 0.00, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(38), 27.04, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(39), 1.10, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(40), 0.0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(41), 44.24, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(42), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(43), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(44), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(45), 1.1, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(46), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(47), 0, 1e-2);
+    TS_ASSERT_DELTA(fun->getParameter(48), 1.1, 1e-2);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(!fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+  }
+
+  void test_new_peaks_fixed_peak_width() {
+    std::string funDef = "name=CrystalFieldSpectrum,Ion=Ce,Symmetry=C2v,"
+                         "Temperature=44.0,FWHM=1.1";
+    auto fun = FunctionFactory::Instance().createInitialized(funDef);
+    TS_ASSERT_EQUALS(fun->nParams(), 40);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    fun->fix(39);
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0);
+    fun->setParameter("B22", 0);
+    fun->setParameter("B40", 0);
+    fun->setParameter("B42", 0);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(!fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+  }
+
+  void test_new_peaks_tied_peak_width() {
+    std::string funDef = "name=CrystalFieldSpectrum,Ion=Ce,Symmetry=C2v,"
+                         "Temperature=44.0,FWHM=1.1";
+    auto fun = FunctionFactory::Instance().createInitialized(funDef);
+    TS_ASSERT_EQUALS(fun->nParams(), 40);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    fun->tie("f1.FWHM", "f0.FWHM");
+
+    TS_ASSERT_EQUALS(fun->nParams(), 49);
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0);
+    fun->setParameter("B22", 0);
+    fun->setParameter("B40", 0);
+    fun->setParameter("B42", 0);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(!fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+
+    fun->setParameter("B20", 0.37737);
+    fun->setParameter("B22", 3.977);
+    fun->setParameter("B40", 0.031787);
+    fun->setParameter("B42", -0.11611);
+
+    TS_ASSERT(fun->isActive(36));
+    TS_ASSERT(!fun->isActive(39));
+    TS_ASSERT(fun->isActive(42));
+    TS_ASSERT(!fun->isActive(45));
+    TS_ASSERT(!fun->isActive(48));
+  }
+
 private:
   std::pair<double, double> getBounds(API::IFunction &fun,
                                       const std::string &parName) {
diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h b/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h
index f8a9036c5344a3779bb19bd50384804a89995d5f..88d4e811d22aa040b97b890812c51bf64e669f8e 100644
--- a/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h
+++ b/Framework/DataHandling/inc/MantidDataHandling/LoadILLIndirect.h
@@ -2,6 +2,7 @@
 #define MANTID_DATAHANDLING_LOADILLINDIRECT_H_
 
 #include "MantidAPI/IFileLoader.h"
+#include "MantidAPI/DeprecatedAlgorithm.h"
 #include "MantidNexus/NexusClasses.h"
 #include "MantidDataHandling/LoadHelper.h"
 
@@ -33,7 +34,8 @@ namespace DataHandling {
   Code Documentation is available at: <http://doxygen.mantidproject.org>
 */
 class DLLExport LoadILLIndirect
-    : public API::IFileLoader<Kernel::NexusDescriptor> {
+    : public API::IFileLoader<Kernel::NexusDescriptor>,
+      public API::DeprecatedAlgorithm {
 public:
   LoadILLIndirect();
   /// Returns a confidence value that this algorithm can load a file
diff --git a/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h b/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h
index ca7c8b2f328ed1e4df75dcd6f9878e4a9c812ea7..30ad9480441c387a90d401036465d13770d2b027 100644
--- a/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h
+++ b/Framework/DataHandling/inc/MantidDataHandling/LoadILLTOF2.h
@@ -74,7 +74,7 @@ private:
                                 const std::vector<std::vector<int>> &);
   void loadSpectra(size_t &spec, const size_t numberOfTubes,
                    const std::vector<Mantid::detid_t> &detectorIDs,
-                   NeXus::NXInt data, Mantid::API::Progress progress);
+                   const NeXus::NXInt &data, Mantid::API::Progress &progress);
 
   void runLoadInstrument();
 
diff --git a/Framework/DataHandling/src/LoadHelper.cpp b/Framework/DataHandling/src/LoadHelper.cpp
index f87b80eeb4194953f14e2ee6d50800acf80e14a6..f5b7ede1fe357615f5d80ad667ea5fcd709b4b64 100644
--- a/Framework/DataHandling/src/LoadHelper.cpp
+++ b/Framework/DataHandling/src/LoadHelper.cpp
@@ -205,17 +205,21 @@ void LoadHelper::recurseAndAddNexusFieldsToWsRun(NXhandle nxfileID,
 
       NXstatus opengroup_status;
       NXstatus opendata_status;
+      NXstatus getinfo_status;
 
       if ((opengroup_status = NXopengroup(nxfileID, nxname, nxclass)) ==
           NX_OK) {
 
-        // Go down to one level
-        std::string p_nxname(
-            nxname); // current names can be useful for next level
-        std::string p_nxclass(nxclass);
+        if (std::string(nxclass) != "ILL_data_scan_vars") {
 
-        recurseAndAddNexusFieldsToWsRun(nxfileID, runDetails, p_nxname,
-                                        p_nxclass, level + 1);
+          // Go down to one level, if the group is known to nexus
+          std::string p_nxname(
+              nxname); // current names can be useful for next level
+          std::string p_nxclass(nxclass);
+
+          recurseAndAddNexusFieldsToWsRun(nxfileID, runDetails, p_nxname,
+                                          p_nxclass, level + 1);
+        }
 
         NXclosegroup(nxfileID);
       } // if(NXopengroup
@@ -229,10 +233,9 @@ void LoadHelper::recurseAndAddNexusFieldsToWsRun(NXhandle nxfileID,
                         << nxname << ")\n";
           /* nothing */
         } else { // create a property
-          int rank;
-          int dims[4];
+          int rank = 0;
+          int dims[4] = {0, 0, 0, 0};
           int type;
-          dims[0] = dims[1] = dims[2] = dims[3] = 0;
 
           std::string property_name =
               (parent_name.empty() ? nxname : parent_name + "." + nxname);
@@ -241,133 +244,167 @@ void LoadHelper::recurseAndAddNexusFieldsToWsRun(NXhandle nxfileID,
                         << property_name << '\n';
 
           // Get the value
-          NXgetinfo(nxfileID, &rank, dims, &type);
+          if ((getinfo_status = NXgetinfo(nxfileID, &rank, dims, &type)) ==
+              NX_OK) {
+
+            g_log.debug() << indent_str << "Rank of " << property_name << " is "
+                          << rank << "\n" << indent_str << "Dimensions are "
+                          << dims[0] << ", " << dims[1] << ", " << dims[2]
+                          << ", " << dims[3] << "\n";
 
-          // Note, we choose to only build properties on small float arrays
-          // filter logic is below
-          bool build_small_float_array = false; // default
+            // Note, we choose to only build properties on small float arrays
+            // filter logic is below
+            bool build_small_float_array = false; // default
+            bool read_property = true;
 
-          if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
-            if ((rank == 1) && (dims[0] <= 9)) {
-              build_small_float_array = true;
+            if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
+              if ((rank == 1) && (dims[0] <= 9)) {
+                build_small_float_array = true;
+              } else {
+                g_log.debug() << indent_str
+                              << "ignored multi dimensional number "
+                                 "data with more than 10 elements "
+                              << property_name << '\n';
+                read_property = false;
+              }
+            } else if (type != NX_CHAR) {
+              if ((rank > 1) || (dims[0] > 1) || (dims[1] > 1) ||
+                  (dims[2] > 1) || (dims[3] > 1)) {
+                g_log.debug() << indent_str
+                              << "ignored non-scalar numeric data on "
+                              << property_name << '\n';
+                read_property = false;
+              }
             } else {
-              g_log.debug() << indent_str
-                            << "ignored multi dimension float data on "
-                            << property_name << '\n';
-            }
-          } else if (type != NX_CHAR) {
-            if ((rank != 1) || (dims[0] != 1) || (dims[1] != 1) ||
-                (dims[2] != 1) || (dims[3] != 1)) {
-              g_log.debug() << indent_str << "ignored multi dimension data on "
-                            << property_name << '\n';
+              if ((rank > 1) || (dims[1] > 1) || (dims[2] > 1) ||
+                  (dims[3] > 1)) {
+                g_log.debug() << indent_str << "ignored string array data on "
+                              << property_name << '\n';
+                read_property = false;
+              }
             }
-          }
 
-          void *dataBuffer;
-          NXmalloc(&dataBuffer, rank, dims, type);
+            if (read_property) {
 
-          if (NXgetdata(nxfileID, dataBuffer) != NX_OK) {
-            NXfree(&dataBuffer);
-            throw std::runtime_error("Cannot read data from NeXus file");
-          }
+              void *dataBuffer;
+              NXmalloc(&dataBuffer, rank, dims, type);
 
-          if (type == NX_CHAR) {
-            std::string property_value(
-                reinterpret_cast<const char *>(dataBuffer));
-            if (boost::algorithm::ends_with(property_name, "_time")) {
-              // That's a time value! Convert to Mantid standard
-              property_value = dateTimeInIsoFormat(property_value);
-            }
-            runDetails.addProperty(property_name, property_value);
-
-          } else if ((type == NX_FLOAT32) || (type == NX_FLOAT64) ||
-                     (type == NX_INT16) || (type == NX_INT32) ||
-                     (type == NX_UINT16)) {
-
-            // Look for "units"
-            NXstatus units_status;
-            char units_sbuf[NX_MAXNAMELEN];
-            int units_len = NX_MAXNAMELEN;
-            int units_type = NX_CHAR;
-
-            char unitsAttrName[] = "units";
-            units_status = NXgetattr(nxfileID, unitsAttrName, units_sbuf,
-                                     &units_len, &units_type);
-            if (units_status != NX_ERROR) {
-              g_log.debug() << indent_str << "[ " << property_name
-                            << " has unit " << units_sbuf << " ]\n";
-            }
+              if (NXgetdata(nxfileID, dataBuffer) == NX_OK) {
 
-            if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
-              // Mantid numerical properties are double only.
-              double property_double_value = 0.0;
-
-              // Simple case, one value
-              if (dims[0] == 1) {
-                if (type == NX_FLOAT32) {
-                  property_double_value =
-                      *(reinterpret_cast<float *>(dataBuffer));
-                } else if (type == NX_FLOAT64) {
-                  property_double_value =
-                      *(reinterpret_cast<double *>(dataBuffer));
-                }
-                if (units_status != NX_ERROR)
-                  runDetails.addProperty(property_name, property_double_value,
-                                         std::string(units_sbuf));
-                else
-                  runDetails.addProperty(property_name, property_double_value);
-              } else if (build_small_float_array) {
-                // An array, converted to "name_index", with index < 10 (see
-                // test above)
-                for (int dim_index = 0; dim_index < dims[0]; dim_index++) {
-                  if (type == NX_FLOAT32) {
-                    property_double_value =
-                        (reinterpret_cast<float *>(dataBuffer))[dim_index];
-                  } else if (type == NX_FLOAT64) {
-                    property_double_value =
-                        (reinterpret_cast<double *>(dataBuffer))[dim_index];
+                if (type == NX_CHAR) {
+                  std::string property_value(
+                      reinterpret_cast<const char *>(dataBuffer));
+                  if (boost::algorithm::ends_with(property_name, "_time")) {
+                    // That's a time value! Convert to Mantid standard
+                    property_value = dateTimeInIsoFormat(property_value);
+                  }
+                  runDetails.addProperty(property_name, property_value);
+
+                } else if ((type == NX_FLOAT32) || (type == NX_FLOAT64) ||
+                           (type == NX_INT16) || (type == NX_INT32) ||
+                           (type == NX_UINT16)) {
+
+                  // Look for "units"
+                  NXstatus units_status;
+                  char units_sbuf[NX_MAXNAMELEN];
+                  int units_len = NX_MAXNAMELEN;
+                  int units_type = NX_CHAR;
+
+                  char unitsAttrName[] = "units";
+                  units_status = NXgetattr(nxfileID, unitsAttrName, units_sbuf,
+                                           &units_len, &units_type);
+                  if (units_status != NX_ERROR) {
+                    g_log.debug() << indent_str << "[ " << property_name
+                                  << " has unit " << units_sbuf << " ]\n";
                   }
-                  std::string indexed_property_name = property_name +
-                                                      std::string("_") +
-                                                      std::to_string(dim_index);
-                  if (units_status != NX_ERROR)
-                    runDetails.addProperty(indexed_property_name,
-                                           property_double_value,
-                                           std::string(units_sbuf));
-                  else
-                    runDetails.addProperty(indexed_property_name,
-                                           property_double_value);
-                }
-              }
 
-            } else {
-              // int case
-              int property_int_value = 0;
-              if (type == NX_INT16) {
-                property_int_value =
-                    *(reinterpret_cast<short int *>(dataBuffer));
-              } else if (type == NX_INT32) {
-                property_int_value = *(reinterpret_cast<int *>(dataBuffer));
-              } else if (type == NX_UINT16) {
-                property_int_value =
-                    *(reinterpret_cast<short unsigned int *>(dataBuffer));
+                  if ((type == NX_FLOAT32) || (type == NX_FLOAT64)) {
+                    // Mantid numerical properties are double only.
+                    double property_double_value = 0.0;
+
+                    // Simple case, one value
+                    if (dims[0] == 1) {
+                      if (type == NX_FLOAT32) {
+                        property_double_value =
+                            *(reinterpret_cast<float *>(dataBuffer));
+                      } else if (type == NX_FLOAT64) {
+                        property_double_value =
+                            *(reinterpret_cast<double *>(dataBuffer));
+                      }
+                      if (units_status != NX_ERROR)
+                        runDetails.addProperty(property_name,
+                                               property_double_value,
+                                               std::string(units_sbuf));
+                      else
+                        runDetails.addProperty(property_name,
+                                               property_double_value);
+                    } else if (build_small_float_array) {
+                      // An array, converted to "name_index", with index < 10
+                      // (see
+                      // test above)
+                      for (int dim_index = 0; dim_index < dims[0];
+                           dim_index++) {
+                        if (type == NX_FLOAT32) {
+                          property_double_value = (reinterpret_cast<float *>(
+                              dataBuffer))[dim_index];
+                        } else if (type == NX_FLOAT64) {
+                          property_double_value = (reinterpret_cast<double *>(
+                              dataBuffer))[dim_index];
+                        }
+                        std::string indexed_property_name =
+                            property_name + std::string("_") +
+                            std::to_string(dim_index);
+                        if (units_status != NX_ERROR)
+                          runDetails.addProperty(indexed_property_name,
+                                                 property_double_value,
+                                                 std::string(units_sbuf));
+                        else
+                          runDetails.addProperty(indexed_property_name,
+                                                 property_double_value);
+                      }
+                    }
+
+                  } else {
+                    // int case
+                    int property_int_value = 0;
+                    if (type == NX_INT16) {
+                      property_int_value =
+                          *(reinterpret_cast<short int *>(dataBuffer));
+                    } else if (type == NX_INT32) {
+                      property_int_value =
+                          *(reinterpret_cast<int *>(dataBuffer));
+                    } else if (type == NX_UINT16) {
+                      property_int_value =
+                          *(reinterpret_cast<short unsigned int *>(dataBuffer));
+                    }
+
+                    if (units_status != NX_ERROR)
+                      runDetails.addProperty(property_name, property_int_value,
+                                             std::string(units_sbuf));
+                    else
+                      runDetails.addProperty(property_name, property_int_value);
+
+                  } // if (type==...
+
+                } else {
+                  g_log.debug() << indent_str << "unexpected data on "
+                                << property_name << '\n';
+                } // test on nxdata type
+
+              } else {
+                g_log.debug() << indent_str << "could not read the value of "
+                              << property_name << '\n';
               }
 
-              if (units_status != NX_ERROR)
-                runDetails.addProperty(property_name, property_int_value,
-                                       std::string(units_sbuf));
-              else
-                runDetails.addProperty(property_name, property_int_value);
-
-            } // if (type==...
-
-          } else {
-            g_log.debug() << indent_str << "unexpected data on "
-                          << property_name << '\n';
-          } // test on nxdata type
+              NXfree(&dataBuffer);
+              dataBuffer = nullptr;
+            }
 
-          NXfree(&dataBuffer);
-          dataBuffer = nullptr;
+          } // if NXgetinfo OK
+          else {
+            g_log.debug() << indent_str << "unexpected status ("
+                          << getinfo_status << ") on " << nxname << '\n';
+          }
 
         } // if (parent_class == "NXData" || parent_class == "NXMonitor") else
 
diff --git a/Framework/DataHandling/src/LoadILLIndirect.cpp b/Framework/DataHandling/src/LoadILLIndirect.cpp
index 769f3491a7c9563515f9e3d0aa200d9087680b66..206e6a9342656e3cc9d4dd0e05c0b969e2f0e390 100644
--- a/Framework/DataHandling/src/LoadILLIndirect.cpp
+++ b/Framework/DataHandling/src/LoadILLIndirect.cpp
@@ -32,6 +32,8 @@ LoadILLIndirect::LoadILLIndirect()
       m_numberOfPixelsPerTube(0), m_numberOfChannels(0),
       m_numberOfSimpleDetectors(0), m_numberOfHistograms(0) {
   m_supportedInstruments.emplace_back("IN16B");
+  useAlgorithm("LoadILLIndirect", 2);
+  deprecatedDate("01.04.2017");
 }
 
 //----------------------------------------------------------------------------------------------
diff --git a/Framework/DataHandling/src/LoadILLTOF2.cpp b/Framework/DataHandling/src/LoadILLTOF2.cpp
index 0ae774efd2026c43b9ad8b036b8b0741e8d55ec2..5ff8014cd7b65713faefffdae252011d216ecb20 100644
--- a/Framework/DataHandling/src/LoadILLTOF2.cpp
+++ b/Framework/DataHandling/src/LoadILLTOF2.cpp
@@ -404,7 +404,7 @@ void LoadILLTOF2::loadDataIntoTheWorkSpace(
  */
 void LoadILLTOF2::loadSpectra(size_t &spec, const size_t numberOfTubes,
                               const std::vector<detid_t> &detectorIDs,
-                              NXInt data, Progress progress) {
+                              const NXInt &data, Progress &progress) {
   for (size_t i = 0; i < numberOfTubes; ++i) {
     for (size_t j = 0; j < m_numberOfPixelsPerTube; ++j) {
       int *data_p = &data(static_cast<int>(i), static_cast<int>(j), 0);
diff --git a/Framework/DataHandling/test/CMakeLists.txt b/Framework/DataHandling/test/CMakeLists.txt
index 8848a389e927d3c9537e63520b6d2712f2932299..b7c682a43d705580bbb05697d80b3f64408ab6a1 100644
--- a/Framework/DataHandling/test/CMakeLists.txt
+++ b/Framework/DataHandling/test/CMakeLists.txt
@@ -21,6 +21,7 @@ if ( CXXTEST_FOUND )
             DataObjects
             Geometry
             HistogramData
+            Indexing
             Kernel
             Nexus
             ${NEXUS_LIBRARIES}
diff --git a/Framework/DataHandling/test/CreateChunkingFromInstrumentTest.h b/Framework/DataHandling/test/CreateChunkingFromInstrumentTest.h
index 0f11d5fbac794d1ecedc67d6aba9ebb6243fce74..2f9870ab6109d8f8b9026357aa49256482c25f4f 100644
--- a/Framework/DataHandling/test/CreateChunkingFromInstrumentTest.h
+++ b/Framework/DataHandling/test/CreateChunkingFromInstrumentTest.h
@@ -34,7 +34,8 @@ public:
     CreateChunkingFromInstrument alg;
     TS_ASSERT_THROWS_NOTHING(alg.initialize())
     TS_ASSERT(alg.isInitialized())
-    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("InstrumentName", "pg3"));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue(
+        "InstrumentFilename", "POWGEN_Definition_2015-08-01.xml"));
     TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("ChunkBy", "Group"););
     TS_ASSERT_THROWS_NOTHING(
         alg.setPropertyValue("OutputWorkspace", outWSName));
diff --git a/Framework/DataObjects/CMakeLists.txt b/Framework/DataObjects/CMakeLists.txt
index 7184dae6b3753aee838959cd73ea34aa13015bab..64c3428740bba11d35d9c4c780b352631950caf1 100644
--- a/Framework/DataObjects/CMakeLists.txt
+++ b/Framework/DataObjects/CMakeLists.txt
@@ -39,6 +39,7 @@ set ( SRC_FILES
 	src/PropertyWithValue.cpp
 	src/RebinnedOutput.cpp
 	src/ReflectometryTransform.cpp
+	src/ScanningWorkspaceBuilder.cpp
 	src/SpecialWorkspace2D.cpp
 	src/SplittersWorkspace.cpp
 	src/TableColumn.cpp
@@ -117,6 +118,7 @@ set ( INC_FILES
 	inc/MantidDataObjects/PeaksWorkspace.h
 	inc/MantidDataObjects/RebinnedOutput.h
 	inc/MantidDataObjects/ReflectometryTransform.h
+	inc/MantidDataObjects/ScanningWorkspaceBuilder.h
 	inc/MantidDataObjects/SkippingPolicy.h
 	inc/MantidDataObjects/SpecialWorkspace2D.h
 	inc/MantidDataObjects/SplittersWorkspace.h
@@ -175,6 +177,7 @@ set ( TEST_FILES
 	RebinnedOutputTest.h
 	RefAxisTest.h
 	ReflectometryTransformTest.h
+	ScanningWorkspaceBuilderTest.h
 	SkippingPolicyTest.h
 	SpecialWorkspace2DTest.h
 	SplittersWorkspaceTest.h
diff --git a/Framework/DataObjects/inc/MantidDataObjects/ScanningWorkspaceBuilder.h b/Framework/DataObjects/inc/MantidDataObjects/ScanningWorkspaceBuilder.h
new file mode 100644
index 0000000000000000000000000000000000000000..beca41f1f2b8d5909c4504346845bda025bc0bc8
--- /dev/null
+++ b/Framework/DataObjects/inc/MantidDataObjects/ScanningWorkspaceBuilder.h
@@ -0,0 +1,119 @@
+#ifndef MANTID_DATAOBJECTS_SCANNINGWORKSPACEBUILDER_H_
+#define MANTID_DATAOBJECTS_SCANNINGWORKSPACEBUILDER_H_
+
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidDataObjects/DllConfig.h"
+#include "MantidIndexing/IndexInfo.h"
+#include "MantidKernel/DateAndTime.h"
+#include "MantidKernel/V3D.h"
+#include "MantidKernel/Quat.h"
+
+#include <vector>
+
+namespace Mantid {
+namespace DataObjects {
+
+/** ScanningWorkspaceBuilder : This is a helper class to make it easy to build a
+  scanning workspace (a workspace with moving detectors), where all the
+  information about the scan is known in advance. The constructor takes the
+  arguments for the basic construction, then checks are made for consistency as
+  other information about the scanning workspace is set.
+
+  Things that must be set for successful building:
+   - Number of detectors, number of time indexes and number of bins (set via the
+  constructor)
+   - The instrument set via setInstrument
+   - The time ranges set via setTimeRanges
+
+  Some helper methods exist for specific cases, such as the whole instrument
+  rotating around the sample.
+
+  One current limitation to note here, that is not a general restriction within
+  Mantid, is that every detector must have the same set of time indexes.
+
+  Copyright &copy; 2017 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+class MANTID_DATAOBJECTS_DLL ScanningWorkspaceBuilder {
+public:
+  enum class IndexingType { Default, TimeOriented, DetectorOriented };
+
+  ScanningWorkspaceBuilder(
+      const boost::shared_ptr<const Geometry::Instrument> &instrument,
+      const size_t nTimeIndexes, const size_t nBins);
+
+  void setHistogram(HistogramData::Histogram histogram);
+
+  void
+  setTimeRanges(std::vector<std::pair<Kernel::DateAndTime, Kernel::DateAndTime>>
+                    timeRanges);
+  void setTimeRanges(const Kernel::DateAndTime &startTime,
+                     const std::vector<double> &durations);
+  void setPositions(std::vector<std::vector<Kernel::V3D>> positions);
+  void setRotations(std::vector<std::vector<Kernel::Quat>> rotations);
+  void setRelativeRotationsForScans(const std::vector<double> &instrumentAngles,
+                                    const Kernel::V3D &rotationPosition,
+                                    const Kernel::V3D &rotationAxis);
+
+  void setIndexingType(const IndexingType indexingType);
+
+  API::MatrixWorkspace_sptr buildWorkspace() const;
+
+private:
+  size_t m_nDetectors;
+  size_t m_nTimeIndexes;
+  size_t m_nBins;
+
+  boost::shared_ptr<const Geometry::Instrument> m_instrument;
+
+  HistogramData::Histogram m_histogram;
+
+  std::vector<std::pair<Kernel::DateAndTime, Kernel::DateAndTime>> m_timeRanges;
+  std::vector<std::vector<Kernel::V3D>> m_positions;
+  std::vector<std::vector<Kernel::Quat>> m_rotations;
+
+  std::vector<double> m_instrumentAngles;
+  Kernel::V3D m_rotationAxis;
+  Kernel::V3D m_rotationPosition;
+
+  IndexingType m_indexingType;
+
+  void buildOutputDetectorInfo(API::DetectorInfo &outputDetectorInfo) const;
+
+  void buildPositions(API::DetectorInfo &outputDetectorInfo) const;
+  void buildRotations(API::DetectorInfo &outputDetectorInfo) const;
+  void
+  buildRelativeRotationsForScans(API::DetectorInfo &outputDetectorInfo) const;
+
+  void createTimeOrientedIndexInfo(API::MatrixWorkspace &ws) const;
+  void createDetectorOrientedIndexInfo(API::MatrixWorkspace &ws) const;
+
+  void verifyTimeIndexSize(const size_t timeIndexSize,
+                           const std::string &description) const;
+  void verifyDetectorSize(const size_t detectorSize,
+                          const std::string &description) const;
+  void validateInputs() const;
+};
+
+} // namespace DataObjects
+} // namespace Mantid
+
+#endif /* MANTID_DATAOBJECTS_SCANNINGWORKSPACEBUILDER_H_ */
diff --git a/Framework/DataObjects/src/ScanningWorkspaceBuilder.cpp b/Framework/DataObjects/src/ScanningWorkspaceBuilder.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a7ebce28e1b49c962fbb5ec6b696b47568b02bf3
--- /dev/null
+++ b/Framework/DataObjects/src/ScanningWorkspaceBuilder.cpp
@@ -0,0 +1,317 @@
+#include "MantidDataObjects/ScanningWorkspaceBuilder.h"
+
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/WorkspaceFactory.h"
+#include "MantidDataObjects/Workspace2D.h"
+#include "MantidDataObjects/WorkspaceCreation.h"
+#include "MantidGeometry/Instrument.h"
+#include "MantidHistogramData/BinEdges.h"
+#include "MantidHistogramData/Histogram.h"
+#include "MantidHistogramData/LinearGenerator.h"
+#include "MantidTypes/SpectrumDefinition.h"
+
+using namespace Mantid::API;
+using namespace Mantid::HistogramData;
+using namespace Mantid::Indexing;
+
+namespace Mantid {
+namespace DataObjects {
+
+ScanningWorkspaceBuilder::ScanningWorkspaceBuilder(
+    const boost::shared_ptr<const Geometry::Instrument> &instrument,
+    const size_t nTimeIndexes, const size_t nBins)
+    : m_nDetectors(instrument->getNumberDetectors()),
+      m_nTimeIndexes(nTimeIndexes), m_nBins(nBins), m_instrument(instrument),
+      m_histogram(BinEdges(nBins + 1, LinearGenerator(1.0, 1.0)),
+                  Counts(nBins, 0.0)),
+      m_indexingType(IndexingType::Default) {}
+
+/**
+ * Set a histogram to be used for all the workspace spectra. This can be used to
+ *set the correct bin edges, but only if the binning is identical for every
+ *spectra.
+ *
+ * @param histogram A histogram with bin edges defined
+ */
+void ScanningWorkspaceBuilder::setHistogram(
+    const HistogramData::Histogram histogram) {
+  if (histogram.size() != m_nBins)
+    throw std::logic_error(
+        "Histogram supplied does not have the correct size.");
+
+  m_histogram = std::move(histogram);
+}
+
+/**
+ * Set time ranges from a vector of start time, end time pairs.
+ *
+ * @param timeRanges A vector of DateAndTime pairs, corresponding to the start
+ *and end times
+ */
+void ScanningWorkspaceBuilder::setTimeRanges(const std::vector<
+    std::pair<Kernel::DateAndTime, Kernel::DateAndTime>> timeRanges) {
+  verifyTimeIndexSize(timeRanges.size(), "start time, end time pairs");
+  m_timeRanges = std::move(timeRanges);
+}
+
+/**
+ * Set time ranges from a start time and a vector of durations
+ *
+ * @param startTime A DateAndTime object corresponding to the start of the first
+ *scan
+ * @param durations A vector of doubles containing the duration in seconds
+ */
+void ScanningWorkspaceBuilder::setTimeRanges(
+    const Kernel::DateAndTime &startTime,
+    const std::vector<double> &durations) {
+  verifyTimeIndexSize(durations.size(), "time durations");
+
+  std::vector<std::pair<Kernel::DateAndTime, Kernel::DateAndTime>> timeRanges =
+      {std::pair<Kernel::DateAndTime, Kernel::DateAndTime>(
+          startTime, startTime + durations[0])};
+
+  for (size_t i = 1; i < m_nTimeIndexes; ++i) {
+    const auto newStartTime = timeRanges[i - 1].second;
+    const auto endTime = newStartTime + durations[i];
+    timeRanges.push_back(std::pair<Kernel::DateAndTime, Kernel::DateAndTime>(
+        newStartTime, endTime));
+  }
+
+  setTimeRanges(std::move(timeRanges));
+}
+
+/**
+ * Supply a vector of vectors which contain positions. The inner vectors should
+ *contain the position for each time index, the outer vector the vector for each
+ *detector.
+ *
+ * @param positions A vector of vectors containing positions
+ */
+void ScanningWorkspaceBuilder::setPositions(
+    const std::vector<std::vector<Kernel::V3D>> positions) {
+
+  if (!m_positions.empty() || !m_instrumentAngles.empty())
+    throw std::logic_error("Can not set positions, as positions or instrument "
+                           "angles have already been set.");
+
+  for (const auto &vector : positions) {
+    verifyTimeIndexSize(vector.size(), "positions");
+  }
+  verifyDetectorSize(positions.size(), "positions");
+
+  m_positions = std::move(positions);
+}
+
+/**
+ * Supply a vector of vectors which contain rotations. The inner vectors should
+ *contain the rotation for each time index, the outer vector the vector for each
+ *detector.
+ *
+ * @param rotations A vector of vectors containing rotations
+ */
+void ScanningWorkspaceBuilder::setRotations(
+    const std::vector<std::vector<Kernel::Quat>> rotations) {
+
+  if (!m_rotations.empty() || !m_instrumentAngles.empty())
+    throw std::logic_error("Can not set rotations, as rotations or instrument "
+                           "angles have already been set.");
+
+  for (const auto &vector : rotations) {
+    verifyTimeIndexSize(vector.size(), "rotations");
+  }
+  verifyDetectorSize(rotations.size(), "rotations");
+
+  m_rotations = std::move(rotations);
+}
+
+/**
+ * Set a vector of rotations corresponding to each time index. These angles
+ *rotate the detector banks around the source, setting the corresponding
+ *positions and rotations of the detectors.
+ *
+ * Here explicit assumptions are made - that the source is at (0, 0, 0), and the
+ *rotation is in the X-Z plane. This corresponds to the common case of moving
+ *detectors to increase angular coverage.
+ *
+ * @param relativeRotations a vector of angles, the size matching the number of
+ *time indexes
+ * @param rotationPosition the position to rotate around, e.g. the sample
+ *position
+ * @param rotationAxis the axis to rotate around. e.g. the vertical axis to
+ *rotate the instrument in the horizontal plane
+ */
+void ScanningWorkspaceBuilder::setRelativeRotationsForScans(
+    const std::vector<double> &relativeRotations,
+    const Kernel::V3D &rotationPosition, const Kernel::V3D &rotationAxis) {
+
+  if (!m_positions.empty() || !m_rotations.empty())
+    throw std::logic_error("Can not set instrument angles, as positions and/or "
+                           "rotations have already been set.");
+
+  verifyTimeIndexSize(relativeRotations.size(), "instrument angles");
+  m_instrumentAngles = relativeRotations;
+  m_rotationPosition = rotationPosition;
+  m_rotationAxis = rotationAxis;
+}
+
+/**
+ * Set the indexing type, either to time or detector oriented indexing.
+ *
+ * @param indexingType An index type enum
+ */
+void ScanningWorkspaceBuilder::setIndexingType(
+    const IndexingType indexingType) {
+  if (m_indexingType != IndexingType::Default)
+    throw std::logic_error("Indexing type has been set already.");
+
+  m_indexingType = indexingType;
+}
+
+/**
+ * Verify everything has been set that is required and return the workspace.
+ *
+ * @return Workspace2D with the scanning information set
+ */
+MatrixWorkspace_sptr ScanningWorkspaceBuilder::buildWorkspace() const {
+  validateInputs();
+
+  auto outputWorkspace = create<Workspace2D>(
+      m_instrument, m_nDetectors * m_nTimeIndexes, m_histogram);
+
+  auto &outputDetectorInfo = outputWorkspace->mutableDetectorInfo();
+  outputDetectorInfo.setScanInterval(0, m_timeRanges[0]);
+
+  buildOutputDetectorInfo(outputDetectorInfo);
+
+  if (!m_positions.empty())
+    buildPositions(outputDetectorInfo);
+
+  if (!m_rotations.empty())
+    buildRotations(outputDetectorInfo);
+
+  if (!m_instrumentAngles.empty())
+    buildRelativeRotationsForScans(outputDetectorInfo);
+
+  switch (m_indexingType) {
+  case IndexingType::Default:
+    outputWorkspace->setIndexInfo(
+        Indexing::IndexInfo(m_nDetectors * m_nTimeIndexes));
+    break;
+  case IndexingType::TimeOriented:
+    createTimeOrientedIndexInfo(*outputWorkspace);
+    break;
+  case IndexingType::DetectorOriented:
+    createDetectorOrientedIndexInfo(*outputWorkspace);
+    break;
+  }
+
+  return boost::shared_ptr<MatrixWorkspace>(std::move(outputWorkspace));
+}
+
+void ScanningWorkspaceBuilder::buildOutputDetectorInfo(
+    DetectorInfo &outputDetectorInfo) const {
+  auto mergeWorkspace =
+      create<Workspace2D>(m_instrument, m_nDetectors, m_histogram.binEdges());
+  for (size_t i = 1; i < m_nTimeIndexes; ++i) {
+    auto &mergeDetectorInfo = mergeWorkspace->mutableDetectorInfo();
+    for (size_t j = 0; j < m_nDetectors; ++j) {
+      mergeDetectorInfo.setScanInterval(j, m_timeRanges[i]);
+    }
+    outputDetectorInfo.merge(mergeDetectorInfo);
+  }
+}
+
+void ScanningWorkspaceBuilder::buildRotations(
+    DetectorInfo &outputDetectorInfo) const {
+  for (size_t i = 0; i < m_nDetectors; ++i) {
+    for (size_t j = 0; j < m_nTimeIndexes; ++j) {
+      outputDetectorInfo.setRotation({i, j}, m_rotations[i][j]);
+    }
+  }
+}
+
+void ScanningWorkspaceBuilder::buildPositions(
+    DetectorInfo &outputDetectorInfo) const {
+  for (size_t i = 0; i < m_nDetectors; ++i) {
+    for (size_t j = 0; j < m_nTimeIndexes; ++j) {
+      outputDetectorInfo.setPosition({i, j}, m_positions[i][j]);
+    }
+  }
+}
+
+void ScanningWorkspaceBuilder::buildRelativeRotationsForScans(
+    DetectorInfo &outputDetectorInfo) const {
+  for (size_t i = 0; i < outputDetectorInfo.size(); ++i) {
+    for (size_t j = 0; j < outputDetectorInfo.scanCount(i); ++j) {
+      auto position = outputDetectorInfo.position({i, j});
+      const auto rotation = Kernel::Quat(m_instrumentAngles[j], m_rotationAxis);
+      position -= m_rotationPosition;
+      rotation.rotate(position);
+      position += m_rotationPosition;
+      outputDetectorInfo.setPosition({i, j}, position);
+      const auto &oldRotation = outputDetectorInfo.rotation({i, j});
+      outputDetectorInfo.setRotation({i, j}, rotation * oldRotation);
+    }
+  }
+}
+
+void ScanningWorkspaceBuilder::createTimeOrientedIndexInfo(
+    MatrixWorkspace &ws) const {
+  auto indexInfo = ws.indexInfo();
+  auto spectrumDefinitions =
+      std::vector<SpectrumDefinition>(m_nDetectors * m_nTimeIndexes);
+
+  for (size_t detIndex = 0; detIndex < m_nDetectors; ++detIndex) {
+    for (size_t timeIndex = 0; timeIndex < m_nTimeIndexes; ++timeIndex) {
+      spectrumDefinitions[detIndex * m_nTimeIndexes + timeIndex].add(detIndex,
+                                                                     timeIndex);
+    }
+  }
+
+  indexInfo.setSpectrumDefinitions(spectrumDefinitions);
+  ws.setIndexInfo(indexInfo);
+}
+
+void ScanningWorkspaceBuilder::createDetectorOrientedIndexInfo(
+    MatrixWorkspace &ws) const {
+  auto indexInfo = ws.indexInfo();
+  auto spectrumDefinitions =
+      std::vector<SpectrumDefinition>(m_nDetectors * m_nTimeIndexes);
+
+  for (size_t timeIndex = 0; timeIndex < m_nTimeIndexes; ++timeIndex) {
+    for (size_t detIndex = 0; detIndex < m_nDetectors; ++detIndex) {
+      spectrumDefinitions[timeIndex * m_nDetectors + detIndex].add(detIndex,
+                                                                   timeIndex);
+    }
+  }
+
+  indexInfo.setSpectrumDefinitions(spectrumDefinitions);
+  ws.setIndexInfo(indexInfo);
+}
+
+void ScanningWorkspaceBuilder::verifyTimeIndexSize(
+    const size_t timeIndexSize, const std::string &description) const {
+  if (timeIndexSize != m_nTimeIndexes) {
+    throw std::logic_error(
+        "Number of " + description +
+        " supplied does not match the number of time indexes.");
+  }
+}
+
+void ScanningWorkspaceBuilder::verifyDetectorSize(
+    const size_t detectorSize, const std::string &description) const {
+  if (detectorSize != m_nDetectors) {
+    throw std::logic_error("Number of " + description +
+                           " supplied does not match the number of detectors.");
+  }
+}
+
+void ScanningWorkspaceBuilder::validateInputs() const {
+  if (m_timeRanges.empty())
+    throw std::logic_error("Can not build workspace - time ranges have not "
+                           "been set. Please call setTimeRanges() before "
+                           "building.");
+}
+
+} // namespace DataObjects
+} // namespace Mantid
diff --git a/Framework/DataObjects/test/ScanningWorkspaceBuilderTest.h b/Framework/DataObjects/test/ScanningWorkspaceBuilderTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..7596a1bb447436044fc358e6c4180ed111b0012b
--- /dev/null
+++ b/Framework/DataObjects/test/ScanningWorkspaceBuilderTest.h
@@ -0,0 +1,627 @@
+#ifndef MANTID_DATAOBJECTS_SCANNINGWORKSPACEBUILDERTEST_H_
+#define MANTID_DATAOBJECTS_SCANNINGWORKSPACEBUILDERTEST_H_
+
+#include <cxxtest/TestSuite.h>
+
+#include "MantidDataObjects/ScanningWorkspaceBuilder.h"
+
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidHistogramData/BinEdges.h"
+#include "MantidHistogramData/Histogram.h"
+#include "MantidHistogramData/LinearGenerator.h"
+#include "MantidGeometry/Instrument.h"
+#include "MantidTestHelpers/WorkspaceCreationHelper.h"
+#include "MantidTypes/SpectrumDefinition.h"
+
+#include <cmath>
+
+using namespace Mantid::API;
+using namespace Mantid::Geometry;
+using namespace Mantid::HistogramData;
+using namespace Mantid::Kernel;
+using Mantid::DataObjects::ScanningWorkspaceBuilder;
+
+namespace {
+Instrument_const_sptr createSimpleInstrument(size_t nDetectors, size_t nBins) {
+  const auto &wsWithInstrument =
+      WorkspaceCreationHelper::create2DWorkspaceWithFullInstrument(
+          int(nDetectors), int(nBins));
+  return wsWithInstrument->getInstrument();
+}
+}
+
+class ScanningWorkspaceBuilderTest : public CxxTest::TestSuite {
+public:
+  // This pair of boilerplate methods prevent the suite being created statically
+  // This means the constructor isn't called when running other tests
+  static ScanningWorkspaceBuilderTest *createSuite() {
+    return new ScanningWorkspaceBuilderTest();
+  }
+  static void destroySuite(ScanningWorkspaceBuilderTest *suite) {
+    delete suite;
+  }
+
+  void tearDown() override {
+    positions.clear();
+    rotations.clear();
+    relativeRotations.clear();
+  }
+
+  void test_create_scanning_workspace_with_instrument_and_time_ranges() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges));
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace());
+
+    const auto &detectorInfo = ws->detectorInfo();
+
+    // Now check every detector has every time range set correctly
+    checkTimeRanges(detectorInfo);
+    // Quick check to see if the instrument is set as expected
+    TS_ASSERT_EQUALS(instrument->getNumberDetectors(),
+                     ws->getInstrument()->getNumberDetectors())
+  }
+
+  void test_create_scanning_workspace_with_histogram() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    BinEdges x(nBins + 1, LinearGenerator(0.0, 1.0));
+    Counts y(std::vector<double>(nBins, 5.0));
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges));
+    TS_ASSERT_THROWS_NOTHING(builder.setHistogram(Histogram(x, y)));
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace());
+
+    for (size_t i = 0; i < ws->getNumberHistograms(); ++i) {
+      const auto &hist = ws->histogram(i);
+
+      const auto &xValues = hist.x();
+      for (size_t i = 0; i < xValues.size(); ++i)
+        TS_ASSERT_EQUALS(xValues[i], double(i))
+
+      const auto &yValues = hist.y();
+      for (size_t i = 0; i < yValues.size(); ++i)
+        TS_ASSERT_EQUALS(yValues[i], 5.0)
+    }
+  }
+
+  void test_create_scanning_workspace_with_incorrectly_sized_histogram() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto wrongNBins = nBins - 2;
+    BinEdges x(wrongNBins + 1, LinearGenerator(0.0, 1.0));
+    Counts y(std::vector<double>(wrongNBins, 5.0));
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_EQUALS(
+        builder.setHistogram(Histogram(x, y)), const std::logic_error &e,
+        std::string(e.what()),
+        "Histogram supplied does not have the correct size.")
+  }
+
+  void test_create_scanning_workspace_with_time_durations() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(0, timeDurations))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace())
+
+    const auto &detectorInfo = ws->detectorInfo();
+
+    // Now check every detector has every time range set correctly
+    checkTimeRanges(detectorInfo);
+  }
+
+  void test_create_scanning_workspace_fails_if_no_time_ranges_set() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+
+    TS_ASSERT_THROWS_EQUALS(builder.buildWorkspace(), const std::logic_error &e,
+                            std::string(e.what()),
+                            "Can not build workspace - time ranges have not "
+                            "been set. Please call setTimeRanges() before "
+                            "building.")
+  }
+
+  void
+  test_create_scanning_workspace_fails_if_time_ranges_have_the_wrong_dimensions() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    std::vector<std::pair<DateAndTime, DateAndTime>> timeRangesWrongSize = {
+        {0, 1}, {1, 2}};
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_EQUALS(
+        builder.setTimeRanges(std::move(timeRangesWrongSize)),
+        const std::logic_error &e, std::string(e.what()),
+        "Number of start time, end time pairs supplied "
+        "does not match the number of time indexes.")
+  }
+
+  void
+  test_create_scanning_workspace_fails_if_time_durations_have_the_wrong_dimensions() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    std::vector<double> timeDurationsWrongSize = {0, 1e-9};
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_EQUALS(builder.setTimeRanges(0, timeDurationsWrongSize),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Number of time durations supplied does not match "
+                            "the number of time indexes.")
+  }
+
+  void test_creating_workspace_with_positions() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges))
+    initalisePositions(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setPositions(std::move(positions)))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace())
+
+    const auto &detectorInfo = ws->detectorInfo();
+
+    for (size_t i = 0; i < nDetectors; ++i) {
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        TS_ASSERT_EQUALS(V3D(double(i), double(j), 1.0),
+                         detectorInfo.position({i, j}))
+      }
+    }
+  }
+
+  void test_creating_workspace_with_positions_with_too_many_detectors() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initalisePositions(nDetectors + 1, nTimeIndexes);
+    TS_ASSERT_THROWS_EQUALS(
+        builder.setPositions(std::move(positions)), const std::logic_error &e,
+        std::string(e.what()),
+        "Number of positions supplied does not match the number of detectors.")
+  }
+
+  void test_creating_workspace_with_positions_with_too_many_time_indexes() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initalisePositions(nDetectors, nTimeIndexes + 1);
+    TS_ASSERT_THROWS_EQUALS(builder.setPositions(std::move(positions)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Number of positions supplied does not match the "
+                            "number of time indexes.")
+  }
+
+  void test_creating_workspace_with_rotations() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges))
+    initaliseRotations(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRotations(std::move(rotations)))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace())
+
+    const auto &detectorInfo = ws->detectorInfo();
+
+    for (size_t i = 0; i < nDetectors; ++i) {
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        auto quat = Quat(double(i), double(j), 1.0, 2.0);
+        quat.normalize();
+        TS_ASSERT_EQUALS(quat, detectorInfo.rotation({i, j}))
+      }
+    }
+  }
+
+  void test_creating_workspace_with_rotations_with_too_many_detectors() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initaliseRotations(nDetectors + 1, nTimeIndexes);
+    TS_ASSERT_THROWS_EQUALS(
+        builder.setRotations(std::move(rotations)), const std::logic_error &e,
+        std::string(e.what()),
+        "Number of rotations supplied does not match the number of detectors.")
+  }
+
+  void test_creating_workspace_with_rotations_with_too_many_time_indexes() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initaliseRotations(nDetectors, nTimeIndexes + 1);
+    TS_ASSERT_THROWS_EQUALS(builder.setRotations(std::move(rotations)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Number of rotations supplied does not match the "
+                            "number of time indexes.")
+  }
+
+  void test_creating_workspace_with_relative_rotations() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges))
+    initialiseRelativeRotations(nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRelativeRotationsForScans(
+        relativeRotations, V3D(0, 0, 0), V3D(0, 1, 0)))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace())
+
+    const auto &detInfo = ws->detectorInfo();
+
+    for (size_t i = 0; i < nDetectors; ++i) {
+      TS_ASSERT_DELTA(0.0, detInfo.position({i, 0}).X(), 1e-12)
+      TS_ASSERT_DELTA(5.0, detInfo.position({i, 0}).Z(), 1e-12)
+
+      TS_ASSERT_DELTA(2.5, detInfo.position({i, 1}).X(), 1e-12)
+      TS_ASSERT_DELTA(5.0 * sqrt(3) / 2, detInfo.position({i, 1}).Z(), 1e-12)
+
+      TS_ASSERT_DELTA(5.0 * sqrt(3) / 2, detInfo.position({i, 2}).X(), 1e-12)
+      TS_ASSERT_DELTA(2.5, detInfo.position({i, 2}).Z(), 1e-12)
+
+      TS_ASSERT_DELTA(5.0, detInfo.position({i, 3}).X(), 1e-12)
+      TS_ASSERT_DELTA(0.0, detInfo.position({i, 3}).Z(), 1e-12)
+
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        TS_ASSERT_DELTA(double(i) * 0.1, detInfo.position({i, j}).Y(), 1e-12)
+      }
+    }
+
+    for (size_t i = 0; i < nDetectors; ++i) {
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        // Rounding to nearest int required to avoid problem of Euler angles
+        // returning -180/0/180
+        TS_ASSERT_DELTA(0.0, std::lround(detInfo.rotation({i, j})
+                                             .getEulerAngles("XYZ")[0]) %
+                                 180,
+                        1e-12)
+        TS_ASSERT_DELTA(0.0, std::lround(detInfo.rotation({i, j})
+                                             .getEulerAngles("XYZ")[2]) %
+                                 180,
+                        1e-12)
+      }
+
+      TS_ASSERT_DELTA(
+          0.0,
+          std::lround(detInfo.rotation({i, 0}).getEulerAngles("XYZ")[1]) % 180,
+          1e-12)
+      TS_ASSERT_DELTA(30.0, detInfo.rotation({i, 1}).getEulerAngles("XYZ")[1],
+                      1e-12)
+      TS_ASSERT_DELTA(60.0, detInfo.rotation({i, 2}).getEulerAngles("XYZ")[1],
+                      1e-12)
+      TS_ASSERT_DELTA(90.0, detInfo.rotation({i, 3}).getEulerAngles("XYZ")[1],
+                      1e-12)
+    }
+  }
+
+  void test_creating_workspace_with_relative_rotations_and_offset() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges))
+    initialiseRelativeRotations(nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRelativeRotationsForScans(
+        relativeRotations, V3D(0, 0, 1), V3D(0, 1, 0)))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace())
+
+    const auto &detInfo = ws->detectorInfo();
+
+    for (size_t i = 0; i < nDetectors; ++i) {
+      TS_ASSERT_DELTA(0.0, detInfo.position({i, 0}).X(), 1e-12)
+      TS_ASSERT_DELTA(5.0, detInfo.position({i, 0}).Z(), 1e-12)
+
+      TS_ASSERT_DELTA(4.0, detInfo.position({i, 3}).X(), 1e-12)
+      TS_ASSERT_DELTA(1.0, detInfo.position({i, 3}).Z(), 1e-12)
+
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        TS_ASSERT_DELTA(double(i) * 0.1, detInfo.position({i, j}).Y(), 1e-12)
+      }
+    }
+  }
+
+  void
+  test_creating_workspace_with_relative_rotations_on_previously_rotated_detectors() {
+
+    const auto &instWS =
+        WorkspaceCreationHelper::create2DWorkspaceWithFullInstrument(
+            int(nDetectors), int(nBins));
+    auto &instDetInfo = instWS->mutableDetectorInfo();
+
+    Quat rotation = Quat(90.0, V3D(0, 0, 1));
+
+    for (size_t i = 0; i < instDetInfo.size(); ++i) {
+      instDetInfo.setRotation(i, rotation);
+    }
+
+    const auto &instrument = instWS->getInstrument();
+    TS_ASSERT(instrument->hasDetectorInfo())
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges))
+    initialiseRelativeRotations(nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRelativeRotationsForScans(
+        relativeRotations, V3D(0, 0, 1), V3D(0, 1, 0)))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace())
+
+    const auto &detInfo = ws->detectorInfo();
+
+    for (size_t i = 0; i < nDetectors; ++i) {
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        TS_ASSERT_DELTA(0.0, detInfo.rotation({i, j}).getEulerAngles("YXZ")[1],
+                        1e-12)
+        TS_ASSERT_DELTA(90.0, detInfo.rotation({i, j}).getEulerAngles("YXZ")[2],
+                        1e-12)
+      }
+
+      TS_ASSERT_DELTA(
+          0.0,
+          std::lround(detInfo.rotation({i, 0}).getEulerAngles("XYZ")[1]) % 180,
+          1e-12)
+      TS_ASSERT_DELTA(30.0, detInfo.rotation({i, 1}).getEulerAngles("XYZ")[1],
+                      1e-12)
+      TS_ASSERT_DELTA(60.0, detInfo.rotation({i, 2}).getEulerAngles("XYZ")[1],
+                      1e-12)
+      TS_ASSERT_DELTA(90.0, detInfo.rotation({i, 3}).getEulerAngles("XYZ")[1],
+                      1e-12)
+    }
+  }
+
+  void
+  test_creating_workspace_with_relative_rotations_fails_with_wrong_time_index_size() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initialiseRelativeRotations(nTimeIndexes + 1);
+    TS_ASSERT_THROWS_EQUALS(builder.setRelativeRotationsForScans(
+                                relativeRotations, V3D(0, 0, 0), V3D(0, 1, 0)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Number of instrument angles supplied does not "
+                            "match the number of time indexes.")
+  }
+
+  void
+  test_creating_workspace_with_positions_fails_with_positions_already_set() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initalisePositions(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setPositions(std::move(positions)))
+    TS_ASSERT_THROWS_EQUALS(builder.setPositions(std::move(positions)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Can not set positions, as positions "
+                            "or instrument angles have already been set.")
+  }
+
+  void
+  test_creating_workspace_with_rotations_fails_with_positions_already_set() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initaliseRotations(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRotations(std::move(rotations)))
+    TS_ASSERT_THROWS_EQUALS(builder.setRotations(std::move(rotations)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Can not set rotations, as rotations "
+                            "or instrument angles have already been set.")
+  }
+
+  void
+  test_creating_workspace_with_positions_fails_with_relative_rotations_set() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initialiseRelativeRotations(nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRelativeRotationsForScans(
+        relativeRotations, V3D(0, 0, 0), V3D(0, 1, 0)))
+    initalisePositions(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_EQUALS(builder.setPositions(std::move(positions)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Can not set positions, as positions "
+                            "or instrument angles have already been set.")
+  }
+
+  void
+  test_creating_workspace_with_rotations_fails_with_relative_rotations_set() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initialiseRelativeRotations(nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRelativeRotationsForScans(
+        relativeRotations, V3D(0, 0, 0), V3D(0, 1, 0)))
+    initaliseRotations(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_EQUALS(builder.setRotations(std::move(rotations)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Can not set rotations, as rotations "
+                            "or instrument angles have already been set.")
+  }
+
+  void
+  test_creating_workspace_with_relative_rotations_fails_with_positions_already_set() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initalisePositions(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setPositions(std::move(positions)))
+    initialiseRelativeRotations(nTimeIndexes);
+    TS_ASSERT_THROWS_EQUALS(builder.setRelativeRotationsForScans(
+                                relativeRotations, V3D(0, 0, 0), V3D(0, 1, 0)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Can not set instrument angles, as positions "
+                            "and/or rotations have already been set.")
+  }
+
+  void
+  test_creating_workspace_with_relative_rotations_fails_with_rotations_already_set() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    initaliseRotations(nDetectors, nTimeIndexes);
+    TS_ASSERT_THROWS_NOTHING(builder.setRotations(std::move(rotations)))
+    initialiseRelativeRotations(nTimeIndexes);
+    TS_ASSERT_THROWS_EQUALS(builder.setRelativeRotationsForScans(
+                                relativeRotations, V3D(0, 0, 0), V3D(0, 1, 0)),
+                            const std::logic_error &e, std::string(e.what()),
+                            "Can not set instrument angles, as positions "
+                            "and/or rotations have already been set.")
+  }
+
+  void test_creating_workspace_with_time_oriented_index_info() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges));
+    TS_ASSERT_THROWS_NOTHING(builder.setIndexingType(
+        ScanningWorkspaceBuilder::IndexingType::TimeOriented))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace());
+
+    const auto &indexInfo = ws->indexInfo();
+    const auto &detectorIDs = ws->detectorInfo().detectorIDs();
+    const auto &spectrumDefinitions = *(indexInfo.spectrumDefinitions());
+    for (size_t i = 0; i < nDetectors; ++i) {
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        const auto index = i * nTimeIndexes + j;
+        TS_ASSERT_EQUALS(spectrumDefinitions[index].size(), 1)
+        TS_ASSERT_EQUALS(spectrumDefinitions[index][0].first, i)
+        TS_ASSERT_EQUALS(spectrumDefinitions[index][0].second, j)
+        TS_ASSERT_EQUALS(detectorIDs[spectrumDefinitions[index][0].first],
+                         i + 1)
+      }
+    }
+  }
+
+  void test_creating_workspace_with_detector_oriented_index_info() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges));
+    TS_ASSERT_THROWS_NOTHING(builder.setIndexingType(
+        ScanningWorkspaceBuilder::IndexingType::DetectorOriented))
+    MatrixWorkspace_const_sptr ws;
+    TS_ASSERT_THROWS_NOTHING(ws = builder.buildWorkspace());
+
+    const auto &indexInfo = ws->indexInfo();
+    const auto &detectorIDs = ws->detectorInfo().detectorIDs();
+    const auto &spectrumDefinitions = *(indexInfo.spectrumDefinitions());
+    for (size_t i = 0; i < nTimeIndexes; ++i) {
+      for (size_t j = 0; j < nDetectors; ++j) {
+        const auto index = i * nDetectors + j;
+        TS_ASSERT_EQUALS(spectrumDefinitions[index].size(), 1)
+        TS_ASSERT_EQUALS(spectrumDefinitions[index][0].first, j)
+        TS_ASSERT_EQUALS(spectrumDefinitions[index][0].second, i)
+        TS_ASSERT_EQUALS(detectorIDs[spectrumDefinitions[index][0].first],
+                         j + 1)
+      }
+    }
+  }
+
+  void test_setting_indexing_type_twice_throws_and_error() {
+    const auto &instrument = createSimpleInstrument(nDetectors, nBins);
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    TS_ASSERT_THROWS_NOTHING(builder.setTimeRanges(timeRanges));
+    TS_ASSERT_THROWS_NOTHING(builder.setIndexingType(
+        ScanningWorkspaceBuilder::IndexingType::DetectorOriented))
+    TS_ASSERT_THROWS_EQUALS(
+        builder.setIndexingType(
+            ScanningWorkspaceBuilder::IndexingType::TimeOriented),
+        const std::logic_error &e, std::string(e.what()),
+        "Indexing type has been set already.")
+  }
+
+private:
+  size_t nDetectors = 5;
+  size_t nTimeIndexes = 4;
+  size_t nBins = 10;
+
+  const std::vector<std::pair<DateAndTime, DateAndTime>> timeRanges = {
+      {0, 1}, {1, 3}, {3, 6}, {6, 10}};
+
+  std::vector<double> timeDurations = {1e-9, 2e-9, 3e-9, 4e-9};
+
+  std::vector<std::vector<V3D>> positions;
+  std::vector<std::vector<Quat>> rotations;
+  std::vector<double> relativeRotations;
+
+  void initalisePositions(size_t nDetectors, size_t nTimeIndexes) {
+    for (size_t i = 0; i < nDetectors; ++i) {
+      std::vector<V3D> timePositions;
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        timePositions.push_back(V3D(double(i), double(j), 1.0));
+      }
+      positions.push_back(timePositions);
+    }
+  }
+
+  void initaliseRotations(size_t nDetectors, size_t nTimeIndexes) {
+    for (size_t i = 0; i < nDetectors; ++i) {
+      std::vector<Quat> timeRotations;
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        timeRotations.push_back(Quat(double(i), double(j), 1.0, 2.0));
+      }
+      rotations.push_back(timeRotations);
+    }
+  }
+
+  void initialiseRelativeRotations(size_t nTimeIndexes) {
+    for (size_t i = 0; i < nTimeIndexes; ++i) {
+      relativeRotations.push_back(double(i) * 30.0);
+    }
+  }
+
+  Instrument_const_sptr createSimpleInstrument(size_t nDetectors,
+                                               size_t nBins) {
+    const auto &wsWithInstrument =
+        WorkspaceCreationHelper::create2DWorkspaceWithFullInstrument(
+            int(nDetectors), int(nBins));
+    return wsWithInstrument->getInstrument();
+  }
+
+  void checkTimeRanges(const DetectorInfo &detectorInfo) {
+    for (size_t i = 0; i < nDetectors; ++i) {
+      for (size_t j = 0; j < nTimeIndexes; ++j) {
+        TS_ASSERT_EQUALS(detectorInfo.scanInterval({i, j}), timeRanges[j]);
+      }
+    }
+  }
+};
+
+class ScanningWorkspaceBuilderTestPerformance : public CxxTest::TestSuite {
+public:
+  void test_large_scanning_workspace() {
+    make_scanning_workspace(1000, 500, 1000);
+  }
+
+  void test_lots_of_small_scanning_workspaces() {
+    for (size_t i = 0; i < 200; ++i)
+      make_scanning_workspace(100, 50, 100);
+  }
+
+  void make_scanning_workspace(size_t nDetectors, size_t nTimeIndexes,
+                               size_t nBins) {
+
+    const auto &instrument = createSimpleInstrument(nDetectors, nTimeIndexes);
+
+    std::vector<std::pair<DateAndTime, DateAndTime>> timeRanges;
+    for (size_t i = 0; i < nTimeIndexes; ++i) {
+      timeRanges.push_back(std::pair<DateAndTime, DateAndTime>(
+          DateAndTime(i * 2), DateAndTime(i * 2 + 1)));
+    }
+
+    auto builder = ScanningWorkspaceBuilder(instrument, nTimeIndexes, nBins);
+    builder.setTimeRanges(timeRanges);
+    MatrixWorkspace_const_sptr ws;
+    ws = builder.buildWorkspace();
+  }
+};
+
+#endif /* MANTID_DATAOBJECTS_SCANNINGWORKSPACEBUILDERTEST_H_ */
diff --git a/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h b/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h
index 936cc621a2047bed32512d355d41e91e61b006d7..2032ca0603b9d75aed9522717e0e0445138f594a 100644
--- a/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h
+++ b/Framework/Geometry/inc/MantidGeometry/Crystal/PointGroup.h
@@ -63,7 +63,7 @@ public:
   Kernel::V3D getReflectionFamily(const Kernel::V3D &hkl) const;
 
 protected:
-  std::vector<Kernel::V3D> getEquivalentSet(const Kernel::V3D &hkl) const;
+  std::vector<Kernel::V3D> getAllEquivalents(const Kernel::V3D &hkl) const;
 
   CrystalSystem getCrystalSystemFromGroup() const;
   LatticeSystem getLatticeSystemFromCrystalSystemAndGroup(
diff --git a/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h b/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h
index 80e26f9a58149275abfd84643adf23d0ecec563e..3eb01e35222b08a2e55dc6298bac78ac20214849 100644
--- a/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h
+++ b/Framework/Geometry/inc/MantidGeometry/Crystal/ReflectionCondition.h
@@ -172,6 +172,12 @@ typedef boost::shared_ptr<ReflectionCondition> ReflectionCondition_sptr;
 
 MANTID_GEOMETRY_DLL std::vector<ReflectionCondition_sptr>
 getAllReflectionConditions();
+MANTID_GEOMETRY_DLL std::vector<std::string> getAllReflectionConditionNames();
+MANTID_GEOMETRY_DLL std::vector<std::string> getAllReflectionConditionSymbols();
+MANTID_GEOMETRY_DLL ReflectionCondition_sptr
+getReflectionConditionByName(const std::string &name);
+MANTID_GEOMETRY_DLL ReflectionCondition_sptr
+getReflectionConditionBySymbol(const std::string &symbol);
 
 } // namespace Mantid
 } // namespace Geometry
diff --git a/Framework/Geometry/src/Crystal/PointGroup.cpp b/Framework/Geometry/src/Crystal/PointGroup.cpp
index cb22e404cbbe09b3b9ebca272ae4152328d567ff..dedfda48c6dd29eabe32f511b86593ab09eb017c 100644
--- a/Framework/Geometry/src/Crystal/PointGroup.cpp
+++ b/Framework/Geometry/src/Crystal/PointGroup.cpp
@@ -31,7 +31,14 @@ using Kernel::IntMatrix;
  * @return :: std::vector containing all equivalent hkls.
  */
 std::vector<V3D> PointGroup::getEquivalents(const V3D &hkl) const {
-  return getEquivalentSet(hkl);
+  auto equivalents = getAllEquivalents(hkl);
+
+  std::sort(equivalents.begin(), equivalents.end(), std::greater<V3D>());
+
+  equivalents.erase(std::unique(equivalents.begin(), equivalents.end()),
+                    equivalents.end());
+
+  return equivalents;
 }
 
 /**
@@ -48,7 +55,9 @@ std::vector<V3D> PointGroup::getEquivalents(const V3D &hkl) const {
  * @return :: hkl specific to a family of index-triplets
  */
 V3D PointGroup::getReflectionFamily(const Kernel::V3D &hkl) const {
-  return *getEquivalentSet(hkl).begin();
+  auto equivalents = getAllEquivalents(hkl);
+
+  return *std::max_element(equivalents.begin(), equivalents.end());
 }
 
 /// Protected constructor - can not be used directly.
@@ -65,9 +74,9 @@ std::string PointGroup::getSymbol() const { return m_symbolHM; }
 
 bool PointGroup::isEquivalent(const Kernel::V3D &hkl,
                               const Kernel::V3D &hkl2) const {
-  std::vector<V3D> hklEquivalents = getEquivalentSet(hkl);
+  auto hklEquivalents = getAllEquivalents(hkl);
 
-  return (std::find(hklEquivalents.begin(), hklEquivalents.end(), hkl2) !=
+  return (std::find(hklEquivalents.cbegin(), hklEquivalents.cend(), hkl2) !=
           hklEquivalents.end());
 }
 
@@ -75,29 +84,24 @@ bool PointGroup::isEquivalent(const Kernel::V3D &hkl,
  * Generates a set of hkls
  *
  * This method applies all transformation matrices to the supplied hkl and puts
- * it into a set, which is returned in the end. Using a set ensures that each
- * hkl occurs once and only once. This set is the set of equivalent hkls,
- * specific to a concrete point group.
+ * them into a vector, which is returned in the end. For special reflections
+ * such as 100 or 110 or 111, the vector may contain duplicates that need to
+ * be filtered out.
  *
  * The symmetry operations need to be set prior to calling this method by a call
  * to PointGroup::setTransformationMatrices.
  *
  * @param hkl :: Arbitrary hkl
- * @return :: set of hkls.
+ * @return :: vector of hkls.
  */
-std::vector<V3D> PointGroup::getEquivalentSet(const Kernel::V3D &hkl) const {
+std::vector<V3D> PointGroup::getAllEquivalents(const Kernel::V3D &hkl) const {
   std::vector<V3D> equivalents;
   equivalents.reserve(m_allOperations.size());
 
   for (const auto &operation : m_allOperations) {
-    equivalents.push_back(operation.transformHKL(hkl));
+    equivalents.emplace_back(operation.transformHKL(hkl));
   }
 
-  std::sort(equivalents.begin(), equivalents.end(), std::greater<V3D>());
-
-  equivalents.erase(std::unique(equivalents.begin(), equivalents.end()),
-                    equivalents.end());
-
   return equivalents;
 }
 
diff --git a/Framework/Geometry/src/Crystal/ReflectionCondition.cpp b/Framework/Geometry/src/Crystal/ReflectionCondition.cpp
index eb9001c92a8c181e0ad316edc59a0a137d3c1037..dc787b9596593929d9a7ce22794c5ef6a1d85cad 100644
--- a/Framework/Geometry/src/Crystal/ReflectionCondition.cpp
+++ b/Framework/Geometry/src/Crystal/ReflectionCondition.cpp
@@ -1,5 +1,6 @@
 #include "MantidGeometry/Crystal/ReflectionCondition.h"
 #include "MantidKernel/System.h"
+#include <algorithm>
 
 namespace Mantid {
 namespace Geometry {
@@ -26,5 +27,82 @@ std::vector<ReflectionCondition_sptr> getAllReflectionConditions() {
   return out;
 }
 
+/// Helper function that transforms all ReflectionConditions to strings.
+std::vector<std::string> transformReflectionConditions(
+    const std::function<std::string(const ReflectionCondition_sptr &)> &fn) {
+  auto conditions = getAllReflectionConditions();
+
+  std::vector<std::string> names;
+  std::transform(conditions.cbegin(), conditions.cend(),
+                 std::back_inserter(names), fn);
+
+  return names;
+}
+
+/// Returns all ReflectionCondition names.
+std::vector<std::string> getAllReflectionConditionNames() {
+  return transformReflectionConditions(
+      [](const ReflectionCondition_sptr &condition) {
+        return condition->getName();
+      });
+}
+
+/// Returns all centering symbols.
+std::vector<std::string> getAllReflectionConditionSymbols() {
+  return transformReflectionConditions(
+      [](const ReflectionCondition_sptr &condition) {
+        return condition->getSymbol();
+      });
+}
+
+/**
+ * @brief Returns a reflection condition according to a filter function
+ *
+ * This small helper function returns a ReflectionCondition_sptr for which
+ * the supplied function returns true. If no ReflectionCondition is found,
+ * an std::invalid_argument exception is thrown. The message of the exception
+ * contains the hint-parameter, which could be string that was used as a
+ * matching criterion to find the ReflectionCondition.
+ *
+ * @param fn :: Unary predicate for matching ReflectionCondition
+ * @param hint :: Hint to include in exception message. Name or symbol.
+ * @return ReflectionCondition for which fn matches.
+ */
+ReflectionCondition_sptr getReflectionConditionWhere(
+    const std::function<bool(const ReflectionCondition_sptr &)> &fn,
+    const std::string &hint) {
+  auto conditions = getAllReflectionConditions();
+
+  auto it = std::find_if(conditions.cbegin(), conditions.cend(), fn);
+
+  if (it == conditions.cend()) {
+    throw std::invalid_argument("No ReflectionCondition found that matches '" +
+                                hint + "'.");
+  }
+
+  return *it;
+}
+
+/// Returns the requested ReflectionCondition, see
+/// getAllReflectionConditionNames for possible names.
+ReflectionCondition_sptr getReflectionConditionByName(const std::string &name) {
+  return getReflectionConditionWhere(
+      [=](const ReflectionCondition_sptr &condition) {
+        return condition->getName() == name;
+      },
+      name);
+}
+
+/// Returns the ReflectionCondition for the specified centering symbol, see
+/// getAllReflectionConditionSymbols for possible symbols.
+ReflectionCondition_sptr
+getReflectionConditionBySymbol(const std::string &symbol) {
+  return getReflectionConditionWhere(
+      [=](const ReflectionCondition_sptr &condition) {
+        return condition->getSymbol() == symbol;
+      },
+      symbol);
+}
+
 } // namespace Mantid
 } // namespace Geometry
diff --git a/Framework/Geometry/test/ReflectionConditionTest.h b/Framework/Geometry/test/ReflectionConditionTest.h
index 58875720487a3e13c50dd7a87cd2d7e6722f9e97..c8841ae0fee41586c4868b1a3a63d6d9acdd669d 100644
--- a/Framework/Geometry/test/ReflectionConditionTest.h
+++ b/Framework/Geometry/test/ReflectionConditionTest.h
@@ -68,6 +68,54 @@ public:
     // All centering symbols are present if the set is empty.
     TS_ASSERT_EQUALS(centeringSymbols.size(), 0);
   }
+
+  void test_getReflectionConditionNames() {
+    auto conditions = getAllReflectionConditions();
+    auto names = getAllReflectionConditionNames();
+
+    TS_ASSERT_EQUALS(conditions.size(), names.size());
+
+    // there should not be any duplicates in the names
+    std::unordered_set<std::string> nameSet(names.begin(), names.end());
+
+    TS_ASSERT_EQUALS(nameSet.size(), names.size())
+  }
+
+  void test_getReflectionConditionSymbols() {
+    auto conditions = getAllReflectionConditions();
+    auto symbols = getAllReflectionConditionSymbols();
+
+    TS_ASSERT_EQUALS(conditions.size(), symbols.size());
+
+    // there should not be any duplicates in the names
+    std::unordered_set<std::string> symbolSet(symbols.begin(), symbols.end());
+
+    TS_ASSERT_EQUALS(symbolSet.size(), symbols.size())
+  }
+
+  void test_getReflectionConditionByName() {
+    auto names = getAllReflectionConditionNames();
+
+    for (auto name : names) {
+      TSM_ASSERT_THROWS_NOTHING("Problem with ReflectionCondition: " + name,
+                                getReflectionConditionByName(name));
+    }
+
+    TS_ASSERT_THROWS(getReflectionConditionByName("invalid"),
+                     std::invalid_argument);
+  }
+
+  void test_getReflectionConditionBySymbol() {
+    auto symbols = getAllReflectionConditionSymbols();
+
+    for (auto symbol : symbols) {
+      TSM_ASSERT_THROWS_NOTHING("Problem with ReflectionCondition: " + symbol,
+                                getReflectionConditionBySymbol(symbol));
+    }
+
+    TS_ASSERT_THROWS(getReflectionConditionBySymbol("Q"),
+                     std::invalid_argument);
+  }
 };
 
 #endif /* MANTID_GEOMETRY_REFLECTIONCONDITIONTEST_H_ */
diff --git a/Framework/Kernel/CMakeLists.txt b/Framework/Kernel/CMakeLists.txt
index 12b62e27fb4911a45ef790318ed8230eb6c0ed8d..e7231730b93505f501c7d9d16d96687018db5f3d 100644
--- a/Framework/Kernel/CMakeLists.txt
+++ b/Framework/Kernel/CMakeLists.txt
@@ -236,6 +236,7 @@ set ( INC_FILES
 	inc/MantidKernel/NDPseudoRandomNumberGenerator.h
 	inc/MantidKernel/NDRandomNumberGenerator.h
 	inc/MantidKernel/NetworkProxy.h
+        inc/MantidKernel/NearestNeighbours.h
 	inc/MantidKernel/NeutronAtom.h
 	inc/MantidKernel/NexusDescriptor.h
 	inc/MantidKernel/NormalDistribution.h
@@ -388,6 +389,7 @@ set ( TEST_FILES
 	MutexTest.h
 	NDPseudoRandomNumberGeneratorTest.h
 	NDRandomNumberGeneratorTest.h
+        NearestNeighboursTest.h
 	NeutronAtomTest.h
 	NexusDescriptorTest.h
 	NormalDistributionTest.h
diff --git a/Framework/Kernel/inc/MantidKernel/Exception.h b/Framework/Kernel/inc/MantidKernel/Exception.h
index 448f840938e5557997b3c7b864e812b7aebf6ff3..369597d468aea5a6c2ec5799c7e3cb76eb7350db 100644
--- a/Framework/Kernel/inc/MantidKernel/Exception.h
+++ b/Framework/Kernel/inc/MantidKernel/Exception.h
@@ -4,9 +4,9 @@
 //----------------------------------------------------------------------
 // Includes
 //----------------------------------------------------------------------
+#include "MantidKernel/DllConfig.h"
 #include <stdexcept>
 #include <string>
-#include "MantidKernel/DllConfig.h"
 
 namespace Mantid {
 namespace Kernel {
@@ -347,6 +347,17 @@ public:
   const int &errorCode() const;
 };
 
+/// Exception thrown when a fitting function changes number of parameters
+/// during fit.
+class MANTID_KERNEL_DLL FitSizeWarning final : public std::exception {
+  std::string m_message;
+
+public:
+  explicit FitSizeWarning(size_t oldSize);
+  FitSizeWarning(size_t oldSize, size_t newSize);
+  const char *what() const noexcept override;
+};
+
 } // namespace Exception
 } // namespace Kernel
 } // namespace Mantid
diff --git a/Framework/Kernel/inc/MantidKernel/NearestNeighbours.h b/Framework/Kernel/inc/MantidKernel/NearestNeighbours.h
new file mode 100644
index 0000000000000000000000000000000000000000..39c823e956eeb1d0bd6a638f957fe78e9e95f1cd
--- /dev/null
+++ b/Framework/Kernel/inc/MantidKernel/NearestNeighbours.h
@@ -0,0 +1,196 @@
+#ifndef MANTID_KERNEL_NEARESTNEIGHBOURS_H_
+#define MANTID_KERNEL_NEARESTNEIGHBOURS_H_
+
+#include "MantidKernel/DllConfig.h"
+#include "MantidKernel/ANN/ANN.h"
+#include "MantidKernel/make_unique.h"
+
+#include <Eigen/Core>
+#include <vector>
+
+/**
+  NearestNeighbours is a thin wrapper class around the ANN library for finding
+  the k nearest neighbours.
+
+  Given a vector of Eigen::Vectors this class will generate a KDTree. The tree
+  can then be interrogated to find the closest k neighbours to a given position.
+
+  This classes is templated with a parameter N which defines the dimensionality
+  of the vector type used. i.e. if N = 3 then Eigen::Vector3d is used.
+
+  @author Samuel Jackson
+  @date 2017
+
+  Copyright &copy; 2016 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+  National Laboratory & European Spallation Source
+
+  This file is part of Mantid.
+
+  Mantid is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  Mantid is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+  File change history is stored at: <https://github.com/mantidproject/mantid>
+  Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+
+namespace Mantid {
+namespace Kernel {
+
+//------------------------------------------------------------------------------
+// Helper classes
+//------------------------------------------------------------------------------
+
+/**
+ * NNDataPoints is a thin RAII wrapper class around the ANNpointArray type. This
+ * takes care of the proper allocation and deallocation of memory.
+ */
+class NNDataPoints {
+public:
+  /** Construct a new set of data points
+   *
+   * @param nPts :: the number of data points
+   * @param nElems :: the number of elements for each point
+   */
+  NNDataPoints(const int nPts, const int nElems) : m_nPts(nPts) {
+    m_data = annAllocPts(m_nPts, nElems);
+  }
+
+  ~NNDataPoints() { annDeallocPts(m_data); }
+
+  /** Return a handle to the raw ANNpointArray wrapped by this class
+   *
+   * @return handle to the raw ANNpointArray
+   */
+  ANNpointArray rawData() { return m_data; }
+
+  /** Access a raw point in the collection of points
+   *
+   * This will check the index used is within bounds and return nullptr if
+   * outside of those bounds
+   *
+   * @param i :: the index of the point to return a handle to
+   * @return handle to a single point in the collection of points
+   */
+  ANNcoord *mutablePoint(const int i) {
+    if (i < m_nPts)
+      return m_data[i];
+    else
+      return nullptr;
+  }
+
+private:
+  /// Number of points stored
+  const int m_nPts;
+  /// Array of points for use with NN search
+  ANNpointArray m_data;
+};
+
+//------------------------------------------------------------------------------
+// NearestNeighbours implementation
+//------------------------------------------------------------------------------
+
+template <size_t N = 3> class DLLExport NearestNeighbours {
+
+public:
+  // typedefs for code brevity
+  typedef Eigen::Matrix<double, N, 1> VectorType;
+  typedef std::vector<std::tuple<VectorType, size_t, double>>
+      NearestNeighbourResults;
+
+  /** Create a nearest neighbour search object
+   *
+   * @param points :: vector of Eigen::Vectors to search through
+   */
+  NearestNeighbours(const std::vector<VectorType> &points) {
+    const auto numPoints = static_cast<int>(points.size());
+    if (numPoints == 0)
+      std::runtime_error(
+          "Need at least one point to initialise NearestNeighbours.");
+
+    m_dataPoints = make_unique<NNDataPoints>(numPoints, static_cast<int>(N));
+
+    for (size_t i = 0; i < points.size(); ++i) {
+      Eigen::Map<VectorType>(m_dataPoints->mutablePoint(static_cast<int>(i)), N,
+                             1) = points[i];
+    }
+    m_kdTree = make_unique<ANNkd_tree>(m_dataPoints->rawData(), numPoints,
+                                       static_cast<int>(N));
+  }
+
+  ~NearestNeighbours() { annClose(); }
+
+  NearestNeighbours(const NearestNeighbours &) = delete;
+
+  /** Find the k nearest neighbours to a given point
+   *
+   * This is a thin wrapper around the ANN library annkSearch method
+   *
+   * @param pos :: the position to find th k nearest neighbours of
+   * @param k :: the number of neighbours to find
+   * @param error :: error term for finding approximate nearest neighbours. if
+   * 	zero then exact neighbours will be found. (default = 0.0).
+   * @return vector neighbours as tuples of (position, index, distance)
+   */
+  NearestNeighbourResults findNearest(const VectorType &pos, const size_t k = 1,
+                                      const double error = 0.0) {
+    const auto numNeighbours = static_cast<int>(k);
+    // create arrays to store the indices & distances of nearest neighbours
+    auto nnIndexList = std::unique_ptr<ANNidx[]>(new ANNidx[numNeighbours]);
+    auto nnDistList = std::unique_ptr<ANNdist[]>(new ANNdist[numNeighbours]);
+
+    // create ANNpoint from Eigen array
+    auto point = std::unique_ptr<ANNcoord[]>(annAllocPt(N));
+    Eigen::Map<VectorType>(point.get(), N, 1) = pos;
+
+    // find the k nearest neighbours
+    m_kdTree->annkSearch(point.get(), numNeighbours, nnIndexList.get(),
+                         nnDistList.get(), error);
+
+    return makeResults(k, std::move(nnIndexList), std::move(nnDistList));
+  }
+
+private:
+  /** Helper function to create a instance of NearestNeighbourResults
+   *
+   * @param k :: the number of neighbours searched for
+   * @param nnIndexList :: the ordered list of indicies matching the closest k
+   *neighbours
+   * @param nnDistList :: the ordered list of distances matching the closest k
+   *neighbours
+   * @return a new NearestNeighbourResults object from the found items
+   */
+  NearestNeighbourResults
+  makeResults(const size_t k, const std::unique_ptr<ANNidx[]> nnIndexList,
+              const std::unique_ptr<ANNdist[]> nnDistList) {
+    NearestNeighbourResults results;
+    results.reserve(k);
+
+    for (size_t i = 0; i < k; ++i) {
+      // create Eigen array from ANNpoint
+      auto pos = m_dataPoints->mutablePoint(nnIndexList[i]);
+      VectorType point = Eigen::Map<VectorType>(pos, N, 1);
+      results.emplace_back(point, nnIndexList[i], nnDistList[i]);
+    }
+
+    return results;
+  }
+
+  /// handle to the list of data points to search through
+  std::unique_ptr<NNDataPoints> m_dataPoints;
+  /// handle to the ANN KD-tree used for searching
+  std::unique_ptr<ANNkd_tree> m_kdTree;
+};
+}
+}
+
+#endif
diff --git a/Framework/Kernel/inc/MantidKernel/Unit.h b/Framework/Kernel/inc/MantidKernel/Unit.h
index 83a5ba8af90b342f9683e2befbaecb0ebf35cef6..a2d8f33fcd064af3a6c4740dfdfef67459824953 100644
--- a/Framework/Kernel/inc/MantidKernel/Unit.h
+++ b/Framework/Kernel/inc/MantidKernel/Unit.h
@@ -408,6 +408,33 @@ protected:
   double factorFrom; ///< Constant factor for from conversion
 };
 
+//=================================================================================================
+/// d-SpacingPerpendicular in Angstrom
+class MANTID_KERNEL_DLL dSpacingPerpendicular : public Unit {
+public:
+  const std::string unitID() const override; ///< "dSpacingPerpendicular"
+  const std::string caption() const override {
+    return "d-SpacingPerpendicular";
+  }
+  const UnitLabel label() const override;
+
+  double singleToTOF(const double x) const override;
+  double singleFromTOF(const double tof) const override;
+  void init() override;
+  Unit *clone() const override;
+  double conversionTOFMin() const override;
+  double conversionTOFMax() const override;
+
+  /// Constructor
+  dSpacingPerpendicular();
+
+protected:
+  double factorTo;   ///< Constant factor for to conversion
+  double sfpTo;      ///< Extra correction factor in to conversion
+  double factorFrom; ///< Constant factor for from conversion
+  double sfpFrom;    ///< Extra correction factor in to conversion
+};
+
 //=================================================================================================
 /// Momentum Transfer in Angstrom^-1
 class MANTID_KERNEL_DLL MomentumTransfer : public Unit {
diff --git a/Framework/Kernel/src/Exception.cpp b/Framework/Kernel/src/Exception.cpp
index 387951f26b3b89cde1c56a04af5f54581869a0cd..ec245c1586f0749463da02d2909ea11072f43b7f 100644
--- a/Framework/Kernel/src/Exception.cpp
+++ b/Framework/Kernel/src/Exception.cpp
@@ -1,5 +1,5 @@
-#include <sstream>
 #include "MantidKernel/Exception.h"
+#include <sstream>
 
 namespace Mantid {
 namespace Kernel {
@@ -353,6 +353,29 @@ const char *InternetError::what() const noexcept { return outMessage.c_str(); }
 */
 const int &InternetError::errorCode() const { return m_errorCode; }
 
+//-------------------------
+// FitSizeError Error class
+//-------------------------
+
+/// Constructor.
+/// @param oldSize :: Old number of free fitting parameters
+FitSizeWarning::FitSizeWarning(size_t oldSize)
+    : std::exception(),
+      m_message(
+          "Number of fitting parameters is different from original value of " +
+          std::to_string(oldSize)) {}
+
+/// Constructor.
+/// @param oldSize :: Old number of free fitting parameters
+/// @param newSize :: New number of free fitting parameters
+FitSizeWarning::FitSizeWarning(size_t oldSize, size_t newSize)
+    : std::exception(),
+      m_message("Number of fitting parameters changed from " +
+                std::to_string(oldSize) + " to " + std::to_string(newSize)) {}
+
+/// Get the warning message.
+const char *FitSizeWarning::what() const noexcept { return m_message.c_str(); }
+
 } // namespace Exception
 } // namespace Kernel
 } // namespace Mantid
diff --git a/Framework/Kernel/src/NexusDescriptor.cpp b/Framework/Kernel/src/NexusDescriptor.cpp
index 3efd1c29011fc55152493066945571888012f503..8aa73127a37efa2ef8cf6e99321c7ae89e82a91c 100644
--- a/Framework/Kernel/src/NexusDescriptor.cpp
+++ b/Framework/Kernel/src/NexusDescriptor.cpp
@@ -1,5 +1,7 @@
 #include "MantidKernel/NexusDescriptor.h"
 
+#include <boost/algorithm/string.hpp>
+
 #include <nexus/NeXusFile.hpp>
 #include <nexus/NeXusException.hpp>
 
@@ -247,7 +249,7 @@ void NexusDescriptor::walkFile(::NeXus::File &file, const std::string &rootPath,
     const std::string &entryClass = it->second;
     const std::string entryPath =
         std::string(rootPath).append("/").append(entryName);
-    if (entryClass == "SDS") {
+    if (entryClass == "SDS" || entryClass == "ILL_data_scan_vars") {
       pmap.emplace(entryPath, entryClass);
     } else if (entryClass == "CDF0.0") {
       // Do nothing with this
diff --git a/Framework/Kernel/src/Unit.cpp b/Framework/Kernel/src/Unit.cpp
index 1e8ee389c90e51e23fa51132005a683858b00442..1146334d2f5aeaae1762e80c48143aff8f1cebde 100644
--- a/Framework/Kernel/src/Unit.cpp
+++ b/Framework/Kernel/src/Unit.cpp
@@ -583,6 +583,61 @@ double dSpacing::conversionTOFMax() const { return DBL_MAX / factorTo; }
 
 Unit *dSpacing::clone() const { return new dSpacing(*this); }
 
+// ==================================================================================================
+/* D-SPACING Perpendicular
+ * ==================================================================================================
+ *
+ * Conversion uses equation: dp^2 = lambda^2 - 2[Angstrom^2]*ln(cos(theta))
+ */
+DECLARE_UNIT(dSpacingPerpendicular)
+
+const UnitLabel dSpacingPerpendicular::label() const {
+  return Symbol::Angstrom;
+}
+
+dSpacingPerpendicular::dSpacingPerpendicular()
+    : Unit(), factorTo(DBL_MIN), factorFrom(DBL_MIN) {}
+
+void dSpacingPerpendicular::init() {
+  factorTo =
+      (PhysicalConstants::NeutronMass * (l1 + l2)) / PhysicalConstants::h;
+
+  // Now adjustments for the scale of units used
+  const double TOFisinMicroseconds = 1e6;
+  const double toAngstroms = 1e10;
+  factorTo *= TOFisinMicroseconds / toAngstroms;
+  factorFrom = factorTo;
+  if (factorFrom == 0.0)
+    factorFrom = DBL_MIN; // Protect against divide by zero
+  double cos_theta = cos(twoTheta / 2.0);
+  sfpTo = 0.0;
+  if (cos_theta > 0)
+    sfpTo = 2.0 * log(cos_theta);
+  sfpFrom = sfpTo;
+}
+
+double dSpacingPerpendicular::singleToTOF(const double x) const {
+  double sqrtarg = x * x + sfpTo;
+  // consider very small values to be a rounding error
+  if (sqrtarg < 1.0e-17)
+    return 0.0;
+  return sqrt(sqrtarg) * factorTo;
+}
+double dSpacingPerpendicular::singleFromTOF(const double tof) const {
+  double temp = tof / factorFrom;
+  return sqrt(temp * temp - sfpFrom);
+}
+double dSpacingPerpendicular::conversionTOFMin() const {
+  return sqrt(-1.0 * sfpFrom);
+}
+double dSpacingPerpendicular::conversionTOFMax() const {
+  return sqrt(std::numeric_limits<double>::max()) / factorFrom;
+}
+
+Unit *dSpacingPerpendicular::clone() const {
+  return new dSpacingPerpendicular(*this);
+}
+
 // ================================================================================
 /* MOMENTUM TRANSFER
  * ================================================================================
diff --git a/Framework/Kernel/test/NearestNeighboursTest.h b/Framework/Kernel/test/NearestNeighboursTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..53f453aa126da20eee9bceeb3bfe5a3edcf69704
--- /dev/null
+++ b/Framework/Kernel/test/NearestNeighboursTest.h
@@ -0,0 +1,65 @@
+#ifndef MANTID_KERNEL_NEARESTNEIGHBOURSTEST_H_
+#define MANTID_KERNEL_NEARESTNEIGHBOURSTEST_H_
+
+#include <cxxtest/TestSuite.h>
+#include "MantidKernel/NearestNeighbours.h"
+
+using Mantid::Kernel::NearestNeighbours;
+using namespace Eigen;
+
+class NearestNeighboursTest : public CxxTest::TestSuite {
+public:
+  NearestNeighboursTest() {}
+
+  void test_construct() {
+    std::vector<Vector3d> pts1 = {Vector3d(1, 1, 1), Vector3d(2, 2, 2)};
+    TS_ASSERT_THROWS_NOTHING(NearestNeighbours<3> nn(pts1));
+
+    std::vector<Vector2d> pts2 = {Vector2d(1, 1), Vector2d(2, 2)};
+    TS_ASSERT_THROWS_NOTHING(NearestNeighbours<2> nn(pts2));
+  }
+
+  void test_find_nearest() {
+    std::vector<Eigen::Vector3d> pts = {Vector3d(1, 1, 1), Vector3d(2, 2, 2)};
+    NearestNeighbours<3> nn(pts);
+
+    auto results = nn.findNearest(Vector3d(1, 1, 0.9));
+    TS_ASSERT_EQUALS(results.size(), 1)
+
+    Eigen::Vector3d pos = std::get<0>(results[0]);
+    auto index = std::get<1>(results[0]);
+    auto dist = std::get<2>(results[0]);
+    TS_ASSERT_EQUALS(pos[0], 1)
+    TS_ASSERT_EQUALS(pos[1], 1)
+    TS_ASSERT_EQUALS(pos[2], 1)
+    TS_ASSERT_EQUALS(index, 0)
+    TS_ASSERT_DELTA(dist, 0, 0.01)
+  }
+
+  void test_find_nearest_2() {
+    std::vector<Eigen::Vector2d> pts = {Vector2d(1, 1), Vector2d(2, 2),
+                                        Vector2d(2, 3)};
+    NearestNeighbours<2> nn(pts);
+
+    auto results = nn.findNearest(Vector2d(1, 0.9), 2);
+    TS_ASSERT_EQUALS(results.size(), 2)
+
+    Eigen::Vector2d pos = std::get<0>(results[0]);
+    auto index = std::get<1>(results[0]);
+    auto dist = std::get<2>(results[0]);
+    TS_ASSERT_EQUALS(pos[0], 1)
+    TS_ASSERT_EQUALS(pos[1], 1)
+    TS_ASSERT_EQUALS(index, 0)
+    TS_ASSERT_DELTA(dist, 0, 0.01)
+
+    pos = std::get<0>(results[1]);
+    index = std::get<1>(results[1]);
+    dist = std::get<2>(results[1]);
+    TS_ASSERT_EQUALS(pos[0], 2)
+    TS_ASSERT_EQUALS(pos[1], 2)
+    TS_ASSERT_EQUALS(index, 1)
+    TS_ASSERT_DELTA(dist, 2.21, 0.01)
+  }
+};
+
+#endif
diff --git a/Framework/Kernel/test/UnitTest.h b/Framework/Kernel/test/UnitTest.h
index 97be03c941fad28fd34c5b9fa3571603e262e478..0e6aad1d046e4e5b736adbb17b8d0e2055c16bd1 100644
--- a/Framework/Kernel/test/UnitTest.h
+++ b/Framework/Kernel/test/UnitTest.h
@@ -217,6 +217,9 @@ public:
     unit = dSpacing().clone();
     TS_ASSERT(dynamic_cast<dSpacing *>(unit));
     delete unit;
+    unit = dSpacingPerpendicular().clone();
+    TS_ASSERT(dynamic_cast<dSpacingPerpendicular *>(unit));
+    delete unit;
     unit = MomentumTransfer().clone();
     TS_ASSERT(dynamic_cast<MomentumTransfer *>(unit));
     delete unit;
@@ -592,6 +595,66 @@ public:
     }
   }
 
+  //----------------------------------------------------------------------
+  // d-SpacingPerpebdicular tests
+  //----------------------------------------------------------------------
+
+  void testdSpacingPerpendicular_unitID() {
+    TS_ASSERT_EQUALS(dp.unitID(), "dSpacingPerpendicular")
+  }
+
+  void testdSpacingPerpendicular_caption() {
+    TS_ASSERT_EQUALS(dp.caption(), "d-SpacingPerpendicular")
+  }
+
+  void testdSpacingPerpendicular_label() {
+    TS_ASSERT_EQUALS(dp.label().ascii(), "Angstrom")
+    TS_ASSERT_EQUALS(dp.label().utf8(), L"\u212b")
+  }
+
+  void testdSpacingPerpendicular_cast() {
+    Unit *u = NULL;
+    TS_ASSERT_THROWS_NOTHING(u = dynamic_cast<Unit *>(&dp));
+    TS_ASSERT_EQUALS(u->unitID(), "dSpacingPerpendicular");
+  }
+
+  void testdSpacingPerpendicular_toTOF() {
+    std::vector<double> x(1, 1.0), y(1, 1.0);
+    std::vector<double> yy = y;
+    TS_ASSERT_THROWS_NOTHING(dp.toTOF(x, y, 1.0, 1.0, 1.0, 1, 1.0, 1.0))
+    TS_ASSERT_DELTA(x[0], 434.5529, 0.0001)
+    TS_ASSERT(yy == y)
+  }
+
+  void testdSpacingPerpendicular_fromTOF() {
+    std::vector<double> x(1, 1001.1), y(1, 1.0);
+    std::vector<double> yy = y;
+    TS_ASSERT_THROWS_NOTHING(dp.fromTOF(x, y, 1.0, 1.0, 1.0, 1, 1.0, 1.0))
+    TS_ASSERT_DELTA(x[0], 2.045075, 0.000001)
+    TS_ASSERT(yy == y)
+  }
+
+  void testdSpacingPerpendicularRange() {
+    std::vector<double> sample, rezult;
+
+    std::string err_mess = convert_units_check_range(dp, sample, rezult);
+    TSM_ASSERT(" ERROR:" + err_mess, err_mess.size() == 0);
+
+    for (size_t i = 0; i < sample.size(); i++) {
+      if (std::fabs(sample[i]) < 10 * FLT_EPSILON) {
+        TSM_ASSERT_DELTA(
+            "d-spacingPerpendicular limits Failed for conversion N: " +
+                boost::lexical_cast<std::string>(i),
+            sample[i], rezult[i], 10 * FLT_EPSILON);
+      } else {
+        TSM_ASSERT_DELTA(
+            "d-spacingPerpendicular limits Failed for conversion N: " +
+                boost::lexical_cast<std::string>(i),
+            rezult[i] / sample[i], 1., 10 * FLT_EPSILON);
+      }
+    }
+  }
+
   //----------------------------------------------------------------------
   // Momentum Transfer tests
   //----------------------------------------------------------------------
@@ -1334,6 +1397,7 @@ private:
   Units::Energy energy;
   Units::Energy_inWavenumber energyk;
   Units::dSpacing d;
+  Units::dSpacingPerpendicular dp;
   Units::MomentumTransfer q;
   Units::QSquared q2;
   Units::DeltaE dE;
diff --git a/Framework/MDAlgorithms/CMakeLists.txt b/Framework/MDAlgorithms/CMakeLists.txt
index 248281fdc21459691ad4e02a54486bcec7e2924d..0219f3a29207a4a1ed464a35395e888780d6e481 100644
--- a/Framework/MDAlgorithms/CMakeLists.txt
+++ b/Framework/MDAlgorithms/CMakeLists.txt
@@ -55,6 +55,7 @@ set ( SRC_FILES
     src/ImportMDHistoWorkspaceBase.cpp
     src/Integrate3DEvents.cpp
     src/IntegrateEllipsoids.cpp
+    src/IntegrateEllipsoidsTwoStep.cpp
     src/IntegrateFlux.cpp
     src/IntegrateMDHistoWorkspace.cpp
     src/IntegratePeaksMD.cpp
@@ -185,6 +186,7 @@ set ( INC_FILES
     inc/MantidMDAlgorithms/ImportMDHistoWorkspaceBase.h
     inc/MantidMDAlgorithms/Integrate3DEvents.h
     inc/MantidMDAlgorithms/IntegrateEllipsoids.h
+    inc/MantidMDAlgorithms/IntegrateEllipsoidsTwoStep.h
     inc/MantidMDAlgorithms/IntegrateFlux.h
     inc/MantidMDAlgorithms/IntegrateMDHistoWorkspace.h
     inc/MantidMDAlgorithms/IntegratePeaksMD.h
@@ -315,6 +317,7 @@ set ( TEST_FILES
     ImportMDHistoWorkspaceTest.h
     Integrate3DEventsTest.h
     IntegrateEllipsoidsTest.h
+    IntegrateEllipsoidsTwoStepTest.h
     IntegrateFluxTest.h
     IntegrateMDHistoWorkspaceTest.h
     IntegratePeaksMD2Test.h
diff --git a/Framework/MDAlgorithms/inc/MantidMDAlgorithms/Integrate3DEvents.h b/Framework/MDAlgorithms/inc/MantidMDAlgorithms/Integrate3DEvents.h
index c89e1bc245d777392672b8b516fb8d51322790e6..4669451844eb172ef2b6802b7da58b024c889c7f 100644
--- a/Framework/MDAlgorithms/inc/MantidMDAlgorithms/Integrate3DEvents.h
+++ b/Framework/MDAlgorithms/inc/MantidMDAlgorithms/Integrate3DEvents.h
@@ -1,12 +1,17 @@
 #ifndef INTEGRATE_3D_EVENTS_H
 #define INTEGRATE_3D_EVENTS_H
 
-#include <vector>
-#include <boost/shared_ptr.hpp>
-#include <unordered_map>
+#include "MantidDataObjects/Peak.h"
+#include "MantidDataObjects/PeakShapeEllipsoid.h"
 #include "MantidKernel/V3D.h"
 #include "MantidKernel/Matrix.h"
 
+#include <boost/shared_ptr.hpp>
+
+#include <tuple>
+#include <unordered_map>
+#include <vector>
+
 namespace Mantid {
 namespace Geometry {
 class PeakShape;
@@ -16,6 +21,15 @@ class PeakShapeEllipsoid;
 }
 namespace MDAlgorithms {
 
+struct IntegrationParameters {
+  std::vector<Kernel::V3D> E1Vectors;
+  double backgroundInnerRadius;
+  double backgroundOuterRadius;
+  double regionRadius;
+  double peakRadius;
+  bool specifySize;
+};
+
 /**
     @class Integrate3DEvents
 
@@ -76,7 +90,34 @@ public:
       double back_outer_radius, std::vector<double> &axes_radii, double &inti,
       double &sigi);
 
+  /// Find the net integrated intensity of a peak, using ellipsoidal volumes
+  std::pair<boost::shared_ptr<const Mantid::Geometry::PeakShape>,
+            std::tuple<double, double, double>>
+  integrateStrongPeak(const IntegrationParameters &params,
+                      const Kernel::V3D &peak_q, double &inti, double &sigi);
+
+  boost::shared_ptr<const Geometry::PeakShape>
+  integrateWeakPeak(const IntegrationParameters &params,
+                    Mantid::DataObjects::PeakShapeEllipsoid_const_sptr shape,
+                    const std::tuple<double, double, double> &libPeak,
+                    const Mantid::Kernel::V3D &peak_q, double &inti,
+                    double &sigi);
+
+  double estimateSignalToNoiseRatio(const IntegrationParameters &params,
+                                    const Mantid::Kernel::V3D &center);
+
 private:
+  /// Get a list of events for a given Q
+  boost::optional<const std::vector<std::pair<double, Mantid::Kernel::V3D>> &>
+  getEvents(const Mantid::Kernel::V3D &peak_q);
+
+  bool correctForDetectorEdges(std::tuple<double, double, double> &radii,
+                               const std::vector<Mantid::Kernel::V3D> &E1Vecs,
+                               const Mantid::Kernel::V3D &peak_q,
+                               const std::vector<double> &axesRadii,
+                               const std::vector<double> &bkgInnerRadii,
+                               const std::vector<double> &bkgOuterRadii);
+
   /// Calculate the number of events in an ellipsoid centered at 0,0,0
   static double numInEllipsoid(
       std::vector<std::pair<double, Mantid::Kernel::V3D>> const &events,
@@ -122,9 +163,18 @@ private:
       std::vector<double> const &sigmas, bool specify_size, double peak_radius,
       double back_inner_radius, double back_outer_radius,
       std::vector<double> &axes_radii, double &inti, double &sigi);
+
+  /// Compute if a particular Q falls on the edge of a detector
   double detectorQ(std::vector<Kernel::V3D> E1Vec,
-                   const Mantid::Kernel::V3D QLabFrame, std::vector<double> &r);
+                   const Mantid::Kernel::V3D QLabFrame,
+                   const std::vector<double> &r);
+
+  std::tuple<double, double, double>
+  calculateRadiusFactors(const IntegrationParameters &params,
+                         double max_sigma) const;
+
   // Private data members
+
   PeakQMap m_peak_qs;         // hashtable with peak Q-vectors
   EventListMap m_event_lists; // hashtable with lists of events for each peak
   Kernel::DblMatrix m_UBinv;  // matrix mapping from Q to h,k,l
diff --git a/Framework/MDAlgorithms/inc/MantidMDAlgorithms/IntegrateEllipsoidsTwoStep.h b/Framework/MDAlgorithms/inc/MantidMDAlgorithms/IntegrateEllipsoidsTwoStep.h
new file mode 100644
index 0000000000000000000000000000000000000000..23b7c18a4b1bfc5aa3ac2adc72e20f12c0b7b34f
--- /dev/null
+++ b/Framework/MDAlgorithms/inc/MantidMDAlgorithms/IntegrateEllipsoidsTwoStep.h
@@ -0,0 +1,84 @@
+#ifndef MANTID_MDALGORITHMS_INTEGRATE_ELLIPSOIDS_TWO_STEP_H_
+#define MANTID_MDALGORITHMS_INTEGRATE_ELLIPSOIDS_TWO_STEP_H_
+
+#include "MantidAPI/Algorithm.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidAPI/Progress.h"
+#include "MantidKernel/Matrix.h"
+#include "MantidDataObjects/EventWorkspace.h"
+#include "MantidDataObjects/PeaksWorkspace.h"
+#include "MantidDataObjects/Workspace2D.h"
+#include "MantidMDAlgorithms/Integrate3DEvents.h"
+
+namespace Mantid {
+namespace MDAlgorithms {
+
+/** @class IntegrateEllipsoidsTwoStep
+
+  IntegrateEllipsoidsTwoStep provides a two pass peak integration algorithm.
+
+Copyright &copy; 2011-14 ISIS Rutherford Appleton Laboratory, NScD Oak Ridge
+National Laboratory & European Spallation Source
+
+This file is part of Mantid.
+
+Mantid is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+Mantid is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+File change history is stored at: <https://github.com/mantidproject/mantid>.
+Code Documentation is available at: <http://doxygen.mantidproject.org>
+*/
+
+class DLLExport IntegrateEllipsoidsTwoStep : public API::Algorithm {
+public:
+  /// Get the name of this algorithm
+  const std::string name() const override;
+  /// Get the version of this algorithm
+  int version() const override;
+  /// Get the category of this algorithm
+  const std::string category() const override;
+  /// Summary of algorithms purpose
+  const std::string summary() const override {
+    return "Integrate Single Crystal Diffraction Bragg peaks using 3D "
+           "ellipsoids.";
+  }
+
+private:
+  void init() override;
+  void exec() override;
+  IntegrationParameters
+  makeIntegrationParameters(const Kernel::V3D &peak_q) const;
+
+  void qListFromHistoWS(Integrate3DEvents &integrator, API::Progress &prog,
+                        DataObjects::Workspace2D_sptr &wksp,
+                        const Kernel::DblMatrix &UBinv, bool hkl_integ);
+  void qListFromEventWS(Integrate3DEvents &integrator, API::Progress &prog,
+                        DataObjects::EventWorkspace_sptr &wksp,
+                        const Kernel::DblMatrix &UBinv, bool hkl_integ);
+  /// Calculate if this Q is on a detector
+  void calculateE1(const API::DetectorInfo &detectorInfo);
+  void runMaskDetectors(Mantid::DataObjects::PeaksWorkspace_sptr peakWS,
+                        std::string property, std::string values);
+
+  /// integrate a collection of strong peaks
+  DataObjects::PeaksWorkspace_sptr
+  integratePeaks(DataObjects::PeaksWorkspace_sptr peaks,
+                 API::MatrixWorkspace_sptr ws);
+  /// save all detector pixels
+  std::vector<Kernel::V3D> E1Vec;
+};
+
+} // namespace MDAlgorithms
+} // namespace Mantid
+
+#endif /* MANTID_MDALGORITHMS_INTEGRATE_ELLIPSOIDS_TWO_STEP_H_ */
diff --git a/Framework/MDAlgorithms/inc/MantidMDAlgorithms/IntegratePeaksCWSD.h b/Framework/MDAlgorithms/inc/MantidMDAlgorithms/IntegratePeaksCWSD.h
index 138d93afc4c94e982cb421a1a6c972cfc6765d79..04360a2a6c4aaa783ce10adaf2cbb5446591945d 100644
--- a/Framework/MDAlgorithms/inc/MantidMDAlgorithms/IntegratePeaksCWSD.h
+++ b/Framework/MDAlgorithms/inc/MantidMDAlgorithms/IntegratePeaksCWSD.h
@@ -67,6 +67,9 @@ private:
 
   void mergePeaks();
 
+  /// Implement this method to normalize the intensity of each Pt.
+  void normalizePeaksIntensities();
+
   DataObjects::PeaksWorkspace_sptr
   createPeakworkspace(Kernel::V3D peakCenter, API::IMDEventWorkspace_sptr mdws);
 
diff --git a/Framework/MDAlgorithms/src/ConvertCWSDExpToMomentum.cpp b/Framework/MDAlgorithms/src/ConvertCWSDExpToMomentum.cpp
index 1d9c5ce5367b477928bef82b85cfc30b965825e4..99cf13f2d04bac443735b49f4e4c380e489332a7 100644
--- a/Framework/MDAlgorithms/src/ConvertCWSDExpToMomentum.cpp
+++ b/Framework/MDAlgorithms/src/ConvertCWSDExpToMomentum.cpp
@@ -612,21 +612,21 @@ ConvertCWSDExpToMomentum::loadSpiceData(const std::string &filename,
     IAlgorithm_sptr loader = createChildAlgorithm("LoadSpiceXML2DDet");
     loader->initialize();
     loader->setProperty("Filename", filename);
-    std::vector<size_t> sizelist(2);
-    sizelist[0] = 256;
-    sizelist[1] = 256;
-    loader->setProperty("DetectorGeometry", sizelist);
+    // std::vector<size_t> sizelist(2);
+    // sizelist[0] = 256;
+    // sizelist[1] = 256;
+    // loader->setProperty("DetectorGeometry", sizelist);
     loader->setProperty("LoadInstrument", true);
     loader->setProperty("ShiftedDetectorDistance", m_detSampleDistanceShift);
     loader->setProperty("DetectorCenterXShift", m_detXShift);
     loader->setProperty("DetectorCenterYShift", m_detYShift);
 
     // TODO/FIXME - This is not a nice solution for detector geometry
-    std::string idffile = getPropertyValue("InstrumentFilename");
-    if (idffile.size() > 0) {
-      loader->setProperty("InstrumentFilename", idffile);
-      loader->setProperty("DetectorGeometry", "512, 512");
-    }
+    // std::string idffile = getPropertyValue("InstrumentFilename");
+    // if (idffile.size() > 0) {
+    //   loader->setProperty("InstrumentFilename", idffile);
+    //   loader->setProperty("DetectorGeometry", "512, 512");
+    // }
 
     double wavelength = getProperty("UserDefinedWavelength");
 
diff --git a/Framework/MDAlgorithms/src/Integrate3DEvents.cpp b/Framework/MDAlgorithms/src/Integrate3DEvents.cpp
index 0169c85c26249b18b2e4fc9f1a7e2ef3270b9b70..1ca503c063f054551766ced19daab854b3232440 100644
--- a/Framework/MDAlgorithms/src/Integrate3DEvents.cpp
+++ b/Framework/MDAlgorithms/src/Integrate3DEvents.cpp
@@ -1,10 +1,13 @@
 #include "MantidMDAlgorithms/Integrate3DEvents.h"
 #include "MantidDataObjects/NoShape.h"
 #include "MantidDataObjects/PeakShapeEllipsoid.h"
+
 #include <boost/make_shared.hpp>
 #include <boost/math/special_functions/round.hpp>
 #include <cmath>
 #include <fstream>
+#include <tuple>
+#include <numeric>
 
 extern "C" {
 #include <cstdio>
@@ -69,6 +72,266 @@ void Integrate3DEvents::addEvents(
   }
 }
 
+std::pair<boost::shared_ptr<const Geometry::PeakShape>,
+          std::tuple<double, double, double>>
+Integrate3DEvents::integrateStrongPeak(const IntegrationParameters &params,
+                                       const V3D &peak_q, double &inti,
+                                       double &sigi) {
+
+  inti = 0.0; // default values, in case something
+  sigi = 0.0; // is wrong with the peak.
+  auto result = getEvents(peak_q);
+  if (!result)
+    return std::make_pair(boost::make_shared<NoShape>(),
+                          make_tuple(0., 0., 0.));
+
+  const auto &events = result.get();
+  if (events.empty())
+    return std::make_pair(boost::make_shared<NoShape>(),
+                          make_tuple(0., 0., 0.));
+
+  DblMatrix cov_matrix(3, 3);
+  makeCovarianceMatrix(events, cov_matrix, params.regionRadius);
+
+  std::vector<V3D> eigen_vectors;
+  getEigenVectors(cov_matrix, eigen_vectors);
+
+  std::vector<double> sigmas;
+  for (int i = 0; i < 3; i++) {
+    sigmas.push_back(stdDev(events, eigen_vectors[i], params.regionRadius));
+  }
+
+  bool invalid_peak =
+      std::any_of(sigmas.cbegin(), sigmas.cend(), [](const double sigma) {
+        return std::isnan(sigma) || sigma <= 0;
+      });
+
+  if (invalid_peak)
+    return std::make_pair(boost::make_shared<NoShape>(),
+                          make_tuple(0., 0., 0.));
+
+  const auto max_sigma = *std::max_element(sigmas.begin(), sigmas.end());
+  if (max_sigma == 0)
+    return std::make_pair(boost::make_shared<NoShape>(),
+                          make_tuple(0., 0., 0.));
+
+  auto rValues = calculateRadiusFactors(params, max_sigma);
+  auto &r1 = std::get<0>(rValues), r2 = std::get<1>(rValues),
+       r3 = std::get<2>(rValues);
+
+  std::vector<double> abcBackgroundOuterRadii, abcBackgroundInnerRadii;
+  std::vector<double> peakRadii;
+  for (int i = 0; i < 3; i++) {
+    abcBackgroundOuterRadii.push_back(r3 * sigmas[i]);
+    abcBackgroundInnerRadii.push_back(r2 * sigmas[i]);
+    peakRadii.push_back(r1 * sigmas[i]);
+  }
+
+  const auto isPeakOnDetector =
+      correctForDetectorEdges(rValues, params.E1Vectors, peak_q, peakRadii,
+                              abcBackgroundInnerRadii, abcBackgroundOuterRadii);
+
+  if (!isPeakOnDetector)
+    return std::make_pair(boost::make_shared<NoShape>(),
+                          make_tuple(0.0, 0.0, 0.));
+
+  const auto backgrd = numInEllipsoidBkg(
+      events, eigen_vectors, abcBackgroundOuterRadii, abcBackgroundInnerRadii);
+  const auto core = numInEllipsoid(events, eigen_vectors, sigmas);
+  const auto peak = numInEllipsoid(events, eigen_vectors, peakRadii);
+  const auto ratio = pow(r1, 3) / (pow(r3, 3) - pow(r2, 3));
+
+  inti = peak - ratio * backgrd;
+  sigi = sqrt(peak + ratio * ratio * backgrd);
+
+  if (inti < 0) {
+    inti = 0;
+    sigi = 0;
+  }
+
+  // compute the fraction of peak within the standard core
+  const auto total = (core + peak) - ratio * backgrd;
+  const auto frac = std::min(1.0, std::abs(inti / total));
+  // compute the uncertainty in the fraction
+  const auto df_ds_core = (1 - frac) / peak;
+  const auto df_ds_peak = frac / peak;
+  const auto fracError =
+      sqrt(peak * pow(df_ds_core, 2) + core * pow(df_ds_peak, 2));
+
+  // create the peaks shape for the strong peak
+  const auto shape = boost::make_shared<const PeakShapeEllipsoid>(
+      eigen_vectors, peakRadii, abcBackgroundInnerRadii,
+      abcBackgroundOuterRadii, Mantid::Kernel::QLab,
+      "IntegrateEllipsoidsTwoStep");
+
+  return std::make_pair(shape, std::make_tuple(frac, fracError, max_sigma));
+}
+
+boost::shared_ptr<const Geometry::PeakShape>
+Integrate3DEvents::integrateWeakPeak(
+    const IntegrationParameters &params, PeakShapeEllipsoid_const_sptr shape,
+    const std::tuple<double, double, double> &libPeak, const V3D &center,
+    double &inti, double &sigi) {
+
+  inti = 0.0; // default values, in case something
+  sigi = 0.0; // is wrong with the peak.
+
+  auto result = getEvents(center);
+  if (!result)
+    return boost::make_shared<NoShape>();
+
+  const auto &events = result.get();
+
+  const auto &directions = shape->directions();
+  const auto &abcBackgroundInnerRadii = shape->abcRadiiBackgroundInner();
+  const auto &abcBackgroundOuterRadii = shape->abcRadiiBackgroundOuter();
+  const auto &abcRadii = shape->abcRadii();
+
+  const auto max_sigma = std::get<2>(libPeak);
+  auto rValues = calculateRadiusFactors(params, max_sigma);
+
+  const auto isPeakOnDetector =
+      correctForDetectorEdges(rValues, params.E1Vectors, center, abcRadii,
+                              abcBackgroundInnerRadii, abcBackgroundOuterRadii);
+
+  if (!isPeakOnDetector)
+    return shape;
+
+  const double r1 = std::get<0>(rValues), r2 = std::get<1>(rValues),
+               r3 = std::get<2>(rValues);
+
+  // integrate
+  double backgrd = numInEllipsoidBkg(
+      events, directions, abcBackgroundOuterRadii, abcBackgroundInnerRadii);
+  double peak_w_back = numInEllipsoid(events, directions, abcRadii);
+  double ratio = pow(r1, 3) / (pow(r3, 3) - pow(r2, 3));
+
+  const auto frac = std::get<0>(libPeak);
+  const auto fracError = std::get<1>(libPeak);
+
+  inti = peak_w_back - ratio * backgrd;
+  sigi = inti + ratio * ratio * backgrd;
+
+  // correct for fractional intensity
+  sigi = sigi / pow(inti, 2);
+  sigi += pow((fracError / frac), 2);
+
+  inti = inti * frac;
+  sigi = sqrt(sigi) * inti;
+
+  if (inti < 0) {
+    inti = 0;
+    sigi = 0;
+  }
+
+  return shape;
+}
+
+double Integrate3DEvents::estimateSignalToNoiseRatio(
+    const IntegrationParameters &params, const V3D &center) {
+
+  auto result = getEvents(center);
+  if (!result)
+    return .0;
+
+  const auto &events = result.get();
+  if (events.empty())
+    return .0;
+
+  DblMatrix cov_matrix(3, 3);
+  makeCovarianceMatrix(events, cov_matrix, params.regionRadius);
+
+  std::vector<V3D> eigen_vectors;
+  getEigenVectors(cov_matrix, eigen_vectors);
+
+  std::vector<double> sigmas;
+  for (int i = 0; i < 3; i++) {
+    sigmas.push_back(stdDev(events, eigen_vectors[i], params.regionRadius));
+  }
+
+  const auto max_sigma = *std::max_element(sigmas.begin(), sigmas.end());
+  if (max_sigma == 0)
+    return .0;
+
+  auto rValues = calculateRadiusFactors(params, max_sigma);
+  auto &r1 = std::get<0>(rValues), r2 = std::get<1>(rValues),
+       r3 = std::get<2>(rValues);
+
+  std::vector<double> abcBackgroundOuterRadii, abcBackgroundInnerRadii;
+  std::vector<double> peakRadii;
+  for (int i = 0; i < 3; i++) {
+    abcBackgroundOuterRadii.push_back(r3 * sigmas[i]);
+    abcBackgroundInnerRadii.push_back(r2 * sigmas[i]);
+    peakRadii.push_back(r1 * sigmas[i]);
+  }
+
+  // Background / Peak / Background
+  double backgrd = numInEllipsoidBkg(
+      events, eigen_vectors, abcBackgroundOuterRadii, abcBackgroundInnerRadii);
+
+  double peak_w_back = numInEllipsoid(events, eigen_vectors, peakRadii);
+
+  double ratio = pow(r1, 3) / (pow(r3, 3) - pow(r2, 3));
+  double inti = peak_w_back - ratio * backgrd;
+
+  return inti / std::max(1.0, (ratio * backgrd));
+}
+
+boost::optional<const std::vector<std::pair<double, V3D>> &>
+Integrate3DEvents::getEvents(const V3D &peak_q) {
+  const auto hkl_key = getHklKey(peak_q);
+
+  if (hkl_key == 0)
+    return boost::optional<const std::vector<std::pair<double, V3D>> &>();
+
+  const auto pos = m_event_lists.find(hkl_key);
+  using EventListType = const decltype(pos->second) &;
+
+  if (m_event_lists.end() == pos)
+    return boost::optional<EventListType>();
+
+  if (pos->second.size() < 3) // if there are not enough events
+    return boost::optional<EventListType>();
+
+  return boost::make_optional<EventListType>(pos->second);
+}
+
+bool Integrate3DEvents::correctForDetectorEdges(
+    std::tuple<double, double, double> &radii, const std::vector<V3D> &E1Vecs,
+    const V3D &peak_q, const std::vector<double> &axesRadii,
+    const std::vector<double> &bkgInnerRadii,
+    const std::vector<double> &bkgOuterRadii) {
+
+  if (E1Vecs.empty())
+    return true;
+
+  auto &r1 = std::get<0>(radii), r2 = std::get<1>(radii),
+       r3 = std::get<2>(radii);
+  auto h3 = 1.0 - detectorQ(E1Vecs, peak_q, bkgOuterRadii);
+  // scaled from area of circle minus segment when r normalized to 1
+  auto m3 = std::sqrt(
+      1.0 -
+      (std::acos(1.0 - h3) - (1.0 - h3) * std::sqrt(2.0 * h3 - h3 * h3)) /
+          M_PI);
+  auto h1 = 1.0 - detectorQ(E1Vecs, peak_q, axesRadii);
+  // Do not use peak if edge of detector is inside integration radius
+  if (h1 > 0.0)
+    return false;
+
+  r3 *= m3;
+  if (r2 != r1) {
+    auto h2 = 1.0 - detectorQ(E1Vecs, peak_q, bkgInnerRadii);
+    // scaled from area of circle minus segment when r normalized to 1
+    auto m2 = std::sqrt(
+        1.0 -
+        (std::acos(1.0 - h2) - (1.0 - h2) * std::sqrt(2.0 * h2 - h2 * h2)) /
+            M_PI);
+    r2 *= m2;
+  }
+
+  return true;
+}
+
 /**
  * Integrate the events around the specified peak Q-vector.  The principal
  * axes of the events near this Q-vector and the standard deviations in the
@@ -141,14 +404,10 @@ Integrate3DEvents::ellipseIntegrateEvents(
     sigmas.push_back(stdDev(some_events, eigen_vectors[i], m_radius));
   }
 
-  bool invalid_peak = false;
-  for (int i = 0; i < 3; i++) {
-    if ((std::isnan)(sigmas[i])) {
-      invalid_peak = true;
-    } else if (sigmas[i] <= 0) {
-      invalid_peak = true;
-    }
-  }
+  bool invalid_peak =
+      std::any_of(sigmas.cbegin(), sigmas.cend(), [](const double sigma) {
+        return std::isnan(sigma) || sigma <= 0;
+      });
 
   if (invalid_peak)                       // if data collapses to a line or
   {                                       // to a plane, the volume of the
@@ -582,7 +841,7 @@ PeakShapeEllipsoid_const_sptr Integrate3DEvents::ellipseIntegrateEvents(
  */
 double Integrate3DEvents::detectorQ(std::vector<Kernel::V3D> E1Vec,
                                     const Mantid::Kernel::V3D QLabFrame,
-                                    std::vector<double> &r) {
+                                    const std::vector<double> &r) {
   double quot = 1.0;
   for (auto &E1 : E1Vec) {
     V3D distv = QLabFrame -
@@ -595,6 +854,45 @@ double Integrate3DEvents::detectorQ(std::vector<Kernel::V3D> E1Vec,
   }
   return quot;
 }
+
+/** Calculate the radius to use for each axis of the ellipsoid from the
+ * parameters provided
+ *
+ * @param params :: the integration parameters
+ * @param max_sigma :: the largest sigma of all axes
+ * @return tuple of values representing the radius for each axis.
+ */
+std::tuple<double, double, double>
+Integrate3DEvents::calculateRadiusFactors(const IntegrationParameters &params,
+                                          double max_sigma) const {
+  double r1 = 0, r2 = 0, r3 = 0;
+
+  if (!params.specifySize) {
+    r1 = 3;
+    r2 = 3;
+    r3 = r2 * 1.25992105; // A factor of 2 ^ (1/3) will make the background
+    // shell volume equal to the peak region volume.
+
+    // if necessary restrict the background ellipsoid
+    // to lie within the specified sphere, and adjust
+    // the other sizes, proportionally
+    if (r3 * max_sigma > params.regionRadius) {
+      r3 = params.regionRadius / max_sigma;
+      r1 = r3 * 0.79370053f; // This value for r1 and r2 makes the background
+      r2 = r1;               // shell volume equal to the peak region volume.
+    }
+  } else {
+    // scale specified sizes by 1/max_sigma
+    // so when multiplied by the individual
+    // sigmas in different directions, the
+    r1 = params.peakRadius / max_sigma;
+    r2 = params.backgroundInnerRadius / max_sigma;
+    r3 = params.backgroundOuterRadius / max_sigma;
+  }
+
+  return std::make_tuple(r1, r2, r3);
+}
+
 } // namespace MDAlgorithms
 
 } // namespace Mantid
diff --git a/Framework/MDAlgorithms/src/IntegrateEllipsoids.cpp b/Framework/MDAlgorithms/src/IntegrateEllipsoids.cpp
index 9fa72ce1ae2209cb1b944f5a8de5c1a6c064aaf7..c9022bae8c85636046b13162c0dc9efcc57bc085 100644
--- a/Framework/MDAlgorithms/src/IntegrateEllipsoids.cpp
+++ b/Framework/MDAlgorithms/src/IntegrateEllipsoids.cpp
@@ -24,6 +24,7 @@
 #include "MantidMDAlgorithms/UnitsConversionHelper.h"
 
 #include <boost/math/special_functions/round.hpp>
+#include <cmath>
 
 using namespace Mantid::API;
 using namespace Mantid::HistogramData;
@@ -175,6 +176,8 @@ void IntegrateEllipsoids::qListFromHistoWS(Integrate3DEvents &integrator,
         if (hkl_integ)
           qVec = UBinv * qVec;
 
+        if (std::isnan(qVec[0]) || std::isnan(qVec[1]) || std::isnan(qVec[2]))
+          continue;
         // Account for counts in histograms by increasing the qList with the
         // same q-point
         qList.emplace_back(yVal, qVec);
diff --git a/Framework/MDAlgorithms/src/IntegrateEllipsoidsTwoStep.cpp b/Framework/MDAlgorithms/src/IntegrateEllipsoidsTwoStep.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6e54b4c1d988f3621d7815f5af3d4a8822f98fb7
--- /dev/null
+++ b/Framework/MDAlgorithms/src/IntegrateEllipsoidsTwoStep.cpp
@@ -0,0 +1,553 @@
+#include "MantidMDAlgorithms/IntegrateEllipsoidsTwoStep.h"
+
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/InstrumentValidator.h"
+#include "MantidAPI/Run.h"
+#include "MantidAPI/Sample.h"
+#include "MantidDataObjects/PeaksWorkspace.h"
+#include "MantidDataObjects/PeakShapeEllipsoid.h"
+#include "MantidDataObjects/Workspace2D.h"
+#include "MantidDataObjects/EventWorkspace.h"
+#include "MantidGeometry/Crystal/IndexingUtils.h"
+#include "MantidGeometry/Crystal/OrientedLattice.h"
+#include "MantidKernel/BoundedValidator.h"
+#include "MantidKernel/CompositeValidator.h"
+#include "MantidKernel/make_unique.h"
+#include "MantidKernel/NearestNeighbours.h"
+#include "MantidMDAlgorithms/Integrate3DEvents.h"
+#include "MantidMDAlgorithms/MDTransfFactory.h"
+#include "MantidMDAlgorithms/MDTransfQ3D.h"
+#include "MantidMDAlgorithms/UnitsConversionHelper.h"
+
+#include <cmath>
+#include <string>
+#include <tuple>
+#include <boost/math/special_functions/round.hpp>
+
+using namespace Mantid::API;
+using namespace Mantid::DataObjects;
+using namespace Mantid::Kernel;
+
+namespace Mantid {
+namespace MDAlgorithms {
+
+// Register the algorithm into the AlgorithmFactory
+DECLARE_ALGORITHM(IntegrateEllipsoidsTwoStep)
+
+//---------------------------------------------------------------------
+/// Algorithm's name for identification. @see Algorithm::name
+const std::string IntegrateEllipsoidsTwoStep::name() const {
+  return "IntegrateEllipsoidsTwoStep";
+}
+
+/// Algorithm's version for identification. @see Algorithm::version
+int IntegrateEllipsoidsTwoStep::version() const { return 1; }
+
+/// Algorithm's category for identification. @see Algorithm::category
+const std::string IntegrateEllipsoidsTwoStep::category() const {
+  return "Crystal\\Integration";
+}
+
+void IntegrateEllipsoidsTwoStep::init() {
+  auto ws_valid = boost::make_shared<CompositeValidator>();
+  ws_valid->add<InstrumentValidator>();
+
+  auto mustBePositive = boost::make_shared<BoundedValidator<double>>();
+  mustBePositive->setLower(0.0);
+
+  declareProperty(make_unique<WorkspaceProperty<MatrixWorkspace>>(
+                      "InputWorkspace", "", Direction::Input, ws_valid),
+                  "An input MatrixWorkspace with time-of-flight units along "
+                  "X-axis and defined instrument with defined sample");
+
+  declareProperty(make_unique<WorkspaceProperty<PeaksWorkspace>>(
+                      "PeaksWorkspace", "", Direction::InOut),
+                  "Workspace with peaks to be integrated");
+
+  declareProperty("RegionRadius", .35, mustBePositive,
+                  "Only events at most this distance from a peak will be "
+                  "considered when integrating");
+
+  declareProperty(
+      "SpecifySize", false,
+      "If true, use the following for the major axis sizes, else use 3-sigma");
+
+  declareProperty("PeakSize", .18, mustBePositive,
+                  "Half-length of major axis for peak ellipsoid");
+
+  declareProperty("BackgroundInnerSize", .18, mustBePositive,
+                  "Half-length of major axis for inner ellipsoidal surface of "
+                  "background region");
+
+  declareProperty("BackgroundOuterSize", .23, mustBePositive,
+                  "Half-length of major axis for outer ellipsoidal surface of "
+                  "background region");
+
+  declareProperty("IntegrateInHKL", false,
+                  "If true, integrate in HKL space not Q space.");
+  declareProperty(
+      "IntegrateIfOnEdge", true,
+      "Set to false to not integrate if peak radius is off edge of detector."
+      "Background will be scaled if background radius is off edge.");
+
+  declareProperty("AdaptiveQBackground", false,
+                  "Default is false.   If true, "
+                  "BackgroundOuterRadius + AdaptiveQMultiplier * **|Q|** and "
+                  "BackgroundInnerRadius + AdaptiveQMultiplier * **|Q|**");
+
+  declareProperty("AdaptiveQMultiplier", 0.0,
+                  "PeakRadius + AdaptiveQMultiplier * **|Q|** "
+                  "so each peak has a "
+                  "different integration radius.  Q includes the 2*pi factor.");
+
+  declareProperty("WeakPeakThreshold", 1.0, mustBePositive,
+                  "Intensity threshold use to classify a peak as weak.");
+
+  declareProperty(
+      make_unique<WorkspaceProperty<PeaksWorkspace>>("OutputWorkspace", "",
+                                                     Direction::Output),
+      "The output PeaksWorkspace will be a copy of the input PeaksWorkspace "
+      "with the peaks' integrated intensities.");
+}
+
+void IntegrateEllipsoidsTwoStep::exec() {
+  PeaksWorkspace_sptr input_peak_ws = getProperty("PeaksWorkspace");
+  MatrixWorkspace_sptr input_ws = getProperty("InputWorkspace");
+  EventWorkspace_sptr eventWS =
+      boost::dynamic_pointer_cast<EventWorkspace>(input_ws);
+
+  Workspace2D_sptr histoWS = boost::dynamic_pointer_cast<Workspace2D>(input_ws);
+  if (!eventWS && !histoWS) {
+    throw std::runtime_error("IntegrateEllipsoids needs either a "
+                             "EventWorkspace or Workspace2D as input.");
+  }
+
+  const double weakPeakThreshold = getProperty("WeakPeakThreshold");
+
+  // validation of inputs
+  if (!input_peak_ws) {
+    throw std::runtime_error("Could not read the Peaks Workspace");
+  }
+
+  if (!input_ws) {
+    throw std::runtime_error("Could not read the Input Workspace");
+  }
+
+  PeaksWorkspace_sptr peak_ws = getProperty("OutputWorkspace");
+  if (peak_ws != input_peak_ws) {
+    peak_ws = input_peak_ws->clone();
+  }
+
+  Progress prog(this, 0.5, 1.0, input_ws->getNumberHistograms());
+
+  std::vector<Peak> &peaks = peak_ws->getPeaks();
+  size_t n_peaks = peak_ws->getNumberPeaks();
+  size_t indexed_count = 0;
+  std::vector<V3D> peak_q_list;
+  std::vector<V3D> hkl_vectors;
+  for (size_t i = 0; i < n_peaks; i++) // Note: we skip un-indexed peaks
+  {
+    V3D hkl(peaks[i].getH(), peaks[i].getK(), peaks[i].getL());
+    if (Geometry::IndexingUtils::ValidIndex(hkl, 1.0)) // use tolerance == 1 to
+                                                       // just check for (0,0,0)
+    {
+      peak_q_list.emplace_back(peaks[i].getQLabFrame());
+      V3D miller_ind(static_cast<double>(boost::math::iround<double>(hkl[0])),
+                     static_cast<double>(boost::math::iround<double>(hkl[1])),
+                     static_cast<double>(boost::math::iround<double>(hkl[2])));
+      hkl_vectors.push_back(miller_ind);
+      indexed_count++;
+    }
+  }
+
+  if (indexed_count < 3) {
+    throw std::runtime_error(
+        "At least three linearly independent indexed peaks are needed.");
+  }
+  // Get UB using indexed peaks and
+  // lab-Q vectors
+  Matrix<double> UB(3, 3, false);
+  Geometry::IndexingUtils::Optimize_UB(UB, hkl_vectors, peak_q_list);
+  Matrix<double> UBinv(UB);
+  UBinv.Invert();
+  UBinv *= (1.0 / (2.0 * M_PI));
+
+  std::vector<std::pair<double, V3D>> qList;
+  for (size_t i = 0; i < n_peaks; i++) {
+    qList.emplace_back(1., V3D(peaks[i].getQLabFrame()));
+  }
+
+  const bool integrateEdge = getProperty("IntegrateIfOnEdge");
+  if (!integrateEdge) {
+    // This only fails in the unit tests which say that MaskBTP is not
+    // registered
+    try {
+      runMaskDetectors(input_peak_ws, "Tube", "edges");
+      runMaskDetectors(input_peak_ws, "Pixel", "edges");
+    } catch (...) {
+      g_log.error("Can't execute MaskBTP algorithm for this instrument to set "
+                  "edge for IntegrateIfOnEdge option");
+    }
+    calculateE1(
+        input_peak_ws->detectorInfo()); // fill E1Vec for use in detectorQ
+  }
+
+  const bool integrateInHKL = getProperty("IntegrateInHKL");
+  Integrate3DEvents integrator(qList, UBinv, getProperty("RegionRadius"));
+
+  if (eventWS) {
+    // process as EventWorkspace
+    qListFromEventWS(integrator, prog, eventWS, UBinv, integrateInHKL);
+  } else {
+    // process as Workspace2D
+    qListFromHistoWS(integrator, prog, histoWS, UBinv, integrateInHKL);
+  }
+
+  std::vector<std::pair<int, V3D>> weakPeaks, strongPeaks;
+
+  // Compute signal to noise ratio for all peaks
+  int index = 0;
+  for (const auto &item : qList) {
+    const auto center = item.second;
+    IntegrationParameters params = makeIntegrationParameters(center);
+    auto sig2noise = integrator.estimateSignalToNoiseRatio(params, center);
+
+    auto &peak = peak_ws->getPeak(index);
+    peak.setIntensity(0);
+    peak.setSigmaIntensity(0);
+
+    const auto result = std::make_pair(index, center);
+    if (sig2noise < weakPeakThreshold) {
+      g_log.notice() << "Peak " << peak.getHKL() << " with Q = " << center
+                     << " is a weak peak with signal to noise " << sig2noise
+                     << "\n";
+      weakPeaks.push_back(result);
+    } else {
+      g_log.notice() << "Peak " << peak.getHKL() << " with Q = " << center
+                     << " is a strong peak with signal to noise " << sig2noise
+                     << "\n";
+      strongPeaks.push_back(result);
+    }
+    ++index;
+  }
+
+  std::vector<std::pair<boost::shared_ptr<const Geometry::PeakShape>,
+                        std::tuple<double, double, double>>> shapeLibrary;
+
+  // Integrate strong peaks
+  for (const auto &item : strongPeaks) {
+    const auto index = item.first;
+    const auto q = item.second;
+    double inti, sigi;
+
+    IntegrationParameters params = makeIntegrationParameters(q);
+    const auto result = integrator.integrateStrongPeak(params, q, inti, sigi);
+    shapeLibrary.push_back(result);
+
+    auto &peak = peak_ws->getPeak(index);
+    peak.setIntensity(inti);
+    peak.setSigmaIntensity(sigi);
+    peak.setPeakShape(std::get<0>(result));
+  }
+
+  std::vector<Eigen::Vector3d> points;
+  std::transform(strongPeaks.begin(), strongPeaks.end(),
+                 std::back_inserter(points),
+                 [&](const std::pair<int, V3D> &item) {
+                   const auto q = item.second;
+                   return Eigen::Vector3d(q[0], q[1], q[2]);
+                 });
+
+  if (points.empty())
+    throw std::runtime_error("Cannot integrate peaks when all peaks are below "
+                             "the signal to noise ratio.");
+
+  NearestNeighbours<3> kdTree(points);
+
+  // Integrate weak peaks
+  for (const auto &item : weakPeaks) {
+    double inti, sigi;
+    const auto index = item.first;
+    const auto q = item.second;
+
+    const auto result = kdTree.findNearest(Eigen::Vector3d(q[0], q[1], q[2]));
+    const auto strongIndex = static_cast<int>(std::get<1>(result[0]));
+
+    auto &peak = peak_ws->getPeak(index);
+    auto &strongPeak = peak_ws->getPeak(strongIndex);
+
+    g_log.notice() << "Integrating weak peak " << peak.getHKL()
+                   << " using strong peak " << strongPeak.getHKL() << "\n";
+
+    const auto libShape = shapeLibrary[static_cast<int>(strongIndex)];
+    const auto shape =
+        boost::dynamic_pointer_cast<const PeakShapeEllipsoid>(libShape.first);
+    const auto frac = std::get<0>(libShape.second);
+
+    g_log.notice() << "Weak peak will be adjusted by " << frac << "\n";
+    IntegrationParameters params =
+        makeIntegrationParameters(strongPeak.getQLabFrame());
+    const auto weakShape = integrator.integrateWeakPeak(
+        params, shape, libShape.second, q, inti, sigi);
+
+    peak.setIntensity(inti);
+    peak.setSigmaIntensity(sigi);
+    peak.setPeakShape(weakShape);
+  }
+
+  // This flag is used by the PeaksWorkspace to evaluate whether it has been
+  // integrated.
+  peak_ws->mutableRun().addProperty("PeaksIntegrated", 1, true);
+  setProperty("OutputWorkspace", peak_ws);
+}
+
+IntegrationParameters
+IntegrateEllipsoidsTwoStep::makeIntegrationParameters(const V3D &peak_q) const {
+  IntegrationParameters params;
+  params.peakRadius = getProperty("PeakSize");
+  params.backgroundInnerRadius = getProperty("BackgroundInnerSize");
+  params.backgroundOuterRadius = getProperty("BackgroundOuterSize");
+  params.regionRadius = getProperty("RegionRadius");
+  params.specifySize = getProperty("SpecifySize");
+  params.E1Vectors = E1Vec;
+
+  const bool adaptiveQBackground = getProperty("AdaptiveQBackground");
+  const double adaptiveQMultiplier = getProperty("AdaptiveQMultiplier");
+  const double adaptiveQBackgroundMultiplier =
+      (adaptiveQBackground) ? adaptiveQMultiplier : 0.0;
+
+  // modulus of Q
+  const double lenQpeak = peak_q.norm();
+  // change params to support adaptive Q
+  params.peakRadius = adaptiveQMultiplier * lenQpeak + params.peakRadius;
+  params.backgroundInnerRadius =
+      adaptiveQBackgroundMultiplier * lenQpeak + params.backgroundInnerRadius;
+  params.backgroundOuterRadius =
+      adaptiveQBackgroundMultiplier * lenQpeak + params.backgroundOuterRadius;
+  return params;
+}
+
+void IntegrateEllipsoidsTwoStep::qListFromEventWS(Integrate3DEvents &integrator,
+                                                  Progress &prog,
+                                                  EventWorkspace_sptr &wksp,
+                                                  DblMatrix const &UBinv,
+                                                  bool hkl_integ) {
+  // loop through the eventlists
+
+  const std::string ELASTIC("Elastic");
+  /// Only convert to Q-vector.
+  const std::string Q3D("Q3D");
+  const std::size_t DIMS(3);
+
+  MDWSDescription m_targWSDescr;
+  m_targWSDescr.setMinMax(std::vector<double>(3, -2000.),
+                          std::vector<double>(3, 2000.));
+  m_targWSDescr.buildFromMatrixWS(wksp, Q3D, ELASTIC);
+  m_targWSDescr.setLorentsCorr(false);
+
+  // generate the detectors table
+  Mantid::API::Algorithm_sptr childAlg = createChildAlgorithm(
+      "PreprocessDetectorsToMD", 0.,
+      .5); // HACK. soft dependency on non-dependent package.
+  childAlg->setProperty("InputWorkspace", wksp);
+  childAlg->executeAsChildAlg();
+
+  DataObjects::TableWorkspace_sptr table =
+      childAlg->getProperty("OutputWorkspace");
+  if (!table)
+    throw(std::runtime_error(
+        "Can not retrieve results of \"PreprocessDetectorsToMD\""));
+
+  m_targWSDescr.m_PreprDetTable = table;
+
+  int numSpectra = static_cast<int>(wksp->getNumberHistograms());
+  PARALLEL_FOR_IF(Kernel::threadSafe(*wksp))
+  for (int i = 0; i < numSpectra; ++i) {
+    PARALLEL_START_INTERUPT_REGION
+
+    // units conversion helper
+    UnitsConversionHelper unitConverter;
+    unitConverter.initialize(m_targWSDescr, "Momentum");
+
+    // initialize the MD coordinates conversion class
+    MDTransfQ3D qConverter;
+    qConverter.initialize(m_targWSDescr);
+
+    std::vector<double> buffer(DIMS);
+    // get a reference to the event list
+    EventList &events = wksp->getSpectrum(i);
+
+    events.switchTo(WEIGHTED_NOTIME);
+    events.compressEvents(1e-5, &events);
+
+    // check to see if the event list is empty
+    if (events.empty()) {
+      prog.report();
+      continue; // nothing to do
+    }
+
+    // update which pixel is being converted
+    std::vector<Mantid::coord_t> locCoord(DIMS, 0.);
+    unitConverter.updateConversion(i);
+    qConverter.calcYDepCoordinates(locCoord, i);
+
+    // loop over the events
+    double signal(1.);  // ignorable garbage
+    double errorSq(1.); // ignorable garbage
+    const std::vector<WeightedEventNoTime> &raw_events =
+        events.getWeightedEventsNoTime();
+    std::vector<std::pair<double, V3D>> qList;
+    for (const auto &raw_event : raw_events) {
+      double val = unitConverter.convertUnits(raw_event.tof());
+      qConverter.calcMatrixCoord(val, locCoord, signal, errorSq);
+      for (size_t dim = 0; dim < DIMS; ++dim) {
+        buffer[dim] = locCoord[dim];
+      }
+      V3D qVec(buffer[0], buffer[1], buffer[2]);
+      if (hkl_integ)
+        qVec = UBinv * qVec;
+      qList.emplace_back(raw_event.m_weight, qVec);
+    } // end of loop over events in list
+    PARALLEL_CRITICAL(addEvents) { integrator.addEvents(qList, hkl_integ); }
+
+    prog.report();
+    PARALLEL_END_INTERUPT_REGION
+  } // end of loop over spectra
+  PARALLEL_CHECK_INTERUPT_REGION
+}
+
+/**
+ * @brief qListFromHistoWS creates qlist from input workspaces of type
+ * Workspace2D
+ * @param integrator : itegrator object on which qlists are accumulated
+ * @param prog : progress object
+ * @param wksp : input Workspace2D
+ * @param UBinv : inverse of UB matrix
+ * @param hkl_integ ; boolean for integrating in HKL space
+ */
+void IntegrateEllipsoidsTwoStep::qListFromHistoWS(Integrate3DEvents &integrator,
+                                                  Progress &prog,
+                                                  Workspace2D_sptr &wksp,
+                                                  DblMatrix const &UBinv,
+                                                  bool hkl_integ) {
+
+  // loop through the eventlists
+  const std::string ELASTIC("Elastic");
+  /// Only convert to Q-vector.
+  const std::string Q3D("Q3D");
+  const std::size_t DIMS(3);
+
+  MDWSDescription m_targWSDescr;
+  m_targWSDescr.setMinMax(std::vector<double>(3, -2000.),
+                          std::vector<double>(3, 2000.));
+  m_targWSDescr.buildFromMatrixWS(wksp, Q3D, ELASTIC);
+  m_targWSDescr.setLorentsCorr(false);
+
+  // generate the detectors table
+  Mantid::API::Algorithm_sptr childAlg = createChildAlgorithm(
+      "PreprocessDetectorsToMD", 0.,
+      .5); // HACK. soft dependency on non-dependent package.
+  childAlg->setProperty("InputWorkspace", wksp);
+  childAlg->executeAsChildAlg();
+
+  DataObjects::TableWorkspace_sptr table =
+      childAlg->getProperty("OutputWorkspace");
+  if (!table)
+    throw(std::runtime_error(
+        "Can not retrieve results of \"PreprocessDetectorsToMD\""));
+  else
+    m_targWSDescr.m_PreprDetTable = table;
+
+  int numSpectra = static_cast<int>(wksp->getNumberHistograms());
+  PARALLEL_FOR_IF(Kernel::threadSafe(*wksp))
+  for (int i = 0; i < numSpectra; ++i) {
+    PARALLEL_START_INTERUPT_REGION
+
+    // units conversion helper
+    UnitsConversionHelper unitConverter;
+    unitConverter.initialize(m_targWSDescr, "Momentum");
+
+    // initialize the MD coordinates conversion class
+    MDTransfQ3D qConverter;
+    qConverter.initialize(m_targWSDescr);
+
+    // get tof and counts
+    const auto &xVals = wksp->points(i);
+    const auto &yVals = wksp->counts(i);
+
+    // update which pixel is being converted
+    std::vector<Mantid::coord_t> locCoord(DIMS, 0.);
+    unitConverter.updateConversion(i);
+    qConverter.calcYDepCoordinates(locCoord, i);
+
+    // loop over the events
+    double signal(1.);  // ignorable garbage
+    double errorSq(1.); // ignorable garbage
+
+    std::vector<std::pair<double, V3D>> qList;
+
+    for (size_t j = 0; j < yVals.size(); ++j) {
+      const double &yVal = yVals[j];
+      if (yVal > 0) // TODO, is this condition right?
+      {
+        double val = unitConverter.convertUnits(xVals[j]);
+        qConverter.calcMatrixCoord(val, locCoord, signal, errorSq);
+        V3D qVec(locCoord[0], locCoord[1], locCoord[2]);
+        if (hkl_integ)
+          qVec = UBinv * qVec;
+
+        if (std::isnan(qVec[0]) || std::isnan(qVec[1]) || std::isnan(qVec[2]))
+          continue;
+        // Account for counts in histograms by increasing the qList with the
+        // same q-point
+        qList.emplace_back(yVal, qVec);
+      }
+    }
+    PARALLEL_CRITICAL(addHisto) { integrator.addEvents(qList, hkl_integ); }
+    prog.report();
+    PARALLEL_END_INTERUPT_REGION
+  } // end of loop over spectra
+  PARALLEL_CHECK_INTERUPT_REGION
+}
+
+/*
+ * Define edges for each instrument by masking. For CORELLI, tubes 1 and 16, and
+ *pixels 0 and 255.
+ * Get Q in the lab frame for every peak, call it C
+ * For every point on the edge, the trajectory in reciprocal space is a straight
+ *line, going through O=V3D(0,0,0).
+ * Calculate a point at a fixed momentum, say k=1. Q in the lab frame
+ *E=V3D(-k*sin(tt)*cos(ph),-k*sin(tt)*sin(ph),k-k*cos(ph)).
+ * Normalize E to 1: E=E*(1./E.norm())
+ *
+ * @param inst: instrument
+ */
+void IntegrateEllipsoidsTwoStep::calculateE1(
+    const API::DetectorInfo &detectorInfo) {
+  for (size_t i = 0; i < detectorInfo.size(); ++i) {
+    if (detectorInfo.isMonitor(i))
+      continue; // skip monitor
+    if (!detectorInfo.isMasked(i))
+      continue; // edge is masked so don't check if not masked
+    const auto &det = detectorInfo.detector(i);
+    double tt1 = det.getTwoTheta(V3D(0, 0, 0), V3D(0, 0, 1)); // two theta
+    double ph1 = det.getPhi();                                // phi
+    V3D E1 = V3D(-std::sin(tt1) * std::cos(ph1), -std::sin(tt1) * std::sin(ph1),
+                 1. - std::cos(tt1)); // end of trajectory
+    E1 = E1 * (1. / E1.norm());       // normalize
+    E1Vec.push_back(E1);
+  }
+}
+
+void IntegrateEllipsoidsTwoStep::runMaskDetectors(
+    Mantid::DataObjects::PeaksWorkspace_sptr peakWS, std::string property,
+    std::string values) {
+  IAlgorithm_sptr alg = createChildAlgorithm("MaskBTP");
+  alg->setProperty<Workspace_sptr>("Workspace", peakWS);
+  alg->setProperty(property, values);
+  if (!alg->execute())
+    throw std::runtime_error(
+        "MaskDetectors Child Algorithm has not executed successfully");
+}
+}
+}
diff --git a/Framework/MDAlgorithms/src/IntegratePeaksCWSD.cpp b/Framework/MDAlgorithms/src/IntegratePeaksCWSD.cpp
index b0de2bd78dd58eca7b76b22361342afad4239e53..9dccb27505ce3f4606fd5d5c930f316f823469c1 100644
--- a/Framework/MDAlgorithms/src/IntegratePeaksCWSD.cpp
+++ b/Framework/MDAlgorithms/src/IntegratePeaksCWSD.cpp
@@ -105,6 +105,8 @@ void IntegratePeaksCWSD::exec() {
   // Merge peak if necessary
   if (m_doMergePeak)
     mergePeaks();
+  else
+    normalizePeaksIntensities(); // normalize the intensity of each Pt.
 
   // Output
   DataObjects::PeaksWorkspace_sptr outws =
@@ -161,6 +163,7 @@ void IntegratePeaksCWSD::processInputs() {
         "Either being normalized by time or being normalized "
         "by monitor must be selected if merge-peak is selected.");
   m_scaleFactor = getProperty("ScaleFactor");
+  g_log.warning() << "[DB...BAT] Scale factor = " << m_scaleFactor << "\n";
 
   // monitor counts
   if (m_normalizeByMonitor)
@@ -544,9 +547,8 @@ std::map<int, double> IntegratePeaksCWSD::getMeasureTime() {
     std::string duration_str = expinfo->run().getProperty("duration")->value();
     double duration = std::stod(duration_str);
     run_time_map.insert(std::make_pair(run_number, duration));
-    g_log.information() << "MD workspace exp info " << iexpinfo << ": run "
-                        << run_number << ", measuring time = " << duration
-                        << "\n";
+    g_log.warning() << "MD workspace exp info " << iexpinfo << ": run "
+                    << run_number << ", measuring time = " << duration << "\n";
   }
 
   return run_time_map;
@@ -573,5 +575,28 @@ void IntegratePeaksCWSD::getPeakInformation() {
   }
 }
 
+//----------------------------------------------------------------------------------------------
+/** Normalize the peak's intensities per Pt. to either time or monitor counts
+ * @brief IntegratePeaksCSWD::normalizePeaksIntensities
+ */
+void IntegratePeaksCWSD::normalizePeaksIntensities() {
+  // go over each peak (of run)
+  std::map<int, double>::iterator count_iter;
+  for (count_iter = m_runPeakCountsMap.begin();
+       count_iter != m_runPeakCountsMap.end(); ++count_iter) {
+    int run_number_i = count_iter->first;
+    // get monitor value
+    std::map<int, signal_t>::iterator mon_iter =
+        m_runNormMap.find(run_number_i);
+    // normalize peak intensities stored in m_runNormMap
+    if (mon_iter != m_runNormMap.end()) {
+      signal_t monitor_i = mon_iter->second;
+      count_iter->second /= monitor_i;
+    }
+  } // END-FOR
+
+  return;
+}
+
 } // namespace Mantid
 } // namespace MDAlgorithms
diff --git a/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp b/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp
index ab4a7cc60d7f52ccf4a2dc76b816ee5912e342ce..ceb1b43b96a43edf38513cb31aaa67ab6b1d29ec 100644
--- a/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp
+++ b/Framework/MDAlgorithms/src/Quantification/SimulateResolutionConvolvedModel.cpp
@@ -13,6 +13,7 @@
 #include "MantidMDAlgorithms/Quantification/ForegroundModelFactory.h"
 #include "MantidMDAlgorithms/Quantification/MDResolutionConvolutionFactory.h"
 #include "MantidMDAlgorithms/Quantification/ResolutionConvolvedCrossSection.h"
+#include <boost/make_shared.hpp>
 
 namespace Mantid {
 namespace MDAlgorithms {
@@ -91,15 +92,15 @@ void SimulateResolutionConvolvedModel::init() {
 void SimulateResolutionConvolvedModel::exec() {
   m_inputWS = getProperty("InputWorkspace");
   // First estimate of progress calls
-  API::Progress progress(this, 0.0, 1.0,
-                         static_cast<size_t>(m_inputWS->getNPoints()));
-  progress.report("Caching simulation input");
+  auto progress = boost::make_shared<API::Progress>(
+      this, 0.0, 1.0, static_cast<size_t>(m_inputWS->getNPoints()));
+  progress->report("Caching simulation input");
   auto resolution = createFunction();
   createDomains();
 
   // Do the real work
-  progress.setNumSteps(resolution->estimateNoProgressCalls());
-  resolution->setProgressReporter(&progress);
+  progress->setNumSteps(resolution->estimateNoProgressCalls());
+  resolution->setProgressReporter(progress);
   resolution->function(*m_domain, *m_calculatedValues);
 
   // If output workspace exists just add the events to that
diff --git a/Framework/MDAlgorithms/test/CMakeLists.txt b/Framework/MDAlgorithms/test/CMakeLists.txt
index 32b99b0e04cd0a3ae6f1c7bf8e5699aca80bdbcf..3c0613e240a770253e487ed52edd8fb4f398eb94 100644
--- a/Framework/MDAlgorithms/test/CMakeLists.txt
+++ b/Framework/MDAlgorithms/test/CMakeLists.txt
@@ -12,6 +12,7 @@ if ( CXXTEST_FOUND )
                         ../../TestHelpers/src/ScopedFileHelper.cpp
                         ../../TestHelpers/src/InstrumentCreationHelper.cpp
                         ../../TestHelpers/src/WorkspaceCreationHelper.cpp
+                        ../../TestHelpers/src/SingleCrystalDiffractionTestHelper.cpp
                          )
 
   cxxtest_add_test ( MDAlgorithmsTest ${TEST_FILES} ${GMOCK_TEST_FILES})
diff --git a/Framework/MDAlgorithms/test/Integrate3DEventsTest.h b/Framework/MDAlgorithms/test/Integrate3DEventsTest.h
index 182b6c3a4c69167fc6c74c54d464123cbc8dd839..3334170a75b0198839a95ba4aad9bacb59fa4a5e 100644
--- a/Framework/MDAlgorithms/test/Integrate3DEventsTest.h
+++ b/Framework/MDAlgorithms/test/Integrate3DEventsTest.h
@@ -6,6 +6,7 @@
 #include "MantidDataObjects/PeakShapeEllipsoid.h"
 
 #include <cxxtest/TestSuite.h>
+#include <random>
 
 using namespace Mantid;
 using namespace Mantid::DataObjects;
@@ -122,6 +123,260 @@ public:
       TS_ASSERT_DELTA(sigi, sigi_some[i], 0.01);
     }
   }
+
+  void test_integrateWeakPeakInPerfectCase() {
+    /* Check that we can integrate a weak peak using a strong peak in the
+     * perfect case when there is absolutely no background
+     */
+
+    // synthesize two peaks
+    V3D peak_1(20, 0, 0);
+    V3D peak_2(0, 20, 0);
+    std::vector<std::pair<double, V3D>> peak_q_list{{1., peak_1}, {1., peak_2}};
+
+    // synthesize a UB-inverse to map
+    DblMatrix UBinv(3, 3, false); // Q to h,k,l
+    UBinv.setRow(0, V3D(.1, 0, 0));
+    UBinv.setRow(1, V3D(0, .2, 0));
+    UBinv.setRow(2, V3D(0, 0, .25));
+
+    std::vector<std::pair<double, V3D>> event_Qs;
+    const int numStrongEvents = 10000;
+    const int numWeakEvents = 100;
+    generatePeak(event_Qs, peak_1, 0.1, numStrongEvents, 1); // strong peak
+    generatePeak(event_Qs, peak_2, 0.1, numWeakEvents, 1);   // weak peak
+
+    IntegrationParameters params;
+    params.peakRadius = 1.0;
+    params.backgroundInnerRadius = 1.0;
+    params.backgroundOuterRadius = 1.2;
+    params.regionRadius = 1.2;
+    params.specifySize = true;
+
+    // Create integraton region + events & UB
+    Integrate3DEvents integrator(peak_q_list, UBinv, params.regionRadius);
+    integrator.addEvents(event_Qs, false);
+
+    double strong_inti = 0, strong_sigi = 0;
+    auto result = integrator.integrateStrongPeak(params, peak_1, strong_inti,
+                                                 strong_sigi);
+    const auto shape =
+        boost::dynamic_pointer_cast<const PeakShapeEllipsoid>(result.first);
+    const auto frac = std::get<0>(result.second);
+
+    // Check the fraction of the peak that is contained within a "standard core"
+    // the standard core is just the sigma in each direction
+    TS_ASSERT_DELTA(frac, 0.8369, 0.01);
+
+    // Check the integrated intensity for a strong peak is exactly what we set
+    // it to be when generating the peak
+    TS_ASSERT_DELTA(strong_inti, numStrongEvents, 0.01);
+    TS_ASSERT_DELTA(strong_sigi, 100, 0.1);
+
+    // Now integrate weak peak using the parameters we got from the strong peak
+    double weak_inti, weak_sigi;
+    integrator.integrateWeakPeak(params, shape, result.second, peak_2,
+                                 weak_inti, weak_sigi);
+
+    // Check the integrated intensity for a weak peak is exactly what we set it
+    // to be weighted by the fraction of strong peak contained in a standard
+    // core. This is not exactly the same because of the weighting from the
+    // strong peak
+    TS_ASSERT_DELTA(weak_inti, 83.6960, 0.5);
+    TS_ASSERT_DELTA(weak_sigi, 8.37, 0.1);
+  }
+
+  void test_integrateWeakPeakWithBackground() {
+    /* Check that we can integrate a weak peak using a strong peak in the
+     * perfect case when there is absolutely no background
+     */
+
+    // synthesize two peaks
+    V3D peak_1(20, 0, 0);
+    V3D peak_2(0, 20, 0);
+    std::vector<std::pair<double, V3D>> peak_q_list{{1., peak_1}, {1., peak_2}};
+
+    // synthesize a UB-inverse to map
+    DblMatrix UBinv(3, 3, false); // Q to h,k,l
+    UBinv.setRow(0, V3D(.1, 0, 0));
+    UBinv.setRow(1, V3D(0, .2, 0));
+    UBinv.setRow(2, V3D(0, 0, .25));
+
+    std::vector<std::pair<double, V3D>> event_Qs;
+    const int numStrongEvents = 10000;
+    const int numWeakEvents = 100;
+    generatePeak(event_Qs, peak_1, 0.1, numStrongEvents, 1); // strong peak
+    generatePeak(event_Qs, peak_2, 0.1, numWeakEvents, 1);   // weak peak
+    generateUniformBackground(event_Qs, 10, -30, 30);
+
+    IntegrationParameters params;
+    params.peakRadius = 0.5;
+    params.backgroundInnerRadius = 0.5;
+    params.backgroundOuterRadius = 0.8;
+    params.regionRadius = 0.5;
+
+    // Create integraton regions + events & UB
+    Integrate3DEvents integrator(peak_q_list, UBinv, params.regionRadius);
+    integrator.addEvents(event_Qs, false);
+
+    double strong_inti, strong_sigi;
+    auto result = integrator.integrateStrongPeak(params, peak_1, strong_inti,
+                                                 strong_sigi);
+    const auto shape =
+        boost::dynamic_pointer_cast<const PeakShapeEllipsoid>(result.first);
+    const auto frac = std::get<0>(result.second);
+
+    // Check the fraction of the peak that is contained within a "standard core"
+    // the standard core is just the sigma in each direction
+    TS_ASSERT_DELTA(frac, 0.8284, 0.01);
+
+    // Check the integrated intensity for a strong peak is close to what we set
+    // it to be when generating the peak
+    TS_ASSERT_DELTA(strong_inti, numStrongEvents, 600);
+    TS_ASSERT_DELTA(strong_sigi, 100, 0.1);
+
+    // Now integrate weak peak using the parameters we got from the strong peak
+    double weak_inti, weak_sigi;
+    integrator.integrateWeakPeak(params, shape, result.second, peak_2,
+                                 weak_inti, weak_sigi);
+
+    // Check the integrated intensity for a weak peak is exactly what we set it
+    // to be weighted by the fraction of strong peak contained in a standard
+    // core. This is not exactly the same because of the weighting from the
+    // strong peak
+    TS_ASSERT_DELTA(weak_inti, numWeakEvents, 35);
+    TS_ASSERT_DELTA(weak_sigi, 8.62, 0.2);
+  }
+
+  void test_estimateSignalToNoiseRatioInPerfectCase() {
+    V3D peak_1(20, 0, 0);
+    V3D peak_2(0, 20, 0);
+    V3D peak_3(0, 0, 20);
+    std::vector<std::pair<double, V3D>> peak_q_list{
+        {1., peak_1}, {1., peak_2}, {1., peak_3}};
+
+    // synthesize a UB-inverse to map
+    DblMatrix UBinv(3, 3, false); // Q to h,k,l
+    UBinv.setRow(0, V3D(.1, 0, 0));
+    UBinv.setRow(1, V3D(0, .2, 0));
+    UBinv.setRow(2, V3D(0, 0, .25));
+
+    std::vector<std::pair<double, V3D>> event_Qs;
+    const int numStrongEvents = 10000;
+    const int numWeakEvents = 100;
+    generatePeak(event_Qs, peak_1, 0.1, numStrongEvents, 1);   // strong peak
+    generatePeak(event_Qs, peak_2, 0.1, numWeakEvents, 1);     // weak peak
+    generatePeak(event_Qs, peak_3, 0.1, numWeakEvents / 2, 1); // very weak peak
+
+    // Create integraton region + events & UB
+    Integrate3DEvents integrator(peak_q_list, UBinv, 1.5);
+    integrator.addEvents(event_Qs, false);
+
+    IntegrationParameters params;
+    params.peakRadius = 0.5;
+    params.backgroundInnerRadius = 0.5;
+    params.backgroundOuterRadius = 0.8;
+    params.regionRadius = 0.5;
+    params.specifySize = true;
+
+    const auto ratio1 = integrator.estimateSignalToNoiseRatio(params, peak_1);
+    const auto ratio2 = integrator.estimateSignalToNoiseRatio(params, peak_2);
+    const auto ratio3 = integrator.estimateSignalToNoiseRatio(params, peak_3);
+
+    TS_ASSERT_DELTA(ratio1, numStrongEvents, 0.0001);
+    TS_ASSERT_DELTA(ratio2, numWeakEvents, 0.0001);
+    TS_ASSERT_DELTA(ratio3, numWeakEvents / 2, 0.0001);
+  }
+
+  void test_estimateSignalToNoiseRatioWithBackground() {
+    V3D peak_1(20, 0, 0);
+    V3D peak_2(0, 20, 0);
+    V3D peak_3(0, 0, 20);
+    std::vector<std::pair<double, V3D>> peak_q_list{
+        {1., peak_1}, {1., peak_2}, {1., peak_3}};
+
+    // synthesize a UB-inverse to map
+    DblMatrix UBinv(3, 3, false); // Q to h,k,l
+    UBinv.setRow(0, V3D(.1, 0, 0));
+    UBinv.setRow(1, V3D(0, .2, 0));
+    UBinv.setRow(2, V3D(0, 0, .25));
+
+    std::vector<std::pair<double, V3D>> event_Qs;
+    const int numStrongEvents = 10000;
+    const int numWeakEvents = 100;
+    generatePeak(event_Qs, peak_1, 0.1, numStrongEvents, 1);   // strong peak
+    generatePeak(event_Qs, peak_2, 0.1, numWeakEvents, 1);     // weak peak
+    generatePeak(event_Qs, peak_3, 0.1, numWeakEvents / 2, 1); // very weak peak
+    generateUniformBackground(event_Qs, 10, -30, 30);
+
+    // Create integraton region + events & UB
+    Integrate3DEvents integrator(peak_q_list, UBinv, 1.5);
+    integrator.addEvents(event_Qs, false);
+
+    IntegrationParameters params;
+    params.peakRadius = 0.5;
+    params.backgroundInnerRadius = 0.5;
+    params.backgroundOuterRadius = 0.8;
+    params.regionRadius = 0.5;
+    params.specifySize = true;
+
+    const auto ratio1 = integrator.estimateSignalToNoiseRatio(params, peak_1);
+    const auto ratio2 = integrator.estimateSignalToNoiseRatio(params, peak_2);
+    const auto ratio3 = integrator.estimateSignalToNoiseRatio(params, peak_3);
+
+    TS_ASSERT_DELTA(ratio1, 171.90, 0.05);
+    TS_ASSERT_DELTA(ratio2, 1.2632, 0.05);
+    TS_ASSERT_DELTA(ratio3, 0.1824, 0.05);
+  }
+
+  /** Generate a symmetric Gaussian peak
+    *
+    * @param event_Qs :: vector of event Qs
+    * @param center :: location of the center of the peak
+    * @param sigma :: standard deviation of the peak
+    * @param numSamples :: number of samples to draw
+    * @param seed :: the seed to the pseudo-random number generator
+    */
+  void generatePeak(std::vector<std::pair<double, V3D>> &event_Qs, V3D center,
+                    double sigma = 5, size_t numSamples = 1000, int seed = 1) {
+
+    std::mt19937 gen;
+    std::normal_distribution<> d(0, sigma);
+    gen.seed(seed);
+
+    for (size_t i = 0; i < numSamples; ++i) {
+      V3D offset(d(gen), d(gen), d(gen));
+      event_Qs.push_back(std::make_pair(1., center + offset));
+    }
+  }
+
+  /** Generate a uniform background
+   *
+   * @param event_Qs :: vector of event Qs
+   * @param countsPerQ :: average value for the flat background
+   * @param lower :: the smallest extent of Q space in all directions
+   * @param upper :: the largest extent of Q space in all directions
+   * @param countVariation :: how much the average background can vary by
+   * @param seed :: the random seed to use (default 1)
+   */
+  void generateUniformBackground(std::vector<std::pair<double, V3D>> &event_Qs,
+                                 size_t countsPerQ, const double lower,
+                                 const double upper,
+                                 const int countVariation = 3,
+                                 const double step = 0.5, int seed = 1) {
+    const auto counts = static_cast<double>(countsPerQ);
+    std::mt19937 gen;
+    std::uniform_real_distribution<> d(-countVariation, countVariation);
+    gen.seed(seed);
+
+    for (double i = lower; i < upper; i += step) {
+      for (double j = lower; j < upper; j += step) {
+        for (double k = lower; k < upper; k += step) {
+          event_Qs.emplace_back(counts + d(gen), V3D(i, j, k));
+        }
+      }
+    }
+  }
 };
 
 #endif /* MANTID_MDEVENTS_INTEGRATE_3D_EVENTS_TEST_H_ */
diff --git a/Framework/MDAlgorithms/test/IntegrateEllipsoidsTwoStepTest.h b/Framework/MDAlgorithms/test/IntegrateEllipsoidsTwoStepTest.h
new file mode 100644
index 0000000000000000000000000000000000000000..de8839a154e1dc37388cdc8587afa652b7636bd0
--- /dev/null
+++ b/Framework/MDAlgorithms/test/IntegrateEllipsoidsTwoStepTest.h
@@ -0,0 +1,410 @@
+#include "MantidMDAlgorithms/IntegrateEllipsoidsTwoStep.h"
+
+#include "MantidAPI/Axis.h"
+#include "MantidAPI/Run.h"
+#include "MantidAPI/Sample.h"
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidKernel/NearestNeighbours.h"
+#include "MantidDataObjects/PeaksWorkspace.h"
+#include "MantidDataObjects/EventWorkspace.h"
+#include "MantidGeometry/Crystal/OrientedLattice.h"
+#include "MantidKernel/V3D.h"
+#include "MantidTestHelpers/ComponentCreationHelper.h"
+#include "MantidTestHelpers/SingleCrystalDiffractionTestHelper.h"
+
+#include <cxxtest/TestSuite.h>
+#include <tuple>
+#include <random>
+
+using namespace Mantid;
+using namespace Mantid::MDAlgorithms;
+using namespace Mantid::DataObjects;
+using Mantid::Kernel::V3D;
+using Mantid::Geometry::OrientedLattice;
+using namespace Mantid::SingleCrystalDiffractionTestHelper;
+
+class IntegrateEllipsoidsTwoStepTest : public CxxTest::TestSuite {
+
+public:
+  void test_init() {
+    Mantid::MDAlgorithms::IntegrateEllipsoidsTwoStep alg;
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+  }
+
+  void test_exec_events_with_no_background() {
+    const int numEventsPerPeak = 10000;
+    // Very tight distribution with events happening at a single point
+    const auto sigmas = std::make_tuple(.002, .002, 0.1);
+
+    // Build some diffraction data
+    WorkspaceBuilder builder;
+    builder.setNumPixels(100);
+    builder.addBackground(false);
+    builder.addPeakByHKL(V3D(1, -5, -3), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -4), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -3, -5), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -2), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, 0), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(2, -3, -4), numEventsPerPeak, sigmas);
+
+    auto data = builder.build();
+    auto eventWS = std::get<0>(data);
+    auto peaksWS = std::get<1>(data);
+
+    // Run algorithm
+    IntegrateEllipsoidsTwoStep alg;
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    alg.setProperty("InputWorkspace", eventWS);
+    alg.setProperty("PeaksWorkspace", peaksWS);
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("SpecifySize", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("PeakSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundInnerSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundOuterSize", 0.4));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("WeakPeakThreshold", 0.1));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("IntegrateIfOnEdge", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("OutputWorkspace", "dummy"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+
+    // Check output
+    TS_ASSERT(alg.isExecuted());
+    PeaksWorkspace_sptr integratedPeaksWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(integratedPeaksWS);
+
+    TSM_ASSERT_EQUALS("Wrong number of peaks in output workspace",
+                      integratedPeaksWS->getNumberPeaks(),
+                      peaksWS->getNumberPeaks());
+    const auto &run = integratedPeaksWS->mutableRun();
+    TSM_ASSERT("Output workspace must be integrated",
+               run.hasProperty("PeaksIntegrated"));
+    TSM_ASSERT_EQUALS("Output workspace must be integrated",
+                      run.getProperty("PeaksIntegrated")->value(), "1");
+
+    for (int i = 0; i < 5; ++i) {
+      TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(i),
+                       integratedPeaksWS->getPeak(i).getIntensity(),
+                       numEventsPerPeak, 5);
+    }
+  }
+
+  void test_exec_histogram_with_no_background() {
+    const int numEventsPerPeak = 10000;
+    const auto sigmas = std::make_tuple(.002, .002, 0.01);
+    const std::vector<double> rebinParams = {800, 5, 10000};
+
+    WorkspaceBuilder builder;
+    builder.setNumPixels(100);
+    builder.addBackground(false);
+    builder.outputAsHistogram(true);
+    builder.setRebinParameters(rebinParams);
+
+    builder.addPeakByHKL(V3D(1, -5, -3), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -4), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -3, -5), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -2), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, 0), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(2, -3, -4), numEventsPerPeak, sigmas);
+
+    auto data = builder.build();
+    auto histoWS = std::get<0>(data);
+    auto peaksWS = std::get<1>(data);
+
+    IntegrateEllipsoidsTwoStep alg;
+    alg.setChild(true);
+    alg.setRethrows(true);
+    alg.initialize();
+    alg.setProperty("InputWorkspace", histoWS);
+    alg.setProperty("PeaksWorkspace", peaksWS);
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("SpecifySize", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("PeakSize", .5));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundInnerSize", .5));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundOuterSize", .6));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("WeakPeakThreshold", 0.1));
+    alg.setPropertyValue("OutputWorkspace", "dummy");
+    alg.execute();
+    PeaksWorkspace_sptr integratedPeaksWS = alg.getProperty("OutputWorkspace");
+    TSM_ASSERT_EQUALS("Wrong number of peaks in output workspace",
+                      integratedPeaksWS->getNumberPeaks(),
+                      peaksWS->getNumberPeaks());
+
+    for (int i = 0; i < 5; ++i) {
+      TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(i),
+                       integratedPeaksWS->getPeak(i).getIntensity(),
+                       numEventsPerPeak, 5);
+    }
+  }
+
+  void test_exec_events_with_background() {
+    const int numEventsPerPeak = 10000;
+
+    // Very tight distribution with events happening at a single point
+    const auto sigmas = std::make_tuple(.002, .002, 0.1);
+    const auto backgroundDetSize = 0.05;
+    const auto backgroundTOFSize = 100.0;
+    const auto nBackgroundEvents = 1000;
+
+    // Build some diffraction data
+    WorkspaceBuilder builder;
+    builder.setNumPixels(100);
+    builder.addBackground(true);
+    builder.setBackgroundParameters(nBackgroundEvents, backgroundDetSize,
+                                    backgroundTOFSize);
+
+    builder.addPeakByHKL(V3D(1, -5, -3), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -4), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -3, -5), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -2), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, 0), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(2, -3, -4), numEventsPerPeak, sigmas);
+
+    auto data = builder.build();
+    auto eventWS = std::get<0>(data);
+    auto peaksWS = std::get<1>(data);
+
+    // Run algorithm
+    IntegrateEllipsoidsTwoStep alg;
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    alg.setProperty("InputWorkspace", eventWS);
+    alg.setProperty("PeaksWorkspace", peaksWS);
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("SpecifySize", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("PeakSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundInnerSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundOuterSize", 0.4));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("WeakPeakThreshold", 0.1));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("IntegrateIfOnEdge", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("OutputWorkspace", "dummy"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+
+    // Check output
+    TS_ASSERT(alg.isExecuted());
+    PeaksWorkspace_sptr integratedPeaksWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(integratedPeaksWS);
+
+    TSM_ASSERT_EQUALS("Wrong number of peaks in output workspace",
+                      integratedPeaksWS->getNumberPeaks(),
+                      peaksWS->getNumberPeaks());
+    const auto &run = integratedPeaksWS->mutableRun();
+    TSM_ASSERT("Output workspace must be integrated",
+               run.hasProperty("PeaksIntegrated"));
+    TSM_ASSERT_EQUALS("Output workspace must be integrated",
+                      run.getProperty("PeaksIntegrated")->value(), "1");
+
+    for (int i = 0; i < 5; ++i) {
+      TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(i),
+                       integratedPeaksWS->getPeak(i).getIntensity(),
+                       numEventsPerPeak, 450);
+    }
+  }
+
+  void test_exec_histogram_with_background() {
+    const int numEventsPerPeak = 10000;
+    const auto sigmas = std::make_tuple(.002, .002, 0.01);
+    const std::vector<double> rebinParams = {800, 5, 10000};
+    const auto backgroundDetSize = 0.05;
+    const auto backgroundTOFSize = 100.0;
+    const auto nBackgroundEvents = 1000;
+
+    WorkspaceBuilder builder;
+    builder.setNumPixels(100);
+    builder.addBackground(true);
+    builder.setBackgroundParameters(nBackgroundEvents, backgroundDetSize,
+                                    backgroundTOFSize);
+    builder.outputAsHistogram(true);
+    builder.setRebinParameters(rebinParams);
+
+    builder.addPeakByHKL(V3D(1, -5, -3), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -4), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -3, -5), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -2), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, 0), numEventsPerPeak, sigmas);
+    builder.addPeakByHKL(V3D(2, -3, -4), numEventsPerPeak, sigmas);
+
+    auto data = builder.build();
+    auto histoWS = std::get<0>(data);
+    auto peaksWS = std::get<1>(data);
+
+    IntegrateEllipsoidsTwoStep alg;
+    alg.setChild(true);
+    alg.setRethrows(true);
+    alg.initialize();
+    alg.setProperty("InputWorkspace", histoWS);
+    alg.setProperty("PeaksWorkspace", peaksWS);
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("SpecifySize", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("PeakSize", .5));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundInnerSize", .5));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundOuterSize", .6));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("WeakPeakThreshold", 0.1));
+    alg.setPropertyValue("OutputWorkspace", "dummy");
+    alg.execute();
+    PeaksWorkspace_sptr integratedPeaksWS = alg.getProperty("OutputWorkspace");
+    TSM_ASSERT_EQUALS("Wrong number of peaks in output workspace",
+                      integratedPeaksWS->getNumberPeaks(),
+                      peaksWS->getNumberPeaks());
+
+    for (int i = 0; i < 5; ++i) {
+      TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(i),
+                       integratedPeaksWS->getPeak(i).getIntensity(),
+                       numEventsPerPeak, 700);
+    }
+  }
+
+  void test_exec_events_with_weak_peaks() {
+    const int numEventsPerStrongPeak = 10000;
+    const int numEventsPerWeakPeak = 100;
+
+    // Very tight distribution with events happening at a single point
+    const auto sigmas = std::make_tuple(.002, .002, 0.1);
+    const auto backgroundDetSize = 0.05;
+    const auto backgroundTOFSize = 100.0;
+    const auto nBackgroundEvents = 1000;
+
+    // Build some diffraction data
+    WorkspaceBuilder builder;
+    builder.setNumPixels(100);
+    builder.addBackground(true);
+    builder.setBackgroundParameters(nBackgroundEvents, backgroundDetSize,
+                                    backgroundTOFSize);
+
+    builder.addPeakByHKL(V3D(1, -5, -3), numEventsPerStrongPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -4), numEventsPerStrongPeak, sigmas);
+
+    builder.addPeakByHKL(V3D(1, -3, -5), numEventsPerWeakPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -2), numEventsPerWeakPeak, sigmas);
+
+    builder.addPeakByHKL(V3D(1, -4, 0), numEventsPerStrongPeak, sigmas);
+    builder.addPeakByHKL(V3D(2, -3, -4), numEventsPerStrongPeak, sigmas);
+
+    auto data = builder.build();
+    auto eventWS = std::get<0>(data);
+    auto peaksWS = std::get<1>(data);
+
+    // Run algorithm
+    IntegrateEllipsoidsTwoStep alg;
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    alg.setProperty("InputWorkspace", eventWS);
+    alg.setProperty("PeaksWorkspace", peaksWS);
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("SpecifySize", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("PeakSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundInnerSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundOuterSize", 0.4));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("WeakPeakThreshold", 5.0));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("IntegrateIfOnEdge", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("OutputWorkspace", "dummy"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+
+    // Check output
+    TS_ASSERT(alg.isExecuted());
+    PeaksWorkspace_sptr integratedPeaksWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(integratedPeaksWS);
+
+    TSM_ASSERT_EQUALS("Wrong number of peaks in output workspace",
+                      integratedPeaksWS->getNumberPeaks(),
+                      peaksWS->getNumberPeaks());
+    const auto &run = integratedPeaksWS->mutableRun();
+    TSM_ASSERT("Output workspace must be integrated",
+               run.hasProperty("PeaksIntegrated"));
+    TSM_ASSERT_EQUALS("Output workspace must be integrated",
+                      run.getProperty("PeaksIntegrated")->value(), "1");
+
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(0),
+                     integratedPeaksWS->getPeak(0).getIntensity(),
+                     numEventsPerStrongPeak, 300);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(1),
+                     integratedPeaksWS->getPeak(1).getIntensity(),
+                     numEventsPerStrongPeak, 300);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(2),
+                     integratedPeaksWS->getPeak(2).getIntensity(),
+                     numEventsPerWeakPeak, 100);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(3),
+                     integratedPeaksWS->getPeak(3).getIntensity(),
+                     numEventsPerWeakPeak, 100);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(4),
+                     integratedPeaksWS->getPeak(4).getIntensity(),
+                     numEventsPerStrongPeak, 450);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(5),
+                     integratedPeaksWS->getPeak(5).getIntensity(),
+                     numEventsPerStrongPeak, 800);
+  }
+
+  void test_exec_events_with_adaptive_q() {
+    const int numEventsPerStrongPeak = 10000;
+    const int numEventsPerWeakPeak = 100;
+
+    // Very tight distribution with events happening at a single point
+    const auto sigmas = std::make_tuple(.002, .002, 0.1);
+    const auto backgroundDetSize = 0.05;
+    const auto backgroundTOFSize = 100.0;
+    const auto nBackgroundEvents = 1000;
+
+    // Build some diffraction data
+    WorkspaceBuilder builder;
+    builder.setNumPixels(100);
+    builder.addBackground(true);
+    builder.setBackgroundParameters(nBackgroundEvents, backgroundDetSize,
+                                    backgroundTOFSize);
+
+    builder.addPeakByHKL(V3D(1, -5, -3), numEventsPerStrongPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -4), numEventsPerStrongPeak, sigmas);
+    builder.addPeakByHKL(V3D(2, -3, -4), numEventsPerStrongPeak, sigmas);
+
+    builder.addPeakByHKL(V3D(1, -3, -5), numEventsPerWeakPeak, sigmas);
+    builder.addPeakByHKL(V3D(1, -4, -2), numEventsPerWeakPeak, sigmas);
+
+    auto data = builder.build();
+    auto eventWS = std::get<0>(data);
+    auto peaksWS = std::get<1>(data);
+
+    // Run algorithm
+    IntegrateEllipsoidsTwoStep alg;
+    alg.setChild(true);
+    alg.setRethrows(true);
+    TS_ASSERT_THROWS_NOTHING(alg.initialize());
+    alg.setProperty("InputWorkspace", eventWS);
+    alg.setProperty("PeaksWorkspace", peaksWS);
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("SpecifySize", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("PeakSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundInnerSize", 0.35));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("BackgroundOuterSize", 0.4));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("WeakPeakThreshold", 100.0));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("IntegrateIfOnEdge", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("AdaptiveQBackground", true));
+    TS_ASSERT_THROWS_NOTHING(alg.setProperty("AdaptiveQMultiplier", 0.01));
+    TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("OutputWorkspace", "dummy"));
+    TS_ASSERT_THROWS_NOTHING(alg.execute());
+
+    // Check output
+    TS_ASSERT(alg.isExecuted());
+    PeaksWorkspace_sptr integratedPeaksWS = alg.getProperty("OutputWorkspace");
+    TS_ASSERT(integratedPeaksWS);
+
+    TSM_ASSERT_EQUALS("Wrong number of peaks in output workspace",
+                      integratedPeaksWS->getNumberPeaks(),
+                      peaksWS->getNumberPeaks());
+    const auto &run = integratedPeaksWS->mutableRun();
+    TSM_ASSERT("Output workspace must be integrated",
+               run.hasProperty("PeaksIntegrated"));
+    TSM_ASSERT_EQUALS("Output workspace must be integrated",
+                      run.getProperty("PeaksIntegrated")->value(), "1");
+
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(0),
+                     integratedPeaksWS->getPeak(0).getIntensity(),
+                     numEventsPerStrongPeak, 150);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(1),
+                     integratedPeaksWS->getPeak(1).getIntensity(),
+                     numEventsPerStrongPeak, 150);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(2),
+                     integratedPeaksWS->getPeak(2).getIntensity(),
+                     numEventsPerStrongPeak, 900);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(3),
+                     integratedPeaksWS->getPeak(3).getIntensity(),
+                     numEventsPerWeakPeak, 300);
+    TSM_ASSERT_DELTA("Wrong intensity for peak " + std::to_string(4),
+                     integratedPeaksWS->getPeak(4).getIntensity(),
+                     numEventsPerWeakPeak, 300);
+  }
+};
diff --git a/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h b/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h
index 170c3334896d3cda61b1354d26252c26be2645f9..5a180e64328ed7d3a7ba1bb4873efd39ad6fbfc9 100644
--- a/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h
+++ b/Framework/PythonInterface/inc/MantidPythonInterface/api/FitFunctions/IFunctionAdapter.h
@@ -56,12 +56,14 @@ public:
   /// Returns the attribute's value as a Python object
   static PyObject *getAttributeValue(IFunction &self,
                                      const API::IFunction::Attribute &attr);
+  /// Set the attribute's value
+  static void setAttributePythonValue(IFunction &self, const std::string &name,
+                                      const boost::python::object &value);
   /// Called by the framework when an attribute has been set
   void setAttribute(const std::string &attName,
                     const API::IFunction::Attribute &attr) override;
-  /// Store the attribute's value in the default IFunction's cache
-  void storeAttributePythonValue(const std::string &name,
-                                 const boost::python::object &value);
+  /// Split this function (if needed) into a list of independent functions
+  static boost::python::object createPythonEquivalentFunctions(IFunction &self);
 
   // Each overload of declareParameter requires a different name as we
   // can't use a function pointer with a virtual base class
diff --git a/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/AlgorithmAdapter.h b/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/AlgorithmAdapter.h
index 32c0f58fa80185cb77f9ab509bded35360cac58c..54c2f6efa879e5d089691408fd1706a4e6e5d92c 100644
--- a/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/AlgorithmAdapter.h
+++ b/Framework/PythonInterface/inc/MantidPythonInterface/api/PythonAlgorithm/AlgorithmAdapter.h
@@ -59,6 +59,8 @@ public:
   const std::string summary() const override;
   /// Returns a category of the algorithm.
   const std::string category() const override;
+  /// Returns optional documentation URL of the algorithm
+  const std::string helpURL() const override;
   /// Allow the isRunning method to be overridden
   bool isRunning() const override;
   /// Allow the cancel method to be overridden
diff --git a/Framework/PythonInterface/mantid/api/src/Exports/IAlgorithm.cpp b/Framework/PythonInterface/mantid/api/src/Exports/IAlgorithm.cpp
index cc92119a4c67aa89fc64093d332f1f88962e56f5..889e326b37407529948cd91e19f6da1a88b608af 100644
--- a/Framework/PythonInterface/mantid/api/src/Exports/IAlgorithm.cpp
+++ b/Framework/PythonInterface/mantid/api/src/Exports/IAlgorithm.cpp
@@ -346,6 +346,8 @@ void export_ialgorithm() {
            "Returns the list of categories this algorithm belongs to")
       .def("summary", &IAlgorithm::summary, arg("self"),
            "Returns a summary message describing the algorithm")
+      .def("helpURL", &IAlgorithm::helpURL, arg("self"),
+           "Returns optional URL for algorithm documentation")
       .def("workspaceMethodName", &IAlgorithm::workspaceMethodName, arg("self"),
            "Returns a name that will be used when attached as a workspace "
            "method. Empty string indicates do not attach")
diff --git a/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp b/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp
index f592ed556176c3a30e8e470dbb89ad2aa622f295..dec138bd642ca0805d275c34a1d33a9cb70bf91c 100644
--- a/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp
+++ b/Framework/PythonInterface/mantid/api/src/Exports/IFunction.cpp
@@ -49,6 +49,13 @@ typedef void (IFunction::*setParameterType2)(const std::string &,
                                              const double &value, bool);
 BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(setParameterType2_Overloads,
                                        setParameter, 2, 3)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(tie_Overloads, tie, 2, 3)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(addConstraints_Overloads, addConstraints,
+                                       1, 2)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fixParameter_Overloads, fixParameter, 1,
+                                       2)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fix_Overloads, fix, 1, 2)
+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(fixAll_Overloads, fixAll, 0, 1)
 #ifdef __clang__
 #pragma clang diagnostic pop
 #endif
@@ -122,9 +129,9 @@ void export_IFunction() {
            (arg("self"), arg("name")),
            "Return the value of the named attribute")
 
-      .def("storeAttributeValue", &IFunctionAdapter::storeAttributePythonValue,
+      .def("setAttributeValue", &IFunctionAdapter::setAttributePythonValue,
            (arg("self"), arg("name"), arg("value")),
-           "Store an attribute value in the default cache")
+           "Set a value of a named attribute")
 
       .def("declareParameter", &IFunctionAdapter::declareFitParameter,
            (arg("self"), arg("name"), arg("init_value"), arg("description")),
@@ -139,6 +146,59 @@ void export_IFunction() {
            (arg("self"), arg("name")),
            "Declare a fitting parameter settings its default value to 0.0")
 
+      .def("fixParameter", &IFunction::fix,
+           fix_Overloads((arg("self"), arg("i"), arg("isDefault")),
+                         "Fix the ith parameter"))
+
+      .def("fixParameter", &IFunction::fixParameter,
+           fixParameter_Overloads((arg("self"), arg("name"), arg("isDefault")),
+                                  "Fix the named parameter"))
+
+      .def("freeParameter", &IFunction::unfix, (arg("self"), arg("i")),
+           "Free the ith parameter")
+
+      .def("freeParameter", &IFunction::unfixParameter,
+           (arg("self"), arg("name")), "Free the named parameter")
+
+      .def("isFixed", &IFunction::isFixed, (arg("self"), arg("i")),
+           "Return whether the ith parameter is fixed or tied")
+
+      .def("fixAll", &IFunction::fixAll,
+           fixAll_Overloads((arg("self"), arg("isDefault")),
+                            "Fix all parameters"))
+
+      .def("freeAll", &IFunction::unfixAll, (arg("self")),
+           "Free all parameters")
+
+      .def("tie", &IFunction::tie,
+           tie_Overloads(
+               (arg("self"), arg("name"), arg("expr"), arg("isDefault")),
+               "Tie a named parameter to an expression"))
+
+      .def("removeTie", (bool (IFunction::*)(size_t)) & IFunction::removeTie,
+           (arg("self"), arg("i")), "Remove the tie of the ith parameter")
+
+      .def("removeTie",
+           (void (IFunction::*)(const std::string &)) & IFunction::removeTie,
+           (arg("self"), arg("name")), "Remove the tie of the named parameter")
+
+      .def("addConstraints", &IFunction::addConstraints,
+           addConstraints_Overloads(
+               (arg("self"), arg("constraints"), arg("isDefault")),
+               "Constrain named parameters"))
+
+      .def("removeConstraint", &IFunction::removeConstraint,
+           (arg("self"), arg("name")),
+           "Remove the constraint on the named parameter")
+
+      .def("getNumberDomains", &IFunction::getNumberDomains, (arg("self")),
+           "Get number of domains of a multi-domain function")
+
+      .def("createEquivalentFunctions",
+           &IFunctionAdapter::createPythonEquivalentFunctions, (arg("self")),
+           "Split this function (if needed) into a list of "
+           "independent functions")
+
       //-- Deprecated functions that have the wrong names --
       .def("categories", &getCategories, arg("self"),
            "Returns a list of the categories for an algorithm")
diff --git a/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp b/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp
index 508265caed4cf0b14519753be0f68153e3c61fe8..b10aa0db5ca9a52ae2d5f991055748dd12a59176 100644
--- a/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp
+++ b/Framework/PythonInterface/mantid/api/src/FitFunctions/IFunctionAdapter.cpp
@@ -2,6 +2,7 @@
 #include "MantidPythonInterface/kernel/Environment/CallMethod.h"
 
 #include <boost/python/class.hpp>
+#include <boost/python/list.hpp>
 
 namespace Mantid {
 namespace PythonInterface {
@@ -41,9 +42,17 @@ IFunction::Attribute createAttributeFromPythonValue(const object &value) {
   else if (PyBytes_Check(rawptr) == 1) {
 #endif
     attr = IFunction::Attribute(extract<std::string>(rawptr)());
+  } else if (PyList_Check(rawptr) == 1) {
+    auto n = PyList_Size(rawptr);
+    std::vector<double> vec;
+    for (Py_ssize_t i = 0; i < n; ++i) {
+      auto v = extract<double>(PyList_GetItem(rawptr, i))();
+      vec.push_back(v);
+    }
+    attr = IFunction::Attribute(vec);
   } else
     throw std::invalid_argument(
-        "Invalid attribute type. Allowed types=float,int,str,bool");
+        "Invalid attribute type. Allowed types=float,int,str,bool,list(float)");
 
   return attr;
 }
@@ -125,12 +134,27 @@ IFunctionAdapter::getAttributeValue(IFunction &self,
     result = to_python_value<const std::string &>()(attr.asString());
   else if (type == "bool")
     result = to_python_value<const bool &>()(attr.asBool());
+  else if (type == "std::vector<double>")
+    result = to_python_value<const std::vector<double> &>()(attr.asVector());
   else
     throw std::runtime_error("Unknown attribute type, cannot convert C++ type "
                              "to Python. Contact developement team.");
   return result;
 }
 
+/**
+ * Set the attribute's value in the default IFunction's cache
+ * @param self :: A reference to a function object that has the attribute.
+ * @param name :: The name of the attribute
+ * @param value :: The value to set
+ */
+void IFunctionAdapter::setAttributePythonValue(IFunction &self,
+                                               const std::string &name,
+                                               const object &value) {
+  auto attr = createAttributeFromPythonValue(value);
+  self.setAttribute(name, attr);
+}
+
 /**
  * Calls setAttributeValue on the Python object if it exists otherwise calls the
  * base class method
@@ -148,15 +172,20 @@ void IFunctionAdapter::setAttribute(const std::string &attName,
   }
 }
 
-/**
- * Store the attribute's value in the default IFunction's cache
- * @param name :: The name of the attribute
- * @param value :: The value to store
+/** Split this function (if needed) into a list of independent functions.
+ * @param self :: A reference to a function object. If it's a multi-domain
+ *    function the result should in general contain more than 1 function.
+ *    For a single domain function it should have a single element (self).
+ * @return A python list of IFunction_sprs.
  */
-void IFunctionAdapter::storeAttributePythonValue(const std::string &name,
-                                                 const object &value) {
-  auto attr = createAttributeFromPythonValue(value);
-  storeAttributeValue(name, attr);
+boost::python::object
+IFunctionAdapter::createPythonEquivalentFunctions(IFunction &self) {
+  auto functions = self.createEquivalentFunctions();
+  boost::python::list list;
+  for (auto fun : functions) {
+    list.append(fun);
+  }
+  return list;
 }
 
 /**
diff --git a/Framework/PythonInterface/mantid/api/src/PythonAlgorithm/AlgorithmAdapter.cpp b/Framework/PythonInterface/mantid/api/src/PythonAlgorithm/AlgorithmAdapter.cpp
index 8a110298ee50ea2d56958080a07ca3b397d204f1..515d5c2052a7a5bfdb6caad6bf8e508e99b6dffd 100644
--- a/Framework/PythonInterface/mantid/api/src/PythonAlgorithm/AlgorithmAdapter.cpp
+++ b/Framework/PythonInterface/mantid/api/src/PythonAlgorithm/AlgorithmAdapter.cpp
@@ -103,9 +103,21 @@ const std::string AlgorithmAdapter<BaseAlgorithm>::summary() const {
 }
 
 /**
- * @return True if the algorithm is considered to be running
+ * Optional documentation URL of the algorithm, empty string if not overridden.
  */
 template <typename BaseAlgorithm>
+const std::string AlgorithmAdapter<BaseAlgorithm>::helpURL() const {
+  try {
+    return callMethod<std::string>(getSelf(), "helpURL");
+  } catch (UndefinedAttributeError &) {
+    return std::string();
+  }
+}
+
+/**
+*@return True if the algorithm is considered to be running
+*/
+template <typename BaseAlgorithm>
 bool AlgorithmAdapter<BaseAlgorithm>::isRunning() const {
   if (!m_isRunningObj) {
     return SuperClass::isRunning();
diff --git a/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py b/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py
index 53ec2455bd3c57a2bdc9411fa58e07df00c59b3f..f31cc942d72e2ddff864d4b459d6acccd3c86b3b 100644
--- a/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py
+++ b/Framework/PythonInterface/plugins/algorithms/AlignAndFocusPowderFromFiles.py
@@ -20,6 +20,7 @@ PROPS_FOR_ALIGN = ["CalFileName", "GroupFilename", "GroupingWorkspace",
                    "CropWavelengthMin", "CropWavelengthMax",
                    "LowResSpectrumOffset", "ReductionProperties"]
 PROPS_FOR_ALIGN.extend(PROPS_FOR_INSTR)
+PROPS_FOR_PD_CHARACTER = ['FrequencyLogNames', 'WaveLengthLogNames']
 
 
 def determineChunking(filename, chunkSize):
@@ -78,6 +79,7 @@ class AlignAndFocusPowderFromFiles(DataProcessorAlgorithm):
                              'Characterizations table')
 
         self.copyProperties("AlignAndFocusPowder", PROPS_FOR_ALIGN)
+        self.copyProperties('PDDetermineCharacterizations', PROPS_FOR_PD_CHARACTER)
 
     def _getLinearizedFilenames(self, propertyName):
         runnumbers = self.getProperty(propertyName).value
@@ -101,13 +103,18 @@ class AlignAndFocusPowderFromFiles(DataProcessorAlgorithm):
         tempname = '__%s_temp' % wkspname
         Load(Filename=filename, OutputWorkspace=tempname,
              MetaDataOnly=True)
+
+        # put together argument list
+        args = dict(InputWorkspace=tempname,
+                    ReductionProperties=self.getProperty('ReductionProperties').valueAsStr)
+        for name in PROPS_FOR_PD_CHARACTER:
+            prop = self.getProperty(name)
+            if not prop.isDefault:
+                args[name] = prop.value
         if self.charac is not None:
-            PDDetermineCharacterizations(InputWorkspace=tempname,
-                                         Characterizations=self.charac,
-                                         ReductionProperties=self.getProperty('ReductionProperties').valueAsStr)
-        else:
-            PDDetermineCharacterizations(InputWorkspace=tempname,
-                                         ReductionProperties=self.getProperty('ReductionProperties').valueAsStr)
+            args['Characterizations'] = self.charac
+
+        PDDetermineCharacterizations(**args)
         DeleteWorkspace(Workspace=tempname)
 
     def __getCacheName(self, wkspname):
@@ -223,6 +230,10 @@ class AlignAndFocusPowderFromFiles(DataProcessorAlgorithm):
                 if self.kwargs['PreserveEvents']:
                     CompressEvents(InputWorkspace=finalname, OutputWorkspace=finalname)
 
+        # with more than one chunk or file the integrated proton charge is
+        # generically wrong
+        mtd[finalname].run().integrateProtonCharge()
+
         # set the output workspace
         self.setProperty('OutputWorkspace', mtd[finalname])
 
diff --git a/Framework/PythonInterface/plugins/algorithms/BASISReduction.py b/Framework/PythonInterface/plugins/algorithms/BASISReduction.py
index 3d63236e7d3544a29492961baf300512b7d435d2..28c0eca22ecd0948cc8300fec0a361f1a5767b19 100644
--- a/Framework/PythonInterface/plugins/algorithms/BASISReduction.py
+++ b/Framework/PythonInterface/plugins/algorithms/BASISReduction.py
@@ -379,7 +379,8 @@ class BASISReduction(PythonAlgorithm):
         self._sumRuns(run_set, wsName, wsName_mon, extra_extension)
         self._calibData(wsName, wsName_mon)
         if not self._debugMode:
-            sapi.DeleteWorkspace(wsName_mon)  # delete monitors
+            if not self._noMonNorm:
+                sapi.DeleteWorkspace(wsName_mon)  # delete monitors
         return wsName
 
     def _group_and_SofQW(self, wsName, etRebins, isSample=True):
diff --git a/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py b/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py
index 7bc2595d2bb4dffef4f26c57bac8be3e4d3c6c3d..a2a672070c19655a13a053d12d75562353d07425 100644
--- a/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py
+++ b/Framework/PythonInterface/plugins/algorithms/DeltaPDF3D.py
@@ -34,13 +34,13 @@ class DeltaPDF3D(PythonAlgorithm):
 
         self.declareProperty("RemoveReflections", True, "Remove HKL reflections")
         condition = EnabledWhenProperty("RemoveReflections", PropertyCriterion.IsDefault)
-        self.declareProperty("Shape", "cube", doc="Shape to cut out reflections",
+        self.declareProperty("Shape", "sphere", doc="Shape to cut out reflections",
                              validator=StringListValidator(['sphere', 'cube']))
         self.setPropertySettings("Shape", condition)
         val_min_zero = FloatArrayBoundedValidator()
         val_min_zero.setLower(0.)
         self.declareProperty(FloatArrayProperty("Size", [0.2], validator=val_min_zero),
-                             "Width of cube/diameter of sphere used to remove reflections, in (HKL)")
+                             "Width of cube/diameter of sphere used to remove reflections, in (HKL) (one or three values)")
         self.setPropertySettings("Size", condition)
         self.declareProperty("SpaceGroup", "",
                              doc="Space group for reflection removal, either full name or number. If empty all HKL's will be removed.")
@@ -48,16 +48,22 @@ class DeltaPDF3D(PythonAlgorithm):
 
         self.declareProperty("CropSphere", False, "Limit min/max q values. Can help with edge effects.")
         condition = EnabledWhenProperty("CropSphere", PropertyCriterion.IsNotDefault)
-        self.declareProperty(FloatArrayProperty("SphereMin", [Property.EMPTY_DBL], validator=val_min_zero), "Min Sphere")
+        self.declareProperty(FloatArrayProperty("SphereMin", [Property.EMPTY_DBL], validator=val_min_zero),
+                             "HKL values below which will be removed (one or three values)")
         self.setPropertySettings("SphereMin", condition)
-        self.declareProperty(FloatArrayProperty("SphereMax", [Property.EMPTY_DBL], validator=val_min_zero), "Max Sphere")
+        self.declareProperty(FloatArrayProperty("SphereMax", [Property.EMPTY_DBL], validator=val_min_zero),
+                             "HKL values above which will be removed (one or three values)")
         self.setPropertySettings("SphereMax", condition)
+        self.declareProperty("FillValue", Property.EMPTY_DBL, "Value to replace with outside sphere")
+        self.setPropertySettings("FillValue", condition)
 
         self.declareProperty("Convolution", True, "Apply convolution to fill in removed reflections")
         condition = EnabledWhenProperty("Convolution", PropertyCriterion.IsDefault)
         self.declareProperty("ConvolutionWidth", 2.0, validator=FloatBoundedValidator(0.),
                              doc="Width of gaussian convolution in pixels")
         self.setPropertySettings("ConvolutionWidth", condition)
+        self.declareProperty("Deconvolution", False, "Apply deconvolution after fourier transform")
+        self.setPropertySettings("Deconvolution", condition)
 
         # Reflections
         self.setPropertyGroup("RemoveReflections","Reflection Removal")
@@ -69,10 +75,12 @@ class DeltaPDF3D(PythonAlgorithm):
         self.setPropertyGroup("CropSphere","Cropping to a sphere")
         self.setPropertyGroup("SphereMin","Cropping to a sphere")
         self.setPropertyGroup("SphereMax","Cropping to a sphere")
+        self.setPropertyGroup("FillValue","Cropping to a sphere")
 
         # Convolution
         self.setPropertyGroup("Convolution","Convolution")
         self.setPropertyGroup("ConvolutionWidth","Convolution")
+        self.setPropertyGroup("Deconvolution","Convolution")
 
     def validateInputs(self):
         issues = dict()
@@ -85,10 +93,10 @@ class DeltaPDF3D(PythonAlgorithm):
         if dimX.name != '[H,0,0]' or dimY.name != '[0,K,0]' or dimZ.name != '[0,0,L]':
             issues['InputWorkspace'] = 'dimensions must be [H,0,0], [0,K,0] and [0,0,L]'
 
-        if (dimX.getMaximum() != -dimX.getMinimum() or
-                dimY.getMaximum() != -dimY.getMinimum() or
-                dimZ.getMaximum() != -dimZ.getMinimum()):
-            issues['InputWorkspace'] = 'dimensions must be centered on zero'
+        for d in range(inWS.getNumDims()):
+            dim = inWS.getDimension(d)
+            if not np.isclose(dim.getMaximum(), -dim.getMinimum()):
+                issues['InputWorkspace'] = 'dimensions must be centered on zero'
 
         if self.getProperty("Convolution").value:
             try:
@@ -145,6 +153,10 @@ class DeltaPDF3D(PythonAlgorithm):
         Y=np.linspace(Ymin,Ymax,Ybins+1)
         Z=np.linspace(Zmin,Zmax,Zbins+1)
 
+        X, Y, Z = np.ogrid[(dimX.getX(0)+dimX.getX(1))/2:(dimX.getX(Xbins)+dimX.getX(Xbins-1))/2:Xbins*1j,
+                           (dimY.getX(0)+dimY.getX(1))/2:(dimY.getX(Ybins)+dimY.getX(Ybins-1))/2:Ybins*1j,
+                           (dimZ.getX(0)+dimZ.getX(1))/2:(dimZ.getX(Zbins)+dimZ.getX(Zbins-1))/2:Zbins*1j]
+
         if self.getProperty("RemoveReflections").value:
             progress.report("Removing Reflections")
             size = self.getProperty("Size").value
@@ -173,33 +185,39 @@ class DeltaPDF3D(PythonAlgorithm):
                                        int((k-size[1]-Ymin)/Ywidth+1):int((k+size[1]-Ymin)/Ywidth),
                                        int((l-size[2]-Zmin)/Zwidth+1):int((l+size[2]-Zmin)/Zwidth)]=np.nan
             else:  # sphere
-                Xst = ((X[:-1]+X[1:])/2).reshape((Xbins, 1, 1))
-                Yst = ((Y[:-1]+Y[1:])/2).reshape((1, Ybins, 1))
-                Zst = ((Z[:-1]+Z[1:])/2).reshape((1, 1, Zbins))
+                mask=((X-np.round(X))**2/size[0]**2 + (Y-np.round(Y))**2/size[1]**2 + (Z-np.round(Z))**2/size[2]**2 < 1)
 
-                for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
-                    for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
-                        for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
-                            if not check_space_group or sg.isAllowedReflection([h,k,l]):
-                                signal[(Xst-h)**2/size[0]**2 + (Yst-k)**2/size[1]**2 + (Zst-l)**2/size[2]**2 < 1]=np.nan
+                # Unmask invalid reflections
+                if check_space_group:
+                    for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
+                        for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
+                            for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
+                                if not sg.isAllowedReflection([h,k,l]):
+                                    mask[int((h-0.5-Xmin)/Xwidth+1):int((h+0.5-Xmin)/Xwidth),
+                                         int((k-0.5-Ymin)/Ywidth+1):int((k+0.5-Ymin)/Ywidth),
+                                         int((l-0.5-Zmin)/Zwidth+1):int((l+0.5-Zmin)/Zwidth)]=False
+
+                signal[mask]=np.nan
 
         if self.getProperty("CropSphere").value:
             progress.report("Cropping to sphere")
             sphereMin = self.getProperty("SphereMin").value
 
-            Xs, Ys, Zs = np.mgrid[(X[0]+X[1])/2:(X[-1]+X[-2])/2:Xbins*1j,
-                                  (Y[0]+Y[1])/2:(Y[-1]+Y[-2])/2:Ybins*1j,
-                                  (Z[0]+Z[1])/2:(Z[-1]+Z[-2])/2:Zbins*1j]
-
             if sphereMin[0] < Property.EMPTY_DBL:
                 if len(sphereMin)==1:
                     sphereMin = np.repeat(sphereMin, 3)
-                signal[Xs**2/sphereMin[0]**2 + Ys**2/sphereMin[1]**2 + Zs**2/sphereMin[2]**2 < 1]=np.nan
+                signal[X**2/sphereMin[0]**2 + Y**2/sphereMin[1]**2 + Z**2/sphereMin[2]**2 < 1]=np.nan
+
             sphereMax = self.getProperty("SphereMax").value
+
             if sphereMax[0] < Property.EMPTY_DBL:
                 if len(sphereMax)==1:
                     sphereMax = np.repeat(sphereMax, 3)
-                signal[Xs**2/sphereMax[0]**2 + Ys**2/sphereMax[1]**2 + Zs**2/sphereMax[2]**2 > 1]=np.nan
+                if self.getProperty("FillValue").value == Property.EMPTY_DBL:
+                    fill_value = np.nan
+                else:
+                    fill_value = self.getProperty("FillValue").value
+                signal[X**2/sphereMax[0]**2 + Y**2/sphereMax[1]**2 + Z**2/sphereMax[2]**2 > 1]=fill_value
 
         if self.getProperty("Convolution").value:
             progress.report("Convoluting signal")
@@ -220,11 +238,15 @@ class DeltaPDF3D(PythonAlgorithm):
         signal[np.isnan(signal)]=0
         signal[np.isinf(signal)]=0
 
-        signal=np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(signal))).real
+        signal=np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(signal)))
         number_of_bins = signal.shape
 
+        # Do deconvolution
+        if self.getProperty("Convolution").value and self.getProperty("Deconvolution").value:
+            signal /= self._deconvolution(np.array(signal.shape))
+
         # CreateMDHistoWorkspace expects Fortan `column-major` ordering
-        signal = signal.flatten('F')
+        signal = signal.real.flatten('F')
 
         createWS_alg = self.createChildAlgorithm("CreateMDHistoWorkspace", enableLogging=False)
         createWS_alg.setProperty("SignalInput", signal)
@@ -247,7 +269,7 @@ class DeltaPDF3D(PythonAlgorithm):
 
     def _convolution(self, signal):
         from astropy.convolution import convolve, convolve_fft, Gaussian1DKernel
-        G1D = Gaussian1DKernel(2).array
+        G1D = Gaussian1DKernel(self.getProperty("ConvolutionWidth").value).array
         G3D = G1D * G1D.reshape((-1,1)) * G1D.reshape((-1,1,1))
         try:
             logger.debug('Trying astropy.convolution.convolve_fft for convolution')
@@ -256,6 +278,18 @@ class DeltaPDF3D(PythonAlgorithm):
             logger.debug('Using astropy.convolution.convolve for convolution')
             return convolve(signal, G3D)
 
+    def _deconvolution(self, shape):
+        from astropy.convolution import Gaussian1DKernel
+        G1D = Gaussian1DKernel(self.getProperty("ConvolutionWidth").value).array
+        G3D = G1D * G1D.reshape((-1,1)) * G1D.reshape((-1,1,1))
+        G3D_shape = np.array(G3D.shape)
+        G3D = np.pad(G3D,pad_width=np.array([np.maximum(np.floor((shape-G3D_shape)/2),np.zeros(len(shape))),
+                                             np.maximum(np.ceil((shape-G3D_shape)/2),np.zeros(len(shape)))],
+                                            dtype=np.int).transpose(),mode='constant')
+        deconv = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(G3D)))
+        iarr = (deconv.shape-shape)//2
+        return deconv[iarr[0]:shape[0]+iarr[0],iarr[1]:shape[1]+iarr[1],iarr[2]:shape[2]+iarr[2]]
+
     def _calc_new_extents(self, inWS):
         # Calculate new extents for fft space
         extents=''
diff --git a/Framework/PythonInterface/plugins/algorithms/LoadCIF.py b/Framework/PythonInterface/plugins/algorithms/LoadCIF.py
index 6854025decbe3fb80bd96caf2d3fa87e641c2002..15ad464ddf2adeba70425b0631ade117d9f52f00 100644
--- a/Framework/PythonInterface/plugins/algorithms/LoadCIF.py
+++ b/Framework/PythonInterface/plugins/algorithms/LoadCIF.py
@@ -76,8 +76,11 @@ class SpaceGroupBuilder(object):
 
     def _getCleanSpaceGroupSymbol(self, rawSpaceGroupSymbol):
         # Remove :1 and :H from the symbol. Those are not required at the moment because they are the default.
-        removalRe = re.compile(':[1H]', re.IGNORECASE)
-        return re.sub(removalRe, '', rawSpaceGroupSymbol).strip()
+        # Also substitute 'R' and 'Z' endings used by ICSD to indicate alternative origin choice or settings
+        mappings = {':[1Hh]':'', ' S$':'', ' H$':'', ' Z$':' :2', ' R$':' :r'}
+        for k, v in mappings.items():
+            rawSpaceGroupSymbol = re.sub(k, v, rawSpaceGroupSymbol)
+        return rawSpaceGroupSymbol.strip()
 
     def _getSpaceGroupFromNumber(self, cifData):
         spaceGroupNumber = [int(cifData[x]) for x in
diff --git a/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py b/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py
index 3e0d662c0bab2582ab528674e39ad3dcd7c9713c..9aa40f7e2982ea51066690b06bb991bf0b66b99d 100644
--- a/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py
+++ b/Framework/PythonInterface/plugins/algorithms/LoadDNSLegacy.py
@@ -1,6 +1,7 @@
 from __future__ import (absolute_import, division, print_function)
 import mantid.simpleapi as api
 import numpy as np
+from scipy.constants import m_n, h
 import os
 import sys
 from mantid.api import PythonAlgorithm, AlgorithmFactory, WorkspaceProperty, \
@@ -22,6 +23,7 @@ class LoadDNSLegacy(PythonAlgorithm):
         """
         PythonAlgorithm.__init__(self)
         self.tolerance = 1e-2
+        self.instrument = None
 
     def category(self):
         """
@@ -44,7 +46,7 @@ class LoadDNSLegacy(PythonAlgorithm):
                              "Name of DNS experimental data file.")
 
         self.declareProperty(FileProperty("CoilCurrentsTable", "",
-                                          FileAction.Load, ['.txt']),
+                                          FileAction.OptionalLoad, ['.txt']),
                              "Name of file containing table of coil currents and polarisations.")
 
         self.declareProperty(WorkspaceProperty("OutputWorkspace",
@@ -57,12 +59,23 @@ class LoadDNSLegacy(PythonAlgorithm):
 
     def get_polarisation_table(self):
         # load polarisation table
+        poltable = []
         poltable_name = self.getPropertyValue("CoilCurrentsTable")
+        if not poltable_name:
+            # read the table from IDF
+            for p in ['x', 'y', 'z']:
+                currents = self.instrument.getStringParameter("{}_currents".format(p))[0].split(';')
+                for cur in currents:
+                    row = {'polarisation': p, 'comment': '7'}
+                    row['C_a'], row['C_b'], row['C_c'], row['C_z'] = [float(c) for c in cur.split(',')]
+                    poltable.append(row)
+            self.log().debug("Loaded polarisation table:\n" + str(poltable))
+            return poltable
         try:
-            currents = np.genfromtxt(poltable_name, names=True, dtype=None)
+            currents = np.genfromtxt(poltable_name, names=True, dtype='U2,U2,f8,f8,f8,f8')
+            self.log().debug("Coil currents are: " + str(currents))
         except ValueError as err:
             raise RuntimeError("Invalid coil currents table: " + str(err))
-        poltable = []
         colnames = currents.dtype.names
         poltable = [dict(list(zip(colnames, cur))) for cur in currents]
         self.log().debug("Loaded polarisation table:\n" + str(poltable))
@@ -98,6 +111,8 @@ class LoadDNSLegacy(PythonAlgorithm):
             message = "File " + filename + " does not contain any data!"
             self.log().error(message)
             raise RuntimeError(message)
+        # sample logs
+        logs = {"names": [], "values": [], "units": []}
 
         # load run information
         metadata = DNSdata()
@@ -108,6 +123,10 @@ class LoadDNSLegacy(PythonAlgorithm):
             self.log().error(message)
             raise RuntimeError(message)
 
+        tmp = api.LoadEmptyInstrument(InstrumentName='DNS')
+        self.instrument = tmp.getInstrument()
+        api.DeleteWorkspace(tmp)
+
         # load polarisation table and determine polarisation
         poltable = self.get_polarisation_table()
         pol = self.get_polarisation(metadata, poltable)
@@ -116,10 +135,48 @@ class LoadDNSLegacy(PythonAlgorithm):
             self.log().warning("Failed to determine polarisation for " + filename +
                                ". Values have been set to undefined.")
         ndet = 24
-        # this needed to be able to use ConvertToMD
-        dataX = np.zeros(2*ndet)
-        dataX.fill(metadata.wavelength + 0.00001)
-        dataX[::2] -= 0.000002
+        unitX="Wavelength"
+        if metadata.tof_channel_number < 2:
+            dataX = np.zeros(2*ndet)
+            dataX.fill(metadata.wavelength + 0.00001)
+            dataX[::2] -= 0.000002
+        else:
+            unitX="TOF"
+
+            # get instrument parameters
+            l1 = np.linalg.norm(self.instrument.getSample().getPos() - self.instrument.getSource().getPos())
+            self.log().notice("L1 = {} m".format(l1))
+            dt_factor = float(self.instrument.getStringParameter("channel_width_factor")[0])
+
+            # channel width
+            dt = metadata.tof_channel_width*dt_factor
+            # calculate tof1
+            velocity = h/(m_n*metadata.wavelength*1e-10)   # m/s
+            tof1 = 1e+06*l1/velocity        # microseconds
+            self.log().debug("TOF1 = {} microseconds".format(tof1))
+            self.log().debug("Delay time = {} microsecond".format(metadata.tof_delay_time))
+            # create dataX array
+            x0 = tof1 + metadata.tof_delay_time
+            self.log().debug("TOF1 = {} microseconds".format(tof1))
+            dataX = np.linspace(x0, x0+metadata.tof_channel_number*dt, metadata.tof_channel_number+1)
+
+            # sample logs
+            logs["names"].extend(["channel_width", "TOF1", "delay_time", "tof_channels"])
+            logs["values"].extend([dt, tof1, metadata.tof_delay_time, metadata.tof_channel_number])
+            logs["units"].extend(["microseconds", "microseconds", "microseconds", ""])
+            if metadata.tof_elastic_channel:
+                logs["names"].append("EPP")
+                logs["values"].append(metadata.tof_elastic_channel)
+                logs["units"].append("")
+            if metadata.chopper_rotation_speed:
+                logs["names"].append("chopper_speed")
+                logs["values"].append(metadata.chopper_rotation_speed)
+                logs["units"].append("Hz")
+            if metadata.chopper_slits:
+                logs["names"].append("chopper_slits")
+                logs["values"].append(metadata.chopper_slits)
+                logs["units"].append("")
+
         # data normalization
         factor = 1.0
         yunit = "Counts"
@@ -141,7 +198,7 @@ class LoadDNSLegacy(PythonAlgorithm):
         dataE = np.sqrt(data_array[0:ndet, 1:])/factor
         # create workspace
         api.CreateWorkspace(OutputWorkspace=outws_name, DataX=dataX, DataY=dataY,
-                            DataE=dataE, NSpec=ndet, UnitX="Wavelength")
+                            DataE=dataE, NSpec=ndet, UnitX=unitX)
         outws = api.AnalysisDataService.retrieve(outws_name)
         api.LoadInstrument(outws, InstrumentName='DNS', RewriteSpectraMap=True)
 
@@ -156,68 +213,40 @@ class LoadDNSLegacy(PythonAlgorithm):
         # rotate the detector bank to the proper position
         api.RotateInstrumentComponent(outws, "bank0", X=0, Y=1, Z=0, Angle=metadata.deterota)
         # add sample log Ei and wavelength
-        api.AddSampleLog(outws, LogName='Ei', LogText=str(metadata.incident_energy),
-                         LogType='Number', LogUnit='meV')
-        api.AddSampleLog(outws, LogName='wavelength', LogText=str(metadata.wavelength),
-                         LogType='Number', LogUnit='Angstrom')
+        logs["names"].extend(["Ei", "wavelength"])
+        logs["values"].extend([metadata.incident_energy, metadata.wavelength])
+        logs["units"].extend(["meV", "Angstrom"])
+
         # add other sample logs
-        api.AddSampleLog(outws, LogName='deterota', LogText=str(metadata.deterota),
-                         LogType='Number', LogUnit='Degrees')
-        api.AddSampleLog(outws, 'mon_sum',
-                         LogText=str(float(metadata.monitor_counts)), LogType='Number')
-        api.AddSampleLog(outws, LogName='duration', LogText=str(metadata.duration),
-                         LogType='Number', LogUnit='Seconds')
-        api.AddSampleLog(outws, LogName='huber', LogText=str(metadata.huber),
-                         LogType='Number', LogUnit='Degrees')
-        api.AddSampleLog(outws, LogName='omega', LogText=str(metadata.huber - metadata.deterota),
-                         LogType='Number', LogUnit='Degrees')
-        api.AddSampleLog(outws, LogName='T1', LogText=str(metadata.temp1),
-                         LogType='Number', LogUnit='K')
-        api.AddSampleLog(outws, LogName='T2', LogText=str(metadata.temp2),
-                         LogType='Number', LogUnit='K')
-        api.AddSampleLog(outws, LogName='Tsp', LogText=str(metadata.tsp),
-                         LogType='Number', LogUnit='K')
-        # flipper
-        api.AddSampleLog(outws, LogName='flipper_precession',
-                         LogText=str(metadata.flipper_precession_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='flipper_z_compensation',
-                         LogText=str(metadata.flipper_z_compensation_current),
-                         LogType='Number', LogUnit='A')
+        logs["names"].extend(["deterota", "mon_sum", "duration", "huber", "omega", "T1", "T2", "Tsp"])
+        logs["values"].extend([metadata.deterota, metadata.monitor_counts, metadata.duration,
+                               metadata.huber, metadata.huber - metadata.deterota,
+                               metadata.temp1, metadata.temp2, metadata.tsp])
+        logs["units"].extend(["Degrees", "Counts", "Seconds", "Degrees", "Degrees", "K", "K", "K"])
+
+        # flipper, coil currents and polarisation
         flipper_status = 'OFF'    # flipper OFF
         if abs(metadata.flipper_precession_current) > sys.float_info.epsilon:
             flipper_status = 'ON'    # flipper ON
-        api.AddSampleLog(outws, LogName='flipper',
-                         LogText=flipper_status, LogType='String')
-        # coil currents
-        api.AddSampleLog(outws, LogName='C_a', LogText=str(metadata.a_coil_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='C_b', LogText=str(metadata.b_coil_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='C_c', LogText=str(metadata.c_coil_current),
-                         LogType='Number', LogUnit='A')
-        api.AddSampleLog(outws, LogName='C_z', LogText=str(metadata.z_coil_current),
-                         LogType='Number', LogUnit='A')
-        # type of polarisation
-        api.AddSampleLog(outws, 'polarisation', LogText=pol[0], LogType='String')
-        api.AddSampleLog(outws, 'polarisation_comment', LogText=str(pol[1]), LogType='String')
+        logs["names"].extend(["flipper_precession", "flipper_z_compensation", "flipper",
+                              "C_a", "C_b", "C_c", "C_z", "polarisation", "polarisation_comment"])
+        logs["values"].extend([metadata.flipper_precession_current,
+                               metadata.flipper_z_compensation_current, flipper_status,
+                               metadata.a_coil_current, metadata.b_coil_current,
+                               metadata.c_coil_current, metadata.z_coil_current,
+                               str(pol[0]), str(pol[1])])
+        logs["units"].extend(["A", "A", "", "A", "A", "A", "A", "", ""])
+
         # slits
-        api.AddSampleLog(outws, LogName='slit_i_upper_blade_position',
-                         LogText=str(metadata.slit_i_upper_blade_position),
-                         LogType='Number', LogUnit='mm')
-        api.AddSampleLog(outws, LogName='slit_i_lower_blade_position',
-                         LogText=str(metadata.slit_i_lower_blade_position),
-                         LogType='Number', LogUnit='mm')
-        api.AddSampleLog(outws, LogName='slit_i_left_blade_position',
-                         LogText=str(metadata.slit_i_left_blade_position),
-                         LogType='Number', LogUnit='mm')
-        api.AddSampleLog(outws, 'slit_i_right_blade_position',
-                         LogText=str(metadata.slit_i_right_blade_position),
-                         LogType='Number', LogUnit='mm')
-        # data normalization
+        logs["names"].extend(["slit_i_upper_blade_position", "slit_i_lower_blade_position",
+                              "slit_i_left_blade_position", "slit_i_right_blade_position"])
+        logs["values"].extend([metadata.slit_i_upper_blade_position, metadata.slit_i_lower_blade_position,
+                               metadata.slit_i_left_blade_position, metadata.slit_i_right_blade_position])
+        logs["units"].extend(["mm", "mm", "mm", "mm"])
 
         # add information whether the data are normalized (duration/monitor/no):
         api.AddSampleLog(outws, LogName='normalized', LogText=norm, LogType='String')
+        api.AddSampleLogMultiple(outws, LogNames=logs["names"], LogValues=logs["values"], LogUnits=logs["units"])
 
         outws.setYUnit(yunit)
         outws.setYUnitLabel(ylabel)
diff --git a/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py b/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
index 158b3cf90d0eb86c8eb9de38f87f5d3a31ba63f7..5fd4663c3dd95db1e797a66f852095fb07f53bfe 100644
--- a/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
+++ b/Framework/PythonInterface/plugins/algorithms/MatchPeaks.py
@@ -228,8 +228,10 @@ class MatchPeaks(PythonAlgorithm):
         @return          :: bin numbers of the peak positions
         """
 
+        fit_table_name = input_ws.getName() + '_epp'
+
         if isinstance(input_ws, MatrixWorkspace):
-            fit_table = FindEPP(InputWorkspace=input_ws)
+            fit_table = FindEPP(InputWorkspace=input_ws, OutputWorkspace=fit_table_name)
         elif isinstance(input_ws, ITableWorkspace):
             fit_table = input_ws
         else:
@@ -277,12 +279,7 @@ class MatchPeaks(PythonAlgorithm):
 
             logger.debug('Spectrum {0} will be shifted to bin {1}'.format(i,peak_bin[i]))
 
-        # Clean-up unused TableWorkspaces in try-catch
-        # Direct deletion causes problems when running in parallel for too many workspaces
-        try:
-            DeleteWorkspace(fit_table)
-        except ValueError:
-            logger.debug('Fit table already deleted')
+        DeleteWorkspace(fit_table)
 
         return peak_bin
 
diff --git a/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py b/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py
index ca4030a07254d41f336300011d3d53d4db000d63..d060bfde63b3f59389550ed9926526392065a356 100644
--- a/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py
+++ b/Framework/PythonInterface/plugins/algorithms/PDToPDFgetN.py
@@ -1,7 +1,10 @@
 #pylint: disable=no-init
 from __future__ import (absolute_import, division, print_function)
-from mantid.simpleapi import *
-from mantid.api import *
+from mantid.simpleapi import AlignAndFocusPowder, AlignAndFocusPowderFromFiles, \
+    NormaliseByCurrent, PDDetermineCharacterizations,PDLoadCharacterizations, \
+    SaveGSS, SetUncertainties
+from mantid.api import mtd, AlgorithmFactory, DataProcessorAlgorithm, FileAction, \
+    FileProperty, MatrixWorkspaceProperty, PropertyMode
 from mantid.kernel import Direction, FloatArrayProperty
 import mantid
 
@@ -28,8 +31,7 @@ class PDToPDFgetN(DataProcessorAlgorithm):
                                           defaultValue="", action=FileAction.OptionalLoad,
                                           extensions=["_event.nxs", ".nxs.h5"]),
                              "Event file")
-        self.declareProperty("MaxChunkSize", 0.0,
-                             "Specify maximum Gbytes of file to read in one chunk.  Default is whole file.")
+        self.copyProperties('AlignAndFocusPowderFromFiles', 'MaxChunkSize')
         self.declareProperty("FilterBadPulses", 95.,
                              doc="Filter out events measured while proton " +
                              "charge is more than 5% below average")
@@ -47,6 +49,7 @@ class PDToPDFgetN(DataProcessorAlgorithm):
         self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "",
                                                      direction=Direction.Output),
                              doc="Handle to reduced workspace")
+        self.copyProperties('AlignAndFocusPowderFromFiles', 'CacheDir')
         self.declareProperty(FileProperty(name="PDFgetNFile", defaultValue="", action=FileAction.Save,
                                           extensions=[".getn"]), "Output filename")
         self.setPropertyGroup("OutputWorkspace", group)
@@ -59,13 +62,9 @@ class PDToPDFgetN(DataProcessorAlgorithm):
                                           action=FileAction.OptionalLoad,
                                           extensions=["txt"]),
                              "File with characterization runs denoted")
-
-        self.declareProperty("RemovePromptPulseWidth", 0.0,
-                             "Width of events (in microseconds) near the prompt pulse to remove. 0 disables")
-        self.declareProperty("CropWavelengthMin", 0.,
-                             "Crop the data at this minimum wavelength.")
-        self.declareProperty("CropWavelengthMax", 0.,
-                             "Crop the data at this maximum wavelength.")
+        self.copyProperties('AlignAndFocusPowderFromFiles',
+                            ['FrequencyLogNames', 'WaveLengthLogNames', 'RemovePromptPulseWidth',
+                             'CropWavelengthMin', 'CropWavelengthMax'])
 
         self.declareProperty(FloatArrayProperty("Binning", values=[0., 0., 0.],
                                                 direction=Direction.Input),
@@ -102,7 +101,7 @@ class PDToPDFgetN(DataProcessorAlgorithm):
         return issues
 
     def _loadCharacterizations(self):
-        self._focusPos = {}
+        self._alignArgs = {}
         self._iparmFile = None
 
         charFilename = self.getProperty("CharacterizationRunsFile").value
@@ -113,48 +112,55 @@ class PDToPDFgetN(DataProcessorAlgorithm):
         results = PDLoadCharacterizations(Filename=charFilename,
                                           OutputWorkspace="characterizations")
         self._iparmFile = results[1]
-        self._focusPos['PrimaryFlightPath'] = results[2]
-        self._focusPos['SpectrumIDs'] = results[3]
-        self._focusPos['L2'] = results[4]
-        self._focusPos['Polar'] = results[5]
-        self._focusPos['Azimuthal'] = results[6]
+        self._alignArgs['PrimaryFlightPath'] = results[2]
+        self._alignArgs['SpectrumIDs'] = results[3]
+        self._alignArgs['L2'] = results[4]
+        self._alignArgs['Polar'] = results[5]
+        self._alignArgs['Azimuthal'] = results[6]
 
     def PyExec(self):
         self._loadCharacterizations()
+        charac = ""
+        if mtd.doesExist("characterizations"):
+            charac = "characterizations"
+
+        # arguments for both AlignAndFocusPowder and AlignAndFocusPowderFromFiles
+        self._alignArgs['OutputWorkspace'] = self.getPropertyValue("OutputWorkspace")
+        self._alignArgs['RemovePromptPulseWidth'] = self.getProperty("RemovePromptPulseWidth").value
+        self._alignArgs['CompressTolerance'] = COMPRESS_TOL_TOF
+        self._alignArgs['PreserveEvents'] = True
+        self._alignArgs['CalFileName'] = self.getProperty("CalibrationFile").value
+        self._alignArgs['Params']=self.getProperty("Binning").value
+        self._alignArgs['ResampleX']=self.getProperty("ResampleX").value
+        self._alignArgs['Dspacing']=True
+        self._alignArgs['CropWavelengthMin'] = self.getProperty('CropWavelengthMin').value
+        self._alignArgs['CropWavelengthMax'] = self.getProperty('CropWavelengthMax').value
+        self._alignArgs['ReductionProperties'] = '__snspowderreduction'
 
         wksp = self.getProperty("InputWorkspace").value
-        if wksp is None:
-            wksp = LoadEventAndCompress(Filename=self.getProperty("Filename").value,
-                                        OutputWorkspace=self.getPropertyValue("OutputWorkspace"),
-                                        MaxChunkSize=self.getProperty("MaxChunkSize").value,
-                                        FilterBadPulses=self.getProperty("FilterBadPulses").value,
-                                        CompressTOFTolerance=COMPRESS_TOL_TOF)
-            if wksp.getNumberEvents() <= 0: # checked InputWorkspace during validateInputs
-                raise RuntimeError("Workspace contains no events")
-        else:
+        if wksp is None:  # run from file with caching
+            wksp = AlignAndFocusPowderFromFiles(Filename=self.getProperty("Filename").value,
+                                                CacheDir=self.getProperty("CacheDir").value,
+                                                MaxChunkSize=self.getProperty("MaxChunkSize").value,
+                                                FilterBadPulses=self.getProperty("FilterBadPulses").value,
+                                                Characterizations=charac,
+                                                FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                                WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value,
+                                                **(self._alignArgs))
+        else:  # process the input workspace
             self.log().information("Using input workspace. Ignoring properties 'Filename', " +
                                    "'OutputWorkspace', 'MaxChunkSize', and 'FilterBadPulses'")
 
-        charac = ""
-        if mtd.doesExist("characterizations"):
-            charac = "characterizations"
+            # get the correct row of the table
+            PDDetermineCharacterizations(InputWorkspace=wksp,
+                                         Characterizations=charac,
+                                         ReductionProperties="__snspowderreduction",
+                                         FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                         WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
+
+            wksp = AlignAndFocusPowder(InputWorkspace=wksp,
+                                       **(self._alignArgs))
 
-        # get the correct row of the table
-        PDDetermineCharacterizations(InputWorkspace=wksp,
-                                     Characterizations=charac,
-                                     ReductionProperties="__snspowderreduction")
-
-        wksp = AlignAndFocusPowder(InputWorkspace=wksp, OutputWorkspace=wksp,
-                                   CalFileName=self.getProperty("CalibrationFile").value,
-                                   Params=self.getProperty("Binning").value,
-                                   ResampleX=self.getProperty("ResampleX").value, Dspacing=True,
-                                   PreserveEvents=False,
-                                   RemovePromptPulseWidth=self.getProperty("RemovePromptPulseWidth").value,
-                                   CompressTolerance=COMPRESS_TOL_TOF,
-                                   CropWavelengthMin=self.getProperty("CropWavelengthMin").value,
-                                   CropWavelengthMax=self.getProperty("CropWavelengthMax").value,
-                                   ReductionProperties="__snspowderreduction",
-                                   **(self._focusPos))
         wksp = NormaliseByCurrent(InputWorkspace=wksp, OutputWorkspace=wksp)
         wksp.getRun()['gsas_monitor'] = 1
         if self._iparmFile is not None:
diff --git a/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py b/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py
index 4b8f7a5f24372a386aefe2363bbef592f5d6112e..7c5f44729e2f80775a540387bce93d46175188a0 100644
--- a/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py
+++ b/Framework/PythonInterface/plugins/algorithms/SNSPowderReduction.py
@@ -5,11 +5,11 @@ import os
 
 import mantid.simpleapi as api
 from mantid.api import mtd, AlgorithmFactory, AnalysisDataService, DataProcessorAlgorithm, \
-    FileAction, FileProperty, ITableWorkspaceProperty, MultipleFileProperty, PropertyMode, \
-    WorkspaceProperty, ITableWorkspace, MatrixWorkspace
+    FileAction, FileProperty, ITableWorkspaceProperty, PropertyMode, WorkspaceProperty, \
+    ITableWorkspace, MatrixWorkspace
 from mantid.kernel import ConfigService, Direction, FloatArrayProperty, \
     FloatBoundedValidator, IntArrayBoundedValidator, IntArrayProperty, \
-    Property, PropertyManagerDataService, StringArrayProperty, StringListValidator
+    PropertyManagerDataService, StringListValidator
 from mantid.dataobjects import SplittersWorkspace  # SplittersWorkspace
 # Use xrange in Python 2
 from six.moves import range #pylint: disable=redefined-builtin
@@ -150,11 +150,8 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         return "The algorithm used for reduction of powder diffraction data obtained on SNS instruments (e.g. PG3) "
 
     def PyInit(self):
-        self.declareProperty(MultipleFileProperty(name="Filename",
-                                                  extensions=EXTENSIONS_NXS),
-                             "Event file")
-        self.declareProperty("PreserveEvents", True,
-                             "Argument to supply to algorithms that can change from events to histograms.")
+        self.copyProperties('AlignAndFocusPowderFromFiles', ['Filename', 'PreserveEvents'])
+
         self.declareProperty("Sum", False,
                              "Sum the runs. Does nothing for characterization runs")
         self.declareProperty("PushDataPositive", "None",
@@ -179,7 +176,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                              doc="If specified overrides value in CharacterizationRunsFile. If -1 turns off correction."
                                  "")
         self.declareProperty(FileProperty(name="CalibrationFile",defaultValue="",action=FileAction.Load,
-                                          extensions=[".h5", ".hd5", ".hdf", ".cal"]))
+                                          extensions=[".h5", ".hd5", ".hdf", ".cal"]))  # CalFileName
         self.declareProperty(FileProperty(name="GroupingFile",defaultValue="",action=FileAction.OptionalLoad,
                                           extensions=[".xml"]), "Overrides grouping from CalibrationFile")
         self.declareProperty(FileProperty(name="CharacterizationRunsFile",
@@ -188,30 +185,16 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                           extensions=["txt"]), "File with characterization runs denoted")
         self.declareProperty(FileProperty(name="ExpIniFilename", defaultValue="", action=FileAction.OptionalLoad,
                                           extensions=[".ini"]))
-        self.declareProperty("UnwrapRef", 0.,
-                             "Reference total flight path for frame unwrapping. Zero skips the correction")
-        self.declareProperty("LowResRef", 0.,
-                             "Reference DIFC for resolution removal. Zero skips the correction")
-        self.declareProperty("CropWavelengthMin", 0.,
-                             "Crop the data at this minimum wavelength. Overrides LowResRef.")
-        self.declareProperty("CropWavelengthMax", 0.,
-                             "Crop the data at this maximum wavelength. Forces use of CropWavelengthMin.")
-        self.declareProperty("RemovePromptPulseWidth", 0.0,
-                             "Width of events (in microseconds) near the prompt pulse to remove. 0 disables")
-        self.declareProperty("MaxChunkSize", 0.0,
-                             "Specify maximum Gbytes of file to read in one chunk.  Default is whole file.")
-        self.declareProperty("FilterCharacterizations", False,
-                             "Filter the characterization runs using above parameters. This only works for event files."
-                             "")
+        self.copyProperties('AlignAndFocusPowderFromFiles',
+                            ['UnwrapRef', 'LowResRef', 'CropWavelengthMin', 'CropWavelengthMax', 'RemovePromptPulseWidth',
+                             'MaxChunkSize'])
         self.declareProperty(FloatArrayProperty("Binning", values=[0., 0., 0.],
                                                 direction=Direction.Input),
-                             "Positive is linear bins, negative is logorithmic")
-        self.declareProperty("ResampleX", 0,
-                             "Number of bins in x-axis. Non-zero value overrides \"Params\" property. "
-                             "Negative value means logorithmic binning.")
+                             "Positive is linear bins, negative is logorithmic")  # Params
+        self.copyProperties('AlignAndFocusPowderFromFiles', ['ResampleX'])
         self.declareProperty("BinInDspace", True,
                              "If all three bin parameters a specified, whether they are in dspace (true) or "
-                             "time-of-flight (false)")
+                             "time-of-flight (false)")  # DSpacing
         # section of vanadium run processing
         self.declareProperty("StripVanadiumPeaks", True,
                              "Subtract fitted vanadium peaks from the known positions.")
@@ -224,7 +207,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         self.declareProperty("BackgroundSmoothParams", "", "Default=off, suggested 20,2")
 
         # filtering
-        self.declareProperty("FilterBadPulses", 95.,
+        self.declareProperty("FilterBadPulses", 95.,  # different default value
                              doc="Filter out events measured while proton charge is more than 5% below average")
         self.declareProperty("ScaleData", defaultValue=1., validator=FloatBoundedValidator(lower=0., exclusive=True),
                              doc="Constant to multiply the data before writing out. This does not apply to "
@@ -234,6 +217,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                              "'pdfgetn', and 'topas'")
         self.declareProperty("OutputFilePrefix", "", "Overrides the default filename for the output file (Optional).")
         self.declareProperty(FileProperty(name="OutputDirectory",defaultValue="",action=FileAction.Directory))
+        self.copyProperties('AlignAndFocusPowderFromFiles', 'CacheDir')
         self.declareProperty("FinalDataUnits", "dSpacing", StringListValidator(["dSpacing","MomentumTransfer"]))
 
         workspace_prop = WorkspaceProperty('SplittersWorkspace', '', Direction.Input, PropertyMode.Optional)
@@ -247,19 +231,13 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
         self.declareProperty("LowResolutionSpectraOffset", -1,
                              "If larger and equal to 0, then process low resolution TOF and offset is the spectra "
-                             "number. Otherwise, ignored.")
+                             "number. Otherwise, ignored.")  # LowResolutionSpectraOffset
 
         self.declareProperty("NormalizeByCurrent", True, "Normalize by current")
 
         self.declareProperty("CompressTOFTolerance", 0.01, "Tolerance to compress events in TOF.")
 
-        self.declareProperty(StringArrayProperty("FrequencyLogNames", ["SpeedRequest1", "Speed1", "frequency"],
-                                                 direction=Direction.Input),
-                             "Possible log names for frequency.")
-
-        self.declareProperty(StringArrayProperty("WaveLengthLogNames", ["LambdaRequest", "lambda"],
-                                                 direction=Direction.Input),
-                             "Candidate log names for wave length.")
+        self.copyProperties('AlignAndFocusPowderFromFiles', ['FrequencyLogNames', 'WaveLengthLogNames'])
 
         return
 
@@ -289,7 +267,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         self._vanPeakFWHM = self.getProperty("VanadiumFWHM").value
         self._vanSmoothing = self.getProperty("VanadiumSmoothParams").value
         self._vanRadius = self.getProperty("VanadiumRadius").value
-        calib = self.getProperty("CalibrationFile").value
+        self.calib = self.getProperty("CalibrationFile").value
         self._scaleFactor = self.getProperty("ScaleData").value
         self._outDir = self.getProperty("OutputDirectory").value
         self._outPrefix = self.getProperty("OutputFilePrefix").value.strip()
@@ -365,8 +343,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             if self._splittersWS is not None:
                 raise NotImplementedError("Summing spectra and filtering events are not supported simultaneously.")
 
-            sam_ws_name = self._focusAndSum(samRuns, sample_time_filter_wall, calib,
-                                            reload_if_loaded=reload_event_file,
+            sam_ws_name = self._focusAndSum(samRuns, reload_if_loaded=reload_event_file,
                                             preserveEvents=preserveEvents)
             assert isinstance(sam_ws_name, str), 'Returned from _focusAndSum() must be a string but not' \
                                                  '%s. ' % str(type(sam_ws_name))
@@ -379,11 +356,15 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             for sam_run_number in samRuns:
                 # first round of processing the sample
                 self._info = None
-                returned = self._focusChunks(sam_run_number, sample_time_filter_wall, calib,
-                                             splitwksp=self._splittersWS,
-                                             normalisebycurrent=self._normalisebycurrent,
-                                             reload_if_loaded=reload_event_file,
-                                             preserveEvents=preserveEvents)
+                if sample_time_filter_wall[0] == 0. and sample_time_filter_wall[-1] == 0. \
+                        and self._splittersWS is None:
+                    returned = self._focusAndSum([sam_run_number], reload_if_loaded=reload_event_file,
+                                                 preserveEvents=preserveEvents)
+                else:
+                    returned = self._focusChunks(sam_run_number, sample_time_filter_wall,
+                                                 splitwksp=self._splittersWS,
+                                                 reload_if_loaded=reload_event_file,
+                                                 preserveEvents=preserveEvents)
 
                 if isinstance(returned, list):
                     # Returned with a list of workspaces
@@ -416,8 +397,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             # process the container
             can_run_numbers = self._info["container"].value
             can_run_numbers = ['%s_%d' % (self._instrument, value) for value in can_run_numbers]
-            can_run_ws_name = self._process_container_runs(can_run_numbers, sample_time_filter_wall,
-                                                           samRunIndex, calib, preserveEvents)
+            can_run_ws_name = self._process_container_runs(can_run_numbers, samRunIndex, preserveEvents)
             if can_run_ws_name is not None:
                 workspacelist.append(can_run_ws_name)
 
@@ -426,7 +406,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             van_run_number_list = ['%s_%d' % (self._instrument, value) for value in van_run_number_list]
             van_specified = not noRunSpecified(van_run_number_list)
             if van_specified:
-                van_run_ws_name = self._process_vanadium_runs(van_run_number_list, sample_time_filter_wall, samRunIndex, calib)
+                van_run_ws_name = self._process_vanadium_runs(van_run_number_list, samRunIndex)
                 workspacelist.append(van_run_ws_name)
             else:
                 van_run_ws_name = None
@@ -536,17 +516,19 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         charFilename = self.getProperty("CharacterizationRunsFile").value
         expIniFilename = self.getProperty("ExpIniFilename").value
 
+        self._charTable = ''
         if charFilename is None or len(charFilename) <= 0:
             self.iparmFile = None
             return
 
+        self._charTable = 'characterizations'
         results = api.PDLoadCharacterizations(Filename=charFilename,
                                               ExpIniFilename=expIniFilename,
-                                              OutputWorkspace="characterizations")
+                                              OutputWorkspace=self._charTable)
         # export the characterizations table
-        self._charTable = results[0]
-        self.declareProperty(ITableWorkspaceProperty("CharacterizationsTable", "characterizations", Direction.Output))
-        self.setProperty("CharacterizationsTable", self._charTable)
+        charTable = results[0]
+        self.declareProperty(ITableWorkspaceProperty("CharacterizationsTable", self._charTable, Direction.Output))
+        self.setProperty("CharacterizationsTable", charTable)
 
         # get the focus positions from the properties
         self.iparmFile = results[1]
@@ -682,7 +664,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             raise RuntimeError("Cannot add incompatible wavelengths (%f != %f)"
                                % (left["wavelength"].value, right["wavelength"].value))
 
-    def _loadAndSum(self, filename_list, outName, **filterWall):
+    def _loadAndSum(self, filename_list, outName):
         """
         Load and sum
         Purpose:
@@ -713,7 +695,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                      OutputWorkspace=ws_name,
                                      MaxChunkSize=self._chunks,
                                      FilterBadPulses=self._filterBadPulses,
-                                     CompressTOFTolerance=self.COMPRESS_TOL_TOF, **filterWall)
+                                     CompressTOFTolerance=self.COMPRESS_TOL_TOF)
             if is_event_workspace(ws_name):
                 self.log().notice('Load event file %s, compress it and get %d events.' %
                                   (filename, get_workspace(ws_name).getNumberEvents()))
@@ -759,7 +741,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         return outName
 
     #pylint: disable=too-many-arguments
-    def _focusAndSum(self, filenames, filterWall, calib, preserveEvents=True, reload_if_loaded=True):
+    def _focusAndSum(self, filenames, preserveEvents=True, reload_if_loaded=True, final_name=None):
         """Load, sum, and focus data in chunks
         Purpose:
             Load, sum and focus data in chunks;
@@ -769,64 +751,54 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             The experimental runs are focused and summed together
         @param run_number_list:
         @param extension:
-        @param filterWall:
-        @param calib:
         @param preserveEvents:
         @return: string as the summed workspace's name
         """
-        sumRun = None
-        info = None
-
-        for filename in filenames:
-            self.log().information("[Sum] Process run number %s. " % filename)
-
-            # focus one run
-            out_ws_name = self._focusChunks(filename, filterWall, calib,
-                                            reload_if_loaded=reload_if_loaded,
-                                            normalisebycurrent=False,
-                                            preserveEvents=preserveEvents)
-            assert isinstance(out_ws_name, str), 'Output from _focusChunks() should be a string but' \
-                                                 ' not %s.' % str(type(out_ws_name))
-            assert self.does_workspace_exist(out_ws_name)
-
-            tempinfo = self._getinfo(out_ws_name)
-
-            # sum reduced runs
-            if sumRun is None:
-                # First run. No need to sumRun
-                sumRun = out_ws_name
-                info = tempinfo
-            else:
-                # Non-first run. Add this run to current summed run
-                self.checkInfoMatch(info, tempinfo)
-                # add current workspace to sub sum
-                api.Plus(LHSWorkspace=sumRun, RHSWorkspace=out_ws_name, OutputWorkspace=sumRun,
-                         ClearRHSWorkspace=allEventWorkspaces(sumRun, out_ws_name))
-                if is_event_workspace(sumRun) and self.COMPRESS_TOL_TOF > 0.:
-                    api.CompressEvents(InputWorkspace=sumRun, OutputWorkspace=sumRun,
-                                       Tolerance=self.COMPRESS_TOL_TOF)  # 10ns
-                # after adding all events, delete the current workspace.
-                api.DeleteWorkspace(out_ws_name)
-            # ENDIF
-        # ENDFOR (processing each)
+        if final_name is None:
+            final_name = getBasename(filenames[0])
+        api.AlignAndFocusPowderFromFiles(Filename=','.join(filenames),
+                                         OutputWorkspace=final_name,
+                                         MaxChunkSize=self._chunks,
+                                         FilterBadPulses=self._filterBadPulses,
+                                         Characterizations=self._charTable,
+                                         CacheDir=self.getProperty("CacheDir").value,
+                                         CalFileName=self.calib,
+                                         GroupFilename=self.getProperty("GroupingFile").value,
+                                         Params=self._binning,
+                                         ResampleX=self._resampleX,
+                                         Dspacing=self._bin_in_dspace,
+                                         PreserveEvents=preserveEvents,
+                                         RemovePromptPulseWidth=self._removePromptPulseWidth,
+                                         CompressTolerance=self.COMPRESS_TOL_TOF,
+                                         UnwrapRef=self._LRef,
+                                         LowResRef=self._DIFCref,
+                                         LowResSpectrumOffset=self._lowResTOFoffset,
+                                         CropWavelengthMin=self._wavelengthMin,
+                                         CropWavelengthMax=self._wavelengthMax,
+                                         FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                         WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value,
+                                         ReductionProperties="__snspowderreduction_inner",
+                                         **self._focusPos)
+
+        #TODO make sure that this funny function is called
+        #self.checkInfoMatch(info, tempinfo)
 
         if self._normalisebycurrent is True:
-            api.NormaliseByCurrent(InputWorkspace=sumRun,
-                                   OutputWorkspace=sumRun,
+            api.NormaliseByCurrent(InputWorkspace=final_name,
+                                   OutputWorkspace=final_name,
                                    RecalculatePCharge=True)
-            get_workspace(sumRun).getRun()['gsas_monitor'] = 1
+            get_workspace(final_name).getRun()['gsas_monitor'] = 1
 
-        return sumRun
+        return final_name
 
     #pylint: disable=too-many-arguments,too-many-locals,too-many-branches
-    def _focusChunks(self, filename, filter_wall, calib,  # noqa
-                     normalisebycurrent, splitwksp=None, preserveEvents=True,
+    def _focusChunks(self, filename, filter_wall=(0.,0.),  # noqa
+                     splitwksp=None, preserveEvents=True,
                      reload_if_loaded=True):  # noqa
         """
         Load, (optional) split and focus data in chunks
         @param filename: integer for run number
         @param filter_wall:  Enabled if splitwksp is defined
-        @param calib:
         @param normalisebycurrent: Set to False if summing runs for correct math
         @param splitwksp: SplittersWorkspace (if None then no split)
         @param preserveEvents:
@@ -903,7 +875,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                 self.log().notice('Align and focus workspace %s' % out_ws_name_chunk_split)
                 api.AlignAndFocusPowder(InputWorkspace=out_ws_name_chunk_split,
                                         OutputWorkspace=out_ws_name_chunk_split,
-                                        CalFileName=calib,
+                                        CalFileName=self.calib,
                                         GroupFilename=self.getProperty("GroupingFile").value,
                                         Params=self._binning,
                                         ResampleX=self._resampleX,
@@ -987,7 +959,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                    OutputWorkspace=output_wksp_list[split_index],
                                    Tolerance=self.COMPRESS_TOL_TOF)  # 100ns
             try:
-                if normalisebycurrent is True:
+                if self._normalisebycurrent is True:
                     api.NormaliseByCurrent(InputWorkspace=output_wksp_list[split_index],
                                            OutputWorkspace=output_wksp_list[split_index],
                                            RecalculatePCharge=True)
@@ -1028,24 +1000,14 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         assert self.does_workspace_exist(wksp_name)
 
         # Determine characterization
-        if mtd.doesExist("characterizations"):
-            # get the correct row of the table if table workspace 'charactersizations' exists
-            api.PDDetermineCharacterizations(InputWorkspace=wksp_name,
-                                             Characterizations="characterizations",
-                                             ReductionProperties="__snspowderreduction",
-                                             BackRun=self.getProperty("BackgroundNumber").value,
-                                             NormRun=self.getProperty("VanadiumNumber").value,
-                                             NormBackRun=self.getProperty("VanadiumBackgroundNumber").value,
-                                             FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
-                                             WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
-        else:
-            api.PDDetermineCharacterizations(InputWorkspace=wksp_name,
-                                             ReductionProperties="__snspowderreduction",
-                                             BackRun=self.getProperty("BackgroundNumber").value,
-                                             NormRun=self.getProperty("VanadiumNumber").value,
-                                             NormBackRun=self.getProperty("VanadiumBackgroundNumber").value,
-                                             FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
-                                             WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
+        api.PDDetermineCharacterizations(InputWorkspace=wksp_name,
+                                         Characterizations=self._charTable,
+                                         ReductionProperties="__snspowderreduction",
+                                         BackRun=self.getProperty("BackgroundNumber").value,
+                                         NormRun=self.getProperty("VanadiumNumber").value,
+                                         NormBackRun=self.getProperty("VanadiumBackgroundNumber").value,
+                                         FrequencyLogNames=self.getProperty("FrequencyLogNames").value,
+                                         WaveLengthLogNames=self.getProperty("WaveLengthLogNames").value)
 
         # convert the result into a dict
         return PropertyManagerDataService.retrieve("__snspowderreduction")
@@ -1268,8 +1230,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
         return do_split_raw_wksp, num_out_wksp
 
-    def _process_container_runs(self, can_run_numbers, timeFilterWall, samRunIndex, calib,
-                                preserveEvents):
+    def _process_container_runs(self, can_run_numbers, samRunIndex, preserveEvents):
         """ Process container runs
         :param can_run_numbers:
         :return:
@@ -1282,15 +1243,6 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         else:
             # reduce container run such that it can be removed from sample run
 
-            # set up the filters
-            if self.getProperty("FilterCharacterizations").value:
-                # use common time filter
-                canFilterWall = timeFilterWall
-            else:
-                # no time filter
-                canFilterWall = (0., 0.)
-            # END-IF
-
             if len(can_run_numbers) == 1:
                 # only 1 container run
                 can_run_number = can_run_numbers[0]
@@ -1306,19 +1258,10 @@ class SNSPowderReduction(DataProcessorAlgorithm):
                                  OutputWorkspace=can_run_ws_name,
                                  Target="TOF")
             else:
-                # load the container run
+                fileArg = [can_run_number]
                 if self.getProperty("Sum").value:
-                    can_run_ws_name = self._focusAndSum(can_run_numbers, canFilterWall, calib,
-                                                        preserveEvents=preserveEvents)
-                else:
-                    can_run_ws_name = self._focusChunks(can_run_number, canFilterWall, calib,
-                                                        normalisebycurrent=self._normalisebycurrent,
-                                                        preserveEvents=preserveEvents)
-
-                # convert unit to TOF
-                api.ConvertUnits(InputWorkspace=can_run_ws_name,
-                                 OutputWorkspace=can_run_ws_name,
-                                 Target="TOF")
+                    fileArg = can_run_numbers
+                self._focusAndSum(fileArg, preserveEvents, final_name=can_run_ws_name)
 
                 # smooth background
                 smoothParams = self.getProperty("BackgroundSmoothParams").value
@@ -1335,7 +1278,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
         return can_run_ws_name
 
-    def _process_vanadium_runs(self, van_run_number_list, timeFilterWall, samRunIndex, calib, **dummy_focuspos):
+    def _process_vanadium_runs(self, van_run_number_list, samRunIndex, **dummy_focuspos):
         """
         Purpose: process vanadium runs
         Requirements: if more than 1 run in given run number list, then samRunIndex must be given.
@@ -1343,7 +1286,6 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         :param van_run_number_list: list of vanadium run
         :param timeFilterWall: time filter wall
         :param samRunIndex: sample run index
-        :param calib: calibration run
         :param focuspos:
         :return:
         """
@@ -1364,18 +1306,12 @@ class SNSPowderReduction(DataProcessorAlgorithm):
         else:
             # Explicitly load, reduce and correct vanadium runs
 
-            # set up filter wall for van run
-            if self.getProperty("FilterCharacterizations").value:
-                vanFilterWall = {'FilterByTimeStart': timeFilterWall[0], 'FilterByTimeStop': timeFilterWall[1]}
-            else:
-                vanFilterWall = {'FilterByTimeStart': Property.EMPTY_DBL, 'FilterByTimeStop': Property.EMPTY_DBL}
-
             # load the vanadium
             van_run_ws_name = getBasename(van_run_number)
             if self.getProperty("Sum").value:
-                van_run_ws_name = self._loadAndSum(van_run_number_list, van_run_ws_name, **vanFilterWall)
+                van_run_ws_name = self._loadAndSum(van_run_number_list, van_run_ws_name)
             else:
-                van_run_ws_name = self._loadAndSum([van_run_number], van_run_ws_name, **vanFilterWall)
+                van_run_ws_name = self._loadAndSum([van_run_number], van_run_ws_name)
 
             # load the vanadium background (if appropriate)
             van_bkgd_run_number_list = self._info["vanadium_background"].value
@@ -1391,9 +1327,9 @@ class SNSPowderReduction(DataProcessorAlgorithm):
 
                 # load background runs and sum if necessary
                 if self.getProperty("Sum").value:
-                    van_bkgd_ws_name = self._loadAndSum(van_bkgd_run_number_list, van_bkgd_ws_name, **vanFilterWall)
+                    van_bkgd_ws_name = self._loadAndSum(van_bkgd_run_number_list, van_bkgd_ws_name)
                 else:
-                    van_bkgd_ws_name = self._loadAndSum([van_bkgd_run_number], van_bkgd_ws_name, **vanFilterWall)
+                    van_bkgd_ws_name = self._loadAndSum([van_bkgd_run_number], van_bkgd_ws_name)
 
                 van_bkgd_ws = get_workspace(van_bkgd_ws_name)
                 if van_bkgd_ws.id() == EVENT_WORKSPACE_ID and van_bkgd_ws.getNumberEvents() <= 0:
@@ -1437,7 +1373,7 @@ class SNSPowderReduction(DataProcessorAlgorithm):
             self.log().warning('Reducing vanadium run %s.' % van_run_ws_name)
             api.AlignAndFocusPowder(InputWorkspace=van_run_ws_name,
                                     OutputWorkspace=van_run_ws_name,
-                                    CalFileName=calib,
+                                    CalFileName=self.calib,
                                     GroupFilename=self.getProperty("GroupingFile").value,
                                     Params=self._binning,
                                     ResampleX=self._resampleX,
diff --git a/Framework/PythonInterface/plugins/algorithms/SaveYDA.py b/Framework/PythonInterface/plugins/algorithms/SaveYDA.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3000cb3b85e66c83a5dfbeda526efc43d87c822
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/SaveYDA.py
@@ -0,0 +1,316 @@
+from __future__ import absolute_import, division, print_function
+
+from mantid.api import PythonAlgorithm, AlgorithmFactory, MatrixWorkspaceProperty, WorkspaceUnitValidator, \
+                       InstrumentValidator, FileProperty, FileAction
+from mantid.kernel import Direction, CompositeValidator
+from mantid.dataobjects import Workspace2D
+
+import yaml
+from yaml import Dumper
+
+from collections import OrderedDict
+
+import math
+
+
+class SaveYDA(PythonAlgorithm):
+    """ Save data in yaml/frida 2.0 format from a Workspace2D.
+    """
+
+    def category(self):
+        """Return category
+        """
+        return "DataHandling\\Text"
+
+    def name(self):
+        """Return name
+        """
+        return "SaveYDA"
+
+    def summary(self):
+        """Return summary
+        """
+        return "Save Workspace to a Frida 2.0 yaml format"
+
+    def PyInit(self):
+        """Declare properties
+        """
+        wsValidators = CompositeValidator()
+        # X axis must be a NumericAxis in energy transfer units.
+        wsValidators.add(WorkspaceUnitValidator("DeltaE"))
+        # Workspace must have an Instrument
+        wsValidators.add(InstrumentValidator())
+
+        self.declareProperty(MatrixWorkspaceProperty(name="InputWorkspace", defaultValue="", direction=Direction.Input,
+                             validator=wsValidators), doc="Workspace name for input")
+        self.declareProperty(FileProperty(name="Filename", defaultValue="", action=FileAction.Save, extensions=""),
+                             doc="The name to use when writing the file")
+
+    def validateInputs(self):
+        """Basic validation for inputs.
+        :return: issues with not valid Inputs in dictionary
+        """
+        issues = dict()
+        # Only MomentumTransfer is allowed
+        allowUn = "MomentumTransfer"
+        ws = self.getProperty("InputWorkspace").value
+        # Y axis must be either a SpectrumAxis or a NumericAxis in q units.
+        # workspace must be a Workspace2D
+        if ws:
+            ax = ws.getAxis(1)
+
+            if not ax.isSpectra() and ax.getUnit().unitID() != allowUn:
+                issues["InputWorkspace"] = "Y axis is not 'Spectrum Axis' or 'Momentum Transfer'"
+
+            if not isinstance(ws, Workspace2D):
+                issues["InputWorkspace"] = "Input Workspace is not a Workspace2D"
+
+        return issues
+
+    def PyExec(self):
+        """ Main execution body
+        """
+        # Properties
+        ws = self.getProperty("InputWorkspace").value
+        filename = self.getProperty("Filename").value
+
+        run = ws.getRun()
+        ax = ws.getAxis(1)
+        nHist = ws.getNumberHistograms()
+
+        # check sample logs exists
+        if len(run.getLogData()) == 0:
+            raise NotImplementedError("No sample log data exist in workspace: "
+                                      + self.getPropertyValue("InputWorkspace"))
+
+        # save sample log data in lists, commented sequences an commented maps
+        # commented sequences and maps are used to keep Data in the order they get inserted
+        # if a log does not exist a warning is written on the log and the data is not saved in the file
+
+        metadata = OrderedDict()
+
+        metadata["format"] = "yaml/frida 2.0"
+        metadata["type"] = "generic tabular data"
+
+        hist = []
+
+        if run.hasProperty("proposal_number"):
+            propn = "Proposal number " + run.getLogData("proposal_number").value
+            hist.append(propn)
+        else:
+            self.log().warning("no proposal number found")
+
+        if run.hasProperty("proposal_title"):
+            propt = run.getLogData("proposal_title").value
+            hist.append(propt)
+        else:
+            self.log().warning("no proposal title found")
+
+        if run.hasProperty("experiment_team"):
+            expt = run.getLogData("experiment_team").value
+            hist.append(expt)
+        else:
+            self.log().warning("no experiment team found")
+
+        hist.append("data reduced with mantid")
+
+        rpar = []
+
+        if run.hasProperty("temperature"):
+            temperature = float(run.getLogData("temperature").value)
+
+            temp = OrderedDict()
+            temp["name"] = "T"
+            temp["unit"] = "K"
+            temp["val"] = round(temperature, 14)
+            temp["stdv"] = 0
+
+            rpar.append(temp)
+        else:
+            self.log().warning("no temperature found")
+
+        if run.hasProperty("Ei"):
+            eimeV = float(run.getLogData("Ei").value)
+
+            ei = OrderedDict()
+            ei["name"] = "Ei"
+            ei["unit"] = "meV"
+            ei["val"] = round(eimeV, 14)
+            ei["stdv"] = 0
+
+            rpar.append(ei)
+        else:
+            self.log().warning("no Ei found")
+
+        coord = OrderedDict()
+
+        x = FlowOrderedDict()
+
+        x["name"] = "w"
+        x["unit"] = "meV"
+
+        coord["x"] = x
+
+        y = FlowOrderedDict()
+
+        y["name"] = "S(q,w)"
+        y["unit"] = "meV-1"
+
+        coord["y"] = y
+
+        z = FlowOrderedDict()
+
+        if ax.isSpectra():
+            zname = "2th"
+            zunit = "deg"
+        else:
+            zname = "q"
+            zunit = "A-1"
+
+        z["name"] = zname
+        z["unit"] = zunit
+
+        coord["z"] = FlowList()
+        coord["z"].append(z)
+
+        slices = []
+
+        bin = []
+
+        # if y axis is SpectrumAxis
+        if ax.isSpectra:
+            samplePos = ws.getInstrument().getSample().getPos()
+            sourcePos = ws.getInstrument().getSource().getPos()
+            beamPos = samplePos - sourcePos
+            for i in range(nHist):
+                detector = ws.getDetector(i)
+                # convert radians to degrees
+                twoTheta = detector.getTwoTheta(samplePos, beamPos)*180/math.pi
+                twoTheta = round(twoTheta, 14)
+                bin.append(twoTheta)
+        elif ax.length() == nHist:
+            # if y axis contains bin centers
+            for i in range(ax.length()):
+                xval = round(ax.getValue(), 14)
+                bin.append(xval)
+        else:
+            # get the bin centers not the bin edges
+            bin = self._get_bin_centers(ax)
+
+        for i in range(nHist):
+
+            slicethis = OrderedDict()
+
+            # add j to slices, j = counts
+            slicethis["j"] = i
+
+            # save in list and commented Map to keep format
+            val = FlowOrderedDict()
+            val["val"] = bin[i]
+            # z is bin centers of y axis, SpectrumAxis or NumericAxis in q units
+            slicethis["z"] = FlowList()
+            slicethis["z"].append(val)
+
+            xax = ws.readX(i)
+            # get the bin centers not the bin edges
+            xcenters = self._get_bin_centers(xax)
+            # x axis is NumericAxis in energy transfer units
+            xx = [float(j) for j in xcenters]
+            slicethis["x"] = FlowList(xx)
+
+            ys = ws.dataY(i)
+            # y is dataY of the workspace
+            yy = [float(round(j, 14)) for j in ys]
+            slicethis["y"] = FlowList(yy)
+
+            slices.append(slicethis)
+
+        data = OrderedDict()
+
+        data["Meta"] = metadata
+        data["History"] = hist
+        data["Coord"] = coord
+        data["RPar"] = rpar
+        data["Slices"] = slices
+        data["Slices"] = slices
+
+        # create yaml file
+        try:
+            with open(filename, "w") as outfile:
+                yaml.dump(data, outfile, default_flow_style=False, canonical=False, Dumper=MyDumper)
+                outfile.close()
+        except:
+            raise RuntimeError("Can't write in File" + filename)
+
+    def _get_bin_centers(self, ax):
+        """ calculates the bin centers from the bin edges
+        :param ax: bin center axis
+        :return: list of bin centers
+        """
+        bin = []
+
+        for i in range(1, ax.size):
+            axval = round((ax[i]+ax[i-1])/2, 14)
+            bin.append(axval)
+
+        return bin
+
+
+class MyDumper(Dumper):
+    """ regulates the indent for yaml Dumper
+    """
+    def increase_indent(self, flow=False, indentless=False):
+        return super(MyDumper, self).increase_indent(flow, False)
+
+
+class FlowOrderedDict(OrderedDict):
+    """ Helper class to switch between flow style and no flow style
+
+    Equal to OrderedDict class but other yaml representer
+    """
+    pass
+
+
+class FlowList(list):
+    """ Helper class to switch between flow style and no flow style
+
+    Equal to list class but other yaml representer
+    """
+    pass
+
+
+def _flow_list_rep(dumper, data):
+    """Yaml representer for list in flow style
+    """
+    return dumper.represent_sequence(u'tag:yaml.org,2002:seq', data, flow_style=True)
+
+
+def _flow_ord_dic_rep(dumper, data):
+    """Yaml representer for OrderedDict in flow style
+    """
+    return dumper.represent_mapping(u'tag:yaml.org,2002:map', data, flow_style=True)
+
+
+def _represent_ordered_dict(dumper, data):
+    """Yaml representer for OrderedDict
+
+    regulates dumping for class OrderedDict
+    """
+    value = []
+
+    for item_key, item_value in data.items():
+        node_key = dumper.represent_data(item_key)
+        node_value = dumper.represent_data(item_value)
+
+        value.append((node_key, node_value))
+
+    return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
+
+# Adding representers to yaml
+yaml.add_representer(OrderedDict, _represent_ordered_dict)
+yaml.add_representer(FlowList, _flow_list_rep)
+yaml.add_representer(FlowOrderedDict, _flow_ord_dic_rep)
+
+#---------------------------------------------------------------------------------------------------------------------#
+
+AlgorithmFactory.subscribe(SaveYDA)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py
index ce10d94d7bf42eb6b46abc3712bd0631a6561538..cd3908232e0272f728eac9a6c53407b3edce6e3f 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesQuasi.py
@@ -1,4 +1,4 @@
-#pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init,redefined-builtin
+# pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init,redefined-builtin
 from __future__ import (absolute_import, division, print_function)
 from six.moves import range
 from six import next
@@ -13,16 +13,16 @@ from mantid.kernel import StringListValidator, Direction
 import mantid.simpleapi as s_api
 from mantid import config, logger
 from IndirectCommon import *
+
 MTD_PLOT = import_mantidplot()
 
 if is_supported_f2py_platform():
-    QLr     = import_f2py("QLres")
-    QLd     = import_f2py("QLdata")
-    Qse     = import_f2py("QLse")
+    QLr = import_f2py("QLres")
+    QLd = import_f2py("QLdata")
+    Qse = import_f2py("QLse")
 
 
 class BayesQuasi(PythonAlgorithm):
-
     _program = None
     _samWS = None
     _resWS = None
@@ -42,16 +42,16 @@ class BayesQuasi(PythonAlgorithm):
         return "Workflow\\MIDAS"
 
     def summary(self):
-        return "This algorithm runs the Fortran QLines programs which fits a Delta function of"+\
-               " amplitude 0 and Lorentzians of amplitude A(j) and HWHM W(j) where j=1,2,3. The"+\
-               " whole function is then convoled with the resolution function."
+        return "This algorithm runs the Fortran QLines programs which fits a Delta function of" + \
+               " amplitude 0 and Lorentzians of amplitude A(j) and HWHM W(j) where j=1,2,3. The" + \
+               " whole function is then convolved with the resolution function."
 
     def version(self):
         return 1
 
     def PyInit(self):
         self.declareProperty(name='Program', defaultValue='QL',
-                             validator=StringListValidator(['QL','QSe']),
+                             validator=StringListValidator(['QL', 'QSe']),
                              doc='The type of program to run (either QL or QSe)')
 
         self.declareProperty(MatrixWorkspaceProperty('SampleWorkspace', '', direction=Direction.Input),
@@ -81,7 +81,7 @@ class BayesQuasi(PythonAlgorithm):
                              doc='Fit option for using the elastic peak')
 
         self.declareProperty(name='Background', defaultValue='Flat',
-                             validator=StringListValidator(['Sloping','Flat','Zero']),
+                             validator=StringListValidator(['Sloping', 'Flat', 'Zero']),
                              doc='Fit option for the type of background')
 
         self.declareProperty(name='FixedWidth', defaultValue=True,
@@ -131,7 +131,7 @@ class BayesQuasi(PythonAlgorithm):
         self._wfile = self.getPropertyValue('WidthFile')
         self._loop = self.getProperty('Loop').value
 
-    #pylint: disable=too-many-locals,too-many-statements
+    # pylint: disable=too-many-locals,too-many-statements
     def PyExec(self):
 
         # Check for platform support
@@ -142,18 +142,18 @@ class BayesQuasi(PythonAlgorithm):
             raise RuntimeError(unsupported_msg)
 
         from IndirectBayes import (CalcErange, GetXYE)
-        setup_prog = Progress(self, start=0.0, end=0.3, nreports = 5)
+        setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
         self.log().information('BayesQuasi input')
 
         erange = [self._e_min, self._e_max]
         nbins = [self._sam_bins, self._res_bins]
         setup_prog.report('Converting to binary for Fortran')
-        #convert true/false to 1/0 for fortran
+        # convert true/false to 1/0 for fortran
         o_el = 1 if self._elastic else 0
         o_w1 = 1 if self._width else 0
         o_res = 1 if self._res_norm else 0
 
-        #fortran code uses background choices defined using the following numbers
+        # fortran code uses background choices defined using the following numbers
         setup_prog.report('Encoding input options')
         if self._background == 'Sloping':
             o_bgd = 2
@@ -170,11 +170,11 @@ class BayesQuasi(PythonAlgorithm):
             workdir = os.getcwd()
             logger.information('Default Save directory is not set. Defaulting to current working Directory: ' + workdir)
 
-        array_len = 4096                           # length of array in Fortran
+        array_len = 4096  # length of array in Fortran
         setup_prog.report('Checking X Range')
-        CheckXrange(erange,'Energy')
+        CheckXrange(erange, 'Energy')
 
-        nbin,nrbin = nbins[0], nbins[1]
+        nbin, nrbin = nbins[0], nbins[1]
 
         logger.information('Sample is ' + self._samWS)
         logger.information('Resolution is ' + self._resWS)
@@ -195,16 +195,16 @@ class BayesQuasi(PythonAlgorithm):
         erange = [self._e_min, self._e_max]
 
         setup_prog.report('Checking Analysers')
-        CheckAnalysers(self._samWS,self._resWS)
+        CheckAnalysers(self._samWS, self._resWS)
         setup_prog.report('Obtaining EFixed, theta and Q')
         efix = getEfixed(self._samWS)
         theta, Q = GetThetaQ(self._samWS)
 
-        nsam,ntc = CheckHistZero(self._samWS)
+        nsam, ntc = CheckHistZero(self._samWS)
 
         totalNoSam = nsam
 
-        #check if we're performing a sequential fit
+        # check if we're performing a sequential fit
         if not self._loop:
             nsam = 1
 
@@ -213,54 +213,54 @@ class BayesQuasi(PythonAlgorithm):
         setup_prog.report('Checking Histograms')
         if self._program == 'QL':
             if nres == 1:
-                prog = 'QLr'                        # res file
+                prog = 'QLr'  # res file
             else:
-                prog = 'QLd'                        # data file
-                CheckHistSame(self._samWS,'Sample',self._resWS,'Resolution')
+                prog = 'QLd'  # data file
+                CheckHistSame(self._samWS, 'Sample', self._resWS, 'Resolution')
         elif self._program == 'QSe':
             if nres == 1:
-                prog = 'QSe'                        # res file
+                prog = 'QSe'  # res file
             else:
                 raise ValueError('Stretched Exp ONLY works with RES file')
 
-        logger.information('Version is ' +prog)
-        logger.information(' Number of spectra = '+str(nsam))
-        logger.information(' Erange : '+str(erange[0])+' to '+str(erange[1]))
+        logger.information('Version is ' + prog)
+        logger.information(' Number of spectra = ' + str(nsam))
+        logger.information(' Erange : ' + str(erange[0]) + ' to ' + str(erange[1]))
 
         setup_prog.report('Reading files')
-        Wy,We = self._read_width_file(self._width,self._wfile,totalNoSam)
-        dtn,xsc = self._read_norm_file(self._res_norm,self._resnormWS,totalNoSam)
+        Wy, We = self._read_width_file(self._width, self._wfile, totalNoSam)
+        dtn, xsc = self._read_norm_file(self._res_norm, self._resnormWS, totalNoSam)
 
         setup_prog.report('Establishing output workspace name')
-        fname = self._samWS[:-4] + '_'+ prog
+        fname = self._samWS[:-4] + '_' + prog
         probWS = fname + '_Prob'
         fitWS = fname + '_Fit'
-        wrks=os.path.join(workdir, self._samWS[:-4])
-        logger.information(' lptfile : '+wrks+'_'+prog+'.lpt')
-        lwrk=len(wrks)
-        wrks.ljust(140,' ')
-        wrkr=self._resWS
-        wrkr.ljust(140,' ')
+        wrks = os.path.join(workdir, self._samWS[:-4])
+        logger.information(' lptfile : ' + wrks + '_' + prog + '.lpt')
+        lwrk = len(wrks)
+        wrks.ljust(140, ' ')
+        wrkr = self._resWS
+        wrkr.ljust(140, ' ')
 
         setup_prog.report('Initialising probability list')
         # initialise probability list
         if self._program == 'QL':
             prob0, prob1, prob2 = [], [], []
         xQ = np.array([Q[0]])
-        for m in range(1,nsam):
-            xQ = np.append(xQ,Q[m])
+        for m in range(1, nsam):
+            xQ = np.append(xQ, Q[m])
         xProb = xQ
-        xProb = np.append(xProb,xQ)
-        xProb = np.append(xProb,xQ)
-        eProb = np.zeros(3*nsam)
+        xProb = np.append(xProb, xQ)
+        xProb = np.append(xProb, xQ)
+        eProb = np.zeros(3 * nsam)
 
         group = ''
-        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam*3)
-        for spectrum in range(0,nsam):
-            logger.information('Group ' +str(spectrum)+ ' at angle '+ str(theta[spectrum]))
-            nsp = spectrum+1
+        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam * 3)
+        for spectrum in range(0, nsam):
+            logger.information('Group ' + str(spectrum) + ' at angle ' + str(theta[spectrum]))
+            nsp = spectrum + 1
 
-            nout,bnorm,Xdat,Xv,Yv,Ev = CalcErange(self._samWS,spectrum,erange,nbin)
+            nout, bnorm, Xdat, Xv, Yv, Ev = CalcErange(self._samWS, spectrum, erange, nbin)
             Ndat = nout[0]
             Imin = nout[1]
             Imax = nout[2]
@@ -268,33 +268,33 @@ class BayesQuasi(PythonAlgorithm):
                 mm = spectrum
             else:
                 mm = 0
-            Nb,Xb,Yb,Eb = GetXYE(self._resWS,mm,array_len)     # get resolution data
+            Nb, Xb, Yb, Eb = GetXYE(self._resWS, mm, array_len)  # get resolution data
             numb = [nsam, nsp, ntc, Ndat, nbin, Imin, Imax, Nb, nrbin]
             rscl = 1.0
             reals = [efix, theta[spectrum], rscl, bnorm]
 
             if prog == 'QLr':
                 workflow_prog.report('Processing Sample number %i as Lorentzian' % spectrum)
-                nd,xout,yout,eout,yfit,yprob=QLr.qlres(numb,Xv,Yv,Ev,reals,fitOp,
-                                                       Xdat,Xb,Yb,Wy,We,dtn,xsc,
-                                                       wrks,wrkr,lwrk)
-                message = ' Log(prob) : '+str(yprob[0])+' '+str(yprob[1])+' '+str(yprob[2])+' '+str(yprob[3])
+                nd, xout, yout, eout, yfit, yprob = QLr.qlres(numb, Xv, Yv, Ev, reals, fitOp,
+                                                              Xdat, Xb, Yb, Wy, We, dtn, xsc,
+                                                              wrks, wrkr, lwrk)
+                message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
                 logger.information(message)
             if prog == 'QLd':
                 workflow_prog.report('Processing Sample number %i' % spectrum)
-                nd,xout,yout,eout,yfit,yprob=QLd.qldata(numb,Xv,Yv,Ev,reals,fitOp,
-                                                        Xdat,Xb,Yb,Eb,Wy,We,
-                                                        wrks,wrkr,lwrk)
-                message = ' Log(prob) : '+str(yprob[0])+' '+str(yprob[1])+' '+str(yprob[2])+' '+str(yprob[3])
+                nd, xout, yout, eout, yfit, yprob = QLd.qldata(numb, Xv, Yv, Ev, reals, fitOp,
+                                                               Xdat, Xb, Yb, Eb, Wy, We,
+                                                               wrks, wrkr, lwrk)
+                message = ' Log(prob) : ' + str(yprob[0]) + ' ' + str(yprob[1]) + ' ' + str(yprob[2]) + ' ' + str(yprob[3])
                 logger.information(message)
             if prog == 'QSe':
                 workflow_prog.report('Processing Sample number %i as Stretched Exp' % spectrum)
-                nd,xout,yout,eout,yfit,yprob=Qse.qlstexp(numb,Xv,Yv,Ev,reals,fitOp,
-                                                         Xdat,Xb,Yb,Wy,We,dtn,xsc,
-                                                         wrks,wrkr,lwrk)
+                nd, xout, yout, eout, yfit, yprob = Qse.qlstexp(numb, Xv, Yv, Ev, reals, fitOp,
+                                                                Xdat, Xb, Yb, Wy, We, dtn, xsc,
+                                                                wrks, wrkr, lwrk)
             dataX = xout[:nd]
-            dataX = np.append(dataX,2*xout[nd-1]-xout[nd-2])
-            yfit_list = np.split(yfit[:4*nd],4)
+            dataX = np.append(dataX, 2 * xout[nd - 1] - xout[nd - 2])
+            yfit_list = np.split(yfit[:4 * nd], 4)
             dataF1 = yfit_list[1]
             if self._program == 'QL':
                 dataF2 = yfit_list[2]
@@ -303,25 +303,25 @@ class BayesQuasi(PythonAlgorithm):
             datX = dataX
             datY = yout[:nd]
             datE = eout[:nd]
-            datX = np.append(datX,dataX)
-            datY = np.append(datY,dataF1[:nd])
-            datE = np.append(datE,dataG)
+            datX = np.append(datX, dataX)
+            datY = np.append(datY, dataF1[:nd])
+            datE = np.append(datE, dataG)
             res1 = dataF1[:nd] - yout[:nd]
-            datX = np.append(datX,dataX)
-            datY = np.append(datY,res1)
-            datE = np.append(datE,dataG)
+            datX = np.append(datX, dataX)
+            datY = np.append(datY, res1)
+            datE = np.append(datE, dataG)
             nsp = 3
             names = 'data,fit.1,diff.1'
             res_plot = [0, 1, 2]
             if self._program == 'QL':
                 workflow_prog.report('Processing Lorentzian result data')
-                datX = np.append(datX,dataX)
-                datY = np.append(datY,dataF2[:nd])
-                datE = np.append(datE,dataG)
+                datX = np.append(datX, dataX)
+                datY = np.append(datY, dataF2[:nd])
+                datE = np.append(datE, dataG)
                 res2 = dataF2[:nd] - yout[:nd]
-                datX = np.append(datX,dataX)
-                datY = np.append(datY,res2)
-                datE = np.append(datE,dataG)
+                datX = np.append(datX, dataX)
+                datY = np.append(datY, res2)
+                datE = np.append(datE, dataG)
                 nsp += 2
                 names += ',fit.2,diff.2'
                 res_plot.append(4)
@@ -330,8 +330,8 @@ class BayesQuasi(PythonAlgorithm):
                 prob2.append(yprob[2])
 
             # create result workspace
-            fitWS = fname+'_Workspaces'
-            fout = fname+'_Workspace_'+ str(spectrum)
+            fitWS = fname + '_Workspaces'
+            fout = fname + '_Workspace_' + str(spectrum)
 
             workflow_prog.report('Creating OutputWorkspace')
             s_api.CreateWorkspace(OutputWorkspace=fout, DataX=datX, DataY=datY, DataE=datE,
@@ -342,29 +342,32 @@ class BayesQuasi(PythonAlgorithm):
 
         comp_prog = Progress(self, start=0.7, end=0.8, nreports=2)
         comp_prog.report('Creating Group Workspace')
-        s_api.GroupWorkspaces(InputWorkspaces=group,OutputWorkspace=fitWS)
+        s_api.GroupWorkspaces(InputWorkspaces=group, OutputWorkspace=fitWS)
 
         if self._program == 'QL':
             comp_prog.report('Processing Lorentzian probability data')
             yPr0 = np.array([prob0[0]])
             yPr1 = np.array([prob1[0]])
             yPr2 = np.array([prob2[0]])
-            for m in range(1,nsam):
-                yPr0 = np.append(yPr0,prob0[m])
-                yPr1 = np.append(yPr1,prob1[m])
-                yPr2 = np.append(yPr2,prob2[m])
+            for m in range(1, nsam):
+                yPr0 = np.append(yPr0, prob0[m])
+                yPr1 = np.append(yPr1, prob1[m])
+                yPr2 = np.append(yPr2, prob2[m])
             yProb = yPr0
-            yProb = np.append(yProb,yPr1)
-            yProb = np.append(yProb,yPr2)
+            yProb = np.append(yProb, yPr1)
+            yProb = np.append(yProb, yPr2)
             s_api.CreateWorkspace(OutputWorkspace=probWS, DataX=xProb, DataY=yProb, DataE=eProb,
                                   Nspec=3, UnitX='MomentumTransfer')
             outWS = self.C2Fw(fname)
         if self._program == 'QSe':
-            comp_prog.report('Runnning C2Se')
+            comp_prog.report('Running C2Se')
             outWS = self.C2Se(fname)
 
-        log_prog = Progress(self, start=0.8, end =1.0, nreports=8)
-        #Add some sample logs to the output workspaces
+        # Sort x axis
+        s_api.SortXAxis(InputWorkspace=outWS, OutputWorkspace=outWS, EnableLogging=False)
+
+        log_prog = Progress(self, start=0.8, end=1.0, nreports=8)
+        # Add some sample logs to the output workspaces
         log_prog.report('Copying Logs to outputWorkspace')
         s_api.CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=outWS)
         log_prog.report('Adding Sample logs to Output workspace')
@@ -373,13 +376,14 @@ class BayesQuasi(PythonAlgorithm):
         s_api.CopyLogs(InputWorkspace=self._samWS, OutputWorkspace=fitWS)
         log_prog.report('Adding sample logs to Fit workspace')
         self._add_sample_logs(fitWS, prog, erange, nbins)
-        log_prog.report('Finialising log copying')
+        log_prog.report('Finalising log copying')
 
         self.setProperty('OutputWorkspaceFit', fitWS)
         self.setProperty('OutputWorkspaceResult', outWS)
         log_prog.report('Setting workspace properties')
 
         if self._program == 'QL':
+            s_api.SortXAxis(InputWorkspace=probWS, OutputWorkspace=probWS, EnableLogging=False)
             self.setProperty('OutputWorkspaceProb', probWS)
 
     def _add_sample_logs(self, workspace, fit_program, e_range, binning):
@@ -413,9 +417,9 @@ class BayesQuasi(PythonAlgorithm):
         log_alg.execute()
 
     def C2Se(self, sname):
-        outWS = sname+'_Result'
-        asc = self._read_ascii_file(sname+'.qse')
-        var = asc[3].split()                            #split line on spaces
+        outWS = sname + '_Result'
+        asc = self._read_ascii_file(sname + '.qse')
+        var = asc[3].split()  # split line on spaces
         nspec = var[0]
         var = ExtractInt(asc[6])
         first = 7
@@ -429,8 +433,8 @@ class BayesQuasi(PythonAlgorithm):
         dataE = np.array([])
         data = np.array([dataX, dataY, dataE])
 
-        for _ in range(0,ns):
-            first,Q,_,fw,it,be = self.SeBlock(asc,first)
+        for _ in range(0, ns):
+            first, Q, _, fw, it, be = self.SeBlock(asc, first)
             Xout.append(Q)
             Yf.append(fw[0])
             Ef.append(fw[1])
@@ -461,9 +465,9 @@ class BayesQuasi(PythonAlgorithm):
     def _add_xye_data(self, data, xout, Y, E):
 
         dX, dY, dE = data[0], data[1], data[2]
-        dX = np.append(dX,np.array(xout))
-        dY = np.append(dY,np.array(Y))
-        dE = np.append(dE,np.array(E))
+        dX = np.append(dX, np.array(xout))
+        dY = np.append(dY, np.array(Y))
+        dE = np.append(dE, np.array(E))
         data = (dX, dY, dE)
 
         return dX, dY, dE, data
@@ -478,51 +482,51 @@ class BayesQuasi(PythonAlgorithm):
                 asc.append(line)
         return asc
 
-    def SeBlock(self, a, index):                                 #read Ascii block of Integers
+    def SeBlock(self, a, index):  # read Ascii block of Integers
         index += 1
-        val = ExtractFloat(a[index])               #Q,AMAX,HWHM
+        val = ExtractFloat(a[index])  # Q,AMAX,HWHM
         Q = val[0]
         AMAX = val[1]
         HWHM = val[2]
         index += 1
-        val = ExtractFloat(a[index])               #A0
-        int0 = [AMAX*val[0]]
+        val = ExtractFloat(a[index])  # A0
+        int0 = [AMAX * val[0]]
         index += 1
-        val = ExtractFloat(a[index])                #AI,FWHM index peak
-        fw = [2.*HWHM*val[1]]
-        integer = [AMAX*val[0]]
+        val = ExtractFloat(a[index])  # AI,FWHM index peak
+        fw = [2. * HWHM * val[1]]
+        integer = [AMAX * val[0]]
         index += 1
-        val = ExtractFloat(a[index])                 #SIG0
+        val = ExtractFloat(a[index])  # SIG0
         int0.append(val[0])
         index += 1
-        val = ExtractFloat(a[index])                  #SIG3K
-        integer.append(AMAX*math.sqrt(math.fabs(val[0])+1.0e-20))
+        val = ExtractFloat(a[index])  # SIG3K
+        integer.append(AMAX * math.sqrt(math.fabs(val[0]) + 1.0e-20))
         index += 1
-        val = ExtractFloat(a[index])                  #SIG1K
-        fw.append(2.0*HWHM*math.sqrt(math.fabs(val[0])+1.0e-20))
+        val = ExtractFloat(a[index])  # SIG1K
+        fw.append(2.0 * HWHM * math.sqrt(math.fabs(val[0]) + 1.0e-20))
         index += 1
-        be = ExtractFloat(a[index])                  #EXPBET
+        be = ExtractFloat(a[index])  # EXPBET
         index += 1
-        val = ExtractFloat(a[index])                  #SIG2K
-        be.append(math.sqrt(math.fabs(val[0])+1.0e-20))
+        val = ExtractFloat(a[index])  # SIG2K
+        be.append(math.sqrt(math.fabs(val[0]) + 1.0e-20))
         index += 1
-        return index, Q, int0 ,fw , integer, be                                      #values as list
+        return index, Q, int0, fw, integer, be  # values as list
 
-    def _get_res_norm(self, resnormWS,ngrp):
-        if ngrp == 0:                                # read values from WS
-            dtnorm = s_api.mtd[resnormWS+'_Intensity'].readY(0)
-            xscale = s_api.mtd[resnormWS+'_Stretch'].readY(0)
-        else:                                        # constant values
+    def _get_res_norm(self, resnormWS, ngrp):
+        if ngrp == 0:  # read values from WS
+            dtnorm = s_api.mtd[resnormWS + '_Intensity'].readY(0)
+            xscale = s_api.mtd[resnormWS + '_Stretch'].readY(0)
+        else:  # constant values
             dtnorm = []
             xscale = []
-            for _ in range(0,ngrp):
+            for _ in range(0, ngrp):
                 dtnorm.append(1.0)
                 xscale.append(1.0)
-        dtn=PadArray(dtnorm,51)                      # pad for Fortran call
-        xsc=PadArray(xscale,51)
-        return dtn,xsc
+        dtn = PadArray(dtnorm, 51)  # pad for Fortran call
+        xsc = PadArray(xscale, 51)
+        return dtn, xsc
 
-    def _read_norm_file(self, readRes,resnormWS,nsam):            # get norm & scale values
+    def _read_norm_file(self, readRes, resnormWS, nsam):  # get norm & scale values
         resnorm_root = resnormWS
         # Obtain root of resnorm group name
         if '_Intensity' in resnormWS:
@@ -530,25 +534,25 @@ class BayesQuasi(PythonAlgorithm):
         if '_Stretch' in resnormWS:
             resnorm_root = resnormWS[:-8]
 
-        if readRes:                   # use ResNorm file option=o_res
-            Xin = s_api.mtd[resnorm_root+'_Intensity'].readX(0)
-            nrm = len(Xin)                        # no. points from length of x array
+        if readRes:  # use ResNorm file option=o_res
+            Xin = s_api.mtd[resnorm_root + '_Intensity'].readX(0)
+            nrm = len(Xin)  # no. points from length of x array
             if nrm == 0:
                 raise ValueError('ResNorm file has no Intensity points')
-            Xin = s_api.mtd[resnorm_root+'_Stretch'].readX(0)  # no. points from length of x array
+            Xin = s_api.mtd[resnorm_root + '_Stretch'].readX(0)  # no. points from length of x array
             if len(Xin) == 0:
                 raise ValueError('ResNorm file has no xscale points')
-            if nrm != nsam:                # check that no. groups are the same
-                raise ValueError('ResNorm groups (' +str(nrm) + ') not = Sample (' +str(nsam) +')')
+            if nrm != nsam:  # check that no. groups are the same
+                raise ValueError('ResNorm groups (' + str(nrm) + ') not = Sample (' + str(nsam) + ')')
             else:
-                dtn,xsc = self._get_res_norm(resnorm_root,0)
+                dtn, xsc = self._get_res_norm(resnorm_root, 0)
         else:
             # do not use ResNorm file
-            dtn,xsc = self._get_res_norm(resnorm_root,nsam)
-        return dtn,xsc
+            dtn, xsc = self._get_res_norm(resnorm_root, nsam)
+        return dtn, xsc
 
-    #Reads in a width ASCII file
-    def _read_width_file(self, readWidth,widthFile,numSampleGroups):
+    # Reads in a width ASCII file
+    def _read_width_file(self, readWidth, widthFile, numSampleGroups):
         widthY, widthE = [], []
         if readWidth:
             logger.information('Width file is ' + widthFile)
@@ -566,72 +570,72 @@ class BayesQuasi(PythonAlgorithm):
             numLines = len(asc)
             if numLines == 0:
                 raise ValueError('No groups in width file')
-            if numLines != numSampleGroups:                # check that no. groups are the same
-                raise ValueError('Width groups (' +str(numLines) + ') not = Sample (' +str(numSampleGroups) +')')
+            if numLines != numSampleGroups:  # check that no. groups are the same
+                raise ValueError('Width groups (' + str(numLines) + ') not = Sample (' + str(numSampleGroups) + ')')
         else:
             # no file: just use constant values
             widthY = np.zeros(numSampleGroups)
             widthE = np.zeros(numSampleGroups)
         # pad for Fortran call
-        widthY = PadArray(widthY,51)
-        widthE = PadArray(widthE,51)
+        widthY = PadArray(widthY, 51)
+        widthE = PadArray(widthE, 51)
 
         return widthY, widthE
 
     def C2Fw(self, sname):
-        output_workspace = sname+'_Result'
+        output_workspace = sname + '_Result'
         num_spectra = 0
         axis_names = []
         x, y, e = [], [], []
-        for nl in range(1,4):
-            num_params = nl*3+1
+        for nl in range(1, 4):
+            num_params = nl * 3 + 1
             num_spectra += num_params
 
             amplitude_data, width_data = [], []
-            amplitude_error, width_error  = [], []
+            amplitude_error, width_error = [], []
 
-            #read data from file output by fortran code
-            file_name = sname + '.ql' +str(nl)
+            # read data from file output by fortran code
+            file_name = sname + '.ql' + str(nl)
             x_data, peak_data, peak_error = self._read_ql_file(file_name, nl)
             x_data = np.asarray(x_data)
 
             amplitude_data, width_data, height_data = peak_data
             amplitude_error, width_error, height_error = peak_error
 
-            #transpose y and e data into workspace rows
+            # transpose y and e data into workspace rows
             amplitude_data, width_data = np.asarray(amplitude_data).T, np.asarray(width_data).T
             amplitude_error, width_error = np.asarray(amplitude_error).T, np.asarray(width_error).T
             height_data, height_error = np.asarray(height_data), np.asarray(height_error)
 
-            #calculate EISF and EISF error
-            total = height_data+amplitude_data
+            # calculate EISF and EISF error
+            total = height_data + amplitude_data
             EISF_data = height_data / total
-            total_error = height_error**2 + amplitude_error**2
-            EISF_error = EISF_data * np.sqrt((height_error**2/height_data**2) + (total_error/total**2))
+            total_error = height_error ** 2 + amplitude_error ** 2
+            EISF_error = EISF_data * np.sqrt((height_error ** 2 / height_data ** 2) + (total_error / total ** 2))
 
-            #interlace amplitudes and widths of the peaks
+            # interlace amplitudes and widths of the peaks
             y.append(np.asarray(height_data))
             for amp, width, EISF in zip(amplitude_data, width_data, EISF_data):
                 y.append(amp)
                 y.append(width)
                 y.append(EISF)
 
-            #iterlace amplitude and width errors of the peaks
+            # interlace amplitude and width errors of the peaks
             e.append(np.asarray(height_error))
             for amp, width, EISF in zip(amplitude_error, width_error, EISF_error):
                 e.append(amp)
                 e.append(width)
                 e.append(EISF)
 
-            #create x data and axis names for each function
-            axis_names.append('f'+str(nl)+'.f0.'+'Height')
+            # create x data and axis names for each function
+            axis_names.append('f' + str(nl) + '.f0.' + 'Height')
             x.append(x_data)
-            for j in range(1,nl+1):
-                axis_names.append('f'+str(nl)+'.f'+str(j)+'.Amplitude')
+            for j in range(1, nl + 1):
+                axis_names.append('f' + str(nl) + '.f' + str(j) + '.Amplitude')
                 x.append(x_data)
-                axis_names.append('f'+str(nl)+'.f'+str(j)+'.FWHM')
+                axis_names.append('f' + str(nl) + '.f' + str(j) + '.FWHM')
                 x.append(x_data)
-                axis_names.append('f'+str(nl)+'.f'+str(j)+'.EISF')
+                axis_names.append('f' + str(nl) + '.f' + str(j) + '.EISF')
                 x.append(x_data)
 
         x = np.asarray(x).flatten()
@@ -644,83 +648,84 @@ class BayesQuasi(PythonAlgorithm):
         return output_workspace
 
     def _yield_floats(self, block):
-        #yield a list of floats from a list of lines of text
-        #encapsulates the iteration over a block of lines
+        # yield a list of floats from a list of lines of text
+        # encapsulates the iteration over a block of lines
         for line in block:
             yield ExtractFloat(line)
 
     def _read_ql_file(self, file_name, nl):
-        #offet to ignore header
+        # offset to ignore header
         header_offset = 8
-        block_size = 4+nl*3
+        block_size = 4 + nl * 3
 
         asc = self._read_ascii_file(file_name)
-        #extract number of blocks from the file header
+        # extract number of blocks from the file header
         num_blocks = int(ExtractFloat(asc[3])[0])
 
         q_data = []
         amp_data, FWHM_data, height_data = [], [], []
         amp_error, FWHM_error, height_error = [], [], []
 
-        #iterate over each block of fit parameters in the file
-        #each block corresponds to a single column in the final workspace
+        # iterate over each block of fit parameters in the file
+        # each block corresponds to a single column in the final workspace
         for block_num in range(num_blocks):
-            lower_index = header_offset+(block_size*block_num)
-            upper_index = lower_index+block_size
+            lower_index = header_offset + (block_size * block_num)
+            upper_index = lower_index + block_size
 
-            #create iterator for each line in the block
+            # create iterator for each line in the block
             line_pointer = self._yield_floats(asc[lower_index:upper_index])
 
-            #Q,AMAX,HWHM,BSCL,GSCL
+            # Q,AMAX,HWHM,BSCL,GSCL
             line = next(line_pointer)
             Q, AMAX, HWHM, _, _ = line
             q_data.append(Q)
 
-            #A0,A1,A2,A4
+            # A0,A1,A2,A4
             line = next(line_pointer)
-            block_height = AMAX*line[0]
+            block_height = AMAX * line[0]
 
-            #parse peak data from block
+            # parse peak data from block
             block_FWHM = []
             block_amplitude = []
             for _ in range(nl):
-                #Amplitude,FWHM for each peak
+                # Amplitude,FWHM for each peak
                 line = next(line_pointer)
-                amp = AMAX*line[0]
-                FWHM = 2.*HWHM*line[1]
+                amp = AMAX * line[0]
+                FWHM = 2. * HWHM * line[1]
                 block_amplitude.append(amp)
                 block_FWHM.append(FWHM)
 
-            #next parse error data from block
-            #SIG0
+            # next parse error data from block
+            # SIG0
             line = next(line_pointer)
             block_height_e = line[0]
 
             block_FWHM_e = []
             block_amplitude_e = []
             for _ in range(nl):
-                #Amplitude error,FWHM error for each peak
-                #SIGIK
+                # Amplitude error,FWHM error for each peak
+                # SIGIK
                 line = next(line_pointer)
-                amp = AMAX*math.sqrt(math.fabs(line[0])+1.0e-20)
+                amp = AMAX * math.sqrt(math.fabs(line[0]) + 1.0e-20)
                 block_amplitude_e.append(amp)
 
-                #SIGFK
+                # SIGFK
                 line = next(line_pointer)
-                FWHM = 2.0*HWHM*math.sqrt(math.fabs(line[0])+1.0e-20)
+                FWHM = 2.0 * HWHM * math.sqrt(math.fabs(line[0]) + 1.0e-20)
                 block_FWHM_e.append(FWHM)
 
-            #append data from block
+            # append data from block
             amp_data.append(block_amplitude)
             FWHM_data.append(block_FWHM)
             height_data.append(block_height)
 
-            #append error values from block
+            # append error values from block
             amp_error.append(block_amplitude_e)
             FWHM_error.append(block_FWHM_e)
             height_error.append(block_height_e)
 
         return q_data, (amp_data, FWHM_data, height_data), (amp_error, FWHM_error, height_error)
 
+
 # Register algorithm with Mantid
 AlgorithmFactory.subscribe(BayesQuasi)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py
index f173b1db8998799ad83fc6d0fce6eae18be5a831..b13b0565765d903b0ca85052cbdee06c229d5145 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/BayesStretch.py
@@ -1,4 +1,4 @@
-#pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init
+# pylint: disable=invalid-name,too-many-instance-attributes,too-many-branches,no-init
 from __future__ import (absolute_import, division, print_function)
 from IndirectImport import *
 
@@ -11,11 +11,10 @@ import os
 import numpy as np
 
 if is_supported_f2py_platform():
-    Que     = import_f2py("Quest")
+    Que = import_f2py("Quest")
 
 
 class BayesStretch(PythonAlgorithm):
-
     _sam_name = None
     _sam_ws = None
     _res_name = None
@@ -59,7 +58,7 @@ class BayesStretch(PythonAlgorithm):
                              doc='Fit option for using the elastic peak')
 
         self.declareProperty(name='Background', defaultValue='Flat',
-                             validator=StringListValidator(['Sloping','Flat','Zero']),
+                             validator=StringListValidator(['Sloping', 'Flat', 'Zero']),
                              doc='Fit option for the type of background')
 
         self.declareProperty(name='NumberSigma', defaultValue=50,
@@ -96,13 +95,13 @@ class BayesStretch(PythonAlgorithm):
 
         return issues
 
-    #pylint: disable=too-many-locals
+    # pylint: disable=too-many-locals
     def PyExec(self):
         run_f2py_compatibility_test()
 
         from IndirectBayes import (CalcErange, GetXYE)
         from IndirectCommon import (CheckXrange, CheckAnalysers, getEfixed, GetThetaQ, CheckHistZero)
-        setup_prog = Progress(self, start=0.0, end=0.3, nreports = 5)
+        setup_prog = Progress(self, start=0.0, end=0.3, nreports=5)
         logger.information('BayesStretch input')
         logger.information('Sample is %s' % self._sam_name)
         logger.information('Resolution is %s' % self._res_name)
@@ -123,9 +122,9 @@ class BayesStretch(PythonAlgorithm):
         theta, Q = GetThetaQ(self._sam_name)
 
         setup_prog.report('Checking Histograms')
-        nsam,ntc = CheckHistZero(self._sam_name)
+        nsam, ntc = CheckHistZero(self._sam_name)
 
-        #check if we're performing a sequential fit
+        # check if we're performing a sequential fit
         if not self._loop:
             nsam = 1
 
@@ -135,22 +134,22 @@ class BayesStretch(PythonAlgorithm):
 
         setup_prog.report('Creating FORTRAN Input')
         fname = self._sam_name[:-4] + '_Stretch'
-        wrks=os.path.join(workdir, self._sam_name[:-4])
+        wrks = os.path.join(workdir, self._sam_name[:-4])
         logger.information('lptfile : %s_Qst.lpt' % wrks)
-        lwrk=len(wrks)
+        lwrk = len(wrks)
         wrks.ljust(140, ' ')
-        wrkr=self._res_name
+        wrkr = self._res_name
         wrkr.ljust(140, ' ')
-        eBet0 = np.zeros(self._nbet)                  # set errors to zero
-        eSig0 = np.zeros(self._nsig)                  # set errors to zero
+        eBet0 = np.zeros(self._nbet)  # set errors to zero
+        eSig0 = np.zeros(self._nsig)  # set errors to zero
         rscl = 1.0
         Qaxis = ''
 
-        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam*3)
+        workflow_prog = Progress(self, start=0.3, end=0.7, nreports=nsam * 3)
 
         # Empty arrays to hold Sigma and Bet x,y,e values
-        xSig, ySig, eSig = [],[],[]
-        xBet, yBet, eBet = [],[],[]
+        xSig, ySig, eSig = [], [], []
+        xBet, yBet, eBet = [], [], []
 
         for m in range(nsam):
             logger.information('Group %i at angle %f' % (m, theta[m]))
@@ -168,13 +167,13 @@ class BayesStretch(PythonAlgorithm):
             reals = [efix, theta[m], rscl, bnorm]
 
             workflow_prog.report('Processing spectrum number %i' % m)
-            xsout, ysout, xbout, ybout, zpout=Que.quest(numb, Xv, Yv, Ev, reals, fitOp,
-                                                        Xdat, Xb, Yb, wrks, wrkr, lwrk)
-            dataXs = xsout[:self._nsig]               # reduce from fixed FORTRAN array
+            xsout, ysout, xbout, ybout, zpout = Que.quest(numb, Xv, Yv, Ev, reals, fitOp,
+                                                          Xdat, Xb, Yb, wrks, wrkr, lwrk)
+            dataXs = xsout[:self._nsig]  # reduce from fixed FORTRAN array
             dataYs = ysout[:self._nsig]
             dataXb = xbout[:self._nbet]
             dataYb = ybout[:self._nbet]
-            zpWS = fname + '_Zp' +str(m)
+            zpWS = fname + '_Zp' + str(m)
             if m > 0:
                 Qaxis += ','
             Qaxis += str(Q[m])
@@ -184,7 +183,7 @@ class BayesStretch(PythonAlgorithm):
             dataEz = []
 
             for n in range(self._nsig):
-                yfit_list = np.split(zpout[:self._nsig*self._nbet], self._nsig)
+                yfit_list = np.split(zpout[:self._nsig * self._nbet], self._nsig)
                 dataYzp = yfit_list[n]
 
                 dataXz = np.append(dataXz, xbout[:self._nbet])
@@ -194,19 +193,19 @@ class BayesStretch(PythonAlgorithm):
             zpWS = fname + '_Zp' + str(m)
             self._create_workspace(zpWS, [dataXz, dataYz, dataEz], self._nsig, dataXs, True)
 
-            xSig = np.append(xSig,dataXs)
-            ySig = np.append(ySig,dataYs)
-            eSig = np.append(eSig,eSig0)
-            xBet = np.append(xBet,dataXb)
-            yBet = np.append(yBet,dataYb)
-            eBet = np.append(eBet,eBet0)
+            xSig = np.append(xSig, dataXs)
+            ySig = np.append(ySig, dataYs)
+            eSig = np.append(eSig, eSig0)
+            xBet = np.append(xBet, dataXb)
+            yBet = np.append(yBet, dataYb)
+            eBet = np.append(eBet, eBet0)
 
             if m == 0:
                 groupZ = zpWS
             else:
-                groupZ = groupZ +','+ zpWS
+                groupZ = groupZ + ',' + zpWS
 
-        #create workspaces for sigma and beta
+        # create workspaces for sigma and beta
         workflow_prog.report('Creating OutputWorkspace')
         self._create_workspace(fname + '_Sigma', [xSig, ySig, eSig], nsam, Qaxis)
         self._create_workspace(fname + '_Beta', [xBet, yBet, eBet], nsam, Qaxis)
@@ -219,31 +218,35 @@ class BayesStretch(PythonAlgorithm):
         s_api.GroupWorkspaces(InputWorkspaces=groupZ,
                               OutputWorkspace=contour_ws)
 
-        #Add some sample logs to the output workspaces
-        log_prog = Progress(self, start=0.8, end =1.0, nreports=6)
+        # Add some sample logs to the output workspaces
+        log_prog = Progress(self, start=0.8, end=1.0, nreports=6)
         log_prog.report('Copying Logs to Fit workspace')
         copy_log_alg = self.createChildAlgorithm('CopyLogs', enableLogging=False)
         copy_log_alg.setProperty('InputWorkspace', self._sam_name)
-        copy_log_alg.setProperty('OutputWorkspace',fit_ws)
+        copy_log_alg.setProperty('OutputWorkspace', fit_ws)
         copy_log_alg.execute()
 
         log_prog.report('Adding Sample logs to Fit workspace')
         self._add_sample_logs(fit_ws, self._erange, self._nbins[0])
 
         log_prog.report('Copying logs to Contour workspace')
-        copy_log_alg.setProperty('InputWorkspace',self._sam_name)
-        copy_log_alg.setProperty('OutputWorkspace',contour_ws)
+        copy_log_alg.setProperty('InputWorkspace', self._sam_name)
+        copy_log_alg.setProperty('OutputWorkspace', contour_ws)
         copy_log_alg.execute()
 
         log_prog.report('Adding sample logs to Contour workspace')
         self._add_sample_logs(contour_ws, self._erange, self._nbins[0])
         log_prog.report('Finialising log copying')
 
+        # sort x axis
+        s_api.SortXAxis(InputWorkspace=fit_ws, OutputWorkspace=fit_ws, EnableLogging=False)
+        s_api.SortXAxis(InputWorkspace=contour_ws, OutputWorkspace=contour_ws, EnableLogging=False)
+
         self.setProperty('OutputWorkspaceFit', fit_ws)
         self.setProperty('OutputWorkspaceContour', contour_ws)
         log_prog.report('Setting workspace properties')
 
-#----------------------------- Helper functions -----------------------------
+    # ----------------------------- Helper functions -----------------------------
 
     def _encode_fit_ops(self, elastic, background):
         """
@@ -274,8 +277,8 @@ class BayesStretch(PythonAlgorithm):
             logger.information('Defaulting to current working Directory: ' + workdir)
         return workdir
 
-    #pylint: disable=too-many-arguments
-    def _create_workspace(self, name, xye, num_spec, vert_axis, is_zp_ws = False):
+    # pylint: disable=too-many-arguments
+    def _create_workspace(self, name, xye, num_spec, vert_axis, is_zp_ws=False):
         """
         Creates a workspace from FORTRAN data
 
@@ -299,11 +302,11 @@ class BayesStretch(PythonAlgorithm):
         unitx = ws.getAxis(0).setUnit("Label")
         if is_zp_ws:
             unity = ws.getAxis(1).setUnit("Label")
-            unitx.setLabel('beta' , '')
-            unity.setLabel('sigma' , '')
+            unitx.setLabel('beta', '')
+            unity.setLabel('sigma', '')
         else:
             if name[:4] == 'Beta':
-                unitx.setLabel('beta' , '')
+                unitx.setLabel('beta', '')
             else:
                 unitx.setLabel('sigma', '')
 
@@ -314,7 +317,7 @@ class BayesStretch(PythonAlgorithm):
         energy_min, energy_max = erange
 
         log_names = ['res_file', 'background', 'elastic_peak',
-                     'energy_min', 'energy_max','sample_binning']
+                     'energy_min', 'energy_max', 'sample_binning']
         log_values = [self._res_name, str(self._background), str(self._elastic),
                       energy_min, energy_max, sample_binning]
 
@@ -322,7 +325,7 @@ class BayesStretch(PythonAlgorithm):
         add_log.setProperty('Workspace', workspace)
         add_log.setProperty('LogNames', log_names)
         add_log.setProperty('LogValues', log_values)
-        add_log.setProperty('ParseType', True) # Should determine String/Number type
+        add_log.setProperty('ParseType', True)  # Should determine String/Number type
         add_log.execute()
 
     def _get_properties(self):
@@ -343,4 +346,4 @@ class BayesStretch(PythonAlgorithm):
         self._nbins = [self._sam_bins, 1]
 
 
-AlgorithmFactory.subscribe(BayesStretch)         # Register algorithm with Mantid
+AlgorithmFactory.subscribe(BayesStretch)  # Register algorithm with Mantid
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py
index ddea9a7709c946aea7ad1f54587b51d5d8186f25..0e5b3cdf2039fdb48947313277ba54afac6e9598 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransfer.py
@@ -2,17 +2,18 @@ from __future__ import (absolute_import, division, print_function)
 
 import os
 import numpy as np
-from mantid.simpleapi import *  # noqa
-from mantid.kernel import *  # noqa
-from mantid.api import *  # noqa
 from mantid import config, mtd, logger
+from mantid.kernel import StringListValidator, Direction
+from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, \
+    WorkspaceGroupProperty, FileAction, Progress
+from mantid.simpleapi import *  # noqa
 
 
 def _ws_or_none(s):
     return mtd[s] if s != '' else None
 
 
-def extract_workspace(ws, ws_out, x_start, x_end):
+def _extract_workspace(ws, ws_out, x_start, x_end):
     """
     Extracts a part of the workspace and
     shifts the x-axis to start from 0
@@ -43,12 +44,14 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
     _red_ws = None
     _psd_int_range = None
     _use_map_file = None
+    _spectrum_axis = None
+    _efixed = None
 
     def category(self):
         return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction"
 
     def summary(self):
-        return 'Performs energy transfer reduction for ILL indirect geometry data, instrument IN16B.'
+        return 'Performs initial energy transfer reduction for ILL indirect geometry data, instrument IN16B.'
 
     def name(self):
         return "IndirectILLEnergyTransfer"
@@ -90,6 +93,10 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
                                                     direction=Direction.Output),
                              doc='Group name for the reduced workspace(s).')
 
+        self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
+                             validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
+                             doc='The spectrum axis conversion target.')
+
     def validateInputs(self):
 
         issues = dict()
@@ -113,6 +120,7 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
         self._reflection = self.getPropertyValue('Reflection')
         self._dead_channels = self.getProperty('CropDeadMonitorChannels').value
         self._red_ws = self.getPropertyValue('OutputWorkspace')
+        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
 
         if self._map_file or (self._psd_int_range[0] == 1 and self._psd_int_range[1] == 128):
             self._use_map_file = True
@@ -280,6 +288,8 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
 
         LoadParameterFile(Workspace=self._ws, Filename=self._parameter_file)
 
+        self._efixed = self._instrument.getNumberParameter('Efixed')[0]
+
         self._setup_run_properties()
 
         if self._mirror_sense == 14:      # two wings, extract left and right
@@ -287,8 +297,8 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
             size = mtd[self._ws].blocksize()
             left = self._ws + '_left'
             right = self._ws + '_right'
-            extract_workspace(self._ws, left, 0, int(size/2))
-            extract_workspace(self._ws, right, int(size/2), size)
+            _extract_workspace(self._ws, left, 0, int(size/2))
+            _extract_workspace(self._ws, right, int(size/2), size)
             DeleteWorkspace(self._ws)
             self._reduce_one_wing(left)
             self._reduce_one_wing(right)
@@ -333,6 +343,18 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
 
         self._convert_to_energy(ws, n_cropped_bins)
 
+        target = None
+        if self._spectrum_axis == '2Theta':
+            target = 'Theta'
+        elif self._spectrum_axis == 'Q':
+            target = 'ElasticQ'
+        elif self._spectrum_axis == 'Q2':
+            target = 'ElasticQSquared'
+
+        if self._spectrum_axis != 'SpectrumNumber':
+            ConvertSpectrumAxis(InputWorkspace=ws,OutputWorkspace=ws,
+                                EMode='Indirect',Target=target,EFixed=self._efixed)
+
     def _group_detectors_with_range(self, ws):
         """
         Groups (sums) the multi-detector's pixels according to given range
@@ -382,6 +404,9 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
             if mtd[int].readY(0)[0] !=0: # this needs to be checked
                 Scale(InputWorkspace=ws, OutputWorkspace=ws, Factor=1. / mtd[int].readY(0)[0])
 
+            # remember the integral of the monitor
+            AddSampleLog(Workspace=ws, LogName="MonitorIntegral", LogType="Number", LogText=str(mtd[int].readY(0)[0]))
+
             DeleteWorkspace(int)
 
         elif self._reduction_type == 'IFWS':
@@ -404,6 +429,9 @@ class IndirectILLEnergyTransfer(PythonAlgorithm):
             if mtd[int].readY(0)[0] != 0: # this needs to be checked
                 Scale(InputWorkspace = ws, OutputWorkspace = ws, Factor = 1./mtd[int].readY(0)[0])
 
+            # remember the integral of the monitor
+            AddSampleLog(Workspace=ws, LogName="MonitorIntegral", LogType="Number", LogText=str(mtd[int].readY(0)[0]))
+
             DeleteWorkspace(i1)
             DeleteWorkspace(i2)
             DeleteWorkspace(int)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py
index f9bb92bb73b8caa16eb9d7e8a321d06e797fd5b7..c5d21175b91ffbe997995827973fd6c9d56ce708 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionFWS.py
@@ -1,17 +1,11 @@
 from __future__ import (absolute_import, division, print_function)
 
-
-from mantid.simpleapi import *  # noqa
-from mantid.kernel import *  # noqa
-from mantid.api import *  # noqa
-from mantid import mtd
-import os
 import numpy as np
 import time
-
-
-def _insert_energy_value(ws_name, energy):
-    return ws_name.replace('_red', '_' + str(energy) + '_red')
+from mantid import mtd
+from mantid.kernel import StringListValidator, Direction, FloatBoundedValidator
+from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, FileAction, WorkspaceGroupProperty, Progress
+from mantid.simpleapi import *  # noqa
 
 
 class IndirectILLReductionFWS(PythonAlgorithm):
@@ -35,7 +29,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
     _all_runs = None
 
     def category(self):
-        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction"
+        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"
 
     def summary(self):
         return 'Performs fixed-window scan (FWS) multiple file reduction (both elastic and inelastic) ' \
@@ -109,6 +103,10 @@ class IndirectILLReductionFWS(PythonAlgorithm):
                                                     direction=Direction.Output),
                              doc='Output workspace group')
 
+        self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
+                             validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
+                             doc='The spectrum axis conversion target.')
+
     def validateInputs(self):
 
         issues = dict()
@@ -125,14 +123,28 @@ class IndirectILLReductionFWS(PythonAlgorithm):
         self._back_scaling = self.getProperty('BackgroundScalingFactor').value
         self._back_option = self.getPropertyValue('BackgroundOption')
         self._calib_option = self.getPropertyValue('CalibrationOption')
+        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
 
         # arguments to pass to IndirectILLEnergyTransfer
         self._common_args['MapFile'] = self.getPropertyValue('MapFile')
         self._common_args['Analyser'] = self.getPropertyValue('Analyser')
         self._common_args['Reflection'] = self.getPropertyValue('Reflection')
         self._common_args['ManualPSDIntegrationRange'] = self.getProperty('ManualPSDIntegrationRange').value
+        self._common_args['SpectrumAxis'] = self._spectrum_axis
+
+        self._red_ws = self.getPropertyValue('OutputWorkspace')
 
-        self._red_ws = self.getPropertyValue('OutputWorkspace') + '_red'
+        suffix = ''
+        if self._spectrum_axis == 'SpectrumNumber':
+            suffix = '_red'
+        elif self._spectrum_axis == '2Theta':
+            suffix = '_2theta'
+        elif self._spectrum_axis == 'Q':
+            suffix = '_q'
+        elif self._spectrum_axis == 'Q2':
+            suffix = '_q2'
+
+        self._red_ws += suffix
 
         # Nexus metadata criteria for FWS type of data (both EFWS and IFWS)
         self._criteria = '($/entry0/instrument/Doppler/maximum_delta_energy$ == 0. or ' \
@@ -214,9 +226,18 @@ class IndirectILLReductionFWS(PythonAlgorithm):
             right = mtd[groupws].getItem(1).getName()
             sum = '__sum_'+groupws
             Plus(LHSWorkspace=left, RHSWorkspace=right, OutputWorkspace=sum)
+
+            left_monitor = mtd[left].getRun().getLogData('MonitorIntegral').value
+            right_monitor = mtd[right].getRun().getLogData('MonitorIntegral').value
+
+            if left_monitor != 0. and right_monitor != 0.:
+                Scale(InputWorkspace=sum, OutputWorkspace=sum, Factor=0.5)
+
             DeleteWorkspace(left)
             DeleteWorkspace(right)
+
             RenameWorkspace(InputWorkspace=sum, OutputWorkspace=groupws)
+
         else:
             RenameWorkspace(InputWorkspace=mtd[groupws].getItem(0), OutputWorkspace=groupws)
 
@@ -339,7 +360,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[label]:
-                ws = _insert_energy_value(self._red_ws, energy) + '_' + label
+                ws = self._insert_energy_value(self._red_ws + '_' + label, energy, label)
                 x_range = mtd[ws].readX(0)[-1] - mtd[ws].readX(0)[0]
                 if mtd[ws].blocksize() > 1:
                     Integration(InputWorkspace=ws, OutputWorkspace=ws)
@@ -354,13 +375,13 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[label]:
-                ref = _insert_energy_value(self._red_ws, energy)
+                ref = self._insert_energy_value(self._red_ws, energy, self._SAMPLE)
                 ws = ref + '_' + label
                 if mtd[ws].blocksize() > 1:
                     SplineInterpolation(WorkspaceToInterpolate=ws,
                                         WorkspaceToMatch=ref,
                                         OutputWorkspace=ws)
-                    # add Linear2Point=True, when ready
+                    # TODO: add Linear2Point=True when ready
 
     def _subtract_background(self):
         '''
@@ -369,7 +390,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[self._BACKGROUND]:
-                sample_ws = _insert_energy_value(self._red_ws, energy)
+                sample_ws = self._insert_energy_value(self._red_ws, energy, self._SAMPLE)
                 back_ws = sample_ws + '_' + self._BACKGROUND
                 Minus(LHSWorkspace=sample_ws, RHSWorkspace=back_ws, OutputWorkspace=sample_ws)
             else:
@@ -383,7 +404,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
 
         for energy in self._all_runs[self._SAMPLE]:
             if energy in self._all_runs[self._CALIBRATION]:
-                sample_ws = _insert_energy_value(self._red_ws, energy)
+                sample_ws = self._insert_energy_value(self._red_ws, energy, self._SAMPLE)
                 calib_ws = sample_ws + '_' + self._CALIBRATION
                 Divide(LHSWorkspace=sample_ws, RHSWorkspace=calib_ws, OutputWorkspace=sample_ws)
                 self._scale_calibration(sample_ws,calib_ws)
@@ -468,7 +489,7 @@ class IndirectILLReductionFWS(PythonAlgorithm):
             ws_list = self._all_runs[label][energy]
             size = len(self._all_runs[label][energy])
 
-            wsname = _insert_energy_value(groupname, energy)
+            wsname = self._insert_energy_value(groupname, energy, label)
 
             togroup.append(wsname)
             nspectra = mtd[ws_list[0]].getNumberHistograms()
@@ -535,5 +556,25 @@ class IndirectILLReductionFWS(PythonAlgorithm):
         else:
             axis.setUnit("Label").setLabel(self._observable, '')
 
+    def _insert_energy_value(self, ws_name, energy, label):
+        '''
+        Inserts the doppler's energy value in the workspace name
+        in between the user input and automatic suffix
+        @param ws_name : workspace name
+        @param energy : energy value
+        @param label : sample, background, or calibration
+        @return : new name with energy value inside
+        Example:
+        user_input_2theta > user_input_1.5_2theta
+        user_input_red_background > user_input_1.5_red_background
+        '''
+        suffix_pos = ws_name.rfind('_')
+
+        if label != self._SAMPLE:
+            # find second to last underscore
+            suffix_pos = ws_name.rfind('_', 0, suffix_pos)
+
+        return ws_name[:suffix_pos] + '_' + str(energy) + ws_name[suffix_pos:]
+
 # Register algorithm with Mantid
 AlgorithmFactory.subscribe(IndirectILLReductionFWS)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py
index 880acb246755fd4dbb41040976cd2e96c8344037..a4eda94cf26a39a3c14b47d28dd13754f29165a1 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectILLReductionQENS.py
@@ -1,11 +1,13 @@
 from __future__ import (absolute_import, division, print_function)
 
-from mantid.simpleapi import *  # noqa
-from mantid.kernel import *  # noqa
-from mantid.api import *  # noqa
-from mantid import mtd
-import numpy
 import os
+import numpy
+from mantid import mtd
+from mantid.kernel import StringListValidator, Direction, FloatBoundedValidator, \
+    FloatArrayMandatoryValidator, IntBoundedValidator
+from mantid.api import PythonAlgorithm, MultipleFileProperty, FileProperty, \
+    FileAction, WorkspaceGroupProperty, Progress
+from mantid.simpleapi import *  # noqa
 
 
 class IndirectILLReductionQENS(PythonAlgorithm):
@@ -23,9 +25,10 @@ class IndirectILLReductionQENS(PythonAlgorithm):
     _common_args = {}
     _peak_range = []
     _runs = None
+    _spectrum_axis = None
 
     def category(self):
-        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction"
+        return "Workflow\\MIDAS;Workflow\\Inelastic;Inelastic\\Indirect;Inelastic\\Reduction;ILL\\Indirect"
 
     def summary(self):
         return 'Performs quasi-elastic neutron scattering (QENS) multiple file reduction ' \
@@ -111,6 +114,10 @@ class IndirectILLReductionQENS(PythonAlgorithm):
                                                     direction=Direction.Output),
                              doc='Group name for the reduced workspace(s).')
 
+        self.declareProperty(name='SpectrumAxis', defaultValue='SpectrumNumber',
+                             validator=StringListValidator(['SpectrumNumber', '2Theta', 'Q', 'Q2']),
+                             doc='The spectrum axis conversion target.')
+
     def validateInputs(self):
 
         issues = dict()
@@ -141,8 +148,21 @@ class IndirectILLReductionQENS(PythonAlgorithm):
         self._unmirror_option = self.getProperty('UnmirrorOption').value
         self._back_scaling = self.getProperty('BackgroundScalingFactor').value
         self._peak_range = self.getProperty('CalibrationPeakRange').value
+        self._spectrum_axis = self.getPropertyValue('SpectrumAxis')
+
+        self._red_ws = self.getPropertyValue('OutputWorkspace')
+
+        suffix = ''
+        if self._spectrum_axis == 'SpectrumNumber':
+            suffix = '_red'
+        elif self._spectrum_axis == '2Theta':
+            suffix = '_2theta'
+        elif self._spectrum_axis == 'Q':
+            suffix = '_q'
+        elif self._spectrum_axis == 'Q2':
+            suffix = '_q2'
 
-        self._red_ws = self.getPropertyValue('OutputWorkspace') + '_red'
+        self._red_ws += suffix
 
         # arguments to pass to IndirectILLEnergyTransfer
         self._common_args['MapFile'] = self.getPropertyValue('MapFile')
@@ -150,6 +170,7 @@ class IndirectILLReductionQENS(PythonAlgorithm):
         self._common_args['Reflection'] = self.getPropertyValue('Reflection')
         self._common_args['ManualPSDIntegrationRange'] = self.getProperty('ManualPSDIntegrationRange').value
         self._common_args['CropDeadMonitorChannels'] = self.getProperty('CropDeadMonitorChannels').value
+        self._common_args['SpectrumAxis'] = self._spectrum_axis
 
         if self._sum_all_runs is True:
             self.log().notice('All the sample runs will be summed')
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSReductionCore.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSReductionCore.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc17388d70108e4e009abe6a2d2ce18a87a4fe1f
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSReductionCore.py
@@ -0,0 +1,416 @@
+# pylint: disable=invalid-name
+
+""" SANSReductionCore algorithm runs the sequence of reduction steps which are necessary to reduce a data set."""
+
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, PropertyManagerProperty, StringListValidator)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode,
+                        IEventWorkspace, Progress)
+
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import (create_child_algorithm, append_to_sans_file_tag)
+from sans.common.enums import (DetectorType, DataType)
+
+
+class SANSReductionCore(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Reduction'
+
+    def summary(self):
+        return ' Runs the the core reduction elements.'
+
+    def PyInit(self):
+        # ----------
+        # INPUT
+        # ----------
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        # WORKSPACES
+        # Scatter Workspaces
+        self.declareProperty(MatrixWorkspaceProperty('ScatterWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('ScatterMonitorWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The scatter monitor workspace. This workspace only contains monitors.')
+
+        # Transmission Workspace
+        self.declareProperty(MatrixWorkspaceProperty('TransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The transmission workspace.')
+
+        # Direct Workspace
+        self.declareProperty(MatrixWorkspaceProperty('DirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The direct workspace.')
+
+        self.setPropertyGroup("ScatterWorkspace", 'Data')
+        self.setPropertyGroup("ScatterMonitorWorkspace", 'Data')
+        self.setPropertyGroup("TransmissionWorkspace", 'Data')
+        self.setPropertyGroup("DirectWorkspace", 'Data')
+
+        # The component
+        allowed_detectors = StringListValidator([DetectorType.to_string(DetectorType.LAB),
+                                                 DetectorType.to_string(DetectorType.HAB)])
+        self.declareProperty("Component", DetectorType.to_string(DetectorType.LAB),
+                             validator=allowed_detectors, direction=Direction.Input,
+                             doc="The component of the instrument which is to be reduced.")
+
+        # The data type
+        allowed_data = StringListValidator([DataType.to_string(DataType.Sample),
+                                            DataType.to_string(DataType.Can)])
+        self.declareProperty("DataType", DataType.to_string(DataType.Sample),
+                             validator=allowed_data, direction=Direction.Input,
+                             doc="The component of the instrument which is to be reduced.")
+
+        # ----------
+        # OUTPUT
+        # ----------
+        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", '', direction=Direction.Output),
+                             doc='The output workspace.')
+
+        self.declareProperty(MatrixWorkspaceProperty('SumOfCounts', '', optional=PropertyMode.Optional,
+                                                     direction=Direction.Output),
+                             doc='The sum of the counts of the output workspace.')
+
+        self.declareProperty(MatrixWorkspaceProperty('SumOfNormFactors', '', optional=PropertyMode.Optional,
+                                                     direction=Direction.Output),
+                             doc='The sum of the counts of the output workspace.')
+
+    def PyExec(self):
+        # Get the input
+        state = self._get_state()
+        state_serialized = state.property_manager
+        component_as_string = self.getProperty("Component").value
+        progress = self._get_progress()
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 1. Crop workspace by detector name
+        #    This will create a reduced copy of the original workspace with only those spectra which are relevant
+        #    for this particular reduction.
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Cropping ...")
+        workspace = self._get_cropped_workspace(component_as_string)
+
+        # --------------------------------------------------------------------------------------------
+        # 2. Perform dark run subtraction
+        #    This will subtract a dark background from the scatter workspace. Note that dark background subtraction
+        #    will also affect the transmission calculation later on.
+        # --------------------------------------------------------------------------------------------------------------
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 3. Create event slice
+        #    If we are dealing with an event workspace as input, this will cut out a time-based (user-defined) slice.
+        #    In case of a histogram workspace, nothing happens.
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Event slicing ...")
+        data_type_as_string = self.getProperty("DataType").value
+        monitor_workspace = self._get_monitor_workspace()
+        workspace, monitor_workspace, slice_event_factor = self._slice(state_serialized, workspace, monitor_workspace,
+                                                                       data_type_as_string)
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN
+        # IMPORTANT: This section of the code should only be temporary. It allows us to convert to histogram
+        # early on and hence compare the new reduction results with the output of the new reduction chain.
+        # Once the new reduction chain is established, we should remove the compatibility feature.
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        compatibility = state.compatibility
+        is_event_workspace = isinstance(workspace, IEventWorkspace)
+        if compatibility.use_compatibility_mode and is_event_workspace:
+            # We convert the workspace here to a histogram workspace, since we cannot otherwise
+            # compare the results between the old and the new reduction workspace in a meaningful manner.
+            # The old one is histogram and the new one is event.
+            # Rebin to monitor workspace
+            if compatibility.time_rebin_string:
+                rebin_name = "Rebin"
+                rebin_option = {"InputWorkspace": workspace,
+                                "Params": compatibility.time_rebin_string,
+                                "OutputWorkspace": EMPTY_NAME,
+                                "PreserveEvents": False}
+                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
+                rebin_alg.execute()
+                workspace = rebin_alg.getProperty("OutputWorkspace").value
+            else:
+                rebin_name = "RebinToWorkspace"
+                rebin_option = {"WorkspaceToRebin": workspace,
+                                "WorkspaceToMatch": monitor_workspace,
+                                "OutputWorkspace": EMPTY_NAME,
+                                "PreserveEvents": False}
+                rebin_alg = create_child_algorithm(self, rebin_name, **rebin_option)
+                rebin_alg.execute()
+                workspace = rebin_alg.getProperty("OutputWorkspace").value
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+        # ------------------------------------------------------------
+        # 4. Move the workspace into the correct position
+        #    The detectors in the workspaces are set such that the beam centre is at (0,0). The position is
+        #    a user-specified value which can be obtained with the help of the beam centre finder.
+        # ------------------------------------------------------------
+        progress.report("Moving ...")
+        workspace = self._move(state_serialized, workspace, component_as_string)
+        monitor_workspace = self._move(state_serialized, monitor_workspace, component_as_string)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 5. Apply masking (pixel masking and time masking)
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Masking ...")
+        workspace = self._mask(state_serialized, workspace, component_as_string)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 6. Convert to Wavelength
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Converting to wavelength ...")
+        workspace = self._convert_to_wavelength(state_serialized, workspace)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 7. Multiply by volume and absolute scale
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Multiplying by volume and absolute scale ...")
+        workspace = self._scale(state_serialized, workspace)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # 8. Create adjustment workspaces, those are
+        #     1. pixel-based adjustments
+        #     2. wavelength-based adjustments
+        #     3. pixel-and-wavelength-based adjustments
+        # Note that steps 4 to 7 could run in parallel if we don't use wide angle correction. If we do then the
+        # creation of the adjustment workspaces requires the sample workspace itself and we have to run it sequentially.
+        # We could consider to have a serial and a parallel strategy here, depending on the wide angle correction
+        # settings. On the other hand it is not clear that this would be an advantage with the GIL.
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Creating adjustment workspaces ...")
+        wavelength_adjustment_workspace, pixel_adjustment_workspace, wavelength_and_pixel_adjustment_workspace =\
+            self._adjustment(state_serialized, workspace, monitor_workspace, component_as_string, data_type_as_string)
+
+        # ------------------------------------------------------------
+        # 9. Convert event workspaces to histogram workspaces
+        # ------------------------------------------------------------
+        progress.report("Converting to histogram mode ...")
+        workspace = self._convert_to_histogram(workspace)
+
+        # ------------------------------------------------------------
+        # 10. Convert to Q
+        # -----------------------------------------------------------
+        progress.report("Converting to q ...")
+        workspace, sum_of_counts, sum_of_norms = self._convert_to_q(state_serialized,
+                                                                    workspace,
+                                                                    wavelength_adjustment_workspace,
+                                                                    pixel_adjustment_workspace,
+                                                                    wavelength_and_pixel_adjustment_workspace)
+        progress.report("Completed SANSReductionCore ...")
+
+        # ------------------------------------------------------------
+        # Populate the output
+        # ------------------------------------------------------------
+        self.setProperty("OutputWorkspace", workspace)
+
+        # ------------------------------------------------------------
+        # Diagnostic output
+        # ------------------------------------------------------------
+        if sum_of_counts:
+            self.setProperty("SumOfCounts", sum_of_counts)
+        if sum_of_norms:
+            self.setProperty("SumOfNormFactors", sum_of_norms)
+
+        # TODO: Publish temporary workspaces if required
+        # This includes partial workspaces of Q1D and unfitted transmission data
+
+    def _get_cropped_workspace(self, component):
+        scatter_workspace = self.getProperty("ScatterWorkspace").value
+        crop_name = "SANSCrop"
+        crop_options = {"InputWorkspace": scatter_workspace,
+                        "OutputWorkspace": EMPTY_NAME,
+                        "Component": component}
+        crop_alg = create_child_algorithm(self, crop_name, **crop_options)
+        crop_alg.execute()
+        return crop_alg.getProperty("OutputWorkspace").value
+
+    def _slice(self, state_serialized, workspace, monitor_workspace, data_type_as_string):
+        slice_name = "SANSSliceEvent"
+        slice_options = {"SANSState": state_serialized,
+                         "InputWorkspace": workspace,
+                         "InputWorkspaceMonitor": monitor_workspace,
+                         "OutputWorkspace": EMPTY_NAME,
+                         "OutputWorkspaceMonitor": "dummy2",
+                         "DataType": data_type_as_string}
+        slice_alg = create_child_algorithm(self, slice_name, **slice_options)
+        slice_alg.execute()
+
+        workspace = slice_alg.getProperty("OutputWorkspace").value
+        monitor_workspace = slice_alg.getProperty("OutputWorkspaceMonitor").value
+        slice_event_factor = slice_alg.getProperty("SliceEventFactor").value
+        return workspace, monitor_workspace, slice_event_factor
+
+    def _move(self, state_serialized, workspace, component, is_transmission=False):
+        # First we set the workspace to zero, since it might have been moved around by the user in the ADS
+        # Second we use the initial move to bring the workspace into the correct position
+        move_name = "SANSMove"
+        move_options = {"SANSState": state_serialized,
+                        "Workspace": workspace,
+                        "MoveType": "SetToZero",
+                        "Component": ""}
+        move_alg = create_child_algorithm(self, move_name, **move_options)
+        move_alg.execute()
+        workspace = move_alg.getProperty("Workspace").value
+
+        # Do the initial move
+        move_alg.setProperty("MoveType", "InitialMove")
+        move_alg.setProperty("Component", component)
+        move_alg.setProperty("Workspace", workspace)
+        move_alg.setProperty("IsTransmissionWorkspace", is_transmission)
+        move_alg.execute()
+        return move_alg.getProperty("Workspace").value
+
+    def _mask(self, state_serialized, workspace, component):
+        mask_name = "SANSMaskWorkspace"
+        mask_options = {"SANSState": state_serialized,
+                        "Workspace": workspace,
+                        "Component": component}
+        mask_alg = create_child_algorithm(self, mask_name, **mask_options)
+        mask_alg.execute()
+        return mask_alg.getProperty("Workspace").value
+
+    def _convert_to_wavelength(self, state_serialized, workspace):
+        wavelength_name = "SANSConvertToWavelength"
+        wavelength_options = {"SANSState": state_serialized,
+                              "InputWorkspace": workspace}
+        wavelength_alg = create_child_algorithm(self, wavelength_name, **wavelength_options)
+        wavelength_alg.setPropertyValue("OutputWorkspace", EMPTY_NAME)
+        wavelength_alg.setProperty("OutputWorkspace", workspace)
+        wavelength_alg.execute()
+        return wavelength_alg.getProperty("OutputWorkspace").value
+
+    def _scale(self, state_serialized, workspace):
+        scale_name = "SANSScale"
+        scale_options = {"SANSState": state_serialized,
+                         "InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME}
+        scale_alg = create_child_algorithm(self, scale_name, **scale_options)
+        scale_alg.execute()
+        return scale_alg.getProperty("OutputWorkspace").value
+
+    def _adjustment(self, state_serialized, workspace, monitor_workspace, component_as_string, data_type):
+        transmission_workspace = self._get_transmission_workspace()
+        direct_workspace = self._get_direct_workspace()
+
+        adjustment_name = "SANSCreateAdjustmentWorkspaces"
+        adjustment_options = {"SANSState": state_serialized,
+                              "Component": component_as_string,
+                              "DataType": data_type,
+                              "MonitorWorkspace": monitor_workspace,
+                              "SampleData": workspace,
+                              "OutputWorkspaceWavelengthAdjustment": EMPTY_NAME,
+                              "OutputWorkspacePixelAdjustment": EMPTY_NAME,
+                              "OutputWorkspaceWavelengthAndPixelAdjustment": EMPTY_NAME}
+        if transmission_workspace:
+            transmission_workspace = self._move(state_serialized, transmission_workspace, component_as_string,
+                                                is_transmission=True)
+            adjustment_options.update({"TransmissionWorkspace": transmission_workspace})
+
+        if direct_workspace:
+            direct_workspace = self._move(state_serialized, direct_workspace, component_as_string, is_transmission=True)
+            adjustment_options.update({"DirectWorkspace": direct_workspace})
+
+        adjustment_alg = create_child_algorithm(self, adjustment_name, **adjustment_options)
+        adjustment_alg.execute()
+
+        wavelength_adjustment = adjustment_alg.getProperty("OutputWorkspaceWavelengthAdjustment").value
+        pixel_adjustment = adjustment_alg.getProperty("OutputWorkspacePixelAdjustment").value
+        wavelength_and_pixel_adjustment = adjustment_alg.getProperty(
+                                           "OutputWorkspaceWavelengthAndPixelAdjustment").value
+        return wavelength_adjustment, pixel_adjustment, wavelength_and_pixel_adjustment
+
+    def _convert_to_histogram(self, workspace):
+        if isinstance(workspace, IEventWorkspace):
+            convert_name = "RebinToWorkspace"
+            convert_options = {"WorkspaceToRebin": workspace,
+                               "WorkspaceToMatch": workspace,
+                               "OutputWorkspace": "OutputWorkspace",
+                               "PreserveEvents": False}
+            convert_alg = create_child_algorithm(self, convert_name, **convert_options)
+            convert_alg.execute()
+            workspace = convert_alg.getProperty("OutputWorkspace").value
+            append_to_sans_file_tag(workspace, "_histogram")
+
+        return workspace
+
+    def _convert_to_q(self, state_serialized, workspace, wavelength_adjustment_workspace, pixel_adjustment_workspace,
+                      wavelength_and_pixel_adjustment_workspace):
+        """
+        A conversion to momentum transfer is performed in this step.
+
+        The conversion can be either to the modulus of Q in which case the output is a 1D workspace, or it can
+        be a 2D reduction where the y axis is Qy, ie it is a numeric axis.
+        @param state: a SANSState object
+        @param workspace: the workspace to convert to momentum transfer.
+        @param wavelength_adjustment_workspace: the wavelength adjustment workspace.
+        @param pixel_adjustment_workspace: the pixel adjustment workspace.
+        @param wavelength_and_pixel_adjustment_workspace: the wavelength and pixel adjustment workspace.
+        @return: a reduced workspace
+        """
+        convert_name = "SANSConvertToQ"
+        convert_options = {"InputWorkspace": workspace,
+                           "OutputWorkspace": EMPTY_NAME,
+                           "SANSState": state_serialized,
+                           "OutputParts": True}
+        if wavelength_adjustment_workspace:
+            convert_options.update({"InputWorkspaceWavelengthAdjustment": wavelength_adjustment_workspace})
+        if pixel_adjustment_workspace:
+            convert_options.update({"InputWorkspacePixelAdjustment": pixel_adjustment_workspace})
+        if wavelength_and_pixel_adjustment_workspace:
+            convert_options.update({"InputWorkspaceWavelengthAndPixelAdjustment":
+                                    wavelength_and_pixel_adjustment_workspace})
+        convert_alg = create_child_algorithm(self, convert_name, **convert_options)
+        convert_alg.execute()
+        data_workspace = convert_alg.getProperty("OutputWorkspace").value
+        sum_of_counts = convert_alg.getProperty("SumOfCounts").value
+        sum_of_norms = convert_alg.getProperty("SumOfNormFactors").value
+        return data_workspace, sum_of_counts, sum_of_norms
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        try:
+            state = self._get_state()
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSSingleReduction": str(err)})
+        return errors
+
+    def _get_state(self):
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        state.property_manager = state_property_manager
+        return state
+
+    def _get_transmission_workspace(self):
+        transmission_workspace = self.getProperty("TransmissionWorkspace").value
+        return self._get_cloned_workspace(transmission_workspace) if transmission_workspace else None
+
+    def _get_direct_workspace(self):
+        direct_workspace = self.getProperty("DirectWorkspace").value
+        return self._get_cloned_workspace(direct_workspace) if direct_workspace else None
+
+    def _get_monitor_workspace(self):
+        monitor_workspace = self.getProperty("ScatterMonitorWorkspace").value
+        return self._get_cloned_workspace(monitor_workspace)
+
+    def _get_cloned_workspace(self, workspace):
+        clone_name = "CloneWorkspace"
+        clone_options = {"InputWorkspace": workspace,
+                         "OutputWorkspace": EMPTY_NAME}
+        clone_alg = create_child_algorithm(self, clone_name, **clone_options)
+        clone_alg.execute()
+        return clone_alg.getProperty("OutputWorkspace").value
+
+    def _get_progress(self):
+        return Progress(self, start=0.0, end=1.0, nreports=10)
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSReductionCore)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSSingleReduction.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSSingleReduction.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b1296ca49ee857e2bd9c7aceb15cb31c2513096
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANS/SANSSingleReduction.py
@@ -0,0 +1,383 @@
+# pylint: disable=invalid-name
+
+""" SANSSingleReduction algorithm performs a single reduction."""
+
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import (Direction, PropertyManagerProperty, Property)
+from mantid.api import (DataProcessorAlgorithm, MatrixWorkspaceProperty, AlgorithmFactory, PropertyMode, Progress)
+
+from sans.state.state_base import create_deserialized_sans_state_from_property_manager
+from sans.common.enums import (ReductionMode, DataType, ISISReductionMode)
+from sans.common.general_functions import (create_child_algorithm, does_can_workspace_exist_on_ads)
+from sans.algorithm_detail.single_execution import (run_core_reduction, get_final_output_workspaces,
+                                                    get_merge_bundle_for_merge_request, run_optimized_for_can)
+from sans.algorithm_detail.bundles import ReductionSettingBundle
+
+
+class SANSSingleReduction(DataProcessorAlgorithm):
+    def category(self):
+        return 'SANS\\Reduction'
+
+    def summary(self):
+        return 'Performs a single reduction of SANS data.'
+
+    def PyInit(self):
+        # ----------
+        # INPUT
+        # ----------
+        self.declareProperty(PropertyManagerProperty('SANSState'),
+                             doc='A property manager which fulfills the SANSState contract.')
+
+        self.declareProperty("UseOptimizations", True, direction=Direction.Input,
+                             doc="When enabled the ADS is being searched for already loaded and reduced workspaces. "
+                                 "Depending on your concrete reduction, this could provide a significant"
+                                 " performance boost")
+
+        # Sample Scatter Workspaces
+        self.declareProperty(MatrixWorkspaceProperty('SampleScatterWorkspace', '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
+                             doc='The sample scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('SampleScatterMonitorWorkspace', '',
+                                                     optional=PropertyMode.Mandatory, direction=Direction.Input),
+                             doc='The sample scatter monitor workspace. This workspace only contains monitors.')
+
+        # Sample Transmission Workspace
+        self.declareProperty(MatrixWorkspaceProperty('SampleTransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The sample transmission workspace.')
+
+        # Sample Direct Workspace
+        self.declareProperty(MatrixWorkspaceProperty('SampleDirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The sample scatter direct workspace.')
+
+        self.setPropertyGroup("SampleScatterWorkspace", 'Sample')
+        self.setPropertyGroup("SampleScatterMonitorWorkspace", 'Sample')
+        self.setPropertyGroup("SampleTransmissionWorkspace", 'Sample')
+        self.setPropertyGroup("SampleDirectWorkspace", 'Sample')
+
+        # Can Scatter Workspaces
+        self.declareProperty(MatrixWorkspaceProperty('CanScatterWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The can scatter workspace. This workspace does not contain monitors.')
+        self.declareProperty(MatrixWorkspaceProperty('CanScatterMonitorWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The can scatter monitor workspace. This workspace only contains monitors.')
+
+        # Sample Transmission Workspace
+        self.declareProperty(MatrixWorkspaceProperty('CanTransmissionWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The can transmission workspace.')
+
+        # Sample Direct Workspace
+        self.declareProperty(MatrixWorkspaceProperty('CanDirectWorkspace', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Input),
+                             doc='The sample scatter direct workspace.')
+
+        self.setPropertyGroup("CanScatterWorkspace", 'Can')
+        self.setPropertyGroup("CanScatterMonitorWorkspace", 'Can')
+        self.setPropertyGroup("CanTransmissionWorkspace", 'Can')
+        self.setPropertyGroup("CanDirectWorkspace", 'Can')
+
+        # ----------
+        # OUTPUT
+        # ----------
+        self.declareProperty('OutScaleFactor', defaultValue=Property.EMPTY_DBL, direction=Direction.Output,
+                             doc='Applied scale factor.')
+
+        self.declareProperty('OutShiftFactor', defaultValue=Property.EMPTY_DBL, direction=Direction.Output,
+                             doc='Applied shift factor.')
+
+        # This breaks our flexibility with the reduction mode. We need to check if we can populate this based on
+        # the available reduction modes for the state input. TODO: check if this is possible
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLAB', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The output workspace for the low-angle bank.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHAB', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The output workspace for the high-angle bank.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceMerged', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The output workspace for the merged reduction.')
+        self.setPropertyGroup("OutScaleFactor", 'Output')
+        self.setPropertyGroup("OutShiftFactor", 'Output')
+        self.setPropertyGroup("OutputWorkspaceLAB", 'Output')
+        self.setPropertyGroup("OutputWorkspaceHAB", 'Output')
+        self.setPropertyGroup("OutputWorkspaceMerged", 'Output')
+
+        # CAN output
+        # We want to output the can workspaces since they can be persited in the case of optimizations
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLABCan', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can output workspace for the low-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLABCanCount', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can count output workspace for the low-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceLABCanNorm', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can norm output workspace for the low-angle bank, provided there is one.')
+
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHABCan', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can output workspace for the high-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHABCanCount', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can count output workspace for the high-angle bank, provided there is one.')
+        self.declareProperty(MatrixWorkspaceProperty('OutputWorkspaceHABCanNorm', '',
+                                                     optional=PropertyMode.Optional, direction=Direction.Output),
+                             doc='The can norm output workspace for the high-angle bank, provided there is one.')
+        self.setPropertyGroup("OutputWorkspaceLABCan", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceLABCanCount", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceLABCanNorm", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceHABCan", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceHABCanCount", 'Can Output')
+        self.setPropertyGroup("OutputWorkspaceHABCanNorm", 'Can Output')
+
+    def PyExec(self):
+        # Get state
+        state = self._get_state()
+
+        # Get reduction mode
+        overall_reduction_mode = self._get_reduction_mode(state)
+
+        # Decide which core reduction information to run, i.e. HAB, LAB, ALL, MERGED. In the case of ALL and MERGED,
+        # the required simple reduction modes need to be run. Normally this is HAB and LAB, future implementations
+        # might have more detectors though (or different types)
+        reduction_setting_bundles = self._get_reduction_setting_bundles(state, overall_reduction_mode)
+
+        # Run core reductions
+        use_optimizations = self.getProperty("UseOptimizations").value
+
+        # Create the reduction core algorithm
+        reduction_name = "SANSReductionCore"
+        reduction_options = {}
+        reduction_alg = create_child_algorithm(self, reduction_name, **reduction_options)
+
+        # Set up progress
+        progress = self._get_progress(len(reduction_setting_bundles), overall_reduction_mode)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Reduction
+        # --------------------------------------------------------------------------------------------------------------
+        output_bundles = []
+        output_parts_bundles = []
+
+        for reduction_setting_bundle in reduction_setting_bundles:
+            progress.report("Running a single reduction ...")
+            # We want to make use of optimizations here. If a can workspace has already been reduced with the same can
+            # settings and is stored in the ADS, then we should use it (provided the user has optimizations enabled).
+            if use_optimizations and reduction_setting_bundle.data_type is DataType.Can:
+                output_bundle, output_parts_bundle = run_optimized_for_can(reduction_alg,
+                                                                           reduction_setting_bundle)
+            else:
+                output_bundle, output_parts_bundle = run_core_reduction(reduction_alg,
+                                                                        reduction_setting_bundle)
+            output_bundles.append(output_bundle)
+            output_parts_bundles.append(output_parts_bundle)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Deal with merging
+        # --------------------------------------------------------------------------------------------------------------
+        reduction_mode_vs_output_workspaces = {}
+        # Merge if required with stitching etc.
+        if overall_reduction_mode is ReductionMode.Merged:
+            progress.report("Merging reductions ...")
+            merge_bundle = get_merge_bundle_for_merge_request(output_parts_bundles, self)
+            self.set_shift_and_scale_output(merge_bundle)
+            reduction_mode_vs_output_workspaces.update({ReductionMode.Merged: merge_bundle.merged_workspace})
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Deal with non-merged
+        # Note that we have non-merged workspaces even in the case of a merged reduction, ie LAB and HAB results
+        # --------------------------------------------------------------------------------------------------------------
+        progress.report("Final clean up...")
+        output_workspaces_non_merged = get_final_output_workspaces(output_bundles, self)
+        reduction_mode_vs_output_workspaces.update(output_workspaces_non_merged)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Set the output workspaces
+        # --------------------------------------------------------------------------------------------------------------
+        # Set sample logs
+        # Todo: Set sample log -> Userfile and unfitted transmission workspace. Should probably set on
+        # higher level (SANSBatch)
+        # Set the output workspaces
+        self.set_output_workspaces(reduction_mode_vs_output_workspaces)
+
+        # --------------------------------------------------------------------------------------------------------------
+        # Set the reduced can workspaces on the output if optimizations are
+        # enabled. This will allow SANSBatchReduction to add them to the ADS.
+        # --------------------------------------------------------------------------------------------------------------
+        if use_optimizations:
+            self.set_reduced_can_workspace_on_output(output_bundles, output_parts_bundles)
+
+    def validateInputs(self):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        try:
+            state = self._get_state()
+            state.validate()
+        except ValueError as err:
+            errors.update({"SANSSingleReduction": str(err)})
+        return errors
+
+    def _get_state(self):
+        state_property_manager = self.getProperty("SANSState").value
+        state = create_deserialized_sans_state_from_property_manager(state_property_manager)
+        state.property_manager = state_property_manager
+        return state
+
+    def _get_reduction_mode(self, state):
+        reduction_info = state.reduction
+        reduction_mode = reduction_info.reduction_mode
+        return reduction_mode
+
+    def _get_reduction_setting_bundles(self, state, reduction_mode):
+        # We need to output the parts if we request a merged reduction mode. This is necessary for stitching later on.
+        output_parts = reduction_mode is ReductionMode.Merged
+
+        # If the reduction mode is MERGED, then we need to make sure that all reductions for that selection
+        # are executed, i.e. we need to split it up
+        if reduction_mode is ReductionMode.Merged:
+            # If we are dealing with a merged reduction we need to know which detectors should be merged.
+            reduction_info = state.reduction
+            reduction_modes = reduction_info.get_merge_strategy()
+        elif reduction_mode is ReductionMode.All:
+            reduction_info = state.reduction
+            reduction_modes = reduction_info.get_all_reduction_modes()
+        else:
+            reduction_modes = [reduction_mode]
+
+        # Create the Scatter information
+        sample_info = self._create_reduction_bundles_for_data_type(state=state,
+                                                                   data_type=DataType.Sample,
+                                                                   reduction_modes=reduction_modes,
+                                                                   output_parts=output_parts,
+                                                                   scatter_name="SampleScatterWorkspace",
+                                                                   scatter_monitor_name="SampleScatterMonitorWorkspace",
+                                                                   transmission_name="SampleTransmissionWorkspace",
+                                                                   direct_name="SampleDirectWorkspace")
+
+        # Create the Can information
+        can_info = self._create_reduction_bundles_for_data_type(state=state,
+                                                                data_type=DataType.Can,
+                                                                reduction_modes=reduction_modes,
+                                                                output_parts=output_parts,
+                                                                scatter_name="CanScatterWorkspace",
+                                                                scatter_monitor_name="CanScatterMonitorWorkspace",
+                                                                transmission_name="CanTransmissionWorkspace",
+                                                                direct_name="CanDirectWorkspace")
+        reduction_setting_bundles = sample_info
+
+        # Make sure that the can information has at least a scatter and a monitor workspace
+        for can_bundle in can_info:
+            if can_bundle.scatter_workspace is not None and can_bundle.scatter_monitor_workspace is not None:
+                reduction_setting_bundles.append(can_bundle)
+        return reduction_setting_bundles
+
+    def _create_reduction_bundles_for_data_type(self, state, data_type, reduction_modes, output_parts,
+                                                scatter_name, scatter_monitor_name, transmission_name, direct_name):
+        # Get workspaces
+        scatter_workspace = self.getProperty(scatter_name).value
+
+        scatter_monitor_workspace = self.getProperty(scatter_monitor_name).value
+        transmission_workspace = self.getProperty(transmission_name).value
+        direct_workspace = self.getProperty(direct_name).value
+
+        # Iterate over all requested reduction types, i.e. LAB, HAB, ..
+        reduction_setting_bundles = []
+        for reduction_mode in reduction_modes:
+            reduction_setting_bundle = ReductionSettingBundle(state=state,
+                                                              data_type=data_type,
+                                                              reduction_mode=reduction_mode,
+                                                              output_parts=output_parts,
+                                                              scatter_workspace=scatter_workspace,
+                                                              scatter_monitor_workspace=scatter_monitor_workspace,
+                                                              transmission_workspace=transmission_workspace,
+                                                              direct_workspace=direct_workspace)
+            reduction_setting_bundles.append(reduction_setting_bundle)
+        return reduction_setting_bundles
+
+    def set_shift_and_scale_output(self, merge_bundle):
+        self.setProperty("OutScaleFactor", merge_bundle.scale)
+        self.setProperty("OutShiftFactor", merge_bundle.shift)
+
+    def set_output_workspaces(self, reduction_mode_vs_output_workspaces):
+        """
+        Sets the output workspaces which can be HAB, LAB or Merged.
+
+        At this step we also provide a workspace name to the sample logs which can be used later on for saving
+        :param reduction_mode_vs_output_workspaces:  map from reduction mode to output workspace
+        """
+        # Note that this breaks the flexibility that we have established with the reduction mode. We have not hardcoded
+        # HAB or LAB anywhere which means that in the future there could be other detectors of relevance. Here we
+        # reference HAB and LAB directly since we currently don't want to rely on dynamic properties. See also in PyInit
+        for reduction_mode, output_workspace in list(reduction_mode_vs_output_workspaces.items()):
+            if reduction_mode is ReductionMode.Merged:
+                self.setProperty("OutputWorkspaceMerged", output_workspace)
+            elif reduction_mode is ISISReductionMode.LAB:
+                self.setProperty("OutputWorkspaceLAB", output_workspace)
+            elif reduction_mode is ISISReductionMode.HAB:
+                self.setProperty("OutputWorkspaceHAB", output_workspace)
+            else:
+                raise RuntimeError("SANSSingleReduction: Cannot set the output workspace. The selected reduction "
+                                   "mode {0} is unknown.".format(reduction_mode))
+
+    def set_reduced_can_workspace_on_output(self, output_bundles, output_bundles_part):
+        """
+        Sets the reduced can workspaces on the output properties.
+
+        The reduced can workspaces can be:
+        1. LAB Can
+        2. LAB Can Count
+        3. LAB Can Norm
+        4. HAB Can
+        5. HAB Can Count
+        6. HAB Can Norm
+        :param output_bundles: a list of output bundles
+        :param output_bundles_part: a list of partial output bundles
+        """
+        # Find the LAB Can and HAB Can entries if they exist
+        for output_bundle in output_bundles:
+            if output_bundle.data_type is DataType.Can:
+                reduction_mode = output_bundle.reduction_mode
+                output_workspace = output_bundle.output_workspace
+                # Make sure that the output workspace is not None which can be the case if there has never been a
+                # can set for the reduction.
+                if output_workspace is not None and not does_can_workspace_exist_on_ads(output_workspace):
+                    if reduction_mode is ISISReductionMode.LAB:
+                        self.setProperty("OutputWorkspaceLABCan", output_workspace)
+                    elif reduction_mode is ISISReductionMode.HAB:
+                        self.setProperty("OutputWorkspaceHABCan", output_bundle.output_workspace)
+                    else:
+                        raise RuntimeError("SANSSingleReduction: The reduction mode {0} should not"
+                                           " be set with a can.".format(reduction_mode))
+
+        # Find the partial output bundles fo LAB Can and HAB Can if they exist
+        for output_bundle_part in output_bundles_part:
+            if output_bundle_part.data_type is DataType.Can:
+                reduction_mode = output_bundle_part.reduction_mode
+                output_workspace_count = output_bundle_part.output_workspace_count
+                output_workspace_norm = output_bundle_part.output_workspace_norm
+                # Make sure that the output workspace is not None which can be the case if there has never been a
+                # can set for the reduction.
+                if output_workspace_norm is not None and output_workspace_count is not None and \
+                        not does_can_workspace_exist_on_ads(output_workspace_norm) and \
+                        not does_can_workspace_exist_on_ads(output_workspace_count):
+                    if reduction_mode is ISISReductionMode.LAB:
+                        self.setProperty("OutputWorkspaceLABCanCount", output_workspace_count)
+                        self.setProperty("OutputWorkspaceLABCanNorm", output_workspace_norm)
+                    elif reduction_mode is ISISReductionMode.HAB:
+                        self.setProperty("OutputWorkspaceHABCanCount", output_workspace_count)
+                        self.setProperty("OutputWorkspaceHABCanNorm", output_workspace_norm)
+                    else:
+                        raise RuntimeError("SANSSingleReduction: The reduction mode {0} should not"
+                                           " be set with a partial can.".format(reduction_mode))
+
+    def _get_progress(self, number_of_reductions, overall_reduction_mode):
+        number_from_merge = 1 if overall_reduction_mode is ReductionMode.Merged else 0
+        number_of_progress_reports = number_of_reductions + number_from_merge + 1
+        return Progress(self, start=0.0, end=1.0, nreports=number_of_progress_reports)
+
+
+# Register algorithm with Mantid
+AlgorithmFactory.subscribe(SANSSingleReduction)
diff --git a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py
index 4cc5ad0f1f3068c3d263b011525f99b84dd20a17..ce1208b881bd81272b54916db3c7e11f9afcb3c1 100644
--- a/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py
+++ b/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SANSStitch.py
@@ -161,7 +161,8 @@ class SANSStitch(DataProcessorAlgorithm):
         # We want: (Cf+shift*Nf+Cr)/(Nf/scale + Nr)
         shifted_norm_front = self._scale(nF, shift_factor)
         scaled_norm_front = self._scale(nF, 1.0 / scale_factor)
-        numerator = self._add(self._add(cF, shifted_norm_front), cR)
+        add_counts_and_shift = self._add(cF, shifted_norm_front)
+        numerator = self._add(add_counts_and_shift, cR)
         denominator = self._add(scaled_norm_front, nR)
         merged_q = self._divide(numerator, denominator)
         return merged_q
@@ -199,7 +200,10 @@ class SANSStitch(DataProcessorAlgorithm):
         x_vals = ws.readX(0)
         start_x = x_vals[start]
         # Make sure we're inside the bin that we want to crop
-        end_x = x_vals[stop + 1]
+        if len(y_vals) == len(x_vals):
+            end_x = x_vals[stop]
+        else:
+            end_x = x_vals[stop + 1]
         return self._crop_to_x_range(ws=ws,x_min=start_x, x_max=end_x)
 
     def _run_fit(self, q_high_angle, q_low_angle, scale_factor, shift_factor):
diff --git a/Framework/PythonInterface/plugins/algorithms/dnsdata.py b/Framework/PythonInterface/plugins/algorithms/dnsdata.py
index 667e20f8dad35ad44a79cf461bb5d73d0712cf68..c94073886c40873816d95c1fb47c9985f266993c 100644
--- a/Framework/PythonInterface/plugins/algorithms/dnsdata.py
+++ b/Framework/PythonInterface/plugins/algorithms/dnsdata.py
@@ -1,7 +1,7 @@
 # pylint: disable=too-many-instance-attributes,too-few-public-methods
 from __future__ import (absolute_import, division, print_function)
 import re
-import datetime
+from dateutil.parser import parse
 
 
 class DNSdata(object):
@@ -182,11 +182,14 @@ class DNSdata(object):
             if self.tof_channel_number > 1:
                 self.tof_channel_width = float(b6splitted[3].split()[3])
                 self.tof_delay_time = float(b6splitted[4].split()[2])
-                self.tof_elastic_channel = int(b6splitted[6].split()[3])
+                if len(b6splitted[6].split()) > 3:
+                    self.tof_elastic_channel = int(b6splitted[6].split()[3])
                 # chopper rotation speed
-                self.chopper_rotation_speed = float(b6splitted[7].split()[2])
+                if len(b6splitted[7].split()) > 2:
+                    self.chopper_rotation_speed = float(b6splitted[7].split()[2])
                 # chopper number of slits
-                self.chopper_slits = int(b6splitted[5].split()[2])
+                if len(b6splitted[5].split()) > 2:
+                    self.chopper_slits = int(b6splitted[5].split()[2])
 
             # parse block 7 (Time and monitor)
             # assume everything to be at the fixed positions
@@ -203,11 +206,9 @@ class DNSdata(object):
             self.monitor_counts = int(line[1])
             # start_time and end_time (if specified)
             outfmt = "%Y-%m-%dT%H:%M:%S"
-            sinfmt = "start   at %a %b  %d %H:%M:%S %Y"
-            einfmt = "stopped at %a %b  %d %H:%M:%S %Y"
             try:
-                self.start_time = datetime.datetime.strptime(b7splitted[5], sinfmt).strftime(outfmt)
-                self.end_time = datetime.datetime.strptime(b7splitted[6], einfmt).strftime(outfmt)
+                self.start_time = parse(b7splitted[5][10:].strip()).strftime(outfmt)
+                self.end_time = parse(b7splitted[6][10:].strip()).strftime(outfmt)
             except ValueError:
                 # if start and end time are not given, let them empty
                 pass
diff --git a/Framework/PythonInterface/test/python/mantid/api/AlgorithmManagerTest.py b/Framework/PythonInterface/test/python/mantid/api/AlgorithmManagerTest.py
index 9eababd3b7bda1f98bd8a127e54a36ba6176475b..5f5823d63a87f405503cfaf61cb8dcc1049c64de 100644
--- a/Framework/PythonInterface/test/python/mantid/api/AlgorithmManagerTest.py
+++ b/Framework/PythonInterface/test/python/mantid/api/AlgorithmManagerTest.py
@@ -15,6 +15,7 @@ class AlgorithmManagerTest(unittest.TestCase):
         self.assertEquals(alg.name(), "ConvertUnits")
         self.assertEquals(alg.version(), 1)
         self.assertEquals(alg.category(), "Transforms\\Units")
+        self.assertEquals(alg.helpURL(), "")
 
     def test_create_unknown_alg_throws(self):
         self.assertRaises(RuntimeError, AlgorithmManager.create,"DoesNotExist")
diff --git a/Framework/PythonInterface/test/python/mantid/api/AlgorithmTest.py b/Framework/PythonInterface/test/python/mantid/api/AlgorithmTest.py
index 96e223076e53debf067f23b498f28cf25a164cb9..e93b83e27edb3d123e5d575c5fba3a25e8aad784 100644
--- a/Framework/PythonInterface/test/python/mantid/api/AlgorithmTest.py
+++ b/Framework/PythonInterface/test/python/mantid/api/AlgorithmTest.py
@@ -21,6 +21,7 @@ class AlgorithmTest(unittest.TestCase):
         self.assertEquals('DataHandling', self._load.category())
         self.assertEquals(1, len(self._load.categories()))
         self.assertEquals('DataHandling', self._load.categories()[0])
+        self.assertEquals('', self._load.helpURL())
 
     def test_get_unknown_property_raises_error(self):
         self.assertRaises(RuntimeError, self._load.getProperty, "NotAProperty")
diff --git a/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py b/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py
index 4a3fed4636a223f92a55e95a4721aa3392bee9da..ca268465222b8e65528a80082f0ff1f5d9b83412 100644
--- a/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py
+++ b/Framework/PythonInterface/test/python/mantid/api/IFunction1DTest.py
@@ -19,6 +19,7 @@ class Times2(IFunction1D):
         self.declareAttribute("DoubleAtt", 3.4)
         self.declareAttribute("StringAtt", "filename")
         self.declareAttribute("BoolAtt", True)
+        self.declareAttribute("ListAtt", [1, 2, 3])
 
         self.declareParameter("ParamZeroInitNoDescr")
         self.declareParameter("ParamNoDescr", 1.5)
@@ -55,8 +56,8 @@ class IFunction1DTest(unittest.TestCase):
     def test_declareAttribute_only_accepts_known_types(self):
         func = Times2()
         func.initialize() # Contains known types
-        self.assertEquals(4, func.nAttributes()) # Make sure initialize ran
-        self.assertRaises(ValueError, func.declareAttribute, "ListAtt", [1,2,3])
+        self.assertEquals(5, func.nAttributes()) # Make sure initialize ran
+        self.assertRaises(ValueError, func.declareAttribute, "DictAtt", {1,2,3})
 
     def test_correct_attribute_values_are_returned_when_asked(self):
         func = Times2()
diff --git a/Framework/PythonInterface/test/python/mantid/api/PythonAlgorithmTraitsTest.py b/Framework/PythonInterface/test/python/mantid/api/PythonAlgorithmTraitsTest.py
index 23d67225cf8cab8d0ccc0c4ded6eb0925232d7a5..bbbf4a8895ffffb87378dbc36a61a914428de1d9 100644
--- a/Framework/PythonInterface/test/python/mantid/api/PythonAlgorithmTraitsTest.py
+++ b/Framework/PythonInterface/test/python/mantid/api/PythonAlgorithmTraitsTest.py
@@ -28,6 +28,9 @@ class TestPyAlgOverriddenAttrs(PythonAlgorithm):
     def category(self):
         return "BestAlgorithms"
 
+    def helpURL(self):
+        return "Optional documentation URL"
+
     def isRunning(self):
         return True
 
@@ -106,6 +109,7 @@ class PythonAlgorithmTest(unittest.TestCase):
         self.assertEquals(alg.name(), "TestPyAlgOverriddenAttrs")
         self.assertEquals(alg.version(), 2)
         self.assertEquals(alg.category(), "BestAlgorithms")
+        self.assertEquals(alg.helpURL(), "Optional documentation URL")
 
     def test_alg_can_be_cancelled(self):
         alg = AlgorithmManager.createUnmanaged("CancellableAlg")
diff --git a/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py b/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py
index 53d9b21d8bbc4ca8eecc4bc58183d1af0dfb9d87..2ee8306b50c7fec572c5eb2e49e299c6fbcf32ef 100644
--- a/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py
+++ b/Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py
@@ -25,7 +25,7 @@ class UnitFactoryTest(unittest.TestCase):
         core_units = ['Empty', 'Label', 'TOF', 'Wavelength','Energy',
                       'Energy_inWavenumber', 'dSpacing', 'MomentumTransfer',
                       'QSquared', 'DeltaE', 'DeltaE_inWavenumber',
-                      'DeltaE_inFrequency', 'Momentum']
+                      'DeltaE_inFrequency', 'Momentum', 'dSpacingPerpendicular']
         self.assertTrue(len(core_units) <= len(known_units))
 
         for unit in core_units:
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
index f973f4d3a2337991f5d4bda267a718e457b6e58c..e8e28380fccd8978f2c604c1d41ec0f6d770e220 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
@@ -94,6 +94,7 @@ set ( TEST_PY_FILES
   VesuvioTOFFitTest.py
   PoldiCreatePeaksFromFileTest.py
   LoadCIFTest.py
+  SaveYDATest.py
 )
 
 check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
index 14eef9a18081873906761dbcc2e0b700cbdc2708..8fe71f71d0b0d2add3b42dff6629dab24dd6e729 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/FindEPPTest.py
@@ -24,7 +24,7 @@ class FindEPPTest(unittest.TestCase):
     def testTable(self):
         # tests that correct table is created
         OutputWorkspaceName = "outputws1"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(2, wsoutput.rowCount())
@@ -38,7 +38,7 @@ class FindEPPTest(unittest.TestCase):
         ws2 = CloneWorkspace(self._input_ws)
         group = GroupWorkspaces([self._input_ws, ws2])
         OutputWorkspaceName = "output_wsgroup"
-        alg_test = run_algorithm("FindEPP", InputWorkspace='group', OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace='group', OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertTrue(isinstance(wsoutput, WorkspaceGroup))
@@ -50,7 +50,7 @@ class FindEPPTest(unittest.TestCase):
     def testFitSuccess(self):
         # tests successful fit
         OutputWorkspaceName = "outputws2"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(['success', 'success'], wsoutput.column(8))
@@ -71,7 +71,7 @@ class FindEPPTest(unittest.TestCase):
                                           NumBanks=2, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1)
 
         OutputWorkspaceName = "outputws3"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_linear, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_linear, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(['failed', 'failed'], wsoutput.column(8))
@@ -89,7 +89,7 @@ class FindEPPTest(unittest.TestCase):
                                           NumBanks=2, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1)
 
         OutputWorkspaceName = "outputws4"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_narrow, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=ws_narrow, OutputWorkspace=OutputWorkspaceName, Version=1)
         self.assertTrue(alg_test.isExecuted())
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         self.assertEqual(['failed', 'failed'], wsoutput.column(8))
@@ -103,7 +103,7 @@ class FindEPPTest(unittest.TestCase):
 
     def testFitOutputWorkspacesAreDeleted(self):
         OutputWorkspaceName = "outputws1"
-        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName)
+        alg_test = run_algorithm("FindEPP", InputWorkspace=self._input_ws, OutputWorkspace=OutputWorkspaceName, Version=1)
         wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
         DeleteWorkspace(wsoutput)
         oldOption = mantid.config['MantidOptions.InvisibleWorkspaces']
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py
index b8e136fddb23b998332ad6b7e9ecae40b45ded2d..0fdbab965bf4b2a2e1ccc12fe25085ec5d5494df 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/LoadCIFTest.py
@@ -61,6 +61,10 @@ class SpaceGroupBuilderTest(unittest.TestCase):
 
         self.assertEqual(fn('P m -3 m :1'), 'P m -3 m')
         self.assertEqual(fn('P m -3 m :H'), 'P m -3 m')
+        self.assertEqual(fn('F d -3 m S'), 'F d -3 m')
+        self.assertEqual(fn('F d -3 m Z'), 'F d -3 m :2')
+        self.assertEqual(fn('R 3 H'), 'R 3')
+        self.assertEqual(fn('R 3 R'), 'R 3 :r')
 
     def test_getSpaceGroupFromNumber_invalid(self):
         invalid_old = {u'_symmetry_int_tables_number': u'400'}
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py
index 059b81760cd61e57d5fae6d26fccccb8525afdf5..e82f2ead43cc8554d74cb51eed0edc608d5f1a66 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/LoadDNSLegacyTest.py
@@ -36,7 +36,7 @@ class LoadDNSLegacyTest(unittest.TestCase):
         self.assertEqual(-8.54, run.getProperty('deterota').value)
         self.assertEqual(8332872, run.getProperty('mon_sum').value)
         self.assertEqual('z', run.getProperty('polarisation').value)
-        self.assertEqual('7', run.getProperty('polarisation_comment').value)
+        self.assertEqual('7', str(run.getProperty('polarisation_comment').value))
         self.assertEqual('no', run.getProperty('normalized').value)
         # check whether detector bank is rotated
         det = ws.getDetector(0)
@@ -128,5 +128,64 @@ class LoadDNSLegacyTest(unittest.TestCase):
         run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
         return
 
+    def test_LoadNoCurtable(self):
+        outputWorkspaceName = "LoadDNSLegacyTest_Test6"
+        filename = "dn134011vana.d_dat"
+        alg_test = run_algorithm("LoadDNSLegacy", Filename=filename, Normalization='no',
+                                 OutputWorkspace=outputWorkspaceName)
+        self.assertTrue(alg_test.isExecuted())
+
+        # Verify some values
+        ws = AnalysisDataService.retrieve(outputWorkspaceName)
+        # dimensions
+        self.assertEqual(24, ws.getNumberHistograms())
+        self.assertEqual(2,  ws.getNumDims())
+        # data array
+        self.assertEqual(31461, ws.readY(1))
+        self.assertEqual(13340, ws.readY(23))
+        # sample logs
+        run = ws.getRun()
+        self.assertEqual(-8.54, run.getProperty('deterota').value)
+        self.assertEqual(8332872, run.getProperty('mon_sum').value)
+        self.assertEqual('z', run.getProperty('polarisation').value)
+        self.assertEqual('7', str(run.getProperty('polarisation_comment').value))
+        self.assertEqual('no', run.getProperty('normalized').value)
+        # check whether detector bank is rotated
+        det = ws.getDetector(0)
+        self.assertAlmostEqual(8.54, ws.detectorSignedTwoTheta(det)*180/pi)
+        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
+        return
+
+    def test_LoadTOF(self):
+        outputWorkspaceName = "LoadDNSLegacyTest_Test7"
+        filename = "dnstof.d_dat"
+        tof1 = 385.651     # must be changed if L1 will change
+        alg_test = run_algorithm("LoadDNSLegacy", Filename=filename, Normalization='no',
+                                 OutputWorkspace=outputWorkspaceName)
+        self.assertTrue(alg_test.isExecuted())
+
+        # Verify some values
+        ws = AnalysisDataService.retrieve(outputWorkspaceName)
+        # dimensions
+        self.assertEqual(24, ws.getNumberHistograms())
+        self.assertEqual(100,  ws.getNumberBins())
+        # data array
+        self.assertEqual(1, ws.readY(19)[5])
+        self.assertAlmostEqual(tof1, ws.readX(0)[0], 3)
+        self.assertAlmostEqual(tof1+802.0*100, ws.readX(0)[100], 3)
+        # sample logs
+        run = ws.getRun()
+        self.assertEqual(-7.5, run.getProperty('deterota').value)
+        self.assertEqual(100, run.getProperty('tof_channels').value)
+        self.assertEqual(51428, run.getProperty('mon_sum').value)
+        self.assertEqual('z', run.getProperty('polarisation').value)
+        self.assertEqual('7', str(run.getProperty('polarisation_comment').value))
+        self.assertEqual('no', run.getProperty('normalized').value)
+        # check whether detector bank is rotated
+        det = ws.getDetector(0)
+        self.assertAlmostEqual(7.5, ws.detectorSignedTwoTheta(det)*180/pi)
+        run_algorithm("DeleteWorkspace", Workspace=outputWorkspaceName)
+        return
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/SaveYDATest.py b/Framework/PythonInterface/test/python/plugins/algorithms/SaveYDATest.py
new file mode 100644
index 0000000000000000000000000000000000000000..94e273f871611ff59e933e9b6a6b03fcffdb2cca
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/SaveYDATest.py
@@ -0,0 +1,243 @@
+from __future__ import (absolute_import, division, print_function)
+
+import mantid
+from mantid.api import mtd
+from mantid.simpleapi import CreateWorkspace, CreateSampleWorkspace, SaveYDA, ConvertSpectrumAxis, \
+    LoadInstrument, AddSampleLog
+import numpy as np
+import os
+import unittest
+
+
+class SaveYDATest(unittest.TestCase):
+
+    def setUp(self):
+
+        self.prop_num = 3
+        self.prop_title = "PropTitle"
+        self.exp_team = "Experiment Team"
+        self.temperature = 100.0
+        self.Ei = 1.0
+        self.data_x = range(1, 5)
+        self.data_y = [2.0, 3.0, 4.0]
+        self._n_ws = self._create_workspace()
+        self._n_file = self._file(self._n_ws, "File")
+        ws = self._create_workspace(sample=False)
+        self._no_sample_file = self._file(ws, "noSampleFile")
+
+    def cleanup(self, ws_name, filename):
+
+        if os.path.exists(filename):
+            os.remove(filename)
+        if mantid.mtd.doesExist(ws_name):
+            mantid.api.AnalysisDataService.remove(ws_name)
+
+    def test_meta_data(self):
+        """ Test to save Meta data from workspace with all sample logs needed by SaveYDA
+        """
+        meta = []
+        # read form file
+        for i in range(3):
+            meta.append(self._n_file.readline())
+        # verify values
+        self.assertEqual(meta[0], "Meta:\n")
+        self.assertEqual(meta[1], "  format: yaml/frida 2.0\n")
+        self.assertEqual(meta[2], "  type: generic tabular data\n")
+
+    def test_history_all_samples(self):
+        """ Test to save history from workspace with all sample logs
+        """
+        history = []
+        for i in range(0, 8):
+            s = self._n_file.readline()
+            if i >= 3:
+                history.append(s)
+
+        self.assertEqual(history[0], "History:\n")
+        self.assertEqual(history[1], "  - Proposal number " + str(self.prop_num) + "\n")
+        self.assertEqual(history[2], "  - " + self.prop_title + "\n")
+        self.assertEqual(history[3], "  - " + self.exp_team + "\n")
+        self.assertEqual(history[4], "  - data reduced with mantid\n")
+
+    def test_history_no_samples(self):
+        """ Test save history from workspace without needed sample logs
+        """
+        history = []
+        for i in range(0, 5):
+            s = self._no_sample_file.readline()
+            if i >= 3:
+                history.append(s)
+
+        self.assertEqual(history[0], "History:\n")
+        self.assertEqual(history[1], "  - data reduced with mantid\n")
+
+    def test_coord(self):
+        """ Test save coordinates from workspace with all sample logs
+        """
+        coord = []
+        # Y axis is SpectrumAxis
+        for i in range(0, 12):
+            s = self._n_file.readline()
+            if i >= 8:
+                coord.append(s)
+
+        self.assertEqual(coord[0], "Coord:\n")
+        self.assertEqual(coord[1], "  x: {name: w, unit: meV}\n")
+        self.assertEqual(coord[2], "  y: {name: \'S(q,w)\', unit: meV-1}\n")
+        self.assertEqual(coord[3], "  z: [{name: 2th, unit: deg}]\n")
+
+        ws = self._create_workspace(yAxSpec=False)
+        f = self._file(ws, "File")
+        coord = []
+        # Y axis is NumericAxis in q units
+        for i in range(0, 12):
+            s = f.readline()
+            if i >= 8:
+                coord.append(s)
+
+        self.assertEqual(coord[0], "Coord:\n")
+        self.assertEqual(coord[1], "  x: {name: w, unit: meV}\n")
+        self.assertEqual(coord[2], "  y: {name: \'S(q,w)\', unit: meV-1}\n")
+        self.assertEqual(coord[3], "  z: [{name: q, unit: A-1}]\n")
+
+    def test_rpar(self):
+        """ Test save RPar from workspace with and without sample logs
+        """
+        r_par = []
+        # workspace with all ample logs
+        for i in range(21):
+            s = self._n_file.readline()
+            if i >= 12:
+                r_par.append(s)
+
+        self.assertEqual(r_par[0], "RPar:\n")
+        self.assertEqual(r_par[1], "  - name: T\n")
+        self.assertEqual(r_par[2], "    unit: K\n")
+        self.assertEqual(r_par[3], "    val: " + str(self.temperature) + "\n")
+        self.assertEqual(r_par[4], "    stdv: 0\n")
+        self.assertEqual(r_par[5], "  - name: Ei\n")
+        self.assertEqual(r_par[6], "    unit: meV\n")
+        self.assertEqual(r_par[7], "    val: " + str(self.Ei) + "\n")
+        self.assertEqual(r_par[8], "    stdv: 0\n")
+
+        r_par = []
+        # workspace with no sample logs
+        for i in range(10):
+            s = self._no_sample_file.readline()
+            if i >= 9:
+                r_par.append(s)
+
+        self.assertEqual(r_par[0], "RPar: []\n")
+
+    def test_slices(self):
+        """ Test save slices from workspace with no sample logs
+        """
+        slices = []
+        for i in range(15):
+            s = self._no_sample_file.readline()
+            if i >= 10:
+                slices.append(s)
+
+        self.assertEqual(slices[0], "Slices:\n")
+        self.assertEqual(slices[1], "  - j: 0\n")
+        self.assertTrue(slices[2].startswith("    z: [{val: 14.1499"))
+        self.assertTrue(slices[2].endswith("}]\n"))
+        self.assertEqual(slices[3], "    x: [" + str((self.data_x[0] + self.data_x[1]) / 2) + ", "
+                         + str((self.data_x[1] + self.data_x[2]) / 2) + ", " + str((self.data_x[2] + self.data_x[3]) / 2)
+                         + "]" + "\n")
+        self.assertEqual(slices[4], "    y: " + str(self.data_y) + "\n")
+
+    def test_event_ws(self):
+        """ Test algorithm is not running with EventWorkspace
+        """
+        ws = self._create_workspace(False)
+        self.assertRaises(RuntimeError, SaveYDA, InputWorkspace=ws, Filename="File")
+
+    def test_x_not_detaE(self):
+        """ Test algorithm is not running if X axis is not DeltaE
+        """
+        ws = self._create_workspace(xAx=False)
+        self.assertRaises(ValueError, SaveYDA, InputWorkspace=ws, Filename="File")
+
+    def test_no_Instrument(self):
+        """ Test algorithm is not running is workspace has no instrument
+        """
+        ws = self._create_workspace(instrument=False)
+        self.assertRaises(ValueError, SaveYDA, InputWorkspace=ws, Filename="File")
+
+    def test_y_not_mt_or_spec(self):
+        """ Test algorithm is not running if Y axis is not SpectrumAxis or MomentumTransfer
+        """
+        ws = self._create_workspace(yAxMt=False, yAxSpec=False)
+        self.assertRaises(RuntimeError, SaveYDA, InputWorkspace=ws, Filename="File")
+
+    def _init_ws_normal(self):
+        """ init normal workspace, normal workspace is workspace with all sample logs and save file from workspace
+        """
+        self._n_ws = self._create_workspace()
+        self._n_file = self._file(self._n_ws, "normalFile")
+
+    def _add_all_sample_logs(self, ws):
+        """ add all sample logs to a workspace
+        :param ws: workspace where sample logs should be added
+        """
+        AddSampleLog(ws, "proposal_number", str(self.prop_num))
+        AddSampleLog(ws, "proposal_title", self.prop_title)
+        AddSampleLog(ws, "experiment_team", self.exp_team)
+        AddSampleLog(ws, "temperature", str(self.temperature), LogUnit="F")
+        AddSampleLog(ws, "Ei", str(self.Ei), LogUnit="meV")
+
+    def _file(self, ws, filename):
+        """ create file form workspace and open to read from the file
+        :param ws: workspace file will be saved from
+        :param filename: name of the file to save
+        :return f: open file
+        """
+        SaveYDA(InputWorkspace=ws, Filename=filename)
+        f = open(filename, "r")
+        return f
+
+    def _create_workspace(self, ws_2D=True, sample=True, xAx=True, yAxSpec=True,
+                          yAxMt=True, instrument=True):
+        """ create Workspace
+        :param ws_2D: should workspace be 2D?
+        :param sample: should workspace have sample logs?
+        :param xAx: should x axis be DeltaE?
+        :param yAxMt: should y axis be MomentumTransfer?
+        :param yAxSpec: should y axis be SpectrumAxis?
+        :param instrument: should workspace have a instrument?
+        """
+        # Event Workspace
+        if not ws_2D:
+            ws = CreateSampleWorkspace("Event", "One Peak", XUnit="DeltaE")
+            return ws
+        if not xAx:
+            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="TOF")
+            return ws
+        if not instrument:
+            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
+            return ws
+        if not yAxMt and not yAxSpec:
+            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
+            LoadInstrument(ws, False, InstrumentName="TOFTOF")
+            ConvertSpectrumAxis(InputWorkspace=ws, OutputWorkspace=ws, Target="theta", EMode="Direct")
+            return ws
+        if not yAxSpec and yAxMt:
+            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
+            LoadInstrument(ws, False, InstrumentName="TOFTOF")
+            self._add_all_sample_logs(ws)
+            ConvertSpectrumAxis(InputWorkspace=ws, OutputWorkspace="ws2", Target ="ElasticQ", EMode="Direct")
+            ws2 = mtd["ws2"]
+            return ws2
+        if not sample:
+            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
+            LoadInstrument(ws, False, InstrumentName="TOFTOF")
+            return ws
+        else:
+            ws = CreateWorkspace(DataX=self.data_x, DataY=self.data_y, DataE=np.sqrt(self.data_y), NSpec=1, UnitX="DeltaE")
+            LoadInstrument(ws, False, InstrumentName="TOFTOF")
+            self._add_all_sample_logs(ws)
+            return ws
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py
index e666e353815b51c272cb49c7589b89bc8acd97a3..518934434cb49f12b323f740ded20fcee60bac9c 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectILLEnergyTransferTest.py
@@ -81,6 +81,11 @@ class IndirectILLEnergyTransferTest(unittest.TestCase):
         res = IndirectILLEnergyTransfer(**args)
         self._check_workspace_group(res, 2, 18, 512)
 
+    def test_spectrum_axis(self):
+        args = {'Run': self._runs['one_wing_EFWS'], 'SpectrumAxis': '2Theta'}
+        res = IndirectILLEnergyTransfer(**args)
+        self.assertTrue(res.getItem(0).getAxis(1).getUnit().unitID(), "Theta")
+
     def _check_workspace_group(self, wsgroup, nentries, nspectra, nbins):
 
         self.assertTrue(isinstance(wsgroup, WorkspaceGroup),
diff --git a/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py b/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py
index fa6a404d3f022f62301568341b7b0dcd436027a0..215175e9043f53d28cce5ba7d0e4e6fa153ea957 100644
--- a/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py
+++ b/Framework/PythonInterface/test/python/plugins/functions/AttributeTest.py
@@ -20,7 +20,6 @@ class AttributeExample(IFunction1D):
                     self._freq = value
             if name == "Sine":
                     self._sine = value
-            self.storeAttributeValue(name, value)
 
         def function1D(self,xvals):
             ampl=self.getParameterValue("Amplitude")
diff --git a/Framework/TestHelpers/inc/MantidTestHelpers/FunctionCreationHelper.h b/Framework/TestHelpers/inc/MantidTestHelpers/FunctionCreationHelper.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1370c265f1dbcc88b15427b5be2252f19c48ffd
--- /dev/null
+++ b/Framework/TestHelpers/inc/MantidTestHelpers/FunctionCreationHelper.h
@@ -0,0 +1,31 @@
+#ifndef MANTID_TESTHELPERS_FUNCTIONCREATIONHELPER_H_
+#define MANTID_TESTHELPERS_FUNCTIONCREATIONHELPER_H_
+
+#include "MantidAPI/IFunction1D.h"
+#include "MantidAPI/ParamFunction.h"
+
+namespace Mantid {
+
+namespace TestHelpers {
+
+class FunctionChangesNParams : public Mantid::API::IFunction1D,
+                               public Mantid::API::ParamFunction {
+public:
+  FunctionChangesNParams();
+  std::string name() const override;
+  void iterationStarting() override;
+  void iterationFinished() override;
+
+protected:
+  void function1D(double *out, const double *xValues,
+                  const size_t nData) const override;
+  void functionDeriv1D(Mantid::API::Jacobian *out, const double *xValues,
+                       const size_t nData) override;
+  size_t m_maxNParams = 5;
+  bool m_canChange = false;
+};
+
+} // namespace TestHelpers
+} // namespace Mantid
+
+#endif // MANTID_TESTHELPERS_FUNCTIONCREATIONHELPER_H_
diff --git a/Framework/TestHelpers/inc/MantidTestHelpers/SingleCrystalDiffractionTestHelper.h b/Framework/TestHelpers/inc/MantidTestHelpers/SingleCrystalDiffractionTestHelper.h
new file mode 100644
index 0000000000000000000000000000000000000000..cfc136293707a2c5a775a5bed6c5b5092ae10b40
--- /dev/null
+++ b/Framework/TestHelpers/inc/MantidTestHelpers/SingleCrystalDiffractionTestHelper.h
@@ -0,0 +1,109 @@
+#ifndef SINGLECRYSTALDIFFRACTIONHELPER_H_
+#define SINGLECRYSTALDIFFRACTIONHELPER_H_
+
+#include "MantidAPI/DetectorInfo.h"
+#include "MantidAPI/MatrixWorkspace.h"
+#include "MantidDataObjects/EventWorkspace.h"
+#include "MantidDataObjects/PeaksWorkspace.h"
+#include "MantidKernel/NearestNeighbours.h"
+#include "MantidTestHelpers/ComponentCreationHelper.h"
+
+#include <tuple>
+#include <random>
+
+namespace Mantid {
+namespace SingleCrystalDiffractionTestHelper {
+
+class WorkspaceBuilder {
+
+public:
+  WorkspaceBuilder()
+      : m_numPixels(0), m_totalNPixels(0), m_outputAsHistogram(false),
+        m_generator(std::random_device()()){};
+
+  /// Set the total number of peaks to use
+  void setNumPixels(const int numPixels);
+  /// Set whether to create an event workspace or a histogram workspace
+  void outputAsHistogram(const bool outputAsHistogram) {
+    m_outputAsHistogram = outputAsHistogram;
+  };
+  /// Set the rebin parameters to use
+  void setRebinParameters(const std::vector<double> &rebinParams) {
+    m_rebinParams = rebinParams;
+  }
+  void addBackground(const bool useBackground) {
+    m_useBackground = useBackground;
+  }
+  /// Set the parameters for the uniform background
+  void setBackgroundParameters(const int nEvents, const double detRange,
+                               const double tofRange) {
+    m_backgroundParameters = std::make_tuple(nEvents, detRange, tofRange);
+  }
+  /// Set the random seed for generating events
+  void setRandomSeed(const int seed) { m_generator.seed(seed); }
+  /// Add a HKL peak to the diffraction dataset
+  void addPeakByHKL(const Mantid::Kernel::V3D &hkl, const int numEvents,
+                    const std::tuple<double, double, double> &sigmas);
+  /// Make a tuple of event workspace and peaks workspace
+  std::tuple<Mantid::API::MatrixWorkspace_sptr,
+             Mantid::DataObjects::PeaksWorkspace_sptr>
+  build();
+
+private:
+  using HKLPeakDescriptor =
+      std::tuple<Mantid::Kernel::V3D, int, std::tuple<double, double, double>>;
+
+  /// Create a dummy instrument
+  void createInstrument();
+  /// Create a peaks workspace with the request HKL peaks
+  void createPeaksWorkspace();
+  /// Create an empty event workspace with the instrument attached
+  void createEventWorkspace();
+  /// Create a neighbour search tree for finding nearest neighbours
+  void createNeighbourSearch();
+  /// Create peaks at the requested HKL positions
+  void createPeaks();
+  /// Create a single HKL peak in the event workspace
+  void createPeak(const HKLPeakDescriptor &descriptor);
+  /// Create a flat background for the workspace
+  void createBackground(const int index);
+  /// Rebin the event workspace to a histogram workspace
+  void rebinWorkspace();
+
+  /// Nearest neighbour search tree for detectors
+  std::unique_ptr<Mantid::Kernel::NearestNeighbours<3>> m_detectorSearcher;
+  /// List of peak descriptors for creating peaks
+  std::vector<HKLPeakDescriptor> m_peakDescriptors;
+  /// Handle to the instrument object
+  Mantid::Geometry::Instrument_sptr m_instrument;
+  /// Handle to the final output workspace (event OR histogram)
+  Mantid::API::MatrixWorkspace_sptr m_workspace;
+  /// Handle to the event workspace
+  Mantid::DataObjects::EventWorkspace_sptr m_eventWorkspace;
+  /// Handle to the peaks workspace
+  Mantid::DataObjects::PeaksWorkspace_sptr m_peaksWorkspace;
+
+  // Instance variables for builder settings
+
+  /// number of pixels along a single axis on the detector bank
+  int m_numPixels;
+  /// total number of pixels in the detector bank
+  int m_totalNPixels;
+  /// whether to output event or histogram data
+  bool m_outputAsHistogram;
+  /// whether to add a background
+  bool m_useBackground;
+  /// rebin parameters
+  std::vector<double> m_rebinParams;
+  /// background parameters
+  std::tuple<int, double, double> m_backgroundParameters;
+
+  // Other instance varianbles
+
+  /// Random generator for making events
+  std::mt19937 m_generator;
+};
+}
+}
+
+#endif
diff --git a/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h b/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h
index c8ada29dfd6dafd424a41f6edacab386d6f5081d..a9d3f7f7fa1b51482746cce2a8e270067ba4232b 100644
--- a/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h
+++ b/Framework/TestHelpers/inc/MantidTestHelpers/WorkspaceCreationHelper.h
@@ -372,7 +372,8 @@ create2DWorkspaceWithReflectometryInstrument(double startX = 0);
 /// Create a 2D workspace with one monitor and three detectors based around
 /// a virtual reflectometry instrument.
 Mantid::API::MatrixWorkspace_sptr
-create2DWorkspaceWithReflectometryInstrumentMultiDetector(double startX = 0);
+create2DWorkspaceWithReflectometryInstrumentMultiDetector(
+    double startX = 0, const double detSize = 0.0);
 
 void createInstrumentForWorkspaceWithDistances(
     Mantid::API::MatrixWorkspace_sptr workspace,
diff --git a/Framework/TestHelpers/src/FunctionCreationHelper.cpp b/Framework/TestHelpers/src/FunctionCreationHelper.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d68907a9fc124ae40c0559eb4d963c8c6d76bb2c
--- /dev/null
+++ b/Framework/TestHelpers/src/FunctionCreationHelper.cpp
@@ -0,0 +1,60 @@
+#include "MantidTestHelpers/FunctionCreationHelper.h"
+#include "MantidKernel/Exception.h"
+
+namespace Mantid {
+namespace TestHelpers {
+
+std::string FunctionChangesNParams::name() const {
+  return "FunctionChangesNParams";
+}
+
+FunctionChangesNParams::FunctionChangesNParams()
+    : Mantid::API::IFunction1D(), Mantid::API::ParamFunction() {
+  this->declareParameter("A0", 0.0);
+}
+
+void FunctionChangesNParams::iterationStarting() { m_canChange = true; }
+
+void FunctionChangesNParams::iterationFinished() {
+  auto np = nParams();
+  if (m_canChange && np < m_maxNParams) {
+    declareParameter("A" + std::to_string(np), 0.0);
+    throw Mantid::Kernel::Exception::FitSizeWarning(np, nParams());
+  }
+  m_canChange = false;
+}
+
+void FunctionChangesNParams::function1D(double *out, const double *xValues,
+                                        const size_t nData) const {
+  auto np = nParams();
+  for (size_t i = 0; i < nData; ++i) {
+    double x = xValues[i];
+    double y = getParameter(np - 1);
+    if (np > 1) {
+      for (size_t ip = np - 1; ip > 0; --ip) {
+        y = getParameter(ip - 1) + x * y;
+      }
+    }
+    out[i] = y;
+  }
+}
+
+void FunctionChangesNParams::functionDeriv1D(Mantid::API::Jacobian *out,
+                                             const double *xValues,
+                                             const size_t nData) {
+  auto np = nParams();
+  for (size_t i = 0; i < nData; ++i) {
+    double x = xValues[i];
+    double y = 1.0;
+    out->set(i, 0, y);
+    if (np > 1) {
+      for (size_t ip = 1; ip < np; ++ip) {
+        y = x * y;
+        out->set(i, ip, y);
+      }
+    }
+  }
+}
+
+} // namespace TestHelpers
+} // namespace Mantid
diff --git a/Framework/TestHelpers/src/SingleCrystalDiffractionTestHelper.cpp b/Framework/TestHelpers/src/SingleCrystalDiffractionTestHelper.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9ac67b8d9ad56821cbee5e43627c342faa1e9e42
--- /dev/null
+++ b/Framework/TestHelpers/src/SingleCrystalDiffractionTestHelper.cpp
@@ -0,0 +1,258 @@
+/* Test functions for algorithms for single crysal diffraction
+ */
+
+#include "MantidAPI/AlgorithmManager.h"
+#include "MantidAPI/Axis.h"
+#include "MantidAPI/Run.h"
+#include "MantidAPI/Sample.h"
+#include "MantidDataObjects/PeaksWorkspace.h"
+#include "MantidDataObjects/EventWorkspace.h"
+#include "MantidGeometry/Crystal/OrientedLattice.h"
+#include "MantidKernel/V3D.h"
+#include "MantidTestHelpers/SingleCrystalDiffractionTestHelper.h"
+#include "MantidTestHelpers/ComponentCreationHelper.h"
+
+#include <cmath>
+#include <random>
+#include <tuple>
+
+using namespace Mantid;
+using namespace Mantid::API;
+using namespace Mantid::DataObjects;
+using namespace Mantid::Geometry;
+using namespace Mantid::Kernel;
+
+namespace Mantid {
+namespace SingleCrystalDiffractionTestHelper {
+
+void WorkspaceBuilder::setNumPixels(const int numPixels) {
+  m_numPixels = numPixels;
+  m_totalNPixels = numPixels * numPixels;
+}
+
+/** Add a peak to the data set to be generated
+ *
+ * This will create a peak in the event workspace at the given HKL postion.
+ *
+ * The sigmas parameter is a tuple that controls the distribution of events in
+ * the workspace. The first two elements control the x and y variance on the
+ * detector bank face. The final element controls the variance in the time of
+ * flight spectrum.
+ *
+ * @param hkl :: the HKL position of the peak
+ * @param numEvents :: the number of events to create for the peak
+ * @param sigmas :: tuple controlling the distribution of events
+ */
+void WorkspaceBuilder::addPeakByHKL(
+    const V3D &hkl, const int numEvents,
+    const std::tuple<double, double, double> &sigmas) {
+  m_peakDescriptors.emplace_back(hkl, numEvents, sigmas);
+}
+
+/** Build a new set of diffraction data
+ *
+ * This will use the configured parameters supplied by the user to create a new
+ * event workspace with events at the specified HKL peak positions.
+ *
+ * This will return a tuple where the first element is a matrix workspace
+ * pointer that is either an event workspace or a histogram workspace depending
+ * on the options set. The second element will be a peaks workspace.
+ *
+ * @return a tuple containing a matrix workspace and a peaks workspace
+ */
+std::tuple<MatrixWorkspace_sptr, PeaksWorkspace_sptr>
+WorkspaceBuilder::build() {
+  createInstrument();
+  createPeaksWorkspace();
+  createEventWorkspace();
+  createNeighbourSearch();
+  createPeaks();
+
+  if (m_outputAsHistogram)
+    rebinWorkspace();
+
+  return std::make_tuple(m_workspace, m_peaksWorkspace);
+}
+
+/** Create a new instrument.
+ *
+ * This will create a simple rectangular instrument with the requested number
+ * of pixels
+ *
+ */
+void WorkspaceBuilder::createInstrument() {
+  m_instrument = ComponentCreationHelper::createTestInstrumentRectangular(
+      1 /*num_banks*/, m_numPixels /*pixels in each direction yields n by n*/,
+      0.01, 1.0);
+}
+
+/** Create an empty peaks workspace
+ *
+ * This will create an empty peaks workspace with a oriented lattice and will
+ * also set the instrument.
+ *
+ */
+void WorkspaceBuilder::createPeaksWorkspace() {
+  // Create a peaks workspace
+  m_peaksWorkspace = boost::make_shared<PeaksWorkspace>();
+  // Set the instrument to be the fake rectangular bank above.
+  m_peaksWorkspace->setInstrument(m_instrument);
+  // Set the oriented lattice for a cubic crystal
+  OrientedLattice ol(6, 6, 6, 90, 90, 90);
+  ol.setUFromVectors(V3D(6, 0, 0), V3D(0, 6, 0));
+  m_peaksWorkspace->mutableSample().setOrientedLattice(&ol);
+}
+
+/** Create an empty event workspace
+ *
+ * This will create an empty event workspace with the instrument attached
+ */
+void WorkspaceBuilder::createEventWorkspace() {
+  // Make an event workspace and add fake peak data
+  m_eventWorkspace = boost::make_shared<EventWorkspace>();
+  m_eventWorkspace->setInstrument(m_instrument);
+  m_eventWorkspace->initialize(m_totalNPixels /*n spectra*/, 3 /* x-size */,
+                               3 /* y-size */);
+  m_eventWorkspace->getAxis(0)->setUnit("TOF");
+  // Give the spectra-detector mapping for all event lists
+  for (int i = 0; i < m_totalNPixels; ++i) {
+    EventList &el = m_eventWorkspace->getSpectrum(i);
+    el.setDetectorID(i + m_totalNPixels);
+  }
+
+  // set the output workspace to be the event workspace
+  // this may or may not be converted later to a histogram
+  m_workspace = m_eventWorkspace;
+}
+
+/** Create peaks for all HKL descriptors passed to the builder
+ */
+void WorkspaceBuilder::createPeaks() {
+  int index = 0;
+  for (const auto &descriptor : m_peakDescriptors) {
+    createPeak(descriptor);
+    if (m_useBackground)
+      createBackground(index);
+    ++index;
+  }
+}
+
+/** Create a single peak for a given HKL descriptor
+ *
+ * This will create a Gaussian distributed set of events located at the TOF
+ * position of a corresponding HKL value.
+ *
+ * This distribution of events is controlled by the sigmas parameter of the HKL
+ * descriptor which describes the variance in the x, y, and TOF position.
+ *
+ * @param descriptor a HKLPeakDescriptor which describes the position, intensity
+ * and variance in a peak
+ */
+void WorkspaceBuilder::createPeak(const HKLPeakDescriptor &descriptor) {
+  const auto hkl = std::get<0>(descriptor);
+  const auto nEvents = std::get<1>(descriptor);
+  const auto sigmas = std::get<2>(descriptor);
+
+  // Create the peak and add it to the peaks ws
+  const auto peak = std::unique_ptr<Peak>(m_peaksWorkspace->createPeakHKL(hkl));
+  m_peaksWorkspace->addPeak(*peak);
+
+  // Get detector ID and TOF position of peak
+  const auto detectorId = peak->getDetectorID();
+  const auto tofExact = peak->getTOF();
+  const auto &info = m_eventWorkspace->detectorInfo();
+  const auto detPos = info.position(info.indexOf(detectorId));
+
+  const auto xSigma = std::get<0>(sigmas);
+  const auto ySigma = std::get<1>(sigmas);
+  const auto tofSigma = std::get<2>(sigmas);
+
+  // distributions for beam divergence and TOF broadening
+  std::normal_distribution<> xDist(0, xSigma);
+  std::normal_distribution<> yDist(0, ySigma);
+  std::normal_distribution<> tofDist(tofExact, tofSigma);
+
+  // add events to the workspace
+  for (int i = 0; i < nEvents; ++i) {
+    const auto xOffset = xDist(m_generator);
+    const auto yOffset = yDist(m_generator);
+    const auto tof = tofDist(m_generator);
+
+    const auto pos = V3D(detPos[0] + xOffset, detPos[1] + yOffset, detPos[2]);
+    const auto result = m_detectorSearcher->findNearest(
+        Eigen::Vector3d(pos[0], pos[1], pos[2]));
+    const auto index = std::get<1>(result[0]);
+    auto &el = m_eventWorkspace->getSpectrum(index);
+    el.addEventQuickly(TofEvent(tof));
+  }
+}
+
+/** Create a uniform background around each peak in the workspace
+ *
+ * This will NOT add background to the entire workspace as that would cause the
+ * generator to take too long to be used in a unit test. Instead this will
+ * generate a uniform background in a "box" around a peak.
+ *
+ * @param index :: index of the peak to create a uniform background for
+ */
+void WorkspaceBuilder::createBackground(const int index) {
+  const auto &peak = m_peaksWorkspace->getPeak(index);
+  const auto detectorId = peak.getDetectorID();
+  const auto tofExact = peak.getTOF();
+  const auto &info = m_eventWorkspace->detectorInfo();
+  const auto detPos = info.position(info.indexOf(detectorId));
+
+  const auto nBackgroundEvents = std::get<0>(m_backgroundParameters);
+  const auto backgroundDetSize = std::get<1>(m_backgroundParameters);
+  const auto backgroundTOFSize = std::get<2>(m_backgroundParameters);
+
+  std::uniform_real_distribution<> backgroundXDist(-backgroundDetSize,
+                                                   backgroundDetSize);
+  std::uniform_real_distribution<> backgroundYDist(-backgroundDetSize,
+                                                   backgroundDetSize);
+  std::uniform_real_distribution<> backgroundTOFDist(
+      tofExact - backgroundTOFSize, tofExact + backgroundTOFSize);
+
+  for (int i = 0; i < nBackgroundEvents; ++i) {
+    const auto xOffset = backgroundXDist(m_generator);
+    const auto yOffset = backgroundYDist(m_generator);
+    const auto tof = backgroundTOFDist(m_generator);
+
+    const auto pos = V3D(detPos[0] + xOffset, detPos[1] + yOffset, detPos[2]);
+    const auto result = m_detectorSearcher->findNearest(
+        Eigen::Vector3d(pos[0], pos[1], pos[2]));
+    const auto index = std::get<1>(result[0]);
+
+    auto &el = m_eventWorkspace->getSpectrum(index);
+    el.addEventQuickly(TofEvent(tof));
+  }
+}
+
+/** Create a KD-Tree of detector positions that can be used to find the closest
+ * detector to a given event position
+ */
+void WorkspaceBuilder::createNeighbourSearch() {
+  const auto &info = m_eventWorkspace->detectorInfo();
+  std::vector<Eigen::Vector3d> points;
+  for (size_t i = 0; i < info.size(); ++i) {
+    const auto pos = info.position(i);
+    points.emplace_back(pos[0], pos[1], pos[2]);
+  }
+  m_detectorSearcher = Kernel::make_unique<NearestNeighbours<3>>(points);
+}
+
+/** Rebin the event workspace using the parameters provided
+ */
+void WorkspaceBuilder::rebinWorkspace() {
+  auto rebinAlg = AlgorithmManager::Instance().createUnmanaged("Rebin");
+  rebinAlg->setChild(true);
+  rebinAlg->initialize();
+  rebinAlg->setProperty("InputWorkspace", m_eventWorkspace);
+  rebinAlg->setProperty("Params", m_rebinParams);
+  rebinAlg->setProperty("PreserveEvents", false); // Make a histo workspace
+  rebinAlg->setPropertyValue("OutputWorkspace", "__SXD_test_helper_rebin");
+  rebinAlg->execute();
+  m_workspace = rebinAlg->getProperty("OutputWorkspace");
+}
+}
+}
diff --git a/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp b/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp
index 32887532c4c9330edad99480b96fb126682b577c..7fedb9644cc90949d3aca1d1eaa7cb4cbf91c04d 100644
--- a/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp
+++ b/Framework/TestHelpers/src/WorkspaceCreationHelper.cpp
@@ -526,9 +526,11 @@ create2DWorkspaceWithReflectometryInstrument(double startX) {
 * multiple detectors
 * @return workspace with instrument attached.
 * @param startX : X Tof start value for the workspace.
+* @param detSize : optional detector height (default is 0 which puts all
+* detectors at the same position)
 */
-MatrixWorkspace_sptr
-create2DWorkspaceWithReflectometryInstrumentMultiDetector(double startX) {
+MatrixWorkspace_sptr create2DWorkspaceWithReflectometryInstrumentMultiDetector(
+    double startX, const double detSize) {
   Instrument_sptr instrument = boost::make_shared<Instrument>();
   instrument->setReferenceFrame(
       boost::make_shared<ReferenceFrame>(Y /*up*/, X /*along*/, Left, "0,0,0"));
@@ -548,24 +550,29 @@ create2DWorkspaceWithReflectometryInstrumentMultiDetector(double startX) {
   instrument->add(monitor);
   instrument->markAsMonitor(monitor);
 
+  // Place the central detector at 45 degrees (i.e. the distance
+  // from the sample in Y is the same as the distance in X).
+  const double detPosX = 20;
+  const double detPosY = detPosX - sample->getPos().X();
+
   Detector *det1 = new Detector(
       "point-detector", 2,
       ComponentCreationHelper::createCuboid(0.01, 0.02, 0.03), nullptr);
-  det1->setPos(20, (20 - sample->getPos().X()), 0);
+  det1->setPos(detPosX, detPosY - detSize, 0); // offset below centre
   instrument->add(det1);
   instrument->markAsDetector(det1);
 
   Detector *det2 = new Detector(
       "point-detector", 3,
       ComponentCreationHelper::createCuboid(0.01, 0.02, 0.03), nullptr);
-  det2->setPos(20, (20 - sample->getPos().X()), 0);
+  det2->setPos(detPosX, detPosY, 0); // at centre
   instrument->add(det2);
   instrument->markAsDetector(det2);
 
   Detector *det3 = new Detector(
       "point-detector", 4,
       ComponentCreationHelper::createCuboid(0.01, 0.02, 0.03), nullptr);
-  det3->setPos(20, (20 - sample->getPos().X()), 0);
+  det3->setPos(detPosX, detPosY + detSize, 0); // offset above centre
   instrument->add(det3);
   instrument->markAsDetector(det3);
 
diff --git a/MantidPlot/src/ApplicationWindow.cpp b/MantidPlot/src/ApplicationWindow.cpp
index e04dcb5b389be8c1935bebf389a044fb94893fd6..7e25c17a310391517283cabd4359233bd320e6c4 100644
--- a/MantidPlot/src/ApplicationWindow.cpp
+++ b/MantidPlot/src/ApplicationWindow.cpp
@@ -6051,7 +6051,7 @@ bool ApplicationWindow::saveProject(bool compress) {
   return true;
 }
 
-void ApplicationWindow::prepareSaveProject() {
+int ApplicationWindow::execSaveProjectDialog() {
   std::vector<IProjectSerialisable *> windows;
 
   for (auto window : getSerialisableWindows()) {
@@ -6071,9 +6071,11 @@ void ApplicationWindow::prepareSaveProject() {
       projectname, *serialiser, windows, this);
   connect(m_projectSaveView, SIGNAL(projectSaved()), this,
           SLOT(postSaveProject()));
-  m_projectSaveView->show();
+  return m_projectSaveView->exec();
 }
 
+void ApplicationWindow::prepareSaveProject() { execSaveProjectDialog(); }
+
 /**
  * The project was just saved. Update the main window.
  */
@@ -9175,32 +9177,6 @@ void ApplicationWindow::closeWindow(MdiSubWindow *window) {
   emit modified();
 }
 
-/**
- * Called when the user choses to close the program
- */
-void ApplicationWindow::prepareToCloseMantid() {
-  if (!saved) {
-    QString savemsg =
-        tr("Save changes to project: <p><b> %1 </b> ?").arg(projectname);
-    int result =
-        QMessageBox::information(this, tr("MantidPlot"), savemsg, tr("Yes"),
-                                 tr("No"), tr("Cancel"), 0, 2);
-    if (result == 0) {
-      prepareSaveProject();
-      // When we're finished saving trigger the close event
-      connect(m_projectSaveView, SIGNAL(finished(int)), qApp,
-              SLOT(closeAllWindows()));
-      return;
-    } else if (result == 2) {
-      // User wanted to cancel, do nothing
-      return;
-    }
-  }
-
-  // Call to close all the windows and shutdown Mantid
-  QApplication::closeAllWindows();
-}
-
 /** Add a serialisable window to the application
  * @param window :: the window to add
  */
@@ -9809,6 +9785,25 @@ void ApplicationWindow::closeEvent(QCloseEvent *ce) {
     // script is running.
   }
 
+  if (!saved) {
+    QString savemsg =
+        tr("Save changes to project: <p><b> %1 </b> ?").arg(projectname);
+    int result =
+        QMessageBox::information(this, tr("MantidPlot"), savemsg, tr("Yes"),
+                                 tr("No"), tr("Cancel"), 0, 2);
+    if (result == 0) {
+      auto response = execSaveProjectDialog();
+      if (response != QDialog::Accepted) {
+        ce->ignore();
+        return;
+      }
+    } else if (result == 2) {
+      // User wanted to cancel, do nothing
+      ce->ignore();
+      return;
+    }
+  }
+
   // Close the remaining MDI windows. The Python API is required to be active
   // when the MDI window destructor is called so that those references can be
   // cleaned up meaning we cannot rely on the deleteLater functionality to
@@ -9851,6 +9846,7 @@ void ApplicationWindow::closeEvent(QCloseEvent *ce) {
   scriptingEnv()->finalize();
 
   ce->accept();
+  qApp->closeAllWindows();
 }
 
 void ApplicationWindow::customEvent(QEvent *e) {
@@ -11821,8 +11817,7 @@ void ApplicationWindow::createActions() {
   actionCloseAllWindows = new MantidQt::MantidWidgets::TrackedAction(
       QIcon(getQPixmap("quit_xpm")), tr("&Quit"), this);
   actionCloseAllWindows->setShortcut(tr("Ctrl+Q"));
-  connect(actionCloseAllWindows, SIGNAL(triggered()), this,
-          SLOT(prepareToCloseMantid()));
+  connect(actionCloseAllWindows, SIGNAL(triggered()), this, SLOT(close()));
 
   actionDeleteFitTables = new MantidQt::MantidWidgets::TrackedAction(
       QIcon(getQPixmap("close_xpm")), tr("Delete &Fit Tables"), this);
diff --git a/MantidPlot/src/ApplicationWindow.h b/MantidPlot/src/ApplicationWindow.h
index b1a274f27167744bc8b52030723fbb7775f4f6a6..6cacd88828e1dc32670559761869eaad20147d62 100644
--- a/MantidPlot/src/ApplicationWindow.h
+++ b/MantidPlot/src/ApplicationWindow.h
@@ -270,6 +270,8 @@ public slots:
   void saveProjectAs(const QString &fileName = QString(),
                      bool compress = false);
   bool saveProject(bool compress = false);
+  /// Run the project saver dialog
+  int execSaveProjectDialog();
   /// Show the project saver dialog
   void prepareSaveProject();
   /// Update application window post save
@@ -603,7 +605,6 @@ public slots:
   void closeActiveWindow();
   void closeSimilarWindows();
   void closeWindow(MdiSubWindow *window);
-  void prepareToCloseMantid();
 
   //!  Does all the cleaning work before actually deleting a window!
   void removeWindowFromLists(MdiSubWindow *w);
diff --git a/MantidPlot/src/Mantid/MantidMatrix.cpp b/MantidPlot/src/Mantid/MantidMatrix.cpp
index 7da2478f098100c867dbc652bd2c8e57f78d13f6..2d05ad95e4ca9ea965464fcc0755d96227904d21 100644
--- a/MantidPlot/src/Mantid/MantidMatrix.cpp
+++ b/MantidPlot/src/Mantid/MantidMatrix.cpp
@@ -566,8 +566,12 @@ QString MantidMatrix::workspaceName() const {
 }
 
 QwtDoubleRect MantidMatrix::boundingRect() {
+  const int defaultNumberSpectroGramRows = 700;
+  const int defaultNumberSpectroGramColumns = 700;
   if (m_boundingRect.isNull()) {
-    m_spectrogramRows = numRows() > 100 ? numRows() : 100;
+    m_spectrogramRows = numRows() > defaultNumberSpectroGramRows
+                            ? numRows()
+                            : defaultNumberSpectroGramRows;
 
     // This is only meaningful if a 2D (or greater) workspace
     if (m_workspace->axes() > 1) {
@@ -634,10 +638,12 @@ QwtDoubleRect MantidMatrix::boundingRect() {
           }
         }
         m_spectrogramCols = static_cast<int>((x_end - x_start) / ddx);
-        if (m_spectrogramCols < 100)
-          m_spectrogramCols = 100;
+        if (m_spectrogramCols < defaultNumberSpectroGramColumns)
+          m_spectrogramCols = defaultNumberSpectroGramColumns;
       } else {
-        m_spectrogramCols = numCols() > 100 ? numCols() : 100;
+        m_spectrogramCols = numCols() > defaultNumberSpectroGramColumns
+                                ? numCols()
+                                : defaultNumberSpectroGramColumns;
       }
       m_boundingRect = QwtDoubleRect(qMin(x_start, x_end) - 0.5 * dx,
                                      qMin(y_start, y_end) - 0.5 * dy,
diff --git a/MantidPlot/src/Mantid/MantidMatrixFunction.cpp b/MantidPlot/src/Mantid/MantidMatrixFunction.cpp
index 78bb78f184bc19e8667a4825d62e368fa55e1fc3..678a3b634a84ac1c831ef350c8b356fa6c7074f4 100644
--- a/MantidPlot/src/Mantid/MantidMatrixFunction.cpp
+++ b/MantidPlot/src/Mantid/MantidMatrixFunction.cpp
@@ -70,10 +70,11 @@ double MantidMatrixFunction::operator()(double x, double y) {
 
   size_t j = indexX(i, x);
 
-  if (j < columns())
+  if (j < columns()) {
     return m_workspace->y(i)[j];
-  else
+  } else {
     return m_outside;
+  }
 }
 
 double MantidMatrixFunction::getMinPositiveValue() const {
@@ -149,32 +150,77 @@ MantidMatrixFunction::getHistogramX(int row) const {
   return m_workspace->x(row);
 }
 
-size_t MantidMatrixFunction::indexX(size_t row, double s) const {
-  size_t n = m_workspace->blocksize();
-
+/**
+ * Performs a binary search for an x value in the x data of a particular
+ * spectrum. There are two scenarios to consider which are illustrated by
+ * examples
+ *
+ * 1. Histogram Data:
+ * The x value of the example is 6500
+ *
+ * Y:       6      6       16        6         6
+ * X: 2000    4000    8000    12000     16000     20000
+ *
+ * The algorithm will determine that the index of X which is closest to 6500 is
+ *2,
+ * but the Y index with the correct data is 1 (since the value should be 6 not
+ *16)
+ *
+ * 2. Point Data:
+ * Y:   6      6       16        6         6
+ * X: 2000    4000    8000    12000     16000
+ *
+ * The algorithm will determine that the index of X which is closest to 6500 is
+ *2,
+ * and the Y index with the correct data is 2 as well since there is a
+ *one-to-one
+ * mapping between the indices of Y and X.
+ *
+ * @param row: the workspace index to search in
+ * @param xValue: the value to search for
+ * @return the index of the Y data which is associated with the x value.
+ */
+size_t MantidMatrixFunction::indexX(size_t row, double xValue) const {
+  auto isHistogram = m_workspace->isHistogramData();
   const auto &X = m_workspace->x(row);
-  if (n == 0 || s < X[0] || s > X[n - 1])
+  const auto n = X.size();
+
+  auto provideIndexForPointData =
+      [&X](size_t start, size_t stop, double xValue, double midValue) {
+        if (fabs(X[stop] - xValue) < fabs(midValue - xValue))
+          return stop;
+        return start;
+      };
+
+  if (n == 0 || xValue < X[0] || xValue > X[n - 1]) {
     return std::numeric_limits<size_t>::max();
+  }
 
-  size_t i = 0, j = n - 1, k = n / 2;
+  size_t start = 0, stop = n - 1, mid = n / 2;
   for (size_t it = 0; it < n; it++) {
-    const double ss = X[k];
-    if (ss == s)
-      return k;
-    if (abs(static_cast<int>(i) - static_cast<int>(j)) < 2) {
-      double ds = fabs(ss - s);
-      if (fabs(X[j] - s) < ds)
-        return j;
-      return i;
+    const double midValue = X[mid];
+    if (midValue == xValue)
+      return mid;
+
+    // If we reach two neighbouring x values, then we need to decide
+    // which index to pick.
+    if (abs(static_cast<int>(start) - static_cast<int>(stop)) < 2) {
+      if (isHistogram) {
+        return start;
+      } else {
+        return provideIndexForPointData(start, stop, xValue, midValue);
+      }
     }
-    if (s > ss)
-      i = k;
+
+    // Reset the interval to search
+    if (xValue > midValue)
+      start = mid;
     else
-      j = k;
-    k = i + (j - i) / 2;
+      stop = mid;
+    mid = start + (stop - start) / 2;
   }
 
-  return i;
+  return start;
 }
 
 size_t MantidMatrixFunction::indexY(double s) const {
diff --git a/MantidPlot/src/Mantid/MantidMatrixFunction.h b/MantidPlot/src/Mantid/MantidMatrixFunction.h
index d47da242fb763196d77a58b8a96698eadf30f5e0..65b5dcdf9dc9e7ba54313a5218f66588ebfe4b51 100644
--- a/MantidPlot/src/Mantid/MantidMatrixFunction.h
+++ b/MantidPlot/src/Mantid/MantidMatrixFunction.h
@@ -75,7 +75,7 @@ private:
 
   void init(const Mantid::API::MatrixWorkspace_const_sptr &workspace);
   void reset(const Mantid::API::MatrixWorkspace_const_sptr &workspace);
-  size_t indexX(size_t row, double s) const;
+  size_t indexX(size_t row, double xValue) const;
   size_t indexY(double s) const;
 
   /* Data */
diff --git a/MantidPlot/src/Mantid/MantidUI.cpp b/MantidPlot/src/Mantid/MantidUI.cpp
index 20acd88e7a6d380b913c0a5aef7dd74297229ec2..396484eadda3491e40b14112a6416d7dfee95643 100644
--- a/MantidPlot/src/Mantid/MantidUI.cpp
+++ b/MantidPlot/src/Mantid/MantidUI.cpp
@@ -396,9 +396,25 @@ void MantidUI::shutdown() {
       Poco::Thread::sleep(100);
     }
   }
+  // Close any open algorithm dialogs. They contain alorithm references so
+  // should be cleaned up before the framework (and the Python environment)
+  // is destroyed. We traverse the object tree rather than tracking the
+  // creation as it is possible to create a dialog without going through
+  // factory methods.
+  const auto &childWidgets = m_appWindow->children();
+  for (auto child : childWidgets) {
+    if (auto *widget = qobject_cast<MantidQt::API::AlgorithmDialog *>(child)) {
+      // We want to delete this now and not defer it to later in the
+      // event loop
+      widget->setAttribute(Qt::WA_DeleteOnClose, false);
+      widget->close();
+      delete widget;
+      child = nullptr;
+    }
+  }
+
   // If any python objects need to be cleared away then the GIL needs to be
-  // held. This doesn't feel like
-  // it is in the right place but it will do no harm
+  // held.
   ScopedPythonGIL gil;
   // Relevant notifications are connected to signals that will close all
   // dependent windows
diff --git a/MantidPlot/src/ProjectSaveView.cpp b/MantidPlot/src/ProjectSaveView.cpp
index 5fe91179c2f067538859bb00e7416fea694ad113..74b030c511d3179978d067016775d01cf90256b1 100644
--- a/MantidPlot/src/ProjectSaveView.cpp
+++ b/MantidPlot/src/ProjectSaveView.cpp
@@ -205,6 +205,9 @@ void ProjectSaveView::save(bool checked) {
   emit projectSaved();
 
   close();
+  // Set the result code after calling close() because
+  // close() sets it to QDialog::Rejected
+  setResult(QDialog::Accepted);
 }
 
 /**
diff --git a/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h b/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h
index e6bf6801417f5c9f6c15a986d3d620ea57fd6feb..19aa809d348090bce5cab7de862e357b3613aa2c 100644
--- a/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h
+++ b/MantidQt/API/inc/MantidQtAPI/MantidQwtWorkspaceData.h
@@ -53,19 +53,19 @@ public:
   virtual size_t esize() const;
   virtual double e(size_t i) const;
   virtual double ex(size_t i) const;
+  bool isPlottable() const;
   virtual void setLogScaleY(bool on);
   virtual bool logScaleY() const;
-  virtual void saveLowestPositiveValue(const double v);
+  void setMinimumPositiveValue(const double v);
   virtual double getYMin() const;
   virtual double getYMax() const;
-
   virtual void setXOffset(const double x);
   virtual void setYOffset(const double y);
   virtual void setWaterfallPlot(bool on);
   virtual bool isWaterfallPlot() const;
   double offsetY() const { return m_offsetY; }
 
-  void calculateYMinAndMax(/*const std::vector<double> &yvalues*/) const;
+  void calculateYMinAndMax() const;
 
 protected:
   virtual double getX(size_t i) const = 0;
@@ -74,6 +74,8 @@ protected:
   virtual double getEX(size_t i) const = 0;
 
 private:
+  enum class DataStatus : uint8_t { Undefined, NotPlottable, Plottable };
+
   /// Indicates that the data is plotted on a log y scale
   bool m_logScaleY;
 
@@ -86,6 +88,9 @@ private:
   /// highest y value
   mutable double m_maxY;
 
+  /// True if data is 'sensible' to plot
+  mutable DataStatus m_plottable;
+
   /// Indicates whether or not waterfall plots are enabled
   bool m_isWaterfall;
 
diff --git a/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h b/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h
index 9bd9e55162297634c93ba61e80c935cfc13b7ca3..271e2ab4fc530ff9837e451e0d531003e4db5f42 100644
--- a/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h
+++ b/MantidQt/API/inc/MantidQtAPI/QwtWorkspaceSpectrumData.h
@@ -51,8 +51,6 @@ public:
   /// Number of error bars to plot
   size_t esize() const override;
 
-  // double getYMin() const override;
-  // double getYMax() const override;
   /// Return the label to use for the X axis
   QString getXAxisLabel() const override;
   /// Return the label to use for the Y axis
@@ -61,17 +59,8 @@ public:
   bool isHistogram() const { return m_isHistogram; }
   bool dataIsNormalized() const { return m_dataIsNormalized; }
 
-  ///// Inform the data that it is to be plotted on a log y scale
-  // void setLogScale(bool on) override;
-  // bool logScaleY() const override { return m_logScaleY; }
-  // void saveLowestPositiveValue(const double v) override;
   bool setAsDistribution(bool on = true);
 
-  //// Sets offsets for and enables waterfall plots
-  // void setXOffset(const double x) override;
-  // void setYOffset(const double y) override;
-  // void setWaterfallPlot(bool on) override;
-
 protected:
   // Assignment operator (virtualized). MSVC not happy with compiler generated
   // one
diff --git a/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp b/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp
index 076e3c3a3ace055eddecc9f4587f117d5206d99c..04eabd9041b3abfb26d2410cd82d44eafac7b12d 100644
--- a/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp
+++ b/MantidQt/API/src/MantidQwtIMDWorkspaceData.cpp
@@ -159,7 +159,12 @@ void MantidQwtIMDWorkspaceData::calculateMinMax() { calculateYMinAndMax(); }
 //-----------------------------------------------------------------------------
 /** Size of the data set
  */
-size_t MantidQwtIMDWorkspaceData::size() const { return m_Y.size(); }
+size_t MantidQwtIMDWorkspaceData::size() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_Y.size();
+}
 
 /** Return the x value of data point i
 @param i :: Index
@@ -199,7 +204,12 @@ double MantidQwtIMDWorkspaceData::getEX(size_t i) const {
 double MantidQwtIMDWorkspaceData::getE(size_t i) const { return m_E[i]; }
 
 /// Number of error bars to plot
-size_t MantidQwtIMDWorkspaceData::esize() const { return m_E.size(); }
+size_t MantidQwtIMDWorkspaceData::esize() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_E.size();
+}
 
 bool MantidQwtIMDWorkspaceData::setAsDistribution(bool on) {
   m_isDistribution = on;
diff --git a/MantidQt/API/src/MantidQwtWorkspaceData.cpp b/MantidQt/API/src/MantidQwtWorkspaceData.cpp
index 7e450ded1b99a6fb408bb78a1a5b13fd22d9efe4..0913a30986af6adf4e9d347b5ab9449bd919d693 100644
--- a/MantidQt/API/src/MantidQwtWorkspaceData.cpp
+++ b/MantidQt/API/src/MantidQwtWorkspaceData.cpp
@@ -2,9 +2,19 @@
 
 #include <cmath>
 
+namespace {
+/// Minimum value considered positive
+constexpr double MIN_POSITIVE = 1e-3;
+/// Maximum value considered positive
+constexpr double MAX_POSITIVE = 1e30;
+/// Arbitrary multiplier between min/max if they are equal
+constexpr double MIN_MAX_DELTA = 1.001;
+}
+
 MantidQwtWorkspaceData::MantidQwtWorkspaceData(bool logScaleY)
     : m_logScaleY(logScaleY), m_minY(0), m_minPositive(0), m_maxY(0),
-      m_isWaterfall(false), m_offsetX(0), m_offsetY(0) {}
+      m_plottable(DataStatus::Undefined), m_isWaterfall(false), m_offsetX(0),
+      m_offsetY(0) {}
 
 MantidQwtWorkspaceData::MantidQwtWorkspaceData(
     const MantidQwtWorkspaceData &data) {
@@ -18,6 +28,7 @@ operator=(const MantidQwtWorkspaceData &data) {
   m_minY = data.m_minY;
   m_minPositive = data.m_minPositive;
   m_maxY = data.m_maxY;
+  m_plottable = data.m_plottable;
   m_isWaterfall = data.m_isWaterfall;
   m_offsetX = data.m_offsetX;
   m_offsetY = data.m_offsetY;
@@ -27,11 +38,16 @@ operator=(const MantidQwtWorkspaceData &data) {
 /// Calculate absolute minimum and maximum values in a vector. Also find the
 /// smallest positive value.
 void MantidQwtWorkspaceData::calculateYMinAndMax() const {
-
-  const double maxDouble = std::numeric_limits<double>::max();
-  double curMin = maxDouble;
-  double curMinPos = maxDouble;
-  double curMax = -maxDouble;
+  // Set this to true to get the "real" data size
+  // It's correct value is then recalculated below. This is not
+  // too nice but a big refactor is not worth it given the new
+  // workbench/plotting developments.
+  m_plottable = DataStatus::Plottable;
+  m_minY = m_maxY = m_minPositive = 0.0;
+
+  double ymin(std::numeric_limits<double>::max()),
+      ymax(-std::numeric_limits<double>::max()),
+      yminPos(std::numeric_limits<double>::max());
   for (size_t i = 0; i < size(); ++i) {
     auto val = y(i);
     // skip NaNs
@@ -39,41 +55,47 @@ void MantidQwtWorkspaceData::calculateYMinAndMax() const {
       continue;
 
     // Update our values as appropriate
-    if (val < curMin)
-      curMin = val;
-    if (val < curMinPos && val > 0)
-      curMinPos = val;
-    if (val > curMax)
-      curMax = val;
-  }
-
-  // Save the results
-  if (curMin == maxDouble) {
-    m_minY = 0.0;
-    m_minPositive = 0.1;
-    m_maxY = 1.0;
-    return;
-  } else {
-    m_minY = curMin;
-  }
-
-  if (curMax == curMin) {
-    curMax *= 1.1;
+    if (val < ymin)
+      ymin = val;
+    if (val > 0.0 && val < yminPos)
+      yminPos = val;
+    if (val > ymax)
+      ymax = val;
   }
-  m_maxY = curMax;
 
-  if (curMinPos == maxDouble) {
-    m_minPositive = 0.1;
+  if (ymin < std::numeric_limits<double>::max()) {
+    // Values are sensible range
+    m_minY = ymin;
+    // Ensure there is a difference beteween max and min
+    m_maxY = (ymax != ymin) ? ymax : ymin * MIN_MAX_DELTA;
+
+    // Minimum positive value is kept for log scales
+    if (yminPos < std::numeric_limits<double>::max()) {
+      m_minPositive = yminPos;
+      m_plottable = DataStatus::Plottable;
+    } else {
+      // All values are <= 0
+      m_minPositive = MIN_POSITIVE;
+      m_plottable =
+          logScaleY() ? DataStatus::NotPlottable : DataStatus::Plottable;
+    }
   } else {
-    m_minPositive = curMinPos;
+    // Set to arbitrary values (this is unlikely to happen)
+    m_minY = 0.0;
+    m_maxY = MAX_POSITIVE;
+    m_minPositive = MIN_POSITIVE;
+    m_plottable = DataStatus::NotPlottable;
   }
 }
 
-void MantidQwtWorkspaceData::setLogScaleY(bool on) { m_logScaleY = on; }
+void MantidQwtWorkspaceData::setLogScaleY(bool on) {
+  m_logScaleY = on;
+  calculateYMinAndMax();
+}
 
 bool MantidQwtWorkspaceData::logScaleY() const { return m_logScaleY; }
 
-void MantidQwtWorkspaceData::saveLowestPositiveValue(const double v) {
+void MantidQwtWorkspaceData::setMinimumPositiveValue(const double v) {
   if (v > 0)
     m_minPositive = v;
 }
@@ -91,7 +113,7 @@ bool MantidQwtWorkspaceData::isWaterfallPlot() const { return m_isWaterfall; }
  * @return the lowest y value.
  */
 double MantidQwtWorkspaceData::getYMin() const {
-  if (m_minPositive == 0.0) {
+  if (m_plottable == DataStatus::Undefined) {
     calculateYMinAndMax();
   }
   return m_logScaleY ? m_minPositive : m_minY;
@@ -102,7 +124,7 @@ double MantidQwtWorkspaceData::getYMin() const {
  * @return the highest y value.
  */
 double MantidQwtWorkspaceData::getYMax() const {
-  if (m_minPositive == 0.0) {
+  if (m_plottable == DataStatus::Undefined) {
     calculateYMinAndMax();
   }
   if (m_logScaleY && m_maxY <= 0)
@@ -121,7 +143,12 @@ double MantidQwtWorkspaceData::y(size_t i) const {
   return tmp;
 }
 
-size_t MantidQwtWorkspaceData::esize() const { return this->size(); }
+size_t MantidQwtWorkspaceData::esize() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return this->size();
+}
 
 double MantidQwtWorkspaceData::e(size_t i) const {
   double ei = getE(i);
@@ -137,5 +164,19 @@ double MantidQwtWorkspaceData::e(size_t i) const {
 
 double MantidQwtWorkspaceData::ex(size_t i) const { return getEX(i); }
 
+/**
+ * @brief MantidQwtWorkspaceData::isPlottable
+ * Data is considered plottable if either:
+ *   - scale != log or
+ *   - scale == log & all(y) > 0.0
+ * @return True if the data is considered plottable, false otherwise
+ */
+bool MantidQwtWorkspaceData::isPlottable() const {
+  return (m_plottable == DataStatus::Plottable);
+}
+
+//------------------------------------------------------------------------------
+// MantidQwtMatrixWorkspaceData class
+//------------------------------------------------------------------------------
 MantidQwtMatrixWorkspaceData::MantidQwtMatrixWorkspaceData(bool logScaleY)
     : MantidQwtWorkspaceData(logScaleY) {}
diff --git a/MantidQt/API/src/QwtWorkspaceBinData.cpp b/MantidQt/API/src/QwtWorkspaceBinData.cpp
index 1408a915bf058796e1eab73bbab25a4042fc90eb..fff267417f87ed37bc4f7df67df7fe0566875c70 100644
--- a/MantidQt/API/src/QwtWorkspaceBinData.cpp
+++ b/MantidQt/API/src/QwtWorkspaceBinData.cpp
@@ -32,7 +32,12 @@ QwtWorkspaceBinData *QwtWorkspaceBinData::copyWithNewSource(
 
 /** Size of the data set
  */
-size_t QwtWorkspaceBinData::size() const { return m_Y.size(); }
+size_t QwtWorkspaceBinData::size() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_Y.size();
+}
 
 /**
 Return the x value of data point i
@@ -71,7 +76,6 @@ QString QwtWorkspaceBinData::getYAxisLabel() const { return m_yTitle; }
 QwtWorkspaceBinData &QwtWorkspaceBinData::
 operator=(const QwtWorkspaceBinData &rhs) {
   if (this != &rhs) {
-    static_cast<MantidQwtMatrixWorkspaceData &>(*this) = rhs;
     m_binIndex = rhs.m_binIndex;
     m_X = rhs.m_X;
     m_Y = rhs.m_Y;
diff --git a/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp b/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp
index 1c1af2e4584898088869fbac26698cdae9117657..b0f92cfd1bd488120eabcd74f9b71a9d3a473179 100644
--- a/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp
+++ b/MantidQt/API/src/QwtWorkspaceSpectrumData.cpp
@@ -53,6 +53,9 @@ QwtWorkspaceSpectrumData *QwtWorkspaceSpectrumData::copyWithNewSource(
 /** Size of the data set
  */
 size_t QwtWorkspaceSpectrumData::size() const {
+  if (!isPlottable()) {
+    return 0;
+  }
   if (m_binCentres || m_isHistogram) {
     return m_Y.size();
   }
@@ -94,7 +97,12 @@ double QwtWorkspaceSpectrumData::getE(size_t i) const {
   return ei;
 }
 
-size_t QwtWorkspaceSpectrumData::esize() const { return m_E.size(); }
+size_t QwtWorkspaceSpectrumData::esize() const {
+  if (!isPlottable()) {
+    return 0;
+  }
+  return m_E.size();
+}
 
 /**
  * @return A string containin the text to use as an X axis label
@@ -121,7 +129,6 @@ bool QwtWorkspaceSpectrumData::setAsDistribution(bool on) {
 QwtWorkspaceSpectrumData &QwtWorkspaceSpectrumData::
 operator=(const QwtWorkspaceSpectrumData &rhs) {
   if (this != &rhs) {
-    static_cast<MantidQwtMatrixWorkspaceData &>(*this) = rhs;
     m_wsIndex = rhs.m_wsIndex;
     m_X = rhs.m_X;
     m_Y = rhs.m_Y;
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h
index 04ffa09f590186cb84dc64a5d243f5b90d80288f..98395d7c36a65aeeb3d99d8d8af2438068cdc78e 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.h
@@ -55,9 +55,9 @@ private:
   double m_backScaling = 1.;
   double m_peakRange[2];
   int m_pixelRange[2];
+  std::string m_suffix;
   void save();
   void plot();
-  void convertTo2Theta();
 };
 } // namespace CustomInterfaces
 } // namespace Mantid
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui
index 5a6f59d938ebe022efe069ad21db8305209cfe9b..1d349951b7f0267c05b39aeac05060c268230ae7 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Indirect/ILLEnergyTransfer.ui
@@ -53,7 +53,7 @@
          <string>Input files</string>
         </property>
         <property name="label" stdset="0">
-         <string>Run File</string>
+         <string>Sample Run</string>
         </property>
         <property name="multipleFiles" stdset="0">
          <bool>true</bool>
@@ -467,7 +467,7 @@
          <string>IndirectILLReductionQENS|BackgroundRun</string>
         </property>
         <property name="label" stdset="0">
-         <string>Background File</string>
+         <string>Empty Container Run</string>
         </property>
         <property name="multipleFiles" stdset="0">
          <bool>true</bool>
@@ -556,7 +556,7 @@
          <string>Calibration files (vanadium)</string>
         </property>
         <property name="label" stdset="0">
-         <string>Calibration File </string>
+         <string>Vanadium Run</string>
         </property>
         <property name="algorithmAndProperty" stdset="0">
          <string>IndirectILLReductionQENS|CalibrationRun</string>
@@ -696,7 +696,7 @@
          <string>Plot the reduced workspace</string>
         </property>
         <property name="text">
-         <string>Plot Result</string>
+         <string>Plot</string>
         </property>
        </widget>
       </item>
@@ -746,15 +746,39 @@
        </widget>
       </item>
       <item>
-       <widget class="QCheckBox" name="ck2Theta">
-        <property name="toolTip">
-         <string>Convert the y-axis of the output also to 2theta.</string>
-        </property>
+       <widget class="QLabel" name="lbSpectrumAxis">
         <property name="text">
-         <string>Convert to 2Theta</string>
+         <string>Spectrum Axis:</string>
         </property>
        </widget>
       </item>
+      <item>
+       <widget class="QComboBox" name="cbSpectrumTarget">
+        <property name="toolTip">
+         <string>Choose the target for spectrum axis.</string>
+        </property>
+        <item>
+         <property name="text">
+          <string>SpectrumNumber</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>2Theta</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>Q</string>
+         </property>
+        </item>
+        <item>
+         <property name="text">
+          <string>Q2</string>
+         </property>
+        </item>
+       </widget>
+      </item>
       <item>
        <widget class="Line" name="line_2">
         <property name="orientation">
@@ -774,7 +798,7 @@
          <string>Save the reduced workspace</string>
         </property>
         <property name="text">
-         <string>Save Result</string>
+         <string>Save</string>
         </property>
        </widget>
       </item>
@@ -1288,7 +1312,7 @@
   </connection>
  </connections>
  <buttongroups>
-  <buttongroup name="buttonGroup_2"/>
   <buttongroup name="buttonGroup"/>
+  <buttongroup name="buttonGroup_2"/>
  </buttongroups>
 </ui>
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h
index cba60e4209dae75e4f7f4d68698c1007c0b17e6b..cd1a37e8bebf3ce4b291169a6c5cb8c480421c70 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysis.h
@@ -101,7 +101,10 @@ signals:
 private slots:
   /// Guess Alpha clicked
   void guessAlphaClicked();
-
+  void handleGroupBox();
+  void handlePeriodBox();
+  void setChosenGroupSlot(QString &group);
+  void setChosenPeriodSlot(QString &period);
   /// Checks whether two specified periods are equal and, if they are, sets
   /// second one to None
   void checkForEqualPeriods();
@@ -334,6 +337,9 @@ private:
   /// Plots specific WS spectrum (used by plotPair and plotGroup)
   void plotSpectrum(const QString &wsName, bool logScale = false);
 
+  /// set labels for a single data set
+  void updateLabels(std::string &name);
+
   /// Get current plot style parameters. wsName and wsIndex are used to get
   /// default values if
   /// something is not specified
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h
index 15fc9737389bf3d2985d78bba7f765a8a7cc8fc7..5dc36a42339d35148192c74a3947e3600670c763 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Muon/MuonAnalysisFitDataPresenter.h
@@ -120,7 +120,11 @@ public:
   void setOverwrite(bool enabled) { m_overwrite = enabled; }
   /// Updates label to avoid overwriting existing results
   void checkAndUpdateFitLabel(bool sequentialFit);
-
+  /// Generate names of workspaces to be created
+  std::vector<std::string> generateWorkspaceNames(bool overwrite) const;
+signals:
+  void setChosenGroupSignal(const QString &group);
+  void setChosenPeriodSignal(const QString &period);
 public slots:
   /// Transforms fit results when a simultaneous fit finishes
   void handleFitFinished(const QString &status = QString("success")) const;
@@ -140,8 +144,6 @@ public slots:
   void doPreFitChecks(bool sequentialFit);
 
 private:
-  /// Generate names of workspaces to be created
-  std::vector<std::string> generateWorkspaceNames(bool overwrite) const;
   /// Create analysis workspace
   Mantid::API::Workspace_sptr createWorkspace(const std::string &name,
                                               std::string &groupLabel) const;
@@ -199,4 +201,4 @@ private:
 } // namespace CustomInterfaces
 } // namespace Mantid
 
-#endif /* MANTID_CUSTOMINTERFACES_MUONANALYSISFITDATAPRESENTER_H_ */
\ No newline at end of file
+#endif /* MANTID_CUSTOMINTERFACES_MUONANALYSISFITDATAPRESENTER_H_ */
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h
index f04ffc9b5a281d107f50b08d2a376d72e8a31193..f4b9b584e96552ad4bc9384e29182299c05645ef 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/IReflEventView.h
@@ -46,6 +46,9 @@ public:
   /// Returns the presenter managing this view
   virtual IReflEventPresenter *getPresenter() const = 0;
 
+  /// Slice type enums
+  enum class SliceType { UniformEven, Uniform, Custom, LogValue };
+
   virtual std::string getTimeSlicingValues() const = 0;
   virtual std::string getTimeSlicingType() const = 0;
 };
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h
index 6b4e98a8594238cfb07df3d1d0c36e2e02061b1c..a69f90912cfccc072fb29e01ec5263cf1acff5fe 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/QtReflEventView.h
@@ -64,7 +64,9 @@ private:
   std::unique_ptr<IReflEventPresenter> m_presenter;
 
   /// Current slice type
-  mutable std::string m_sliceType;
+  mutable SliceType m_sliceType;
+  /// Slice type to string conversion map
+  std::map<SliceType, std::string> m_sliceTypeMap;
 
   /// List of radio buttons
   std::vector<QRadioButton *> m_buttonList;
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h
index fef415c81f56581339e5ab0e5a98c293fbbe9219..f8517f21576737a415ccb0757839bf7527c2622b 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflDataProcessorPresenter.h
@@ -91,14 +91,21 @@ private:
   void parseCustom(const std::string &timeSlicing,
                    std::vector<double> &startTimes,
                    std::vector<double> &stopTimes);
+  // Parse log value slicing and filter from input string
+  void parseLogValue(const std::string &inputStr, std::string &logFilter,
+                     std::vector<double> &minValues,
+                     std::vector<double> &maxValues);
 
   // Load a run as event workspace
   bool loadEventRun(const std::string &runNo);
   // Load a run (non-event workspace)
   void loadNonEventRun(const std::string &runNo);
+
   // Take a slice from event workspace
   std::string takeSlice(const std::string &runNo, size_t sliceIndex,
-                        double startTime, double stopTime);
+                        double startTime, double stopTime,
+                        const std::string &logFilter = "");
+
   // Asks user if they wish to proceed if a type of workspace exists in the ADS
   bool proceedIfWSTypeInADS(const MantidQt::MantidWidgets::TreeData &data,
                             const bool findEventWS);
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui
index ec8c55a209f457db248a6c5f91fabb6752abdee7..3f540e2646ba770c64d12ce6564f1dad276c7aad 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/Reflectometry/ReflEventWidget.ui
@@ -129,7 +129,7 @@
             <item>
               <widget class="QLabel" name="customLabel">
                 <property name="text">
-                  <string> Python list (sec)</string>
+                  <string>Python list (sec)</string>
                 </property>
               </widget>
             </item>
@@ -159,6 +159,69 @@
           </layout>
         </widget>
       </item>
+      <item>
+        <widget class="QGroupBox" name="logValueGroup">
+          <property name="title">
+            <string>Slicing by log value</string>
+          </property>
+          <layout class="QHBoxLayout" name="logValueLayout">
+            <item>
+              <widget class="QRadioButton" name="logValueButton">
+                <attribute name="buttonGroup">
+                  <string notr="true">slicingOptionsButtonGroup</string>
+                </attribute>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLabel" name="logValueLabel">
+                <property name="text">
+                  <string>Python list (sec)</string>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLineEdit" name="logValueEdit">
+                <property name="sizeHint" stdset="0">
+                  <size>
+                    <width>10</width>
+                    <height>10</height>
+                  </size>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLabel" name="logValueTypeLabel">
+                <property name="text">
+                  <string>Log name</string>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <widget class="QLineEdit" name="logValueTypeEdit">
+                <property name="sizeHint" stdset="0">
+                  <size>
+                    <width>10</width>
+                    <height>10</height>
+                  </size>
+                </property>
+              </widget>
+            </item>
+            <item>
+              <spacer name="logValueSpacer1">
+                <property name="orientation">
+                  <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                  <size>
+                    <width>20</width>
+                    <height>20</height>
+                  </size>
+                </property>
+              </spacer>
+            </item>
+          </layout>
+        </widget>
+      </item>
       <item>
         <spacer name="verticalSpacer">
           <property name="orientation">
diff --git a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h
index 2d6fa9ab13fde2862a0e6bf2dfeb08f9ab2669bb..7c1bd882acd50dc2c1b14150a33c187e745779cb 100644
--- a/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h
+++ b/MantidQt/CustomInterfaces/inc/MantidQtCustomInterfaces/SANSRunWindow.h
@@ -514,6 +514,10 @@ private:
   bool isValidUserFile();
   /// Update IDF file path
   void updateIDFFilePath();
+  /// Update IDF file path when running in Batch mode
+  void updateIDFFilePathForBatch();
+  //// Update IDF information
+  void updateIDFInfo(const QString &command);
 
   UserSubWindow *slicingWindow;
 };
diff --git a/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp b/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp
index 821b09c0e9bd40e643f8776b79870999ed2b5b86..93ccd6d290390efc46986dc6e1842d1c4182c024 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/ILLEnergyTransfer.cpp
@@ -232,6 +232,20 @@ void ILLEnergyTransfer::run() {
   reductionAlg->setProperty("Reflection",
                             instDetails["reflection"].toStdString());
 
+  std::string target = m_uiForm.cbSpectrumTarget->currentText().toStdString();
+  reductionAlg->setProperty("SpectrumAxis", target);
+
+  // Keep track of the suffix
+  if (target == "SpectrumNumber") {
+    m_suffix = "_red";
+  } else if (target == "2Theta") {
+    m_suffix = "_2theta";
+  } else if (target == "Q") {
+    m_suffix = "_q";
+  } else if (target == "Q2") {
+    m_suffix = "_q2";
+  }
+
   // Handle mapping file
   bool useMapFile = m_uiForm.rdGroupChoose->isChecked();
   if (useMapFile) {
@@ -269,9 +283,6 @@ void ILLEnergyTransfer::algorithmComplete(bool error) {
     if (m_uiForm.ckPlot->isChecked()) {
       plot();
     }
-    if (m_uiForm.ck2Theta->isChecked()) {
-      convertTo2Theta();
-    }
   }
 
   // Nothing to do here
@@ -285,7 +296,8 @@ void ILLEnergyTransfer::plot() {
                     "from IndirectReductionCommon import plot_reduction\n";
   pyInput += "plot_reduction(mtd[\"";
   pyInput += m_uiForm.leOutWS->text();
-  pyInput += "_red\"].getItem(0).getName(),\"Contour\")\n";
+  pyInput += QString::fromStdString(m_suffix);
+  pyInput += "\"].getItem(0).getName(),\"Contour\")\n";
   m_pythonRunner.runPythonCode(pyInput);
 }
 
@@ -296,23 +308,11 @@ void ILLEnergyTransfer::save() {
   QString pyInput;
   pyInput += "SaveNexusProcessed(\"";
   pyInput += m_uiForm.leOutWS->text();
-  pyInput += "_red\",\"";
+  pyInput += QString::fromStdString(m_suffix);
+  pyInput += "\",\"";
   pyInput += m_uiForm.leOutWS->text();
-  pyInput += "_red.nxs\")\n";
-  m_pythonRunner.runPythonCode(pyInput);
-}
-
-/**
- * Handles the conversion of y-axis to 2theta
- */
-void ILLEnergyTransfer::convertTo2Theta() {
-  QString pyInput;
-  QString inputWS = m_uiForm.leOutWS->text();
-  pyInput += "ConvertSpectrumAxis(InputWorkspace=\"";
-  pyInput += inputWS;
-  pyInput += "_red\",EMode=\"Indirect\",Target=\"Theta\",OutputWorkspace=\"";
-  pyInput += inputWS;
-  pyInput += "_2theta\")\n";
+  pyInput += QString::fromStdString(m_suffix);
+  pyInput += ".nxs\")\n";
   m_pythonRunner.runPythonCode(pyInput);
 }
 
diff --git a/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp b/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp
index 08dc707c1441ec441e735c6b4180fe5396e7851d..07c65385b2aea9e7a8a8283d555b97eb132a730f 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/IndirectDiffractionReduction.cpp
@@ -191,8 +191,7 @@ void IndirectDiffractionReduction::plotResults() {
       const auto workspaceExists =
           AnalysisDataService::Instance().doesExist(it);
       if (workspaceExists)
-        pyInput += "plotSpectrum('" + QString::fromStdString(it) +
-                   "', 0, error_bars = True)\n";
+        pyInput += "plotSpectrum('" + QString::fromStdString(it) + "', 0)\n";
       else
         showInformationBox(QString::fromStdString(
             "Workspace '" + it + "' not found\nUnable to plot workspace"));
diff --git a/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp b/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
index 6137629fa50bfd24b3072c238fbf5493789322e0..011e992ca85117dca7e1a6f447fdcccaa065b9eb 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/IndirectTab.cpp
@@ -259,7 +259,7 @@ void IndirectTab::plotSpectrum(const QStringList &workspaceNames, int wsIndex) {
   pyInput += workspaceNames.join("','");
   pyInput += "'], ";
   pyInput += QString::number(wsIndex);
-  pyInput += ", error_bars = True)\n";
+  pyInput += ")\n";
 
   m_pythonRunner.runPythonCode(pyInput);
 }
@@ -303,7 +303,7 @@ void IndirectTab::plotSpectrum(const QStringList &workspaceNames, int specStart,
   pyInput += QString::number(specStart);
   pyInput += ",";
   pyInput += QString::number(specEnd + 1);
-  pyInput += ")), error_bars = True)\n";
+  pyInput += ")))\n";
 
   m_pythonRunner.runPythonCode(pyInput);
 }
@@ -355,7 +355,7 @@ void IndirectTab::plotSpectra(const QStringList &workspaceNames,
     pyInput += " ,";
     pyInput += QString::number(wsIndices[i]);
   }
-  pyInput += "], error_bars = True)\n";
+  pyInput += "])\n";
   m_pythonRunner.runPythonCode(pyInput);
 }
 
@@ -418,7 +418,7 @@ void IndirectTab::plotTimeBin(const QStringList &workspaceNames, int binIndex) {
   pyInput += workspaceNames.join("','");
   pyInput += "'], ";
   pyInput += QString::number(binIndex);
-  pyInput += ", error_bars=True)\n";
+  pyInput += ")\n";
 
   m_pythonRunner.runPythonCode(pyInput);
 }
diff --git a/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp b/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp
index ebb15944ddbfeb597eb52297081d508b2c8efb05..c51cea3e7cdea0eabf77d15a7a129d5ac8f62e6b 100644
--- a/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp
+++ b/MantidQt/CustomInterfaces/src/Indirect/IqtFit.cpp
@@ -730,7 +730,7 @@ void IqtFit::constrainIntensities(CompositeFunction_sptr func) {
   switch (m_uiForm.cbFitType->currentIndex()) {
   case 0: // 1 Exp
   case 2: // 1 Str
-    if (!func->isFixed(index)) {
+    if (func->isActive(index)) {
       func->tie(paramName, "1-f0.A0");
     } else {
       std::string paramValue =
@@ -741,7 +741,7 @@ void IqtFit::constrainIntensities(CompositeFunction_sptr func) {
     break;
   case 1: // 2 Exp
   case 3: // 1 Exp & 1 Str
-    if (!func->isFixed(index)) {
+    if (func->isActive(index)) {
       func->tie(paramName, "1-f2.Height-f0.A0");
     } else {
       std::string paramValue =
diff --git a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
index ec02f8c16bb24ae20e1dc556c80f964fbc80c75b..a3a8758ae40a38e78356e45513e40d79ccf597e8 100644
--- a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
+++ b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysis.cpp
@@ -318,6 +318,17 @@ void MuonAnalysis::initLayout() {
   // Manage User Directories
   connect(m_uiForm.manageDirectoriesBtn, SIGNAL(clicked()), this,
           SLOT(openDirectoryDialog()));
+  connect(this, SIGNAL(setChosenGroupSignal(QString &)), this,
+          SLOT(setChosenGroupSlot(QString &)));
+  connect(this, SIGNAL(setChosenPeriodSignal(QString &)), this,
+          SLOT(setChosenPeriodSlot(QString &)));
+}
+
+void MuonAnalysis::setChosenGroupSlot(QString &group) {
+  m_uiForm.fitBrowser->setChosenGroup(group);
+}
+void MuonAnalysis::setChosenPeriodSlot(QString &period) {
+  m_uiForm.fitBrowser->setChosenPeriods(period);
 }
 
 /**
@@ -398,7 +409,9 @@ void MuonAnalysis::plotSelectedItem() {
 void MuonAnalysis::plotItem(ItemType itemType, int tableRow,
                             PlotType plotType) {
   m_updating = true;
-  m_dataSelector->clearChosenGroups();
+  m_uiForm.fitBrowser->clearChosenGroups();
+  m_uiForm.fitBrowser->clearChosenPeriods();
+
   AnalysisDataServiceImpl &ads = AnalysisDataService::Instance();
 
   try {
@@ -1844,8 +1857,8 @@ void MuonAnalysis::selectMultiPeak(const QString &wsName,
                    std::back_inserter(groupsAndPairs), &QString::fromStdString);
     std::transform(groups.pairNames.begin(), groups.pairNames.end(),
                    std::back_inserter(groupsAndPairs), &QString::fromStdString);
-    m_dataSelector->setAvailableGroups(groupsAndPairs);
-    m_dataSelector->setNumPeriods(m_numPeriods);
+    m_uiForm.fitBrowser->setAvailableGroups(groupsAndPairs);
+    m_uiForm.fitBrowser->setNumPeriods(m_numPeriods);
 
     // Set the selected run, group/pair and period
     m_fitDataPresenter->setAssignedFirstRun(wsName, filePath);
@@ -2126,6 +2139,11 @@ void MuonAnalysis::loadFittings() {
           SLOT(dataToFitChanged()));
   connect(m_uiForm.plotCreation, SIGNAL(currentIndexChanged(int)), this,
           SLOT(updateDataPresenterOverwrite(int)));
+  connect(m_uiForm.fitBrowser, SIGNAL(groupBoxClicked()), this,
+          SLOT(handleGroupBox()));
+  connect(m_uiForm.fitBrowser, SIGNAL(periodBoxClicked()), this,
+          SLOT(handlePeriodBox()));
+
   m_fitDataPresenter->setOverwrite(isOverwriteEnabled());
   // Set multi fit mode on/off as appropriate
   const auto &multiFitState = m_optionTab->getMultiFitState();
@@ -2134,7 +2152,43 @@ void MuonAnalysis::loadFittings() {
   const auto &TFAsymmState = m_optionTab->getTFAsymmState();
   setTFAsymm(TFAsymmState);
 }
-
+/**
+* Handle "groups" selected/deselected
+* Update stored value
+*/
+void MuonAnalysis::handleGroupBox() {
+  // send the group to dataselector
+  m_dataSelector->setGroupsSelected(m_uiForm.fitBrowser->getChosenGroups());
+  // update labels for single fit
+  auto names = m_fitDataPresenter->generateWorkspaceNames(true);
+  if (names.size() == 1) {
+    updateLabels(names[0]);
+  }
+  m_fitDataPresenter->handleSelectedDataChanged(true);
+}
+/**
+* Handle"periods" selected/deselected
+* Update stored value
+*/
+void MuonAnalysis::handlePeriodBox() {
+  // send the group to dataselector
+  m_dataSelector->setPeriodsSelected(m_uiForm.fitBrowser->getChosenPeriods());
+  // update labels for single fit
+  auto names = m_fitDataPresenter->generateWorkspaceNames(true);
+  if (names.size() == 1) {
+    updateLabels(names[0]);
+  }
+  m_fitDataPresenter->handleSelectedDataChanged(true);
+}
+/**
+* Updates the labels (legend and ws) for
+* a single fit when within the mulit-
+* fit GUI.
+* @param name :: the name for the label.
+*/
+void MuonAnalysis::updateLabels(std::string &name) {
+  m_uiForm.fitBrowser->setOutputName(name);
+}
 /**
  * Allow/disallow loading.
  */
@@ -2424,8 +2478,11 @@ void MuonAnalysis::changeTab(int newTabIndex) {
   if (newTab == m_uiForm.DataAnalysis) // Entering DA tab
   {
     // Save last fitting range
-    auto xmin = m_dataSelector->getStartTime();
-    auto xmax = m_dataSelector->getEndTime();
+    auto xmin = m_uiForm.fitBrowser->startX();
+    auto xmax = m_uiForm.fitBrowser->endX();
+    // make sure data selector has same values
+    m_dataSelector->setStartTime(xmin);
+    m_dataSelector->setEndTime(xmax);
 
     // Say MantidPlot to use Muon Analysis fit prop. browser
     emit setFitPropertyBrowser(m_uiForm.fitBrowser);
@@ -2456,6 +2513,13 @@ void MuonAnalysis::changeTab(int newTabIndex) {
     // repeat setting the fitting ranges as the above code can set them to an
     // unwanted default value
     setFittingRanges(xmin, xmax);
+    // make sure groups are not on if single fit
+    if (m_optionTab->getMultiFitState() == Muon::MultiFitState::Disabled) {
+      m_uiForm.fitBrowser->setSingleFitLabel(m_currentDataName.toStdString());
+    } else {
+      m_uiForm.fitBrowser->setAllGroups();
+      m_uiForm.fitBrowser->setChosenPeriods("1");
+    }
   } else if (newTab == m_uiForm.ResultsTable) {
     m_resultTableTab->refresh();
   }
@@ -3064,9 +3128,6 @@ void MuonAnalysis::multiFitCheckboxChanged(int state) {
     setTFAsymm(Muon::TFAsymmState::Disabled);
   }
   m_fitFunctionPresenter->setMultiFitState(multiFitState);
-  if (multiFitState == Muon::MultiFitState::Disabled) {
-    m_dataSelector->clearChosenGroups();
-  }
 }
 /**
 * Called when the "TF Asymmetry" checkbox is changed (settings tab.)
diff --git a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp
index 13f3ee185166939d094a36ea6284ebf7cd7e0486..69f5de4123504dcdc93b449cde2106f5b2649d14 100644
--- a/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp
+++ b/MantidQt/CustomInterfaces/src/Muon/MuonAnalysisFitDataPresenter.cpp
@@ -401,7 +401,8 @@ MuonAnalysisFitDataPresenter::createWorkspace(const std::string &name,
     if (params.periods.empty()) {
       analysisOptions.summedPeriods = "1";
     } else {
-      std::replace(params.periods.begin(), params.periods.end(), ',', '+');
+      // need a comma seperated list
+      std::replace(params.periods.begin(), params.periods.end(), '+', ',');
       const size_t minus = params.periods.find('-');
       analysisOptions.summedPeriods = params.periods.substr(0, minus);
       if (minus != std::string::npos && minus != params.periods.size()) {
@@ -794,10 +795,10 @@ void MuonAnalysisFitDataPresenter::setUpDataSelector(
   const auto &groups = m_dataSelector->getChosenGroups();
   const auto &periods = m_dataSelector->getPeriodSelections();
   if (!groups.contains(groupToSet)) {
-    m_dataSelector->setChosenGroup(groupToSet);
+    emit setChosenGroupSignal(groupToSet);
   }
   if (!periodToSet.isEmpty() && !periods.contains(periodToSet)) {
-    m_dataSelector->setChosenPeriod(periodToSet);
+    emit setChosenPeriodSignal(periodToSet);
   }
 
   // If given an optional file path to "current run", cache it for later use
diff --git a/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp b/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp
index 00eb1af537f0195f866b83881bb74602fc737bfe..35b6542845f1bc4cace413c79af6b1d375a3d368 100644
--- a/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp
+++ b/MantidQt/CustomInterfaces/src/Reflectometry/QtReflEventView.cpp
@@ -13,10 +13,17 @@ QtReflEventView::QtReflEventView(QWidget *parent) {
   UNUSED_ARG(parent);
   initLayout();
 
+  // Insert slice-type to string pairs
+  m_sliceTypeMap[SliceType::UniformEven] = "UniformEven";
+  m_sliceTypeMap[SliceType::Uniform] = "Uniform";
+  m_sliceTypeMap[SliceType::Custom] = "Custom";
+  m_sliceTypeMap[SliceType::LogValue] = "LogValue";
+
   // Add slicing option buttons to list
   m_buttonList.push_back(m_ui.uniformEvenButton);
   m_buttonList.push_back(m_ui.uniformButton);
   m_buttonList.push_back(m_ui.customButton);
+  m_buttonList.push_back(m_ui.logValueButton);
 
   // Whenever one of the slicing option buttons is selected, their corresponding
   // entry is enabled, otherwise they remain disabled.
@@ -54,20 +61,34 @@ std::string QtReflEventView::getTimeSlicingValues() const {
 
   std::string values;
 
-  if (m_sliceType == "UniformEven")
+  switch (m_sliceType) {
+  case SliceType::UniformEven:
     values = m_ui.uniformEvenEdit->text().toStdString();
-  else if (m_sliceType == "Uniform")
+    break;
+  case SliceType::Uniform:
     values = m_ui.uniformEdit->text().toStdString();
-  else if (m_sliceType == "Custom")
+    break;
+  case SliceType::Custom:
     values = m_ui.customEdit->text().toStdString();
+    break;
+  case SliceType::LogValue:
+    std::string slicingValues = m_ui.logValueEdit->text().toStdString();
+    std::string logFilter = m_ui.logValueTypeEdit->text().toStdString();
+    if (!slicingValues.empty() && !logFilter.empty())
+      values = "Slicing=\"" + slicingValues + "\",LogFilter=" + logFilter;
+    break;
+  }
 
   return values;
 }
 
-/** Returns the type of time slicing that was selected
+/** Returns the type of time slicing that was selected as string
 * @return :: Time slicing type
 */
-std::string QtReflEventView::getTimeSlicingType() const { return m_sliceType; }
+std::string QtReflEventView::getTimeSlicingType() const {
+
+  return m_sliceTypeMap.at(m_sliceType);
+}
 
 /** Enable slicing option entries for checked button and disable all others.
 */
@@ -75,8 +96,8 @@ void QtReflEventView::toggleSlicingOptions() const {
 
   const auto checkedButton = m_ui.slicingOptionsButtonGroup->checkedButton();
 
-  const std::vector<std::string> slicingTypes = {"UniformEven", "Uniform",
-                                                 "Custom"};
+  SliceType slicingTypes[4] = {SliceType::UniformEven, SliceType::Uniform,
+                               SliceType::Custom, SliceType::LogValue};
 
   std::vector<bool> entriesEnabled(m_buttonList.size(), false);
   for (size_t i = 0; i < m_buttonList.size(); i++) {
@@ -87,9 +108,20 @@ void QtReflEventView::toggleSlicingOptions() const {
     }
   }
 
+  // UniformEven
   m_ui.uniformEvenEdit->setEnabled(entriesEnabled[0]);
+  m_ui.uniformEvenLabel->setEnabled(entriesEnabled[0]);
+  // Uniform
   m_ui.uniformEdit->setEnabled(entriesEnabled[1]);
+  m_ui.uniformLabel->setEnabled(entriesEnabled[1]);
+  // Custom
   m_ui.customEdit->setEnabled(entriesEnabled[2]);
+  m_ui.customLabel->setEnabled(entriesEnabled[2]);
+  // LogValue
+  m_ui.logValueEdit->setEnabled(entriesEnabled[3]);
+  m_ui.logValueLabel->setEnabled(entriesEnabled[3]);
+  m_ui.logValueTypeEdit->setEnabled(entriesEnabled[3]);
+  m_ui.logValueTypeLabel->setEnabled(entriesEnabled[3]);
 }
 
 } // namespace CustomInterfaces
diff --git a/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp b/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp
index 93f15734fcd07c050ad853576e12a3da00a96df0..ca6b309327e89e630eaaaf66ffc152750fb69a77 100644
--- a/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp
+++ b/MantidQt/CustomInterfaces/src/Reflectometry/ReflDataProcessorPresenter.cpp
@@ -5,6 +5,7 @@
 #include "MantidAPI/Run.h"
 #include "MantidQtMantidWidgets/DataProcessorUI/DataProcessorTreeManager.h"
 #include "MantidQtMantidWidgets/DataProcessorUI/DataProcessorView.h"
+#include "MantidQtMantidWidgets/DataProcessorUI/ParseKeyValueString.h"
 #include "MantidQtMantidWidgets/ProgressPresenter.h"
 
 using namespace MantidQt::MantidWidgets;
@@ -183,10 +184,13 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
   size_t numGroupSlices = INT_MAX;
 
   std::vector<double> startTimes, stopTimes;
+  std::string logFilter; // Set if we are slicing by log value
 
-  // For custom slicing, the start/stop times are the same for all rows
+  // For custom/log value slicing the start/stop times are the same for all rows
   if (timeSlicingType == "Custom")
     parseCustom(timeSlicingValues, startTimes, stopTimes);
+  if (timeSlicingType == "LogValue")
+    parseLogValue(timeSlicingValues, logFilter, startTimes, stopTimes);
 
   for (const auto &row : group) {
 
@@ -194,7 +198,7 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
     const auto data = row.second;         // Vector containing data for this row
     std::string runNo = row.second.at(0); // The run number
 
-    if (timeSlicingType != "Custom") {
+    if (timeSlicingType == "UniformEven" || timeSlicingType == "Uniform") {
       const std::string runName = "TOF_" + runNo;
       parseUniform(timeSlicingValues, timeSlicingType, runName, startTimes,
                    stopTimes);
@@ -205,8 +209,9 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
 
     for (size_t i = 0; i < numSlices; i++) {
       try {
-        auto wsName = takeSlice(runNo, i, startTimes[i], stopTimes[i]);
         std::vector<std::string> slice(data);
+        std::string wsName =
+            takeSlice(runNo, i, startTimes[i], stopTimes[i], logFilter);
         slice[0] = wsName;
         auto newData = reduceRow(slice);
         newData[0] = data[0];
@@ -225,8 +230,8 @@ bool ReflDataProcessorPresenter::processGroupAsEventWS(
   // Post-process (if needed)
   if (multiRow) {
 
-    // All slices are common for uniform even or custom slicing
-    if (timeSlicingType == "UniformEven" || timeSlicingType == "Custom")
+    // All slices are common for uniform even, custom and log value slicing
+    if (timeSlicingType != "Uniform")
       numGroupSlices = startTimes.size();
 
     addNumGroupSlicesEntry(groupID, numGroupSlices);
@@ -351,28 +356,47 @@ void ReflDataProcessorPresenter::parseCustom(const std::string &timeSlicing,
   std::transform(timesStr.begin(), timesStr.end(), std::back_inserter(times),
                  [](const std::string &astr) { return std::stod(astr); });
 
-  size_t numTimes = times.size();
+  size_t numSlices = times.size() > 1 ? times.size() - 1 : 1;
 
   // Add the start/stop times
-  startTimes = std::vector<double>(numTimes - 1);
-  stopTimes = std::vector<double>(numTimes - 1);
+  startTimes = std::vector<double>(numSlices);
+  stopTimes = std::vector<double>(numSlices);
 
-  if (numTimes == 1) {
+  if (times.size() == 1) {
     startTimes[0] = 0;
     stopTimes[0] = times[0];
   } else {
-    for (size_t i = 0; i < numTimes - 1; i++) {
+    for (size_t i = 0; i < numSlices; i++) {
       startTimes[i] = times[i];
       stopTimes[i] = times[i + 1];
     }
   }
 }
 
-/** Loads an event workspace and puts it into the ADS
+/** Parses a string to extract log value filter and time slicing
  *
- * @param runNo :: the run number as a string
- * @return :: True if algorithm was executed. False otherwise
+ * @param inputStr :: The string to parse
+ * @param logFilter :: The log filter to use
+ * @param startTimes :: Start times for the set of slices
+ * @param stopTimes :: Stop times for the set of slices
  */
+void ReflDataProcessorPresenter::parseLogValue(const std::string &inputStr,
+                                               std::string &logFilter,
+                                               std::vector<double> &startTimes,
+                                               std::vector<double> &stopTimes) {
+
+  auto strMap = parseKeyValueString(inputStr);
+  std::string timeSlicing = strMap.at("Slicing");
+  logFilter = strMap.at("LogFilter");
+
+  parseCustom(timeSlicing, startTimes, stopTimes);
+}
+
+/** Loads an event workspace and puts it into the ADS
+*
+* @param runNo :: The run number as a string
+* @return :: True if algorithm was executed. False otherwise
+*/
 bool ReflDataProcessorPresenter::loadEventRun(const std::string &runNo) {
 
   bool runFound;
@@ -394,9 +418,9 @@ bool ReflDataProcessorPresenter::loadEventRun(const std::string &runNo) {
 }
 
 /** Loads a non-event workspace and puts it into the ADS
- *
- * @param runNo :: the run number as a string
- */
+*
+* @param runNo :: The run number as a string
+*/
 void ReflDataProcessorPresenter::loadNonEventRun(const std::string &runNo) {
 
   bool runFound; // unused but required
@@ -445,42 +469,52 @@ std::string ReflDataProcessorPresenter::loadRun(const std::string &run,
 
 /** Takes a slice from a run and puts the 'sliced' workspace into the ADS
 *
-* @param runNo :: the run number as a string
-* @param sliceIndex :: the index of the slice being taken
-* @param startTime :: start time
-* @param stopTime :: stop time
+* @param runNo :: The run number as a string
+* @param sliceIndex :: The index of the slice being taken
+* @param startTime :: Start time
+* @param stopTime :: Stop time
+* @param logFilter :: The log filter to use if slicing by log value
 * @return :: the name of the sliced workspace (without prefix 'TOF_')
 */
-std::string ReflDataProcessorPresenter::takeSlice(const std::string &runNo,
-                                                  size_t sliceIndex,
-                                                  double startTime,
-                                                  double stopTime) {
+std::string ReflDataProcessorPresenter::takeSlice(
+    const std::string &runNo, size_t sliceIndex, double startTime,
+    double stopTime, const std::string &logFilter) {
 
   std::string runName = "TOF_" + runNo;
   std::string sliceName = runName + "_slice_" + std::to_string(sliceIndex);
   std::string monName = runName + "_monitors";
+  std::string filterAlg =
+      logFilter.empty() ? "FilterByTime" : "FilterByLogValue";
 
-  // Filter by time
-  IAlgorithm_sptr filter = AlgorithmManager::Instance().create("FilterByTime");
+  // Filter the run using the appropriate filter algorithm
+  IAlgorithm_sptr filter = AlgorithmManager::Instance().create(filterAlg);
   filter->initialize();
   filter->setProperty("InputWorkspace", runName);
   filter->setProperty("OutputWorkspace", sliceName);
-  filter->setProperty("StartTime", startTime);
-  filter->setProperty("StopTime", stopTime);
+  if (filterAlg == "FilterByTime") {
+    filter->setProperty("StartTime", startTime);
+    filter->setProperty("StopTime", stopTime);
+  } else { // FilterByLogValue
+    filter->setProperty("MinimumValue", startTime);
+    filter->setProperty("MaximumValue", stopTime);
+    filter->setProperty("TimeTolerance", 1.0);
+    filter->setProperty("LogName", logFilter);
+  }
+
   filter->execute();
 
-  // Get the normalization constant for this slice
-  MatrixWorkspace_sptr mws =
-      AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(runName);
+  // Obtain the normalization constant for this slice
+  IEventWorkspace_sptr mws =
+      AnalysisDataService::Instance().retrieveWS<IEventWorkspace>(runName);
   double total = mws->run().getProtonCharge();
-  mws = AnalysisDataService::Instance().retrieveWS<MatrixWorkspace>(sliceName);
+  mws = AnalysisDataService::Instance().retrieveWS<IEventWorkspace>(sliceName);
   double slice = mws->run().getProtonCharge();
-  double fraction = slice / total;
+  double scaleFactor = slice / total;
 
   IAlgorithm_sptr scale = AlgorithmManager::Instance().create("Scale");
   scale->initialize();
   scale->setProperty("InputWorkspace", monName);
-  scale->setProperty("Factor", fraction);
+  scale->setProperty("Factor", scaleFactor);
   scale->setProperty("OutputWorkspace", "__" + monName + "_temp");
   scale->execute();
 
diff --git a/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp b/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp
index c304f7ca2fca20570033be5ecb1b15a578b5c1ce..e7935cf7cf3fc55326acde21cb0a5f0895b06fef 100644
--- a/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp
+++ b/MantidQt/CustomInterfaces/src/SANSRunWindow.cpp
@@ -2483,6 +2483,9 @@ void SANSRunWindow::handleReduceButtonClick(const QString &typeStr) {
       return;
     }
 
+    // Update the IDF file path for batch reductions
+    updateIDFFilePathForBatch();
+
     // check for the detectors combination option
     // transform the SANS Diagnostic gui option in: 'rear', 'front' , 'both',
     // 'merged', None WavRangeReduction option
@@ -5109,19 +5112,32 @@ bool SANSRunWindow::isValidUserFile() {
   return true;
 }
 
-void SANSRunWindow::updateIDFFilePath() {
-  QString getIdf = "i.get_current_idf_path_in_reducer()\n";
-  QString resultIdf(runPythonCode(getIdf, false));
-  auto teset1 = resultIdf.toStdString();
+void SANSRunWindow::updateIDFInfo(const QString &command) {
+  QString resultIdf(runPythonCode(command, false));
   resultIdf = resultIdf.simplified();
-  auto test2 = resultIdf.toStdString();
   if (resultIdf != m_constants.getPythonEmptyKeyword() &&
       !resultIdf.isEmpty()) {
-    auto test = resultIdf.toStdString();
     m_uiForm.current_idf_path->setText(resultIdf);
   }
 }
 
+void SANSRunWindow::updateIDFFilePathForBatch() {
+
+  if (m_uiForm.batch_table->rowCount() == 0) {
+    return;
+  }
+  // We base the IDF entry on the sample scatter entry of the first row
+  auto *table_item = m_uiForm.batch_table->item(0, 0);
+  auto scatter_sample_run = table_item->text();
+  QString getIdf = "i.get_idf_path_for_run(\"" + scatter_sample_run + "\")\n";
+  updateIDFInfo(getIdf);
+}
+
+void SANSRunWindow::updateIDFFilePath() {
+  QString getIdf = "i.get_current_idf_path_in_reducer()\n";
+  updateIDFInfo(getIdf);
+}
+
 void SANSRunWindow::onUpdateGeometryRequest() {
   auto sampleWidth = m_uiForm.sample_width->text();
   auto sampleHeight = m_uiForm.sample_height->text();
diff --git a/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h b/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h
index 316d13bc97414722e1e90f453e58ffbb00da8035..589e1c45ffde3e114f02db75276a645af3e407af 100644
--- a/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h
+++ b/MantidQt/CustomInterfaces/test/MuonAnalysisFitDataPresenterTest.h
@@ -48,22 +48,20 @@ operator<<(std::basic_ostream<CharType, CharTrait> &out,
   return out;
 }
 }
-
 /// Mock data selector widget
 class MockDataSelector : public IMuonFitDataSelector {
 public:
   GCC_DIAG_OFF_SUGGEST_OVERRIDE
+
   MOCK_CONST_METHOD0(getFilenames, QStringList());
   MOCK_CONST_METHOD0(getStartTime, double());
   MOCK_CONST_METHOD0(getEndTime, double());
-  MOCK_METHOD1(setNumPeriods, void(size_t));
-  MOCK_METHOD1(setChosenPeriod, void(const QString &));
+  MOCK_METHOD1(setPeriodsSelected, void(const QStringList &));
   MOCK_CONST_METHOD0(getPeriodSelections, QStringList());
   MOCK_METHOD3(setWorkspaceDetails, void(const QString &, const QString &,
                                          const boost::optional<QString> &));
-  MOCK_METHOD1(setAvailableGroups, void(const QStringList &));
   MOCK_CONST_METHOD0(getChosenGroups, QStringList());
-  MOCK_METHOD1(setChosenGroup, void(const QString &));
+  MOCK_METHOD1(setGroupsSelected, void(const QStringList &));
   MOCK_METHOD1(setStartTime, void(double));
   MOCK_METHOD1(setEndTime, void(double));
   MOCK_METHOD1(setStartTimeQuietly, void(double));
@@ -95,6 +93,9 @@ public:
   MOCK_METHOD1(userChangedDataset, void(int));
   MOCK_CONST_METHOD0(rawData, bool());
   MOCK_METHOD1(continueAfterChecks, void(bool));
+  MOCK_METHOD1(setNumPeriods, void(size_t));
+  MOCK_METHOD1(setAvailableGroups, void(const QStringList &));
+  MOCK_METHOD1(setChosenGroup, void(const QString &));
   void preFitChecksRequested(bool sequential) override {
     UNUSED_ARG(sequential);
   }
@@ -205,9 +206,11 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00015189-91"), QString("MUSR"),
                                     Eq(boost::optional<QString>{}))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("long"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"long"})))
+        .Times(1);
+    localSetAssignedFirstRun(wsName, boost::none);
   }
 
   void test_setAssignedFirstRun_nonContiguousRange() {
@@ -217,9 +220,12 @@ public:
                 setWorkspaceDetails(QString("00015189-91, 15193"),
                                     QString("MUSR"),
                                     Eq(boost::optional<QString>{}))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("long"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"long"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
+    // m_presenter->setAssignedFirstRun(wsName, boost::none);
+    localSetAssignedFirstRun(wsName, boost::none);
   }
 
   void test_setAssignedFirstRun_alreadySet() {
@@ -228,9 +234,12 @@ public:
     m_presenter->setAssignedFirstRun(wsName, boost::none);
     EXPECT_CALL(*m_dataSelector, setWorkspaceDetails(_, _, _)).Times(0);
     EXPECT_CALL(*m_fitBrowser, allowSequentialFits(_)).Times(0);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("long"))).Times(0);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(0);
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"long"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
+    // m_presenter->setAssignedFirstRun(wsName, boost::none);
+    localSetAssignedFirstRun(wsName, boost::none);
   }
 
   void test_setAssignedFirstRun_loadCurrentRun() {
@@ -241,13 +250,15 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00061335"), QString("MUSR"),
                                     Eq(currentRunPath))).Times(1);
-    m_presenter->setAssignedFirstRun(wsName, currentRunPath);
+    // m_presenter->setAssignedFirstRun(wsName, currentRunPath);
+    localSetAssignedFirstRun(wsName, currentRunPath);
   }
 
   void test_getAssignedFirstRun() {
     setupGroupPeriodSelections();
     const QString wsName("MUSR00015189; Pair; long; Asym; 1; #1");
-    m_presenter->setAssignedFirstRun(wsName, boost::none);
+    // m_presenter->setAssignedFirstRun(wsName, boost::none);
+    localSetAssignedFirstRun(wsName, boost::none);
     TS_ASSERT_EQUALS(wsName, m_presenter->getAssignedFirstRun());
   }
 
@@ -683,42 +694,13 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00015189-91"), QString("MUSR"),
                                     Eq(boost::none))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("fwd"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
-
-    m_presenter->setSelectedWorkspace(wsName, boost::none);
-  }
-
-  void test_setSelectedWorkspace_groupsAlreadySelected_shouldNotUnselect() {
-    const QString wsName("MUSR00015189-91; Group; fwd; Asym; 1; #6");
-
-    // Groups "fwd" and "bwd" are already selected
-    ON_CALL(*m_dataSelector, getChosenGroups())
-        .WillByDefault(Return(QStringList{"fwd", "bwd"}));
-    ON_CALL(*m_dataSelector, getPeriodSelections())
-        .WillByDefault(Return(QStringList{}));
-
-    // It should NOT deselect the already selected groups
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(_)).Times(0);
-
-    m_presenter->setSelectedWorkspace(wsName, boost::none);
-  }
-
-  void test_setSelectedWorkspace_periodsAlreadySelected_shouldNotUnselect() {
-    const QString wsName("MUSR00015189-91; Group; fwd; Asym; 1; #6");
-
-    // Periods 1 and 2 are already selected
-    ON_CALL(*m_dataSelector, getPeriodSelections())
-        .WillByDefault(Return(QStringList{"1", "2"}));
-    ON_CALL(*m_dataSelector, getChosenGroups())
-        .WillByDefault(Return(QStringList{}));
-
-    // It should NOT deselect the already selected periods
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(_)).Times(0);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"fwd"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
 
-    m_presenter->setSelectedWorkspace(wsName, boost::none);
+    localSetSelectedWorkspace(wsName, boost::none);
   }
-
   void test_setSelectedWorkspace_loadCurrentRun() {
     setupGroupPeriodSelections();
     const QString wsName("MUSR00061335; Group; fwd; Asym; 1; #1");
@@ -735,10 +717,12 @@ public:
     EXPECT_CALL(*m_dataSelector,
                 setWorkspaceDetails(QString("00061335"), QString("MUSR"),
                                     Eq(currentRunPath))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenGroup(QString("fwd"))).Times(1);
-    EXPECT_CALL(*m_dataSelector, setChosenPeriod(QString("1"))).Times(1);
+    EXPECT_CALL(*m_dataSelector, setGroupsSelected(QStringList({"fwd"})))
+        .Times(1);
+    EXPECT_CALL(*m_dataSelector, setPeriodsSelected(QStringList({"1"})))
+        .Times(1);
 
-    m_presenter->setSelectedWorkspace(wsName, currentRunPath);
+    localSetSelectedWorkspace(wsName, currentRunPath);
   }
 
   void test_doPreFitChecks_nonSequential_invalidRuns_doesNotFit() {
@@ -1081,6 +1065,36 @@ private:
     m_presenter->doPreFitChecks(sequential);
   }
 
+  /// method to manually set up workspace
+  /// this is a work around for the signal/slots
+  void localSetAssignedFirstRun(const QString &wsName,
+                                const boost::optional<QString> &filepath) {
+    m_presenter->setAssignedFirstRun(wsName, filepath);
+    // manually replicate signal
+    const auto wsParams =
+        MantidQt::CustomInterfaces::MuonAnalysisHelper::parseWorkspaceName(
+            wsName.toStdString());
+    m_dataSelector->setPeriodsSelected(
+        QStringList{QString::fromStdString(wsParams.periods)});
+    m_dataSelector->setGroupsSelected(
+        QStringList{QString::fromStdString(wsParams.itemName)});
+  }
+
+  /// method to manually set up workspace
+  /// this is a work around for the signal/slots
+  void localSetSelectedWorkspace(const QString &wsName,
+                                 const boost::optional<QString> &filepath) {
+    m_presenter->setSelectedWorkspace(wsName, filepath);
+    // manually replicate signal
+    const auto wsParams =
+        MantidQt::CustomInterfaces::MuonAnalysisHelper::parseWorkspaceName(
+            wsName.toStdString());
+    m_dataSelector->setPeriodsSelected(
+        QStringList{QString::fromStdString(wsParams.periods)});
+    m_dataSelector->setGroupsSelected(
+        QStringList{QString::fromStdString(wsParams.itemName)});
+  }
+
   MockDataSelector *m_dataSelector;
   MockFitBrowser *m_fitBrowser;
   MuonAnalysisFitDataPresenter *m_presenter;
diff --git a/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h b/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h
index 738733de5a755523a9ae9791457cf4279972d457..32b7d900156f1f530d61ff288c3609a057c978aa 100644
--- a/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h
+++ b/MantidQt/CustomInterfaces/test/ReflDataProcessorPresenterTest.h
@@ -432,6 +432,104 @@ public:
     TS_ASSERT(Mock::VerifyAndClearExpectations(&mockMainPresenter));
   }
 
+  void testProcessEventWorkspacesLogValueSlicing() {
+    NiceMock<MockDataProcessorView> mockDataProcessorView;
+    NiceMock<MockProgressableView> mockProgress;
+    NiceMock<MockMainPresenter> mockMainPresenter;
+    auto presenter = presenterFactory.create();
+    presenter->acceptViews(&mockDataProcessorView, &mockProgress);
+    presenter->accept(&mockMainPresenter);
+
+    createPrefilledWorkspace("TestWorkspace", presenter->getWhiteList());
+    EXPECT_CALL(mockDataProcessorView, getWorkspaceToOpen())
+        .Times(1)
+        .WillRepeatedly(Return("TestWorkspace"));
+    TS_ASSERT_THROWS_NOTHING(
+        presenter->notify(DataProcessorPresenter::OpenTableFlag));
+
+    std::set<int> groupList;
+    groupList.insert(0);
+
+    // We should not receive any errors
+    EXPECT_CALL(mockMainPresenter, giveUserCritical(_, _)).Times(0);
+
+    // The user hits the "process" button with the first group selected
+    EXPECT_CALL(mockDataProcessorView, getSelectedChildren())
+        .Times(1)
+        .WillRepeatedly(Return(std::map<int, std::set<int>>()));
+    EXPECT_CALL(mockDataProcessorView, getSelectedParents())
+        .Times(1)
+        .WillRepeatedly(Return(groupList));
+    EXPECT_CALL(mockMainPresenter, getTimeSlicingValues())
+        .Times(1)
+        .WillOnce(Return("Slicing=\"0,10,20,30\",LogFilter=proton_charge"));
+    EXPECT_CALL(mockMainPresenter, getTimeSlicingType())
+        .Times(1)
+        .WillOnce(Return("LogValue"));
+    EXPECT_CALL(mockMainPresenter, getPreprocessingValues())
+        .Times(6)
+        .WillRepeatedly(Return(std::map<std::string, std::string>()));
+    EXPECT_CALL(mockMainPresenter, getPreprocessingProperties())
+        .Times(6)
+        .WillRepeatedly(Return(std::map<std::string, std::set<std::string>>()));
+    EXPECT_CALL(mockMainPresenter, getPreprocessingOptions())
+        .Times(6)
+        .WillRepeatedly(Return(std::map<std::string, std::string>()));
+    EXPECT_CALL(mockMainPresenter, getProcessingOptions())
+        .Times(6)
+        .WillRepeatedly(Return(""));
+    EXPECT_CALL(mockMainPresenter, getPostprocessingOptions())
+        .Times(3)
+        .WillRepeatedly(Return(""));
+    EXPECT_CALL(mockDataProcessorView, getEnableNotebook())
+        .Times(1)
+        .WillOnce(Return(false));
+    EXPECT_CALL(mockDataProcessorView, getProcessInstrument())
+        .Times(14)
+        .WillRepeatedly(Return("INTER"));
+    EXPECT_CALL(mockDataProcessorView, requestNotebookPath()).Times(0);
+
+    TS_ASSERT_THROWS_NOTHING(
+        presenter->notify(DataProcessorPresenter::ProcessFlag));
+
+    // Check output workspaces were created as expected
+    for (size_t i = 0; i < 3; i++) {
+      std::string sliceIndex = std::to_string(i);
+
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsLam_13460_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsLam_13462_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("IvsQ_13460_slice_" +
+                                                          sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("IvsQ_13462_slice_" +
+                                                          sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsQ_13460_slice_" + sliceIndex + "_13462_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsQ_binned_13460_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist(
+          "IvsQ_binned_13462_slice_" + sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13460_slice_" +
+                                                          sliceIndex));
+      TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13462_slice_" +
+                                                          sliceIndex));
+    }
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13460"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13462"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13460_monitors"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TOF_13462_monitors"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TRANS_13463"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TRANS_13464"));
+    TS_ASSERT(AnalysisDataService::Instance().doesExist("TRANS_13463_13464"));
+
+    // Tidy up
+    AnalysisDataService::Instance().clear();
+
+    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockDataProcessorView));
+    TS_ASSERT(Mock::VerifyAndClearExpectations(&mockMainPresenter));
+  }
+
   void testProcessWithNotebookWarn() {
     NiceMock<MockDataProcessorView> mockDataProcessorView;
     NiceMock<MockProgressableView> mockProgress;
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h
index e1ccc74e0921631e140c623b4fac85f3201680f7..9ff471ef2bd0fba0a0d9a2d635f147317f95ff35 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/FitPropertyBrowser.h
@@ -299,9 +299,9 @@ protected slots:
   /// Called when a bool property is changed
   virtual void boolChanged(QtProperty *prop);
 
+  virtual void enumChanged(QtProperty *prop);
 private slots:
 
-  void enumChanged(QtProperty *prop);
   void intChanged(QtProperty *prop);
   virtual void doubleChanged(QtProperty *prop);
   /// Called when one of the parameter values gets changed
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h
index 783db6d7f65a96590bf3a0468cd6eddbb7d4da83..c199ae4521f115da701f13a4d5a46b6a9e07ecfe 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/IMuonFitDataSelector.h
@@ -21,15 +21,11 @@ public:
   virtual QStringList getFilenames() const = 0;
   virtual double getStartTime() const = 0;
   virtual double getEndTime() const = 0;
-  virtual void setNumPeriods(size_t numPeriods) = 0;
-  virtual void setChosenPeriod(const QString &period) = 0;
   virtual QStringList getPeriodSelections() const = 0;
   virtual void
   setWorkspaceDetails(const QString &runNumbers, const QString &instName,
                       const boost::optional<QString> &filePath) = 0;
-  virtual void setAvailableGroups(const QStringList &groupNames) = 0;
   virtual QStringList getChosenGroups() const = 0;
-  virtual void setChosenGroup(const QString &group) = 0;
   virtual void setStartTime(double start) = 0;
   virtual void setEndTime(double end) = 0;
   virtual void setStartTimeQuietly(double start) = 0;
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
index 5db55ca48cb933050c980c02f4fceec8298ee45d..2b0a1ff363daa59eafab3ae05e96d9ced741a3ac 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.h
@@ -3,8 +3,10 @@
 
 #include "ui_MuonFitDataSelector.h"
 #include "WidgetDllOption.h"
-#include "MantidQtMantidWidgets/IMuonFitDataSelector.h"
 #include "MantidQtAPI/MantidWidget.h"
+#include "MantidQtMantidWidgets/FitPropertyBrowser.h"
+#include "MantidQtMantidWidgets/MuonFitPropertyBrowser.h"
+#include "MantidQtMantidWidgets/IMuonFitDataSelector.h"
 
 namespace MantidQt {
 namespace MantidWidgets {
@@ -40,12 +42,13 @@ class EXPORT_OPT_MANTIDQT_MANTIDWIDGETS MuonFitDataSelector
     : public MantidQt::API::MantidWidget,
       public IMuonFitDataSelector {
   Q_OBJECT
+
 public:
   /// Basic constructor
   explicit MuonFitDataSelector(QWidget *parent);
   /// Constructor with more options
-  MuonFitDataSelector(QWidget *parent, int runNumber, const QString &instName,
-                      size_t numPeriods, const QStringList &groups);
+  MuonFitDataSelector(QWidget *parent, int runNumber, const QString &instName);
+  //, size_t numPeriods, const QStringList &groups);
   // --- MantidWidget methods ---
   /// Get user input through a common interface
   QVariant getUserInput() const override;
@@ -60,14 +63,11 @@ public:
   double getEndTime() const override;
   /// Get names of chosen groups
   QStringList getChosenGroups() const override;
-  /// Set chosen group
-  void setChosenGroup(const QString &group) override;
-  /// Clear list of selected groups
-  void clearChosenGroups() const;
+  /// Set chosen group/period
+  void setGroupsSelected(QStringList groups) { m_chosenGroups = groups; };
+  void setPeriodsSelected(QStringList periods) { m_chosenPeriods = periods; };
   /// Get selected periods
   QStringList getPeriodSelections() const override;
-  /// Set selected period
-  void setChosenPeriod(const QString &period) override;
   /// Get type of fit
   IMuonFitDataSelector::FitType getFitType() const override;
   /// Get instrument name
@@ -88,13 +88,9 @@ public:
   bool askUserWhetherToOverwrite() override;
 
 public slots:
-  /// Set number of periods in data
-  void setNumPeriods(size_t numPeriods) override;
   /// Set starting run number, instrument and (optionally) file path
   void setWorkspaceDetails(const QString &runNumbers, const QString &instName,
                            const boost::optional<QString> &filePath) override;
-  /// Set names of available groups
-  void setAvailableGroups(const QStringList &groupNames) override;
   /// Set start time for fit
   void setStartTime(double start) override;
   /// Set end time for fit
@@ -105,8 +101,6 @@ public slots:
   void setEndTimeQuietly(double end) override;
   /// Called when user changes runs
   void userChangedRuns();
-  /// Called when period combination box checked/unchecked
-  void periodCombinationStateChanged(int state);
   /// Called when fit type changed
   void fitTypeChanged(bool state);
   /// Called when group/period box selection changes
@@ -115,10 +109,6 @@ public slots:
 signals:
   /// Edited the start or end fields
   void dataPropertiesChanged();
-  /// Changed the groups selection
-  void selectedGroupsChanged();
-  /// Changed the periods selection
-  void selectedPeriodsChanged();
   /// Changed the workspace
   void workspaceChanged();
   /// Simultaneous fit label changed
@@ -127,30 +117,20 @@ signals:
   void datasetIndexChanged(int index);
 
 private:
-  /// Add a checkbox to Groups section
-  void addGroupCheckbox(const QString &name);
-  /// Clear all checkboxes from Groups section
-  void clearGroupCheckboxes();
-  /// Set visibility of "Periods" section
-  void setPeriodVisibility(bool visible);
   /// Set default values in some input controls
   void setDefaultValues();
-  /// Set up validators for input
-  void setUpValidators();
   /// Set up connections for signals/slots
   void setUpConnections();
   /// Set type for fit
   void setFitType(IMuonFitDataSelector::FitType type);
-  /// Check/uncheck "Combination" box and enable/disable text boxes
-  void setPeriodCombination(bool on);
   /// Set busy cursor and disable input
   void setBusyState();
   /// Member - user interface
   Ui::MuonFitDataSelector m_ui;
-  /// Map of group names to checkboxes
-  QMap<QString, QCheckBox *> m_groupBoxes;
-  /// Map of period names to checkboxes
-  QMap<QString, QCheckBox *> m_periodBoxes;
+  double m_startX;
+  double m_endX;
+  QStringList m_chosenGroups;
+  QStringList m_chosenPeriods;
 
 private slots:
   /// Set normal cursor and enable input
@@ -164,4 +144,4 @@ private slots:
 } // namespace MantidWidgets
 } // namespace MantidQt
 
-#endif /* MANTID_MANTIDWIDGETS_MUONFITDATASELECTOR_H_ */
\ No newline at end of file
+#endif /* MANTID_MANTIDWIDGETS_MUONFITDATASELECTOR_H_ */
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui
index c889629f62a539fc5dbf15890af182410ae221a9..fc91bd25b66f2a9640383295acf4be3140b6a1f0 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitDataSelector.ui
@@ -21,258 +21,14 @@
      </property>
      <layout class="QVBoxLayout" name="verticalLayout">
       <item>
-       <layout class="QHBoxLayout" name="horizontalLayoutData">
-        <item>
-         <widget class="QGroupBox" name="groupBoxWorkspaces">
-          <property name="title">
-           <string>Workspaces</string>
-          </property>
-          <layout class="QVBoxLayout" name="verticalLayoutWorkspaces">
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutRuns">
-             <item>
-              <widget class="QLabel" name="lblRuns">
-               <property name="text">
-                <string>Runs:</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="MantidQt::API::MWRunFiles" name="runs" native="true">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Expanding" vsizetype="Preferred">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-               <property name="label" stdset="0">
-                <string/>
-               </property>
-               <property name="multipleFiles" stdset="0">
-                <bool>true</bool>
-               </property>
-               <property name="findRunFiles" stdset="0">
-                <bool>true</bool>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutRadioButtons">
-             <item>
-              <widget class="QRadioButton" name="rbCoAdd">
-               <property name="text">
-                <string>Co-add</string>
-               </property>
-               <property name="checked">
-                <bool>true</bool>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QRadioButton" name="rbSimultaneous">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-               <property name="minimumSize">
-                <size>
-                 <width>0</width>
-                 <height>0</height>
-                </size>
-               </property>
-               <property name="text">
-                <string>Simultaneous</string>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutTime">
-             <item>
-              <widget class="QLabel" name="lblStart">
-               <property name="text">
-                <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Start (us):&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtStart">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <spacer name="horizontalSpacerTime">
-               <property name="orientation">
-                <enum>Qt::Horizontal</enum>
-               </property>
-               <property name="sizeHint" stdset="0">
-                <size>
-                 <width>40</width>
-                 <height>20</height>
-                </size>
-               </property>
-              </spacer>
-             </item>
-             <item>
-              <widget class="QLabel" name="lblEnd">
-               <property name="text">
-                <string>End (us):</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtEnd">
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-          </layout>
-         </widget>
-        </item>
-        <item>
-         <widget class="QGroupBox" name="groupBoxGroups">
-          <property name="title">
-           <string>Groups</string>
-          </property>
-          <layout class="QVBoxLayout" name="verticalLayoutGroups">
-           <item>
-            <widget class="QCheckBox" name="chkFwd">
-             <property name="text">
-              <string>fwd</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <widget class="QCheckBox" name="chkBwd">
-             <property name="text">
-              <string>bwd</string>
-             </property>
-            </widget>
-           </item>
-          </layout>
-         </widget>
-        </item>
-        <item>
-         <widget class="QGroupBox" name="groupBoxPeriods">
-          <property name="title">
-           <string>Periods</string>
-          </property>
-          <layout class="QVBoxLayout" name="verticalLayoutPeriods">
-           <item>
-            <widget class="QCheckBox" name="chk1">
-             <property name="text">
-              <string>1</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <widget class="QCheckBox" name="chk2">
-             <property name="text">
-              <string>2</string>
-             </property>
-            </widget>
-           </item>
-           <item>
-            <layout class="QHBoxLayout" name="horizontalLayoutPeriodsCombine">
-             <item>
-              <widget class="QCheckBox" name="chkCombine">
-               <property name="text">
-                <string>Combine</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtFirst">
-               <property name="enabled">
-                <bool>false</bool>
-               </property>
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Maximum" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLabel" name="lblMinus">
-               <property name="text">
-                <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:12pt; font-weight:600;&quot;&gt;-&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
-               </property>
-              </widget>
-             </item>
-             <item>
-              <widget class="QLineEdit" name="txtSecond">
-               <property name="enabled">
-                <bool>false</bool>
-               </property>
-               <property name="sizePolicy">
-                <sizepolicy hsizetype="Maximum" vsizetype="Fixed">
-                 <horstretch>0</horstretch>
-                 <verstretch>0</verstretch>
-                </sizepolicy>
-               </property>
-              </widget>
-             </item>
-            </layout>
-           </item>
-          </layout>
-         </widget>
-        </item>
-       </layout>
-      </item>
-      <item>
-       <layout class="QHBoxLayout" name="horizontalLayoutLabel">
-        <item>
-         <widget class="QLabel" name="lblSimFitLabel">
-          <property name="text">
-           <string>Label for simultaneous fit: </string>
-          </property>
-         </widget>
-        </item>
+       <layout class="QHBoxLayout" name="horizontalLayoutDataset">
         <item>
-         <widget class="QLineEdit" name="txtSimFitLabel">
-          <property name="enabled">
-           <bool>false</bool>
-          </property>
+         <widget class="QLabel" name="label">
           <property name="text">
-           <string>0</string>
+           <string>Display Parameters For:</string>
           </property>
          </widget>
         </item>
-       </layout>
-      </item>
-      <item>
-       <layout class="QHBoxLayout" name="horizontalLayoutDataset">
-        <item>
-         <spacer name="horizontalSpacerDatasetLeft">
-          <property name="orientation">
-           <enum>Qt::Horizontal</enum>
-          </property>
-          <property name="sizeHint" stdset="0">
-           <size>
-            <width>40</width>
-            <height>20</height>
-           </size>
-          </property>
-         </spacer>
-        </item>
         <item>
          <widget class="QPushButton" name="btnPrevDataset">
           <property name="sizePolicy">
@@ -319,7 +75,7 @@
           </property>
           <property name="sizeHint" stdset="0">
            <size>
-            <width>40</width>
+            <width>30</width>
             <height>20</height>
            </size>
           </property>
@@ -327,6 +83,132 @@
         </item>
        </layout>
       </item>
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayoutLabel">
+        <item>
+         <widget class="QLabel" name="lblSimFitLabel">
+          <property name="text">
+           <string>Label for simultaneous fit: </string>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <widget class="QLineEdit" name="txtSimFitLabel">
+          <property name="enabled">
+           <bool>false</bool>
+          </property>
+          <property name="text">
+           <string>0</string>
+          </property>
+         </widget>
+        </item>
+       </layout>
+      </item>
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayoutData">
+        <item>
+         <layout class="QHBoxLayout" name="horizontalLayout">
+          <item>
+           <layout class="QHBoxLayout" name="horizontalLayout_2">
+            <item>
+             <layout class="QHBoxLayout" name="horizontalLayoutRuns">
+              <item>
+               <widget class="QLabel" name="lblRuns">
+                <property name="text">
+                 <string>Runs:</string>
+                </property>
+               </widget>
+              </item>
+              <item>
+               <widget class="MantidQt::API::MWRunFiles" name="runs" native="true">
+                <property name="sizePolicy">
+                 <sizepolicy hsizetype="Expanding" vsizetype="Preferred">
+                  <horstretch>0</horstretch>
+                  <verstretch>0</verstretch>
+                 </sizepolicy>
+                </property>
+                <property name="label" stdset="0">
+                 <string/>
+                </property>
+                <property name="multipleFiles" stdset="0">
+                 <bool>true</bool>
+                </property>
+                <property name="findRunFiles" stdset="0">
+                 <bool>true</bool>
+                </property>
+               </widget>
+              </item>
+             </layout>
+            </item>
+           </layout>
+          </item>
+         </layout>
+        </item>
+       </layout>
+      </item>
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayout_4">
+        <item>
+         <layout class="QHBoxLayout" name="horizontalLayoutRadioButtons">
+          <item>
+           <spacer name="horizontalSpacer_2">
+            <property name="orientation">
+             <enum>Qt::Horizontal</enum>
+            </property>
+            <property name="sizeHint" stdset="0">
+             <size>
+              <width>40</width>
+              <height>20</height>
+             </size>
+            </property>
+           </spacer>
+          </item>
+          <item>
+           <widget class="QRadioButton" name="rbCoAdd">
+            <property name="text">
+             <string>Co-add</string>
+            </property>
+            <property name="checked">
+             <bool>true</bool>
+            </property>
+           </widget>
+          </item>
+          <item>
+           <widget class="QRadioButton" name="rbSimultaneous">
+            <property name="sizePolicy">
+             <sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
+              <horstretch>0</horstretch>
+              <verstretch>0</verstretch>
+             </sizepolicy>
+            </property>
+            <property name="minimumSize">
+             <size>
+              <width>0</width>
+              <height>0</height>
+             </size>
+            </property>
+            <property name="text">
+             <string>Simultaneous</string>
+            </property>
+           </widget>
+          </item>
+          <item>
+           <spacer name="horizontalSpacer">
+            <property name="orientation">
+             <enum>Qt::Horizontal</enum>
+            </property>
+            <property name="sizeHint" stdset="0">
+             <size>
+              <width>40</width>
+              <height>20</height>
+             </size>
+            </property>
+           </spacer>
+          </item>
+         </layout>
+        </item>
+       </layout>
+      </item>
      </layout>
     </widget>
    </item>
diff --git a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h
index 7ddcbcfacbb2e3df4c1db27a1e1b612d29114e1c..341e858c6e877d5b7b87791a1c95e7b5ab5fe0a5 100644
--- a/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h
+++ b/MantidQt/MantidWidgets/inc/MantidQtMantidWidgets/MuonFitPropertyBrowser.h
@@ -5,10 +5,12 @@
 #include "MantidQtMantidWidgets/IMuonFitDataModel.h"
 #include "MantidQtMantidWidgets/IMuonFitFunctionModel.h"
 
+#include <QMap>
 /* Forward declarations */
 class QDockWidget;
 class QLabel;
 class QPushButton;
+class QCheckBox;
 class QMenu;
 class QSignalMapper;
 class QtTreePropertyBrowser;
@@ -21,7 +23,9 @@ class QtEnumPropertyManager;
 class QtProperty;
 class QtBrowserItem;
 class QVBoxLayout;
+class QGroupBox;
 class QSplitter;
+class QWidget;
 
 namespace Mantid {
 namespace API {
@@ -97,13 +101,33 @@ public:
   virtual void setFitEnabled(bool yes) override;
 
   void doTFAsymmFit(int maxIterations);
+  void setAvailableGroups(const QStringList &groups);
+  void setAvailablePeriods(const QStringList &periods);
 
+  QStringList getChosenGroups() const;
+  QStringList getChosenPeriods() const;
+
+  /// Clear list of selected groups
+  void clearChosenGroups() const;
+  void setAllGroups();
+  void setAllPairs();
+  void clearChosenPeriods() const;
+  void setChosenGroup(QString &group);
+  void setChosenPeriods(const QString &period);
+  void setSingleFitLabel(std::string name);
 public slots:
   /// Perform the fit algorithm
   void fit() override;
   /// Open sequential fit dialog
   void sequentialFit() override;
+
   void executeFitMenu(const QString &item) override;
+  void groupBtnPressed();
+  void periodBtnPressed();
+  void generateBtnPressed();
+  void combineBtnPressed();
+  void setNumPeriods(size_t numPeriods);
+
 signals:
   /// Emitted when sequential fit is requested by user
   void sequentialFitRequested();
@@ -117,6 +141,9 @@ signals:
   void userChangedDatasetIndex(int index) override;
   /// Emitted when "fit to raw data" is changed
   void fitRawDataClicked(bool enabled) override;
+  void groupBoxClicked();
+  void periodBoxClicked();
+  void reselctGroupClicked(bool enabled);
   /// Emitted when fit is about to be run
   void preFitChecksRequested(bool sequential) override;
 
@@ -127,12 +154,14 @@ protected:
 private slots:
   void doubleChanged(QtProperty *prop) override;
   void boolChanged(QtProperty *prop) override;
+  void enumChanged(QtProperty *prop) override;
 
 private:
   /// new menu option
   QAction *m_fitActionTFAsymm;
   /// override populating fit menu
   void populateFitMenuButton(QSignalMapper *fitMapper, QMenu *fitMenu) override;
+
   /// Get the registered function names
   void populateFunctionNames() override;
   /// Check if the workspace can be used in the fit
@@ -141,6 +170,18 @@ private:
   /// workspaces
   void finishAfterSimultaneousFit(const Mantid::API::IAlgorithm *fitAlg,
                                   const int nWorkspaces) const;
+
+  void clearGroupCheckboxes();
+  void addGroupCheckbox(const QString &name);
+  void genGroupWindow();
+  void genPeriodWindow();
+  void genCombinePeriodWindow();
+  void updateGroupDisplay();
+  void updatePeriodDisplay();
+  void setChosenPeriods(const QStringList &chosenPeriods);
+  void clearPeriodCheckboxes();
+  void addPeriodCheckbox(const QString &name);
+
   /// Splitter for additional widgets and splitter between this and browser
   QSplitter *m_widgetSplitter, *m_mainSplitter;
   /// Names of workspaces to fit
@@ -148,7 +189,35 @@ private:
   /// Label to use for simultaneous fits
   std::string m_simultaneousLabel;
   QtProperty *m_normalization;
-  mutable QStringList m_normalizationValue;
+  QStringList m_normalizationValue;
+
+  QtBrowserItem *m_multiFitSettingsGroup;
+  QtProperty *m_groupsToFit;
+  QStringList m_groupsToFitOptions;
+  /// Map of group names to checkboxes
+  QMap<QString, QtProperty *> m_groupBoxes;
+  QtProperty *m_showGroup;
+  QStringList m_showGroupValue;
+
+  QtProperty *m_periodsToFit;
+  QStringList m_periodsToFitOptions;
+  /// Map of group names to checkboxes
+  QMap<QString, QtProperty *> m_periodBoxes;
+  QtProperty *m_showPeriods;
+  QStringList m_showPeriodValue;
+  QLineEdit *m_positiveCombo;
+  QLineEdit *m_negativeCombo;
+
+  QPushButton *m_reselectGroupBtn;
+  QPushButton *m_reselectPeriodBtn;
+  QPushButton *m_generateBtn;
+  QGroupBox *m_btnGroup;
+  QDialog *m_groupWindow;
+  QDialog *m_periodWindow;
+  QDialog *m_comboWindow;
+
+  std::vector<std::string> m_groupsList = {"fwd", "bkwd", "top", "bottom",
+                                           "bwd"};
 };
 
 std::vector<double> readNormalization();
diff --git a/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp b/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp
index 40f7b16fee79644f5bc37632a48de09d77ff88fc..7ec03b415df35d7919c065cc2b5817f67706cfa6 100644
--- a/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp
+++ b/MantidQt/MantidWidgets/src/FitPropertyBrowser.cpp
@@ -1983,10 +1983,10 @@ void FitPropertyBrowser::addTieToFunction() {
   int iPar = -1;
   for (size_t i = 0; i < m_compositeFunction->nParams(); i++) {
     Mantid::API::ParameterReference ref(m_compositeFunction.get(), i);
-    Mantid::API::IFunction *fun = ref.getFunction();
+    Mantid::API::IFunction *fun = ref.getLocalFunction();
 
     // Pick out parameters with the same name as the one we're tying from
-    if (fun->parameterName(static_cast<int>(ref.getIndex())) == parName) {
+    if (fun->parameterName(static_cast<int>(ref.getLocalIndex())) == parName) {
       if (iPar == -1 &&
           fun ==
               h->function()
diff --git a/MantidQt/MantidWidgets/src/MantidHelpWindow.cpp b/MantidQt/MantidWidgets/src/MantidHelpWindow.cpp
index c8613668120d3695ad8d108a03516bd3427093d8..281babbed39e8b7b1ed5013750aa2a5a5760b7d4 100644
--- a/MantidQt/MantidWidgets/src/MantidHelpWindow.cpp
+++ b/MantidQt/MantidWidgets/src/MantidHelpWindow.cpp
@@ -1,3 +1,4 @@
+#include "MantidAPI/AlgorithmManager.h"
 #include "MantidQtMantidWidgets/MantidHelpWindow.h"
 #include "MantidQtMantidWidgets/pqHelpWindow.h"
 #include "MantidQtAPI/InterfaceManager.h"
@@ -183,23 +184,38 @@ void MantidHelpWindow::showWikiPage(const QString &page) {
  */
 void MantidHelpWindow::showAlgorithm(const string &name, const int version) {
   auto versionStr("-v" + boost::lexical_cast<string>(version));
-  if (version <= 0)
+  if (version <= 0) {
     versionStr = ""; // let the redirect do its thing
+  }
 
+  QString help_url("");
+  if (!name.empty()) {
+    auto alg = Mantid::API::AlgorithmManager::Instance().createUnmanaged(name);
+    help_url = QString::fromStdString(alg->helpURL());
+  }
   if (bool(g_helpWindow)) {
-    QString url(BASE_URL);
-    url += "algorithms/";
-    if (name.empty())
-      url += "index.html";
-    else
-      url += QString(name.c_str()) + QString(versionStr.c_str()) + ".html";
-    this->showHelp(url);
-  } else // qt-assistant disabled
-  {
-    if (name.empty())
-      this->showWikiPage(std::string("Category:Algorithms"));
-    else
-      this->showWikiPage(name);
+    if (help_url.isEmpty()) {
+      QString url(BASE_URL);
+      url += "algorithms/";
+      if (name.empty()) {
+        url += "index.html";
+      } else {
+        url += QString(name.c_str()) + QString(versionStr.c_str()) + ".html";
+      }
+      this->showHelp(url);
+    } else {
+      this->showHelp(help_url);
+    }
+  } else { // qt-assistant disabled
+    if (help_url.isEmpty()) {
+      if (name.empty()) {
+        this->showWikiPage(std::string("Category:Algorithms"));
+      } else {
+        this->showWikiPage(name);
+      }
+    } else {
+      this->openWebpage(help_url);
+    }
   }
 }
 
diff --git a/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp b/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
index 07fb31bb11291b3deae887f1d0bb1cebfdecadb8..4fa7b1025202367cacdc9fdd1bc34fc37c64e8f8 100644
--- a/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
+++ b/MantidQt/MantidWidgets/src/MuonFitDataSelector.cpp
@@ -1,6 +1,9 @@
 #include "MantidQtMantidWidgets/MuonFitDataSelector.h"
 #include "MantidKernel/Logger.h"
 
+#include "qttreepropertybrowser.h"
+#include "qtpropertymanager.h"
+
 namespace {
 Mantid::Kernel::Logger g_log("MuonFitDataSelector");
 }
@@ -15,14 +18,8 @@ namespace MantidWidgets {
 MuonFitDataSelector::MuonFitDataSelector(QWidget *parent)
     : MantidWidget(parent) {
   m_ui.setupUi(this);
-  this->setUpValidators();
   this->setDefaultValues();
   this->setUpConnections();
-  m_groupBoxes.insert("fwd", m_ui.chkFwd);
-  m_groupBoxes.insert("bwd", m_ui.chkBwd);
-  m_periodBoxes.insert("1", m_ui.chk1);
-  m_periodBoxes.insert("2", m_ui.chk2);
-
   // Disable "Browse" button - use case is that first run will always be the one
   // selected on front tab. User will type in the runs they want rather than
   // using the Browse button. (If they want to "Browse" they can use front tab).
@@ -34,18 +31,22 @@ MuonFitDataSelector::MuonFitDataSelector(QWidget *parent)
  * @param parent :: [input] Parent dialog for the widget
  * @param runNumber :: [input] Run number of initial workspace
  * @param instName :: [input] Name of instrument from initial workspace
- * @param numPeriods :: [input] Number of periods from initial workspace
- * @param groups :: [input] Group names from initial workspace
- */
+*/
 MuonFitDataSelector::MuonFitDataSelector(QWidget *parent, int runNumber,
-                                         const QString &instName,
-                                         size_t numPeriods,
-                                         const QStringList &groups)
+                                         const QString &instName) /*
+  * numPeriods :: [input] Number of periods from initial workspace
+  * groups :: [input] Group names from initial workspace
+                                          size_t numPeriods,
+                                          const QStringList &groups)*/
     : MuonFitDataSelector(parent) {
   this->setWorkspaceDetails(QString::number(runNumber), instName,
                             boost::optional<QString>{});
-  this->setNumPeriods(numPeriods);
-  this->setAvailableGroups(groups);
+  // not used in this case
+  // but leave these here as a remainder
+  // for future changes that may need to assign them
+
+  // this->setNumPeriods(numPeriods);
+  // this->setAvailableGroups(groups);
 }
 
 /**
@@ -56,18 +57,8 @@ void MuonFitDataSelector::setUpConnections() {
   connect(m_ui.runs, SIGNAL(filesFound()), this, SLOT(userChangedRuns()));
   connect(m_ui.rbCoAdd, SIGNAL(toggled(bool)), this,
           SLOT(fitTypeChanged(bool)));
-  connect(m_ui.txtStart, SIGNAL(editingFinished()), this,
-          SIGNAL(dataPropertiesChanged()));
-  connect(m_ui.txtEnd, SIGNAL(editingFinished()), this,
-          SIGNAL(dataPropertiesChanged()));
-  connect(m_ui.chkCombine, SIGNAL(stateChanged(int)), this,
-          SLOT(periodCombinationStateChanged(int)));
   connect(m_ui.txtSimFitLabel, SIGNAL(editingFinished()), this,
           SIGNAL(simulLabelChanged()));
-  connect(this, SIGNAL(selectedGroupsChanged()), this,
-          SLOT(checkForMultiGroupPeriodSelection()));
-  connect(this, SIGNAL(selectedPeriodsChanged()), this,
-          SLOT(checkForMultiGroupPeriodSelection()));
   connect(this, SIGNAL(workspaceChanged()), this,
           SLOT(checkForMultiGroupPeriodSelection()));
   connect(m_ui.cbDataset, SIGNAL(currentIndexChanged(int)), this,
@@ -75,10 +66,6 @@ void MuonFitDataSelector::setUpConnections() {
   connect(m_ui.btnNextDataset, SIGNAL(clicked()), this, SLOT(setNextDataset()));
   connect(m_ui.btnPrevDataset, SIGNAL(clicked()), this,
           SLOT(setPreviousDataset()));
-  connect(m_ui.txtFirst, SIGNAL(editingFinished()), this,
-          SIGNAL(selectedPeriodsChanged()));
-  connect(m_ui.txtSecond, SIGNAL(editingFinished()), this,
-          SIGNAL(selectedPeriodsChanged()));
 }
 
 /**
@@ -108,37 +95,13 @@ void MuonFitDataSelector::userChangedRuns() {
   emit workspaceChanged();
 }
 
-/**
- * Sets group names and updates checkboxes on UI
- * By default sets all unchecked
- * @param groups :: [input] List of group names
- */
-void MuonFitDataSelector::setAvailableGroups(const QStringList &groups) {
-  // If it's the same list, do nothing
-  if (groups.size() == m_groupBoxes.size()) {
-    auto existingGroups = m_groupBoxes.keys();
-    auto newGroups = groups;
-    qSort(existingGroups);
-    qSort(newGroups);
-    if (existingGroups == newGroups) {
-      return;
-    }
-  }
-
-  clearGroupCheckboxes();
-  for (const auto group : groups) {
-    addGroupCheckbox(group);
-  }
-}
-
 /**
  * Get the user's supplied start time (default 0)
  * @returns :: start time input by user in microseconds
  */
 double MuonFitDataSelector::getStartTime() const {
   // Validator ensures cast to double will succeed
-  const QString start = m_ui.txtStart->text();
-  return start.toDouble();
+  return m_startX; // start.toDouble();
 }
 
 /**
@@ -146,7 +109,7 @@ double MuonFitDataSelector::getStartTime() const {
  * @param start :: [input] Start time in microseconds
  */
 void MuonFitDataSelector::setStartTimeQuietly(double start) {
-  m_ui.txtStart->setText(QString::number(start));
+  m_startX = start;
 }
 
 /**
@@ -162,19 +125,13 @@ void MuonFitDataSelector::setStartTime(double start) {
  * Get the user's supplied end time (default 10)
  * @returns :: start time input by user in microseconds
  */
-double MuonFitDataSelector::getEndTime() const {
-  // Validator ensures cast to double will succeed
-  const QString end = m_ui.txtEnd->text();
-  return end.toDouble();
-}
+double MuonFitDataSelector::getEndTime() const { return m_endX; }
 
 /**
  * Set the end time in the UI WITHOUT sending signal
  * @param end :: [input] End time in microseconds
  */
-void MuonFitDataSelector::setEndTimeQuietly(double end) {
-  m_ui.txtEnd->setText(QString::number(end));
-}
+void MuonFitDataSelector::setEndTimeQuietly(double end) { m_endX = end; }
 
 /**
  * Set the end time in the UI, and send signal
@@ -193,16 +150,6 @@ QStringList MuonFitDataSelector::getFilenames() const {
   return m_ui.runs->getFilenames();
 }
 
-/**
- * Set up input validation on UI controls
- * e.g. some boxes should only accept numeric input
- */
-void MuonFitDataSelector::setUpValidators() {
-  // Start/end times: numeric values only
-  m_ui.txtStart->setValidator(new QDoubleValidator(this));
-  m_ui.txtEnd->setValidator(new QDoubleValidator(this));
-}
-
 /**
  * Set up run finder with initial run number and instrument
  * @param runNumbers :: [input] Run numbers from loaded workspace
@@ -243,127 +190,17 @@ void MuonFitDataSelector::setWorkspaceDetails(
  */
 void MuonFitDataSelector::setDefaultValues() {
   const QChar muMicro{0x03BC}; // mu in Unicode
-  m_ui.lblStart->setText(QString("Start (%1s)").arg(muMicro));
-  m_ui.lblEnd->setText(QString("End (%1s)").arg(muMicro));
   this->setStartTime(0.0);
   this->setEndTime(0.0);
-  setPeriodCombination(false);
   m_ui.txtSimFitLabel->setText("0");
   emit simulLabelChanged(); // make sure default "0" is set
 }
-
-/**
- * Set visibility of the "Periods" group box
- * (if single-period, hide to not confuse the user)
- * @param visible :: [input] Whether to show or hide the options
- */
-void MuonFitDataSelector::setPeriodVisibility(bool visible) {
-  m_ui.groupBoxPeriods->setVisible(visible);
-}
-
-/**
- * Add a new checkbox to the list of groups with given name
- * The new checkbox is unchecked by default
- * @param name :: [input] Name of group to add
- */
-void MuonFitDataSelector::addGroupCheckbox(const QString &name) {
-  auto checkBox = new QCheckBox(name);
-  m_groupBoxes.insert(name, checkBox);
-  checkBox->setChecked(false);
-  m_ui.verticalLayoutGroups->addWidget(checkBox);
-  connect(checkBox, SIGNAL(clicked(bool)), this,
-          SIGNAL(selectedGroupsChanged()));
-}
-
-/**
- * Clears all group names and checkboxes
- * (ready to add new ones)
- */
-void MuonFitDataSelector::clearGroupCheckboxes() {
-  for (const auto &checkbox : m_groupBoxes) {
-    m_ui.verticalLayoutGroups->removeWidget(checkbox);
-    checkbox->deleteLater(); // will disconnect signal automatically
-  }
-  m_groupBoxes.clear();
-}
-
-/**
- * Sets checkboxes on UI for given number
- * of periods plus "combination" boxes.
- * Hides control for single-period data.
- * @param numPeriods :: [input] Number of periods
- */
-void MuonFitDataSelector::setNumPeriods(size_t numPeriods) {
-  const size_t currentPeriods = static_cast<size_t>(m_periodBoxes.size());
-  if (numPeriods > currentPeriods) {
-    // create more boxes
-    for (size_t i = currentPeriods; i != numPeriods; i++) {
-      QString name = QString::number(i + 1);
-      auto checkbox = new QCheckBox(name);
-      m_periodBoxes.insert(name, checkbox);
-      m_ui.verticalLayoutPeriods->addWidget(checkbox);
-    }
-  } else if (numPeriods < currentPeriods) {
-    // delete the excess
-    QStringList toRemove;
-    for (const QString name : m_periodBoxes.keys()) {
-      const size_t periodNum = static_cast<size_t>(name.toInt());
-      if (periodNum > numPeriods) {
-        m_ui.verticalLayoutPeriods->removeWidget(m_periodBoxes.value(name));
-        m_periodBoxes.value(name)->deleteLater(); // will disconnect signal
-        toRemove.append(name);
-      }
-    }
-    for (const QString name : toRemove) {
-      m_periodBoxes.remove(name);
-    }
-  }
-
-  // Ensure signals connected
-  for (const auto &checkbox : m_periodBoxes) {
-    connect(checkbox, SIGNAL(clicked()), this,
-            SIGNAL(selectedPeriodsChanged()));
-  }
-
-  // Always put the combination at the bottom ("-1" = at end)
-  m_ui.verticalLayoutPeriods->removeItem(m_ui.horizontalLayoutPeriodsCombine);
-  m_ui.verticalLayoutPeriods->insertLayout(-1,
-                                           m_ui.horizontalLayoutPeriodsCombine);
-
-  // Hide box if single-period
-  this->setPeriodVisibility(numPeriods > 1);
-}
-
 /**
  * Returns a list of periods and combinations chosen in UI
  * @returns :: list of periods e.g. "1", "3", "1+2-3+4", or "" if single-period
  */
 QStringList MuonFitDataSelector::getPeriodSelections() const {
-  QStringList checked;
-  if (m_ui.groupBoxPeriods->isVisible()) {
-    for (auto iter = m_periodBoxes.constBegin();
-         iter != m_periodBoxes.constEnd(); ++iter) {
-      if (iter.value()->isChecked()) {
-        checked.append(iter.key());
-      }
-    }
-
-    // combination
-    if (m_ui.chkCombine->isChecked()) {
-      QString combination = m_ui.txtFirst->text();
-      const auto second = m_ui.txtSecond->text();
-      if (!second.isEmpty()) {
-        combination.append("-").append(m_ui.txtSecond->text());
-      }
-      combination.replace(" ", "");
-      combination.replace(",", "+");
-      checked.append(combination);
-    }
-  } else {
-    // Single-period data
-    checked << "";
-  }
-  return checked;
+  return m_chosenPeriods;
 }
 
 /**
@@ -371,89 +208,7 @@ QStringList MuonFitDataSelector::getPeriodSelections() const {
  * @returns :: list of selected groups
  */
 QStringList MuonFitDataSelector::getChosenGroups() const {
-  QStringList chosen;
-  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
-       ++iter) {
-    if (iter.value()->isChecked()) {
-      chosen.append(iter.key());
-    }
-  }
-  return chosen;
-}
-/**
-* Clears the list of selected groups (unchecks boxes)
-*/
-void MuonFitDataSelector::clearChosenGroups() const {
-  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
-       ++iter) {
-    iter.value()->setChecked(false);
-  }
-}
-/**
- * Set the chosen group ticked and all others off
- * Used when switching from Home tab to Data Analysis tab
- * @param group :: [input] Name of group to select
- */
-void MuonFitDataSelector::setChosenGroup(const QString &group) {
-  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
-       ++iter) {
-    if (iter.key() == group) {
-      iter.value()->setChecked(true);
-    }
-  }
-}
-
-/**
- * Set the chosen period/combination ticked and all others off
- * Used when switching from Home tab to Data Analysis tab
- * @param period :: [input] Period string to set selected
- * (can be just one period or a combination)
- */
-void MuonFitDataSelector::setChosenPeriod(const QString &period) {
-  // Begin by unchecking everything
-  for (auto checkbox : m_periodBoxes) {
-    checkbox->setChecked(false);
-  }
-
-  // If single-period or all periods, string will be empty
-  if (period.isEmpty()) {
-    if (m_periodBoxes.size() == 1) { // single-period
-      setPeriodCombination(false);
-      m_periodBoxes.begin().value()->setChecked(true);
-    } else { // all periods selected
-      setPeriodCombination(true);
-      QString combination;
-      for (int i = 0; i < m_periodBoxes.count() - 1; i++) {
-        combination.append(QString::number(i + 1)).append(", ");
-      }
-      m_ui.txtFirst->setText(
-          combination.append(QString::number(m_periodBoxes.count())));
-      m_ui.txtSecond->clear();
-    }
-  } else {
-    // Test if period can be cast to int (just one period) or if it's a
-    // combination e.g. "1+2"
-    bool onePeriod(false);
-    /*const int chosenPeriod = */ period.toInt(&onePeriod);
-    if (onePeriod) {
-      // set just one
-      for (auto iter = m_periodBoxes.constBegin();
-           iter != m_periodBoxes.constEnd(); ++iter) {
-        if (iter.key() == period) {
-          iter.value()->setChecked(true);
-        }
-      }
-      setPeriodCombination(false);
-    } else {
-      // set the combination
-      QStringList parts = period.split('-');
-      if (parts.size() == 2) {
-        m_ui.txtFirst->setText(parts[0].replace("+", ", "));
-        m_ui.txtSecond->setText(parts[1].replace("+", ", "));
-        setPeriodCombination(true);
-      }
-    }
-  }
+  return m_chosenGroups;
 }
 
 /**
@@ -513,10 +268,9 @@ IMuonFitDataSelector::FitType MuonFitDataSelector::getFitType() const {
   // If radio buttons disabled, it's a single fit unless multiple groups/periods
   // chosen
   if (!m_ui.rbCoAdd->isEnabled()) {
-    const auto groups = getChosenGroups();
-    const auto periods = getPeriodSelections();
-    return groups.size() <= 1 && periods.size() <= 1 ? FitType::Single
-                                                     : FitType::Simultaneous;
+    return m_chosenGroups.size() <= 1 && m_chosenPeriods.size() <= 1
+               ? FitType::Single
+               : FitType::Simultaneous;
   } else {
     // which button is selected
     if (m_ui.rbCoAdd->isChecked()) {
@@ -545,33 +299,6 @@ void MuonFitDataSelector::setFitType(IMuonFitDataSelector::FitType type) {
   }
   checkForMultiGroupPeriodSelection();
 }
-
-/**
- * Check/uncheck period combination checkbox and set the textboxes
- * enabled/disabled
- * @param on :: [input] Turn on or off
- */
-void MuonFitDataSelector::setPeriodCombination(bool on) {
-  m_ui.chkCombine->setChecked(on);
-  m_ui.txtFirst->setEnabled(on);
-  m_ui.txtSecond->setEnabled(on);
-}
-
-/**
- * Slot: Keeps enabled/disabled state of textboxes in sync with checkbox
- * for period combination choices
- * @param state :: [input] New check state of box
- */
-void MuonFitDataSelector::periodCombinationStateChanged(int state) {
-  m_ui.txtFirst->setEnabled(state == Qt::Checked);
-  m_ui.txtSecond->setEnabled(state == Qt::Checked);
-  // If no text is set in the boxes, put something in there
-  if (m_ui.txtFirst->text().isEmpty() && m_ui.txtSecond->text().isEmpty()) {
-    m_ui.txtFirst->setText("1");
-  }
-  emit selectedPeriodsChanged();
-}
-
 /**
  * Return the instrument name currently set as the override
  * for the data selector
@@ -601,11 +328,6 @@ void MuonFitDataSelector::unsetBusyState() {
   disconnect(m_ui.runs, SIGNAL(fileInspectionFinished()), this,
              SLOT(unsetBusyState()));
   this->setCursor(Qt::ArrowCursor);
-  m_ui.groupBoxDataSelector->setEnabled(true);
-  m_ui.groupBoxGroups->setEnabled(true);
-  if (m_ui.groupBoxPeriods->isVisible()) {
-    m_ui.groupBoxPeriods->setEnabled(true);
-  }
 }
 
 /**
@@ -616,11 +338,6 @@ void MuonFitDataSelector::setBusyState() {
   connect(m_ui.runs, SIGNAL(fileInspectionFinished()), this,
           SLOT(unsetBusyState()));
   this->setCursor(Qt::WaitCursor);
-  m_ui.groupBoxDataSelector->setEnabled(false);
-  m_ui.groupBoxGroups->setEnabled(false);
-  if (m_ui.groupBoxPeriods->isVisible()) {
-    m_ui.groupBoxPeriods->setEnabled(false);
-  }
 }
 
 /**
@@ -644,9 +361,8 @@ void MuonFitDataSelector::setSimultaneousFitLabel(const QString &label) {
  * Called when groups/periods selection changes.
  */
 void MuonFitDataSelector::checkForMultiGroupPeriodSelection() {
-  const auto groups = getChosenGroups();
-  const auto periods = getPeriodSelections();
-  m_ui.txtSimFitLabel->setEnabled(groups.size() > 1 || periods.size() > 1 ||
+  m_ui.txtSimFitLabel->setEnabled(m_chosenGroups.size() > 1 ||
+                                  m_chosenPeriods.size() > 1 ||
                                   getFitType() == FitType::Simultaneous);
 }
 
@@ -672,7 +388,6 @@ QString MuonFitDataSelector::getDatasetName() const {
  */
 void MuonFitDataSelector::setDatasetNames(const QStringList &datasetNames) {
   const auto selectedName = m_ui.cbDataset->currentText();
-
   // Turn off signals while names are updated
   m_ui.cbDataset->blockSignals(true);
   m_ui.cbDataset->clear();
diff --git a/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp b/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp
index 9d82982a708e8cc5582afbbcf7d2528ba058b974..eb9e4728064c8258c28f537778c6cec3a86b1ef3 100644
--- a/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp
+++ b/MantidQt/MantidWidgets/src/MuonFitPropertyBrowser.cpp
@@ -9,6 +9,8 @@
 #include "MantidKernel/VectorHelper.h"
 #include "MantidQtMantidWidgets/StringEditorFactory.h"
 
+#include "MantidQtMantidWidgets/MuonFitDataSelector.h"
+
 // Suppress a warning coming out of code that isn't ours
 #if defined(__INTEL_COMPILER)
 #pragma warning disable 1125
@@ -43,13 +45,18 @@
 #include <QSettings>
 #include <QMessageBox>
 #include <QAction>
+#include <QFormLayout>
+
 #include <QLayout>
 #include <QSplitter>
-#include <QMap>
 #include <QLabel>
 #include <QPushButton>
+
 #include <QMenu>
 #include <QSignalMapper>
+
+#include <QCheckBox>
+
 namespace {
 Mantid::Kernel::Logger g_log("MuonFitPropertyBrowser");
 }
@@ -87,6 +94,15 @@ void MuonFitPropertyBrowser::init() {
   // Seperates the data and the settings into two seperate categories
   settingsGroup = m_groupManager->addProperty("Data");
 
+  QSettings multiFitSettings;
+  multiFitSettings.beginGroup("");
+
+  /* Create function group */
+  QtProperty *multiFitSettingsGroup(NULL);
+
+  // Seperates the data and the settings into two seperate categories
+  multiFitSettingsGroup = m_groupManager->addProperty("Data");
+
   // Have slightly different names as requested by the muon scientists.
   m_startX =
       addDoubleProperty(QString("Start (%1s)").arg(QChar(0x03BC))); //(mu);
@@ -129,6 +145,42 @@ void MuonFitPropertyBrowser::init() {
   settingsGroup->addSubProperty(m_startX);
   settingsGroup->addSubProperty(m_endX);
   settingsGroup->addSubProperty(m_normalization);
+
+  // Disable "Browse" button - use case is that first run will always be the one
+  // selected on front tab. User will type in the runs they want rather than
+  // using the Browse button. (If they want to "Browse" they can use front tab).
+
+  multiFitSettingsGroup->addSubProperty(m_startX);
+  multiFitSettingsGroup->addSubProperty(m_endX);
+  m_groupsToFit = m_enumManager->addProperty("Groups/Pairs to fit");
+  m_groupsToFitOptions << "All groups"
+                       << "All Pairs"
+                       << "Custom";
+  m_showGroupValue << "groups";
+  m_showGroup = m_enumManager->addProperty("Selected Groups");
+  m_enumManager->setEnumNames(m_groupsToFit, m_groupsToFitOptions);
+  multiFitSettingsGroup->addSubProperty(m_groupsToFit);
+  multiFitSettingsGroup->addSubProperty(m_showGroup);
+
+  m_enumManager->setEnumNames(m_showGroup, m_showGroupValue);
+  QString tmp = "fwd";
+  addGroupCheckbox(tmp);
+  tmp = "bwd";
+  addGroupCheckbox(tmp);
+  m_periodsToFit = m_enumManager->addProperty("Periods to fit");
+  m_periodsToFitOptions << "1"
+                        << "2"
+                        << "Custom";
+  m_showPeriodValue << "1";
+  m_showPeriods = m_enumManager->addProperty("Selected Periods");
+  m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+  multiFitSettingsGroup->addSubProperty(m_periodsToFit);
+  multiFitSettingsGroup->addSubProperty(m_showPeriods);
+  m_enumManager->setEnumNames(m_showPeriods, m_showPeriodValue);
+
+  connect(m_browser, SIGNAL(currentItemChanged(QtBrowserItem *)), this,
+          SLOT(currentItemChanged(QtBrowserItem *)));
+
   /* Create editors and assign them to the managers */
   createEditors(w);
 
@@ -136,11 +188,35 @@ void MuonFitPropertyBrowser::init() {
 
   m_functionsGroup = m_browser->addProperty(functionsGroup);
   m_settingsGroup = m_browser->addProperty(settingsGroup);
+  m_multiFitSettingsGroup = m_browser->addProperty(multiFitSettingsGroup);
+
+  m_btnGroup = new QGroupBox(tr("Reselect Data"));
+  QHBoxLayout *btnLayout = new QHBoxLayout;
+  m_reselectGroupBtn = new QPushButton("Groups/Pairs");
+  m_reselectPeriodBtn = new QPushButton("Periods");
+  m_generateBtn = new QPushButton("Combine Periods");
+  m_groupWindow = new QDialog(this);
+  m_periodWindow = new QDialog(this);
+  m_comboWindow = new QDialog(this);
+
+  m_reselectGroupBtn->setEnabled(false);
+  m_reselectPeriodBtn->setEnabled(false);
+  connect(m_reselectGroupBtn, SIGNAL(released()), this,
+          SLOT(groupBtnPressed()));
+  connect(m_reselectPeriodBtn, SIGNAL(released()), this,
+          SLOT(periodBtnPressed()));
+  connect(m_generateBtn, SIGNAL(released()), this, SLOT(generateBtnPressed()));
+
+  btnLayout->addWidget(m_reselectGroupBtn);
+  btnLayout->addWidget(m_reselectPeriodBtn);
+  btnLayout->addWidget(m_generateBtn);
+
+  m_btnGroup->setLayout(btnLayout);
 
   // Don't show "Function" or "Data" sections as they have separate widgets
   m_browser->setItemVisible(m_functionsGroup, false);
   m_browser->setItemVisible(m_settingsGroup, false);
-
+  m_browser->setItemVisible(m_multiFitSettingsGroup, true);
   // Custom settings that are specific and asked for by the muon scientists.
   QtProperty *customSettingsGroup = m_groupManager->addProperty("Settings");
 
@@ -184,9 +260,11 @@ void MuonFitPropertyBrowser::init() {
     const int index = parentLayout->count() - 1;
     constexpr int stretchFactor = 10; // so these widgets get any extra space
     parentLayout->insertWidget(index, m_mainSplitter, stretchFactor);
+
     parentLayout->setSpacing(0);
     parentLayout->setMargin(0);
     parentLayout->setContentsMargins(0, 0, 0, 0);
+    parentLayout->insertWidget(index + 1, m_btnGroup);
   }
   // Update tooltips when function structure is (or might've been) changed in
   // any way
@@ -200,6 +278,12 @@ void MuonFitPropertyBrowser::executeFitMenu(const QString &item) {
     FitPropertyBrowser::executeFitMenu(item);
   }
 }
+// Create group/pair selection pop up
+void MuonFitPropertyBrowser::groupBtnPressed() { genGroupWindow(); }
+// Create period selection pop up
+void MuonFitPropertyBrowser::periodBtnPressed() { genPeriodWindow(); }
+// Create combination selection pop up
+void MuonFitPropertyBrowser::generateBtnPressed() { genCombinePeriodWindow(); }
 /**
 pulate the fit button.
 * This initialization includes:
@@ -225,7 +309,6 @@ void MuonFitPropertyBrowser::setFitEnabled(bool yes) {
   m_fitActionSeqFit->setEnabled(yes);
   m_fitActionTFAsymm->setEnabled(yes);
 }
-
 /**
 * Set the input workspace name
 */
@@ -240,7 +323,79 @@ void MuonFitPropertyBrowser::setWorkspaceName(const QString &wsName) {
   if (i >= 0)
     m_enumManager->setValue(m_workspace, i);
 }
-
+/** Called when a dropdown menu is changed
+* @param prop :: A pointer to the function name property
+*/
+void MuonFitPropertyBrowser::enumChanged(QtProperty *prop) {
+  if (!m_changeSlotsEnabled)
+    return;
+  if (prop == m_groupsToFit) {
+    int j = m_enumManager->value(m_groupsToFit);
+    std::string option = m_groupsToFitOptions[j].toStdString();
+
+    if (option == "All groups") {
+      setAllGroups();
+      m_reselectGroupBtn->setEnabled(false);
+    } else if (option == "All Pairs") {
+      setAllPairs();
+      m_reselectGroupBtn->setEnabled(false);
+    } else if (option == "Custom") {
+      m_reselectGroupBtn->setEnabled(true);
+      genGroupWindow();
+    }
+    updateGroupDisplay();
+
+  } else if (prop == m_periodsToFit) {
+    int j = m_enumManager->value(m_periodsToFit);
+    std::string option = m_periodsToFitOptions[j].toStdString();
+    if (option == "Custom") {
+      m_reselectPeriodBtn->setEnabled(true);
+      genPeriodWindow();
+    } else {
+      for (auto iter = m_periodBoxes.constBegin();
+           iter != m_periodBoxes.constEnd(); ++iter) {
+        if (option == iter.key().toStdString()) {
+          m_boolManager->setValue(iter.value(), true);
+        } else {
+          m_boolManager->setValue(iter.value(), false);
+        }
+        m_reselectPeriodBtn->setEnabled(false);
+      }
+    }
+    updatePeriodDisplay();
+  } else if (prop == m_workspace) {
+    // make sure the output is updated
+    FitPropertyBrowser::enumChanged(prop);
+    int j = m_enumManager->value(m_workspace);
+    std::string option = m_workspaceNames[j].toStdString();
+    setOutputName(option);
+  } else {
+    FitPropertyBrowser::enumChanged(prop);
+  }
+}
+/** Sets the display for
+* selected groups
+*/
+void MuonFitPropertyBrowser::updateGroupDisplay() {
+  m_showGroupValue.clear();
+  auto tmp = getChosenGroups().join(",").toStdString();
+  m_showGroupValue << getChosenGroups().join(",");
+  m_enumManager->setEnumNames(m_showGroup, m_showGroupValue);
+  m_multiFitSettingsGroup->property()->addSubProperty(m_showGroup);
+}
+/** Sets the display for
+* selected periods
+*/
+void MuonFitPropertyBrowser::updatePeriodDisplay() {
+  m_showPeriodValue.clear();
+  auto tmp = getChosenPeriods();
+  tmp.replaceInStrings(QRegExp(","), "+");
+  m_showPeriodValue << tmp.join(",");
+  m_enumManager->setEnumNames(m_showPeriods, m_showPeriodValue);
+  if (m_periodsToFitOptions.size() > 1) {
+    m_multiFitSettingsGroup->property()->addSubProperty(m_showPeriods);
+  }
+}
 /** Called when a double property changed
  * @param prop :: A pointer to the property
  */
@@ -303,8 +458,31 @@ void MuonFitPropertyBrowser::boolChanged(QtProperty *prop) {
     const bool val = m_boolManager->value(prop);
     emit fitRawDataClicked(val);
   } else {
-    // defer to parent class
-    FitPropertyBrowser::boolChanged(prop);
+    // search map for group/pair change
+    bool done = false;
+    for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+         ++iter) {
+      if (iter.value() == prop) {
+        done = true;
+        updateGroupDisplay();
+        emit groupBoxClicked();
+      }
+    }
+    // search map for period change
+    if (done == false) {
+      for (auto iter = m_periodBoxes.constBegin();
+           iter != m_periodBoxes.constEnd(); ++iter) {
+        if (iter.value() == prop) {
+          done = true;
+          updatePeriodDisplay();
+          emit periodBoxClicked();
+        }
+      }
+    }
+    if (done == false) {
+      // defer to parent class
+      FitPropertyBrowser::boolChanged(prop);
+    }
   }
 }
 
@@ -522,7 +700,6 @@ void MuonFitPropertyBrowser::runFit() {
     alg->setProperty("WorkspaceIndex", workspaceIndex());
     alg->setProperty("StartX", startX());
     alg->setProperty("EndX", endX());
-    alg->setPropertyValue("Output", outputName());
     alg->setPropertyValue("Minimizer", minimizer());
     alg->setPropertyValue("CostFunction", costFunction());
 
@@ -541,7 +718,10 @@ void MuonFitPropertyBrowser::runFit() {
         alg->setProperty("StartX_" + suffix, startX());
         alg->setProperty("EndX_" + suffix, endX());
       }
+    } else {
+      setSingleFitLabel(wsName);
     }
+    alg->setPropertyValue("Output", outputName());
 
     observeFinish(alg);
     alg->executeAsync();
@@ -742,11 +922,20 @@ std::string MuonFitPropertyBrowser::outputName() const {
 void MuonFitPropertyBrowser::setMultiFittingMode(bool enabled) {
   // First, clear whatever model is currently set
   this->clear();
-
+  modifyFitMenu(m_fitActionEvaluate, !enabled);
+  modifyFitMenu(m_fitActionSeqFit, !enabled);
+  // set default selection (all groups)
+  if (enabled) {
+    setAllGroups();
+  } else { // clear current selection
+    clearChosenGroups();
+    clearChosenPeriods();
+  }
   // Show or hide "Function" and "Data" sections
   m_browser->setItemVisible(m_functionsGroup, !enabled);
   m_browser->setItemVisible(m_settingsGroup, !enabled);
-
+  m_browser->setItemVisible(m_multiFitSettingsGroup, enabled);
+  m_btnGroup->setVisible(enabled);
   // Show or hide additional widgets
   for (int i = 0; i < m_widgetSplitter->count(); ++i) {
     if (auto *widget = m_widgetSplitter->widget(i)) {
@@ -762,8 +951,6 @@ void MuonFitPropertyBrowser::setMultiFittingMode(bool enabled) {
 * @param enabled :: [input] Whether to turn this mode on or off
 */
 void MuonFitPropertyBrowser::setTFAsymmMode(bool enabled) {
-  // First, clear whatever model is currently set
-  this->clear();
   modifyFitMenu(m_fitActionTFAsymm, enabled);
 
   // Show or hide the TFAsymmetry fit
@@ -796,6 +983,382 @@ bool MuonFitPropertyBrowser::hasGuess() const {
     return false;
   }
 }
+/**
+* Sets group names and updates checkboxes on UI
+* By default sets all unchecked
+* @param groups :: [input] List of group names
+*/
+void MuonFitPropertyBrowser::setAvailableGroups(const QStringList &groups) {
+
+  m_enumManager->setValue(m_groupsToFit, 0);
+  // If it's the same list, do nothing
+  if (groups.size() == m_groupBoxes.size()) {
+    auto existingGroups = m_groupBoxes.keys();
+    auto newGroups = groups;
+    qSort(existingGroups);
+    qSort(newGroups);
+    if (existingGroups == newGroups) {
+      return;
+    }
+  }
+
+  clearGroupCheckboxes();
+  QSettings settings;
+  for (const auto group : groups) {
+    addGroupCheckbox(group);
+  }
+}
+/**
+* Selects a single group/pair
+* @param group :: [input] Group/pair to select
+*/
+void MuonFitPropertyBrowser::setChosenGroup(QString &group) {
+  clearChosenGroups();
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    if (iter.key() == group) {
+      m_boolManager->setValue(iter.value(), true);
+    }
+  }
+}
+/**
+* Clears all group names and checkboxes
+* (ready to add new ones)
+*/
+void MuonFitPropertyBrowser::clearGroupCheckboxes() {
+  for (const auto &checkbox : m_groupBoxes) {
+    delete (checkbox);
+  }
+  m_groupBoxes.clear();
+}
+/**
+* Add a new checkbox to the list of groups with given name
+* The new checkbox is checked according to dropdown menu selection
+* @param name :: [input] Name of group to add
+*/
+void MuonFitPropertyBrowser::addGroupCheckbox(const QString &name) {
+  m_groupBoxes.insert(name, m_boolManager->addProperty(name));
+  int j = m_enumManager->value(m_groupsToFit);
+  auto option = m_groupsToFitOptions[j].toStdString();
+  if (option == "All groups") {
+    setAllGroups();
+  } else if (option == "All Pairs") {
+    setAllPairs();
+  }
+}
+/**
+* Returns a list of the selected groups (checked boxes)
+* @returns :: list of selected groups
+*/
+QStringList MuonFitPropertyBrowser::getChosenGroups() const {
+  QStringList chosen;
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    if (m_boolManager->value(iter.value()) == true) {
+      chosen.append(iter.key());
+    }
+  }
+  return chosen;
+}
+/**
+* Clears the list of selected groups (unchecks boxes)
+*/
+void MuonFitPropertyBrowser::clearChosenGroups() const {
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    m_boolManager->setValue(iter.value(), false);
+  }
+}
+
+/**
+* Selects all groups
+*/
+void MuonFitPropertyBrowser::setAllGroups() {
+
+  clearChosenGroups();
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    for (auto group : m_groupsList) {
+      if (iter.key().toStdString() == group) {
+        m_boolManager->setValue(iter.value(), true);
+      }
+    }
+  }
+}
+/*
+* Sets all pairs
+*/
+void MuonFitPropertyBrowser::setAllPairs() {
+  clearChosenGroups();
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    bool isItGroup = false;
+    for (auto group : m_groupsList) {
+      if (iter.key().toStdString() == group) {
+        isItGroup = true;
+      }
+    }
+    if (!isItGroup) {
+      m_boolManager->setValue(iter.value(), true);
+    }
+  }
+}
+
+/*
+* Create a popup window to select a custom
+* selection of groups/pairs
+*/
+void MuonFitPropertyBrowser::genGroupWindow() {
+
+  QtGroupPropertyManager *groupManager =
+      new QtGroupPropertyManager(m_groupWindow);
+  QVBoxLayout *layout = new QVBoxLayout(m_groupWindow);
+  QtTreePropertyBrowser *groupBrowser = new QtTreePropertyBrowser();
+  QtProperty *groupSettings = groupManager->addProperty("Group/Pair selection");
+  for (auto iter = m_groupBoxes.constBegin(); iter != m_groupBoxes.constEnd();
+       ++iter) {
+    groupSettings->addSubProperty(m_groupBoxes.value(iter.key()));
+    m_boolManager->setValue(iter.value(), m_boolManager->value(iter.value()));
+  }
+  QtCheckBoxFactory *checkBoxFactory = new QtCheckBoxFactory(m_groupWindow);
+  groupBrowser->setFactoryForManager(m_boolManager, checkBoxFactory);
+  groupBrowser->addProperty(groupSettings);
+  layout->addWidget(groupBrowser);
+  m_groupWindow->setLayout(layout);
+  m_groupWindow->show();
+}
+/**
+* Sets checkboxes for periods
+* @param numPeriods :: [input] Number of periods
+*/
+void MuonFitPropertyBrowser::setNumPeriods(size_t numPeriods) {
+  m_periodsToFitOptions.clear();
+  // create more boxes
+  for (size_t i = 0; i != numPeriods; i++) {
+    QString name = QString::number(i + 1);
+    addPeriodCheckbox(name);
+  }
+  if (m_periodsToFitOptions.size() == 1) {
+    m_generateBtn->setDisabled(true);
+    m_multiFitSettingsGroup->property()->removeSubProperty(m_periodsToFit);
+    m_multiFitSettingsGroup->property()->removeSubProperty(m_showPeriods);
+    m_enumManager->setValue(m_periodsToFit, 0);
+    clearChosenPeriods();
+    m_boolManager->setValue(m_periodBoxes.constBegin().value(), true);
+  } else {
+    // add custom back into list
+    m_multiFitSettingsGroup->property()->insertSubProperty(m_periodsToFit,
+                                                           m_showGroup);
+    m_multiFitSettingsGroup->property()->addSubProperty(m_showPeriods);
+    m_generateBtn->setDisabled(false);
+
+    m_periodsToFitOptions << "Custom";
+    m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+  }
+}
+/**
+* Sets period names and updates checkboxes on UI
+* By default sets all unchecked
+* @param periods :: [input] List of period names
+*/
+void MuonFitPropertyBrowser::setAvailablePeriods(const QStringList &periods) {
+  // If it's the same list, do nothing
+  if (periods.size() == m_periodBoxes.size()) {
+    auto existingGroups = m_periodBoxes.keys();
+    auto newGroups = periods;
+    qSort(existingGroups);
+    qSort(newGroups);
+    if (existingGroups == newGroups) {
+      return;
+    }
+  }
+
+  clearPeriodCheckboxes();
+
+  for (const auto group : periods) {
+    addPeriodCheckbox(group);
+  }
+}
+/**
+* Clears all pair names and checkboxes
+* (ready to add new ones)
+*/
+void MuonFitPropertyBrowser::clearPeriodCheckboxes() {
+  if (m_periodBoxes.size() > 1) {
+    for (auto iter = m_periodBoxes.constBegin();
+         iter != m_periodBoxes.constEnd(); ++iter) {
+      if (iter != m_periodBoxes.constBegin()) {
+        delete (iter);
+      }
+    }
+  }
+  m_periodsToFitOptions.clear();
+  m_periodsToFitOptions << "1";
+  m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+}
+/**
+* Clears the list of selected groups (unchecks boxes)
+*/
+void MuonFitPropertyBrowser::clearChosenPeriods() const {
+  for (auto iter = m_periodBoxes.constBegin(); iter != m_periodBoxes.constEnd();
+       ++iter) {
+    m_boolManager->setValue(iter.value(), false);
+  }
+}
+/**
+* Add a new checkbox to the list of periods with given name
+* The new checkbox is unchecked by default
+* @param name :: [input] Name of period to add
+*/
+void MuonFitPropertyBrowser::addPeriodCheckbox(const QString &name) {
+  m_periodBoxes.insert(name, m_boolManager->addProperty(name));
+  int j = m_enumManager->value(m_periodsToFit);
+
+  // add new period to list will go after inital list
+  m_periodsToFitOptions << name;
+  auto active = getChosenPeriods();
+  m_enumManager->setEnumNames(m_periodsToFit, m_periodsToFitOptions);
+  setChosenPeriods(active);
+  m_enumManager->setValue(m_periodsToFit, j);
+}
+/**
+* Returns a list of the selected periods (checked boxes)
+* @returns :: list of selected periods
+*/
+QStringList MuonFitPropertyBrowser::getChosenPeriods() const {
+  QStringList chosen;
+  // if single period
+  if (m_periodsToFitOptions.size() == 1) {
+    chosen << "";
+  } else {
+    for (auto iter = m_periodBoxes.constBegin();
+         iter != m_periodBoxes.constEnd(); ++iter) {
+      if (m_boolManager->value(iter.value()) == true) {
+        chosen.append(iter.key());
+      }
+    }
+  }
+  return chosen;
+}
+/**
+* Ticks the selected periods
+* @param chosenPeriods :: list of selected periods
+*/
+void MuonFitPropertyBrowser::setChosenPeriods(
+    const QStringList &chosenPeriods) {
+  clearChosenPeriods();
+  for (auto selected : chosenPeriods) {
+    for (auto iter = m_periodBoxes.constBegin();
+         iter != m_periodBoxes.constEnd(); ++iter) {
+      auto tmp = iter.key();
+      if (iter.key() == selected) {
+        m_boolManager->setValue(iter.value(), true);
+      }
+    }
+  }
+}
+/**
+* Ticks the selected periods
+* @param period :: selected periods
+*/
+void MuonFitPropertyBrowser::setChosenPeriods(const QString &period) {
+  clearChosenPeriods();
+  for (auto iter = m_periodBoxes.constBegin(); iter != m_periodBoxes.constEnd();
+       ++iter) {
+    auto tmp = iter.key();
+    if (iter.key() == period) {
+      m_boolManager->setValue(iter.value(), true);
+    }
+  }
+}
+/*
+* Create a pop up window to select a custom
+* selection of periods
+*/
+void MuonFitPropertyBrowser::genPeriodWindow() {
+  QtGroupPropertyManager *groupManager =
+      new QtGroupPropertyManager(m_periodWindow);
+  QVBoxLayout *layout = new QVBoxLayout(m_periodWindow);
+  QtTreePropertyBrowser *groupBrowser = new QtTreePropertyBrowser();
+  QtProperty *groupSettings = groupManager->addProperty("Period selection");
+  for (auto iter = m_periodBoxes.constBegin(); iter != m_periodBoxes.constEnd();
+       ++iter) {
+    groupSettings->addSubProperty(m_periodBoxes.value(iter.key()));
+    m_boolManager->setValue(iter.value(), m_boolManager->value(iter.value()));
+  }
+  QtCheckBoxFactory *checkBoxFactory = new QtCheckBoxFactory(m_periodWindow);
+  groupBrowser->setFactoryForManager(m_boolManager, checkBoxFactory);
+  groupBrowser->addProperty(groupSettings);
+  layout->addWidget(groupBrowser);
+  m_periodWindow->setLayout(layout);
+  m_periodWindow->show();
+}
+/*
+* Create a pop up window to create
+* a combination of periods
+*/
+void MuonFitPropertyBrowser::genCombinePeriodWindow() {
+  QVBoxLayout *layout = new QVBoxLayout(m_comboWindow);
+  QFormLayout *formLayout = new QFormLayout;
+  m_positiveCombo = new QLineEdit();
+  m_negativeCombo = new QLineEdit();
+  formLayout->addRow(new QLabel(tr("Combine:")), m_positiveCombo);
+  formLayout->addRow(new QLabel(tr("   -    ")), m_negativeCombo);
+  layout->addLayout(formLayout);
+
+  QPushButton *applyBtn = new QPushButton("Apply");
+
+  connect(applyBtn, SIGNAL(released()), this, SLOT(combineBtnPressed()));
+
+  layout->addWidget(applyBtn);
+  m_comboWindow->setLayout(layout);
+  m_comboWindow->show();
+}
+/*
+* Get the positive and negative parts of the
+* combination of periods and produce a new
+* tick box. Unticked by default.
+*/
+void MuonFitPropertyBrowser::combineBtnPressed() {
+  QString value = m_positiveCombo->text();
+  if (value.isEmpty()) {
+    g_log.error("There are no positive periods (top box)");
+    return;
+  }
+  if (!m_negativeCombo->text().isEmpty()) {
+    value.append("-").append(m_negativeCombo->text());
+  }
+  m_positiveCombo->clear();
+  m_negativeCombo->clear();
+  addPeriodCheckbox(value);
+}
+/**
+* sets the label for a single fit and
+* selects the relevant group/pair
+* @param name :: string of the ws
+*/
+void MuonFitPropertyBrowser::setSingleFitLabel(std::string name) {
+  clearChosenGroups();
+  clearChosenPeriods();
+  std::vector<std::string> splitName;
+  std::string tmpName = name;
+  boost::erase_all(tmpName, " ");
+  boost::split(splitName, tmpName, boost::is_any_of(";"));
+  // set single group/pair
+  QString group = QString::fromUtf8(splitName[2].c_str());
+  setChosenGroup(group);
+  // set period if available
+  if (splitName.size() == 6) {
+    QString period = QString::fromUtf8(splitName[4].c_str());
+    setChosenPeriods(period);
+  }
+  setOutputName(name);
+  // for single fit in multi fit mode
+  if (m_browser->isItemVisible(m_multiFitSettingsGroup)) {
+    updateGroupDisplay();
+    updatePeriodDisplay();
+  }
+}
 
 } // MantidQt
 } // API
diff --git a/MantidQt/MantidWidgets/src/pqHelpWindow.cxx b/MantidQt/MantidWidgets/src/pqHelpWindow.cxx
index 8e0bd48e30d88dae1e6920d7e3db2dfcf1e5d042..c93d46055507054123f222ed6dfb6d8f924ed4be 100644
--- a/MantidQt/MantidWidgets/src/pqHelpWindow.cxx
+++ b/MantidQt/MantidWidgets/src/pqHelpWindow.cxx
@@ -53,7 +53,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 #include <QUrl>
 #include <QWebHistory>
 #include <QWebView>
-#include <iostream>
 
 using MantidQt::API::MantidDesktopServices;
 
@@ -305,7 +304,7 @@ void pqHelpWindow::errorMissingPage(const QUrl& url)
 //-----------------------------------------------------------------------------
 void pqHelpWindow::showPage(const QString& url)
 {
-  this->showPage(QUrl(url));
+  this->showPage(QUrl::fromUserInput(url));
 }
 
 //-----------------------------------------------------------------------------
diff --git a/MantidQt/SliceViewer/src/LineViewer.cpp b/MantidQt/SliceViewer/src/LineViewer.cpp
index 3b7d28000928484e72d73d69fffbbe82844d12f2..096773b0bc82799134d17fab47578233b0a8e04f 100644
--- a/MantidQt/SliceViewer/src/LineViewer.cpp
+++ b/MantidQt/SliceViewer/src/LineViewer.cpp
@@ -1027,7 +1027,7 @@ void LineViewer::setupScaleEngine(MantidQwtWorkspaceData &curveData) {
 
   if (m_lineOptions->isLogScaledY()) {
     engine = new QwtLog10ScaleEngine();
-    curveData.saveLowestPositiveValue(from);
+    curveData.setMinimumPositiveValue(from);
   } else {
     engine = new QwtLinearScaleEngine();
   }
diff --git a/MantidQt/SliceViewer/src/SliceViewer.cpp b/MantidQt/SliceViewer/src/SliceViewer.cpp
index 27c68cffc80119d4a12ee34f33082b6b324bb8aa..179aedc16025b4ad06c596aa15aba6d6d4ef4c3a 100644
--- a/MantidQt/SliceViewer/src/SliceViewer.cpp
+++ b/MantidQt/SliceViewer/src/SliceViewer.cpp
@@ -2996,6 +2996,7 @@ void SliceViewer::applyOrthogonalAxisScaleDraw() {
   auto *axis1 = new QwtScaleDraw();
   m_plot->setAxisScaleDraw(QwtPlot::xBottom, axis0);
   m_plot->setAxisScaleDraw(QwtPlot::yLeft, axis1);
+  this->updateDisplay();
 }
 
 } // namespace
diff --git a/MantidQt/SpectrumViewer/src/GraphDisplay.cpp b/MantidQt/SpectrumViewer/src/GraphDisplay.cpp
index a56e9cb5d0400114433d8af0efb3a75b050eca57..66ce72ba07f9214f0db8b3fa36b3c6bf9675657f 100644
--- a/MantidQt/SpectrumViewer/src/GraphDisplay.cpp
+++ b/MantidQt/SpectrumViewer/src/GraphDisplay.cpp
@@ -1,12 +1,14 @@
+#include "MantidQtSpectrumViewer/GraphDisplay.h"
+
+#include "MantidQtSpectrumViewer/QtUtils.h"
+#include "MantidQtSpectrumViewer/SVUtils.h"
+
+#include <boost/algorithm/clamp.hpp>
 #include <QtGui>
 #include <QVector>
 #include <QString>
 #include <qwt_scale_engine.h>
 
-#include "MantidQtSpectrumViewer/GraphDisplay.h"
-#include "MantidQtSpectrumViewer/QtUtils.h"
-#include "MantidQtSpectrumViewer/SVUtils.h"
-
 namespace MantidQt {
 namespace SpectrumView {
 
@@ -132,14 +134,32 @@ void GraphDisplay::clear() {
 void GraphDisplay::setRangeScale(double rangeScale) {
   m_rangeScale = rangeScale;
 
+  // A helper function to limit min and max to finite values.
+  auto clampRange = [](double &min, double &max) {
+    const double low = std::numeric_limits<double>::lowest();
+    const double high = std::numeric_limits<double>::max();
+    min = boost::algorithm::clamp(min, low, high, std::less_equal<double>());
+    max = boost::algorithm::clamp(max, low, high, std::less_equal<double>());
+  };
+
   if (m_isVertical) {
+    double axis_min = m_minX;
     double axis_max = m_rangeScale * (m_maxX - m_minX) + m_minX;
-    m_graphPlot->setAxisScale(QwtPlot::xBottom, m_minX, axis_max);
-    m_graphPlot->setAxisScale(QwtPlot::yLeft, m_minY, m_maxY);
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::xBottom, axis_min, axis_max);
+    axis_min = m_minY;
+    axis_max = m_maxY;
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::yLeft, axis_min, axis_max);
   } else {
+    double axis_min = m_minY;
     double axis_max = m_rangeScale * (m_maxY - m_minY) + m_minY;
-    m_graphPlot->setAxisScale(QwtPlot::yLeft, m_minY, axis_max);
-    m_graphPlot->setAxisScale(QwtPlot::xBottom, m_minX, m_maxX);
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::yLeft, axis_min, axis_max);
+    axis_min = m_minX;
+    axis_max = m_maxX;
+    clampRange(axis_min, axis_max);
+    m_graphPlot->setAxisScale(QwtPlot::xBottom, axis_min, axis_max);
   }
   m_graphPlot->replot();
 }
diff --git a/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp b/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp
index 95b0715007ebbb004df85c939aef291860585c82..8ccec01cea134afa0bb07c8b0233c33f608b610b 100644
--- a/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp
+++ b/MantidQt/SpectrumViewer/src/MatrixWSDataSource.cpp
@@ -172,7 +172,7 @@ DataArray_const_sptr MatrixWSDataSource::getDataArray(double xMin, double xMax,
   MantidVec err;
   yVals.resize(numCols);
   err.resize(numCols);
-  size_t index = 0;
+  auto newDataIter = newData.begin();
   for (size_t i = 0; i < numRows; i++) {
     double midY = yMin + ((double)i + 0.5) * yStep;
     SVUtils::Interpolate(m_totalYMin, m_totalYMax, midY, 0.0,
@@ -184,10 +184,9 @@ DataArray_const_sptr MatrixWSDataSource::getDataArray(double xMin, double xMax,
     err.resize(numCols, 0);
 
     m_matWs->generateHistogram(sourceRow, xScale, yVals, err, true);
-    for (size_t col = 0; col < numCols; col++) {
-      newData[index] = (float)yVals[col];
-      index++;
-    }
+    newDataIter =
+        std::transform(yVals.cbegin(), yVals.cend(), newDataIter,
+                       [](const double y) { return static_cast<float>(y); });
   }
 
   // The calling code is responsible for deleting the DataArray when it is done
diff --git a/MantidQt/SpectrumViewer/src/SVUtils.cpp b/MantidQt/SpectrumViewer/src/SVUtils.cpp
index d2b5bd223beb65eb16c14aa27f545bd45dad5f39..0817ac0d3a8b7658ae23204253f11a279ea7d2b0 100644
--- a/MantidQt/SpectrumViewer/src/SVUtils.cpp
+++ b/MantidQt/SpectrumViewer/src/SVUtils.cpp
@@ -96,20 +96,17 @@ bool SVUtils::FindValidInterval(double &min, double &max) {
   if (max == min) // adjust values so they are not equal
   {
     valuesOK = false;
-    if (min == 0) {
-      min = -1, max = 1;
+    if (min == 0.0) {
+      min = -1.0;
+      max = 1.0;
     } else {
-      max = 1.1 * max;
-      min = 0.9 * min;
+      min *= 0.9;
+      max *= 1.1;
     }
-  }
-
-  if (min > max) // fix the order
+  } else if (min > max) // fix the order
   {
     valuesOK = false;
-    double temp = min;
-    min = max;
-    max = temp;
+    std::swap(min, max);
   }
 
   return valuesOK;
diff --git a/Testing/Data/SystemTest/35991-foc-h00.nxs.md5 b/Testing/Data/SystemTest/35991-foc-h00.nxs.md5
index f3deaffca81a1e7556dee7d38190d0b6bfebda84..36bbeba4eaa8c8e6cecb1b0afdeda5d0f4b71279 100644
--- a/Testing/Data/SystemTest/35991-foc-h00.nxs.md5
+++ b/Testing/Data/SystemTest/35991-foc-h00.nxs.md5
@@ -1 +1 @@
-265a7b2885316076a74893cfd685ed32
+871339d7b852e618f9943d79e0127e4e
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140678.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140678.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0cdc6e0f6cac2c4c054468f00f5bf09f741888a7
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140678.nxs.md5
@@ -0,0 +1 @@
+0eea7509924a5e4912e6685638af2467
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140679.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140679.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..a56d272612db670ee083533ff812ead2da4ad6b6
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140679.nxs.md5
@@ -0,0 +1 @@
+141fbb9be2b046e6d5a7eb73f0c3517c
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140680.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140680.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..d4ec7c941c4538e27e52fc943d568bc2bbed26dd
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140680.nxs.md5
@@ -0,0 +1 @@
+80d8baadfe75b444957c96671f526199
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140681.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140681.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..435035e7b691f7025218b605797fd62543041857
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140681.nxs.md5
@@ -0,0 +1 @@
+f9e8990b52a9b342c4f259e2c3b0e816
diff --git a/Testing/Data/SystemTest/ILL/IN16B/140682.nxs.md5 b/Testing/Data/SystemTest/ILL/IN16B/140682.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..fa052096d2d1cbca40924997deb3874521b3059e
--- /dev/null
+++ b/Testing/Data/SystemTest/ILL/IN16B/140682.nxs.md5
@@ -0,0 +1 @@
+d3f95a48b4d1faaf51aa2f809fe87cd0
diff --git a/Testing/Data/SystemTest/SANS2D/DIRECT_SANS2D_REAR_34327_4m_8mm_16Feb16.txt.md5 b/Testing/Data/SystemTest/SANS2D/DIRECT_SANS2D_REAR_34327_4m_8mm_16Feb16.txt.md5
new file mode 100644
index 0000000000000000000000000000000000000000..5aa700540e452da26eb6b16e61287f25b2bba832
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/DIRECT_SANS2D_REAR_34327_4m_8mm_16Feb16.txt.md5
@@ -0,0 +1 @@
+d64495831325a63e1b961776a8544599
diff --git a/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_REAR_module2_tube12.xml.md5 b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_REAR_module2_tube12.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..6f29bdcd62556866a4ed44fc62f529c15a604af6
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_REAR_module2_tube12.xml.md5
@@ -0,0 +1 @@
+d205f4893ef943234071195b6ed98bed
diff --git a/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml.md5 b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..b053d46750d5e0c1fdcc6a70c780472d7e0f68f6
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml.md5
@@ -0,0 +1 @@
+e7a82ba82ddcf91cacbecd1b603fffbe
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034461.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034461.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..724677275ff3a42eb1b5538e4f1f28c08abc553e
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034461.nxs.md5
@@ -0,0 +1 @@
+02e693a2b832c1ea259c18239bd7cd47
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034481.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034481.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..1f2c5cb910d7a9c6ef7255e28e1c8633d530ae23
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034481.nxs.md5
@@ -0,0 +1 @@
+64c1cbe245360f5990909f2b7d4cc123
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034484.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034484.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..62fd7030e419dfbe0c771eb2433364d8150dc2f2
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034484.nxs.md5
@@ -0,0 +1 @@
+408bcafaaacb4af31970bdf84563cf87
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034502.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034502.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..1c8423ce9f9ffd808f941f14c9c913b7d999aa03
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034502.nxs.md5
@@ -0,0 +1 @@
+552dda99569562306ab421b1054f7171
diff --git a/Testing/Data/SystemTest/SANS2D/SANS2D00034505.nxs.md5 b/Testing/Data/SystemTest/SANS2D/SANS2D00034505.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..c2b0eebda89b2a53b059f595fb5a83bcb12bb76f
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/SANS2D00034505.nxs.md5
@@ -0,0 +1 @@
+6ac86823652c912cad4cbc1d6aed7d75
diff --git a/Testing/Data/SystemTest/SANS2D/TUBE_SANS2D_BOTH_31681_25Sept15.nxs.md5 b/Testing/Data/SystemTest/SANS2D/TUBE_SANS2D_BOTH_31681_25Sept15.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..4030886036723a942560d31ebc3a2b6388374afc
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/TUBE_SANS2D_BOTH_31681_25Sept15.nxs.md5
@@ -0,0 +1 @@
+e0b1f25d4a1746e9f3196e4214959d80
diff --git a/Testing/Data/SystemTest/SANS2D/USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt.md5 b/Testing/Data/SystemTest/SANS2D/USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt.md5
new file mode 100644
index 0000000000000000000000000000000000000000..9ee436a47608bbf59486dafe07bc9a25a4789ad4
--- /dev/null
+++ b/Testing/Data/SystemTest/SANS2D/USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt.md5
@@ -0,0 +1 @@
+d4f1dee75274e1a3f36281e678f4d277
diff --git a/Testing/Data/SystemTest/WISH00038237.raw.md5 b/Testing/Data/SystemTest/WISH00038237.raw.md5
new file mode 100644
index 0000000000000000000000000000000000000000..dcdbf3169550522802e8b0d3e17adf6d8efe35e5
--- /dev/null
+++ b/Testing/Data/SystemTest/WISH00038237.raw.md5
@@ -0,0 +1 @@
+744951e81f99b22a8a7bc32f1e9dee64
diff --git a/Testing/Data/SystemTest/WISHPredictedSingleCrystalPeaks.nxs.md5 b/Testing/Data/SystemTest/WISHPredictedSingleCrystalPeaks.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..080087f4a8dac87020d7a23e2fe1e4d98100d669
--- /dev/null
+++ b/Testing/Data/SystemTest/WISHPredictedSingleCrystalPeaks.nxs.md5
@@ -0,0 +1 @@
+4900e3e61488bd1b025d445a7095f238
diff --git a/Testing/Data/SystemTest/WishAnalysis.nxs.md5 b/Testing/Data/SystemTest/WishAnalysis.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..cc062f02c3b7a22f53db64833a028aa5e62c29f8
--- /dev/null
+++ b/Testing/Data/SystemTest/WishAnalysis.nxs.md5
@@ -0,0 +1 @@
+a62c0a4ffa9e79da7916f5e1370f8919
diff --git a/Testing/Data/SystemTest/predict_peaks_test_random_ub.nxs.md5 b/Testing/Data/SystemTest/predict_peaks_test_random_ub.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..8200440b5f6a6b27d57ab32dd6b1065305bc35fd
--- /dev/null
+++ b/Testing/Data/SystemTest/predict_peaks_test_random_ub.nxs.md5
@@ -0,0 +1 @@
+2cc49505ae5bdd6dfb8c5bddfa2c694f
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_BOTH_Extras_24Mar2015.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_BOTH_Extras_24Mar2015.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..b664590b6b250a6c60752ab844a0b57875dda994
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_BOTH_Extras_24Mar2015.xml.md5
@@ -0,0 +1 @@
+d327787830f80fec05b8b7c3af7de726
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_FRONT_Edges_16Mar2015.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_FRONT_Edges_16Mar2015.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..fb962c799badc82d8ac1862cbbe65102de95751a
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_FRONT_Edges_16Mar2015.xml.md5
@@ -0,0 +1 @@
+cc6749dc7c34bb937e43753e89cd7e93
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..d751c75e1d7ebd4396150167fdce9d86b85fa3dd
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml.md5
@@ -0,0 +1 @@
+5b3196a5b6f7d8a361bc417e1505c81d
diff --git a/Testing/Data/UnitTest/MASK_SANS2D_REAR_Edges_16Mar2015.xml.md5 b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Edges_16Mar2015.xml.md5
new file mode 100644
index 0000000000000000000000000000000000000000..9091fa5db3db462985b95aee80c2d8f13fc38f91
--- /dev/null
+++ b/Testing/Data/UnitTest/MASK_SANS2D_REAR_Edges_16Mar2015.xml.md5
@@ -0,0 +1 @@
+f561508de44753d8a870bb6c133ed1ba
diff --git a/Testing/Data/UnitTest/POL00102.s1.md5 b/Testing/Data/UnitTest/POL00102.s1.md5
new file mode 100644
index 0000000000000000000000000000000000000000..bcfcae2898597f75a6da186c482470660e78dcb3
--- /dev/null
+++ b/Testing/Data/UnitTest/POL00102.s1.md5
@@ -0,0 +1 @@
+1179865bb8336ab9a9362a7a4cd9670d
diff --git a/Testing/Data/UnitTest/POL00102.s2.md5 b/Testing/Data/UnitTest/POL00102.s2.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0c4cfb9e92f080f9824159604497965f7fa2ad8e
--- /dev/null
+++ b/Testing/Data/UnitTest/POL00102.s2.md5
@@ -0,0 +1 @@
+56174685a52192bdce71c8824238f098
diff --git a/Testing/Data/UnitTest/dnstof.d_dat.md5 b/Testing/Data/UnitTest/dnstof.d_dat.md5
new file mode 100644
index 0000000000000000000000000000000000000000..9d42867130c1dfa824b80f78cd7bd2f0534e4912
--- /dev/null
+++ b/Testing/Data/UnitTest/dnstof.d_dat.md5
@@ -0,0 +1 @@
+22cfa93c259ea5cc843dd01dec305cd6
diff --git a/Testing/Data/UnitTest/test_user_file_sans2d.txt.md5 b/Testing/Data/UnitTest/test_user_file_sans2d.txt.md5
new file mode 100644
index 0000000000000000000000000000000000000000..463f24157794a1865798311794b7534d492cbb57
--- /dev/null
+++ b/Testing/Data/UnitTest/test_user_file_sans2d.txt.md5
@@ -0,0 +1 @@
+28723764f0423ecf9090d57bae52fe23
diff --git a/Testing/SystemTests/tests/analysis/CountReflectionsTest.py b/Testing/SystemTests/tests/analysis/CountReflectionsTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e919ac4ee1999da35920b25ce2bd47238f4c0c5
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/CountReflectionsTest.py
@@ -0,0 +1,39 @@
+import stresstesting
+from mantid.simpleapi import *
+from SortHKLTest import HKLStatisticsTestMixin
+
+
+class CountReflectionsTest(HKLStatisticsTestMixin, stresstesting.MantidStressTest):
+    '''
+    This systemtest follows the same principle as the one for SortHKL. It loads data,
+    computes statistics and checks them against reference data obtained from another
+    software package (SORTAV, see SortHKLTest.py for a reference).
+    '''
+
+    def runTest(self):
+        self._init_test_data()
+        self.test_CountReflections()
+
+    def test_CountReflections(self):
+        for space_group in self._space_groups:
+            ub_parameters = self._load_ub_parameters(space_group)
+            reflections = self._load_reflections(space_group, ub_parameters)
+            reference_statistics = self._load_reference_statistics(space_group)
+
+            statistics = self._run_count_reflections(reflections, space_group)
+
+            self._compare_statistics(statistics._asdict(), reference_statistics)
+
+    def _run_count_reflections(self, reflections, space_group):
+        point_group = self._get_point_group(space_group).getHMSymbol()
+        centering = space_group[0]
+
+        return CountReflections(InputWorkspace=reflections, PointGroup=point_group,
+                                LatticeCentering=centering, MinDSpacing=0.5, MaxDSpacing=10.0)
+
+    def _compare_statistics(self, statistics, reference_statistics):
+        self.assertEquals(round(statistics['Redundancy'], 1), round(reference_statistics['<N>'], 1))
+        self.assertEquals(statistics['UniqueReflections'], int(reference_statistics['Nunique']))
+        self.assertDelta(round(statistics['Completeness'] * 100.0, 1),
+                         round(reference_statistics['Completeness'], 1),
+                         0.5)
diff --git a/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py b/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py
index 3663b9608ad0b04063952ce469e9a871977112be..9cf36c940831890e4ea53ab6e05389ad30e64e9a 100644
--- a/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py
+++ b/Testing/SystemTests/tests/analysis/ILLIndirectReductionFWS.py
@@ -1,6 +1,7 @@
 import stresstesting
-from mantid.simpleapi import *
-from mantid import config
+from mantid.simpleapi import CompareWorkspaces, LoadNexusProcessed, IndirectILLReductionFWS
+from mantid import config, mtd
+import numpy
 
 
 class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
@@ -34,7 +35,8 @@ class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
         return ["165944.nxs", "165945.nxs", "165946.nxs", "165947.nxs", "165948.nxs",
                 "165949.nxs", "165950.nxs", "165951.nxs", "165952.nxs", "165953.nxs",
                 "143720.nxs", "143721.nxs", "143722.nxs", "143723.nxs", "143724.nxs",
-                "143725.nxs", "143726.nxs", "143727.nxs", "143728.nxs", "143729.nxs"]
+                "143725.nxs", "143726.nxs", "143727.nxs", "143728.nxs", "143729.nxs",
+                "140678.nxs", "140679.nxs", "140680.nxs", "140681.nxs", "140682.nxs"]
 
     def runTest(self):
 
@@ -44,6 +46,8 @@ class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
 
         self._run_sum_interpolate()
 
+        self._run_efws_mirror_sense()
+
         self.tearDown()
 
     def _run_ifws(self):
@@ -101,3 +105,12 @@ class ILLIndirectReductionFWSTest(stresstesting.MantidStressTest):
         else:
             self.assertTrue(result[0], "Sum/interpolate should be the same for one point: "
                             + result[1].row(0)['Message'])
+
+    def _run_efws_mirror_sense(self):
+        # this tests the EFWS in mirror mode: data in 140680 is indeed split to two wings,
+        # while the others have right wing empty (though mirror sense is ON!)
+        IndirectILLReductionFWS(Run="140678:140682", OutputWorkspace="efws_mirror")
+        yData = mtd["efws_mirror_red"].getItem(0).readY(17)
+        avg = numpy.average(yData)
+        for y in numpy.nditer(yData):
+            self.assertDelta(y, avg, 0.001)
diff --git a/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py b/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py
index 112f82b579b81064913efc5a6f1baea508ee0e09..38987bb05c127fbec485f9d782a9cf2dea4f59c6 100644
--- a/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py
+++ b/Testing/SystemTests/tests/analysis/ILLIndirectReductionQENS.py
@@ -1,6 +1,6 @@
 import stresstesting
-from mantid.simpleapi import *
-from mantid import config
+from mantid.simpleapi import IndirectILLReductionQENS, Plus, CompareWorkspaces, GroupWorkspaces, Scale
+from mantid import config, mtd
 
 
 class ILLIndirectReductionQENSTest(stresstesting.MantidStressTest):
diff --git a/Testing/SystemTests/tests/analysis/ISIS_WISHSingleCrystalReduction.py b/Testing/SystemTests/tests/analysis/ISIS_WISHSingleCrystalReduction.py
new file mode 100644
index 0000000000000000000000000000000000000000..a984f84b0b6f2c419c6bb89dd5f4c62d73f2ee15
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/ISIS_WISHSingleCrystalReduction.py
@@ -0,0 +1,53 @@
+from mantid.simpleapi import *
+import stresstesting
+import numpy as np
+
+
+class WISHSingleCrystalPeakPredictionTest(stresstesting.MantidStressTest):
+    """
+    At the time of writing WISH users rely quite heavily on the PredictPeaks
+    algorithm. As WISH has tubes rather than rectangular detectors sometimes
+    peaks fall between the gaps in the tubes.
+
+    Here we check that PredictPeaks works on a real WISH dataset & UB. This also
+    includes an example of a peak whose center is predicted to fall between two
+    tubes.
+    """
+
+    def requiredFiles(self):
+        return ["WISH00038237.raw", "WISHPredictedSingleCrystalPeaks.nxs"]
+
+    def requiredMemoryMB(self):
+        # Need lots of memory for full WISH dataset
+        return 16000
+
+    def cleanup(self):
+        pass
+
+    def runTest(self):
+        ws = LoadRaw(Filename='WISH00038237.raw', OutputWorkspace='38237')
+        ws = ConvertUnits(ws, 'dSpacing', OutputWorkspace='38237')
+        UB = np.array([[-0.00601763,  0.07397297,  0.05865706],
+                       [ 0.05373321,  0.050198,   -0.05651455],
+                       [-0.07822144,  0.0295911,  -0.04489172]])
+
+        SetUB(ws, UB=UB)
+
+        self._peaks = PredictPeaks(ws, WavelengthMin=0.1, WavelengthMax=100,
+                                   OutputWorkspace='peaks')
+        # We specifically want to check peak -5 -1 -7 exists, so filter for it
+        self._filtered = FilterPeaks(self._peaks, "h^2+k^2+l^2", 75, '=',
+                                     OutputWorkspace='filtered')
+
+    def validate(self):
+        self.assertEqual(self._peaks.rowCount(), 510)
+        self.assertEqual(self._filtered.rowCount(), 6)
+        peak = self._filtered.row(2)
+
+        # This is an example of a peak that is known to fall between the gaps
+        # in WISH tubes. Specifically check this one is predicted to exist
+        # because past bugs have been found in the ray tracing
+        peakMatches = peak['h'] == -5 and peak['k'] == -1 and peak['l'] == -7
+        self.assertTrue(peakMatches)
+
+        return self._peaks.name(), "WISHPredictedSingleCrystalPeaks.nxs"
diff --git a/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py b/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py
index 28bca44cd1d6cc7df197b198676f82d970163ab9..414feb772f52b5745cfcbc683ae9b04feb051f55 100644
--- a/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py
+++ b/Testing/SystemTests/tests/analysis/LoadLotsOfFiles.py
@@ -45,6 +45,8 @@ BANNED_FILES = ['80_tubes_Top_and_Bottom_April_2015.xml',
                 'MASK_SANS2D_FRONT_Edges_16Mar2015.xml',
                 'MASK_SANS2D_REAR_Bottom_3_tubes_16May2014.xml',
                 'MASK_SANS2D_REAR_Edges_16Mar2015.xml',
+                'MASK_SANS2D_REAR_module2_tube12.xml',
+                'MASK_SANS2D_beam_stop_4m_x_100mm_2July2015_medium_beamstop.xml',
                 'MASK_SANS2D_BOTH_Extras_24Mar2015.xml',
                 'MASK_Tube6.xml',
                 'MASK_squareBeamstop_6x8Beam_11-October-2016.xml',
@@ -91,6 +93,7 @@ BANNED_FILES = ['80_tubes_Top_and_Bottom_April_2015.xml',
                 'poldi2015n000977.hdf',
                 'USER_SANS2D_143ZC_2p4_4m_M4_Knowles_12mm.txt',
                 'USER_LARMOR_151B_LarmorTeam_80tubes_BenchRot1p4_M4_r3699.txt',
+                'USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt',
                 'USER_Larmor_163F_HePATest_r13038.txt',
                 'Vesuvio_IP_file_test.par',
                 'IP0004_10.par',
diff --git a/Testing/SystemTests/tests/analysis/PredictPeaksTest.py b/Testing/SystemTests/tests/analysis/PredictPeaksTest.py
index 1e7f9aba5a5878045ebd37a8d2c63f85ca54394e..bec31c000614811b010cbdc2936577c823c28e62 100644
--- a/Testing/SystemTests/tests/analysis/PredictPeaksTest.py
+++ b/Testing/SystemTests/tests/analysis/PredictPeaksTest.py
@@ -72,6 +72,8 @@ class PredictPeaksTestTOPAZ(stresstesting.MantidStressTest):
 
 
 class PredictPeaksCalculateStructureFactorsTest(stresstesting.MantidStressTest):
+    expected_num_peaks = 546
+
     def runTest(self):
         simulationWorkspace = CreateSimulationWorkspace(Instrument='WISH',
                                                         BinParams='0,1,2',
@@ -89,9 +91,9 @@ class PredictPeaksCalculateStructureFactorsTest(stresstesting.MantidStressTest):
                              MinDSpacing=0.5, MaxDSpacing=10,
                              CalculateStructureFactors=True)
 
-        self.assertEquals(peaks.getNumberPeaks(), 540)
+        self.assertEquals(peaks.getNumberPeaks(), self.expected_num_peaks)
 
-        for i in range(540):
+        for i in range(self.expected_num_peaks):
             peak = peaks.getPeak(i)
             self.assertLessThan(0.0, peak.getIntensity())
 
@@ -100,6 +102,6 @@ class PredictPeaksCalculateStructureFactorsTest(stresstesting.MantidStressTest):
                                    MinDSpacing=0.5, MaxDSpacing=10,
                                    CalculateStructureFactors=False)
 
-        for i in range(540):
+        for i in range(self.expected_num_peaks):
             peak = peaks_no_sf.getPeak(i)
             self.assertEquals(0.0, peak.getIntensity())
diff --git a/Testing/SystemTests/tests/analysis/SANSBatchReductionTest.py b/Testing/SystemTests/tests/analysis/SANSBatchReductionTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2071b4a22e6b848d7324518ecb709c9440e8f11
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSBatchReductionTest.py
@@ -0,0 +1,161 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import stresstesting
+from mantid.api import AnalysisDataService
+
+from sans.sans_batch import SANSBatchReduction
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.state.data import get_data_builder
+from sans.common.enums import (SANSFacility, ISISReductionMode, OutputMode)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+
+
+# -----------------------------------------------
+# Tests for the SANSBatchReduction algorithm
+# -----------------------------------------------
+class SANSBatchReductionTest(unittest.TestCase):
+
+    def _run_batch_reduction(self, states, use_optimizations=False):
+        batch_reduction_alg = SANSBatchReduction()
+        try:
+            batch_reduction_alg(states, use_optimizations, OutputMode.PublishToADS)
+            did_raise = False
+        except:  # noqa
+            did_raise = True
+        self.assertFalse(did_raise)
+
+    def _compare_workspace(self, workspace, reference_file_name):
+        # Load the reference file
+        load_name = "LoadNexusProcessed"
+        load_options = {"Filename": reference_file_name,
+                        "OutputWorkspace": EMPTY_NAME}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        reference_workspace = load_alg.getProperty("OutputWorkspace").value
+
+        # Compare reference file with the output_workspace
+        # We need to disable the instrument comparison, it takes way too long
+        # We need to disable the sample -- Not clear why yet
+        # operation how many entries can be found in the sample logs
+        compare_name = "CompareWorkspaces"
+        compare_options = {"Workspace1": workspace,
+                           "Workspace2": reference_workspace,
+                           "Tolerance": 1e-6,
+                           "CheckInstrument": False,
+                           "CheckSample": False,
+                           "ToleranceRelErr": True,
+                           "CheckAllData": True,
+                           "CheckMasking": True,
+                           "CheckType": True,
+                           "CheckAxes": True,
+                           "CheckSpectraMap": True}
+        compare_alg = create_unmanaged_algorithm(compare_name, **compare_options)
+        compare_alg.setChild(False)
+        compare_alg.execute()
+        result = compare_alg.getProperty("Result").value
+        self.assertTrue(result)
+
+    def test_that_batch_reduction_evaluates_LAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event based data but we want to compare it with histogram data from the
+        # old reduction system we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Act
+        states = [state]
+        self._run_batch_reduction(states, use_optimizations=False)
+        workspace_name = "34484rear_1D_1.75_16.5"
+        output_workspace = AnalysisDataService.retrieve(workspace_name)
+
+        # Evaluate it up to a defined point
+        reference_file_name = "SANS2D_ws_D20_reference_LAB_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+        if AnalysisDataService.doesExist(workspace_name):
+            AnalysisDataService.remove(workspace_name)
+
+    def test_batch_reduction_on_multiperiod_file(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D0005512")
+
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("MASKSANS2Doptions.091A")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+        state = user_file_director.construct()
+
+        # Act
+        states = [state]
+        self._run_batch_reduction(states, use_optimizations=False)
+
+        # Assert
+        # We only assert that the expected workspaces exist on the ADS
+        expected_workspaces = ["5512p1rear_1D_2.0_14.0Phi-45.0_45.0", "5512p2rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p3rear_1D_2.0_14.0Phi-45.0_45.0", "5512p4rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p5rear_1D_2.0_14.0Phi-45.0_45.0", "5512p6rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p7rear_1D_2.0_14.0Phi-45.0_45.0", "5512p8rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p9rear_1D_2.0_14.0Phi-45.0_45.0", "5512p10rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p11rear_1D_2.0_14.0Phi-45.0_45.0", "5512p12rear_1D_2.0_14.0Phi-45.0_45.0",
+                               "5512p13rear_1D_2.0_14.0Phi-45.0_45.0"]
+        for element in expected_workspaces:
+            self.assertTrue(AnalysisDataService.doesExist(element))
+
+        # Clean up
+        for element in expected_workspaces:
+            AnalysisDataService.remove(element)
+
+
+class SANSBatchReductionRunnerTest(stresstesting.MantidStressTest):
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSBatchReductionTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 2000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Testing/SystemTests/tests/analysis/SANSReductionCoreTest.py b/Testing/SystemTests/tests/analysis/SANSReductionCoreTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..166ecf5130b36bed7ec3a8137befcc577468053d
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSReductionCoreTest.py
@@ -0,0 +1,192 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import os
+import stresstesting
+
+import mantid
+from mantid.api import AlgorithmManager
+
+from sans.state.data import get_data_builder
+from sans.common.enums import (DetectorType, DataType, SANSFacility)
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+
+
+# -----------------------------------------------
+# Tests for the SANSReductionCore algorithm
+# -----------------------------------------------
+class SANSReductionCoreTest(unittest.TestCase):
+    def _load_workspace(self, state):
+        load_alg = AlgorithmManager.createUnmanaged("SANSLoad")
+        load_alg.setChild(True)
+        load_alg.initialize()
+
+        state_dict = state.property_manager
+        load_alg.setProperty("SANSState", state_dict)
+        load_alg.setProperty("PublishToCache", False)
+        load_alg.setProperty("UseCached", False)
+        load_alg.setProperty("MoveWorkspace", False)
+        load_alg.setProperty("SampleScatterWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleScatterMonitorWorkspace", EMPTY_NAME)
+        if state.data.sample_transmission:
+            load_alg.setProperty("SampleTransmissionWorkspace", EMPTY_NAME)
+        if state.data.sample_direct:
+            load_alg.setProperty("SampleDirectWorkspace", EMPTY_NAME)
+
+        # Act
+        load_alg.execute()
+        self.assertTrue(load_alg.isExecuted())
+        sample_scatter = load_alg.getProperty("SampleScatterWorkspace").value
+        sample_scatter_monitor_workspace = load_alg.getProperty("SampleScatterMonitorWorkspace").value
+        if state.data.sample_transmission:
+            transmission_workspace = load_alg.getProperty("SampleTransmissionWorkspace").value
+        else:
+            transmission_workspace = None
+        if state.data.sample_direct:
+            direct_workspace = load_alg.getProperty("SampleDirectWorkspace").value
+        else:
+            direct_workspace = None
+        return sample_scatter, sample_scatter_monitor_workspace, transmission_workspace, direct_workspace
+
+    def _run_reduction_core(self, state, workspace, monitor, transmission=None, direct=None,
+                            detector_type=DetectorType.LAB, component=DataType.Sample):
+        reduction_core_alg = AlgorithmManager.createUnmanaged("SANSReductionCore")
+        reduction_core_alg.setChild(True)
+        reduction_core_alg.initialize()
+
+        state_dict = state.property_manager
+        reduction_core_alg.setProperty("SANSState", state_dict)
+        reduction_core_alg.setProperty("ScatterWorkspace", workspace)
+        reduction_core_alg.setProperty("ScatterMonitorWorkspace", monitor)
+
+        if transmission:
+            reduction_core_alg.setProperty("TransmissionWorkspace", transmission)
+
+        if direct:
+            reduction_core_alg.setProperty("DirectWorkspace", direct)
+
+        reduction_core_alg.setProperty("Component", DetectorType.to_string(detector_type))
+        reduction_core_alg.setProperty("DataType", DataType.to_string(component))
+
+        reduction_core_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+
+        # Act
+        reduction_core_alg.execute()
+        self.assertTrue(reduction_core_alg.isExecuted())
+        return reduction_core_alg
+
+    def _compare_workspace(self, workspace, reference_file_name):
+        # Load the reference file
+        load_name = "LoadNexusProcessed"
+        load_options = {"Filename": reference_file_name,
+                        "OutputWorkspace": EMPTY_NAME}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        reference_workspace = load_alg.getProperty("OutputWorkspace").value
+
+        # Save the workspace out and reload it again. This makes equalizes it with the reference workspace
+        f_name = os.path.join(mantid.config.getString('defaultsave.directory'),
+                              'SANS_temp_single_core_reduction_testout.nxs')
+
+        save_name = "SaveNexus"
+        save_options = {"Filename": f_name,
+                        "InputWorkspace": workspace}
+        save_alg = create_unmanaged_algorithm(save_name, **save_options)
+        save_alg.execute()
+        load_alg.setProperty("Filename", f_name)
+        load_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+        load_alg.execute()
+
+        ws = load_alg.getProperty("OutputWorkspace").value
+
+        # Compare reference file with the output_workspace
+        # We need to disable the instrument comparison, it takes way too long
+        # We need to disable the sample -- since the sample has been modified (more logs are being written)
+        # operation how many entries can be found in the sample logs
+        compare_name = "CompareWorkspaces"
+        compare_options = {"Workspace1": ws,
+                           "Workspace2": reference_workspace,
+                           "Tolerance": 1e-6,
+                           "CheckInstrument": False,
+                           "CheckSample": False,
+                           "ToleranceRelErr": True,
+                           "CheckAllData": True,
+                           "CheckMasking": True,
+                           "CheckType": True,
+                           "CheckAxes": True,
+                           "CheckSpectraMap": True}
+        compare_alg = create_unmanaged_algorithm(compare_name, **compare_options)
+        compare_alg.setChild(False)
+        compare_alg.execute()
+        result = compare_alg.getProperty("Result").value
+        self.assertTrue(result)
+
+        # Remove file
+        if os.path.exists(f_name):
+            os.remove(f_name)
+
+    def test_that_reduction_core_evaluates_LAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_state = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_state)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event based data but we want to compare it with histogram data from the
+        # old reduction system we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+        # Construct the final state
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        workspace, workspace_monitor, transmission_workspace, direct_workspace = self._load_workspace(state)
+
+        # Act
+        reduction_core_alg = self._run_reduction_core(state, workspace, workspace_monitor,
+                                                      transmission_workspace, direct_workspace)
+        output_workspace = reduction_core_alg.getProperty("OutputWorkspace").value
+
+        # Evaluate it up to a defined point
+        reference_file_name = "SANS2D_ws_D20_reference.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+
+class SANSReductionCoreRunnerTest(stresstesting.MantidStressTest):
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSReductionCoreTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 2000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Testing/SystemTests/tests/analysis/SANSSingleReductionTest.py b/Testing/SystemTests/tests/analysis/SANSSingleReductionTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f504cfe858a3e595c0019db91d41d2d0f52b93b
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/SANSSingleReductionTest.py
@@ -0,0 +1,368 @@
+# pylint: disable=too-many-public-methods, invalid-name, too-many-arguments
+
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import stresstesting
+
+import mantid  # noqa
+from mantid.api import AlgorithmManager
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.state.data import get_data_builder
+from sans.common.enums import (SANSFacility, ISISReductionMode, ReductionDimensionality, FitModeForMerge)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_unmanaged_algorithm
+
+
+# -----------------------------------------------
+# Tests for the SANSSingleReduction algorithm
+# -----------------------------------------------
+class SANSSingleReductionTest(unittest.TestCase):
+    def _load_workspace(self, state):
+        load_alg = AlgorithmManager.createUnmanaged("SANSLoad")
+        load_alg.setChild(True)
+        load_alg.initialize()
+
+        state_dict = state.property_manager
+        load_alg.setProperty("SANSState", state_dict)
+        load_alg.setProperty("PublishToCache", False)
+        load_alg.setProperty("UseCached", False)
+        load_alg.setProperty("MoveWorkspace", False)
+
+        load_alg.setProperty("SampleScatterWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleScatterMonitorWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleTransmissionWorkspace", EMPTY_NAME)
+        load_alg.setProperty("SampleDirectWorkspace", EMPTY_NAME)
+
+        load_alg.setProperty("CanScatterWorkspace", EMPTY_NAME)
+        load_alg.setProperty("CanScatterMonitorWorkspace", EMPTY_NAME)
+        load_alg.setProperty("CanTransmissionWorkspace", EMPTY_NAME)
+        load_alg.setProperty("CanDirectWorkspace", EMPTY_NAME)
+
+        # Act
+        load_alg.execute()
+        self.assertTrue(load_alg.isExecuted())
+        sample_scatter = load_alg.getProperty("SampleScatterWorkspace").value
+        sample_scatter_monitor_workspace = load_alg.getProperty("SampleScatterMonitorWorkspace").value
+        transmission_workspace = load_alg.getProperty("SampleTransmissionWorkspace").value
+        direct_workspace = load_alg.getProperty("SampleDirectWorkspace").value
+
+        can_scatter_workspace = load_alg.getProperty("CanScatterWorkspace").value
+        can_scatter_monitor_workspace = load_alg.getProperty("CanScatterMonitorWorkspace").value
+        can_transmission_workspace = load_alg.getProperty("CanTransmissionWorkspace").value
+        can_direct_workspace = load_alg.getProperty("CanDirectWorkspace").value
+
+        return sample_scatter, sample_scatter_monitor_workspace, transmission_workspace, direct_workspace, \
+               can_scatter_workspace, can_scatter_monitor_workspace, can_transmission_workspace, can_direct_workspace  # noqa
+
+    def _run_single_reduction(self, state, sample_scatter, sample_monitor, sample_transmission=None, sample_direct=None,
+                              can_scatter=None, can_monitor=None, can_transmission=None, can_direct=None,
+                              output_settings=None):
+        single_reduction_name = "SANSSingleReduction"
+        state_dict = state.property_manager
+
+        single_reduction_options = {"SANSState": state_dict,
+                                    "SampleScatterWorkspace": sample_scatter,
+                                    "SampleScatterMonitorWorkspace": sample_monitor,
+                                    "UseOptimizations": False}
+        if sample_transmission:
+            single_reduction_options.update({"SampleTransmissionWorkspace": sample_transmission})
+
+        if sample_direct:
+            single_reduction_options.update({"SampleDirectWorkspace": sample_direct})
+
+        if can_scatter:
+            single_reduction_options.update({"CanScatterWorkspace": can_scatter})
+
+        if can_monitor:
+            single_reduction_options.update({"CanScatterMonitorWorkspace": can_monitor})
+
+        if can_transmission:
+            single_reduction_options.update({"CanTransmissionWorkspace": can_transmission})
+
+        if can_direct:
+            single_reduction_options.update({"CanDirectWorkspace": can_direct})
+
+        if output_settings:
+            single_reduction_options.update(output_settings)
+
+        single_reduction_alg = create_unmanaged_algorithm(single_reduction_name, **single_reduction_options)
+
+        # Act
+        single_reduction_alg.execute()
+        self.assertTrue(single_reduction_alg.isExecuted())
+        return single_reduction_alg
+
+    def _compare_workspace(self, workspace, reference_file_name):
+        # Load the reference file
+        load_name = "LoadNexusProcessed"
+        load_options = {"Filename": reference_file_name,
+                        "OutputWorkspace": EMPTY_NAME}
+        load_alg = create_unmanaged_algorithm(load_name, **load_options)
+        load_alg.execute()
+        reference_workspace = load_alg.getProperty("OutputWorkspace").value
+
+        # Compare reference file with the output_workspace
+        # We need to disable the instrument comparison, it takes way too long
+        # We need to disable the sample -- Not clear why yet
+        # operation how many entries can be found in the sample logs
+        compare_name = "CompareWorkspaces"
+        compare_options = {"Workspace1": workspace,
+                           "Workspace2": reference_workspace,
+                           "Tolerance": 1e-6,
+                           "CheckInstrument": False,
+                           "CheckSample": False,
+                           "ToleranceRelErr": True,
+                           "CheckAllData": True,
+                           "CheckMasking": True,
+                           "CheckType": True,
+                           "CheckAxes": True,
+                           "CheckSpectraMap": True}
+        compare_alg = create_unmanaged_algorithm(compare_name, **compare_options)
+        compare_alg.setChild(False)
+        compare_alg.execute()
+        result = compare_alg.getProperty("Result").value
+        self.assertTrue(result)
+
+    def test_that_single_reduction_evaluates_LAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event based data but we want to compare it with histogram data from the
+        # old reduction system we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, can, can_monitor, \
+        can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceLAB": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceLAB").value
+
+        # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_LAB_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+    def test_that_single_reduction_evaluates_HAB(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.HAB)
+
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event based data but we want to compare it with histogram data from the
+        # old reduction system we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, can, can_monitor,\
+        can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceHAB": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceHAB").value
+
+        # # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_HAB_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+    def test_that_single_reduction_evaluates_merged(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.Merged)
+        user_file_director.set_reduction_builder_merge_fit_mode(FitModeForMerge.Both)
+        user_file_director.set_reduction_builder_merge_scale(1.0)
+        user_file_director.set_reduction_builder_merge_shift(0.0)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event based data but we want to compare it with histogram data from the
+        # old reduction system we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, \
+        can, can_monitor, can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceMerged": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceMerged").value
+        output_scale_factor = single_reduction_alg.getProperty("OutScaleFactor").value
+        output_shift_factor = single_reduction_alg.getProperty("OutShiftFactor").value
+
+        tolerance = 1e-6
+        expected_shift = 0.00278452
+        expected_scale = 0.81439154
+
+        self.assertTrue(abs(expected_shift - output_shift_factor) < tolerance)
+        self.assertTrue(abs(expected_scale - output_scale_factor) < tolerance)
+
+        # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_Merged_1D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+    def test_that_single_reduction_evaluates_LAB_for_2D_reduction(self):
+        # Arrange
+        # Build the data information
+        data_builder = get_data_builder(SANSFacility.ISIS)
+        data_builder.set_sample_scatter("SANS2D00034484")
+        data_builder.set_sample_transmission("SANS2D00034505")
+        data_builder.set_sample_direct("SANS2D00034461")
+        data_builder.set_can_scatter("SANS2D00034481")
+        data_builder.set_can_transmission("SANS2D00034502")
+        data_builder.set_can_direct("SANS2D00034461")
+
+        data_builder.set_calibration("TUBE_SANS2D_BOTH_31681_25Sept15.nxs")
+        data_info = data_builder.build()
+
+        # Get the rest of the state from the user file
+        user_file_director = UserFileStateDirectorISIS(data_info)
+        user_file_director.set_user_file("USER_SANS2D_154E_2p4_4m_M3_Xpress_8mm_SampleChanger.txt")
+        # Set the reduction mode to LAB
+        user_file_director.set_reduction_builder_reduction_mode(ISISReductionMode.LAB)
+        user_file_director.set_reduction_builder_reduction_dimensionality(ReductionDimensionality.TwoDim)
+        user_file_director.set_convert_to_q_builder_reduction_dimensionality(ReductionDimensionality.TwoDim)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY BEGIN -- Remove when appropriate
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # Since we are dealing with event based data but we want to compare it with histogram data from the
+        # old reduction system we need to enable the compatibility mode
+        user_file_director.set_compatibility_builder_use_compatibility_mode(True)
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        # COMPATIBILITY END
+        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+        state = user_file_director.construct()
+
+        # Load the sample workspaces
+        sample, sample_monitor, transmission_workspace, direct_workspace, can, can_monitor, \
+        can_transmission, can_direct = self._load_workspace(state)  # noqa
+
+        # Act
+        output_settings = {"OutputWorkspaceLAB": EMPTY_NAME}
+        single_reduction_alg = self._run_single_reduction(state, sample_scatter=sample,
+                                                          sample_transmission=transmission_workspace,
+                                                          sample_direct=direct_workspace,
+                                                          sample_monitor=sample_monitor,
+                                                          can_scatter=can,
+                                                          can_monitor=can_monitor,
+                                                          can_transmission=can_transmission,
+                                                          can_direct=can_direct,
+                                                          output_settings=output_settings)
+        output_workspace = single_reduction_alg.getProperty("OutputWorkspaceLAB").value
+
+        # Compare the output of the reduction with the reference
+        reference_file_name = "SANS2D_ws_D20_reference_LAB_2D.nxs"
+        self._compare_workspace(output_workspace, reference_file_name)
+
+
+class SANSReductionRunnerTest(stresstesting.MantidStressTest):
+    def __init__(self):
+        stresstesting.MantidStressTest.__init__(self)
+        self._success = False
+
+    def runTest(self):
+        suite = unittest.TestSuite()
+        suite.addTest(unittest.makeSuite(SANSSingleReductionTest, 'test'))
+        runner = unittest.TextTestRunner()
+        res = runner.run(suite)
+        if res.wasSuccessful():
+            self._success = True
+
+    def requiredMemoryMB(self):
+        return 2000
+
+    def validate(self):
+        return self._success
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/Testing/SystemTests/tests/analysis/SortHKLTest.py b/Testing/SystemTests/tests/analysis/SortHKLTest.py
index 39c2e6c91ccb466139f09f06d2c0410f829c5f2c..1791db2de5d615bd92e6e73c2eee4322e2f47724 100644
--- a/Testing/SystemTests/tests/analysis/SortHKLTest.py
+++ b/Testing/SystemTests/tests/analysis/SortHKLTest.py
@@ -5,25 +5,8 @@ from mantid.simpleapi import *
 from mantid.geometry import PointGroupFactory
 
 
-class SortHKLTest(stresstesting.MantidStressTest):
-    ''' System test for SortHKL
-
-    This system test compares some of the output of SortHKL to statistics produced
-    by running the program SORTAV [1] on the same data set.
-
-    Since SORTAV processes HKL-files and those are small, the peaks are loaded from
-    HKL-files and put into an empty PeaksWorkspace. Two additional files are read
-    for the test, the parameters for SetUB in JSON-format and some of the output from
-    the sortav.lp file which contains the output after a SORTAV-run.
-
-    This system test is there to ensure the correctness what SortHKL does against
-    the output of an established program.
-
-    [1] SORTAV: ftp://ftp.hwi.buffalo.edu/pub/Blessing/Drear/sortav.use
-        (and references therein).
-    '''
-
-    def runTest(self):
+class HKLStatisticsTestMixin(object):
+    def _init_test_data(self):
         self._ws = CreateSimulationWorkspace(Instrument='TOPAZ',
                                              BinParams='0,10000,20000',
                                              UnitX='TOF',
@@ -41,18 +24,6 @@ class SortHKLTest(stresstesting.MantidStressTest):
         self._template_ub = 'ub_parameters_{0}.json'
         self._template_statistics = 'statistics_{0}.txt'
 
-        self.test_SortHKLStatistics()
-
-    def test_SortHKLStatistics(self):
-        for space_group in self._space_groups:
-            ub_parameters = self._load_ub_parameters(space_group)
-            reflections = self._load_reflections(space_group, ub_parameters)
-            statistics, sorted_hkls = self._run_sort_hkl(reflections, space_group)
-            reference_statistics = self._load_reference_statistics(space_group)
-
-            self._compare_statistics(statistics, reference_statistics)
-            self._check_sorted_hkls_consistency(sorted_hkls, space_group)
-
     def _load_ub_parameters(self, space_group):
         filename = FileFinder.Instance().getFullPath(self._base_directory + self._template_ub.format(space_group))
 
@@ -83,17 +54,6 @@ class SortHKLTest(stresstesting.MantidStressTest):
 
         return actual_hkls
 
-    def _run_sort_hkl(self, reflections, space_group):
-        point_group_name = self._get_point_group(space_group).getName()
-        centering_name = self._centering_map[space_group[0]]
-
-        # pylint: disable=unused-variable
-        sorted_hkls, chi2, statistics = SortHKL(InputWorkspace=reflections,
-                                                PointGroup=point_group_name,
-                                                LatticeCentering=centering_name)
-
-        return statistics.row(0), sorted_hkls
-
     def _get_point_group(self, space_group):
         return PointGroupFactory.createPointGroup(space_group[1:].replace('_', '/'))
 
@@ -116,6 +76,50 @@ class SortHKLTest(stresstesting.MantidStressTest):
 
         return overall_statistics
 
+
+class SortHKLTest(HKLStatisticsTestMixin, stresstesting.MantidStressTest):
+    ''' System test for SortHKL
+
+    This system test compares some of the output of SortHKL to statistics produced
+    by running the program SORTAV [1] on the same data set.
+
+    Since SORTAV processes HKL-files and those are small, the peaks are loaded from
+    HKL-files and put into an empty PeaksWorkspace. Two additional files are read
+    for the test, the parameters for SetUB in JSON-format and some of the output from
+    the sortav.lp file which contains the output after a SORTAV-run.
+
+    This system test is there to ensure the correctness what SortHKL does against
+    the output of an established program.
+
+    [1] SORTAV: ftp://ftp.hwi.buffalo.edu/pub/Blessing/Drear/sortav.use
+        (and references therein).
+    '''
+
+    def runTest(self):
+        self._init_test_data()
+        self.test_SortHKLStatistics()
+
+    def test_SortHKLStatistics(self):
+        for space_group in self._space_groups:
+            ub_parameters = self._load_ub_parameters(space_group)
+            reflections = self._load_reflections(space_group, ub_parameters)
+            statistics, sorted_hkls = self._run_sort_hkl(reflections, space_group)
+            reference_statistics = self._load_reference_statistics(space_group)
+
+            self._compare_statistics(statistics, reference_statistics)
+            self._check_sorted_hkls_consistency(sorted_hkls, space_group)
+
+    def _run_sort_hkl(self, reflections, space_group):
+        point_group_name = self._get_point_group(space_group).getName()
+        centering_name = self._centering_map[space_group[0]]
+
+        # pylint: disable=unused-variable
+        sorted_hkls, chi2, statistics = SortHKL(InputWorkspace=reflections,
+                                                PointGroup=point_group_name,
+                                                LatticeCentering=centering_name)
+
+        return statistics.row(0), sorted_hkls
+
     def _compare_statistics(self, statistics, reference_statistics):
         self.assertEquals(round(statistics['Multiplicity'], 1), round(reference_statistics['<N>'], 1))
         self.assertEquals(round(statistics['Rpim'], 2), round(100.0 * reference_statistics['Rm'], 2))
diff --git a/Testing/SystemTests/tests/analysis/SphinxWarnings.py b/Testing/SystemTests/tests/analysis/SphinxWarnings.py
index 4155b6fa1f77c579f7be004a9b511e8f7a3405e2..b31e535a9cb43930336aed6f56678d4a55cbf4b8 100644
--- a/Testing/SystemTests/tests/analysis/SphinxWarnings.py
+++ b/Testing/SystemTests/tests/analysis/SphinxWarnings.py
@@ -20,6 +20,7 @@ class SphinxWarnings(stresstesting.MantidStressTest):
                                 'Diffraction',
                                 'Events',
                                 'Examples',
+                                'ILL',
                                 'ISIS',
                                 'Inelastic',
                                 'MDAlgorithms',
diff --git a/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py b/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py
index 52185ccaef5f90129730fefdc05d5c03d6724c15..d74cb6bc5799f830f3c3d803673922fe0e800b96 100644
--- a/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py
+++ b/Testing/SystemTests/tests/analysis/VesuvioCommandsTest.py
@@ -11,7 +11,7 @@ from mantid.simpleapi import *
 from vesuvio.commands import fit_tof
 
 
-#=====================================Helper Function=================================
+# =====================================Helper Function=================================
 
 def _is_old_boost_version():
     # It appears that a difference in boost version is causing different
@@ -32,7 +32,7 @@ def _create_test_flags(background, multivariate=False):
     flags['fit_mode'] = 'spectrum'
     flags['spectra'] = '135'
     if multivariate:
-        mass1 = {'value': 1.0079, 'function': 'MultivariateGaussian', 'SigmaX':5, 'SigmaY':5, 'SigmaZ':5}
+        mass1 = {'value': 1.0079, 'function': 'MultivariateGaussian', 'SigmaX': 5, 'SigmaY': 5, 'SigmaZ': 5}
     else:
         mass1 = {'value': 1.0079, 'function': 'GramCharlier', 'width': [2, 5, 7],
                  'hermite_coeffs': [1, 0, 0], 'k_free': 0, 'sears_flag': 1}
@@ -42,7 +42,7 @@ def _create_test_flags(background, multivariate=False):
     flags['masses'] = [mass1, mass2, mass3, mass4]
     flags['intensity_constraints'] = [0, 1, 0, -4]
     if background:
-        flags['background'] = {'function': 'Polynomial', 'order':3}
+        flags['background'] = {'function': 'Polynomial', 'order': 3}
     else:
         flags['background'] = None
     flags['ip_file'] = 'Vesuvio_IP_file_test.par'
@@ -78,11 +78,11 @@ def _get_peak_height_and_index(workspace, ws_index):
 
     return peak_height, peak_bin
 
-#====================================================================================
 
+# ====================================================================================
 
-class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
 
+class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -96,7 +96,7 @@ class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
 
         fitted_wsg = self._fit_results[0]
         self.assertTrue(isinstance(fitted_wsg, WorkspaceGroup))
-        self.assertEqual(2, len(fitted_wsg))
+        self.assertEqual(1, len(fitted_wsg))
 
         fitted_ws = fitted_wsg[0]
         self.assertTrue(isinstance(fitted_ws, MatrixWorkspace))
@@ -128,7 +128,8 @@ class FitSingleSpectrumNoBackgroundTest(stresstesting.MantidStressTest):
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
+
+# ====================================================================================
 
 
 class FitSingleSpectrumBivariateGaussianTiesTest(stresstesting.MantidStressTest):
@@ -145,17 +146,17 @@ class FitSingleSpectrumBivariateGaussianTiesTest(stresstesting.MantidStressTest)
         self._fit_results = fit_tof(runs, flags)
 
     def validate(self):
-        #Get fit workspace
+        # Get fit workspace
         fit_params = mtd['15039-15045_params_iteration_1']
         f0_sigma_x = fit_params.readY(2)[0]
         f0_sigma_y = fit_params.readY(3)[0]
         self.assertAlmostEqual(f0_sigma_x, f0_sigma_y)
 
-#====================================================================================
 
+# ====================================================================================
 
-class SingleSpectrumBackground(stresstesting.MantidStressTest):
 
+class SingleSpectrumBackground(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -169,7 +170,7 @@ class SingleSpectrumBackground(stresstesting.MantidStressTest):
 
         fitted_wsg = self._fit_results[0]
         self.assertTrue(isinstance(fitted_wsg, WorkspaceGroup))
-        self.assertEqual(2, len(fitted_wsg))
+        self.assertEqual(1, len(fitted_wsg))
 
         fitted_ws = fitted_wsg[0]
         self.assertTrue(isinstance(fitted_ws, MatrixWorkspace))
@@ -203,11 +204,11 @@ class SingleSpectrumBackground(stresstesting.MantidStressTest):
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
 
+# ====================================================================================
 
-class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
 
+class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -222,32 +223,26 @@ class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
         self.assertEquals(4, len(self._fit_results))
 
         fitted_banks = self._fit_results[0]
-        self.assertTrue(isinstance(fitted_banks, list))
+        self.assertTrue(isinstance(fitted_banks, WorkspaceGroup))
         self.assertEqual(8, len(fitted_banks))
 
         bank1 = fitted_banks[0]
-        self.assertTrue(isinstance(bank1, WorkspaceGroup))
+        self.assertTrue(isinstance(bank1, MatrixWorkspace))
 
-        bank1_data = bank1[0]
-        self.assertTrue(isinstance(bank1_data, MatrixWorkspace))
+        self.assertAlmostEqual(50.0, bank1.readX(0)[0])
+        self.assertAlmostEqual(562.0, bank1.readX(0)[-1])
 
-        self.assertAlmostEqual(50.0, bank1_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, bank1_data.readX(0)[-1])
+        _equal_within_tolerance(self, 8.23840378769e-05, bank1.readY(1)[0])
+        _equal_within_tolerance(self, 0.000556695665501, bank1.readY(1)[-1])
 
-        _equal_within_tolerance(self, 8.23840378769e-05, bank1_data.readY(1)[0])
-        _equal_within_tolerance(self, 0.000556695665501, bank1_data.readY(1)[-1])
+        bank8 = fitted_banks[7]
+        self.assertTrue(isinstance(bank8, MatrixWorkspace))
 
-        bank8 = fitted_banks[-1]
-        self.assertTrue(isinstance(bank8, WorkspaceGroup))
+        self.assertAlmostEqual(50.0, bank8.readX(0)[0])
+        self.assertAlmostEqual(562.0, bank8.readX(0)[-1])
 
-        bank8_data = bank8[0]
-        self.assertTrue(isinstance(bank8_data, MatrixWorkspace))
-
-        self.assertAlmostEqual(50.0, bank8_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, bank8_data.readX(0)[-1])
-
-        _equal_within_tolerance(self, 0.00025454613205, bank8_data.readY(1)[0])
-        _equal_within_tolerance(self, 0.00050412575393, bank8_data.readY(1)[-1])
+        _equal_within_tolerance(self, 0.00025454613205, bank8.readY(1)[0])
+        _equal_within_tolerance(self, 0.00050412575393, bank8.readY(1)[-1])
 
         chisq_values = self._fit_results[2]
         self.assertTrue(isinstance(chisq_values, list))
@@ -256,11 +251,11 @@ class BankByBankForwardSpectraNoBackground(stresstesting.MantidStressTest):
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
 
+# ====================================================================================
 
-class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest):
 
+class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest):
     _fit_results = None
 
     def runTest(self):
@@ -275,32 +270,26 @@ class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest)
         self.assertEquals(4, len(self._fit_results))
 
         fitted_spec = self._fit_results[0]
-        self.assertTrue(isinstance(fitted_spec, list))
+        self.assertTrue(isinstance(fitted_spec, WorkspaceGroup))
         self.assertEqual(2, len(fitted_spec))
 
         spec143 = fitted_spec[0]
-        self.assertTrue(isinstance(spec143, WorkspaceGroup))
-
-        spec143_data = spec143[0]
-        self.assertTrue(isinstance(spec143_data, MatrixWorkspace))
-
-        self.assertAlmostEqual(50.0, spec143_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, spec143_data.readX(0)[-1])
+        self.assertTrue(isinstance(spec143, MatrixWorkspace))
 
-        _equal_within_tolerance(self, 2.27289862507e-06, spec143_data.readY(1)[0])
-        _equal_within_tolerance(self, 3.49287467421e-05, spec143_data.readY(1)[-1])
+        self.assertAlmostEqual(50.0, spec143.readX(0)[0])
+        self.assertAlmostEqual(562.0, spec143.readX(0)[-1])
 
-        spec144 = fitted_spec[-1]
-        self.assertTrue(isinstance(spec144, WorkspaceGroup))
+        _equal_within_tolerance(self, 2.27289862507e-06, spec143.readY(1)[0])
+        _equal_within_tolerance(self, 3.49287467421e-05, spec143.readY(1)[-1])
 
-        spec144_data = spec144[0]
-        self.assertTrue(isinstance(spec144_data, MatrixWorkspace))
+        spec144 = fitted_spec[1]
+        self.assertTrue(isinstance(spec144, MatrixWorkspace))
 
-        self.assertAlmostEqual(50.0, spec144_data.readX(0)[0])
-        self.assertAlmostEqual(562.0, spec144_data.readX(0)[-1])
+        self.assertAlmostEqual(50.0, spec144.readX(0)[0])
+        self.assertAlmostEqual(562.0, spec144.readX(0)[-1])
 
-        _equal_within_tolerance(self, 5.9811662524e-06, spec144_data.readY(1)[0])
-        _equal_within_tolerance(self, 4.7479831769e-05, spec144_data.readY(1)[-1])
+        _equal_within_tolerance(self, 5.9811662524e-06, spec144.readY(1)[0])
+        _equal_within_tolerance(self, 4.7479831769e-05, spec144.readY(1)[-1])
 
         chisq_values = self._fit_results[2]
         self.assertTrue(isinstance(chisq_values, list))
@@ -309,4 +298,4 @@ class SpectraBySpectraForwardSpectraNoBackground(stresstesting.MantidStressTest)
         exit_iteration = self._fit_results[3]
         self.assertTrue(isinstance(exit_iteration, int))
 
-#====================================================================================
+# ====================================================================================
diff --git a/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5 b/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5
index 6379b10f439cf65a48db6f156b37e35b6d6848b0..34847a850c8beed54bf903bf86197bdb8461a35b 100644
--- a/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5
+++ b/Testing/SystemTests/tests/analysis/reference/PG3_9829_sum_reference.gsa.md5
@@ -1 +1 @@
-7873b53ec5b94b039b2fcb040d513cd4
+90f6f9ae594a0686a27830b2cc5fb029
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..0cbd2798cb6956cfc4488571a970647ad34ba81a
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference.nxs.md5
@@ -0,0 +1 @@
+bc8b84337511fe5f442717dedf98a21f
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_HAB_1D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_HAB_1D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..1cdf7b85235185e6532d3f38d8a2e4db90c54755
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_HAB_1D.nxs.md5
@@ -0,0 +1 @@
+bc37abb12a322758a7f3d82d5428f7ae
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_1D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_1D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..4bc3d75385c003e18cf34ae78294160fa1fbbb34
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_1D.nxs.md5
@@ -0,0 +1 @@
+d1495cecef8cfce8a98d149e1083bcfd
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_2D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_2D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..4338ef6a163ad6166f0eaa29502b4822116dcfe5
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_LAB_2D.nxs.md5
@@ -0,0 +1 @@
+4f4e0bcc36203a42cb1f4f0f6dd737c0
diff --git a/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_Merged_1D.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_Merged_1D.nxs.md5
new file mode 100644
index 0000000000000000000000000000000000000000..5db5641783926ed3d170363b9853befe070b728d
--- /dev/null
+++ b/Testing/SystemTests/tests/analysis/reference/SANS2D_ws_D20_reference_Merged_1D.nxs.md5
@@ -0,0 +1 @@
+b12a20b840ddabe017c23f17733a0c75
diff --git a/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5 b/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5
index d578509ceb8b34845ce16e9c0d50b0dd923cfcb6..97e9140a596f50dab50aca9dce3dc5457c8f8e15 100644
--- a/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5
+++ b/Testing/SystemTests/tests/analysis/reference/VesuvioFittingTest.nxs.md5
@@ -1 +1 @@
-f87942c1b37e17f184c27274137d0953
+a12119b70164b4e192b48488f85bcf75
diff --git a/Testing/SystemTests/tests/analysis/utils.py b/Testing/SystemTests/tests/analysis/utils.py
deleted file mode 100644
index b96857118b229ee480557f663674d8dd5535d04f..0000000000000000000000000000000000000000
--- a/Testing/SystemTests/tests/analysis/utils.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#pylint: disable=invalid-name
-''' SVN Info:   The variables below will only get subsituted at svn checkout if
-                the repository is configured for variable subsitution.
-
-        $Id$
-        $HeadURL$
-|=============================================================================|=======|
-1                                                                            80   <tab>
-'''
-import os
-import inspect
-import opcode
-
-
-def ls():
-    print os.getcwd()
-    files=os.listdir(os.getcwd())
-    for i in range(0,len(files)):
-
-        print files[i]
-
-
-def pwd():
-    print os.getcwd()
-
-
-def cd(dir_str):
-    os.chdir(dir_str)
-
-
-def lineno():
-    """
-    call signature(s)::
-    lineno()
-
-    Returns the current line number in our program.
-
-    No Arguments.
-
-
-    Working example
-    >>> print "This is the line number ",lineno(),"\n"
-
-    """
-    return inspect.currentframe().f_back.f_lineno
-
-
-def decompile(code_object):
-    '''     taken from http://thermalnoise.wordpress.com/2007/12/30/exploring-python-bytecode/
-
-        decompile extracts dissasembly information from the byte code and stores it in a
-                list for further use.
-
-        call signature(s)::
-                instructions=decompile(f.f_code)
-
-        Required arguments:
-        =========   =====================================================================
-        f.f_code    A  bytecode object ectracted with inspect.currentframe()
-                    or anyother mechanism that returns byte code.
-
-        Optional keyword arguments: NONE
-
-        Outputs:
-        =========   =====================================================================
-        instructions  a list of offsets, op_codes, names, arguments, argument_type,
-                        argument_value which can be deconstructed to find out various things
-                        about a function call.
-
-        Examples:
-
-        f = inspect.currentframe().f_back.f_back
-        i = f.f_lasti  # index of the last attempted instruction in byte code
-        ins=decompile(f.f_code)
-        pretty_print(ins)
-
-
-        '''
-    code = code_object.co_code
-    variables = code_object.co_cellvars + code_object.co_freevars
-    instructions = []
-    n = len(code)
-    i = 0
-    e = 0
-    while i < n:
-        i_offset = i
-        i_opcode = ord(code[i])
-        i = i + 1
-        if i_opcode >= opcode.HAVE_ARGUMENT:
-            i_argument = ord(code[i]) + (ord(code[i+1]) << (4*2)) + e
-            i = i +2
-            if i_opcode == opcode.EXTENDED_ARG:
-                e = iarg << 16
-            else:
-                e = 0
-            if i_opcode in opcode.hasconst:
-                i_arg_value = repr(code_object.co_consts[i_argument])
-                i_arg_type = 'CONSTANT'
-            elif i_opcode in opcode.hasname:
-                i_arg_value = code_object.co_names[i_argument]
-                i_arg_type = 'GLOBAL VARIABLE'
-            elif i_opcode in opcode.hasjrel:
-                i_arg_value = repr(i + i_argument)
-                i_arg_type = 'RELATIVE JUMP'
-            elif i_opcode in opcode.haslocal:
-                i_arg_value = code_object.co_varnames[i_argument]
-                i_arg_type = 'LOCAL VARIABLE'
-            elif i_opcode in opcode.hascompare:
-                i_arg_value = opcode.cmp_op[i_argument]
-                i_arg_type = 'COMPARE OPERATOR'
-            elif i_opcode in opcode.hasfree:
-                i_arg_value = variables[i_argument]
-                i_arg_type = 'FREE VARIABLE'
-            else:
-                i_arg_value = i_argument
-                i_arg_type = 'OTHER'
-        else:
-            i_argument = None
-            i_arg_value = None
-            i_arg_type = None
-        instructions.append( (i_offset, i_opcode, opcode.opname[i_opcode], i_argument, i_arg_type, i_arg_value) )
-    return instructions
-
-# Print the byte code in a human readable format
-
-
-def pretty_print(instructions):
-    print '%5s %-20s %3s  %5s  %-20s  %s' %  ('OFFSET', 'INSTRUCTION', 'OPCODE', 'ARG', 'TYPE', 'VALUE')
-    for (offset, op, name, argument, argtype, argvalue) in instructions:
-        print '%5d  %-20s (%3d)  ' % (offset, name, op),
-        if argument is not None:
-            print '%5d  %-20s  (%s)' % (argument, argtype, argvalue),
-        print
-
-
-def expecting():
-        #{{{
-    '''
-        call signature(s)::
-
-
-        Return how many values the caller is expecting
-
-        Required arguments:	NONE
-
-        Optional keyword arguments: NONE
-
-
-        Outputs:
-        =========   =====================================================================
-        numReturns	Number of return values on expected on the left of the equal sign.
-
-        Examples:
-
-        This function is not designed for cammand line use.  Using in a function can
-        follow the form below.
-
-
-        def test1():
-                def f():
-                        r = expecting()
-                        print r
-                        if r == 0:
-                                return None
-                        if r == 1:
-                                return 0
-                        return range(r)
-
-                f()
-                print "---"
-                a = f()
-                print "---", a
-                a, b = f()
-                print "---", a,b
-                a, b = c = f()
-                print "---", a,b,c
-                a, b = c = d = f()
-                print "---", a,b,c
-                a = b = f()
-                print "---", a,b
-                a = b, c = f()
-                print "---", a,b,c
-                a = b = c, d = f()
-                print "---", a,b,c,d
-                a = b, c = d = f()
-                print "---", a,b,c,d
-                a, b = c, d = f()
-                print "---", a,b,c,d
-
-        Developers Notes:
-
-                Now works with an multiple assigments correctly.  This is verified by
-                test() and test1() below
-        '''
-    f = inspect.currentframe().f_back.f_back
-    i = f.f_lasti  # index of the last attempted instruction in byte code
-    ins=decompile(f.f_code)
-        #pretty_print(ins)
-    for (offset, dummy_op, name, argument, dummy_argtype, dummy_argvalue) in ins:
-        if offset > i:
-            if name == 'POP_TOP':
-                return 0
-            if name == 'UNPACK_SEQUENCE':
-                return argument
-            if name == 'CALL_FUNCTION':
-                return 1
-
-#pylint: disable=too-many-locals,too-many-branches
-
-
-def lhs(output='names'):
-    '''
-        call signature(s)::
-
-        Return how many values the caller is expecting
-
-        Required arguments:	NONE
-
-        Optional keyword arguments: NONE
-
-
-        Outputs:
-        =========   =====================================================================
-        numReturns	Number of return values on expected on the left of the equal sign.
-
-        Examples:
-
-        This function is not designed for cammand line use.  Using in a function can
-        follow the form below.
-
-        '''
-
-    f = inspect.currentframe().f_back.f_back
-    i = f.f_lasti  # index of the last attempted instruction in byte code
-    ins=decompile(f.f_code)
-        #pretty_print(ins)
-
-    CallFunctionLocation={}
-    first=False
-    StartIndex=0
-    StartOffset=0
-        # we must list all of the operators that behave like a function call in byte-code
-    OperatorNames=set(['CALL_FUNCTION','UNARY_POSITIVE','UNARY_NEGATIVE',
-                       'UNARY_NOT','UNARY_CONVERT','UNARY_INVERT','GET_ITER',
-                       'BINARY_POWER','BINARY_MULTIPLY','BINARY_DIVIDE',
-                       'BINARY_FLOOR_DIVIDE', 'BINARY_TRUE_DIVIDE', 'BINARY_MODULO',
-                       'BINARY_ADD','BINARY_SUBTRACT','BINARY_SUBSCR',
-                       'BINARY_LSHIFT','BINARY_RSHIFT','BINARY_AND','BINARY_XOR','BINARY_OR'])
-
-    for index in range(len(ins)):
-        (offset, op, name, argument, argtype, argvalue) = ins[index]
-        if name in OperatorNames:
-            if not first:
-                CallFunctionLocation[StartOffset] = (StartIndex,index)
-            StartIndex=index
-            StartOffset = offset
-
-    (offset, op, name, argument, argtype, argvalue) = ins[-1]
-    CallFunctionLocation[StartOffset]=(StartIndex,len(ins)-1) # append the index of the last entry to form the last boundary
-
-        #print CallFunctionLocation
-        #pretty_print( ins[CallFunctionLocation[i][0]:CallFunctionLocation[i][1]] )
-        # In our case i should always be the offset of a Call_Function instruction. We can use this to baracket
-        # the bit which we are interested in
-
-    OutputVariableNames=[]
-    (offset, op, name, argument, argtype, argvalue) = ins[CallFunctionLocation[i][0] + 1]
-    if name == 'POP_TOP':  # no Return Values
-        pass
-                #return OutputVariableNames
-    if name == 'STORE_FAST' or name == 'STORE_NAME': # One Return Value
-        OutputVariableNames.append(argvalue)
-    if name == 'UNPACK_SEQUENCE': # Many Return Values, One equal sign
-        for index in range(argvalue):
-            (offset_, op_, name_, argument_, argtype_, argvalue_) = ins[CallFunctionLocation[i][0] + 1 + 1 +index]
-            OutputVariableNames.append(argvalue_)
-    maxReturns = len(OutputVariableNames)
-    if name == 'DUP_TOP': # Many Return Values, Many equal signs
-                # The output here should be a multi-dim list which mimics the variable unpacking sequence.
-                # For instance a,b=c,d=f() => [ ['a','b'] , ['c','d'] ]
-                #              a,b=c=d=f() => [ ['a','b'] , 'c','d' ]  So on and so forth.
-
-                # put this in a loop and stack the results in an array.
-        count = 0
-        maxReturns = 0 # Must count the maxReturns ourselves in this case
-        while count < len(ins[CallFunctionLocation[i][0] :CallFunctionLocation[i][1]]):
-            (offset_, op_, name_, argument_, argtype_, argvalue_) = ins[CallFunctionLocation[i][0]+count]
-                        #print 'i= ',i,'count = ', count, 'maxReturns = ',maxReturns
-            if name_ == 'UNPACK_SEQUENCE': # Many Return Values, One equal sign
-                hold=[]
-                                #print 'argvalue_ = ', argvalue_, 'count = ',count
-                if argvalue_ > maxReturns:
-                    maxReturns=argvalue_
-                for index in range(argvalue_):
-                    (_offset_, _op_, _name_, _argument_, _argtype_, _argvalue_) = ins[CallFunctionLocation[i][0] + count+1+index]
-                    hold.append(_argvalue_)
-                count = count + argvalue_
-                OutputVariableNames.append(hold)
-                        # Need to now skip the entries we just appended with the for loop.
-            if name_ == 'STORE_FAST' or name_ == 'STORE_NAME': # One Return Value
-                if 1 > maxReturns:
-                    maxReturns = 1
-                OutputVariableNames.append(argvalue_)
-            count = count + 1
-
-        # Now that OutputVariableNames is filled with the right stuff we need to output the correct thing. Either the maximum number of
-        # variables to unpack in the case of multiple ='s or just the length of the array or just the naames of the variables.
-
-    if output== 'names':
-        return OutputVariableNames
-    elif output == 'number':
-        return maxReturns
-    elif output == 'both':
-        return (maxReturns,OutputVariableNames)
-
-    return 0 # Should never get to here
diff --git a/docs/source/algorithms/AlignComponents-v1.rst b/docs/source/algorithms/AlignComponents-v1.rst
index 389ebe5352909b6366fca00dbd6b779ae8a87e50..86b823e1908db0dc6df25516888a6f79951b473d 100644
--- a/docs/source/algorithms/AlignComponents-v1.rst
+++ b/docs/source/algorithms/AlignComponents-v1.rst
@@ -62,13 +62,13 @@ Usage
 
 .. testcode:: position
 
-      LoadCalFile(InstrumentName="PG3",
+      ws = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
+      LoadCalFile(InputWorkspace=ws,
             CalFilename="PG3_golden.cal",
             MakeGroupingWorkspace=False,
             MakeOffsetsWorkspace=True,
             MakeMaskWorkspace=True,
             WorkspaceName="PG3")
-      ws = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
       component="bank26"
       print "Start position is",ws.getInstrument().getComponentByName(component).getPos()
       AlignComponents(CalibrationTable="PG3_cal",
@@ -91,13 +91,13 @@ Output:
 
 .. testcode:: rotation
 
-      LoadCalFile(InstrumentName="PG3",
+      ws = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
+      LoadCalFile(InputWorkspace=ws,
 	    CalFilename="PG3_golden.cal",
 	    MakeGroupingWorkspace=False,
 	    MakeOffsetsWorkspace=True,
 	    MakeMaskWorkspace=True,
 	    WorkspaceName="PG3")
-      ws = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
       components="bank25,bank46"
       bank25Rot = ws.getInstrument().getComponentByName("bank25").getRotation().getEulerAngles()
       bank46Rot = ws.getInstrument().getComponentByName("bank46").getRotation().getEulerAngles()
@@ -128,7 +128,8 @@ Output:
 
 .. testcode:: sample
 
-      LoadCalFile(InstrumentName="PG3",
+      ws = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
+      LoadCalFile(InputWorkspace=ws,
 	    CalFilename="PG3_golden.cal",
 	    MakeGroupingWorkspace=False,
 	    MakeOffsetsWorkspace=True,
@@ -137,7 +138,6 @@ Output:
       # Mask banks that don't have calibration data
       MaskBTP(Workspace='PG3_mask', Instrument='POWGEN',
 	      Bank='22-25,42-45,62-66,82-86,102-105,123,124,143,144,164,184,204')
-      ws = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
       print "Start sample position is",ws.getInstrument().getSample().getPos().getZ()
       AlignComponents(CalibrationTable="PG3_cal",
             Workspace=ws,
diff --git a/docs/source/algorithms/CountReflections-v1.rst b/docs/source/algorithms/CountReflections-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a04197df2de978780c3669e55a7429f93af6f174
--- /dev/null
+++ b/docs/source/algorithms/CountReflections-v1.rst
@@ -0,0 +1,89 @@
+
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm computes some crystallographic data set quality indicators that are based
+on counting reflections according to their Miller indices HKL. Intensity information is not
+required for these indicators, so that the algorithm can also be used with predicted data
+(for example generated by :ref:`algm-PredictPeaks`).
+
+According to the specified lattice centering and the resolution boundaries, a set of
+theoretically measurable reflections is generated. How the reflections are mapped to
+a set of :math:`N_{theor.}` unique reflections depends on the supplied point group. Then the
+:math:`N_{observed}` actually observed peaks from the input workspace are assigned to their
+respective unique reflection, yielding :math:`N_{unique}` observed unique reflections.
+
+From this assignment it is possible to calculate the following indicators:
+
+  * Unique reflections: :math:`N_{unique}`
+  * Completeness: :math:`\frac{N_{unique}}{N_{theor.}}`
+  * Redundancy: :math:`\frac{N_{observed}}{N_{unique}}`
+  * Multiply observed reflections: :math:`\frac{N_{unique} | N^{hkl}_{observed} > 1}{N_{unique}}`
+
+Furthermore, the algorithm optionally produces a list of missing reflections. In this list,
+each missing unique reflection is expanded to all symmetry equivalents according to the point
+group. For example, if the reflection family :math:`\left{001\right}` was missing
+with point group :math:`\bar{1}`, the list would contain :math:`(001)` and :math:`(00\bar{1})`.
+
+The reason for expanding the unique reflections is to make the list more useful as an input
+to :ref:`algm-PredictPeaks` again.
+
+.. note::
+
+    This algorithm has some overlap with :ref:`algm-SortHKL`, which computes some of the indicators this
+    algorithm calculates, but in addition also evaluates intensity information. SortHKL only works with
+    peaks that carry intensity data while this algorithm also works without intensities.
+
+Usage
+-----
+
+The usage example uses the same data as the usage test in :ref:`algm-SortHKL`, but produces slightly different
+data, because some intensities in the input file are 0, so these reflections are ignored by :ref:`algm-SortHKL`:
+
+.. testcode:: CountReflectionsExample
+
+    # Load example peak data and find cell
+    peaks = LoadIsawPeaks(Filename=r'Peaks5637.integrate')
+
+    FindUBUsingFFT(peaks, MinD=0.25, MaxD=10, Tolerance=0.2)
+    SelectCellWithForm(peaks, FormNumber=9, Apply=True, Tolerance=0.15)
+    OptimizeLatticeForCellType(peaks, CellType='Hexagonal', Apply=True, Tolerance=0.2)
+
+    # Run the SortHKL algorithm
+    unique, completeness, redundancy, multiple = CountReflections(peaks, PointGroup='-3m1',
+                                                                  LatticeCentering='Robv', MinDSpacing=0.205,
+                                                                  MaxDSpacing=2.08, MissingReflectionsWorkspace='')
+
+    print 'Data set statistics:'
+    print '             Peaks: {0}'.format(peaks.getNumberPeaks())
+    print '            Unique: {0}'.format(unique)
+    print '      Completeness: {0}%'.format(round(completeness * 100, 2))
+    print '        Redundancy: {0}'.format(round(redundancy, 2))
+    print ' Multiply observed: {0}%'.format(round(multiple*100, 2))
+
+Output:
+
+.. testoutput:: CountReflectionsExample
+
+    Data set statistics:
+                 Peaks: 434
+                Unique: 358
+          Completeness: 9.57%
+            Redundancy: 1.21
+     Multiply observed: 20.67%
+
+The resulting completeness is slightly higher than in the SortHKL case, but for actual statistics it might be
+better to remove the zero intensity peaks from the workspace prior to running the algorithm.
+
+.. categories::
+
+.. sourcelink::
+
diff --git a/docs/source/algorithms/CreateChunkingFromInstrument-v1.rst b/docs/source/algorithms/CreateChunkingFromInstrument-v1.rst
index 952d5eb664dc87e4ddb41850a3b3228f92cff214..456c573450c18e6eef29cc3687751bb5ab1d495d 100644
--- a/docs/source/algorithms/CreateChunkingFromInstrument-v1.rst
+++ b/docs/source/algorithms/CreateChunkingFromInstrument-v1.rst
@@ -22,11 +22,12 @@ will return an empty table workspace.
 Usage
 -----
 
-**Example: Powgen**  
+**Example: Powgen**
 
 .. testcode:: ExPowgen
-   
-   ws = CreateChunkingFromInstrument(InstrumentName="pg3", ChunkBy="Group")
+
+   pg3 = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
+   ws = CreateChunkingFromInstrument(InputWorkspace=pg3, ChunkBy="Group")
    print "Created %i Chunks" % ws.rowCount()
 
 Output:
@@ -35,10 +36,10 @@ Output:
 
    Created 4 Chunks
 
-**Example: Snap**  
+**Example: Snap**
 
 .. testcode:: ExSnap
-   
+
    ws = CreateChunkingFromInstrument(InstrumentName="snap", ChunkNames="East,West", MaxBankNumber=20)
    print "Created %i Chunks" % ws.rowCount()
 
@@ -51,7 +52,3 @@ Output:
 .. categories::
 
 .. sourcelink::
-
-
-
-
diff --git a/docs/source/algorithms/CreateSampleWorkspace-v1.rst b/docs/source/algorithms/CreateSampleWorkspace-v1.rst
index cc67f7a07c7f98597d46c3f22c4e26b92d8f095d..633e93a8579651bd2529f8fb2b6a46ac43a1ed6f 100644
--- a/docs/source/algorithms/CreateSampleWorkspace-v1.rst
+++ b/docs/source/algorithms/CreateSampleWorkspace-v1.rst
@@ -59,6 +59,10 @@ If "NumMonitors" is also given the first monitor is created half-way between the
 sample and the first bank, then between each bank (or where the banks would be
 if "NumMonitors" > "NumBanks").
 
+If "NumScanPoints" > 1 then a scanning workspace is created, that is one with time
+indexed positions and rotations. The scan is set up such that for each scan point 
+all the detectors are rotated by 1 degree around the sample.
+
 Usage
 -----
 
diff --git a/docs/source/algorithms/DeltaPDF3D-v1.rst b/docs/source/algorithms/DeltaPDF3D-v1.rst
index 51838b354f117a231a34cc36c311bb7c217b33ad..0d0e00149ea3b3f9ce4c4663a5afde1dc05ea036 100644
--- a/docs/source/algorithms/DeltaPDF3D-v1.rst
+++ b/docs/source/algorithms/DeltaPDF3D-v1.rst
@@ -25,7 +25,11 @@ The input workspace must be a :ref:`MDHistoWorkspace
 The convolution option requires `astropy
 <http://docs.astropy.org/en/stable/index.html>`_ to be installed as it
 uses `astropy.convolution
-<http://docs.astropy.org/en/stable/convolution/>`_.
+<http://docs.astropy.org/en/stable/convolution/>`_. The convolution
+can be very slow for large workspaces, it will attempt to use
+astropy.convolution.convolve_fft (which is fast but only works for
+small workspace) but will use astropy.convolution.convolve (which is
+slow) if the workspace is too large.
 
 References
 ----------
@@ -98,7 +102,7 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. testcode:: fft2
 
    DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft2',IntermediateWorkspace='int2',
-              RemoveReflections=True,Size=0.4,Convolution=False)
+              RemoveReflections=True,Size=0.3,Convolution=False)
    print "The value at [1,0,0] is " + str(mtd['fft2'].signalAt(1866))
    print "The value at [0,1,0] is " + str(mtd['fft2'].signalAt(2226))
 
@@ -123,7 +127,7 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. testcode:: fft3
 
    DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft3',IntermediateWorkspace='int3',
-              RemoveReflections=True,Size=0.4,CropSphere=True,SphereMax=3,Convolution=False)
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=False)
    print "The value at [1,0,0] is " + str(mtd['fft3'].signalAt(1866))
    print "The value at [0,1,0] is " + str(mtd['fft3'].signalAt(2226))
 
@@ -143,12 +147,38 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. |int3| image:: /images/DeltaPDF3D_int3.png
    :width: 100%
 
+**Removing Reflections and crop to sphere with fill value**
+The fill value should be about the background level
+
+.. testcode:: fft3_2
+
+   DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft3',IntermediateWorkspace='int3',
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=False)
+   print "The value at [1,0,0] is " + str(mtd['fft3'].signalAt(1866))
+   print "The value at [0,1,0] is " + str(mtd['fft3'].signalAt(2226))
+
+.. testoutput:: fft3_2
+
+   The value at [1,0,0] is -477.173658361
+   The value at [0,1,0] is 501.081754175
+
++---------------------------------------------------------------------+---------------------------------------------------------------------+
+| Intermediate workspace after reflections removed and crop to sphere | Resulting 3D-ΔPDF                                                   |
++---------------------------------------------------------------------+---------------------------------------------------------------------+
+| |int3_2|                                                            | |fft3_2|                                                            |
++---------------------------------------------------------------------+---------------------------------------------------------------------+
+
+.. |fft3_2| image:: /images/DeltaPDF3D_fft3_2.png
+   :width: 100%
+.. |int3_2| image:: /images/DeltaPDF3D_int3_2.png
+   :width: 100%
+
 **Applying convolution**
 
 .. code-block:: python
 
    DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft4',IntermediateWorkspace='int4'
-              RemoveReflections=True,Size=0.4,CropSphere=True,SphereMax=3,Convolution=True)
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=True)
    print "The value at [1,0,0] is " + str(mtd['fft4'].signalAt(1866))
    print "The value at [0,1,0] is " + str(mtd['fft4'].signalAt(2226))
 
@@ -168,6 +198,31 @@ The IntermediateWorkspace shows the changes to the input workspace.
 .. |int4| image:: /images/DeltaPDF3D_int4.png
    :width: 100%
 
+**Applying convolution and deconvolution**
+
+.. code-block:: python
+
+   DeltaPDF3D(InputWorkspace='DeltaPDF3D_MDH',OutputWorkspace='fft5',IntermediateWorkspace='int5'
+              RemoveReflections=True,Size=0.3,CropSphere=True,SphereMax=3,Convolution=True,Deconvolution=True)
+   print "The value at [1,0,0] is " + str(mtd['fft5'].signalAt(1866))
+   print "The value at [0,1,0] is " + str(mtd['fft5'].signalAt(2226))
+
+.. code-block:: none
+
+   The value at [1,0,0] is -95.0767841089
+   The value at [0,1,0] is 99.3534883663
+
++--------------------------------------------------------------+--------------------------------------------------------------+
+| The deconvolution array, workspace signal is divided by this | Resulting 3D-ΔPDF                                            |
++--------------------------------------------------------------+--------------------------------------------------------------+
+| |deconv|                                                     | |fft5|                                                       |
++--------------------------------------------------------------+--------------------------------------------------------------+
+
+.. |fft5| image:: /images/DeltaPDF3D_fft5.png
+   :width: 100%
+.. |deconv| image:: /images/DeltaPDF3D_deconv.png
+   :width: 100%
+
 .. categories::
 
 .. sourcelink::
diff --git a/docs/source/algorithms/FindEPP-v1.rst b/docs/source/algorithms/FindEPP-v1.rst
index d8cb9345e334ac1164dbf210709472463c7eef0e..ffa00dab2f4d08f7e3b338a51b5c1626edb44c3c 100644
--- a/docs/source/algorithms/FindEPP-v1.rst
+++ b/docs/source/algorithms/FindEPP-v1.rst
@@ -28,7 +28,7 @@ Usage
                 XMin=4005.75, XMax=7995.75, BinWidth=10.5, BankDistanceFromSample=4.0)
 
     # search for elastic peak positions
-    table = FindEPP(ws)
+    table = FindEPP(ws, Version=1)
 
     # print some results
     print "The fit status is", table.row(0)['FitStatus']
@@ -47,3 +47,6 @@ Output:
 .. categories::
 
 .. sourcelink::
+	:filename: FindEPP
+	:cpp: None
+	:h: None
diff --git a/docs/source/algorithms/FindEPP-v2.rst b/docs/source/algorithms/FindEPP-v2.rst
new file mode 100644
index 0000000000000000000000000000000000000000..398731f00d3bee715e46adfbc2fcd68c93b4f5bd
--- /dev/null
+++ b/docs/source/algorithms/FindEPP-v2.rst
@@ -0,0 +1,60 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This is the rewrite of :ref:`algm-FindEPP-v1` python algorithm in C++, that offers significant performance gain when running over large workspaces.
+
+This utility algorithm attempts to search for the elastic peak position (EPP) in each spectrum of the given workspace. The algorithm estimates the starting parameters and performs Gaussian fit using the :ref:`algm-Fit` algorithm.
+
+.. note::
+    This algorithm uses very simple approach to search for an elastic peak: it suggests that the elastic peak has maximal intensity. This approach may fail in the case if the dataset contains Bragg peaks with higher intensities.
+
+As a result, `TableWorkspace <http://www.mantidproject.org/TableWorkspace>`_ with the following columns is produced: *WorkspaceIndex*, *PeakCentre*, *PeakCentreError*, *Sigma*, *SigmaError*, *Height*, *HeightError*, *chiSq* and *FitStatus*. Table rows correspond to the workspace indices.
+
+Last column will contain the status of peak finding as follows:
+
+* **success** : If the fit succeeded, the row is populated with the corresponding values obtained by the fit.
+* **fitFailed** : If the fit failed (for whatever reason). A debug message will be logged with a detailed failure message from the fit algorithm. *PeakCentre* is filled with the maximum.
+* **narrowPeak** : If there are `<3` bins around the maximum, that have `>0.5*MAX`. An information is logged, fit is not tried. *PeakCentre* is filled with the maximum.
+* **negativeMaximum** : If the maximum of the spectrum is not positive. A message will be logged in notice channel. Fit is not attempted.
+
+Usage
+-----
+**Example: Find EPP in the given workspace.**
+
+.. testcode:: ExFindEPP
+
+    # create sample workspace
+    ws = CreateSampleWorkspace(Function="User Defined", UserDefinedFunction="name=LinearBackground, \
+                A0=0.3;name=Gaussian, PeakCentre=6000, Height=5, Sigma=75", NumBanks=2, BankPixelWidth=1,
+                XMin=4005.75, XMax=7995.75, BinWidth=10.5, BankDistanceFromSample=4.0)
+
+    # search for elastic peak positions
+    table = FindEPP(ws)
+
+    # print some results
+    print "The fit status is", table.row(0)['FitStatus']
+    print "The peak centre is at", round(table.row(0)['PeakCentre'], 2), "microseconds"
+    print "The peak height is", round(table.row(0)['Height'],2)
+
+Output:
+
+.. testoutput:: ExFindEPP
+
+    The fit status is success
+    The peak centre is at 6005.25 microseconds
+    The peak height is 4.84
+
+
+.. categories::
+
+.. sourcelink::
+   :filename: FindEPP 
+   :py: None
diff --git a/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst b/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst
index f50cd680b8aa6d6b5fb3b329c52789574b1bcec3..0b66e985b252fd3cc46560576262357f9e2e9101 100644
--- a/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst
+++ b/docs/source/algorithms/IndirectILLEnergyTransfer-v1.rst
@@ -11,7 +11,7 @@ Description
 
 This is a part of multi-algorithm reduction workflow for **IN16B** indirect geometry instrument at **ILL**.
 It handles the first steps of the reduction chain, such as grouping of the detectors, normalizing to monitor dependent on the reduction type.
-It performs transformation of the axes; x-axis from channel number to energy transfer, y-axis to scattering angle.
+It performs transformation of the axes; x-axis from channel number to energy transfer, and optionally y-axis to scattering angle or elastic momentum transfer.
 It handles **automatically** all three types of data (QENS, EFWS, IFWS) recorded with or without mirror sense.
 Note, that following the standard, the ``Unit`` for energy transfer (``DeltaE``) will be mili-elevtron-volts (``mev``).
 This algorithm is intended to handle only single file at a time, although if multiple files are given, they will be automatically summed at raw level, i.e. while loading.
diff --git a/docs/source/algorithms/IntegrateEllipsoidsTwoStep-v1.rst b/docs/source/algorithms/IntegrateEllipsoidsTwoStep-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..207862868fef5833583a444c34be8935faed6a98
--- /dev/null
+++ b/docs/source/algorithms/IntegrateEllipsoidsTwoStep-v1.rst
@@ -0,0 +1,327 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+Overview and similar algorithms
+###############################
+
+This algorithm will integrate disjoint single crystal Bragg peaks by
+summing the number of raw or weighted events in a 3D ellipsoidal peak region in
+reciprocal space (See *IntegrateInHKL* option for integrating in HKL) 
+and subtracting an estimate of the background obtained
+from an ellipsoidal shell. In some ways it is similar to the
+:ref:`algm-IntegratePeaksMD` algorithm. In particular the size parameters to
+this algorithm are also specified in inverse Angstroms and the
+background subtraction is done in the same way for both the intensity
+and the estimated standard deviations. However, this algorithm differs
+from :ref:`algm-IntegratePeaksMD` in several critical ways.
+
+-  This algorithm works directly with raw or weighted events 
+   while :ref:`algm-IntegratePeaksMD` uses **MDEvents** from 
+   `MDEventWorkspace <http://www.mantidproject.org/MDEventWorkspace>`_.
+-  This algorithm uses 3D ellipsoidal regions with aspect ratios that
+   are adapted to the set of events that are near the peak center, while
+   :ref:`algm-IntegratePeaksMD` uses spherical regions.
+-  This algorithm includes an option to automatically choose the size of
+   the ellipsoidal regions based on the statistics of the set of events
+   near the peak.
+-  This algorithm only applies to peaks with integral HKL values and as
+   currently implemented it cannot be used to integrate ellipsoidal
+   regions at other locations in reciprocal space.
+
+This algorithm is very similar to the :ref:`algm-IntegrateEllipsoids` algorithm.
+But differs in how it treats weak peaks. Whether a peaks is classified as weak
+or strong is based on computing a rough estimate for the signal to noise ratio
+for each peak. The *WeakPeakThreshold* parameter is used to control this
+threshold.
+
+For strong peaks the algorithm calculates the three principal axes of the events 
+near a peak, and uses the standard deviations in the directions of the principal 
+axes to determine the aspect ratio of ellipsoids used for the peak and 
+background regions. This is identical to the method used in 
+:ref:`algm-IntegrateEllipsoids`.
+
+For weak peaks the nearest strong peak is found and the ellipsoid used to
+integrate the strong peak is used instead. After the integration has been
+performed the calculated intensity is modified by a fraction of a standard unit
+contour computed for the strong peak.
+
+The method is based on the ILL program Racer and the following paper:
+
+ - Wilkinson, C., et al. "Integration of single-crystal reflections using area 
+   multidetectors." *Journal of Applied Crystallography* 21.5 (1988): 471-478.
+
+Explanation of Inputs
+#####################
+
+-  The event data to be integrated is obtained from an ordinary
+   :ref:`EventWorkspace <EventWorkspace>` 
+   with an X-axis in time-of-flight, as loaded from a
+   NeXus event file. This algorithm maps the events to reciprocal space 
+   using *PeaksWorkwpace* with indexed peaks to determine the parameters 
+   of the transformation into the reciprocal space (UB matrix)
+
+-  The peaks to be integrated are are also obtained from a *PeaksWorkspace*. The
+   peaks must be indexed, and any peaks indexed as (0,0,0) will be
+   ignored. The HKL values for valid peaks should all be integers, to
+   make this check for unindexed peaks reliable.
+
+-  Only events that are near a peak are considered when constructing the
+   ellipsoids. The *RegionRadius* specifies the maximum distance from the
+   peak center to an event in reciprocal space, for that event to used.
+   See the figure below. Also, each event will be counted for at most
+   one peak, the one with the nearest HKL value. The RegionRadius should
+   be specified to be just slightly larger than the expected peak region
+   to avoid overlap with other peaks, and to avoid including excessive
+   background. As the size of the *RegionRadius* increases, the ellipsoids
+   will become more spherical and less well adapted to the actual shape
+   of the peak.
+
+.. figure:: /images/IntegrateEllipsoids.png
+   :alt: IntegrateEllipsoids.png
+    
+   IntegrateEllipsoidsTwoStep algorithm regions map.
+
+-  If the *SpecifySize* option is selected, then the user MUST specify the
+   *PeakSize*, *BackgroundInnerSize* and *BackgroundOuterSize*. In this mode, the
+   algorithm is similar to the :ref:`algm-IntegratePeaksMD` algorithm. As shown
+   in the figure, these values determine the length of the major axis for the
+   ellipsoidal peak region, and of the inner and outer ellipsoids bounding the
+   background region. The same major axis lengths are used for all peaks, but the
+   lengths of the other two axes of the ellipsoids are adjusted based on the
+   standard deviations of the events in those directions. If *SpecifySize* is
+   false, then the major axis length for each peak will be set to include a range
+   of plus or minus three times the standard deviation of the events in that
+   direction. That is, *PeakSize* is set to three times the standard deviation in
+   the direction of the first principal axis. Also, in this case the
+   *BackgroundInnerSize* is set to the *PeakSize* and the *BackgroundOuterSize*
+   is set so that the background ellipsoidal shell has the same volume as the
+   peak ellipsoidal region. If specified by the user, these parameters must be
+   ordered correctly with: :math:`0 < PeakSize \leq BackgroundInnerSize` and
+   :math:`BackgroundInnerSize < BackgroundOuterSize \leq RegionRadius`
+
+-  The top 1% of the background events are removed so that there are no
+   intensity spikes near the edges.
+
+-  *AdaptiveQMultiplier* can be used with *SpecifySize* for the radius to vary
+   as a function of the modulus of Q. If the *AdaptiveQBackground* option is set
+   to True, the background radius also changes so each peak has a different
+   integration radius.  Q includes the 2*pi factor.
+
+-  PeakRadius + AdaptiveQMultiplier * **|Q|** -  BackgroundOuterRadius +
+   AdaptiveQMultiplier * **|Q|** -  BackgroundInnerRadius +
+   AdaptiveQMultiplier * **|Q|**
+
+-  If the *IntegrateInHKL* option is selected, then HKL space is used for the
+   integration instead of reciprocal space.  This option may be useful for large
+   unit cells where the radius of integration needs to be very different for
+   peaks at low Q and high Q.  With this option the *PeakSize*,
+   *BackgroundInnerSize* and *BackgroundOuterSize* are specified in HKL and they
+   just need to be smaller than 0.5.
+
+-  The *WeakPeakThreshold* parameter controls the signal to noise threshold used
+   to classify peaks as either strong or weak.
+	
+-  The integrated intensities will be set in the specified *OutputWorkspace*. If
+   this is different from the input *PeaksWorkspace*, the input peaks workspace
+   will be copied to the *OutputWorkspace* before setting the integrated
+   intensities.
+
+Detailed Algorithm Description
+##############################
+
+This algorithm will integrate a list of indexed single-crystal diffraction peaks
+from a *PeaksWorkspace*, using events from an ( :ref:`EventWorkspace
+<EventWorkspace>` ).  The indexed peaks are first used to determine a UB matrix.
+The inverse of that UB matrix is then used to form lists of events that are
+close to peaks in reciprocal space. An event will be added to the list of events
+for a peak provided that the fractional :math:`h,k,l` value of that event
+(obtained by applying UB-inverse to the :math:`Q` -vector) is closer to the
+:math:`h,k,l` of that peak, than to the :math:`h,k,l` of any other peak AND the
+:math:`Q` -vector for that event is within the specified radius of the :math:`Q`
+-vector for that peak. This technique makes the algorithm suitable for nuclear
+peaks, but may not be suitable for magnetic peaks.
+
+When the lists of events near the peaks have been built, the algorithm will
+estimate the signal to noise ratio for each peak using the parameters provided.
+The *WeakPeakThreshold* parameter will be used to threshold which peaks are
+classed as strong as which are weak.
+
+Strong peaks will then be integrated. The three principal axes of the set of
+events near each peak are found, and the standard deviations of the projections
+of the events on each of the three principal axes are calculated. The principal
+axes and standard deviations for the events around a peak in the directions of
+the principal axes are used to determine an ellipsoidal region for the peak and
+an ellipsoidal shell region for the background. The number of events in the peak
+ellipsoid and background ellipsoidal shell are counted and used to determine the
+net integrated intensity of the peak.
+
+The ellipsoidal regions used for the peak and background can be obtained in two
+ways. First, the user may specify the size of the peak ellipsoid and the inner
+and outer size of the background ellipsoid. If these are specified, the values
+will be used for half the length of the major axis of an ellipsoid centered on
+the peak. The major axis is in the direction of the principal axis for which the
+standard deviation in that direction is largest. The other two axes for the
+ellipsoid are in the direction of the other two principal axes and are scaled
+relative to the major axes in proportion to their standard deviations. For
+example if the standard deviations in the direction of the other two principal
+axes are .8 and .7 times the standard deviation in the direction of the major
+axis, then the ellipse will extend only .8 and .7 times as far in the direction
+of those axes, as in the direction of the major axis. Overall, the user
+specified sizes for the *PeakSize*, *BackgroundInnerSize* and
+*BackgroundOuterSize* are similar to the *PeakRadius*, *BackgroundInnerRadius*
+and *BackgrounOuterRadius* for the :ref:`algm-IntegratePeaksMD` algorithm. The
+difference is that the regions used in this algorithm are not spherical, but are
+ellipsoidal with axis directions obtained from the principal axes of the events
+near a peak and the ellipsoid shape (relative axis lengths) is determined by the
+standard deviations in the directions of the principal axes.
+
+Second, if the user does not specify the size of the peak and background
+ellipsoids, then the three axes of the peak ellipsoid are again set to the
+principal axes of the set of nearby events but in this case their axis lengths
+are set to cover a range of plus or minus three standard deviations in the axis
+directions. In this case, the background ellipsoidal shell is chosen to have the
+same volume as the peak ellipsoid and it's inner surface is the outer surface of
+the peak ellipsoid. The outer surface of the background ellipsoidal shell is an
+ellipsoidal surface with the same relative axis lengths as the inner surface.
+
+This algorithm uses principle component analysis to determine the principle axis
+for each peak. For the event list (QLab) associated with each peak, the
+algorithm determines a covariance matrix, and uses that to establish
+eigenvectors corresponding to the principle axis (all orthogonal). The sizes of
+each principle axis are used define the region of which events will be
+counted/integrated from those already associated with each peak.
+
+Once strong peaks have been integrated the algorithm uses the parameters derived
+from there integration to integrate the remaining weak peaks. For each weak peak
+the nearest strong peak is found using a nearest neighbour search. The shape and
+principal components of the strong peak are then used to integrate the weak peak
+in the same was as described for strong peaks above. A weight determined by the
+fraction of a unit contour contained within the total integration contour of the
+strong peak is used to weight the integrated intensity for the weak peak.
+
+IntegrateIfOnEdge=False option
+###################################
+
+Edges for each bank or pack of tubes of the instrument are defined by masking
+the edges in the PeaksWorkspace instrument.  e.g. For CORELLI, tubes 1 and 16,
+and pixels 0 and 255.  Q in the lab frame for every peak is calculated, call it
+C For every point on the edge, the trajectory in reciprocal space is a straight
+line, going through:
+
+:math:`\vec{O}=(0,0,0)`
+
+Calculate a point at a fixed momentum, say k=1. 
+Q in the lab frame:
+
+:math:`\vec{E}=(-k*sin(\theta)*cos(\phi),-k*sin(\theta)*sin(\phi),k-k*cos(\phi))`
+
+Normalize E to 1: 
+
+:math:`\vec{E}=\vec{E}*(1./\left|\vec{E}\right|)`
+
+The distance from C to OE is given by:
+
+:math:`dv=\vec{C}-\vec{E}*(\vec{C} \cdot \vec{E})`
+
+If:
+
+:math:`\left|dv\right|<PeakRadius`
+
+for the integration, one of the detector trajectories on the edge is too close
+to the peak This method is also applied to all masked pixels.  If there are
+masked pixels trajectories inside an integration volume, the peak must be
+rejected.  If there are masked pixel trajectories inside the background volume,
+the background events are scaled by estimating the volume of the ellipsoid on
+the detector.
+
+Sigma from the background
+###################################
+The sigma from the background could be too small because the background contains
+events from other peaks.  In an effort to reduce this, all the background events
+are sorted and the top 1% are removed.
+
+Usage
+------
+
+**Example - IntegrateEllipsoids:**
+
+The code itself works but disabled from doc tests as takes too long to complete.
+User should provide its own event nexus file instead of **TOPAZ_3132_event.nxs**
+used within this example. The original **TOPAZ_3132_event.nxs** file is
+availible in `Mantid system tests repository
+<https://github.com/mantidproject/systemtests/tree/master/Data/TOPAZ_3132_event.nxs>`_.
+
+.. code-block:: python
+   :linenos:
+
+   #.. testcode:: exIntegrateEllipsoids
+
+   def print_tableWS(pTWS,nRows):
+       ''' Method to print part of the table workspace '''
+       tab_names=pTWS.keys();
+       
+       for name in tab_names:
+           if len(name)>8:
+              name= name[0:8];
+           print "| {0:8} ".format(name),
+       print "|\n",
+   
+       for i in xrange(0,nRows):
+           for name in tab_names:
+                 col = pTWS.column(name);
+                 data2pr=col[i]
+                 if type(data2pr) is float:
+                      print "| {0:8.3f} ".format(data2pr),
+                 else:
+                     print "| {0:8} ".format(data2pr),   
+           print "|\n",
+   
+      
+   # load test workspace
+   Load(Filename=r'TOPAZ_3132_event.nxs',OutputWorkspace='TOPAZ_3132_event',LoadMonitors='1')
+      
+   # build peak workspace necessary for IntegrateEllipsoids algorithm to work
+   ConvertToMD(InputWorkspace='TOPAZ_3132_event',QDimensions='Q3D',dEAnalysisMode='Elastic',Q3DFrames='Q_sample',LorentzCorrection='1',OutputWorkspace='TOPAZ_3132_md',\
+   MinValues='-25,-25,-25',MaxValues='25,25,25',SplitInto='2',SplitThreshold='50',MaxRecursionDepth='13',MinRecursionDepth='7')
+   FindPeaksMD(InputWorkspace='TOPAZ_3132_md',PeakDistanceThreshold='0.3768',MaxPeaks='50',DensityThresholdFactor='100',OutputWorkspace='TOPAZ_3132_peaks')   
+   FindUBUsingFFT(PeaksWorkspace='TOPAZ_3132_peaks',MinD='3',MaxD='15',Tolerance='0.12')
+   IndexPeaks(PeaksWorkspace='TOPAZ_3132_peaks',Tolerance='0.12')
+   
+   # integrate ellipsoids   
+   result=IntegrateEllipsoidsTwoStep(InputWorkspace='TOPAZ_3132_event',PeaksWorkspace='TOPAZ_3132_peaks',\
+         RegionRadius='0.25',PeakSize='0.2',BackgroundInnerSize='0.2',BackgroundOuterSize='0.25',OutputWorkspace='TOPAZ_3132_peaks')
+   
+   # print 10 rows of resulting table workspace
+   print_tableWS(result,10)
+
+**Output:**
+
+.. code-block:: python
+   :linenos:
+
+   #.. testoutput:: exIntegrateEllipsoids
+
+| RunNumbe  | DetID     | h         | k         | l         | Waveleng  | Energy    | TOF       | DSpacing  | Intens    | SigInt    | BinCount  | BankName  | Row       | Col       | QLab      | QSample   |
+|     3132  |  1124984  |    2.000  |    1.000  |    2.000  |    3.104  |    8.491  | 14482.289  |    2.025  | 120486.000  |  375.814  | 1668.000  | bank17    |  120.000  |   42.000  | [1.57771,1.21779,2.37854]  | [2.99396,0.815958,0.00317344]  |
+|     3132  |  1156753  |    3.000  |    2.000  |    3.000  |    2.085  |   18.822  | 9725.739  |    1.298  | 149543.000  |  393.038  | 1060.000  | bank17    |  145.000  |  166.000  | [2.48964,1.45725,3.88666]  | [4.52618,1.71025,0.129461]  |
+|     3132  |  1141777  |    4.000  |    2.000  |    3.000  |    1.707  |   28.090  | 7963.171  |    1.050  | 8744.000  |  106.311  |   96.000  | bank17    |   17.000  |  108.000  | [2.60836,2.31423,4.86391]  | [5.69122,1.79492,-0.452799]  |
+|     3132  |  1125241  |    4.000  |    2.000  |    4.000  |    1.554  |   33.860  | 7252.155  |    1.014  | 19740.000  |  146.164  |   83.000  | bank17    |  121.000  |   43.000  | [3.15504,2.42573,4.75121]  | [5.97829,1.63473,0.0118744]  |
+|     3132  |  1170598  |    4.000  |    3.000  |    4.000  |    1.548  |   34.124  | 7224.587  |    0.950  | 15914.000  |  131.385  |   73.000  | bank17    |  166.000  |  220.000  | [3.43363,1.70178,5.39301]  | [6.07726,2.59962,0.281759]  |
+|     3132  |  1214951  |    2.000  |    1.000  |    4.000  |    1.894  |   22.795  | 8839.546  |    1.677  | 121852.000  |  352.919  |  719.000  | bank18    |  231.000  |  137.000  | [2.73683,1.43808,2.11574]  | [3.5786,0.470838,1.00329]  |
+|     3132  |  1207827  |    3.000  |    1.000  |    4.000  |    1.713  |   27.890  | 7991.697  |    1.319  | 64593.000  |  257.707  |  447.000  | bank18    |   19.000  |  110.000  | [2.80324,2.29519,3.09134]  | [4.71517,0.554412,0.37714]  |
+|     3132  |  1232949  |    4.000  |    2.000  |    6.000  |    1.239  |   53.277  | 5782.138  |    0.934  | 18247.000  |  139.302  |   45.000  | bank18    |   53.000  |  208.000  | [4.29033,2.63319,4.46168]  | [6.52658,1.27985,1.00646]  |
+|     3132  |  1189484  |    4.000  |    1.000  |    6.000  |    1.136  |   63.418  | 5299.275  |    0.964  | 13512.000  |  120.748  |   31.000  | bank18    |  108.000  |   38.000  | [4.02414,3.39659,3.83664]  | [6.4679,0.298896,0.726133]  |
+|     3132  |  1218337  |    5.000  |    2.000  |    7.000  |    1.012  |   79.807  | 4724.051  |    0.773  | 7411.000  |   88.289  |   15.000  | bank18    |   33.000  |  151.000  | [4.96622,3.61607,5.32554]  | [7.99244,1.19363,0.892655]  |
+  
+
+.. categories::
+
+.. sourcelink::
diff --git a/docs/source/algorithms/InvertMask-v1.rst b/docs/source/algorithms/InvertMask-v1.rst
index 13c898a2d52336653b305be20b0d8f999291c859..cfee688e528af53035c69502d33e1bf565c0f3c1 100644
--- a/docs/source/algorithms/InvertMask-v1.rst
+++ b/docs/source/algorithms/InvertMask-v1.rst
@@ -10,7 +10,7 @@ Description
 -----------
 
 A NOT operation will be conducted on the input masking workspace
-(SpecialWorkspace2D)
+(SpecialWorkspace2D).
 
 Output
 ######
@@ -27,7 +27,8 @@ Usage
 .. testcode:: ExInvertPG3
 
   # Load data
-  maskws = LoadMask(Instrument="POWGEN", InputFile="Mask-PG3-19884.xml")
+  maskws = LoadMask(Instrument="POWGEN_Definition_2015-08-01.xml",
+                    InputFile="Mask-PG3-19884.xml")
 
   # Check source mask workspace
   nummasked = 0
diff --git a/docs/source/algorithms/LineProfile-v1.rst b/docs/source/algorithms/LineProfile-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fc44fa3be0bb876e0d3dff8d0b40628f0c25b0e7
--- /dev/null
+++ b/docs/source/algorithms/LineProfile-v1.rst
@@ -0,0 +1,167 @@
+
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+This algorithm extracts horizontal or vertical profiles from a MatrixWorkspace. The profile is returned as a single histogram workspace. :ref:`Ragged workspaces <Ragged_Workspace>` are not supported.
+
+The orientation of the profile is selected by the *Direction* property. By default, the line runs over the entire workspace. The length can be optionally limited by specifying the *Start* and/or the *End* properties. A region over which the profile is calculated is given by *HalfWidth*. The width is rounded to full bins so that partial bins are included entirely.
+
+Special values can be completely ignored by the *IgnoreNans* and *IgnoreInfs* properties. If a segment of the line contains special values only, it will be set to NaN.
+
+By default, the profile is calculated as an average over the line width. This behavior can be changed by the *Mode* property. The choices are:
+
+'Average'
+    Average the values. This is the default.
+
+'Sum'
+    Sum the values, weighting them by :math:`n / n_{tot}` where :math:`n` is the number of summed data points (excluding special values if *IgnoreNans* or *IgnoreInfs* is set) and :math:`n_{tot}` is the total number of data points (including special values).
+
+Usage
+-----
+
+**Example - Horizontal line profile**
+
+.. testcode:: HorizontalLineProfileExample
+
+    import numpy
+    
+    ws = CreateSampleWorkspace(
+        Function='Quasielastic Tunnelling',
+        NumBanks=1
+    )
+    
+    # Horizontal profile over spectra 5-10
+    horProfile = LineProfile(
+        InputWorkspace=ws,
+        Centre=7.5,
+        HalfWidth=2.5,
+        Start=3000,
+        End=13000
+    )
+    
+    indexMax = numpy.argmax(horProfile.readY(0))
+    epp = horProfile.readX(0)[indexMax]
+    print('Elastic peak at {}'.format(epp))
+
+Output:
+
+.. testoutput:: HorizontalLineProfileExample
+
+    Elastic peak at 10000.0
+
+**Example - Vertical line profile**
+
+.. testcode:: VerticalLineProfile
+
+    import numpy
+    
+    ws = CreateSampleWorkspace(
+        Function='Quasielastic Tunnelling',
+        NumBanks=1
+    )
+    
+    wsInTheta = ConvertSpectrumAxis(
+        InputWorkspace=ws,
+        Target='Theta'
+    )
+    
+    # Verical cuts.
+    
+    tofs = numpy.arange(3000, 7000, 500)
+    cutWSs = list()
+    for tof in tofs:
+        cutWS = LineProfile(
+            InputWorkspace=wsInTheta,
+            OutputWorkspace='cut-at-{}-us'.format(tof),
+            Direction='Vertical',
+            Centre=tof,
+            HalfWidth=250,
+            Start=0.5,  # Degrees
+            End=0.9
+        )
+        cutWSs.append(cutWS)
+    
+    for cut in cutWSs:
+        # Vertical axis holds the TOF bin edges of the cut
+        axis = cut.getAxis(1)
+        tofStart = axis.getValue(0)
+        tofEnd = axis.getValue(1)
+        # Notice the overlapping TOFs. This is because partial bins are
+        # included in their entirety.
+        print('Average intensity between {} and {} microsec: {:.03}'.format(
+            tofStart, tofEnd, cut.readY(0)[0]))
+
+Output:
+
+.. testoutput:: VerticalLineProfile
+
+    Average intensity between 2600.0 and 3400.0 microsec: 0.1
+    Average intensity between 3200.0 and 3800.0 microsec: 0.1
+    Average intensity between 3600.0 and 4400.0 microsec: 0.164
+    Average intensity between 4200.0 and 4800.0 microsec: 0.1
+    Average intensity between 4600.0 and 5400.0 microsec: 0.1
+    Average intensity between 5200.0 and 5800.0 microsec: 0.1
+    Average intensity between 5600.0 and 6400.0 microsec: 0.227
+    Average intensity between 6200.0 and 6800.0 microsec: 0.1
+
+**Example - The 'Sum' mode**
+
+.. testcode:: SumMode
+
+    import numpy
+
+    ws = CreateSampleWorkspace(
+        Function='Quasielastic Tunnelling',
+        NumBanks=1
+    )
+
+    wsInTheta = ConvertSpectrumAxis(
+        InputWorkspace=ws,
+        Target='Theta'
+    )
+
+    # Lets assing NaNs to the lower left and upper right corners
+    # of the workspace.
+    for iVert in range(wsInTheta.getNumberHistograms()):
+        for iHor in range(wsInTheta.blocksize()):
+            if iVert + iHor < 60:
+                ys = wsInTheta.dataY(iVert)
+                ys[iHor] = numpy.nan
+            elif iVert + iHor > 120:
+                ys = wsInTheta.dataY(iVert)
+                ys[iHor] = numpy.nan
+
+    centre = 0.6
+    width = 0.05
+    sumCutWS = LineProfile(wsInTheta, centre, width, Mode='Sum')
+
+    # When no NaNs are present both modes give the same result.
+    iElastic = sumCutWS.blocksize() / 2
+    y = sumCutWS.readY(0)[iElastic]
+    e = sumCutWS.readE(0)[iElastic]
+    print('Sum profile at elastic peak: {} +/- {}'.format(y, e))
+
+    # The weighting is apparent when the profile crosses some
+    # special values.
+    iEdge = sumCutWS.blocksize() / 6
+    y = sumCutWS.readY(0)[iEdge]
+    e = sumCutWS.readE(0)[iEdge]
+    print('Sum profile near NaNs: {} +/- {}'.format(y, e))
+
+.. testoutput:: SumMode
+
+    Sum profile at elastic peak: 103.45916358 +/- 10.1714877761
+    Sum profile near NaNs: 1.60000001019 +/- 2.52982213619
+
+.. categories::
+
+.. sourcelink::
+
diff --git a/docs/source/algorithms/LoadDNSLegacy-v1.rst b/docs/source/algorithms/LoadDNSLegacy-v1.rst
index 96b17ef1347e24281e2c9e7153be137f458cdab7..ebe85d2b41d2bab38e634cc1d4694c7fd83347c1 100644
--- a/docs/source/algorithms/LoadDNSLegacy-v1.rst
+++ b/docs/source/algorithms/LoadDNSLegacy-v1.rst
@@ -14,7 +14,13 @@ Description
    This algorithm is being developed for a specific instrument. It might get changed or even 
    removed without a notification, should instrument scientists decide to do so.
 
-This algorithm loads a DNS legacy data file into a :ref:`Workspace2D <Workspace2D>`. The loader rotates the detector bank in the position given in the data file. 
+This algorithm loads a DNS legacy data file into a :ref:`Workspace2D <Workspace2D>`. The loader rotates the detector bank in the position given in the data file.
+
+**Output**
+
+- For diffraction mode data (only one time channel) output is the :ref:`Workspace2D <Workspace2D>` with the X-axis in the wavelength units.
+
+- For TOF data (more than one time channel) output is the :ref:`Workspace2D <Workspace2D>` with the X-axis in TOF units. The lower bin boundary for the channel :math:`i`, :math:`t_i` is calculated as :math:`t_i = t_1 + t_{delay} + i*\Delta t`, where :math:`\Delta t` is the channel width and :math:`t_1` is the time-of-flight from the source (chopper) to sample. Given in the data file channel width is scaled by the *channel_width_factor* which can be set in the :ref:`parameter file <InstrumentParameterFile>`.
 
 **Normalization**
 
@@ -28,7 +34,9 @@ The **Normalization** option offers the following choices:
 
 **Polarisation**
 
-Since polarisation is not specified in the DNS legacy files, coil currents table is required to lookup for the polarisation and set the *polarisation* sample log. The coil currents table is a text file containing the following table.
+Since polarisation is not specified in the DNS legacy files, coil currents table is required to lookup for the polarisation and set the *polarisation* sample log. The default coil currents are given as *x_currents*, *y_currents* and *z_currents* parameters in the :ref:`parameter file <InstrumentParameterFile>` for x, y, and z polarisations, respectively.
+
+Alternatively, the text file with the coil currents table may be provided (optionally). The coil currents table is a text file containing the following table.
 
 +--------------+----------+-------+-------+-------+-------+
 | polarisation | comment  |  C_a  |  C_b  |  C_c  |  C_z  |
@@ -42,9 +50,9 @@ Since polarisation is not specified in the DNS legacy files, coil currents table
 |      x       |    7     |   0   | -2.1  | -0.97 |  2.21 |          
 +--------------+----------+-------+-------+-------+-------+
 
-First row must contain the listed column headers, other rows contain coil currents for each polarisation. Rows with different currents for one polarisation are alowed. Columns are separated by tab symbols. This table must be provided to the user by instrument scientist.  
+First row must contain the listed column headers, other rows contain coil currents for each polarisation. Rows with different currents for one polarisation are alowed. Columns are separated by tab symbols.
 
-This algorithm only supports DNS instrument in its configuration before major upgrade. 
+This algorithm only supports DNS instrument in its configuration with one detector bank (polarisation analysis).
 
 Usage
 -----
@@ -55,10 +63,9 @@ Usage
 
    # data file.
    datafile = 'dn134011vana.d_dat'
-   coilcurrents = 'currents.txt'
 
    # Load dataset
-   ws = LoadDNSLegacy(datafile, Normalization='monitor', CoilCurrentsTable=coilcurrents)
+   ws = LoadDNSLegacy(datafile, Normalization='monitor')
 
    print "This workspace has", ws.getNumDims(), "dimensions and has", ws.getNumberHistograms(), "histograms."
 
diff --git a/docs/source/algorithms/LoadMask-v1.rst b/docs/source/algorithms/LoadMask-v1.rst
index 24f96ac17f2cff7ed1efb3cb44f48e7dfd642740..9564f97bead77bc79fc38a295b4fd12dd5a8f35e 100644
--- a/docs/source/algorithms/LoadMask-v1.rst
+++ b/docs/source/algorithms/LoadMask-v1.rst
@@ -11,6 +11,7 @@ Description
 
 This algorithm is used to load a masking file, which can be in XML
 format (defined later in this page) or old-styled calibration file.
+The ``Instrument`` can be a IDF.
 
 Definition of Mask
 ------------------
@@ -63,13 +64,13 @@ Supporting ::
  * Component ID --> Detector IDs --> Workspace Indexes
  * Detector ID --> Workspace Indexes
  * Spectrum Number --> Workspace Indexes
- 
+
 
 When a spectra mask (ISIS) is used on multiple workspaces, the same masking is produced only
 if all masked workspaces have the same spectra-detector map.
-When mask is generated for one workspace and applied to workspace with different 
+When mask is generated for one workspace and applied to workspace with different
 spectra-detector mapping, the same masking can be produced by using *Workspace*
-option, using this workspace as the source of the spectra-detector mapping. 
+option, using this workspace as the source of the spectra-detector mapping.
 See the Spectra mask usage sample below.
 
 
@@ -105,7 +106,7 @@ Output:
     Is detector 20475 masked: True
 
 **Example: Using reference workspace with Spectra Mask**
-   
+
 .. testcode:: ExLoadSpectraMask
 
     # Load workspace with real spectra-derector mask
@@ -117,20 +118,20 @@ Output:
     file2remove = os.path.join(config.getString('defaultsave.directory'),'SampleMask.msk')
 
     #
-    
+
     # Load sample  spectra mask using 1:1 instrument
     mask1to1ws= LoadMask('MARI','SampleMask.msk')
-    
-    # Apply spectra mask using real workspace spectra-detector map. 
-    # Note that rws does not need to be masked like its here, We use it masked here only to avoid overhead of loading it again   
-    # it just needs to contain the same spectra-detector map as the initial workspace    
+
+    # Apply spectra mask using real workspace spectra-detector map.
+    # Note that rws does not need to be masked like its here, We use it masked here only to avoid overhead of loading it again
+    # it just needs to contain the same spectra-detector map as the initial workspace
     # you may want to try rows below to be sure:
     #rws1 = Load(Filename=r'MAR11001.raw', OutputWorkspace='realWs1',InlcudeMonitors=True);
-    
+
     # Load Mask using  instrument and spectra-detector map provided with source workspace
     maskRealSDM=LoadMask('MARI','SampleMask.msk',RefWorkspace=rws)
 
-    # Clear up rubbish 
+    # Clear up rubbish
     os.remove(file2remove)
 
     # See the difference:
@@ -145,15 +146,15 @@ Output:
         try:
             det = rws.getDetector(ind)
             if det.isMasked():
-                Sig0Masked.append(ind)                
+                Sig0Masked.append(ind)
                 Det0Masked.append(det.getID())
             #  1:1 map generated from instrument definitions
-            if mask1to1ws.readY(ind)[0]>0.5: 
+            if mask1to1ws.readY(ind)[0]>0.5:
                 det = mask1to1ws.getDetector(ind)
                 MaskedSp1to1.append(ind)
                 MaskedDet1to1.append(det.getID())
             # Real spectra-detector map:
-            if maskRealSDM.readY(ind)[0]>0.5:        
+            if maskRealSDM.readY(ind)[0]>0.5:
                 det = maskRealSDM.getDetector(ind)
                 MaskedSp_R.append(ind)
                 MaskedDet_R.append(det.getID())
@@ -167,7 +168,7 @@ Output:
     print "*** One to one mask workspace has masked the same spectra numbers but different detectors"
     print "ws 1to1 Masked spectra: ",MaskedSp1to1
     print "ws 1to1 Masked DedIDs : ",MaskedDet1to1
-    print "*** Real spectra-det-map workspace has masked different spectra numbers but the same detectors" 
+    print "*** Real spectra-det-map workspace has masked different spectra numbers but the same detectors"
     print "ws RSDM Masked spectra: ",MaskedSp_R
     print "ws RSDM Masked DedIDs : ",MaskedDet_R
     print "*** indeed the same:"
diff --git a/docs/source/algorithms/PredictPeaks-v1.rst b/docs/source/algorithms/PredictPeaks-v1.rst
index 8c49868b57daf4e45481511c30e0dfc4147adf34..03f12014258a3b441590388efa789facdaf834df 100644
--- a/docs/source/algorithms/PredictPeaks-v1.rst
+++ b/docs/source/algorithms/PredictPeaks-v1.rst
@@ -75,7 +75,7 @@ with predicted structure factor very close to 0, which are absent:
 
 .. testoutput:: ExPredictPeaksCrystalStructure
 
-    There are 294 detectable peaks.
+    There are 295 detectable peaks.
     Maximum intensity: 6101.93
     Peaks with relative intensity < 1%: 94
     Number of absences: 16
diff --git a/docs/source/algorithms/RecordPythonScript-v1.rst b/docs/source/algorithms/RecordPythonScript-v1.rst
index 611626ca168df7d775917b8cb5a18f05cde3ad5a..9edfdeb3e6afe1ef26c015523c6926566372fc27 100644
--- a/docs/source/algorithms/RecordPythonScript-v1.rst
+++ b/docs/source/algorithms/RecordPythonScript-v1.rst
@@ -66,7 +66,7 @@ Output:
     :options: +NORMALIZE_WHITESPACE
 
     The result file has the following python recorded
-    CreateSampleWorkspace(OutputWorkspace='ws',WorkspaceType='Event',Function='Multiple Peaks',UserDefinedFunction='',NumBanks='2',NumMonitors='0',BankPixelWidth='10',NumEvents='1000',Random='0',XUnit='TOF',XMin='0',XMax='20000',BinWidth='200',PixelSpacing='0.0080000000000000002',BankDistanceFromSample='5',SourceDistanceFromSample='10')
+    CreateSampleWorkspace(OutputWorkspace='ws',WorkspaceType='Event',Function='Multiple Peaks',UserDefinedFunction='',NumBanks='2',NumMonitors='0',BankPixelWidth='10',NumEvents='1000',Random='0',XUnit='TOF',XMin='0',XMax='20000',BinWidth='200',PixelSpacing='0.0080000000000000002',BankDistanceFromSample='5',SourceDistanceFromSample='10',NumScanPoints='1')
     CreateFlatEventWorkspace(InputWorkspace='ws',RangeStart='15000',RangeEnd='18000',OutputWorkspace='wsOut')
     RebinToWorkspace(WorkspaceToRebin='wsOut',WorkspaceToMatch='ws',OutputWorkspace='wsOut',PreserveEvents='1')
 
diff --git a/docs/source/algorithms/ReflectometryReductionOne-v2.rst b/docs/source/algorithms/ReflectometryReductionOne-v2.rst
index 071cf6434a864b19e69dfa3ac3efc8eb0b4d7fd7..6bf02909c137607f515063175d9cd41b753577f8 100644
--- a/docs/source/algorithms/ReflectometryReductionOne-v2.rst
+++ b/docs/source/algorithms/ReflectometryReductionOne-v2.rst
@@ -9,8 +9,9 @@
 Description
 -----------
 
-This algorithm is not meant to be used directly by users. Please see :ref:`algm-ReflectometryReductionOneAuto`
-which is a facade over this algorithm.
+This algorithm is not meant to be used directly by users. Please see
+:ref:`algm-ReflectometryReductionOneAuto` which is a facade over this
+algorithm.
 
 This algorithm reduces a single reflectometry run into a mod Q vs I/I0 workspace.
 The mandatory input properties, :literal:`WavelengthMin`, :literal:`WavelengthMax`
@@ -31,30 +32,51 @@ steps taking place in the reduction.
 
 .. diagram:: ReflectometryReductionOne_HighLvl-v2_wkflw.dot
 
+First, the algorithm checks the X units of the input workspace. If the input
+workspace is already in wavelength, summation and normalization by monitors and
+direct beam are not performed, as it is considered that the input run was
+already reduced using this algorithm.
+
+If summation is to be done in wavelength, then this is done first. The the
+conversion to wavelength and normalisation by monitors and direct beam is done,
+followed by the transmission correction. Transmission correction is always
+done, even if the input was already in wavelength.
+
+If summation is to be done in Q, this is done after the normalisations, but
+again, only if the original input was not already in wavelength.
+
+Finally, the output workspace in wavelength is converted to momentum transfer
+(Q).
 
 Conversion to Wavelength
 ########################
 
-First, the algorithm checks the X units of
-the input workspace. If the input workspace is already in wavelength, normalization by
-monitors and direct beam are not performed, as it is considered that the input run was
-already reduced using this algorithm. If the input workspace is in TOF, monitors, detectors of
-interest and region of direct beam are extracted by running :ref:`algm-GroupDetectors` with
-``ProcessingInstructions`` as input, :ref:`algm-GroupDetectors` with ``RegionOfDirectBeam`` as input,
-and :ref:`algm-CropWorkspace` with ``I0MonitorIndex`` as input respectively, and each of
-the resulting workspaces is converted to wavelength (note that :literal:`AlignBins` is set
-to :literal:`True` in all the three cases). Note that the normalization by a direct beam
-is optional, and only happens if ``RegionOfDirectBeam`` is provided. In the same way,
-monitor normalization is also optional, and only takes place if ``I0MonitorIndex``,
-``MonitorBackgroundWavelengthMin`` and ``MonitorBackgroundWavelengthMax`` are all
-specified. Detectors can be normalized by integrated monitors by setting
+If the input workspace is in TOF, monitors are extracted using
+:ref:`algm-GroupDetectors` with ``RegionOfDirectBeam`` as input, and region of
+direct beam using :ref:`algm-CropWorkspace` with ``I0MonitorIndex`` as
+input. If summing in wavelength, detectors of interest are extracted and summed
+in TOF using :ref:`algm-GroupDetectors` with ``ProcessingInstructions`` as
+input. If summing in Q, summation is not done yet as it is done in a later step
+after all normalisations have been done.
+
+Each of the resulting workspaces is converted to wavelength (note that
+:literal:`AlignBins` is set to :literal:`True` in all the three cases). Note
+that the normalization by a direct beam is optional, and only happens if
+``RegionOfDirectBeam`` is provided. In the same way, monitor normalization is
+also optional, and only takes place if ``I0MonitorIndex``,
+``MonitorBackgroundWavelengthMin`` and ``MonitorBackgroundWavelengthMax`` are
+all specified.
+
+Detectors can be normalized by integrated monitors by setting
 :literal:`NormalizeByIntegratedMonitors` to true, in which case
-:literal:`MonitorIntegrationWavelengthMin` and :literal:`MonitorIntegrationWavelengthMax` are
-used as the integration range. If monitors are not integrated, detectors are rebinned to
-monitors using :ref:`algm-RebinToWorkspace` so that the normalization by monitors can take place.
-Finally, the resulting workspace is cropped in wavelength according to :literal:`WavelengthMin`
-and :literal:`WavelengthMax`, which are both mandatory properties. A summary of the steps
-is shown in the workflow diagram below. For the sake of clarity, all possible steps are illustrated, even if some of them are optional.
+:literal:`MonitorIntegrationWavelengthMin` and
+:literal:`MonitorIntegrationWavelengthMax` are used as the integration
+range. If monitors are not integrated, detectors are rebinned to monitors using
+:ref:`algm-RebinToWorkspace` so that the normalization by monitors can take
+place.
+
+A summary of the steps is shown in the workflow diagram below. For the sake of
+clarity, all possible steps are illustrated, even if some of them are optional.
 
 .. diagram:: ReflectometryReductionOne_ConvertToWavelength-v2_wkflw.dot
 
@@ -98,12 +120,32 @@ property. If the :literal:`CorrectionAlgorithm` property is set to
 algorithm is used, with *C0* and *C1* taken from the :literal:`C0` and :literal:`C1`
 properties.
 
+Sum in Q and crop
+#################
+
+If summing in Q, the summation is done now, after all normalisations have been
+done. The summation is done using the algorithm proposed by Cubitt et al
+(J. Appl. Crystallogr., 48 (6) (2015)). This involves a projection to an
+arbitrary reference angle, :math:`2\theta_R`, with a "virtual" wavelength,
+:math:`\lambda_v`. This is the wavelength the neutron would have had if it had
+arrived at :math:`2\theta_R` with the same momentum transfer
+(:math:`Q`). Counts are shared out proportionally into the output array in
+:math:`\lambda_v` and the projections from all pixels are summed in
+:math:`\lambda_v`.
+
+In all cases, the 1D array in :math:`\lambda` is then cropped in wavelength
+according to :literal:`WavelengthMin` and :literal:`WavelengthMax`, which are
+both mandatory properties.
+
+.. diagram:: ReflectometryReductionOne_SumInQ-v2_wkflw.dot
+
 Conversion to Momentum Transfer (Q)
 ###################################
 
-Finally, the output workspace in wavelength is converted to momentum transfer (Q) using
-:ref:`algm-ConvertUnits`. Note that the output workspace in Q is therefore a workspace
-with native binning, and no rebin step is applied to it.
+Finally, the output workspace in wavelength is converted to momentum transfer
+(:math:`Q`) using :ref:`algm-ConvertUnits`. Note that the output workspace in Q
+is therefore a workspace with native binning, and no rebin step is applied to
+it.
 
 .. diagram:: ReflectometryReductionOne_ConvertToMomentum-v2_wkflw.dot
 
@@ -183,10 +225,10 @@ Output:
 
 .. testoutput:: ExReflRedOneTrans
 
-   0.4588
-   0.4655
-   0.7336
-   1.0156
+   0.4592
+   0.4654
+   0.7278
+   1.0305
 
 .. categories::
 
diff --git a/docs/source/algorithms/SaveDspacemap-v1.rst b/docs/source/algorithms/SaveDspacemap-v1.rst
index e7a32b538cf0510df5016142feb0b89ffc50b45d..f38a7f645640615985bf2b356665522b2efbc46c 100644
--- a/docs/source/algorithms/SaveDspacemap-v1.rst
+++ b/docs/source/algorithms/SaveDspacemap-v1.rst
@@ -26,7 +26,8 @@ Usage
     filepath = config["defaultsave.directory"]
   savefilename = os.path.join(filepath, "test_offset.dat")
 
-  LoadCalFile(InstrumentName='PG3',CalFilename=r'PG3_golden.cal',MakeGroupingWorkspace='0',MakeMaskWorkspace='0',WorkspaceName='PG3_gold')
+  ws = LoadEmptyInstrument(Filename="POWGEN_Definition_2015-08-01.xml")
+  LoadCalFile(InputWorkspace=ws,CalFilename=r'PG3_golden.cal',MakeGroupingWorkspace='0',MakeMaskWorkspace='0',WorkspaceName='PG3_gold')
   SaveDspacemap(InputWorkspace="PG3_gold_offsets", DspacemapFile=savefilename)
 
   print "File created = ", os.path.exists(savefilename), ", file size = ", os.path.getsize(savefilename)
@@ -44,6 +45,3 @@ Output:
 .. categories::
 
 .. sourcelink::
-
-
-
diff --git a/docs/source/algorithms/SaveYDA-v1.rst b/docs/source/algorithms/SaveYDA-v1.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2fc34c37a903935cd63152a1f2280d549b320708
--- /dev/null
+++ b/docs/source/algorithms/SaveYDA-v1.rst
@@ -0,0 +1,85 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+This algorithm exports a given :ref:`Workspace2D <Workspace2D>` to a YAML format which supposed to be read by the
+`Frida 2.0 <http://apps.jcns.fz-juelich.de/doku/frida/start>`_  software for further data analysis. The algorithm has been developed for the TOFTOF instrument,
+but can be used for other TOF instruments as well.
+
+Limitations
+###########
+
+The input workspace must be a Wokspace2D with an instrument.
+The X unit of the workspace has to be 'DeltaE'
+Y axis must be a Spectrum axis or it's unit has to be 'Momentum Transfer'.
+
+
+Usage
+-----
+
+**Example - SaveYDA**
+
+.. testcode:: SaveYDAExample
+
+    import os
+    import numpy as np
+
+    # create x and y data
+    dataX = np.arange(12).reshape(3, 4)
+    dataY = np.arange(9).reshape(3, 3)
+
+    # create sample workspace
+    ws = CreateWorkspace(DataX=dataX, DataY=dataY, DataE=np.sqrt(dataY), NSpec=3, UnitX="DeltaE")
+
+    # add Instrument
+    LoadInstrument(ws,False,InstrumentName="TOFTOF")
+
+    #add sample Logs
+    AddSampleLog(ws,"proposal_number","3")
+    AddSampleLog(ws, "proposal_title", "Proposal Title")
+    AddSampleLog(ws,"experiment_team","Experiment Team name")
+    AddSampleLog(ws,"temperature","200.0", LogUnit="F")
+    AddSampleLog(ws,"Ei","1.0",LogUnit="meV")
+
+    # test file name
+    filename = os.path.join(config["defaultsave.directory"], "TestSaveYDA.yaml")
+
+    # save file
+    SaveYDA(ws, filename)
+
+
+    with open(filename,'r') as f:
+        for i in range(12):
+            print f.readline(),
+
+**Output:**
+
+.. testoutput:: SaveYDAExample
+
+    Meta:
+      format: yaml/frida 2.0
+      type: generic tabular data
+    History:
+      - Proposal number 3
+      - Proposal Title
+      - Experiment Team name
+      - data reduced with mantid
+    Coord:
+      x: {name: w, unit: meV}
+      y: {name: 'S(q,w)', unit: meV-1}
+      z: [{name: 2th, unit: deg}]
+
+.. testcleanup:: SaveYDAExample
+
+    DeleteWorkspace("ws")
+    os.remove(filename)
+
+.. categories::
+
+.. sourcelink::
\ No newline at end of file
diff --git a/docs/source/concepts/UnitFactory.rst b/docs/source/concepts/UnitFactory.rst
index 300333779b0c543e37bd86b1390055db8ee805e3..eadc3aa5b2c75272fd98ec6cf73a77f88566ffd2 100644
--- a/docs/source/concepts/UnitFactory.rst
+++ b/docs/source/concepts/UnitFactory.rst
@@ -49,6 +49,9 @@ The following units are available in the default Mantid distribution.
 | Spin Echo Time                            | SpinEchoTime                    | :math:`ns`                  | | :math:`constant \times \lambda^3`                                                                              |
 |                                           |                                 |                             | |  The constant is supplied in eFixed                                                                            |
 +-------------------------------------------+---------------------------------+-----------------------------+------------------------------------------------------------------------------------------------------------------+
+| d-spacingPerpendicular                    | dSpacingPerpendicular           | :math:`\mathrm{\AA}`        | :math:`d_{\perp} = \sqrt{\lambda^2 - 2\log\cos\theta}`                                                           |
++-------------------------------------------+---------------------------------+-----------------------------+------------------------------------------------------------------------------------------------------------------+
+
 
 Where :math:`L_1` and :math:`L_2` are sample to the source and sample to
 detector distances respectively, :math:`L_{tot} = L_1+L_2` and
@@ -65,6 +68,10 @@ energy respectively. Units conversion into elastic momentum transfer
 (MomentumTransfer) will throw in elastic mode (emode=0) on inelastic
 workspace (when energy transfer is specified along x-axis)
 
+**d-spacingPerpendicular** is a unit invented in `J. Appl. Cryst. (2015) 48, pp. 1627--1636 <https://doi.org/10.1107/S1600576715016520>`_ for 2D Rietveld refinement
+of angular and wavelength-dispersive neutron time-of-flight powder diffraction data. Together with the d-Spacing :math:`d`,
+d-SpacingPerpendicular :math:`d_{\perp}` forms a new orthogonal coordinate system.
+
 
 Working with Units in Python
 ----------------------------
@@ -136,4 +143,4 @@ and we will add it to the default Mantid library.
 
 
 
-.. categories:: Concepts
\ No newline at end of file
+.. categories:: Concepts
diff --git a/docs/source/diagrams/ReflectometryReductionOne_ConvertToMomentum-v2_wkflw.dot b/docs/source/diagrams/ReflectometryReductionOne_ConvertToMomentum-v2_wkflw.dot
index 451901553112e1a015d0f972b9329ac2b576f29e..d65919b037bfe96d33c60c7d4d172560f1b0669c 100644
--- a/docs/source/diagrams/ReflectometryReductionOne_ConvertToMomentum-v2_wkflw.dot
+++ b/docs/source/diagrams/ReflectometryReductionOne_ConvertToMomentum-v2_wkflw.dot
@@ -4,8 +4,8 @@ label = "\n"
 
 subgraph params {
  $param_style
-  inputWorkspace    [label="OutputWorkspaceWavelength", group=g1]
-  outputWorkspace   [label="OutputWorkspace"]
+  inputWorkspace     [label="OutputWorkspaceWavelength", group=g1]
+  outputWorkspace    [label="OutputWorkspace"]
 }
 
 subgraph decisions {
@@ -25,6 +25,8 @@ subgraph values {
  $value_style
 }
 
-inputWorkspace    -> convertUnits
-convertUnits      ->  outputWorkspace
+inputWorkspace     -> convertUnits
+convertUnits       ->  outputWorkspace
+
 }
+
diff --git a/docs/source/diagrams/ReflectometryReductionOne_ConvertToWavelength-v2_wkflw.dot b/docs/source/diagrams/ReflectometryReductionOne_ConvertToWavelength-v2_wkflw.dot
index 097830117b72a0829a0cbe9a7f94e071d9601511..ee6162ae00d3bc0fc29ab8b95026c37c9bcb585b 100644
--- a/docs/source/diagrams/ReflectometryReductionOne_ConvertToWavelength-v2_wkflw.dot
+++ b/docs/source/diagrams/ReflectometryReductionOne_ConvertToWavelength-v2_wkflw.dot
@@ -9,8 +9,6 @@ subgraph params {
   inputWS             [label="InputWorkspace"]
   outputWS            [label="OutputWorkspaceWavelength"]
   procCommands        [label="ProcessingInstructions"]
-  wavMin              [label="WavelengthMin", group=gwav]
-  wavMax              [label="WavelengthMax", group=gwav]
   monitorIndex        [label="I0MonitorIndex"]
   regionOfDirectBeam  [label="RegionOf-\nDirectBeam"]
   monIntWavMax        [label="MonitorIntegration-\nWavelengthMax"]
@@ -21,6 +19,7 @@ subgraph params {
 
 subgraph decisions {
  $decision_style
+  checkSumInLam   [label="Sum in &lambda;?"]
 }
 
 subgraph algorithms {
@@ -35,7 +34,6 @@ subgraph algorithms {
   groupDetRDB   [label="GroupDetectors"]
   divideDetMon  [label="Divide\n(Detectors / Monitors)", group=g1]
   divideDetRDB  [label="Divide\n(Detectors / DirectBeam)", group=g1]
-  cropWav       [label="CropWorkspace", group=g1]
 }
 
 subgraph processes {
@@ -46,21 +44,24 @@ subgraph values {
  $value_style
 }
 
-inputWS				-> groupDet			[label="Detectors"]
-inputWS				-> groupDetRDB		[label="Direct Beam"]
-inputWS				-> cropMonWS		[label="Monitors"]
+inputWS			-> checkSumInLam	[label="Detectors"]
+inputWS			-> groupDetRDB		[label="Direct Beam"]
+inputWS			-> cropMonWS		[label="Monitors"]
+
+checkSumInLam           -> convertDet [label="No"]
+checkSumInLam           -> groupDet   [label="Yes"]
+groupDet		-> convertDet
 
 procCommands		-> groupDet
-groupDet			-> convertDet
 
 regionOfDirectBeam	-> groupDetRDB
-groupDetRDB			-> convertDB
+groupDetRDB		-> convertDB
 
 monitorIndex		-> cropMonWS
-cropMonWS			-> convertMon
+cropMonWS		-> convertMon
 
-convertDet			-> divideDetRDB
-convertDB			-> divideDetRDB
+convertDet		-> divideDetRDB
+convertDB		-> divideDetRDB
 
 convertMon          -> calcFlatBg
 monBackWavMin       -> calcFlatBg
@@ -71,10 +72,8 @@ monIntWavMax        -> intMon
 
 divideDetRDB        -> divideDetMon
 intMon              -> divideDetMon
-wavMin              -> cropWav
-divideDetMon        -> cropWav
-wavMax              -> cropWav
-cropWav             -> outputWS
+
+divideDetMon        -> outputWS
 
 {rank=same; groupDet; groupDetRDB; cropMonWS}
 {rank=same; convertDet; convertDB; convertMon}
diff --git a/docs/source/diagrams/ReflectometryReductionOne_HighLvl-v2_wkflw.dot b/docs/source/diagrams/ReflectometryReductionOne_HighLvl-v2_wkflw.dot
index caf0a9cd3a1abc708fa9a6d6586c48b05c9ddf52..738c598dc74b76b19742fe8234b7c5ba2365f11f 100644
--- a/docs/source/diagrams/ReflectometryReductionOne_HighLvl-v2_wkflw.dot
+++ b/docs/source/diagrams/ReflectometryReductionOne_HighLvl-v2_wkflw.dot
@@ -20,8 +20,9 @@ subgraph algorithms {
 
 subgraph processes {
  $process_style
-  convertUnits    [label="Convert to &lambda; and\nnormalize\nby monitors"]
+  convertUnits    [label="Convert to &lambda;,\noptionally sum in &lambda;, and\nnormalize by monitors"]
   applyCorrTrans  [label="Apply transmission\n corrections"]
+  sumInQ          [label="Optionally sum in Q,\nand crop"]
   convertMom      [label="Convert to momentum\ntransfer"]
 }
 
@@ -33,7 +34,9 @@ inputWS         -> checkXUnit
 checkXUnit      -> applyCorrTrans [label="Yes"]
 checkXUnit      -> convertUnits   [label="No"]
 convertUnits    -> applyCorrTrans
-applyCorrTrans  -> outputWSWL
+
+applyCorrTrans  -> sumInQ
+sumInQ          -> outputWSWL
 
 outputWSWL      -> convertMom
 convertMom      -> outputWSFinal
diff --git a/docs/source/diagrams/ReflectometryReductionOne_SumInQ-v2_wkflw.dot b/docs/source/diagrams/ReflectometryReductionOne_SumInQ-v2_wkflw.dot
new file mode 100644
index 0000000000000000000000000000000000000000..e3e913c53256bedb411de6480406b1b91ac6dce8
--- /dev/null
+++ b/docs/source/diagrams/ReflectometryReductionOne_SumInQ-v2_wkflw.dot
@@ -0,0 +1,43 @@
+digraph ReflectometryReductionOne {
+label = "\n"
+ $global_style
+
+subgraph params {
+ $param_style
+  inputWorkspace     [label="OutputWorkspaceWavelength", group=g1]
+  outputWorkspaceWav [label="OutputWorkspaceWavelength"]
+  wavMin             [label="WavelengthMin", group=gwav]
+  wavMax             [label="WavelengthMax", group=gwav]
+}
+
+subgraph decisions {
+ $decision_style
+  checkSumInQ        [label="Sum in Q?"]
+}
+
+subgraph algorithms {
+ $algorithm_style
+  cropWav           [label="CropWorkspace"]
+}
+
+subgraph processes {
+ $process_style
+  projectCounts      [label=<Project input counts to &lambda;<sub>v</sub> at 2&theta;<sub>R</sub>>]
+  sumInQ             [label=<Sum in &lambda;<sub>v</sub>>]
+}
+
+subgraph values {
+ $value_style
+}
+
+inputWorkspace     -> checkSumInQ
+checkSumInQ        -> projectCounts      [label="Yes"]
+checkSumInQ        -> cropWav            [label="No"]
+projectCounts      -> sumInQ
+sumInQ             -> cropWav
+wavMin             -> cropWav
+wavMax             -> cropWav
+cropWav            -> outputWorkspaceWav
+
+}
+
diff --git a/docs/source/fitfunctions/DynamicKuboToyabe.rst b/docs/source/fitfunctions/DynamicKuboToyabe.rst
index d23576ee075e45dfe163911fcf67baafbc1a32d9..99181d2261bbacb29b6688e5e525f307c473d1a5 100644
--- a/docs/source/fitfunctions/DynamicKuboToyabe.rst
+++ b/docs/source/fitfunctions/DynamicKuboToyabe.rst
@@ -22,7 +22,7 @@ where :math:`g_z\left(t\right)` is the static KT function, and :math:`\nu` the m
 
 | In the presence of a longitudinal field, :math:`B_0=\omega_0 /\left(2\pi \gamma_{\mu}\right)>0`: 
 
-.. math:: g_z\left(t\right) = \mbox{A} \Bigg[ 1 - 2\frac{\Delta^2}{\omega_0^2}\Big(1-cos(\omega_0 t)e^{-\frac{1}{2}\Delta^2 t^2}\Big) + 2\frac{\Delta^4}{\omega_0^4}\omega_0\int_0^\tau \sin(\omega_0\tau)e^{-\frac{1}{2}\Delta^2\tau^2}d\tau \Bigg]
+.. math:: g_z\left(t\right) = \mbox{A} \Bigg[ 1 - 2\frac{\Delta^2}{\omega_0^2}\Big(1-cos(\omega_0 t)e^{-\frac{1}{2}\Delta^2 t^2}\Big) + 2\frac{\Delta^4}{\omega_0^4}\omega_0\int_0^t \sin(\omega_0\tau)e^{-\frac{1}{2}\Delta^2\tau^2}d\tau \Bigg]
 
 DynamicKuboToyabe function has one attribute (non-fitting parameter), 'BinWidth', that sets the width of the step size between points for numerical integration. Note that 
 small values will lead to long calculation times, while large values will produce less accurate results. The default value is set to 0.05, and it is allowed to vary in the range [0.001,0.1].
diff --git a/docs/source/images/ArtRightGUIWaterfallCustom2sp1.PNG b/docs/source/images/ArtRightGUIWaterfallCustom2sp1.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..499eacb0029313d869876347795ecd7d1fc3cdb7
Binary files /dev/null and b/docs/source/images/ArtRightGUIWaterfallCustom2sp1.PNG differ
diff --git a/docs/source/images/ArtSurfacePlotT1.PNG b/docs/source/images/ArtSurfacePlotT1.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..bade121e5d5e7df5797f07f6720b6a0e423bb2f2
Binary files /dev/null and b/docs/source/images/ArtSurfacePlotT1.PNG differ
diff --git a/docs/source/images/ArtWaterfallT1.PNG b/docs/source/images/ArtWaterfallT1.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..4ffcb85671223500f41ad4c4aebb0d2834994190
Binary files /dev/null and b/docs/source/images/ArtWaterfallT1.PNG differ
diff --git a/docs/source/images/DeltaPDF3D_deconv.png b/docs/source/images/DeltaPDF3D_deconv.png
new file mode 100644
index 0000000000000000000000000000000000000000..d1380625134079ee9d212571435393f6895043f2
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_deconv.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft1.png b/docs/source/images/DeltaPDF3D_fft1.png
index 6b11a18065a1d69758bcc2288aa42d9ebe144a43..83161f640717bc52ffe009960345e3d74fa3daeb 100644
Binary files a/docs/source/images/DeltaPDF3D_fft1.png and b/docs/source/images/DeltaPDF3D_fft1.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft2.png b/docs/source/images/DeltaPDF3D_fft2.png
index 9fb8c75f45c63b084c0212106c46df4d70c7ee2e..7c9a6f95b3b1e833682eaf3e0b11517383dbb296 100644
Binary files a/docs/source/images/DeltaPDF3D_fft2.png and b/docs/source/images/DeltaPDF3D_fft2.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft3.png b/docs/source/images/DeltaPDF3D_fft3.png
index 0e2b058b780c8fefcd1521f7d4ce7c665e2305d6..d839a4b6343e6e71248b3e970f347280d999b31c 100644
Binary files a/docs/source/images/DeltaPDF3D_fft3.png and b/docs/source/images/DeltaPDF3D_fft3.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft3_2.png b/docs/source/images/DeltaPDF3D_fft3_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..c162288bd9246f66591c23cb8bae6c92da636a97
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_fft3_2.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft4.png b/docs/source/images/DeltaPDF3D_fft4.png
index 1e6b5ea71fb5b294b36069fa592c0a938cc8cb9b..ab0d6e1e7e9ade711993f22f48775f3d1d3d8d38 100644
Binary files a/docs/source/images/DeltaPDF3D_fft4.png and b/docs/source/images/DeltaPDF3D_fft4.png differ
diff --git a/docs/source/images/DeltaPDF3D_fft5.png b/docs/source/images/DeltaPDF3D_fft5.png
new file mode 100644
index 0000000000000000000000000000000000000000..c5c6ccd59f09cf1a778d4a057945d16dd5f8e4f1
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_fft5.png differ
diff --git a/docs/source/images/DeltaPDF3D_int2.png b/docs/source/images/DeltaPDF3D_int2.png
index 3cd7f67c63af2219e1e728ca7bb2590104e5a305..24982081d4561802aa93b5c09f03797b3ba36ca7 100644
Binary files a/docs/source/images/DeltaPDF3D_int2.png and b/docs/source/images/DeltaPDF3D_int2.png differ
diff --git a/docs/source/images/DeltaPDF3D_int3.png b/docs/source/images/DeltaPDF3D_int3.png
index 4024a397adb90eb15216c86ea6e37f7635b8a496..a5cc9676e84ba02940fc7ea38f92b1276fc241d1 100644
Binary files a/docs/source/images/DeltaPDF3D_int3.png and b/docs/source/images/DeltaPDF3D_int3.png differ
diff --git a/docs/source/images/DeltaPDF3D_int3_2.png b/docs/source/images/DeltaPDF3D_int3_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..acdc24e817d50310d285ed830dc286404e4d5971
Binary files /dev/null and b/docs/source/images/DeltaPDF3D_int3_2.png differ
diff --git a/docs/source/images/DeltaPDF3D_int4.png b/docs/source/images/DeltaPDF3D_int4.png
index 36511b954e3aa7d3eb1fedd380e9e17beb3ed518..803b9c876613361c8a0ddbb4276f6d7e2196e575 100644
Binary files a/docs/source/images/DeltaPDF3D_int4.png and b/docs/source/images/DeltaPDF3D_int4.png differ
diff --git a/docs/source/images/DeltaPDF3D_testWS.png b/docs/source/images/DeltaPDF3D_testWS.png
index 90c910e35c904bf232378f6d5614d6d7e4337532..fd2cb7d0100448627a57a6b48ebd469a736804e3 100644
Binary files a/docs/source/images/DeltaPDF3D_testWS.png and b/docs/source/images/DeltaPDF3D_testWS.png differ
diff --git a/docs/source/images/ISISReflectometryPolref_event_handling_tab.png b/docs/source/images/ISISReflectometryPolref_event_handling_tab.png
index b50617345636919b5186bf6f008fe17423282e0a..1fcb17c8aba8a2e299d25e9f4592d53f263982a0 100644
Binary files a/docs/source/images/ISISReflectometryPolref_event_handling_tab.png and b/docs/source/images/ISISReflectometryPolref_event_handling_tab.png differ
diff --git a/docs/source/images/MuonAnalysisCombinePeriods.png b/docs/source/images/MuonAnalysisCombinePeriods.png
new file mode 100644
index 0000000000000000000000000000000000000000..74ad970345e99d7d681d994762f459be944a9da1
Binary files /dev/null and b/docs/source/images/MuonAnalysisCombinePeriods.png differ
diff --git a/docs/source/images/MuonAnalysisDataAnalysis3.10.png b/docs/source/images/MuonAnalysisDataAnalysis3.10.png
new file mode 100644
index 0000000000000000000000000000000000000000..8a2bbd2e7b69fb5b1b5862ef4238e7b98343b94e
Binary files /dev/null and b/docs/source/images/MuonAnalysisDataAnalysis3.10.png differ
diff --git a/docs/source/images/MuonAnalysisTFAsymm.png b/docs/source/images/MuonAnalysisTFAsymm.png
new file mode 100644
index 0000000000000000000000000000000000000000..a0bdf1429dd413de5cf8d0af6d7618207d7ee6d0
Binary files /dev/null and b/docs/source/images/MuonAnalysisTFAsymm.png differ
diff --git a/docs/source/images/SANSNewReductionBackendPerformance.png b/docs/source/images/SANSNewReductionBackendPerformance.png
new file mode 100644
index 0000000000000000000000000000000000000000..054c0314409e9e51aa8616ff7fcb62286a861da4
Binary files /dev/null and b/docs/source/images/SANSNewReductionBackendPerformance.png differ
diff --git a/docs/source/interfaces/CrystalFieldPythonInterface.rst b/docs/source/interfaces/CrystalFieldPythonInterface.rst
index 1854e7482328be0298fe9e41a313c23c7b49ae24..7822d4345a4e07307b579354633cfa0eaf345db1 100644
--- a/docs/source/interfaces/CrystalFieldPythonInterface.rst
+++ b/docs/source/interfaces/CrystalFieldPythonInterface.rst
@@ -114,12 +114,12 @@ The new output::
   
 To calculate a spectrum we need to define a shape of each peak (peak profile function) and its default width (`FWHM`).
 The width can be set either via a keyword argument or a property with name `FWHM`. If the peak shape isn't set the default
-of Lorentzian is assumed. To set a different shape use the `setPeaks` method::
+of Lorentzian is assumed. To set a different shape use the `PeakShape` property::
 
-  cf.setPeaks('Gaussian')
+  cf.PeakShape = 'Gaussian'
   cf.FWHM = 0.9
   
-The arguments of `setPeaks` are expected to be names of Mantid peak fit functions. At the moment only `Lorentzian` and
+The values of `PeakShape` are expected to be names of Mantid peak fit functions. At the moment only `Lorentzian` and
 `Gaussian` can be used.
 
 After the peak shape is defined a spectrum can be calculated::
@@ -212,11 +212,11 @@ For the parameters of the background the syntax is the same but the methods are
 The names of the peak parameters both in ties and constraints must include the index of the peak to which they belong. Here we follow
 the naming convention of the :ref:`func-CompositeFunction`: f<n>.<name>, where <n> stands for an integer index staring at 0 and <name>
 is the name of the parameter. For example, `f1.Sigma`, `f3.FWHM`. Because names now contain the period symbol '.' keyword arguments
-cannot be used. Instead we must pass strings containing ties::
+cannot be used. Instead we must pass a dictionary containing ties. The keys are parameter names and the values are the ties::
 
-    cf.peaks.ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+    cf.peaks.ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
     
-and constraints are also a list of strings::
+Constraints are a list of strings::
 
     cf.peaks.constraints('f0.FWHM < 2.2', 'f1.FWHM >= 0.1')
     
@@ -226,13 +226,13 @@ If a parameter of all peaks needs to be tied/constrained with the same expressio
     cf.peaks.constrainAll('0 < Sigma < 0.1', 4)
 
 where the first argument is the general formula of the tie/constraint and the second is the number of peaks to apply to.
-The is also a version for a range of peak indices::
+There is also a version for a range of peak indices::
 
     cf.peaks.tieAll('Sigma=f0.Sigma', 1, 3)
 
 which is equivalent to::
 
-    cf.peaks.ties('f1.Sigma=f0.Sigma', 'f2.Sigma=f0.Sigma', 'f3.Sigma=f0.Sigma')
+    cf.peaks.ties({'f1.Sigma': 'f0.Sigma', 'f2.Sigma': 'f0.Sigma', 'f3.Sigma': 'f0.Sigma'})
 
 
 Setting Resolution Model
@@ -280,11 +280,11 @@ become lists. Here is an example of defining a `CrystalField` object with two sp
 
     cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                       Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-    cf.setPeaks('Lorentzian')
+    cf.PeakShape = 'Lorentzian'
     cf.peaks[0].param[0]['FWHM'] = 1.11
     cf.peaks[1].param[1]['FWHM'] = 1.12
-    cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                     background=Function('FlatBackground', A0=1.0))
+    cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                               background=Function('FlatBackground', A0=1.0))
     cf.background[1].peak.param['Sigma'] = 0.8
     cf.background[1].background.param['A0'] = 1.1
 
@@ -301,7 +301,7 @@ change::
     cf.background[1].peak.ties(Height=20.2)
     cf.background[1].peak.constraints('Sigma > 0.2')
     cf.peaks[1].tieAll('FWHM=2*f1.FWHM', 2, 5)
-    cf.peaks[0].constrainAll('FWHM < 2.2', 1, 6)
+    cf.peaks[0].constrainAll('FWHM < 2.2', 1, 4)
 
 The resolution model also needs to be initialised from a list::
 
@@ -328,6 +328,16 @@ To calculate a spectrum call the same method `getSpectrum` but pass the spectrum
   # Calculate first spectrum, use the i-th spectrum of a workspace
   sp = cf.getSpectrum(0, ws, i)
 
+Note that the attributes `Temperature`, `FWHM`, `peaks` and `background` may be set separately from the constructor, e.g.::
+
+    cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544)
+    cf.Temperature = [5, 50]
+
+However, each time that `Temperature` is set, if it defines a different number of spectra from the previous value
+(e.g. if `Temperature` was initially empty or `None` and is then defined as in the example above, or if `Temperature`
+was initially a scalar value but is then redefined to be a list or vice versa), then all `Ties`, `Constraints`,
+`FWHM` and `peaks` parameters are cleared. Any crystal field parameters previously defined will be retained, however.
+
 
 Multiple Ions
 -------------
@@ -652,7 +662,11 @@ or separately after construction::
     fit_moment.fit()
 
 Unfortunately only 1D datasets can be fitted (e.g. M(H, T) cannot be fitted as a simultaneous function of field and
-temperature).
+temperature). Also, note that setting the `PhysicalProperty` attribute after constructing the `CrystalField` object
+(e.g. running `cf.PhysicalProperty = PhysicalProperties('Cv')`) causes the number of datasets to change and will 
+clear all `Ties` and `Constraints` previously set, and also reset all `FWHM` and `peaks` to the default values (zero 
+for `FWHM` and `Lorentzian` for `peaks`). 
+
 
 Simultaneous Fitting of Physical Properties and Inelastic Neutron Spectra
 -------------------------------------------------------------------------
diff --git a/docs/source/interfaces/ISIS_Reflectometry.rst b/docs/source/interfaces/ISIS_Reflectometry.rst
index a6c9c856ec1cb403481059cae9deff1ee53e72f5..b80084d90db8148244946ca86e0f35711f7ef844 100644
--- a/docs/source/interfaces/ISIS_Reflectometry.rst
+++ b/docs/source/interfaces/ISIS_Reflectometry.rst
@@ -392,21 +392,23 @@ Event Handling tab
 .. figure:: /images/ISISReflectometryPolref_event_handling_tab.png
    :alt: Showing view of the settings tab.
 
-The *Event Handling* tab can be used to analyze event workspaces. It contains three text boxes for
-specifying uniform even, uniform and custom slicing respectively. Each of these slicing options are
-exclusive, no more than one can be applied. If the text box for the selected slicing method is empty
-no event analysis will be performed, runs will be loaded using
+The *Event Handling* tab can be used to analyze event workspaces. It contains four text boxes for
+specifying uniform even, uniform, custom and log value slicing respectively. Each of these slicing
+options are exclusive, no more than one can be applied. If the text box for the selected slicing
+method is empty no event analysis will be performed, runs will be loaded using
 :ref:`LoadISISNexus <algm-LoadISISNexus>` and analyzed as histogram workspaces. When this text box
 is not empty, runs will be loaded using :ref:`LoadEventNexus <algm-LoadEventNexus>` and the
-interface will try to parse the user input to obtain a set of start times and stop times. These
-define different time slices that will bepassed on to :ref:`FilterByTime <algm-FilterByTime>`. Each
-time slice will be normalized by the total proton charge and reduced as described in the previous
-section. Note that, if any of the runs in a group could not be loaded as an event workspace, the
-interface will load the runs within that group as histogram workspaces and no event analysis will
-be performed for that group. A warning message will be shown when the reduction is complete
-indicating that some groups could not be processed as event data.
-
-The three slicing options are described in more detail below:
+interface will try to parse the user input to obtain a set of start and stop values. These define
+different time slices that will be passed on to an appropriate filtering algorithm
+(:ref:`FilterByTime <algm-FilterByTime>` for uniform even, uniform and custom slicing,
+:ref:`FilterByLogValue <algm-FilterByLogValue>` for log value slicing). Each time slice will be
+normalized by the total proton charge and reduced as described in the previous section. Note that,
+if any of the runs in a group could not be loaded as an event workspace, the interface will load
+the runs within that group as histogram workspaces and no event analysis will be performed for that
+group. A warning message will be shown when the reduction is complete indicating that some groups
+could not be processed as event data.
+
+The four slicing options are described in more detail below:
 
 - **Uniform Even** - The interface obtains the start and end times of the run and divides it into
   a specified number of evenly-sized slices. For example given a run of duration 100 seconds,
@@ -430,6 +432,14 @@ The three slicing options are described in more detail below:
     ``200`` seconds after the start of the run, and the second one starting at ``200`` seconds
     and ending at ``300`` seconds.
 
+- **LogValue** - Like custom slicing this takes a list of comma-separated numbers and are parsed
+  in the same manner as shown above. The values however indicate the minimum and maximum values of
+  the logs we wish to filter rather than times. In addition, this takes a second entry 'Log Name'
+  which is the name of the log we wish to filter the run for. For example, given a run and entries
+  of ``100, 200, 300`` and ``proton_charge`` for slicing values and log name respectively, we would
+  produce two slices - the first containing all log values between ``100`` and ``200`` seconds, the
+  second containing all log values between ``200`` and ``300`` seconds.
+
 Workspaces will be named according to the index of the slice, e.g ``IvsQ_13460_slice_0``, ``IvsQ_13460_slice_1``, etc.
 
 Settings tab
diff --git a/docs/source/interfaces/Muon_Analysis.rst b/docs/source/interfaces/Muon_Analysis.rst
index cd0202371307d6ff776f2a25095fac4993f9eba9..4d75035bb40fa26624de914358fff03dcb366054 100644
--- a/docs/source/interfaces/Muon_Analysis.rst
+++ b/docs/source/interfaces/Muon_Analysis.rst
@@ -264,7 +264,8 @@ Data Analysis
 .. _DataAnalysis:
 
 This tab is designed for the user to make a fit against the data just plotted.
-Since Mantid 3.8, this tab has been enhanced to include fits of multiple datasets at once.
+Since Mantid 3.8 (upgraded in 3.10), this tab has been enhanced to include fits of multiple datasets at once.
+Since Mantid 3.10 a Transverse field (TF) Asymmetry mode has been added. 
 
 Default: multiple fitting disabled
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -281,13 +282,29 @@ When the tab is open, this fit property browser is used by default within Mantid
 Note that, in this mode, simultaneous fits are not possible.
 The intention is that this mode could be useful for users who are accustomed to the existing UI, or if a bug is found in the new UI.
 
+
+TF asymmetry enabled
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The TF asymmetry mode can be enabled by checking the "TF Asymmetry" checkbox on the Settings_ tab.
+At present it is not possible to use multiple fitting and TF asymmetry, therefore it is not possible 
+to select both checkboxes. Loading transverse field asymmetry data into muon analysis will automatically
+enable TF asymmetry mode. 
+When this is activated, the data analysis tab has two main differences to the pre 3.8 version. Firstly there 
+is an additional row in the Data table (normalization). The second difference is the addition of the "TF
+Asymmetry Fit" button in the fitting tab. Selecting this fitting option will call the :ref:`Calculate Muon Asymmetry <algm-CalculateMuonAsymmetry>` algorithm. The user defined function will be the composite function from the interface.
+
+.. image::  ../images/MuonAnalysisTFAsymm.png
+   :align: right
+
+
 Multiple fitting enabled
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-The new multiple fitting functionality can be enabled by checking the "Enable multiple fitting" checkbox on the Settings_ tab.
-When this is activated, the tab is divided into three sections vertically.
+The multiple fitting functionality can be enabled by checking the "Enable multiple fitting" checkbox on the Settings_ tab.
+When this is activated, the tab is divided into multiple sections vertically.
 
-.. image::  ../images/MuonAnalysisDataAnalysis3.8.png
+.. image::  ../images/MuonAnalysisDataAnalysis3.10.png
    :align: right
 
 Fit Function
@@ -306,36 +323,34 @@ This button will open the "Edit local parameter values" dialog, which offers gre
 
 Data
 ^^^^
-The central section of the tab is the data selector, which controls the dataset(s) that will be fitted.
+The next section of the tab is the data selector, which controls the dataset(s) that will be fitted.
 By default, this will be a single dataset, the same as the data loaded on the Home_ tab.
 The dataset(s) can be changed here and, if more than one is selected, they will all be fitted simultaneously.
 
+The "Display Parameters For" boxes consist of a backwards button, a drop-down selection and a forward button. The drop-down list shows all datasets currently selected,
+and the left and right buttons cycle through them. The currently selected dataset has its parameters shown in the *Fit Function* (upper) widget, and will be plotted.
+
 For a multi-dataset fit, the "Label" box is enabled.
 This allows the user to input a label for the simultaneous fit.
 
-The drop-down list shows all datasets currently selected, and the left and right buttons cycle through them.
-The currently selected dataset has its parameters shown in the *Fit Function* (upper) widget, and will be plotted.
-
-Fits can be done across runs, groups, periods or all three.
-From left to right, the options to select are:
-
 Runs
 """"
 A single run, or range (*e.g. 15189-91, 15193*) can be typed into the box here.
 The radio buttons below control whether the runs should be co-added together or fitted separately in a simultaneous fit.
-It is also possible to adjust the start and end time here.
 
-Groups
-""""""
-There is a checkbox in this section for each group defined in the GroupingOptions_ tab.
-One or multiple groups can be selected.
+Data Table
+^^^^^^^^^^
+
+The data table allows the user to modify the selected data for the fitting. This includes the start and end times, which can also
+be updated by dragging the blue dashed lines in the plot. The "Groups/Pairs to fit" box provides a drop-down menu with three options (all groups, all pairs and custom). 
+Selecting custom will produce a pop-up box with tick boxes for each of the available groups and pairs. If a user wants to update the custom selection the 
+Groups/Pairs button can be pressed from the ReselectData_ section at the bottom ofthe tab (this is only enabled if a custom selection is set). Underneath displays the
+"Selected Groups". 
 
-Periods
-"""""""
-There is a checkbox in this section for each period of the data.
-(This section is only visible for multi-period data).
-One or multiple periods can be selected.
-In addition, the "Combination" option can be used to fit a sum or difference of periods.
+The next row is the "Periods to fit" option, which is only displayed for multiple period data. This will automatically be populated with
+each of the periods (e.g. 1,2,3) and a custom option. Selecting custom will produce a pop-up with checkboxes for all of the periods. Selecting custom will also enable the 
+"Periods" button in the ReselectData_ section
+and pressing this button will allow the user to alter their custom selection. 
 
 Examples/Use cases
 """"""""""""""""""
@@ -372,12 +387,27 @@ Examples/Use cases
    - It is, of course, possible to select several runs, groups, periods all at once and a simultaneous fit will be performed across all the selected datasets.
    - Example: MUSR{15189, 15190, 15191}, groups {*fwd*, *bwd*}, periods {1, 2}: 12 datasets in all.
 
-Options
-^^^^^^^
-The bottom of the tab contains selected fit options that can be adjusted, just as elsewhere in Mantid.
+Additional Options
+^^^^^^^^^^^^^^^^^^
+Near the bottom of the tab contains selected fit options that can be adjusted, just as elsewhere in Mantid.
 The only option specific to the Muon Analysis interface is *Fit To Raw Data*.
 When this option is set to *True*, the fitting process is done using the raw (unbinned) data, even if the DataBinning_ is set.
 
+Reselect data
+^^^^^^^^^^^^^
+
+.. _ReselectData:
+
+At the bottom of the tab is the "Reselect Data" section. This includes three buttons "Groups/Pairs", "Periods" and "Combine Periods". The "Groups/Pairs" and "Periods" 
+buttons are only when the relevant options in the data table are set to custom. Pressing the button will produce a pop-up that will allow the user to modify their selection. 
+
+The "Combine Periods" button is only enabled if multiple periods are available. Pressing the button will generate a pop-up with two boxes. The top one is for adding periods 
+(as a comma seperated list or with "+") and the bottom box is for subtraction (as a comma sepearted list). Everything in the top and bottom boxes are summed seperatley 
+and the results are then used in the subtraction. 
+
+.. image::  ../images/MuonAnalysisCombinePeriods.png
+   :align: right
+
 Sequential fitting
 ^^^^^^^^^^^^^^^^^^
 
diff --git a/docs/source/release/v3.10.0/diffraction.rst b/docs/source/release/v3.10.0/diffraction.rst
index 378e77ab045153cc8daa81cf40db76e280660d9e..0ca0e77b1575a76d61c5ef9cad391de2e23b4326 100644
--- a/docs/source/release/v3.10.0/diffraction.rst
+++ b/docs/source/release/v3.10.0/diffraction.rst
@@ -15,6 +15,8 @@ Crystal Improvements
  - :ref:`SaveIsawPeaks <algm-SaveIsawPeaks>` now saves the calibration data for all detector banks in instrument so the header may be longer
  - :ref:`LoadIsawPeaks <algm-LoadIsawPeaks>` now uses the calibration lines to calibrate the detectors banks for CORELLI
  - :ref:SCD Event Data Reduction interface and SCD_Reduction python scripts work with both nxs and h5 extensions for data file.
+ - New algorithm :ref:`algm-IntegrateEllipsoidsTwoStep` which can be used to integrate weak peaks by using parameters derived from strong peaks.
+ - :ref:`FindSxPeaks <algm-FindSXPeaks>` Resolved an issue where the algorithm failed on instruments with multiple detectors per spectrum.
 
 Engineering Diffraction
 -----------------------
@@ -25,8 +27,10 @@ Powder Diffraction
 - ISIS Powder diffraction scripts have been released. These include support for
   GEM, PEARL and POLARIS instruments. For more details see the ISIS Powder documentation
   under API, Python category. 
+- New instrument definition files are added for D1B, D4, and D20 powder/liquid diffractometers at ILL. The source and monitor positions, as well as detector to sample distance have been corrected for existing D2B.
+- New IDF for the POWGEN upgrade
 - :ref:`AlignAndFocusPowder <algm-AlignAndFocusPowder>` Now supports supplying an a second ``.cal`` file for the ``GroupingFilename``.
-- New algorithm :ref:`AlignAndFocusPowderFromFiles <algm-AlignAndFocusPowderFromFiles>` is a wrapper around :ref:`AlignAndFocusPowder <algm-AlignAndFocusPowder>` which supports caching results
+- New algorithm :ref:`AlignAndFocusPowderFromFiles <algm-AlignAndFocusPowderFromFiles>` is a wrapper around :ref:`AlignAndFocusPowder <algm-AlignAndFocusPowder>` which supports caching results. :ref:`SNSPowderReduction <algm-SNSPowderReduction>` and :ref:`PDToPDFgetN <algm-PDToPDFgetN>` have been reworked to take advantage of this.
 - Bugfix in :ref:`SNAPReduce <algm-SNAPReduce>` with loading previous normalizations
 - :ref:`SNSPowderReduction <algm-SNSPowderReduction>` now supports splitters in format of ``MatrixWorkspace`` and general ``TableWorkspace``.
 - A new NOMAD instrument definition file with corrected values.
@@ -36,9 +40,11 @@ Single Crystal Diffraction
 
 - A new HB3A instrument definition file, for its 512 x 512 detector, is created.  Its valid period is from February 2017 to late April 2017.
 - An IDF for HB3A with 256 by 256 detectors was created.  It was dated from late April 2017 because its original detector has been switched back.
+- A Bug fix was added to the WISH instrument parameter file to prevent predicted peaks falling between tube gaps.
 - New algorithm :ref:`DeltaPDF3D <algm-DeltaPDF3D>` for calculating the 3D-deltaPDF from a HKL MDHistoWorkspace
 
-
 Full list of `diffraction <https://github.com/mantidproject/mantid/issues?q=is%3Aclosed+milestone%3A%22Release+3.10%22+label%3A%22Component%3A+Diffraction%22>`_
 and
 `imaging <https://github.com/mantidproject/mantid/issues?q=is%3Aclosed+milestone%3A%22Release+3.10%22+label%3A%22Component%3A+Imaging%22>`_ changes on GitHub.
+
+- HB3A reduction interface (application) now supports to integrate single crystal peaks by fitting peak intensity with 2D Gaussian with more detailed integraton information for user.
diff --git a/docs/source/release/v3.10.0/framework.rst b/docs/source/release/v3.10.0/framework.rst
index e254dafc12828b2087e98cd6e075daed1769e0da..e279b83cc5967e62401a37a9d487c2563bc6b218 100644
--- a/docs/source/release/v3.10.0/framework.rst
+++ b/docs/source/release/v3.10.0/framework.rst
@@ -11,17 +11,25 @@ API
 
 - The default multiple file limit is now made facility dependent. It is 1000 for ILL, and 100 for all the others.
 - Frequency unit (GHz) included as an option to represent energy transfer.
+- Framework changes now mean scanning workspaces (workspaces with moving detectors) can be created. Currently this can be tested using a new option in :ref:`CreateSampleWorkspace <algm-CreateSampleWorkspace-v1>`, by setting `NumScanPoints`. This is still experimental, as such the instrument view, saving workspaces and some algorithms will not work correctly with the scanning workspaces.
 - Fixed a bug where certain validators would crash with SingleValuedWorkspaces instead of rejecting them.
+- New unit :math:`d_{\perp}` (Angstrom) is implemented for TOF powder diffraction.
 
 Algorithms
 ----------
 
 - Removed the optional flag ``LocationParameters`` from ``ClearInstrumentParameters``.
+- New method `IAlgorithm::helpURL` returns an optional documentation webpage. Useful when registering Python
+  algorithms at runtime that are not part of the Mantid distribution.
 
 New
 ###
 
 - :ref:`DeleteWorkspaces <algm-DeleteWorkspaces>` will delete a list of workspaces.
+- :ref:`algm-IntegrateEllipsoidsTwoStep` which can be used to integrate weak single crystal peaks by using parameters derived from strong peaks.
+- :ref:`FindEPP-v2 <algm-FindEPP-v2>` reimplements the :ref:`FindEPP-v1 <algm-FindEPP-v1>` in C++, providing an order of magnitude gain in execution time for large workspaces.
+- :ref:`LineProfile <algm-LineProfile>` will give a horizontal or vertical line profile over a workspace.
+- :ref:`SaveYDA <algm-SaveYDA>` do export :ref:`Workspace2D <Workspace2D>` to `Frida 2.0 <http://apps.jcns.fz-juelich.de/doku/frida/start>`_ YAML format.
 
 Improved
 ########
@@ -39,6 +47,7 @@ Improved
 - Two new properties were added to :ref:`algm-Integration` *RangeLowerList* and *RangeUpperList* can be used to give histogram-specific integration ranges.
 - :ref:`algm-FindEPP` does not output the two extra workspaces from the :ref:`algm-Fit` anymore.
 - :ref:`ApplyDetailedBalance <algm-ApplyDetailedBalance>`: User can select the dynamic susceptibility versus energy or frequency.
+- :ref:`PredictPeaks <algm-PredictPeaks-v1>` is now faster on instruments that do not have rectangular detectors. The speed up with vary from instrument to instrument, but for CORELLI this was shown to reduce execution time from ~64 mins to < 1 min.
 - :ref:`MergeRuns <algm-MergeRuns>` now has a sum option and more control over failure when binning is different or sample logs do not match.
 - Made it possible for LiveListeners to read properties from the calling Algorithm. This gives greater flexiblity for authors of LiveListener plugins.
 - Improved verification of IDFs
@@ -50,6 +59,7 @@ Improved
 - Improved parallel scaling of :ref:`MDNormSCD <algm-MDNormSCD>` with > 4 cores.
 - Improved parallel scaling of :ref:`MDNormDirectSCD <algm-MDNormDirectSC>` with > 4 cores.
 - Reduced execution time of ``EventList::sortTof`` by over 2x, improving performance in algorithms such as :ref:`algm-CompressEvents` and :ref:`algm-SortEvents` which call it.
+- :ref:`LoadDNSLegacy <algm-LoadDNSLegacy-v1>` can now read the TOF data. CoilCurrentsTable is now optional. The default coil currents are now in the instrument parameters file.
 - :ref:`LoadNexusProcessed <algm-LoadNexusProcessed>` is now approximately 33x faster when loading a ``PeaksWorkspace`` with a large instrument attached.
 
 Bug Fixes
@@ -62,12 +72,6 @@ Bug Fixes
 - Fixed an issue with the ``GroupingPattern`` property in :ref:`algm-GroupDetectors`, where incorrect spectra were being used if spectrum numbers are not 1-based indices.
 - Fixed an issue with :ref:`algm-CreateWorkspace` where giving bin edges as ``VerticalAxisValues`` would fail.
 
-Deprecated
-##########
-
-MD Algorithms (VATES CLI)
-#########################
-
 Performance
 -----------
 
@@ -80,6 +84,7 @@ Bugs
 
 - We have fixed a bug where Mantid could crash when deleting a large number of workspaces.
 - Fixed a bug in :ref:`ConvertToMD <algm-ConvertToMD>` causing it to fail with the error "Run::storeEnergyBinBoundaries - Inconsistent start & end values" if monitors were all NaN, Inf, or zero.
+- Fixed a bug in illuminated volume calculation which could make :ref:`MonteCarloAbsorption <algm-MonteCarloAbsorption>` fail.
 
 CurveFitting
 ------------
@@ -91,11 +96,6 @@ Improved
 
 - :ref:`IkedaCarpenterPV <func-IkedaCarpenterPV>` now constrains all parameters to be non-negative which helps the fits converge faster and produces better fits.
 
-LiveData
---------
-
-- A new live listener for event data, `KafkaEventListener`, has been added. This is in development for the ESS and ISIS. It is only available on IBEX instruments at ISIS.
-
 Python
 ------
 
@@ -130,14 +130,6 @@ Python
 - ``CrystalStructure``, ``UnitCell``, ``PointGroup``, and ``SpaceGroup`` all have better console printing
 - Fixed a bug on MDHistogramWorkspaces where passing an index larger than the size of the dimensions of the workspace to ``setSignalAt`` would crash Mantid.
 
-
-Python Algorithms
-#################
-
-- :class:`mantid.api.DataProcessorAlgorithm` now have a new method
-  ``copyProperties()`` which allow them to copy properties (with
-  defaults, validators, and documentation) from other algorithms.
-
 Full list of
 `Framework <http://github.com/mantidproject/mantid/pulls?q=is%3Apr+milestone%3A%22Release+3.10%22+is%3Amerged+label%3A%22Component%3A+Framework%22>`__
 and
diff --git a/docs/source/release/v3.10.0/index.rst b/docs/source/release/v3.10.0/index.rst
index a856a7fa0f5f2315cd445d408dd96feda96584d9..c38ecf04ee7641389faaed841699b668b39d01f2 100644
--- a/docs/source/release/v3.10.0/index.rst
+++ b/docs/source/release/v3.10.0/index.rst
@@ -37,6 +37,10 @@ Citation
 
 Please cite any usage of Mantid as follows:
 
+- *O. Arnold, et al., Mantid—Data analysis and visualization package for neutron scattering and μSR experiments, Nuclear Instruments and Methods in Physics Research Section A, Volume 764, 11 November 2014, Pages 156-166*, doi: `10.1016/j.nima.2014.07.029 <http://dx.doi.org/10.1016/j.nima.2014.07.029>`_
+
+If you want to cite this specific release please use:
+
 - *Mantid 3.10: Manipulation and Analysis Toolkit for Instrument Data.; Mantid Project*. doi: http://dx.doi.org/10.5286/SOFTWARE/MANTID3.10
 
 Changes
diff --git a/docs/source/release/v3.10.0/indirect_inelastic.rst b/docs/source/release/v3.10.0/indirect_inelastic.rst
index 3b7dc52115c4719f0cbd57608c9fd9b65bd6d08e..a9f39f7b5fe2bba3ffd316664b37bbf9ebba3764 100644
--- a/docs/source/release/v3.10.0/indirect_inelastic.rst
+++ b/docs/source/release/v3.10.0/indirect_inelastic.rst
@@ -55,6 +55,10 @@ Improvements
 - OSIRIS diffraction now rebins container workspaces to match the sample workspace
 - :ref:`ISISIndirectDiffractionReduction <algm-ISISIndirectDiffractionReduction>` now fully supports VESUVIO data
 - Inelastic pixel ID's in BASIS instrument definition file grouped into continuous physical pixels.
+- Reduced number of workspaces produced by VESUVIO scripts
+- Added SortXAxis to Bayes Quasi and Stretch
+- Removed error bars as default
+
 
 
 Bugfixes
@@ -64,5 +68,6 @@ Bugfixes
 - *Abins*:  fix setting very small off-diagonal elements of b tensors
 - Fix errors from calling Rebin from VisionReduction.
 - Fixed validation of inputs in *CalculatePaalmanPings*
+- IN16_Definition.xml has been updated with a Monitor ID change from 19 to 29 to fix a duplicate identity issue
 
 `Full list of changes on GitHub <http://github.com/mantidproject/mantid/pulls?q=is%3Apr+milestone%3A%22Release+3.10%22+is%3Amerged+label%3A%22Component%3A+Indirect+Inelastic%22>`_
diff --git a/docs/source/release/v3.10.0/muon.rst b/docs/source/release/v3.10.0/muon.rst
index 80f4cefe6d7efa7374fde89ba261d9129a004b82..5427a663a81574639a921361e2c32e07aec9271e 100644
--- a/docs/source/release/v3.10.0/muon.rst
+++ b/docs/source/release/v3.10.0/muon.rst
@@ -9,7 +9,7 @@ Interfaces
 ----------
 Muon Analysis
 -  The new algorithms :ref:`EstimateMuonAsymmetryFromCounts <algm-EstimateMuonAsymmetryFromCounts-v1>`: and :ref:`CalculateMuonAsymmetry <algm-CalculateMuonAsymmetry-v1>` are now used in the muon analysis GUI.
-
+-  The main part of the multiple fitting GUI has been upgraded to be more user friendly.
 
 
 - Fixed a bug that meant transverse field asymmetry data was normalized to bin width. 
diff --git a/docs/source/release/v3.10.0/reflectometry.rst b/docs/source/release/v3.10.0/reflectometry.rst
index 3aa96ddd641ff9f6df311dc3b48e300cd5689693..985753f7975994d59f86e72fd30b12edf9084115 100644
--- a/docs/source/release/v3.10.0/reflectometry.rst
+++ b/docs/source/release/v3.10.0/reflectometry.rst
@@ -12,10 +12,12 @@ Algorithms
 
 - :ref:`algm-SpecularReflectionPositionCorrect` - fixed a bug where entering
   an invalid detector or sample name would cause a segmentation fault.
-- The :ref:`algm-SpecularReflectionPositionCorrect` algorithm has a new property, ``DetectorCorrectionType``,
+- The :ref:`algm-SpecularReflectionPositionCorrect` algorithm has a new property, ``DetectorCorrectionType``, 
   which specifies whether detector positions should be corrected by a vertical  shift (default) or by a rotation around the sample position.
 - :ref:`algm-ReflectometryReductionOneAuto-v2` and :ref:`algm-CreateTransmissionWorkspaceAuto-v2` attempts to populate properties `StartOverlap` and `EndOverlap` with values from the IDF.
 - :ref:`algm-GroupDetectors-v2` peforms a more resilient validation of grouping pattern that is less likely to throw an exception.
+- :ref:`algm-ReflectometryReductionOneAuto-v2` - fixed a bug where processing instructions were not applied correctly to the specified transmission run.
+- :ref:`algm-ReflectometryReductionOne-v2` and :ref:`algm-ReflectometryReductionOneAuto-v2` have a new property, ``SummationType``, which specifies whether summation should be done in wavelength (default) or in Q. For summation in Q, there is an additional new property, ``ReductionType``, which should be used to specify whether the reduction is for a divergent beam or non-flat sample.
 
 ConvertToReflectometryQ
 -----------------------
@@ -39,14 +41,15 @@ ISIS Reflectometry
   - Ctrl+X copies the selected row(s) to the clipboard and deletes them.
 
 - A brief description about the columns in the table can be now accessed by using the *What's this* tool (last tool in the toolbar) and clicking on the column headers.
-- Added two more time slicing options in the 'Event Handling' tab for analysing event data - Uniform Even and Uniform slicing.
-- For custom slicing (and new slicing options), workspace slices are now identified by an index (e.g. ws_slice_0) instead of a start/stop time.
+- Added three more time slicing options in the 'Event Handling' tab for analysing event data - Uniform Even, Uniform and Log Value slicing.
+- For custom slicing (and new slicing options), workspace slices are now identified by an index (e.g. ws_slice_0) instead of a start/stop value.
 - The 'Get Defaults` button for 'Experiment Settings' in the 'Settings' tab now populates `StartOverlap` and `EndOverlap` text boxes with values from the IDF.
 
 ISIS Reflectometry (Old)
 ########################
 
 - Interface `ISIS Reflectometry` has been renamed to `ISIS Reflectometry (Old)`.
+- Fixed a bug where the stitched output was not scaled correctly.
 
 |
 
diff --git a/docs/source/release/v3.10.0/sans.rst b/docs/source/release/v3.10.0/sans.rst
index ac6e98c873e7093c56f88a353d3d7c20dab02e3f..b0a24fc8abfa868d60d02d639f98f65d166b5d31 100644
--- a/docs/source/release/v3.10.0/sans.rst
+++ b/docs/source/release/v3.10.0/sans.rst
@@ -5,6 +5,18 @@ SANS Changes
 .. contents:: Table of Contents
    :local:
 
+
+ISIS SANS
+---------
+
+A new reduction backend was added. It improves considerably on performance, robustness and maintainability. Currently the new reduction backend can be used via a Python API. See `here <https://www.mantidproject.org/Scripting_SANS_Reductions_With_The_New_Reduction_Backend>`_  for more details on how to use the new reduction backend via this API.
+
+One of the most noteable improvements of this new reduction backend is an enhanced performance which is most prominent when running batch reductions.
+
+.. figure::  ../../images/SANSNewReductionBackendPerformance.png
+   :align: center
+
+
 Bug Fixes
 ---------
 - Fixed wrong first spectrum number for LARMOR. The first non-monitor spectrum number is 11, but it had been set to 10.
@@ -13,5 +25,7 @@ Bug Fixes
 - Fixed LOQ Batch mode bug where custom user file without a .txt ending was not being picked up.
 - Fixed Batch mode bug where the output name suffix was hardcoded to SANS2D. It now takes the individual instruments into account.
 - Fixed LOQ bug where prompt peak was not set correctly for monitor normalisation.
+- Fixed Batch mode bug where merged reductions set in the GUI were not respected.
+- Fixed display of current IDF, which was not updated when operating in batch mode.
 
 `Full list of changes on github <http://github.com/mantidproject/mantid/pulls?q=is%3Apr+milestone%3A%22Release+3.10%22+is%3Amerged+label%3A%22Component%3A+SANS%22>`__
diff --git a/docs/source/release/v3.10.0/ui.rst b/docs/source/release/v3.10.0/ui.rst
index f700a2410ee822b2e500cc961b725b836db558f1..97e0d2a26cfc1285a5e4989a6487d126a12b11e2 100644
--- a/docs/source/release/v3.10.0/ui.rst
+++ b/docs/source/release/v3.10.0/ui.rst
@@ -11,8 +11,7 @@ Installation
 Windows
 #######
 
-OS X
-####
+- At RAL administrative 03 accounts have been introduced.  As before Mantid does not need administrator access to be installed into a windows PC, so you do not need to use your 03 account.  Just use your normal account to install Mantid.  The trade off here is that it will only install desktop icons in the account you use to install it, if you need them in another account you can run the installer again from that account.
 
 User Interface
 --------------
@@ -22,19 +21,42 @@ User Interface
 - A new Print button has been added to the MantidPlot help window.
 - Masked bins are greyed out in the table view of the workspaces (except for EventWorkspaces):
 
-.. figure:: ../../images/maskedbins.jpg  
+.. figure:: ../../images/maskedbins.jpg     
+   :class: screenshot
+   :width: 500px
+   :align: right
 
 Instrument View
 ###############
- - Added the ability to visualise peaks generated by :ref:`algm-PredictPeaks` which fall off detectors.
- - Added the ability to zoom out on an unwrapped view.
- - Fixed a bug preventing the some of the banks from being visible when using a U correction.
- - Fixed a bug where pressing delete would delete a workspace even when the dock was not focused.
- - Fixed a bug where the user would not be prompted before deleting workspaces even if confirmations were turned on.
+
+- Added the ability to visualise peaks generated by :ref:`algm-PredictPeaks` which fall off detectors.
+- Added the ability to zoom out on an unwrapped view.
+- Fixed a bug preventing the some of the banks from being visible when using a U correction.
+- Fixed a bug where pressing delete would delete a workspace even when the dock was not focused.
+- Fixed a bug where the user would not be prompted before deleting workspaces even if confirmations were turned on.
 
 Plotting Improvements
 #####################
 
+- Surface, Contour, Waterfall, 1D and Tiled plotting of workspaces are now available from one dialog box (Plot Advanced) got from right-click menu of a workspace selection.
+- The log value facilities for Surface and Contour plot are now available for Waterfall and 1D plots, where they appear in the legend.
+
+.. figure:: ../../images/ArtRightGUIWaterfallCustom2sp1.PNG
+   :class: screenshot
+   :width: 294px
+   :align: right
+
+Here are a couple of plots with "Temp" selected as the log:
+
+.. figure:: ../../images/ArtWaterfallT1.PNG
+
+.. figure:: ../../images/ArtSurfacePlotT1.PNG
+
+More details `here <https://www.mantidproject.org/MBC_Displaying_data_in_multiple_workspaces>`_ .
+
+- Curves where all(Y) <= 0 are now not plotted when the Y-scale is set to logarithmic.
+  The previous behaviour assigned an arbitrary value of 0.1 which was confusing.
+
 Algorithm Toolbox
 #################
 
@@ -48,8 +70,22 @@ Scripting Window
 ################
 - Fixed a bug where Mantid would crash when trying to select the font for the script window
 
-Documentation
-#############
+SliceViewer Improvements
+########################
+- Fixed a bug where the rebin button was toggled when the user switch axes.
+- Changed zoom level on peak. Now when zooming onto a spherical or ellipsoidal peak, the entire peak is visible when using the default window size.
+- Fixed a bug where swapping the dimensions did not rebin the workspace despite having autorebin enabled.
+- Fixed a bug where swapping the dimensions did not draw the axis scale correctly.
+
+
+VSI Improvments
+###############
+- ParaView was updated to to `v5.3.0 <https://blog.kitware.com/paraview-5-3-0-release-notes/>`_.
+- The mapped array vtkMDHWSignalArray has been refactored to use the new vtkGenericDataArray class template. This interface minimizes virtual indirection and allows advanced compiler optimizations such as vectorization.
+- Minimize the number of times the workspace min and max values are calculated.
+- Threshold filter now reports progress to the user.
+- Add option to automatically choose a contrasting color for axes grid and colorbar.
+- Camera toolbar snaps to views along crystallographic axes,
 
 Custom Interfaces
 #################
@@ -63,21 +99,9 @@ Bugs Resolved
 
 - Fixed an issue in the Script Window that caused the Convert Tabs to Spaces and vice versa operations to corrupt the script.
 - Fixed an issue where some graphs not associated with a workspace would not be shown in the project save as view.
+- Fixed an issue where the Spectrum Viewer could crash when a workspace contained infinities.
+- Fixed an issue where contour lines were displayed at the wrong location.
 
-SliceViewer Improvements
-------------------------
-- Fixed a bug where the rebin button was toggled when the user switch axes.
-- Changed zoom level on peak. Now when zooming onto a spherical or ellipsoidal peak, the entire peak is visible when using the default window size.
-- Fixed a bug where swapping the dimensions did not rebin the workspace despite having autorebin enabled.
-
-VSI Improvments
----------------
-- ParaView was updated to to `v5.3.0 <https://blog.kitware.com/paraview-5-3-0-release-notes/>`_.
-- The mapped array vtkMDHWSignalArray has been refactored to use the new vtkGenericDataArray class template. This interface minimizes virtual indirection and allows advanced compiler optimizations such as vectorization.
-- Minimize the number of times the workspace min and max values are calculated.
-- Threshold filter now reports progress to the user.
-- Add option to automatically choose a contrasting color for axes grid and colorbar.
-- Camera toolbar snaps to views along crystallographic axes,
 
 |
 
diff --git a/instrument/D1B_Definition.xml b/instrument/D1B_Definition.xml
new file mode 100644
index 0000000000000000000000000000000000000000..d08bf7645808961197b0ec7017a3ebe7f5081b2b
--- /dev/null
+++ b/instrument/D1B_Definition.xml
@@ -0,0 +1,3910 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- For help on the notation used to specify an
+    Instrument Definition File see http://www.mantidproject.org/IDF
+    -->
+<instrument xmlns="http://www.mantidproject.org/IDF/1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 Schema/IDFSchema.xsd" name="D1B" valid-from="1900-01-31 23:59:59"
+valid-to="2100-01-31 23:59:59" last-modified="2017-02-27 17:25:52">
+  <!-- Author: vardanyan@ill.fr -->
+  <defaults>
+    <length unit="meter" />
+    <angle unit="degree" />
+    <reference-frame>
+      <!-- The z-axis is set parallel to and in the direction of the beam.
+        the y-axis points up and the coordinate system is right handed. -->
+      <along-beam axis="z" />
+      <pointing-up axis="y" />
+      <handedness val="right" />
+    </reference-frame>
+  </defaults>
+  <!-- Source position -->
+  <component type="monochromator">
+    <location z="-2.986" />
+  </component>
+  <type name="monochromator" is="Source">
+    <properties />
+  </type>
+  <!-- Monitor position -->
+  <component type="monitor" idlist="monitors">
+    <location z="-0.476" name="monitor" />
+  </component>
+  <type name="monitor" is="monitor">
+    <cuboid id="shape">
+      <left-front-bottom-point x="-0.005" y="-0.005" z="-0.005" />
+      <left-front-top-point x="-0.005" y="0.005" z="-0.005" />
+      <left-back-bottom-point x="-0.005" y="-0.005" z="0.005" />
+      <right-front-bottom-point x="0.005" y="-0.005" z="-0.005" />
+    </cuboid>
+    <algebra val="shape" />
+  </type>
+  <idlist idname="monitors">
+    <id val="0" />
+  </idlist>
+  <!-- Sample position -->
+  <component type="sample-position">
+    <location y="0.0" x="0.0" z="0.0" />
+  </component>
+  <type name="sample-position" is="SamplePos" />
+  <!-- Detector IDs -->
+  <idlist idname="detectors">
+    <id start="1" end="1280" />
+  </idlist>
+  <!-- Detector list def -->
+  <component type="detector" idlist="detectors">
+    <location name="detector" />
+  </component>
+  <!-- Detector Cells -->
+  <type name="detector">
+    <component type="cell">
+      <location name="cell_1" r="1.5" t="-0.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_2" r="1.5" t="-0.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_3" r="1.5" t="-1.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_4" r="1.5" t="-1.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_5" r="1.5" t="-1.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_6" r="1.5" t="-1.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_7" r="1.5" t="-1.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_8" r="1.5" t="-1.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_9" r="1.5" t="-1.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_10" r="1.5" t="-1.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_11" r="1.5" t="-1.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_12" r="1.5" t="-1.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_13" r="1.5" t="-2.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_14" r="1.5" t="-2.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_15" r="1.5" t="-2.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_16" r="1.5" t="-2.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_17" r="1.5" t="-2.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_18" r="1.5" t="-2.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_19" r="1.5" t="-2.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_20" r="1.5" t="-2.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_21" r="1.5" t="-2.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_22" r="1.5" t="-2.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_23" r="1.5" t="-3.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_24" r="1.5" t="-3.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_25" r="1.5" t="-3.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_26" r="1.5" t="-3.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_27" r="1.5" t="-3.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_28" r="1.5" t="-3.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_29" r="1.5" t="-3.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_30" r="1.5" t="-3.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_31" r="1.5" t="-3.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_32" r="1.5" t="-3.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_33" r="1.5" t="-4.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_34" r="1.5" t="-4.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_35" r="1.5" t="-4.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_36" r="1.5" t="-4.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_37" r="1.5" t="-4.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_38" r="1.5" t="-4.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_39" r="1.5" t="-4.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_40" r="1.5" t="-4.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_41" r="1.5" t="-4.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_42" r="1.5" t="-4.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_43" r="1.5" t="-5.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_44" r="1.5" t="-5.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_45" r="1.5" t="-5.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_46" r="1.5" t="-5.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_47" r="1.5" t="-5.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_48" r="1.5" t="-5.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_49" r="1.5" t="-5.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_50" r="1.5" t="-5.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_51" r="1.5" t="-5.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_52" r="1.5" t="-5.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_53" r="1.5" t="-6.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_54" r="1.5" t="-6.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_55" r="1.5" t="-6.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_56" r="1.5" t="-6.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_57" r="1.5" t="-6.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_58" r="1.5" t="-6.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_59" r="1.5" t="-6.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_60" r="1.5" t="-6.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_61" r="1.5" t="-6.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_62" r="1.5" t="-6.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_63" r="1.5" t="-7.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_64" r="1.5" t="-7.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_65" r="1.5" t="-7.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_66" r="1.5" t="-7.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_67" r="1.5" t="-7.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_68" r="1.5" t="-7.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_69" r="1.5" t="-7.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_70" r="1.5" t="-7.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_71" r="1.5" t="-7.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_72" r="1.5" t="-7.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_73" r="1.5" t="-8.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_74" r="1.5" t="-8.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_75" r="1.5" t="-8.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_76" r="1.5" t="-8.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_77" r="1.5" t="-8.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_78" r="1.5" t="-8.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_79" r="1.5" t="-8.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_80" r="1.5" t="-8.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_81" r="1.5" t="-8.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_82" r="1.5" t="-8.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_83" r="1.5" t="-9.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_84" r="1.5" t="-9.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_85" r="1.5" t="-9.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_86" r="1.5" t="-9.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_87" r="1.5" t="-9.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_88" r="1.5" t="-9.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_89" r="1.5" t="-9.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_90" r="1.5" t="-9.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_91" r="1.5" t="-9.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_92" r="1.5" t="-9.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_93" r="1.5" t="-10.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_94" r="1.5" t="-10.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_95" r="1.5" t="-10.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_96" r="1.5" t="-10.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_97" r="1.5" t="-10.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_98" r="1.5" t="-10.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_99" r="1.5" t="-10.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_100" r="1.5" t="-10.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_101" r="1.5" t="-10.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_102" r="1.5" t="-10.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_103" r="1.5" t="-11.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_104" r="1.5" t="-11.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_105" r="1.5" t="-11.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_106" r="1.5" t="-11.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_107" r="1.5" t="-11.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_108" r="1.5" t="-11.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_109" r="1.5" t="-11.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_110" r="1.5" t="-11.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_111" r="1.5" t="-11.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_112" r="1.5" t="-11.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_113" r="1.5" t="-12.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_114" r="1.5" t="-12.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_115" r="1.5" t="-12.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_116" r="1.5" t="-12.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_117" r="1.5" t="-12.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_118" r="1.5" t="-12.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_119" r="1.5" t="-12.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_120" r="1.5" t="-12.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_121" r="1.5" t="-12.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_122" r="1.5" t="-12.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_123" r="1.5" t="-13.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_124" r="1.5" t="-13.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_125" r="1.5" t="-13.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_126" r="1.5" t="-13.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_127" r="1.5" t="-13.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_128" r="1.5" t="-13.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_129" r="1.5" t="-13.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_130" r="1.5" t="-13.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_131" r="1.5" t="-13.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_132" r="1.5" t="-13.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_133" r="1.5" t="-14.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_134" r="1.5" t="-14.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_135" r="1.5" t="-14.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_136" r="1.5" t="-14.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_137" r="1.5" t="-14.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_138" r="1.5" t="-14.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_139" r="1.5" t="-14.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_140" r="1.5" t="-14.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_141" r="1.5" t="-14.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_142" r="1.5" t="-14.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_143" r="1.5" t="-15.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_144" r="1.5" t="-15.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_145" r="1.5" t="-15.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_146" r="1.5" t="-15.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_147" r="1.5" t="-15.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_148" r="1.5" t="-15.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_149" r="1.5" t="-15.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_150" r="1.5" t="-15.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_151" r="1.5" t="-15.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_152" r="1.5" t="-15.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_153" r="1.5" t="-16.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_154" r="1.5" t="-16.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_155" r="1.5" t="-16.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_156" r="1.5" t="-16.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_157" r="1.5" t="-16.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_158" r="1.5" t="-16.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_159" r="1.5" t="-16.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_160" r="1.5" t="-16.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_161" r="1.5" t="-16.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_162" r="1.5" t="-16.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_163" r="1.5" t="-17.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_164" r="1.5" t="-17.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_165" r="1.5" t="-17.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_166" r="1.5" t="-17.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_167" r="1.5" t="-17.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_168" r="1.5" t="-17.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_169" r="1.5" t="-17.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_170" r="1.5" t="-17.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_171" r="1.5" t="-17.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_172" r="1.5" t="-17.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_173" r="1.5" t="-18.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_174" r="1.5" t="-18.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_175" r="1.5" t="-18.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_176" r="1.5" t="-18.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_177" r="1.5" t="-18.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_178" r="1.5" t="-18.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_179" r="1.5" t="-18.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_180" r="1.5" t="-18.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_181" r="1.5" t="-18.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_182" r="1.5" t="-18.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_183" r="1.5" t="-19.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_184" r="1.5" t="-19.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_185" r="1.5" t="-19.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_186" r="1.5" t="-19.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_187" r="1.5" t="-19.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_188" r="1.5" t="-19.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_189" r="1.5" t="-19.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_190" r="1.5" t="-19.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_191" r="1.5" t="-19.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_192" r="1.5" t="-19.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_193" r="1.5" t="-20.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_194" r="1.5" t="-20.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_195" r="1.5" t="-20.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_196" r="1.5" t="-20.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_197" r="1.5" t="-20.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_198" r="1.5" t="-20.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_199" r="1.5" t="-20.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_200" r="1.5" t="-20.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_201" r="1.5" t="-20.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_202" r="1.5" t="-20.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_203" r="1.5" t="-21.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_204" r="1.5" t="-21.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_205" r="1.5" t="-21.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_206" r="1.5" t="-21.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_207" r="1.5" t="-21.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_208" r="1.5" t="-21.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_209" r="1.5" t="-21.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_210" r="1.5" t="-21.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_211" r="1.5" t="-21.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_212" r="1.5" t="-21.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_213" r="1.5" t="-22.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_214" r="1.5" t="-22.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_215" r="1.5" t="-22.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_216" r="1.5" t="-22.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_217" r="1.5" t="-22.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_218" r="1.5" t="-22.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_219" r="1.5" t="-22.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_220" r="1.5" t="-22.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_221" r="1.5" t="-22.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_222" r="1.5" t="-22.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_223" r="1.5" t="-23.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_224" r="1.5" t="-23.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_225" r="1.5" t="-23.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_226" r="1.5" t="-23.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_227" r="1.5" t="-23.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_228" r="1.5" t="-23.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_229" r="1.5" t="-23.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_230" r="1.5" t="-23.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_231" r="1.5" t="-23.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_232" r="1.5" t="-23.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_233" r="1.5" t="-24.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_234" r="1.5" t="-24.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_235" r="1.5" t="-24.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_236" r="1.5" t="-24.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_237" r="1.5" t="-24.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_238" r="1.5" t="-24.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_239" r="1.5" t="-24.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_240" r="1.5" t="-24.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_241" r="1.5" t="-24.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_242" r="1.5" t="-24.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_243" r="1.5" t="-25.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_244" r="1.5" t="-25.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_245" r="1.5" t="-25.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_246" r="1.5" t="-25.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_247" r="1.5" t="-25.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_248" r="1.5" t="-25.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_249" r="1.5" t="-25.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_250" r="1.5" t="-25.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_251" r="1.5" t="-25.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_252" r="1.5" t="-25.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_253" r="1.5" t="-26.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_254" r="1.5" t="-26.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_255" r="1.5" t="-26.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_256" r="1.5" t="-26.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_257" r="1.5" t="-26.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_258" r="1.5" t="-26.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_259" r="1.5" t="-26.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_260" r="1.5" t="-26.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_261" r="1.5" t="-26.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_262" r="1.5" t="-26.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_263" r="1.5" t="-27.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_264" r="1.5" t="-27.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_265" r="1.5" t="-27.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_266" r="1.5" t="-27.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_267" r="1.5" t="-27.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_268" r="1.5" t="-27.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_269" r="1.5" t="-27.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_270" r="1.5" t="-27.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_271" r="1.5" t="-27.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_272" r="1.5" t="-27.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_273" r="1.5" t="-28.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_274" r="1.5" t="-28.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_275" r="1.5" t="-28.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_276" r="1.5" t="-28.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_277" r="1.5" t="-28.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_278" r="1.5" t="-28.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_279" r="1.5" t="-28.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_280" r="1.5" t="-28.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_281" r="1.5" t="-28.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_282" r="1.5" t="-28.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_283" r="1.5" t="-29.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_284" r="1.5" t="-29.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_285" r="1.5" t="-29.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_286" r="1.5" t="-29.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_287" r="1.5" t="-29.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_288" r="1.5" t="-29.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_289" r="1.5" t="-29.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_290" r="1.5" t="-29.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_291" r="1.5" t="-29.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_292" r="1.5" t="-29.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_293" r="1.5" t="-30.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_294" r="1.5" t="-30.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_295" r="1.5" t="-30.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_296" r="1.5" t="-30.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_297" r="1.5" t="-30.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_298" r="1.5" t="-30.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_299" r="1.5" t="-30.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_300" r="1.5" t="-30.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_301" r="1.5" t="-30.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_302" r="1.5" t="-30.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_303" r="1.5" t="-31.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_304" r="1.5" t="-31.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_305" r="1.5" t="-31.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_306" r="1.5" t="-31.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_307" r="1.5" t="-31.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_308" r="1.5" t="-31.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_309" r="1.5" t="-31.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_310" r="1.5" t="-31.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_311" r="1.5" t="-31.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_312" r="1.5" t="-31.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_313" r="1.5" t="-32.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_314" r="1.5" t="-32.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_315" r="1.5" t="-32.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_316" r="1.5" t="-32.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_317" r="1.5" t="-32.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_318" r="1.5" t="-32.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_319" r="1.5" t="-32.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_320" r="1.5" t="-32.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_321" r="1.5" t="-32.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_322" r="1.5" t="-32.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_323" r="1.5" t="-33.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_324" r="1.5" t="-33.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_325" r="1.5" t="-33.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_326" r="1.5" t="-33.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_327" r="1.5" t="-33.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_328" r="1.5" t="-33.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_329" r="1.5" t="-33.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_330" r="1.5" t="-33.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_331" r="1.5" t="-33.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_332" r="1.5" t="-33.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_333" r="1.5" t="-34.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_334" r="1.5" t="-34.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_335" r="1.5" t="-34.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_336" r="1.5" t="-34.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_337" r="1.5" t="-34.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_338" r="1.5" t="-34.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_339" r="1.5" t="-34.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_340" r="1.5" t="-34.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_341" r="1.5" t="-34.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_342" r="1.5" t="-34.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_343" r="1.5" t="-35.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_344" r="1.5" t="-35.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_345" r="1.5" t="-35.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_346" r="1.5" t="-35.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_347" r="1.5" t="-35.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_348" r="1.5" t="-35.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_349" r="1.5" t="-35.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_350" r="1.5" t="-35.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_351" r="1.5" t="-35.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_352" r="1.5" t="-35.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_353" r="1.5" t="-36.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_354" r="1.5" t="-36.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_355" r="1.5" t="-36.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_356" r="1.5" t="-36.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_357" r="1.5" t="-36.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_358" r="1.5" t="-36.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_359" r="1.5" t="-36.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_360" r="1.5" t="-36.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_361" r="1.5" t="-36.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_362" r="1.5" t="-36.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_363" r="1.5" t="-37.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_364" r="1.5" t="-37.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_365" r="1.5" t="-37.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_366" r="1.5" t="-37.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_367" r="1.5" t="-37.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_368" r="1.5" t="-37.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_369" r="1.5" t="-37.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_370" r="1.5" t="-37.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_371" r="1.5" t="-37.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_372" r="1.5" t="-37.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_373" r="1.5" t="-38.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_374" r="1.5" t="-38.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_375" r="1.5" t="-38.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_376" r="1.5" t="-38.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_377" r="1.5" t="-38.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_378" r="1.5" t="-38.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_379" r="1.5" t="-38.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_380" r="1.5" t="-38.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_381" r="1.5" t="-38.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_382" r="1.5" t="-38.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_383" r="1.5" t="-39.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_384" r="1.5" t="-39.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_385" r="1.5" t="-39.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_386" r="1.5" t="-39.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_387" r="1.5" t="-39.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_388" r="1.5" t="-39.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_389" r="1.5" t="-39.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_390" r="1.5" t="-39.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_391" r="1.5" t="-39.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_392" r="1.5" t="-39.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_393" r="1.5" t="-40.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_394" r="1.5" t="-40.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_395" r="1.5" t="-40.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_396" r="1.5" t="-40.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_397" r="1.5" t="-40.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_398" r="1.5" t="-40.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_399" r="1.5" t="-40.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_400" r="1.5" t="-40.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_401" r="1.5" t="-40.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_402" r="1.5" t="-40.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_403" r="1.5" t="-41.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_404" r="1.5" t="-41.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_405" r="1.5" t="-41.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_406" r="1.5" t="-41.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_407" r="1.5" t="-41.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_408" r="1.5" t="-41.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_409" r="1.5" t="-41.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_410" r="1.5" t="-41.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_411" r="1.5" t="-41.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_412" r="1.5" t="-41.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_413" r="1.5" t="-42.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_414" r="1.5" t="-42.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_415" r="1.5" t="-42.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_416" r="1.5" t="-42.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_417" r="1.5" t="-42.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_418" r="1.5" t="-42.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_419" r="1.5" t="-42.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_420" r="1.5" t="-42.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_421" r="1.5" t="-42.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_422" r="1.5" t="-42.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_423" r="1.5" t="-43.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_424" r="1.5" t="-43.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_425" r="1.5" t="-43.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_426" r="1.5" t="-43.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_427" r="1.5" t="-43.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_428" r="1.5" t="-43.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_429" r="1.5" t="-43.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_430" r="1.5" t="-43.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_431" r="1.5" t="-43.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_432" r="1.5" t="-43.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_433" r="1.5" t="-44.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_434" r="1.5" t="-44.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_435" r="1.5" t="-44.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_436" r="1.5" t="-44.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_437" r="1.5" t="-44.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_438" r="1.5" t="-44.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_439" r="1.5" t="-44.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_440" r="1.5" t="-44.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_441" r="1.5" t="-44.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_442" r="1.5" t="-44.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_443" r="1.5" t="-45.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_444" r="1.5" t="-45.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_445" r="1.5" t="-45.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_446" r="1.5" t="-45.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_447" r="1.5" t="-45.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_448" r="1.5" t="-45.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_449" r="1.5" t="-45.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_450" r="1.5" t="-45.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_451" r="1.5" t="-45.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_452" r="1.5" t="-45.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_453" r="1.5" t="-46.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_454" r="1.5" t="-46.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_455" r="1.5" t="-46.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_456" r="1.5" t="-46.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_457" r="1.5" t="-46.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_458" r="1.5" t="-46.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_459" r="1.5" t="-46.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_460" r="1.5" t="-46.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_461" r="1.5" t="-46.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_462" r="1.5" t="-46.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_463" r="1.5" t="-47.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_464" r="1.5" t="-47.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_465" r="1.5" t="-47.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_466" r="1.5" t="-47.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_467" r="1.5" t="-47.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_468" r="1.5" t="-47.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_469" r="1.5" t="-47.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_470" r="1.5" t="-47.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_471" r="1.5" t="-47.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_472" r="1.5" t="-47.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_473" r="1.5" t="-48.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_474" r="1.5" t="-48.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_475" r="1.5" t="-48.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_476" r="1.5" t="-48.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_477" r="1.5" t="-48.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_478" r="1.5" t="-48.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_479" r="1.5" t="-48.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_480" r="1.5" t="-48.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_481" r="1.5" t="-48.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_482" r="1.5" t="-48.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_483" r="1.5" t="-49.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_484" r="1.5" t="-49.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_485" r="1.5" t="-49.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_486" r="1.5" t="-49.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_487" r="1.5" t="-49.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_488" r="1.5" t="-49.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_489" r="1.5" t="-49.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_490" r="1.5" t="-49.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_491" r="1.5" t="-49.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_492" r="1.5" t="-49.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_493" r="1.5" t="-50.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_494" r="1.5" t="-50.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_495" r="1.5" t="-50.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_496" r="1.5" t="-50.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_497" r="1.5" t="-50.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_498" r="1.5" t="-50.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_499" r="1.5" t="-50.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_500" r="1.5" t="-50.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_501" r="1.5" t="-50.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_502" r="1.5" t="-50.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_503" r="1.5" t="-51.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_504" r="1.5" t="-51.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_505" r="1.5" t="-51.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_506" r="1.5" t="-51.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_507" r="1.5" t="-51.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_508" r="1.5" t="-51.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_509" r="1.5" t="-51.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_510" r="1.5" t="-51.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_511" r="1.5" t="-51.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_512" r="1.5" t="-51.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_513" r="1.5" t="-52.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_514" r="1.5" t="-52.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_515" r="1.5" t="-52.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_516" r="1.5" t="-52.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_517" r="1.5" t="-52.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_518" r="1.5" t="-52.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_519" r="1.5" t="-52.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_520" r="1.5" t="-52.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_521" r="1.5" t="-52.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_522" r="1.5" t="-52.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_523" r="1.5" t="-53.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_524" r="1.5" t="-53.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_525" r="1.5" t="-53.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_526" r="1.5" t="-53.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_527" r="1.5" t="-53.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_528" r="1.5" t="-53.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_529" r="1.5" t="-53.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_530" r="1.5" t="-53.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_531" r="1.5" t="-53.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_532" r="1.5" t="-53.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_533" r="1.5" t="-54.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_534" r="1.5" t="-54.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_535" r="1.5" t="-54.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_536" r="1.5" t="-54.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_537" r="1.5" t="-54.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_538" r="1.5" t="-54.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_539" r="1.5" t="-54.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_540" r="1.5" t="-54.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_541" r="1.5" t="-54.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_542" r="1.5" t="-54.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_543" r="1.5" t="-55.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_544" r="1.5" t="-55.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_545" r="1.5" t="-55.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_546" r="1.5" t="-55.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_547" r="1.5" t="-55.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_548" r="1.5" t="-55.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_549" r="1.5" t="-55.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_550" r="1.5" t="-55.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_551" r="1.5" t="-55.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_552" r="1.5" t="-55.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_553" r="1.5" t="-56.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_554" r="1.5" t="-56.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_555" r="1.5" t="-56.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_556" r="1.5" t="-56.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_557" r="1.5" t="-56.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_558" r="1.5" t="-56.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_559" r="1.5" t="-56.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_560" r="1.5" t="-56.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_561" r="1.5" t="-56.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_562" r="1.5" t="-56.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_563" r="1.5" t="-57.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_564" r="1.5" t="-57.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_565" r="1.5" t="-57.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_566" r="1.5" t="-57.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_567" r="1.5" t="-57.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_568" r="1.5" t="-57.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_569" r="1.5" t="-57.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_570" r="1.5" t="-57.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_571" r="1.5" t="-57.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_572" r="1.5" t="-57.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_573" r="1.5" t="-58.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_574" r="1.5" t="-58.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_575" r="1.5" t="-58.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_576" r="1.5" t="-58.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_577" r="1.5" t="-58.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_578" r="1.5" t="-58.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_579" r="1.5" t="-58.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_580" r="1.5" t="-58.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_581" r="1.5" t="-58.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_582" r="1.5" t="-58.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_583" r="1.5" t="-59.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_584" r="1.5" t="-59.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_585" r="1.5" t="-59.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_586" r="1.5" t="-59.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_587" r="1.5" t="-59.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_588" r="1.5" t="-59.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_589" r="1.5" t="-59.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_590" r="1.5" t="-59.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_591" r="1.5" t="-59.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_592" r="1.5" t="-59.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_593" r="1.5" t="-60.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_594" r="1.5" t="-60.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_595" r="1.5" t="-60.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_596" r="1.5" t="-60.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_597" r="1.5" t="-60.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_598" r="1.5" t="-60.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_599" r="1.5" t="-60.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_600" r="1.5" t="-60.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_601" r="1.5" t="-60.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_602" r="1.5" t="-60.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_603" r="1.5" t="-61.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_604" r="1.5" t="-61.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_605" r="1.5" t="-61.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_606" r="1.5" t="-61.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_607" r="1.5" t="-61.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_608" r="1.5" t="-61.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_609" r="1.5" t="-61.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_610" r="1.5" t="-61.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_611" r="1.5" t="-61.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_612" r="1.5" t="-61.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_613" r="1.5" t="-62.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_614" r="1.5" t="-62.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_615" r="1.5" t="-62.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_616" r="1.5" t="-62.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_617" r="1.5" t="-62.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_618" r="1.5" t="-62.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_619" r="1.5" t="-62.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_620" r="1.5" t="-62.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_621" r="1.5" t="-62.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_622" r="1.5" t="-62.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_623" r="1.5" t="-63.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_624" r="1.5" t="-63.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_625" r="1.5" t="-63.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_626" r="1.5" t="-63.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_627" r="1.5" t="-63.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_628" r="1.5" t="-63.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_629" r="1.5" t="-63.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_630" r="1.5" t="-63.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_631" r="1.5" t="-63.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_632" r="1.5" t="-63.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_633" r="1.5" t="-64.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_634" r="1.5" t="-64.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_635" r="1.5" t="-64.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_636" r="1.5" t="-64.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_637" r="1.5" t="-64.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_638" r="1.5" t="-64.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_639" r="1.5" t="-64.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_640" r="1.5" t="-64.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_641" r="1.5" t="-64.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_642" r="1.5" t="-64.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_643" r="1.5" t="-65.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_644" r="1.5" t="-65.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_645" r="1.5" t="-65.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_646" r="1.5" t="-65.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_647" r="1.5" t="-65.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_648" r="1.5" t="-65.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_649" r="1.5" t="-65.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_650" r="1.5" t="-65.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_651" r="1.5" t="-65.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_652" r="1.5" t="-65.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_653" r="1.5" t="-66.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_654" r="1.5" t="-66.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_655" r="1.5" t="-66.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_656" r="1.5" t="-66.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_657" r="1.5" t="-66.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_658" r="1.5" t="-66.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_659" r="1.5" t="-66.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_660" r="1.5" t="-66.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_661" r="1.5" t="-66.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_662" r="1.5" t="-66.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_663" r="1.5" t="-67.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_664" r="1.5" t="-67.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_665" r="1.5" t="-67.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_666" r="1.5" t="-67.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_667" r="1.5" t="-67.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_668" r="1.5" t="-67.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_669" r="1.5" t="-67.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_670" r="1.5" t="-67.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_671" r="1.5" t="-67.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_672" r="1.5" t="-67.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_673" r="1.5" t="-68.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_674" r="1.5" t="-68.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_675" r="1.5" t="-68.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_676" r="1.5" t="-68.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_677" r="1.5" t="-68.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_678" r="1.5" t="-68.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_679" r="1.5" t="-68.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_680" r="1.5" t="-68.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_681" r="1.5" t="-68.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_682" r="1.5" t="-68.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_683" r="1.5" t="-69.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_684" r="1.5" t="-69.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_685" r="1.5" t="-69.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_686" r="1.5" t="-69.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_687" r="1.5" t="-69.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_688" r="1.5" t="-69.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_689" r="1.5" t="-69.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_690" r="1.5" t="-69.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_691" r="1.5" t="-69.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_692" r="1.5" t="-69.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_693" r="1.5" t="-70.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_694" r="1.5" t="-70.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_695" r="1.5" t="-70.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_696" r="1.5" t="-70.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_697" r="1.5" t="-70.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_698" r="1.5" t="-70.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_699" r="1.5" t="-70.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_700" r="1.5" t="-70.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_701" r="1.5" t="-70.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_702" r="1.5" t="-70.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_703" r="1.5" t="-71.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_704" r="1.5" t="-71.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_705" r="1.5" t="-71.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_706" r="1.5" t="-71.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_707" r="1.5" t="-71.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_708" r="1.5" t="-71.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_709" r="1.5" t="-71.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_710" r="1.5" t="-71.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_711" r="1.5" t="-71.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_712" r="1.5" t="-71.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_713" r="1.5" t="-72.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_714" r="1.5" t="-72.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_715" r="1.5" t="-72.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_716" r="1.5" t="-72.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_717" r="1.5" t="-72.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_718" r="1.5" t="-72.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_719" r="1.5" t="-72.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_720" r="1.5" t="-72.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_721" r="1.5" t="-72.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_722" r="1.5" t="-72.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_723" r="1.5" t="-73.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_724" r="1.5" t="-73.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_725" r="1.5" t="-73.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_726" r="1.5" t="-73.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_727" r="1.5" t="-73.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_728" r="1.5" t="-73.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_729" r="1.5" t="-73.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_730" r="1.5" t="-73.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_731" r="1.5" t="-73.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_732" r="1.5" t="-73.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_733" r="1.5" t="-74.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_734" r="1.5" t="-74.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_735" r="1.5" t="-74.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_736" r="1.5" t="-74.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_737" r="1.5" t="-74.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_738" r="1.5" t="-74.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_739" r="1.5" t="-74.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_740" r="1.5" t="-74.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_741" r="1.5" t="-74.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_742" r="1.5" t="-74.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_743" r="1.5" t="-75.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_744" r="1.5" t="-75.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_745" r="1.5" t="-75.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_746" r="1.5" t="-75.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_747" r="1.5" t="-75.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_748" r="1.5" t="-75.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_749" r="1.5" t="-75.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_750" r="1.5" t="-75.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_751" r="1.5" t="-75.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_752" r="1.5" t="-75.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_753" r="1.5" t="-76.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_754" r="1.5" t="-76.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_755" r="1.5" t="-76.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_756" r="1.5" t="-76.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_757" r="1.5" t="-76.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_758" r="1.5" t="-76.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_759" r="1.5" t="-76.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_760" r="1.5" t="-76.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_761" r="1.5" t="-76.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_762" r="1.5" t="-76.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_763" r="1.5" t="-77.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_764" r="1.5" t="-77.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_765" r="1.5" t="-77.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_766" r="1.5" t="-77.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_767" r="1.5" t="-77.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_768" r="1.5" t="-77.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_769" r="1.5" t="-77.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_770" r="1.5" t="-77.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_771" r="1.5" t="-77.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_772" r="1.5" t="-77.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_773" r="1.5" t="-78.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_774" r="1.5" t="-78.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_775" r="1.5" t="-78.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_776" r="1.5" t="-78.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_777" r="1.5" t="-78.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_778" r="1.5" t="-78.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_779" r="1.5" t="-78.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_780" r="1.5" t="-78.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_781" r="1.5" t="-78.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_782" r="1.5" t="-78.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_783" r="1.5" t="-79.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_784" r="1.5" t="-79.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_785" r="1.5" t="-79.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_786" r="1.5" t="-79.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_787" r="1.5" t="-79.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_788" r="1.5" t="-79.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_789" r="1.5" t="-79.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_790" r="1.5" t="-79.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_791" r="1.5" t="-79.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_792" r="1.5" t="-79.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_793" r="1.5" t="-80.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_794" r="1.5" t="-80.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_795" r="1.5" t="-80.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_796" r="1.5" t="-80.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_797" r="1.5" t="-80.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_798" r="1.5" t="-80.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_799" r="1.5" t="-80.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_800" r="1.5" t="-80.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_801" r="1.5" t="-80.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_802" r="1.5" t="-80.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_803" r="1.5" t="-81.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_804" r="1.5" t="-81.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_805" r="1.5" t="-81.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_806" r="1.5" t="-81.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_807" r="1.5" t="-81.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_808" r="1.5" t="-81.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_809" r="1.5" t="-81.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_810" r="1.5" t="-81.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_811" r="1.5" t="-81.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_812" r="1.5" t="-81.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_813" r="1.5" t="-82.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_814" r="1.5" t="-82.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_815" r="1.5" t="-82.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_816" r="1.5" t="-82.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_817" r="1.5" t="-82.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_818" r="1.5" t="-82.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_819" r="1.5" t="-82.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_820" r="1.5" t="-82.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_821" r="1.5" t="-82.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_822" r="1.5" t="-82.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_823" r="1.5" t="-83.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_824" r="1.5" t="-83.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_825" r="1.5" t="-83.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_826" r="1.5" t="-83.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_827" r="1.5" t="-83.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_828" r="1.5" t="-83.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_829" r="1.5" t="-83.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_830" r="1.5" t="-83.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_831" r="1.5" t="-83.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_832" r="1.5" t="-83.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_833" r="1.5" t="-84.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_834" r="1.5" t="-84.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_835" r="1.5" t="-84.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_836" r="1.5" t="-84.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_837" r="1.5" t="-84.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_838" r="1.5" t="-84.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_839" r="1.5" t="-84.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_840" r="1.5" t="-84.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_841" r="1.5" t="-84.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_842" r="1.5" t="-84.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_843" r="1.5" t="-85.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_844" r="1.5" t="-85.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_845" r="1.5" t="-85.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_846" r="1.5" t="-85.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_847" r="1.5" t="-85.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_848" r="1.5" t="-85.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_849" r="1.5" t="-85.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_850" r="1.5" t="-85.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_851" r="1.5" t="-85.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_852" r="1.5" t="-85.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_853" r="1.5" t="-86.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_854" r="1.5" t="-86.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_855" r="1.5" t="-86.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_856" r="1.5" t="-86.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_857" r="1.5" t="-86.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_858" r="1.5" t="-86.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_859" r="1.5" t="-86.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_860" r="1.5" t="-86.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_861" r="1.5" t="-86.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_862" r="1.5" t="-86.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_863" r="1.5" t="-87.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_864" r="1.5" t="-87.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_865" r="1.5" t="-87.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_866" r="1.5" t="-87.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_867" r="1.5" t="-87.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_868" r="1.5" t="-87.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_869" r="1.5" t="-87.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_870" r="1.5" t="-87.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_871" r="1.5" t="-87.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_872" r="1.5" t="-87.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_873" r="1.5" t="-88.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_874" r="1.5" t="-88.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_875" r="1.5" t="-88.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_876" r="1.5" t="-88.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_877" r="1.5" t="-88.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_878" r="1.5" t="-88.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_879" r="1.5" t="-88.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_880" r="1.5" t="-88.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_881" r="1.5" t="-88.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_882" r="1.5" t="-88.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_883" r="1.5" t="-89.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_884" r="1.5" t="-89.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_885" r="1.5" t="-89.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_886" r="1.5" t="-89.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_887" r="1.5" t="-89.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_888" r="1.5" t="-89.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_889" r="1.5" t="-89.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_890" r="1.5" t="-89.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_891" r="1.5" t="-89.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_892" r="1.5" t="-89.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_893" r="1.5" t="-90.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_894" r="1.5" t="-90.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_895" r="1.5" t="-90.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_896" r="1.5" t="-90.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_897" r="1.5" t="-90.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_898" r="1.5" t="-90.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_899" r="1.5" t="-90.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_900" r="1.5" t="-90.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_901" r="1.5" t="-90.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_902" r="1.5" t="-90.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_903" r="1.5" t="-91.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_904" r="1.5" t="-91.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_905" r="1.5" t="-91.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_906" r="1.5" t="-91.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_907" r="1.5" t="-91.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_908" r="1.5" t="-91.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_909" r="1.5" t="-91.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_910" r="1.5" t="-91.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_911" r="1.5" t="-91.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_912" r="1.5" t="-91.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_913" r="1.5" t="-92.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_914" r="1.5" t="-92.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_915" r="1.5" t="-92.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_916" r="1.5" t="-92.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_917" r="1.5" t="-92.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_918" r="1.5" t="-92.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_919" r="1.5" t="-92.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_920" r="1.5" t="-92.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_921" r="1.5" t="-92.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_922" r="1.5" t="-92.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_923" r="1.5" t="-93.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_924" r="1.5" t="-93.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_925" r="1.5" t="-93.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_926" r="1.5" t="-93.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_927" r="1.5" t="-93.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_928" r="1.5" t="-93.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_929" r="1.5" t="-93.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_930" r="1.5" t="-93.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_931" r="1.5" t="-93.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_932" r="1.5" t="-93.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_933" r="1.5" t="-94.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_934" r="1.5" t="-94.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_935" r="1.5" t="-94.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_936" r="1.5" t="-94.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_937" r="1.5" t="-94.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_938" r="1.5" t="-94.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_939" r="1.5" t="-94.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_940" r="1.5" t="-94.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_941" r="1.5" t="-94.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_942" r="1.5" t="-94.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_943" r="1.5" t="-95.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_944" r="1.5" t="-95.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_945" r="1.5" t="-95.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_946" r="1.5" t="-95.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_947" r="1.5" t="-95.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_948" r="1.5" t="-95.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_949" r="1.5" t="-95.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_950" r="1.5" t="-95.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_951" r="1.5" t="-95.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_952" r="1.5" t="-95.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_953" r="1.5" t="-96.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_954" r="1.5" t="-96.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_955" r="1.5" t="-96.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_956" r="1.5" t="-96.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_957" r="1.5" t="-96.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_958" r="1.5" t="-96.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_959" r="1.5" t="-96.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_960" r="1.5" t="-96.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_961" r="1.5" t="-96.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_962" r="1.5" t="-96.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_963" r="1.5" t="-97.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_964" r="1.5" t="-97.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_965" r="1.5" t="-97.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_966" r="1.5" t="-97.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_967" r="1.5" t="-97.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_968" r="1.5" t="-97.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_969" r="1.5" t="-97.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_970" r="1.5" t="-97.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_971" r="1.5" t="-97.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_972" r="1.5" t="-97.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_973" r="1.5" t="-98.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_974" r="1.5" t="-98.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_975" r="1.5" t="-98.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_976" r="1.5" t="-98.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_977" r="1.5" t="-98.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_978" r="1.5" t="-98.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_979" r="1.5" t="-98.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_980" r="1.5" t="-98.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_981" r="1.5" t="-98.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_982" r="1.5" t="-98.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_983" r="1.5" t="-99.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_984" r="1.5" t="-99.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_985" r="1.5" t="-99.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_986" r="1.5" t="-99.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_987" r="1.5" t="-99.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_988" r="1.5" t="-99.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_989" r="1.5" t="-99.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_990" r="1.5" t="-99.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_991" r="1.5" t="-99.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_992" r="1.5" t="-99.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_993" r="1.5" t="-100.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_994" r="1.5" t="-100.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_995" r="1.5" t="-100.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_996" r="1.5" t="-100.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_997" r="1.5" t="-100.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_998" r="1.5" t="-100.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_999" r="1.5" t="-100.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1000" r="1.5" t="-100.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1001" r="1.5" t="-100.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1002" r="1.5" t="-100.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1003" r="1.5" t="-101.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1004" r="1.5" t="-101.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1005" r="1.5" t="-101.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1006" r="1.5" t="-101.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1007" r="1.5" t="-101.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1008" r="1.5" t="-101.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1009" r="1.5" t="-101.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1010" r="1.5" t="-101.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1011" r="1.5" t="-101.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1012" r="1.5" t="-101.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1013" r="1.5" t="-102.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1014" r="1.5" t="-102.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1015" r="1.5" t="-102.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1016" r="1.5" t="-102.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1017" r="1.5" t="-102.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1018" r="1.5" t="-102.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1019" r="1.5" t="-102.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1020" r="1.5" t="-102.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1021" r="1.5" t="-102.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1022" r="1.5" t="-102.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1023" r="1.5" t="-103.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1024" r="1.5" t="-103.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1025" r="1.5" t="-103.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1026" r="1.5" t="-103.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1027" r="1.5" t="-103.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1028" r="1.5" t="-103.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1029" r="1.5" t="-103.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1030" r="1.5" t="-103.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1031" r="1.5" t="-103.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1032" r="1.5" t="-103.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1033" r="1.5" t="-104.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1034" r="1.5" t="-104.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1035" r="1.5" t="-104.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1036" r="1.5" t="-104.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1037" r="1.5" t="-104.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1038" r="1.5" t="-104.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1039" r="1.5" t="-104.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1040" r="1.5" t="-104.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1041" r="1.5" t="-104.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1042" r="1.5" t="-104.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1043" r="1.5" t="-105.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1044" r="1.5" t="-105.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1045" r="1.5" t="-105.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1046" r="1.5" t="-105.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1047" r="1.5" t="-105.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1048" r="1.5" t="-105.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1049" r="1.5" t="-105.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1050" r="1.5" t="-105.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1051" r="1.5" t="-105.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1052" r="1.5" t="-105.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1053" r="1.5" t="-106.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1054" r="1.5" t="-106.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1055" r="1.5" t="-106.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1056" r="1.5" t="-106.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1057" r="1.5" t="-106.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1058" r="1.5" t="-106.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1059" r="1.5" t="-106.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1060" r="1.5" t="-106.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1061" r="1.5" t="-106.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1062" r="1.5" t="-106.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1063" r="1.5" t="-107.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1064" r="1.5" t="-107.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1065" r="1.5" t="-107.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1066" r="1.5" t="-107.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1067" r="1.5" t="-107.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1068" r="1.5" t="-107.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1069" r="1.5" t="-107.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1070" r="1.5" t="-107.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1071" r="1.5" t="-107.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1072" r="1.5" t="-107.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1073" r="1.5" t="-108.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1074" r="1.5" t="-108.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1075" r="1.5" t="-108.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1076" r="1.5" t="-108.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1077" r="1.5" t="-108.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1078" r="1.5" t="-108.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1079" r="1.5" t="-108.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1080" r="1.5" t="-108.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1081" r="1.5" t="-108.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1082" r="1.5" t="-108.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1083" r="1.5" t="-109.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1084" r="1.5" t="-109.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1085" r="1.5" t="-109.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1086" r="1.5" t="-109.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1087" r="1.5" t="-109.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1088" r="1.5" t="-109.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1089" r="1.5" t="-109.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1090" r="1.5" t="-109.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1091" r="1.5" t="-109.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1092" r="1.5" t="-109.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1093" r="1.5" t="-110.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1094" r="1.5" t="-110.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1095" r="1.5" t="-110.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1096" r="1.5" t="-110.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1097" r="1.5" t="-110.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1098" r="1.5" t="-110.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1099" r="1.5" t="-110.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1100" r="1.5" t="-110.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1101" r="1.5" t="-110.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1102" r="1.5" t="-110.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1103" r="1.5" t="-111.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1104" r="1.5" t="-111.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1105" r="1.5" t="-111.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1106" r="1.5" t="-111.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1107" r="1.5" t="-111.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1108" r="1.5" t="-111.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1109" r="1.5" t="-111.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1110" r="1.5" t="-111.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1111" r="1.5" t="-111.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1112" r="1.5" t="-111.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1113" r="1.5" t="-112.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1114" r="1.5" t="-112.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1115" r="1.5" t="-112.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1116" r="1.5" t="-112.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1117" r="1.5" t="-112.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1118" r="1.5" t="-112.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1119" r="1.5" t="-112.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1120" r="1.5" t="-112.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1121" r="1.5" t="-112.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1122" r="1.5" t="-112.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1123" r="1.5" t="-113.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1124" r="1.5" t="-113.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1125" r="1.5" t="-113.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1126" r="1.5" t="-113.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1127" r="1.5" t="-113.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1128" r="1.5" t="-113.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1129" r="1.5" t="-113.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1130" r="1.5" t="-113.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1131" r="1.5" t="-113.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1132" r="1.5" t="-113.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1133" r="1.5" t="-114.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1134" r="1.5" t="-114.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1135" r="1.5" t="-114.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1136" r="1.5" t="-114.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1137" r="1.5" t="-114.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1138" r="1.5" t="-114.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1139" r="1.5" t="-114.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1140" r="1.5" t="-114.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1141" r="1.5" t="-114.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1142" r="1.5" t="-114.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1143" r="1.5" t="-115.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1144" r="1.5" t="-115.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1145" r="1.5" t="-115.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1146" r="1.5" t="-115.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1147" r="1.5" t="-115.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1148" r="1.5" t="-115.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1149" r="1.5" t="-115.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1150" r="1.5" t="-115.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1151" r="1.5" t="-115.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1152" r="1.5" t="-115.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1153" r="1.5" t="-116.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1154" r="1.5" t="-116.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1155" r="1.5" t="-116.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1156" r="1.5" t="-116.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1157" r="1.5" t="-116.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1158" r="1.5" t="-116.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1159" r="1.5" t="-116.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1160" r="1.5" t="-116.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1161" r="1.5" t="-116.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1162" r="1.5" t="-116.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1163" r="1.5" t="-117.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1164" r="1.5" t="-117.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1165" r="1.5" t="-117.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1166" r="1.5" t="-117.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1167" r="1.5" t="-117.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1168" r="1.5" t="-117.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1169" r="1.5" t="-117.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1170" r="1.5" t="-117.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1171" r="1.5" t="-117.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1172" r="1.5" t="-117.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1173" r="1.5" t="-118.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1174" r="1.5" t="-118.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1175" r="1.5" t="-118.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1176" r="1.5" t="-118.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1177" r="1.5" t="-118.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1178" r="1.5" t="-118.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1179" r="1.5" t="-118.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1180" r="1.5" t="-118.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1181" r="1.5" t="-118.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1182" r="1.5" t="-118.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1183" r="1.5" t="-119.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1184" r="1.5" t="-119.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1185" r="1.5" t="-119.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1186" r="1.5" t="-119.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1187" r="1.5" t="-119.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1188" r="1.5" t="-119.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1189" r="1.5" t="-119.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1190" r="1.5" t="-119.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1191" r="1.5" t="-119.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1192" r="1.5" t="-119.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1193" r="1.5" t="-120.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1194" r="1.5" t="-120.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1195" r="1.5" t="-120.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1196" r="1.5" t="-120.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1197" r="1.5" t="-120.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1198" r="1.5" t="-120.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1199" r="1.5" t="-120.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1200" r="1.5" t="-120.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1201" r="1.5" t="-120.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1202" r="1.5" t="-120.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1203" r="1.5" t="-121.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1204" r="1.5" t="-121.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1205" r="1.5" t="-121.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1206" r="1.5" t="-121.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1207" r="1.5" t="-121.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1208" r="1.5" t="-121.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1209" r="1.5" t="-121.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1210" r="1.5" t="-121.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1211" r="1.5" t="-121.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1212" r="1.5" t="-121.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1213" r="1.5" t="-122.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1214" r="1.5" t="-122.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1215" r="1.5" t="-122.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1216" r="1.5" t="-122.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1217" r="1.5" t="-122.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1218" r="1.5" t="-122.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1219" r="1.5" t="-122.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1220" r="1.5" t="-122.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1221" r="1.5" t="-122.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1222" r="1.5" t="-122.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1223" r="1.5" t="-123.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1224" r="1.5" t="-123.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1225" r="1.5" t="-123.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1226" r="1.5" t="-123.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1227" r="1.5" t="-123.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1228" r="1.5" t="-123.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1229" r="1.5" t="-123.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1230" r="1.5" t="-123.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1231" r="1.5" t="-123.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1232" r="1.5" t="-123.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1233" r="1.5" t="-124.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1234" r="1.5" t="-124.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1235" r="1.5" t="-124.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1236" r="1.5" t="-124.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1237" r="1.5" t="-124.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1238" r="1.5" t="-124.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1239" r="1.5" t="-124.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1240" r="1.5" t="-124.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1241" r="1.5" t="-124.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1242" r="1.5" t="-124.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1243" r="1.5" t="-125.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1244" r="1.5" t="-125.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1245" r="1.5" t="-125.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1246" r="1.5" t="-125.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1247" r="1.5" t="-125.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1248" r="1.5" t="-125.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1249" r="1.5" t="-125.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1250" r="1.5" t="-125.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1251" r="1.5" t="-125.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1252" r="1.5" t="-125.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1253" r="1.5" t="-126.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1254" r="1.5" t="-126.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1255" r="1.5" t="-126.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1256" r="1.5" t="-126.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1257" r="1.5" t="-126.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1258" r="1.5" t="-126.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1259" r="1.5" t="-126.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1260" r="1.5" t="-126.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1261" r="1.5" t="-126.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1262" r="1.5" t="-126.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1263" r="1.5" t="-127.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1264" r="1.5" t="-127.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1265" r="1.5" t="-127.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1266" r="1.5" t="-127.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1267" r="1.5" t="-127.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1268" r="1.5" t="-127.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1269" r="1.5" t="-127.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1270" r="1.5" t="-127.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1271" r="1.5" t="-127.85" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1272" r="1.5" t="-127.95" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1273" r="1.5" t="-128.05" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1274" r="1.5" t="-128.15" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1275" r="1.5" t="-128.25" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1276" r="1.5" t="-128.35" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1277" r="1.5" t="-128.45" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1278" r="1.5" t="-128.55" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1279" r="1.5" t="-128.65" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="cell_1280" r="1.5" t="-128.75" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+    </component>
+  </type>
+  <!-- Standard Cell -->
+  <type is="detector" name="cell">
+    <cuboid id="cell-shape">
+      <left-front-bottom-point x="-0.0013" y="-0.05" z="0" />
+      <left-front-top-point x="-0.0013" y="0.05" z="0" />
+      <left-back-bottom-point x="-0.0013" y="-0.05" z="0.001" />
+      <right-front-bottom-point x="0.0013" y="-0.05" z="0" />
+    </cuboid>
+    <algebra val="cell-shape" />
+  </type>
+</instrument>
diff --git a/instrument/D20_Definition.xml b/instrument/D20_Definition.xml
new file mode 100644
index 0000000000000000000000000000000000000000..142ecc16bc9b70e0bd10d78792e22a42f2d2bcb3
--- /dev/null
+++ b/instrument/D20_Definition.xml
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- For help on the notation used to specify an
+    Instrument Definition File see http://www.mantidproject.org/IDF
+    -->
+<instrument xmlns="http://www.mantidproject.org/IDF/1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 Schema/IDFSchema.xsd" name="D20" valid-from="1900-01-31 23:59:59"
+valid-to="2100-01-31 23:59:59" last-modified="2017-02-27 17:20:42">
+  <!-- Author: vardanyan@ill.fr -->
+  <defaults>
+    <length unit="meter" />
+    <angle unit="degree" />
+    <reference-frame>
+      <!-- The z-axis is set parallel to and in the direction of the beam.
+        the y-axis points up and the coordinate system is right handed. -->
+      <along-beam axis="z" />
+      <pointing-up axis="y" />
+      <handedness val="right" />
+    </reference-frame>
+  </defaults>
+  <!-- Source position -->
+  <component type="monochromator">
+    <location z="-3.2" />
+  </component>
+  <type name="monochromator" is="Source">
+    <properties />
+  </type>
+  <!-- Monitor position -->
+  <component type="monitor" idlist="monitors">
+    <location z="-2.0" name="monitor" />
+  </component>
+  <type name="monitor" is="monitor">
+    <cuboid id="shape">
+      <left-front-bottom-point x="-0.005" y="-0.005" z="-0.005" />
+      <left-front-top-point x="-0.005" y="0.005" z="-0.005" />
+      <left-back-bottom-point x="-0.005" y="-0.005" z="0.005" />
+      <right-front-bottom-point x="0.005" y="-0.005" z="-0.005" />
+    </cuboid>
+    <algebra val="shape" />
+  </type>
+  <idlist idname="monitors">
+    <id val="0" />
+  </idlist>
+  <!-- Sample position -->
+  <component type="sample-position">
+    <location y="0.0" x="0.0" z="0.0" />
+  </component>
+  <type name="sample-position" is="SamplePos" />
+  <!-- Detector IDs -->
+  <idlist idname="detectors">
+    <id start="1" end="3072" />
+  </idlist>
+  <!-- Detector list def -->
+  <component type="detector" idlist="detectors">
+    <location name="detector" />
+  </component>
+  <!-- Detector Panels -->
+  <type name="detector">
+    <component type="panel">
+      <location name="panel_1" r="1.471" t="1.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_2" r="1.471" t="4.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_3" r="1.471" t="8.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_4" r="1.471" t="11.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_5" r="1.471" t="14.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_6" r="1.471" t="17.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_7" r="1.471" t="20.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_8" r="1.471" t="24.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_9" r="1.471" t="27.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_10" r="1.471" t="30.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_11" r="1.471" t="33.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_12" r="1.471" t="36.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_13" r="1.471" t="40.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_14" r="1.471" t="43.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_15" r="1.471" t="46.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_16" r="1.471" t="49.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_17" r="1.471" t="52.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_18" r="1.471" t="56.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_19" r="1.471" t="59.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_20" r="1.471" t="62.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_21" r="1.471" t="65.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_22" r="1.471" t="68.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_23" r="1.471" t="72.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_24" r="1.471" t="75.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_25" r="1.471" t="78.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_26" r="1.471" t="81.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_27" r="1.471" t="84.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_28" r="1.471" t="88.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_29" r="1.471" t="91.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_30" r="1.471" t="94.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_31" r="1.471" t="97.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_32" r="1.471" t="100.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_33" r="1.471" t="104.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_34" r="1.471" t="107.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_35" r="1.471" t="110.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_36" r="1.471" t="113.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_37" r="1.471" t="116.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_38" r="1.471" t="120.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_39" r="1.471" t="123.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_40" r="1.471" t="126.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_41" r="1.471" t="129.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_42" r="1.471" t="132.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_43" r="1.471" t="136.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_44" r="1.471" t="139.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_45" r="1.471" t="142.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_46" r="1.471" t="145.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_47" r="1.471" t="148.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_48" r="1.471" t="152.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+    </component>
+  </type>
+  <!-- Standard Panel -->
+  <type name="panel">
+    <component type="cell">
+      <location name="cell_1" x="-0.040446" />
+      <location name="cell_2" x="-0.039162" />
+      <location name="cell_3" x="-0.037878" />
+      <location name="cell_4" x="-0.036594" />
+      <location name="cell_5" x="-0.03531" />
+      <location name="cell_6" x="-0.034026" />
+      <location name="cell_7" x="-0.032742" />
+      <location name="cell_8" x="-0.031458" />
+      <location name="cell_9" x="-0.030174" />
+      <location name="cell_10" x="-0.02889" />
+      <location name="cell_11" x="-0.027606" />
+      <location name="cell_12" x="-0.026322" />
+      <location name="cell_13" x="-0.025038" />
+      <location name="cell_14" x="-0.023754" />
+      <location name="cell_15" x="-0.02247" />
+      <location name="cell_16" x="-0.021186" />
+      <location name="cell_17" x="-0.019902" />
+      <location name="cell_18" x="-0.018618" />
+      <location name="cell_19" x="-0.017334" />
+      <location name="cell_20" x="-0.01605" />
+      <location name="cell_21" x="-0.014766" />
+      <location name="cell_22" x="-0.013482" />
+      <location name="cell_23" x="-0.012198" />
+      <location name="cell_24" x="-0.010914" />
+      <location name="cell_25" x="-0.00963" />
+      <location name="cell_26" x="-0.008346" />
+      <location name="cell_27" x="-0.007062" />
+      <location name="cell_28" x="-0.005778" />
+      <location name="cell_29" x="-0.004494" />
+      <location name="cell_30" x="-0.00321" />
+      <location name="cell_31" x="-0.001926" />
+      <location name="cell_32" x="-0.000642" />
+      <location name="cell_33" x="0.000642" />
+      <location name="cell_34" x="0.001926" />
+      <location name="cell_35" x="0.00321" />
+      <location name="cell_36" x="0.004494" />
+      <location name="cell_37" x="0.005778" />
+      <location name="cell_38" x="0.007062" />
+      <location name="cell_39" x="0.008346" />
+      <location name="cell_40" x="0.00963" />
+      <location name="cell_41" x="0.010914" />
+      <location name="cell_42" x="0.012198" />
+      <location name="cell_43" x="0.013482" />
+      <location name="cell_44" x="0.014766" />
+      <location name="cell_45" x="0.01605" />
+      <location name="cell_46" x="0.017334" />
+      <location name="cell_47" x="0.018618" />
+      <location name="cell_48" x="0.019902" />
+      <location name="cell_49" x="0.021186" />
+      <location name="cell_50" x="0.02247" />
+      <location name="cell_51" x="0.023754" />
+      <location name="cell_52" x="0.025038" />
+      <location name="cell_53" x="0.026322" />
+      <location name="cell_54" x="0.027606" />
+      <location name="cell_55" x="0.02889" />
+      <location name="cell_56" x="0.030174" />
+      <location name="cell_57" x="0.031458" />
+      <location name="cell_58" x="0.032742" />
+      <location name="cell_59" x="0.034026" />
+      <location name="cell_60" x="0.03531" />
+      <location name="cell_61" x="0.036594" />
+      <location name="cell_62" x="0.037878" />
+      <location name="cell_63" x="0.039162" />
+      <location name="cell_64" x="0.040446" />
+    </component>
+  </type>
+  <!-- Standard Cell -->
+  <type is="detector" name="cell">
+    <cuboid id="cell-shape">
+      <left-front-bottom-point x="-0.000642" y="-0.075" z="0" />
+      <left-front-top-point x="-0.000642" y="0.075" z="0" />
+      <left-back-bottom-point x="-0.000642" y="-0.075" z="0.05" />
+      <right-front-bottom-point x="0.000642" y="-0.075" z="0" />
+    </cuboid>
+    <algebra val="cell-shape" />
+  </type>
+</instrument>
diff --git a/instrument/D20_hr_Definition.xml b/instrument/D20_hr_Definition.xml
new file mode 100644
index 0000000000000000000000000000000000000000..e1eee07e5b59b88e4b98f4f0ce03fe670ca6b85e
--- /dev/null
+++ b/instrument/D20_hr_Definition.xml
@@ -0,0 +1,315 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- For help on the notation used to specify an
+    Instrument Definition File see http://www.mantidproject.org/IDF
+    -->
+<instrument xmlns="http://www.mantidproject.org/IDF/1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 Schema/IDFSchema.xsd" name="D20_hr" valid-from="1900-01-31 23:59:59"
+valid-to="2100-01-31 23:59:59" last-modified="2017-02-27 17:19:28">
+  <!-- Author: vardanyan@ill.fr -->
+  <defaults>
+    <length unit="meter" />
+    <angle unit="degree" />
+    <reference-frame>
+      <!-- The z-axis is set parallel to and in the direction of the beam.
+        the y-axis points up and the coordinate system is right handed. -->
+      <along-beam axis="z" />
+      <pointing-up axis="y" />
+      <handedness val="right" />
+    </reference-frame>
+  </defaults>
+  <!-- Source position -->
+  <component type="monochromator">
+    <location z="-3.2" />
+  </component>
+  <type name="monochromator" is="Source">
+    <properties />
+  </type>
+  <!-- Monitor position -->
+  <component type="monitor" idlist="monitors">
+    <location z="-2.0" name="monitor" />
+  </component>
+  <type name="monitor" is="monitor">
+    <cuboid id="shape">
+      <left-front-bottom-point x="-0.005" y="-0.005" z="-0.005" />
+      <left-front-top-point x="-0.005" y="0.005" z="-0.005" />
+      <left-back-bottom-point x="-0.005" y="-0.005" z="0.005" />
+      <right-front-bottom-point x="0.005" y="-0.005" z="-0.005" />
+    </cuboid>
+    <algebra val="shape" />
+  </type>
+  <idlist idname="monitors">
+    <id val="0" />
+  </idlist>
+  <!-- Sample position -->
+  <component type="sample-position">
+    <location y="0.0" x="0.0" z="0.0" />
+  </component>
+  <type name="sample-position" is="SamplePos" />
+  <!-- Detector IDs -->
+  <idlist idname="detectors">
+    <id start="1" end="4608" />
+  </idlist>
+  <!-- Detector list def -->
+  <component type="detector" idlist="detectors">
+    <location name="detector" />
+  </component>
+  <!-- Detector Panels -->
+  <type name="detector">
+    <component type="panel">
+      <location name="panel_1" r="1.471" t="1.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_2" r="1.471" t="4.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_3" r="1.471" t="8.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_4" r="1.471" t="11.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_5" r="1.471" t="14.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_6" r="1.471" t="17.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_7" r="1.471" t="20.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_8" r="1.471" t="24.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_9" r="1.471" t="27.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_10" r="1.471" t="30.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_11" r="1.471" t="33.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_12" r="1.471" t="36.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_13" r="1.471" t="40.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_14" r="1.471" t="43.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_15" r="1.471" t="46.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_16" r="1.471" t="49.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_17" r="1.471" t="52.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_18" r="1.471" t="56.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_19" r="1.471" t="59.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_20" r="1.471" t="62.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_21" r="1.471" t="65.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_22" r="1.471" t="68.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_23" r="1.471" t="72.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_24" r="1.471" t="75.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_25" r="1.471" t="78.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_26" r="1.471" t="81.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_27" r="1.471" t="84.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_28" r="1.471" t="88.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_29" r="1.471" t="91.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_30" r="1.471" t="94.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_31" r="1.471" t="97.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_32" r="1.471" t="100.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_33" r="1.471" t="104.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_34" r="1.471" t="107.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_35" r="1.471" t="110.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_36" r="1.471" t="113.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_37" r="1.471" t="116.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_38" r="1.471" t="120.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_39" r="1.471" t="123.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_40" r="1.471" t="126.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_41" r="1.471" t="129.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_42" r="1.471" t="132.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_43" r="1.471" t="136.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_44" r="1.471" t="139.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_45" r="1.471" t="142.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_46" r="1.471" t="145.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_47" r="1.471" t="148.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_48" r="1.471" t="152.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+    </component>
+  </type>
+  <!-- Standard Panel -->
+  <type name="panel">
+    <component type="cell">
+      <location name="cell_1" x="-0.04066" />
+      <location name="cell_2" x="-0.039804" />
+      <location name="cell_3" x="-0.038948" />
+      <location name="cell_4" x="-0.038092" />
+      <location name="cell_5" x="-0.037236" />
+      <location name="cell_6" x="-0.03638" />
+      <location name="cell_7" x="-0.035524" />
+      <location name="cell_8" x="-0.034668" />
+      <location name="cell_9" x="-0.033812" />
+      <location name="cell_10" x="-0.032956" />
+      <location name="cell_11" x="-0.0321" />
+      <location name="cell_12" x="-0.031244" />
+      <location name="cell_13" x="-0.030388" />
+      <location name="cell_14" x="-0.029532" />
+      <location name="cell_15" x="-0.028676" />
+      <location name="cell_16" x="-0.02782" />
+      <location name="cell_17" x="-0.026964" />
+      <location name="cell_18" x="-0.026108" />
+      <location name="cell_19" x="-0.025252" />
+      <location name="cell_20" x="-0.024396" />
+      <location name="cell_21" x="-0.02354" />
+      <location name="cell_22" x="-0.022684" />
+      <location name="cell_23" x="-0.021828" />
+      <location name="cell_24" x="-0.020972" />
+      <location name="cell_25" x="-0.020116" />
+      <location name="cell_26" x="-0.01926" />
+      <location name="cell_27" x="-0.018404" />
+      <location name="cell_28" x="-0.017548" />
+      <location name="cell_29" x="-0.016692" />
+      <location name="cell_30" x="-0.015836" />
+      <location name="cell_31" x="-0.01498" />
+      <location name="cell_32" x="-0.014124" />
+      <location name="cell_33" x="-0.013268" />
+      <location name="cell_34" x="-0.012412" />
+      <location name="cell_35" x="-0.011556" />
+      <location name="cell_36" x="-0.0107" />
+      <location name="cell_37" x="-0.009844" />
+      <location name="cell_38" x="-0.008988" />
+      <location name="cell_39" x="-0.008132" />
+      <location name="cell_40" x="-0.007276" />
+      <location name="cell_41" x="-0.00642" />
+      <location name="cell_42" x="-0.005564" />
+      <location name="cell_43" x="-0.004708" />
+      <location name="cell_44" x="-0.003852" />
+      <location name="cell_45" x="-0.002996" />
+      <location name="cell_46" x="-0.00214" />
+      <location name="cell_47" x="-0.001284" />
+      <location name="cell_48" x="-0.000428" />
+      <location name="cell_49" x="0.000428" />
+      <location name="cell_50" x="0.001284" />
+      <location name="cell_51" x="0.00214" />
+      <location name="cell_52" x="0.002996" />
+      <location name="cell_53" x="0.003852" />
+      <location name="cell_54" x="0.004708" />
+      <location name="cell_55" x="0.005564" />
+      <location name="cell_56" x="0.00642" />
+      <location name="cell_57" x="0.007276" />
+      <location name="cell_58" x="0.008132" />
+      <location name="cell_59" x="0.008988" />
+      <location name="cell_60" x="0.009844" />
+      <location name="cell_61" x="0.0107" />
+      <location name="cell_62" x="0.011556" />
+      <location name="cell_63" x="0.012412" />
+      <location name="cell_64" x="0.013268" />
+      <location name="cell_65" x="0.014124" />
+      <location name="cell_66" x="0.01498" />
+      <location name="cell_67" x="0.015836" />
+      <location name="cell_68" x="0.016692" />
+      <location name="cell_69" x="0.017548" />
+      <location name="cell_70" x="0.018404" />
+      <location name="cell_71" x="0.01926" />
+      <location name="cell_72" x="0.020116" />
+      <location name="cell_73" x="0.020972" />
+      <location name="cell_74" x="0.021828" />
+      <location name="cell_75" x="0.022684" />
+      <location name="cell_76" x="0.02354" />
+      <location name="cell_77" x="0.024396" />
+      <location name="cell_78" x="0.025252" />
+      <location name="cell_79" x="0.026108" />
+      <location name="cell_80" x="0.026964" />
+      <location name="cell_81" x="0.02782" />
+      <location name="cell_82" x="0.028676" />
+      <location name="cell_83" x="0.029532" />
+      <location name="cell_84" x="0.030388" />
+      <location name="cell_85" x="0.031244" />
+      <location name="cell_86" x="0.0321" />
+      <location name="cell_87" x="0.032956" />
+      <location name="cell_88" x="0.033812" />
+      <location name="cell_89" x="0.034668" />
+      <location name="cell_90" x="0.035524" />
+      <location name="cell_91" x="0.03638" />
+      <location name="cell_92" x="0.037236" />
+      <location name="cell_93" x="0.038092" />
+      <location name="cell_94" x="0.038948" />
+      <location name="cell_95" x="0.039804" />
+      <location name="cell_96" x="0.04066" />
+    </component>
+  </type>
+  <!-- Standard Cell -->
+  <type is="detector" name="cell">
+    <cuboid id="cell-shape">
+      <left-front-bottom-point x="-0.000428" y="-0.075" z="0" />
+      <left-front-top-point x="-0.000428" y="0.075" z="0" />
+      <left-back-bottom-point x="-0.000428" y="-0.075" z="0.05" />
+      <right-front-bottom-point x="0.000428" y="-0.075" z="0" />
+    </cuboid>
+    <algebra val="cell-shape" />
+  </type>
+</instrument>
diff --git a/instrument/D20_lr_Definition.xml b/instrument/D20_lr_Definition.xml
new file mode 100644
index 0000000000000000000000000000000000000000..51ffec53129ff9c34a34a89980a658fd1e4b88c1
--- /dev/null
+++ b/instrument/D20_lr_Definition.xml
@@ -0,0 +1,251 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- For help on the notation used to specify an
+    Instrument Definition File see http://www.mantidproject.org/IDF
+    -->
+<instrument xmlns="http://www.mantidproject.org/IDF/1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 Schema/IDFSchema.xsd" name="D20_lr" valid-from="1900-01-31 23:59:59"
+valid-to="2100-01-31 23:59:59" last-modified="2017-02-27 17:21:05">
+  <!-- Author: vardanyan@ill.fr -->
+  <defaults>
+    <length unit="meter" />
+    <angle unit="degree" />
+    <reference-frame>
+      <!-- The z-axis is set parallel to and in the direction of the beam.
+        the y-axis points up and the coordinate system is right handed. -->
+      <along-beam axis="z" />
+      <pointing-up axis="y" />
+      <handedness val="right" />
+    </reference-frame>
+  </defaults>
+  <!-- Source position -->
+  <component type="monochromator">
+    <location z="-3.2" />
+  </component>
+  <type name="monochromator" is="Source">
+    <properties />
+  </type>
+  <!-- Monitor position -->
+  <component type="monitor" idlist="monitors">
+    <location z="-2.0" name="monitor" />
+  </component>
+  <type name="monitor" is="monitor">
+    <cuboid id="shape">
+      <left-front-bottom-point x="-0.005" y="-0.005" z="-0.005" />
+      <left-front-top-point x="-0.005" y="0.005" z="-0.005" />
+      <left-back-bottom-point x="-0.005" y="-0.005" z="0.005" />
+      <right-front-bottom-point x="0.005" y="-0.005" z="-0.005" />
+    </cuboid>
+    <algebra val="shape" />
+  </type>
+  <idlist idname="monitors">
+    <id val="0" />
+  </idlist>
+  <!-- Sample position -->
+  <component type="sample-position">
+    <location y="0.0" x="0.0" z="0.0" />
+  </component>
+  <type name="sample-position" is="SamplePos" />
+  <!-- Detector IDs -->
+  <idlist idname="detectors">
+    <id start="1" end="1536" />
+  </idlist>
+  <!-- Detector list def -->
+  <component type="detector" idlist="detectors">
+    <location name="detector" />
+  </component>
+  <!-- Detector Panels -->
+  <type name="detector">
+    <component type="panel">
+      <location name="panel_1" r="1.471" t="1.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_2" r="1.471" t="4.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_3" r="1.471" t="8.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_4" r="1.471" t="11.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_5" r="1.471" t="14.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_6" r="1.471" t="17.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_7" r="1.471" t="20.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_8" r="1.471" t="24.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_9" r="1.471" t="27.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_10" r="1.471" t="30.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_11" r="1.471" t="33.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_12" r="1.471" t="36.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_13" r="1.471" t="40.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_14" r="1.471" t="43.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_15" r="1.471" t="46.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_16" r="1.471" t="49.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_17" r="1.471" t="52.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_18" r="1.471" t="56.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_19" r="1.471" t="59.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_20" r="1.471" t="62.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_21" r="1.471" t="65.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_22" r="1.471" t="68.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_23" r="1.471" t="72.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_24" r="1.471" t="75.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_25" r="1.471" t="78.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_26" r="1.471" t="81.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_27" r="1.471" t="84.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_28" r="1.471" t="88.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_29" r="1.471" t="91.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_30" r="1.471" t="94.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_31" r="1.471" t="97.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_32" r="1.471" t="100.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_33" r="1.471" t="104.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_34" r="1.471" t="107.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_35" r="1.471" t="110.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_36" r="1.471" t="113.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_37" r="1.471" t="116.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_38" r="1.471" t="120.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_39" r="1.471" t="123.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_40" r="1.471" t="126.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_41" r="1.471" t="129.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_42" r="1.471" t="132.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_43" r="1.471" t="136.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_44" r="1.471" t="139.2" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_45" r="1.471" t="142.4" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_46" r="1.471" t="145.6" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_47" r="1.471" t="148.8" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_48" r="1.471" t="152.0" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+    </component>
+  </type>
+  <!-- Standard Panel -->
+  <type name="panel">
+    <component type="cell">
+      <location name="cell_1" x="-0.039804" />
+      <location name="cell_2" x="-0.037236" />
+      <location name="cell_3" x="-0.034668" />
+      <location name="cell_4" x="-0.0321" />
+      <location name="cell_5" x="-0.029532" />
+      <location name="cell_6" x="-0.026964" />
+      <location name="cell_7" x="-0.024396" />
+      <location name="cell_8" x="-0.021828" />
+      <location name="cell_9" x="-0.01926" />
+      <location name="cell_10" x="-0.016692" />
+      <location name="cell_11" x="-0.014124" />
+      <location name="cell_12" x="-0.011556" />
+      <location name="cell_13" x="-0.008988" />
+      <location name="cell_14" x="-0.00642" />
+      <location name="cell_15" x="-0.003852" />
+      <location name="cell_16" x="-0.001284" />
+      <location name="cell_17" x="0.001284" />
+      <location name="cell_18" x="0.003852" />
+      <location name="cell_19" x="0.00642" />
+      <location name="cell_20" x="0.008988" />
+      <location name="cell_21" x="0.011556" />
+      <location name="cell_22" x="0.014124" />
+      <location name="cell_23" x="0.016692" />
+      <location name="cell_24" x="0.01926" />
+      <location name="cell_25" x="0.021828" />
+      <location name="cell_26" x="0.024396" />
+      <location name="cell_27" x="0.026964" />
+      <location name="cell_28" x="0.029532" />
+      <location name="cell_29" x="0.0321" />
+      <location name="cell_30" x="0.034668" />
+      <location name="cell_31" x="0.037236" />
+      <location name="cell_32" x="0.039804" />
+    </component>
+  </type>
+  <!-- Standard Cell -->
+  <type is="detector" name="cell">
+    <cuboid id="cell-shape">
+      <left-front-bottom-point x="-0.001284" y="-0.075" z="0" />
+      <left-front-top-point x="-0.001284" y="0.075" z="0" />
+      <left-back-bottom-point x="-0.001284" y="-0.075" z="0.05" />
+      <right-front-bottom-point x="0.001284" y="-0.075" z="0" />
+    </cuboid>
+    <algebra val="cell-shape" />
+  </type>
+</instrument>
diff --git a/instrument/D2B_Definition.xml b/instrument/D2B_Definition.xml
index ff72228f997acd04ad0f4270b0e159d52f4e12c5..fe513f0526f662f4e2e1f9898a78445e66287166 100644
--- a/instrument/D2B_Definition.xml
+++ b/instrument/D2B_Definition.xml
@@ -14,17 +14,33 @@ name="D2B" valid-from="1900-01-31 23:59:59" valid-to="2100-01-31 23:59:59" last-
       <handedness val="right" />
     </reference-frame>
   </defaults>
-  <component type="moderator">
-    <location z="-2" />
+  <component type="monochromator">
+    <location z="-2.997" />
   </component>
-  <type name="moderator" is="Source"></type>
+  <type name="monochromator" is="Source"/>
+  <!-- Monitor position -->
+  <component type="monitor" idlist="monitors">
+    <location z="-1.594" name="monitor" />
+  </component>
+  <type name="monitor" is="monitor">
+    <cuboid id="shape">
+      <left-front-bottom-point x="-0.005" y="-0.005" z="-0.005" />
+      <left-front-top-point x="-0.005" y="0.005" z="-0.005" />
+      <left-back-bottom-point x="-0.005" y="-0.005" z="0.005" />
+      <right-front-bottom-point x="0.005" y="-0.005" z="-0.005" />
+    </cuboid>
+    <algebra val="shape" />
+  </type>
+  <idlist idname="monitors">
+    <id val="1" />
+  </idlist>
   <!-- Sample position -->
   <component type="sample-position">
     <location y="0.0" x="0.0" z="0.0" />
   </component>
   <type name="sample-position" is="SamplePos" />
   <idlist idname="detectors">
-    <id start="1" end="16384" />
+    <id start="2" end="16385" />
   </idlist>
   <!-- Detector list def -->
   <component type="detectors" idlist="detectors">
@@ -60,134 +76,134 @@ name="D2B" valid-from="1900-01-31 23:59:59" valid-to="2100-01-31 23:59:59" last-
   <!-- Definition of the unique existent bank (made of tubes) -->
   <type name="bank_uniq">
     <component type="standard_tube">
-      <location r="2.000000" t="165.000000" name="tube_1" />
-      <location r="2.000000" t="163.740157" name="tube_2" />
-      <location r="2.000000" t="162.480315" name="tube_3" />
-      <location r="2.000000" t="161.220472" name="tube_4" />
-      <location r="2.000000" t="159.960630" name="tube_5" />
-      <location r="2.000000" t="158.700787" name="tube_6" />
-      <location r="2.000000" t="157.440945" name="tube_7" />
-      <location r="2.000000" t="156.181102" name="tube_8" />
-      <location r="2.000000" t="154.921260" name="tube_9" />
-      <location r="2.000000" t="153.661417" name="tube_10" />
-      <location r="2.000000" t="152.401575" name="tube_11" />
-      <location r="2.000000" t="151.141732" name="tube_12" />
-      <location r="2.000000" t="149.881890" name="tube_13" />
-      <location r="2.000000" t="148.622047" name="tube_14" />
-      <location r="2.000000" t="147.362205" name="tube_15" />
-      <location r="2.000000" t="146.102362" name="tube_16" />
-      <location r="2.000000" t="144.842520" name="tube_17" />
-      <location r="2.000000" t="143.582677" name="tube_18" />
-      <location r="2.000000" t="142.322835" name="tube_19" />
-      <location r="2.000000" t="141.062992" name="tube_20" />
-      <location r="2.000000" t="139.803150" name="tube_21" />
-      <location r="2.000000" t="138.543307" name="tube_22" />
-      <location r="2.000000" t="137.283465" name="tube_23" />
-      <location r="2.000000" t="136.023622" name="tube_24" />
-      <location r="2.000000" t="134.763780" name="tube_25" />
-      <location r="2.000000" t="133.503937" name="tube_26" />
-      <location r="2.000000" t="132.244094" name="tube_27" />
-      <location r="2.000000" t="130.984252" name="tube_28" />
-      <location r="2.000000" t="129.724409" name="tube_29" />
-      <location r="2.000000" t="128.464567" name="tube_30" />
-      <location r="2.000000" t="127.204724" name="tube_31" />
-      <location r="2.000000" t="125.944882" name="tube_32" />
-      <location r="2.000000" t="124.685039" name="tube_33" />
-      <location r="2.000000" t="123.425197" name="tube_34" />
-      <location r="2.000000" t="122.165354" name="tube_35" />
-      <location r="2.000000" t="120.905512" name="tube_36" />
-      <location r="2.000000" t="119.645669" name="tube_37" />
-      <location r="2.000000" t="118.385827" name="tube_38" />
-      <location r="2.000000" t="117.125984" name="tube_39" />
-      <location r="2.000000" t="115.866142" name="tube_40" />
-      <location r="2.000000" t="114.606299" name="tube_41" />
-      <location r="2.000000" t="113.346457" name="tube_42" />
-      <location r="2.000000" t="112.086614" name="tube_43" />
-      <location r="2.000000" t="110.826772" name="tube_44" />
-      <location r="2.000000" t="109.566929" name="tube_45" />
-      <location r="2.000000" t="108.307087" name="tube_46" />
-      <location r="2.000000" t="107.047244" name="tube_47" />
-      <location r="2.000000" t="105.787402" name="tube_48" />
-      <location r="2.000000" t="104.527559" name="tube_49" />
-      <location r="2.000000" t="103.267717" name="tube_50" />
-      <location r="2.000000" t="102.007874" name="tube_51" />
-      <location r="2.000000" t="100.748031" name="tube_52" />
-      <location r="2.000000" t="99.488189" name="tube_53" />
-      <location r="2.000000" t="98.228346" name="tube_54" />
-      <location r="2.000000" t="96.968504" name="tube_55" />
-      <location r="2.000000" t="95.708661" name="tube_56" />
-      <location r="2.000000" t="94.448819" name="tube_57" />
-      <location r="2.000000" t="93.188976" name="tube_58" />
-      <location r="2.000000" t="91.929134" name="tube_59" />
-      <location r="2.000000" t="90.669291" name="tube_60" />
-      <location r="2.000000" t="89.409449" name="tube_61" />
-      <location r="2.000000" t="88.149606" name="tube_62" />
-      <location r="2.000000" t="86.889764" name="tube_63" />
-      <location r="2.000000" t="85.629921" name="tube_64" />
-      <location r="2.000000" t="84.370079" name="tube_65" />
-      <location r="2.000000" t="83.110236" name="tube_66" />
-      <location r="2.000000" t="81.850394" name="tube_67" />
-      <location r="2.000000" t="80.590551" name="tube_68" />
-      <location r="2.000000" t="79.330709" name="tube_69" />
-      <location r="2.000000" t="78.070866" name="tube_70" />
-      <location r="2.000000" t="76.811024" name="tube_71" />
-      <location r="2.000000" t="75.551181" name="tube_72" />
-      <location r="2.000000" t="74.291339" name="tube_73" />
-      <location r="2.000000" t="73.031496" name="tube_74" />
-      <location r="2.000000" t="71.771654" name="tube_75" />
-      <location r="2.000000" t="70.511811" name="tube_76" />
-      <location r="2.000000" t="69.251969" name="tube_77" />
-      <location r="2.000000" t="67.992126" name="tube_78" />
-      <location r="2.000000" t="66.732283" name="tube_79" />
-      <location r="2.000000" t="65.472441" name="tube_80" />
-      <location r="2.000000" t="64.212598" name="tube_81" />
-      <location r="2.000000" t="62.952756" name="tube_82" />
-      <location r="2.000000" t="61.692913" name="tube_83" />
-      <location r="2.000000" t="60.433071" name="tube_84" />
-      <location r="2.000000" t="59.173228" name="tube_85" />
-      <location r="2.000000" t="57.913386" name="tube_86" />
-      <location r="2.000000" t="56.653543" name="tube_87" />
-      <location r="2.000000" t="55.393701" name="tube_88" />
-      <location r="2.000000" t="54.133858" name="tube_89" />
-      <location r="2.000000" t="52.874016" name="tube_90" />
-      <location r="2.000000" t="51.614173" name="tube_91" />
-      <location r="2.000000" t="50.354331" name="tube_92" />
-      <location r="2.000000" t="49.094488" name="tube_93" />
-      <location r="2.000000" t="47.834646" name="tube_94" />
-      <location r="2.000000" t="46.574803" name="tube_95" />
-      <location r="2.000000" t="45.314961" name="tube_96" />
-      <location r="2.000000" t="44.055118" name="tube_97" />
-      <location r="2.000000" t="42.795276" name="tube_98" />
-      <location r="2.000000" t="41.535433" name="tube_99" />
-      <location r="2.000000" t="40.275591" name="tube_100" />
-      <location r="2.000000" t="39.015748" name="tube_101" />
-      <location r="2.000000" t="37.755906" name="tube_102" />
-      <location r="2.000000" t="36.496063" name="tube_103" />
-      <location r="2.000000" t="35.236220" name="tube_104" />
-      <location r="2.000000" t="33.976378" name="tube_105" />
-      <location r="2.000000" t="32.716535" name="tube_106" />
-      <location r="2.000000" t="31.456693" name="tube_107" />
-      <location r="2.000000" t="30.196850" name="tube_108" />
-      <location r="2.000000" t="28.937008" name="tube_109" />
-      <location r="2.000000" t="27.677165" name="tube_110" />
-      <location r="2.000000" t="26.417323" name="tube_111" />
-      <location r="2.000000" t="25.157480" name="tube_112" />
-      <location r="2.000000" t="23.897638" name="tube_113" />
-      <location r="2.000000" t="22.637795" name="tube_114" />
-      <location r="2.000000" t="21.377953" name="tube_115" />
-      <location r="2.000000" t="20.118110" name="tube_116" />
-      <location r="2.000000" t="18.858268" name="tube_117" />
-      <location r="2.000000" t="17.598425" name="tube_118" />
-      <location r="2.000000" t="16.338583" name="tube_119" />
-      <location r="2.000000" t="15.078740" name="tube_120" />
-      <location r="2.000000" t="13.818898" name="tube_121" />
-      <location r="2.000000" t="12.559055" name="tube_122" />
-      <location r="2.000000" t="11.299213" name="tube_123" />
-      <location r="2.000000" t="10.039370" name="tube_124" />
-      <location r="2.000000" t="8.779528" name="tube_125" />
-      <location r="2.000000" t="7.519685" name="tube_126" />
-      <location r="2.000000" t="6.259843" name="tube_127" />
-      <location r="2.000000" t="5.000000" name="tube_128" />
+      <location r="1.296" t="165.000000" name="tube_1" />
+      <location r="1.296" t="163.740157" name="tube_2" />
+      <location r="1.296" t="162.480315" name="tube_3" />
+      <location r="1.296" t="161.220472" name="tube_4" />
+      <location r="1.296" t="159.960630" name="tube_5" />
+      <location r="1.296" t="158.700787" name="tube_6" />
+      <location r="1.296" t="157.440945" name="tube_7" />
+      <location r="1.296" t="156.181102" name="tube_8" />
+      <location r="1.296" t="154.921260" name="tube_9" />
+      <location r="1.296" t="153.661417" name="tube_10" />
+      <location r="1.296" t="152.401575" name="tube_11" />
+      <location r="1.296" t="151.141732" name="tube_12" />
+      <location r="1.296" t="149.881890" name="tube_13" />
+      <location r="1.296" t="148.622047" name="tube_14" />
+      <location r="1.296" t="147.362205" name="tube_15" />
+      <location r="1.296" t="146.102362" name="tube_16" />
+      <location r="1.296" t="144.842520" name="tube_17" />
+      <location r="1.296" t="143.582677" name="tube_18" />
+      <location r="1.296" t="142.322835" name="tube_19" />
+      <location r="1.296" t="141.062992" name="tube_20" />
+      <location r="1.296" t="139.803150" name="tube_21" />
+      <location r="1.296" t="138.543307" name="tube_22" />
+      <location r="1.296" t="137.283465" name="tube_23" />
+      <location r="1.296" t="136.023622" name="tube_24" />
+      <location r="1.296" t="134.763780" name="tube_25" />
+      <location r="1.296" t="133.503937" name="tube_26" />
+      <location r="1.296" t="132.244094" name="tube_27" />
+      <location r="1.296" t="130.984252" name="tube_28" />
+      <location r="1.296" t="129.724409" name="tube_29" />
+      <location r="1.296" t="128.464567" name="tube_30" />
+      <location r="1.296" t="127.204724" name="tube_31" />
+      <location r="1.296" t="125.944882" name="tube_32" />
+      <location r="1.296" t="124.685039" name="tube_33" />
+      <location r="1.296" t="123.425197" name="tube_34" />
+      <location r="1.296" t="122.165354" name="tube_35" />
+      <location r="1.296" t="120.905512" name="tube_36" />
+      <location r="1.296" t="119.645669" name="tube_37" />
+      <location r="1.296" t="118.385827" name="tube_38" />
+      <location r="1.296" t="117.125984" name="tube_39" />
+      <location r="1.296" t="115.866142" name="tube_40" />
+      <location r="1.296" t="114.606299" name="tube_41" />
+      <location r="1.296" t="113.346457" name="tube_42" />
+      <location r="1.296" t="112.086614" name="tube_43" />
+      <location r="1.296" t="110.826772" name="tube_44" />
+      <location r="1.296" t="109.566929" name="tube_45" />
+      <location r="1.296" t="108.307087" name="tube_46" />
+      <location r="1.296" t="107.047244" name="tube_47" />
+      <location r="1.296" t="105.787402" name="tube_48" />
+      <location r="1.296" t="104.527559" name="tube_49" />
+      <location r="1.296" t="103.267717" name="tube_50" />
+      <location r="1.296" t="102.007874" name="tube_51" />
+      <location r="1.296" t="100.748031" name="tube_52" />
+      <location r="1.296" t="99.488189" name="tube_53" />
+      <location r="1.296" t="98.228346" name="tube_54" />
+      <location r="1.296" t="96.968504" name="tube_55" />
+      <location r="1.296" t="95.708661" name="tube_56" />
+      <location r="1.296" t="94.448819" name="tube_57" />
+      <location r="1.296" t="93.188976" name="tube_58" />
+      <location r="1.296" t="91.929134" name="tube_59" />
+      <location r="1.296" t="90.669291" name="tube_60" />
+      <location r="1.296" t="89.409449" name="tube_61" />
+      <location r="1.296" t="88.149606" name="tube_62" />
+      <location r="1.296" t="86.889764" name="tube_63" />
+      <location r="1.296" t="85.629921" name="tube_64" />
+      <location r="1.296" t="84.370079" name="tube_65" />
+      <location r="1.296" t="83.110236" name="tube_66" />
+      <location r="1.296" t="81.850394" name="tube_67" />
+      <location r="1.296" t="80.590551" name="tube_68" />
+      <location r="1.296" t="79.330709" name="tube_69" />
+      <location r="1.296" t="78.070866" name="tube_70" />
+      <location r="1.296" t="76.811024" name="tube_71" />
+      <location r="1.296" t="75.551181" name="tube_72" />
+      <location r="1.296" t="74.291339" name="tube_73" />
+      <location r="1.296" t="73.031496" name="tube_74" />
+      <location r="1.296" t="71.771654" name="tube_75" />
+      <location r="1.296" t="70.511811" name="tube_76" />
+      <location r="1.296" t="69.251969" name="tube_77" />
+      <location r="1.296" t="67.992126" name="tube_78" />
+      <location r="1.296" t="66.732283" name="tube_79" />
+      <location r="1.296" t="65.472441" name="tube_80" />
+      <location r="1.296" t="64.212598" name="tube_81" />
+      <location r="1.296" t="62.952756" name="tube_82" />
+      <location r="1.296" t="61.692913" name="tube_83" />
+      <location r="1.296" t="60.433071" name="tube_84" />
+      <location r="1.296" t="59.173228" name="tube_85" />
+      <location r="1.296" t="57.913386" name="tube_86" />
+      <location r="1.296" t="56.653543" name="tube_87" />
+      <location r="1.296" t="55.393701" name="tube_88" />
+      <location r="1.296" t="54.133858" name="tube_89" />
+      <location r="1.296" t="52.874016" name="tube_90" />
+      <location r="1.296" t="51.614173" name="tube_91" />
+      <location r="1.296" t="50.354331" name="tube_92" />
+      <location r="1.296" t="49.094488" name="tube_93" />
+      <location r="1.296" t="47.834646" name="tube_94" />
+      <location r="1.296" t="46.574803" name="tube_95" />
+      <location r="1.296" t="45.314961" name="tube_96" />
+      <location r="1.296" t="44.055118" name="tube_97" />
+      <location r="1.296" t="42.795276" name="tube_98" />
+      <location r="1.296" t="41.535433" name="tube_99" />
+      <location r="1.296" t="40.275591" name="tube_100" />
+      <location r="1.296" t="39.015748" name="tube_101" />
+      <location r="1.296" t="37.755906" name="tube_102" />
+      <location r="1.296" t="36.496063" name="tube_103" />
+      <location r="1.296" t="35.236220" name="tube_104" />
+      <location r="1.296" t="33.976378" name="tube_105" />
+      <location r="1.296" t="32.716535" name="tube_106" />
+      <location r="1.296" t="31.456693" name="tube_107" />
+      <location r="1.296" t="30.196850" name="tube_108" />
+      <location r="1.296" t="28.937008" name="tube_109" />
+      <location r="1.296" t="27.677165" name="tube_110" />
+      <location r="1.296" t="26.417323" name="tube_111" />
+      <location r="1.296" t="25.157480" name="tube_112" />
+      <location r="1.296" t="23.897638" name="tube_113" />
+      <location r="1.296" t="22.637795" name="tube_114" />
+      <location r="1.296" t="21.377953" name="tube_115" />
+      <location r="1.296" t="20.118110" name="tube_116" />
+      <location r="1.296" t="18.858268" name="tube_117" />
+      <location r="1.296" t="17.598425" name="tube_118" />
+      <location r="1.296" t="16.338583" name="tube_119" />
+      <location r="1.296" t="15.078740" name="tube_120" />
+      <location r="1.296" t="13.818898" name="tube_121" />
+      <location r="1.296" t="12.559055" name="tube_122" />
+      <location r="1.296" t="11.299213" name="tube_123" />
+      <location r="1.296" t="10.039370" name="tube_124" />
+      <location r="1.296" t="8.779528" name="tube_125" />
+      <location r="1.296" t="7.519685" name="tube_126" />
+      <location r="1.296" t="6.259843" name="tube_127" />
+      <location r="1.296" t="5.000000" name="tube_128" />
     </component>
   </type>
   <!-- Definition of standard_tube -->
diff --git a/instrument/D4C_Definition.xml b/instrument/D4C_Definition.xml
new file mode 100644
index 0000000000000000000000000000000000000000..7b7a6dc0435e6608a2b8773f60c6e77169971324
--- /dev/null
+++ b/instrument/D4C_Definition.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- For help on the notation used to specify an
+    Instrument Definition File see http://www.mantidproject.org/IDF
+    -->
+<instrument xmlns="http://www.mantidproject.org/IDF/1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 Schema/IDFSchema.xsd" name="D4C" valid-from="1900-01-31 23:59:59"
+valid-to="2100-01-31 23:59:59" last-modified="2017-02-27 17:13:17">
+  <!-- Author: vardanyan@ill.fr -->
+  <defaults>
+    <length unit="meter" />
+    <angle unit="degree" />
+    <reference-frame>
+      <!-- The z-axis is set parallel to and in the direction of the beam.
+        the y-axis points up and the coordinate system is right handed. -->
+      <along-beam axis="z" />
+      <pointing-up axis="y" />
+      <handedness val="right" />
+    </reference-frame>
+  </defaults>
+  <!-- Source position -->
+  <component type="monochromator">
+    <location z="-2.61" />
+  </component>
+  <type name="monochromator" is="Source">
+    <properties />
+  </type>
+  <!-- Monitor position -->
+  <component type="monitor" idlist="monitors">
+    <location z="-0.71" name="monitor" />
+  </component>
+  <type name="monitor" is="monitor">
+    <cuboid id="shape">
+      <left-front-bottom-point x="-0.005" y="-0.005" z="-0.005" />
+      <left-front-top-point x="-0.005" y="0.005" z="-0.005" />
+      <left-back-bottom-point x="-0.005" y="-0.005" z="0.005" />
+      <right-front-bottom-point x="0.005" y="-0.005" z="-0.005" />
+    </cuboid>
+    <algebra val="shape" />
+  </type>
+  <idlist idname="monitors">
+    <id val="0" />
+  </idlist>
+  <!-- Sample position -->
+  <component type="sample-position">
+    <location y="0.0" x="0.0" z="0.0" />
+  </component>
+  <type name="sample-position" is="SamplePos" />
+  <!-- Detector IDs -->
+  <idlist idname="detectors">
+    <id start="1" end="576" />
+  </idlist>
+  <!-- Detector list def -->
+  <component type="detector" idlist="detectors">
+    <location name="detector" />
+  </component>
+  <!-- Detector Panels -->
+  <type name="detector">
+    <component type="panel">
+      <location name="panel_1" r="1.146" t="5.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_2" r="1.146" t="20.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_3" r="1.146" t="35.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_4" r="1.146" t="50.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_5" r="1.146" t="65.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_6" r="1.146" t="80.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_7" r="1.146" t="95.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_8" r="1.146" t="110.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_9" r="1.146" t="125.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+    </component>
+  </type>
+  <!-- Standard Panel -->
+  <type name="panel">
+    <component type="cell">
+      <location name="cell_1" x="-0.07875" />
+      <location name="cell_2" x="-0.07625" />
+      <location name="cell_3" x="-0.07375" />
+      <location name="cell_4" x="-0.07125" />
+      <location name="cell_5" x="-0.06875" />
+      <location name="cell_6" x="-0.06625" />
+      <location name="cell_7" x="-0.06375" />
+      <location name="cell_8" x="-0.06125" />
+      <location name="cell_9" x="-0.05875" />
+      <location name="cell_10" x="-0.05625" />
+      <location name="cell_11" x="-0.05375" />
+      <location name="cell_12" x="-0.05125" />
+      <location name="cell_13" x="-0.04875" />
+      <location name="cell_14" x="-0.04625" />
+      <location name="cell_15" x="-0.04375" />
+      <location name="cell_16" x="-0.04125" />
+      <location name="cell_17" x="-0.03875" />
+      <location name="cell_18" x="-0.03625" />
+      <location name="cell_19" x="-0.03375" />
+      <location name="cell_20" x="-0.03125" />
+      <location name="cell_21" x="-0.02875" />
+      <location name="cell_22" x="-0.02625" />
+      <location name="cell_23" x="-0.02375" />
+      <location name="cell_24" x="-0.02125" />
+      <location name="cell_25" x="-0.01875" />
+      <location name="cell_26" x="-0.01625" />
+      <location name="cell_27" x="-0.01375" />
+      <location name="cell_28" x="-0.01125" />
+      <location name="cell_29" x="-0.00875" />
+      <location name="cell_30" x="-0.00625" />
+      <location name="cell_31" x="-0.00375" />
+      <location name="cell_32" x="-0.00125" />
+      <location name="cell_33" x="0.00125" />
+      <location name="cell_34" x="0.00375" />
+      <location name="cell_35" x="0.00625" />
+      <location name="cell_36" x="0.00875" />
+      <location name="cell_37" x="0.01125" />
+      <location name="cell_38" x="0.01375" />
+      <location name="cell_39" x="0.01625" />
+      <location name="cell_40" x="0.01875" />
+      <location name="cell_41" x="0.02125" />
+      <location name="cell_42" x="0.02375" />
+      <location name="cell_43" x="0.02625" />
+      <location name="cell_44" x="0.02875" />
+      <location name="cell_45" x="0.03125" />
+      <location name="cell_46" x="0.03375" />
+      <location name="cell_47" x="0.03625" />
+      <location name="cell_48" x="0.03875" />
+      <location name="cell_49" x="0.04125" />
+      <location name="cell_50" x="0.04375" />
+      <location name="cell_51" x="0.04625" />
+      <location name="cell_52" x="0.04875" />
+      <location name="cell_53" x="0.05125" />
+      <location name="cell_54" x="0.05375" />
+      <location name="cell_55" x="0.05625" />
+      <location name="cell_56" x="0.05875" />
+      <location name="cell_57" x="0.06125" />
+      <location name="cell_58" x="0.06375" />
+      <location name="cell_59" x="0.06625" />
+      <location name="cell_60" x="0.06875" />
+      <location name="cell_61" x="0.07125" />
+      <location name="cell_62" x="0.07375" />
+      <location name="cell_63" x="0.07625" />
+      <location name="cell_64" x="0.07875" />
+    </component>
+  </type>
+  <!-- Standard Cell -->
+  <type is="detector" name="cell">
+    <cuboid id="cell-shape">
+      <left-front-bottom-point x="-0.00125" y="-0.05" z="-0.015" />
+      <left-front-top-point x="-0.00125" y="0.05" z="-0.015" />
+      <left-back-bottom-point x="-0.00125" y="-0.05" z="0.015" />
+      <right-front-bottom-point x="0.00125" y="-0.05" z="-0.015" />
+    </cuboid>
+    <algebra val="cell-shape" />
+  </type>
+</instrument>
diff --git a/instrument/D4C_hr_Definition.xml b/instrument/D4C_hr_Definition.xml
new file mode 100644
index 0000000000000000000000000000000000000000..1639c1f307aa964e4f8ac11cafa72ea596dd583c
--- /dev/null
+++ b/instrument/D4C_hr_Definition.xml
@@ -0,0 +1,230 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- For help on the notation used to specify an
+    Instrument Definition File see http://www.mantidproject.org/IDF
+    -->
+<instrument xmlns="http://www.mantidproject.org/IDF/1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 Schema/IDFSchema.xsd" name="D4C_hr" valid-from="1900-01-31 23:59:59"
+valid-to="2100-01-31 23:59:59" last-modified="2017-02-27 17:13:51">
+  <!-- Author: vardanyan@ill.fr -->
+  <defaults>
+    <length unit="meter" />
+    <angle unit="degree" />
+    <reference-frame>
+      <!-- The z-axis is set parallel to and in the direction of the beam.
+        the y-axis points up and the coordinate system is right handed. -->
+      <along-beam axis="z" />
+      <pointing-up axis="y" />
+      <handedness val="right" />
+    </reference-frame>
+  </defaults>
+  <!-- Source position -->
+  <component type="monochromator">
+    <location z="-2.61" />
+  </component>
+  <type name="monochromator" is="Source">
+    <properties />
+  </type>
+  <!-- Monitor position -->
+  <component type="monitor" idlist="monitors">
+    <location z="-0.71" name="monitor" />
+  </component>
+  <type name="monitor" is="monitor">
+    <cuboid id="shape">
+      <left-front-bottom-point x="-0.005" y="-0.005" z="-0.005" />
+      <left-front-top-point x="-0.005" y="0.005" z="-0.005" />
+      <left-back-bottom-point x="-0.005" y="-0.005" z="0.005" />
+      <right-front-bottom-point x="0.005" y="-0.005" z="-0.005" />
+    </cuboid>
+    <algebra val="shape" />
+  </type>
+  <idlist idname="monitors">
+    <id val="0" />
+  </idlist>
+  <!-- Sample position -->
+  <component type="sample-position">
+    <location y="0.0" x="0.0" z="0.0" />
+  </component>
+  <type name="sample-position" is="SamplePos" />
+  <!-- Detector IDs -->
+  <idlist idname="detectors">
+    <id start="1" end="1152" />
+  </idlist>
+  <!-- Detector list def -->
+  <component type="detector" idlist="detectors">
+    <location name="detector" />
+  </component>
+  <!-- Detector Panels -->
+  <type name="detector">
+    <component type="panel">
+      <location name="panel_1" r="1.146" t="5.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_2" r="1.146" t="20.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_3" r="1.146" t="35.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_4" r="1.146" t="50.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_5" r="1.146" t="65.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_6" r="1.146" t="80.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_7" r="1.146" t="95.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_8" r="1.146" t="110.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+      <location name="panel_9" r="1.146" t="125.5" p="0.0">
+        <facing r="0.0" t="0.0" p="0.0" />
+      </location>
+    </component>
+  </type>
+  <!-- Standard Panel -->
+  <type name="panel">
+    <component type="cell">
+      <location name="cell_1" x="-0.079375" />
+      <location name="cell_2" x="-0.078125" />
+      <location name="cell_3" x="-0.076875" />
+      <location name="cell_4" x="-0.075625" />
+      <location name="cell_5" x="-0.074375" />
+      <location name="cell_6" x="-0.073125" />
+      <location name="cell_7" x="-0.071875" />
+      <location name="cell_8" x="-0.070625" />
+      <location name="cell_9" x="-0.069375" />
+      <location name="cell_10" x="-0.068125" />
+      <location name="cell_11" x="-0.066875" />
+      <location name="cell_12" x="-0.065625" />
+      <location name="cell_13" x="-0.064375" />
+      <location name="cell_14" x="-0.063125" />
+      <location name="cell_15" x="-0.061875" />
+      <location name="cell_16" x="-0.060625" />
+      <location name="cell_17" x="-0.059375" />
+      <location name="cell_18" x="-0.058125" />
+      <location name="cell_19" x="-0.056875" />
+      <location name="cell_20" x="-0.055625" />
+      <location name="cell_21" x="-0.054375" />
+      <location name="cell_22" x="-0.053125" />
+      <location name="cell_23" x="-0.051875" />
+      <location name="cell_24" x="-0.050625" />
+      <location name="cell_25" x="-0.049375" />
+      <location name="cell_26" x="-0.048125" />
+      <location name="cell_27" x="-0.046875" />
+      <location name="cell_28" x="-0.045625" />
+      <location name="cell_29" x="-0.044375" />
+      <location name="cell_30" x="-0.043125" />
+      <location name="cell_31" x="-0.041875" />
+      <location name="cell_32" x="-0.040625" />
+      <location name="cell_33" x="-0.039375" />
+      <location name="cell_34" x="-0.038125" />
+      <location name="cell_35" x="-0.036875" />
+      <location name="cell_36" x="-0.035625" />
+      <location name="cell_37" x="-0.034375" />
+      <location name="cell_38" x="-0.033125" />
+      <location name="cell_39" x="-0.031875" />
+      <location name="cell_40" x="-0.030625" />
+      <location name="cell_41" x="-0.029375" />
+      <location name="cell_42" x="-0.028125" />
+      <location name="cell_43" x="-0.026875" />
+      <location name="cell_44" x="-0.025625" />
+      <location name="cell_45" x="-0.024375" />
+      <location name="cell_46" x="-0.023125" />
+      <location name="cell_47" x="-0.021875" />
+      <location name="cell_48" x="-0.020625" />
+      <location name="cell_49" x="-0.019375" />
+      <location name="cell_50" x="-0.018125" />
+      <location name="cell_51" x="-0.016875" />
+      <location name="cell_52" x="-0.015625" />
+      <location name="cell_53" x="-0.014375" />
+      <location name="cell_54" x="-0.013125" />
+      <location name="cell_55" x="-0.011875" />
+      <location name="cell_56" x="-0.010625" />
+      <location name="cell_57" x="-0.009375" />
+      <location name="cell_58" x="-0.008125" />
+      <location name="cell_59" x="-0.006875" />
+      <location name="cell_60" x="-0.005625" />
+      <location name="cell_61" x="-0.004375" />
+      <location name="cell_62" x="-0.003125" />
+      <location name="cell_63" x="-0.001875" />
+      <location name="cell_64" x="-0.000625" />
+      <location name="cell_65" x="0.000625" />
+      <location name="cell_66" x="0.001875" />
+      <location name="cell_67" x="0.003125" />
+      <location name="cell_68" x="0.004375" />
+      <location name="cell_69" x="0.005625" />
+      <location name="cell_70" x="0.006875" />
+      <location name="cell_71" x="0.008125" />
+      <location name="cell_72" x="0.009375" />
+      <location name="cell_73" x="0.010625" />
+      <location name="cell_74" x="0.011875" />
+      <location name="cell_75" x="0.013125" />
+      <location name="cell_76" x="0.014375" />
+      <location name="cell_77" x="0.015625" />
+      <location name="cell_78" x="0.016875" />
+      <location name="cell_79" x="0.018125" />
+      <location name="cell_80" x="0.019375" />
+      <location name="cell_81" x="0.020625" />
+      <location name="cell_82" x="0.021875" />
+      <location name="cell_83" x="0.023125" />
+      <location name="cell_84" x="0.024375" />
+      <location name="cell_85" x="0.025625" />
+      <location name="cell_86" x="0.026875" />
+      <location name="cell_87" x="0.028125" />
+      <location name="cell_88" x="0.029375" />
+      <location name="cell_89" x="0.030625" />
+      <location name="cell_90" x="0.031875" />
+      <location name="cell_91" x="0.033125" />
+      <location name="cell_92" x="0.034375" />
+      <location name="cell_93" x="0.035625" />
+      <location name="cell_94" x="0.036875" />
+      <location name="cell_95" x="0.038125" />
+      <location name="cell_96" x="0.039375" />
+      <location name="cell_97" x="0.040625" />
+      <location name="cell_98" x="0.041875" />
+      <location name="cell_99" x="0.043125" />
+      <location name="cell_100" x="0.044375" />
+      <location name="cell_101" x="0.045625" />
+      <location name="cell_102" x="0.046875" />
+      <location name="cell_103" x="0.048125" />
+      <location name="cell_104" x="0.049375" />
+      <location name="cell_105" x="0.050625" />
+      <location name="cell_106" x="0.051875" />
+      <location name="cell_107" x="0.053125" />
+      <location name="cell_108" x="0.054375" />
+      <location name="cell_109" x="0.055625" />
+      <location name="cell_110" x="0.056875" />
+      <location name="cell_111" x="0.058125" />
+      <location name="cell_112" x="0.059375" />
+      <location name="cell_113" x="0.060625" />
+      <location name="cell_114" x="0.061875" />
+      <location name="cell_115" x="0.063125" />
+      <location name="cell_116" x="0.064375" />
+      <location name="cell_117" x="0.065625" />
+      <location name="cell_118" x="0.066875" />
+      <location name="cell_119" x="0.068125" />
+      <location name="cell_120" x="0.069375" />
+      <location name="cell_121" x="0.070625" />
+      <location name="cell_122" x="0.071875" />
+      <location name="cell_123" x="0.073125" />
+      <location name="cell_124" x="0.074375" />
+      <location name="cell_125" x="0.075625" />
+      <location name="cell_126" x="0.076875" />
+      <location name="cell_127" x="0.078125" />
+      <location name="cell_128" x="0.079375" />
+    </component>
+  </type>
+  <!-- Standard Cell -->
+  <type is="detector" name="cell">
+    <cuboid id="cell-shape">
+      <left-front-bottom-point x="-0.000625" y="-0.05" z="-0.015" />
+      <left-front-top-point x="-0.000625" y="0.05" z="-0.015" />
+      <left-back-bottom-point x="-0.000625" y="-0.05" z="0.015" />
+      <right-front-bottom-point x="0.000625" y="-0.05" z="-0.015" />
+    </cuboid>
+    <algebra val="cell-shape" />
+  </type>
+</instrument>
diff --git a/instrument/DNS_Definition_PAonly.xml b/instrument/DNS_Definition_PAonly.xml
index 15fab5107e83e6003b9de06370116d574f8369eb..3cfa615ef75bf0c443ef327bb522c2c331344978 100644
--- a/instrument/DNS_Definition_PAonly.xml
+++ b/instrument/DNS_Definition_PAonly.xml
@@ -16,9 +16,9 @@
       <handedness val="right" />
     </reference-frame>
   </defaults>
-  <!-- moderator -->
+  <!-- chopper -->
   <component type="moderator">
-    <location z="-2.27" />
+    <location z="-0.36325" />
   </component>
   <type name="moderator" is="Source"></type>
   <!-- monitor -->
diff --git a/instrument/DNS_Parameters.xml b/instrument/DNS_Parameters.xml
new file mode 100644
index 0000000000000000000000000000000000000000..f5db344a2b75f0999fd5b13ae306c86f69058a23
--- /dev/null
+++ b/instrument/DNS_Parameters.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<parameter-file instrument="DNS" valid-from="2013-10-01T00:00:00">
+
+        <component-link name="DNS">
+
+		<parameter name="deltaE-mode" type="string">
+			<value val="direct" />
+		</parameter>
+
+                <!-- Coil currents Ca, Cb, Cc, Cz to determine the neutron polarisation -->
+                <parameter name="x_currents" type="string">
+                        <value val="0,-2,-0.77,-2.21; -0.5,-1.5,-1.2,-2.15" />
+		</parameter>
+                <parameter name="y_currents" type="string">
+                        <value val="0,1.60,-2.77,-2.21; 0,-1.4,1.65,-2.15" />
+                </parameter>
+                <parameter name="z_currents" type="string">
+                        <value val="0,0.11,-0.5,0; 0,0.15,-0.5,0" />
+                </parameter>
+
+                <!-- Scaling factor to calculate the channel width in microseconds -->
+                <parameter name="channel_width_factor" type="string">
+                        <value val="20.0" />
+                </parameter>
+
+                <!-- 2theta tolerance, degrees -->
+                <parameter name="two_theta_tolerance" type="string">
+                        <value val="0.1" />
+                </parameter>
+
+                <!-- file suffixes to lookup for the standard data -->
+                <!-- Vanadium -->
+                <parameter name="vana" type="string">
+                        <value val="vana"/>
+                </parameter>
+                <!-- NiCr -->
+                <parameter name="nicr" type="string">
+                        <value val="nicr"/>
+                </parameter>
+                <!-- Instrument background -->
+                <parameter name="bkg" type="string">
+                        <value val="leer"/>
+                </parameter>
+
+                <!-- Normalization workspace name suffix -->
+                <parameter name="normws_suffix" type="string">
+                        <value val="_n"/>
+                </parameter>
+
+                <!-- formula for Monitor efficiency calculation. Algorithm: MonitorEfficiencyCorUser  -->
+                <parameter name="formula_mon_eff" type="string">
+                        <value val="sqrt(e/25.3)" />
+                </parameter>
+
+                <!-- Distance [m] between sample and equatorial line of the detector. Mandatory
+                        if you want to correct the flight paths. -->
+                <parameter name="l2" type="string">
+                        <value val="0.80" />
+                </parameter>
+
+	</component-link>
+
+</parameter-file>
diff --git a/instrument/EXED_Definition.xml b/instrument/EXED_Definition.xml
index d8ac3a8b8fb9ace1b8a329b832f08273402dd91d..127c8baca996647af0ce2af6ec0259c5e31e32aa 100644
--- a/instrument/EXED_Definition.xml
+++ b/instrument/EXED_Definition.xml
@@ -22,7 +22,7 @@ last-modified="2017-04-21 14:58:37">
 </defaults>
 
 <!-- Detector components -->
-<component type="monitors" idlist="monitors"><location/></component>
+<!--<component type="monitors" idlist="monitors"><location/></component>-->
 <component type="panel02" idlist="panel02"><location y="-0.447"/></component>
 <component type="panel04" idlist="panel04"><location y="-0.4466"/></component>
 <component type="Tank">
diff --git a/instrument/Facilities.xml b/instrument/Facilities.xml
index 2c91f3b505bec6d9f3e5d3b68fcf4991c854f194..cb3a2138fb60cd9e2da871d991b776db0c2cb8c0 100644
--- a/instrument/Facilities.xml
+++ b/instrument/Facilities.xml
@@ -631,6 +631,29 @@
     <technique>Powder diffraction</technique>
   </instrument>
 
+  <instrument name="D20_lr">
+    <technique>Powder diffraction</technique>
+  </instrument>
+
+  <instrument name="D20">
+    <technique>Powder diffraction</technique>
+  </instrument>
+
+  <instrument name="D20_hr">
+    <technique>Powder diffraction</technique>
+  </instrument>
+
+  <instrument name="D1B">
+    <technique>Powder diffraction</technique>
+  </instrument>
+
+  <instrument name="D4">
+    <technique>Liquid diffraction</technique>
+  </instrument>
+
+  <instrument name="D4_hr">
+    <technique>Liquid diffraction</technique>
+  </instrument>
 
   <instrument name="D17">
     <technique>Reflectometry</technique>
diff --git a/instrument/IN16_Definition.xml b/instrument/IN16_Definition.xml
index 683e89cfa5591f687757d038d5e82a50a670a6c6..f01387c9644b6fb6605e46a18694bf64ba7a90b8 100644
--- a/instrument/IN16_Definition.xml
+++ b/instrument/IN16_Definition.xml
@@ -66,7 +66,7 @@
   </type>
 
   <idlist idname="monitor1">
-    <id val="19" />
+    <id val="29" />
   </idlist>
 
 <!--  detector components -->
diff --git a/instrument/POWGEN_Definition_2015-08-01.xml b/instrument/POWGEN_Definition_2015-08-01.xml
index dd1f381fae3e52584759a086c3c91c2f5014e51e..e038eb017d63cd84b1f71206fac860961406c547 100644
--- a/instrument/POWGEN_Definition_2015-08-01.xml
+++ b/instrument/POWGEN_Definition_2015-08-01.xml
@@ -1,5 +1,5 @@
 <?xml version='1.0' encoding='ASCII'?>
-<instrument xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.mantidproject.org/IDF/1.0" last-modified="2015-08-13 17:09:34.584106" name="PG3" valid-from="2015-08-01 00:00:01" valid-to="2100-01-31 23:59:59" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 http://schema.mantidproject.org/IDF/1.0/IDFSchema.xsd">
+<instrument xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.mantidproject.org/IDF/1.0" last-modified="2015-08-13 17:09:34.584106" name="PG3" valid-from="2015-08-01 00:00:01" valid-to="2017-04-30 23:59:59" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 http://schema.mantidproject.org/IDF/1.0/IDFSchema.xsd">
   <!--Created by Stuart Campbell, Vickie Lynch, Peter Peterson, Janik Zikovsky-->
   <!--DEFAULTS-->
   <defaults>
diff --git a/instrument/POWGEN_Definition_2017-05-01.xml b/instrument/POWGEN_Definition_2017-05-01.xml
new file mode 100644
index 0000000000000000000000000000000000000000..476cfd42fe61f30b5ffecc3afa6f08e7e7a23d1f
--- /dev/null
+++ b/instrument/POWGEN_Definition_2017-05-01.xml
@@ -0,0 +1,482 @@
+<?xml version='1.0' encoding='ASCII'?>
+<instrument xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.mantidproject.org/IDF/1.0" last-modified="2017-05-15 10:10:19.633681" name="PG3" valid-from="2017-05-01 00:00:01" valid-to="2100-01-31 23:59:59" xsi:schemaLocation="http://www.mantidproject.org/IDF/1.0 http://schema.mantidproject.org/IDF/1.0/IDFSchema.xsd">
+  <!--Created by Peter Peterson, Stuart Campbell, Vickie Lynch, Janik Zikovsky-->
+  <!--DEFAULTS-->
+  <defaults>
+    <length unit="metre"/>
+    <angle unit="degree"/>
+    <reference-frame>
+      <along-beam axis="z"/>
+      <pointing-up axis="y"/>
+      <handedness val="right"/>
+    </reference-frame>
+  </defaults>
+  <!--SOURCE-->
+  <component type="moderator">
+    <location z="-60.0"/>
+  </component>
+  <type is="Source" name="moderator"/>
+  <!--SAMPLE-->
+  <component type="sample-position">
+    <location x="0.0" y="0.0" z="0.0"/>
+  </component>
+  <type is="SamplePos" name="sample-position"/>
+  <!--MONITORS-->
+  <component idlist="monitors" type="monitors">
+    <location/>
+  </component>
+  <type name="monitors">
+    <component type="monitor">
+      <location name="monitor1" z="-1.5077"/>
+    </component>
+  </type>
+  <component type="SA">
+    <location/>
+  </component>
+  <component type="SB">
+    <location/>
+  </component>
+  <component type="SC">
+    <location/>
+  </component>
+  <component type="SD">
+    <location/>
+  </component>
+  <component type="SE">
+    <location/>
+  </component>
+  <component type="SF">
+    <location/>
+  </component>
+  <component type="SG">
+    <location/>
+  </component>
+  <component type="SH">
+    <location/>
+  </component>
+  <component type="SI">
+    <location/>
+  </component>
+  <component type="SJ">
+    <location/>
+  </component>
+  <component type="SK">
+    <location/>
+  </component>
+  <component type="SL">
+    <location/>
+  </component>
+  <type name="SA">
+    <component idfillbyfirst="y" idstart="0" idstepbyrow="7" type="panel_v2">
+      <location name="bank1" x="-0.73925475" y="-0.82071225" z="-1.996748">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-95.7836082769">
+          <rot val="-0.214446930188">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-260.481108323"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="15000" idstepbyrow="7" type="panel_v2">
+      <location name="bank2" x="-0.7395815" y="-0.4102375" z="-1.997611">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-248.092344247">
+          <rot val="-0.159140805275">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-108.22211052"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="30000" idstepbyrow="7" type="panel_v2">
+      <location name="bank3" x="-0.7384485" y="-0.00334675" z="-1.995525">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-21.9845356053">
+          <rot val="-0.20174549551">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-334.102436652"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="45000" idstepbyrow="7" type="panel_v2">
+      <location name="bank4" x="-0.7393245" y="0.40585725" z="-1.996414">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-90.0">
+          <rot val="-0.076965977547">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-266.027400928"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="60000" idstepbyrow="7" type="panel_v2">
+      <location name="bank5" x="-0.74006325" y="0.81578475" z="-1.99702175">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="0.0">
+          <rot val="-0.0102009129589">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-355.999468485"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SB">
+    <component idfillbyfirst="y" idstart="75000" idstepbyrow="7" type="panel_v2">
+      <location name="bank6" x="-1.4947395" y="-0.821244" z="-1.81559025">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-14.227431466">
+          <rot val="-0.115682526238">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-322.353813357"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="90000" idstepbyrow="7" type="panel_v2">
+      <location name="bank7" x="-1.4981475" y="-0.41126725" z="-1.813485">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-8.47357850501">
+          <rot val="-0.0715310382983">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-328.450171259"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="105000" idstepbyrow="7" type="panel_v2">
+      <location name="bank8" x="-1.49673875" y="-0.0024805" z="-1.81505625">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="0.0">
+          <rot val="-0.00975526506503">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-336.887798804"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="120000" idstepbyrow="7" type="panel_v2">
+      <location name="bank9" x="-1.49670575" y="0.40656575" z="-1.81423725">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-239.560642886">
+          <rot val="-0.200624965876">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-97.2292744758"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="135000" idstepbyrow="7" type="panel_v2">
+      <location name="bank10" x="-1.497145" y="0.81472025" z="-1.81433725">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-33.3719292385">
+          <rot val="-0.0758077553307">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-303.627647711"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SC">
+    <component idfillbyfirst="y" idstart="150000" idstepbyrow="7" type="panel_v2">
+      <location name="bank11" x="-2.160215" y="-0.411611" z="-1.39893575">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-110.47227952">
+          <rot val="-0.0289085423667">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-208.935220509"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="165000" idstepbyrow="7" type="panel_v2">
+      <location name="bank12" x="-2.162071" y="-0.00275275" z="-1.3995505">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-55.0160252665">
+          <rot val="-0.168691015004">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-264.328446742"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="180000" idstepbyrow="7" type="panel_v2">
+      <location name="bank13" x="-2.161607" y="0.40649425" z="-1.399101">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="0.0">
+          <rot val="-0.0503837129317">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-319.253875843"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SD">
+    <component idfillbyfirst="y" idstart="195000" idstepbyrow="7" type="panel_v2">
+      <location name="bank14" x="-2.6752895" y="-0.4119855" z="-0.80590125">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-7.59361175567">
+          <rot val="-0.265635069724">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-295.294310681"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="210000" idstepbyrow="7" type="panel_v2">
+      <location name="bank15" x="-2.67603825" y="-0.003183" z="-0.8076035">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-259.49647485">
+          <rot val="-0.0876965659647">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-43.2252787227"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="225000" idstepbyrow="7" type="panel_v2">
+      <location name="bank16" x="-2.67614475" y="0.4069395" z="-0.80604575">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-237.435549147">
+          <rot val="-0.272886676774">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-65.6425485041"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SE">
+    <component idfillbyfirst="y" idstart="240000" idstepbyrow="7" type="panel_v2">
+      <location name="bank17" x="-3.012676" y="-0.4117715" z="-0.09640275">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-328.98611594">
+          <rot val="-0.0991226590352">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-318.978598301"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="255000" idstepbyrow="7" type="panel_v2">
+      <location name="bank18" x="-3.01414" y="-0.0016465" z="-0.09651475">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-237.052270308">
+          <rot val="-0.0433751366478">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-51.0685515408"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="270000" idstepbyrow="7" type="panel_v2">
+      <location name="bank19" x="-3.01392675" y="0.406411" z="-0.09860525">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-324.491815572">
+          <rot val="-0.0982329908629">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-323.50660542"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SF">
+    <component idfillbyfirst="y" idstart="285000" idstepbyrow="7" type="panel_v2">
+      <location name="bank20" x="-3.162008" y="-0.411905" z="0.6771515">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-292.159006796">
+          <rot val="-0.038715242253">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-342.050092207"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="300000" idstepbyrow="7" type="panel_v2">
+      <location name="bank21" x="-3.162478" y="-0.002053" z="0.67561275">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-297.356379027">
+          <rot val="-0.0587913590109">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-336.692870716"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="315000" idstepbyrow="7" type="panel_v2">
+      <location name="bank22" x="-3.16293425" y="0.404794" z="0.675672">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-16.8583987678">
+          <rot val="-0.331991511595">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-257.169392039"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SG">
+    <component idfillbyfirst="y" idstart="330000" idstepbyrow="7" type="panel_v2">
+      <location name="bank23" x="-3.12830675" y="-0.411247" z="1.4622695">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-324.010543034">
+          <rot val="-0.225101818311">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-297.067790035"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="345000" idstepbyrow="7" type="panel_v2">
+      <location name="bank24" x="-3.12870025" y="-0.00212225" z="1.46520975">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-292.667406129">
+          <rot val="-0.0738162753662">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-328.59938431"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="360000" idstepbyrow="7" type="panel_v2">
+      <location name="bank25" x="-3.131831" y="0.40695875" z="1.46199975">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-140.376932458">
+          <rot val="-0.151527336503">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-120.75562437"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SH">
+    <component idfillbyfirst="y" idstart="375000" idstepbyrow="7" type="panel_v2">
+      <location name="bank26" x="-2.92700025" y="-0.412328" z="2.22329025">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-297.195089337">
+          <rot val="-0.185854196496">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-311.655187146"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="390000" idstepbyrow="7" type="panel_v2">
+      <location name="bank27" x="-2.92640375" y="-0.00424125" z="2.2249635">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-328.302263179">
+          <rot val="-0.421937944041">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-280.535174873"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="405000" idstepbyrow="7" type="panel_v2">
+      <location name="bank28" x="-2.92493925" y="0.4054455" z="2.22520275">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-342.221194876">
+          <rot val="-0.0878132050408">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-266.749077233"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SI">
+    <component idfillbyfirst="y" idstart="420000" idstepbyrow="7" type="panel_v2">
+      <location name="bank29" x="-2.5654625" y="-0.41229675" z="2.92516325">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-343.809212516">
+          <rot val="-0.0812208993013">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-253.562313467"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="435000" idstepbyrow="7" type="panel_v2">
+      <location name="bank30" x="-2.56693825" y="-0.002701" z="2.926053">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-320.833790053">
+          <rot val="-0.0639589613408">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-276.439485343"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="450000" idstepbyrow="7" type="panel_v2">
+      <location name="bank31" x="-2.56714575" y="0.40589375" z="2.92551425">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-319.018079143">
+          <rot val="-0.315217152334">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-278.264953374"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SJ">
+    <component idfillbyfirst="y" idstart="465000" idstepbyrow="7" type="panel_v2">
+      <location name="bank32" x="-2.08030575" y="-0.41216925" z="3.5484435">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-299.755580123">
+          <rot val="-0.144124370034">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-286.443905065"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="480000" idstepbyrow="7" type="panel_v2">
+      <location name="bank33" x="-2.0809715" y="-0.0024505" z="3.5501205">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-275.57520057">
+          <rot val="-0.13393852438">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-310.920820082"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="495000" idstepbyrow="7" type="panel_v2">
+      <location name="bank34" x="-2.07957875" y="0.406084" z="3.549173">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-337.740895143">
+          <rot val="-0.120942525992">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-248.445057948"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SK">
+    <component idfillbyfirst="y" idstart="510000" idstepbyrow="7" type="panel_v2">
+      <location name="bank35" x="-1.48729325" y="-0.4119115" z="4.0699365">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-285.609034597">
+          <rot val="-0.12013017802">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-290.494131053"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="525000" idstepbyrow="7" type="panel_v2">
+      <location name="bank36" x="-1.4875845" y="-0.003188" z="4.07093">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-197.709724524">
+          <rot val="-0.162979832664">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-18.5161133513"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="540000" idstepbyrow="7" type="panel_v2">
+      <location name="bank37" x="-1.4869935" y="0.40566575" z="4.069813">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-322.393046384">
+          <rot val="-0.0922239694629">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-253.924817426"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <type name="SL">
+    <component idfillbyfirst="y" idstart="555000" idstepbyrow="7" type="panel_v2">
+      <location name="bank38" x="-0.81221125" y="-0.41096475" z="4.47902375">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-319.242574796">
+          <rot val="-0.210570577141">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-247.092689865"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="570000" idstepbyrow="7" type="panel_v2">
+      <location name="bank39" x="-0.8139335" y="-0.00202475" z="4.4808475">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-253.084239916">
+          <rot val="-0.0586673095562">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-313.160124121"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+    <component idfillbyfirst="y" idstart="585000" idstepbyrow="7" type="panel_v2">
+      <location name="bank40" x="-0.809226" y="0.40591675" z="4.47886225">
+        <rot axis-x="0" axis-y="1" axis-z="0" val="-278.169933832">
+          <rot val="-0.161734568834">
+            <rot axis-x="0" axis-y="1" axis-z="0" val="-288.322832013"/>
+          </rot>
+        </rot>
+      </location>
+    </component>
+  </type>
+  <!-- Version 2 Detector Panel (7x154)-->
+  <type is="rectangular_detector" name="panel_v2" type="pixel_v2" xpixels="154" xstart="-0.3825" xstep="0.005" ypixels="7" ystart="-0.1629" ystep="0.0543">
+    <properties/>
+  </type>
+  <!-- Shape for Monitors-->
+  <!-- TODO: Update to real shape -->
+  <type is="monitor" name="monitor">
+    <cylinder id="cyl-approx">
+      <centre-of-bottom-base p="0.0" r="0.0" t="0.0"/>
+      <axis x="0.0" y="0.0" z="1.0"/>
+      <radius val="0.01"/>
+      <height val="0.03"/>
+    </cylinder>
+    <algebra val="cyl-approx"/>
+  </type>
+  <!-- Pixel for Version 2 Detectors (7x154)-->
+  <type is="detector" name="pixel_v2">
+    <cuboid id="pixel-shape">
+      <left-front-bottom-point x="-0.0025" y="-0.02715" z="0.0"/>
+      <left-front-top-point x="-0.0025" y="0.02715" z="0.0"/>
+      <left-back-bottom-point x="-0.0025" y="-0.02715" z="-0.0001"/>
+      <right-front-bottom-point x="0.0025" y="-0.02715" z="0.0"/>
+    </cuboid>
+    <algebra val="pixel-shape"/>
+  </type>
+  <!--MONITOR IDs-->
+  <idlist idname="monitors">
+    <id val="-1"/>
+  </idlist>
+</instrument>
diff --git a/instrument/WISH_Parameters.xml b/instrument/WISH_Parameters.xml
index 09c45dbaae21347dca31254a89122318df3a64ca..23d27cd0023c0120b8686697d6a6cb704ec3eb81 100644
--- a/instrument/WISH_Parameters.xml
+++ b/instrument/WISH_Parameters.xml
@@ -3,20 +3,25 @@
     
     <component-link name="WISH">
     
+        <!-- Specify the gap between the tubes for Peak::findDetector -->
+        <parameter name="tube-gap">
+            <value val="0.00017"/>
+        </parameter>
+
         <!-- SplitInto defaults for MDWorkspaces -->
-		<parameter name="SplitInto">
-		  <value val="2"/>
-		</parameter>
-		
-		<!-- SplitThreshold defaults for MDWorkspaces -->
-		<parameter name="SplitThreshold">
-		  <value val="30"/>
-		</parameter>
-		
-		 <!-- MaxRecursionDepth defaults for MDWorkspaces -->
-		<parameter name="MaxRecursionDepth">
-		  <value val="20"/>
-		</parameter>
+        <parameter name="SplitInto">
+            <value val="2"/>
+        </parameter>
+
+        <!-- SplitThreshold defaults for MDWorkspaces -->
+        <parameter name="SplitThreshold">
+            <value val="30"/>
+        </parameter>
+
+        <!-- MaxRecursionDepth defaults for MDWorkspaces -->
+        <parameter name="MaxRecursionDepth">
+            <value val="20"/>
+        </parameter>
         
          <!-- Offset the psi values in the mini-plot -->
          <parameter name="offset-phi" type="string">
diff --git a/scripts/Diffraction/isis_powder/__init__.py b/scripts/Diffraction/isis_powder/__init__.py
index 4da865423bad98c965c48a68fc61160b71fae5ee..4b03af1a87c11109b7dfce786e24b2d793cf9658 100644
--- a/scripts/Diffraction/isis_powder/__init__.py
+++ b/scripts/Diffraction/isis_powder/__init__.py
@@ -1,12 +1,13 @@
 from __future__ import (absolute_import, division, print_function)
 
+# Disable unused import warnings. The import is for user convenience
 # Bring instruments into package namespace
-from .gem import Gem
-from .pearl import Pearl
-from .polaris import Polaris
+from .gem import Gem  # noqa: F401
+from .pearl import Pearl  # noqa: F401
+from .polaris import Polaris  # noqa: F401
 
 # Other useful classes
-from .routines.sample_details import SampleDetails
+from .routines.sample_details import SampleDetails  # noqa: F401
 
 # Prevent users using from import *
 __all__ = []
diff --git a/scripts/Diffraction/isis_powder/abstract_inst.py b/scripts/Diffraction/isis_powder/abstract_inst.py
index ad60cb2b4e321c49fc69750fb11e0ebf5762e360..10d8554fa7aeff2fa850c2fa2ced93da99d21a8d 100644
--- a/scripts/Diffraction/isis_powder/abstract_inst.py
+++ b/scripts/Diffraction/isis_powder/abstract_inst.py
@@ -160,6 +160,15 @@ class AbstractInst(object):
         """
         return None
 
+    def _get_instrument_bin_widths(self):
+        """
+        Returns the bin widths to rebin the focused workspace to. If
+        the instrument does not want this step a value of None should
+        not rebin the workspace
+        :return: List of bin widths or None if no rebinning should take place
+        """
+        return None
+
     def _generate_auto_vanadium_calibration(self, run_details):
         """
         Used by focus if a vanadium spline was not found to automatically generate said spline if the instrument
@@ -218,6 +227,13 @@ class AbstractInst(object):
 
     # Steps applicable to all instruments
 
+    @staticmethod
+    def _generate_run_details_fingerprint(*args):
+        out_key = ""
+        for arg in args:
+            out_key += str(arg)
+        return out_key
+
     def _generate_out_file_paths(self, run_details):
         """
         Generates the various output paths and file names to be used during saving or as workspace names
diff --git a/scripts/Diffraction/isis_powder/gem.py b/scripts/Diffraction/isis_powder/gem.py
index 43efd9f7c4d69e0d9f9f08fb4466fd2f4463f6c6..129ba712953ed2f68291479b0ede80ebbf158f35 100644
--- a/scripts/Diffraction/isis_powder/gem.py
+++ b/scripts/Diffraction/isis_powder/gem.py
@@ -2,7 +2,7 @@ from __future__ import (absolute_import, division, print_function)
 
 from isis_powder.abstract_inst import AbstractInst
 from isis_powder.gem_routines import gem_advanced_config, gem_algs, gem_param_mapping
-from isis_powder.routines import absorb_corrections, common, instrument_settings, sample_details
+from isis_powder.routines import absorb_corrections, common, instrument_settings
 
 
 class Gem(AbstractInst):
@@ -43,7 +43,8 @@ class Gem(AbstractInst):
     # Private methods
 
     def _get_run_details(self, run_number_string):
-        run_number_string_key = str(run_number_string) + str(self._inst_settings.file_extension)
+        run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
+                                                                       self._inst_settings.file_extension)
         if run_number_string_key in self._cached_run_details:
             return self._cached_run_details[run_number_string_key]
 
diff --git a/scripts/Diffraction/isis_powder/pearl.py b/scripts/Diffraction/isis_powder/pearl.py
index 03570e5e1c560de1fa649b5ceb502e42498b9e99..a40d4fbf1a7c2be3c97e3acf49e306ea59d7165b 100644
--- a/scripts/Diffraction/isis_powder/pearl.py
+++ b/scripts/Diffraction/isis_powder/pearl.py
@@ -47,7 +47,8 @@ class Pearl(AbstractInst):
                                      do_absorb_corrections=self._inst_settings.absorb_corrections)
 
     def _get_run_details(self, run_number_string):
-        run_number_string_key = str(run_number_string) + str(self._inst_settings.file_extension)
+        run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
+                                                                       self._inst_settings.file_extension)
         if run_number_string_key in self._cached_run_details:
             return self._cached_run_details[run_number_string_key]
 
diff --git a/scripts/Diffraction/isis_powder/polaris.py b/scripts/Diffraction/isis_powder/polaris.py
index e9b7244250634e3e7466bba526dfb99da26ee0f3..445356d577203af9084f1b3c881d87949d8edd20 100644
--- a/scripts/Diffraction/isis_powder/polaris.py
+++ b/scripts/Diffraction/isis_powder/polaris.py
@@ -2,7 +2,7 @@ from __future__ import (absolute_import, division, print_function)
 
 import os
 
-from isis_powder.routines import absorb_corrections, common, instrument_settings, sample_details
+from isis_powder.routines import absorb_corrections, common, instrument_settings
 from isis_powder.abstract_inst import AbstractInst
 from isis_powder.polaris_routines import polaris_advanced_config, polaris_algs, polaris_param_mapping
 
@@ -102,8 +102,14 @@ class Polaris(AbstractInst):
     def _get_input_batching_mode(self):
         return self._inst_settings.input_mode
 
+
+    def _get_instrument_bin_widths(self):
+        return self._inst_settings.focused_bin_widths
+
     def _get_run_details(self, run_number_string):
-        run_number_string_key = str(run_number_string) + str(self._inst_settings.file_extension)
+        run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
+                                                                       self._inst_settings.file_extension)
+
         if run_number_string_key in self._run_details_cached_obj:
             return self._run_details_cached_obj[run_number_string_key]
 
diff --git a/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py b/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py
index a083c3580632377bbfd64b1a677c11af91031183..3d30c280b7e84d0dc43ae78b109ebfa2d90e702a 100644
--- a/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py
+++ b/scripts/Diffraction/isis_powder/polaris_routines/polaris_advanced_config.py
@@ -30,6 +30,16 @@ focused_cropping_values = [
     (1500, 19900),  # Bank 5
     ]
 
+focused_bin_widths = [
+    # Note you want these to be negative for logarithmic (dt / t) binning
+    # else the output file will be larger than 1GB
+    -0.0050,  # Bank 1
+    -0.0010,  # Bank 2
+    -0.0010,  # Bank 3
+    -0.0010,  # Bank 4
+    -0.0005,  # Bank 5
+]
+
 vanadium_cropping_values = [
     (800, 19995),  # Bank 1
     (800, 19995),  # Bank 2
@@ -67,5 +77,6 @@ variables = {
     "file_names_dict": file_names,
     "script_params": script_params,
     "focused_cropping_values": focused_cropping_values,
-    "vanadium_cropping_values": vanadium_cropping_values
+    "vanadium_cropping_values": vanadium_cropping_values,
+    "focused_bin_widths": focused_bin_widths,
 }
diff --git a/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py b/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py
index 7a74533c7223f2d088b7fd4ef38b91f0384a2fa0..829a95b2885a72a895bc51994040e6f712a6b188 100644
--- a/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py
+++ b/scripts/Diffraction/isis_powder/polaris_routines/polaris_param_mapping.py
@@ -15,6 +15,7 @@ attr_mapping = \
      ParamMapEntry(ext_name="file_ext",                 int_name="file_extension", optional=True),
      ParamMapEntry(ext_name="first_cycle_run_no",       int_name="run_in_range"),
      ParamMapEntry(ext_name="focused_cropping_values",  int_name="focused_cropping_values"),
+     ParamMapEntry(ext_name="focused_bin_widths",       int_name="focused_bin_widths"),
      ParamMapEntry(ext_name="grouping_file_name",       int_name="grouping_file_name"),
      ParamMapEntry(ext_name="input_mode",               int_name="input_mode", enum_class=INPUT_BATCHING),
      ParamMapEntry(ext_name="masking_file_name",        int_name="masking_file_name"),
diff --git a/scripts/Diffraction/isis_powder/routines/__init__.py b/scripts/Diffraction/isis_powder/routines/__init__.py
index cf4ecb43ce688d78d138c0b4678a46a2b1d7ac5b..1f2d37b68e539945bc3e930660c0e4c48606d45c 100644
--- a/scripts/Diffraction/isis_powder/routines/__init__.py
+++ b/scripts/Diffraction/isis_powder/routines/__init__.py
@@ -1,4 +1,6 @@
 from __future__ import (absolute_import, division, print_function)
 
+# Disable unused import warnings. The import is for user convenience
+
 # Currently these are the things from routines we want to expose to a public API
-from .sample_details import SampleDetails
\ No newline at end of file
+from .sample_details import SampleDetails  # noqa: F401
diff --git a/scripts/Diffraction/isis_powder/routines/common.py b/scripts/Diffraction/isis_powder/routines/common.py
index 921b311581a15272938c686541acb508ac87d0e3..e567cbcbef047b32f2ef948642539fd765e680d0 100644
--- a/scripts/Diffraction/isis_powder/routines/common.py
+++ b/scripts/Diffraction/isis_powder/routines/common.py
@@ -258,6 +258,64 @@ def load_current_normalised_ws_list(run_number_string, instrument, input_batchin
     return normalised_ws_list
 
 
+def rebin_workspace(workspace, new_bin_width, start_x=None, end_x=None):
+    """
+    Rebins the specified workspace with the specified new bin width. Allows the user
+    to also set optionally the first and final bin boundaries of the histogram too.
+    If the bin boundaries are not set they are preserved from the original workspace
+    :param workspace: The workspace to rebin
+    :param new_bin_width: The new bin width to use across the workspace
+    :param start_x: (Optional) The first x bin to crop to
+    :param end_x: (Optional) The final x bin to crop to
+    :return: The rebinned workspace
+    """
+
+    # Find the starting and ending bin boundaries if they were not set
+    if start_x is None:
+        start_x = workspace.readX(0)[0]
+    if end_x is None:
+        end_x = workspace.readX(0)[-1]
+
+    rebin_string = str(start_x) + ',' + str(new_bin_width) + ',' + str(end_x)
+    workspace = mantid.Rebin(InputWorkspace=workspace, OutputWorkspace=workspace, Params=rebin_string)
+    return workspace
+
+
+def rebin_workspace_list(workspace_list, bin_width_list, start_x_list=None, end_x_list=None):
+    """
+    Rebins a list of workspaces with the specified bin widths in the list provided.
+    The number of bin widths and workspaces in the list must match. Additionally if
+    the optional parameters for start_x_list or end_x_list are provided these must
+    have the same length too.
+    :param workspace_list: The list of workspaces to rebin in place
+    :param bin_width_list: The list of new bin widths to apply to each workspace
+    :param start_x_list: The list of starting x boundaries to rebin to
+    :param end_x_list: The list of ending x boundaries to rebin to
+    :return: List of rebinned workspace
+    """
+    if not isinstance(workspace_list, list) or not isinstance(bin_width_list, list):
+        raise RuntimeError("One of the types passed to rebin_workspace_list was not a list")
+
+    ws_list_len = len(workspace_list)
+    if ws_list_len != len(bin_width_list):
+        raise ValueError("The number of bin widths found to rebin to does not match the number of banks")
+    if start_x_list and len(start_x_list) != ws_list_len:
+        raise ValueError("The number of starting bin values does not match the number of banks")
+    if end_x_list and len(end_x_list) != ws_list_len:
+        raise ValueError("The number of ending bin values does not match the number of banks")
+
+    # Create a list of None types of equal length to make using zip iterator easy
+    start_x_list = [None] * ws_list_len if start_x_list is None else start_x_list
+    end_x_list = [None] * ws_list_len if end_x_list is None else end_x_list
+
+    output_list = []
+    for ws, bin_width, start_x, end_x in zip(workspace_list, bin_width_list, start_x_list, end_x_list):
+        output_list.append(rebin_workspace(workspace=ws, new_bin_width=bin_width,
+                                           start_x=start_x, end_x=end_x))
+
+    return output_list
+
+
 def remove_intermediate_workspace(workspaces):
     """
     Removes the specified workspace(s) from the ADS. Can accept lists of workspaces. It
diff --git a/scripts/Diffraction/isis_powder/routines/common_output.py b/scripts/Diffraction/isis_powder/routines/common_output.py
index 02a4fbe5f54f3e9b28c415114ffb1a920c7729cf..0c43dafefbcf4a63514bb1672ef8a7b20dd598cd 100644
--- a/scripts/Diffraction/isis_powder/routines/common_output.py
+++ b/scripts/Diffraction/isis_powder/routines/common_output.py
@@ -18,9 +18,10 @@ def split_into_tof_d_spacing_groups(run_details, processed_spectra):
     tof_output = []
     run_number = str(run_details.output_run_string)
     ext = run_details.file_extension if run_details.file_extension else ""
-    for name_index, ws in enumerate(processed_spectra):
-        d_spacing_out_name = run_number + ext + "-ResultD-" + str(name_index + 1)
-        tof_out_name = run_number + ext + "-ResultTOF-" + str(name_index + 1)
+
+    for name_index, ws in enumerate(processed_spectra, start=1):
+        d_spacing_out_name = run_number + ext + "-ResultD-" + str(name_index)
+        tof_out_name = run_number + ext + "-ResultTOF-" + str(name_index)
 
         d_spacing_output.append(mantid.ConvertUnits(InputWorkspace=ws, OutputWorkspace=d_spacing_out_name,
                                                     Target="dSpacing"))
diff --git a/scripts/Diffraction/isis_powder/routines/focus.py b/scripts/Diffraction/isis_powder/routines/focus.py
index 792e425d6c24dfbc0795344ec63b1e7da47e01e4..71b3098f619124009bb03ba73b57128b5c232db5 100644
--- a/scripts/Diffraction/isis_powder/routines/focus.py
+++ b/scripts/Diffraction/isis_powder/routines/focus.py
@@ -24,7 +24,7 @@ def _focus_one_ws(ws, run_number, instrument, perform_vanadium_norm, absorb):
     if perform_vanadium_norm:
         _test_splined_vanadium_exists(instrument, run_details)
 
-    # Subtract empty beam runs
+    # Subtract empty instrument runs
     input_workspace = common.subtract_summed_runs(ws_to_correct=ws, instrument=instrument,
                                                   empty_sample_ws_string=run_details.empty_runs)
     # Subtract a sample empty if specified
@@ -52,10 +52,16 @@ def _focus_one_ws(ws, run_number, instrument, perform_vanadium_norm, absorb):
                                                      input_workspace=focused_ws,
                                                      perform_vanadium_norm=perform_vanadium_norm)
 
-    cropped_spectra = instrument._crop_banks_to_user_tof(calibrated_spectra)
+    output_spectra = instrument._crop_banks_to_user_tof(calibrated_spectra)
+
+    bin_widths = instrument._get_instrument_bin_widths()
+    if bin_widths:
+        # Reduce the bin width if required on this instrument
+        output_spectra = common.rebin_workspace_list(workspace_list=output_spectra,
+                                                     bin_width_list=bin_widths)
 
     # Output
-    d_spacing_group, tof_group = instrument._output_focused_ws(cropped_spectra, run_details=run_details)
+    d_spacing_group, tof_group = instrument._output_focused_ws(output_spectra, run_details=run_details)
 
     common.keep_single_ws_unit(d_spacing_group=d_spacing_group, tof_group=tof_group,
                                unit_to_keep=instrument._get_unit_to_keep())
@@ -64,7 +70,7 @@ def _focus_one_ws(ws, run_number, instrument, perform_vanadium_norm, absorb):
     common.remove_intermediate_workspace(input_workspace)
     common.remove_intermediate_workspace(aligned_ws)
     common.remove_intermediate_workspace(focused_ws)
-    common.remove_intermediate_workspace(cropped_spectra)
+    common.remove_intermediate_workspace(output_spectra)
 
     return d_spacing_group
 
diff --git a/scripts/Diffraction/isis_powder/routines/instrument_settings.py b/scripts/Diffraction/isis_powder/routines/instrument_settings.py
index add9d78c2a4a7f4433bf7eae24025d505afc51fa..9208a8fca6ccf0b464c203c79b3e31cadc6cdc08 100644
--- a/scripts/Diffraction/isis_powder/routines/instrument_settings.py
+++ b/scripts/Diffraction/isis_powder/routines/instrument_settings.py
@@ -7,7 +7,7 @@ import warnings
 
 # Have to patch warnings at runtime to not print the source code. This is even advertised as a 'feature' of
 # the warnings library in the documentation: https://docs.python.org/3/library/warnings.html#warnings.showwarning
-def _warning_no_source(msg, *_):
+def _warning_no_source(msg, *_, **__):
     return str(msg) + '\n'
 
 warnings.formatwarning = _warning_no_source
diff --git a/scripts/Diffraction/isis_powder/routines/sample_details.py b/scripts/Diffraction/isis_powder/routines/sample_details.py
index 0bce02dd8e6afa92e69b47cdac285a0f71e1c03f..648c9d6c062b60c8a11d29e7652869232bc11d35 100644
--- a/scripts/Diffraction/isis_powder/routines/sample_details.py
+++ b/scripts/Diffraction/isis_powder/routines/sample_details.py
@@ -91,6 +91,10 @@ class SampleDetails(object):
         if len(center) != 3:
             raise ValueError("The center must have three values corresponding to X, Y, Z position of the sample."
                              " For example [0. ,1., 2.]")
+
+        for val in center:
+            _check_can_convert_to_float(property_name="center", value=val)
+
         # All properties validated at this point
 
 
@@ -153,16 +157,22 @@ class _Material(object):
 
 def _check_value_is_physical(property_name, value):
     original_value = value
-    value = convert_to_float(value)
-    if value is None:
-        raise ValueError("Could not convert the " + property_name + " to a number."
-                         " The input was: '" + str(original_value) + "'")
+    value = _check_can_convert_to_float(property_name=property_name, value=value)
 
     if value <= 0 or math.isnan(value):
         raise ValueError("The value set for " + property_name + " was: " + str(original_value)
                          + " which is impossible for a physical object")
 
 
+def _check_can_convert_to_float(property_name, value):
+    original_value = value
+    value = convert_to_float(value)
+    if value is None:
+        raise ValueError("Could not convert the " + property_name + " to a number."
+                         " The input was: '" + str(original_value) + "'")
+    return value
+
+
 def convert_to_float(val):
     try:
         val = float(val)
diff --git a/scripts/HFIR_4Circle_Reduction/AddUBPeaksDialog.ui b/scripts/HFIR_4Circle_Reduction/AddUBPeaksDialog.ui
new file mode 100644
index 0000000000000000000000000000000000000000..58f8cca513932caa2f6b2ed55c771cd4a02c3675
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/AddUBPeaksDialog.ui
@@ -0,0 +1,490 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="windowModality">
+   <enum>Qt::ApplicationModal</enum>
+  </property>
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>666</width>
+    <height>547</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>Dialog</string>
+  </property>
+  <property name="modal">
+   <bool>true</bool>
+  </property>
+  <layout class="QVBoxLayout" name="verticalLayout_2">
+   <item>
+    <widget class="QGroupBox" name="groupBox_7">
+     <property name="title">
+      <string>Add Single Peak</string>
+     </property>
+     <layout class="QVBoxLayout" name="verticalLayout">
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayout_3">
+        <item>
+         <widget class="QLabel" name="label_scanNo">
+          <property name="minimumSize">
+           <size>
+            <width>120</width>
+            <height>0</height>
+           </size>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>120</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>10</pointsize>
+           </font>
+          </property>
+          <property name="text">
+           <string>Scan Number</string>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <widget class="QLineEdit" name="lineEdit_scanNumber">
+          <property name="sizePolicy">
+           <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+            <horstretch>0</horstretch>
+            <verstretch>0</verstretch>
+           </sizepolicy>
+          </property>
+          <property name="minimumSize">
+           <size>
+            <width>120</width>
+            <height>0</height>
+           </size>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>160</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <widget class="QPushButton" name="pushButton_findPeak">
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+            <weight>75</weight>
+            <bold>true</bold>
+           </font>
+          </property>
+          <property name="text">
+           <string>Find Peak</string>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <widget class="QCheckBox" name="checkBox_loadHKLfromFile">
+          <property name="font">
+           <font>
+            <pointsize>8</pointsize>
+            <italic>true</italic>
+           </font>
+          </property>
+          <property name="text">
+           <string>Load HKL from Spice file</string>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <spacer name="horizontalSpacer">
+          <property name="orientation">
+           <enum>Qt::Horizontal</enum>
+          </property>
+          <property name="sizeType">
+           <enum>QSizePolicy::Preferred</enum>
+          </property>
+          <property name="sizeHint" stdset="0">
+           <size>
+            <width>40</width>
+            <height>20</height>
+           </size>
+          </property>
+         </spacer>
+        </item>
+       </layout>
+      </item>
+      <item>
+       <layout class="QGridLayout" name="gridLayout">
+        <item row="1" column="1">
+         <widget class="QLineEdit" name="lineEdit_sampleQy">
+          <property name="enabled">
+           <bool>false</bool>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>160</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item row="0" column="0">
+         <widget class="QLabel" name="label_31">
+          <property name="minimumSize">
+           <size>
+            <width>120</width>
+            <height>0</height>
+           </size>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>120</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>10</pointsize>
+           </font>
+          </property>
+          <property name="text">
+           <string>Miller Index</string>
+          </property>
+         </widget>
+        </item>
+        <item row="1" column="3">
+         <widget class="QLineEdit" name="lineEdit_sampleQz">
+          <property name="enabled">
+           <bool>false</bool>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>160</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item row="1" column="2">
+         <widget class="QLineEdit" name="lineEdit_sampleQx">
+          <property name="enabled">
+           <bool>false</bool>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>160</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item row="0" column="3">
+         <widget class="QLineEdit" name="lineEdit_L">
+          <property name="maximumSize">
+           <size>
+            <width>160</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item row="0" column="2">
+         <widget class="QLineEdit" name="lineEdit_H">
+          <property name="maximumSize">
+           <size>
+            <width>160</width>
+            <height>40</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item row="1" column="0">
+         <widget class="QLabel" name="label_7">
+          <property name="minimumSize">
+           <size>
+            <width>120</width>
+            <height>0</height>
+           </size>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>120</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>10</pointsize>
+           </font>
+          </property>
+          <property name="text">
+           <string> Q-Sample </string>
+          </property>
+          <property name="alignment">
+           <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set>
+          </property>
+         </widget>
+        </item>
+        <item row="0" column="1">
+         <widget class="QLineEdit" name="lineEdit_K">
+          <property name="maximumSize">
+           <size>
+            <width>160</width>
+            <height>16777215</height>
+           </size>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item row="0" column="4">
+         <spacer name="horizontalSpacer_2">
+          <property name="orientation">
+           <enum>Qt::Horizontal</enum>
+          </property>
+          <property name="sizeType">
+           <enum>QSizePolicy::Preferred</enum>
+          </property>
+          <property name="sizeHint" stdset="0">
+           <size>
+            <width>40</width>
+            <height>20</height>
+           </size>
+          </property>
+         </spacer>
+        </item>
+       </layout>
+      </item>
+      <item>
+       <layout class="QHBoxLayout" name="horizontalLayout_2">
+        <item>
+         <spacer name="horizontalSpacer_3">
+          <property name="orientation">
+           <enum>Qt::Horizontal</enum>
+          </property>
+          <property name="sizeType">
+           <enum>QSizePolicy::Expanding</enum>
+          </property>
+          <property name="sizeHint" stdset="0">
+           <size>
+            <width>40</width>
+            <height>20</height>
+           </size>
+          </property>
+         </spacer>
+        </item>
+        <item>
+         <widget class="QPushButton" name="pushButton_addPeakToCalUB">
+          <property name="sizePolicy">
+           <sizepolicy hsizetype="Minimum" vsizetype="Fixed">
+            <horstretch>0</horstretch>
+            <verstretch>0</verstretch>
+           </sizepolicy>
+          </property>
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+            <weight>75</weight>
+            <bold>true</bold>
+           </font>
+          </property>
+          <property name="text">
+           <string>Add Peak</string>
+          </property>
+         </widget>
+        </item>
+       </layout>
+      </item>
+     </layout>
+    </widget>
+   </item>
+   <item>
+    <widget class="QGroupBox" name="groupBox">
+     <property name="title">
+      <string>Add Multiple Peaks</string>
+     </property>
+     <layout class="QHBoxLayout" name="horizontalLayout_4">
+      <item>
+       <layout class="QVBoxLayout" name="verticalLayout_3">
+        <item>
+         <widget class="QLabel" name="label">
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+          <property name="text">
+           <string>Scan Numbers</string>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <spacer name="verticalSpacer">
+          <property name="orientation">
+           <enum>Qt::Vertical</enum>
+          </property>
+          <property name="sizeHint" stdset="0">
+           <size>
+            <width>20</width>
+            <height>40</height>
+           </size>
+          </property>
+         </spacer>
+        </item>
+       </layout>
+      </item>
+      <item>
+       <layout class="QVBoxLayout" name="verticalLayout_4">
+        <item>
+         <widget class="QPlainTextEdit" name="plainTextEdit_scanList">
+          <property name="font">
+           <font>
+            <pointsize>11</pointsize>
+           </font>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <layout class="QHBoxLayout" name="horizontalLayout_5">
+          <item>
+           <widget class="QPushButton" name="pushButton_browseScansFile">
+            <property name="font">
+             <font>
+              <pointsize>11</pointsize>
+              <weight>75</weight>
+              <bold>true</bold>
+             </font>
+            </property>
+            <property name="text">
+             <string>Browse</string>
+            </property>
+           </widget>
+          </item>
+          <item>
+           <widget class="QLineEdit" name="lineEdit_scansFile"/>
+          </item>
+          <item>
+           <widget class="QPushButton" name="pushButton_loadScans">
+            <property name="font">
+             <font>
+              <pointsize>11</pointsize>
+              <weight>75</weight>
+              <bold>true</bold>
+             </font>
+            </property>
+            <property name="toolTip">
+             <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Load scans from a plain text file (ASCII).&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;p&gt;Supported file format:&lt;/p&gt;&lt;p&gt;# comment&lt;/p&gt;&lt;p&gt;# comment &lt;/p&gt;&lt;p&gt;scan1, scan2, scan3, scan4-scan100&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+            </property>
+            <property name="text">
+             <string>Load Scans</string>
+            </property>
+           </widget>
+          </item>
+         </layout>
+        </item>
+        <item>
+         <layout class="QHBoxLayout" name="horizontalLayout">
+          <item>
+           <spacer name="horizontalSpacer_4">
+            <property name="orientation">
+             <enum>Qt::Horizontal</enum>
+            </property>
+            <property name="sizeType">
+             <enum>QSizePolicy::Expanding</enum>
+            </property>
+            <property name="sizeHint" stdset="0">
+             <size>
+              <width>40</width>
+              <height>20</height>
+             </size>
+            </property>
+           </spacer>
+          </item>
+          <item>
+           <widget class="QPushButton" name="pushButton_addScans">
+            <property name="font">
+             <font>
+              <pointsize>11</pointsize>
+              <weight>75</weight>
+              <bold>true</bold>
+             </font>
+            </property>
+            <property name="toolTip">
+             <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Add scans listed in the text editor above to calculate/refine UB matrix&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+            </property>
+            <property name="text">
+             <string>Add Scans</string>
+            </property>
+           </widget>
+          </item>
+         </layout>
+        </item>
+       </layout>
+      </item>
+     </layout>
+    </widget>
+   </item>
+   <item>
+    <spacer name="verticalSpacer_2">
+     <property name="orientation">
+      <enum>Qt::Vertical</enum>
+     </property>
+     <property name="sizeHint" stdset="0">
+      <size>
+       <width>20</width>
+       <height>40</height>
+      </size>
+     </property>
+    </spacer>
+   </item>
+   <item>
+    <widget class="QPushButton" name="pushButton_quit">
+     <property name="text">
+      <string>Quit</string>
+     </property>
+    </widget>
+   </item>
+  </layout>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/scripts/HFIR_4Circle_Reduction/CMakeLists.txt b/scripts/HFIR_4Circle_Reduction/CMakeLists.txt
index 36b4bb99ec77f6cfc1f796affed8ca9dc2019c71..053a1d366f695383a4c2e094896c73a87dc78931 100644
--- a/scripts/HFIR_4Circle_Reduction/CMakeLists.txt
+++ b/scripts/HFIR_4Circle_Reduction/CMakeLists.txt
@@ -2,11 +2,15 @@ include(UiToPy)
 
 # List of UIs to Auto convert
 set( UI_FILES
-  MainWindow.ui 
+  MainWindow.ui
+  messagebox.ui
   View3DWidget.ui
   OptimizeLattice.ui
   RefineUbFftDialog.ui
   SpiceViewerDialog.ui
+  UBSelectPeaksDialog.ui
+  AddUBPeaksDialog.ui
+  PeakIntegrationSpreadSheet.ui
 )
 
 UiToPy( UI_FILES CompileUIHFIR_4Circle_Reduction)
diff --git a/scripts/HFIR_4Circle_Reduction/FindUBUtility.py b/scripts/HFIR_4Circle_Reduction/FindUBUtility.py
new file mode 100644
index 0000000000000000000000000000000000000000..a41ba3cac87001777582d4b00ed43d9470fa31b3
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/FindUBUtility.py
@@ -0,0 +1,245 @@
+"""
+Containing a set of classes used for finding (calculating and refining) UB matrix
+"""
+import os
+
+import ui_AddUBPeaksDialog
+import ui_UBSelectPeaksDialog
+import guiutility
+
+from PyQt4 import QtGui, QtCore
+
+
+class AddScansForUBDialog(QtGui.QDialog):
+    """
+    Dialog class to add scans to UB scans' table for calculating and
+    """
+    def __init__(self, parent):
+        """
+        initialization
+        :param parent: main GUI, reductionControl
+        """
+        super(AddScansForUBDialog, self).__init__(parent)
+        self._myParent = parent
+
+        # set up UI
+        self.ui = ui_AddUBPeaksDialog.Ui_Dialog()
+        self.ui.setupUi(self)
+
+        # initialize widgets
+        self.ui.checkBox_loadHKLfromFile.setChecked(True)
+
+        # define event handling
+        self.connect(self.ui.pushButton_findPeak, QtCore.SIGNAL('clicked()'),
+                     self.do_find_peak)
+        self.connect(self.ui.pushButton_addPeakToCalUB, QtCore.SIGNAL('clicked()'),
+                     self.do_add_single_scan)
+
+        self.connect(self.ui.pushButton_loadScans, QtCore.SIGNAL('clicked()'),
+                     self.do_load_scans)
+        self.connect(self.ui.pushButton_addScans, QtCore.SIGNAL('clicked()'),
+                     self.do_add_scans)
+
+        self.connect(self.ui.pushButton_quit, QtCore.SIGNAL('clicked()'),
+                     self.do_quit)
+
+        return
+
+    def do_add_scans(self):
+        """
+        add all the scans list in the 'plainTextEdit_scanList'
+        :return:
+        """
+        scans_str = str(self.ui.plainTextEdit_scanList.toPlainText())
+        scan_list = guiutility.parse_integer_list(scans_str)
+        self._myParent.add_scans_ub_table(scan_list)
+
+        return
+
+    def do_add_single_scan(self):
+        """
+        add single scan to refine UB matrix
+        :return:
+        """
+        scan_number = int(self.ui.lineEdit_scanNumber.text())
+        self._myParent.add_scans_ub_table([scan_number])
+
+        return
+
+    def do_find_peak(self):
+        """
+        find the peak(s) in a merged scan
+        :return:
+        """
+        # get scan number
+        status, ret_obj = guiutility.parse_integers_editors([self.ui.lineEdit_scanNumber])
+        if status:
+            scan_number = ret_obj[0]
+        else:
+            # pop error message
+            self._myParent.pop_one_button_dialog(ret_obj)
+            return
+
+        # load HKL from SPICE?
+        hkl_from_spice = self.ui.checkBox_loadHKLfromFile.isChecked()
+
+        # find peak
+        status, ret_obj = self._myParent.find_peak_in_scan(scan_number, hkl_from_spice)
+
+        # set the result
+        if status:
+            hkl, vec_q = ret_obj
+            if len(hkl) > 0:
+                self.ui.lineEdit_H.setText('%.2f' % hkl[0])
+                self.ui.lineEdit_K.setText('%.2f' % hkl[1])
+                self.ui.lineEdit_L.setText('%.2f' % hkl[2])
+
+            self.ui.lineEdit_sampleQx.setText('%.5E' % vec_q[0])
+            self.ui.lineEdit_sampleQy.setText('%.5E' % vec_q[1])
+            self.ui.lineEdit_sampleQz.setText('%.5E' % vec_q[2])
+        # END-IF
+
+        return
+
+    def do_load_scans(self):
+        """
+        load an ASCII file containing scan numbers,
+        and the results are written to 'plainTextEdit_scanList'
+        :return:
+        """
+        # get file name
+        scan_file = str(self.ui.lineEdit_scansFile.text())
+        if os.path.exists(scan_file) is False:
+            raise RuntimeError('Scan file {0} does not exist.'.format(scan_file))
+
+        # parse file
+        exp_number, scans_str = guiutility.import_scans_text_file(scan_file)
+
+        self.ui.plainTextEdit_scanList.setPlainText(scans_str)
+
+        return
+
+    def do_quit(self):
+        """
+        quit
+        :return:
+        """
+        self.close()
+
+        return
+
+
+class SelectUBMatrixScansDialog(QtGui.QDialog):
+    """
+    Dialog to select scans for processing UB matrix
+    """
+    def __init__(self, parent):
+        """
+        initialization
+        :param parent:
+        """
+        super(SelectUBMatrixScansDialog, self).__init__(parent)
+        self._myParent = parent
+
+        # set ui
+        self.ui = ui_UBSelectPeaksDialog.Ui_Dialog()
+        self.ui.setupUi(self)
+
+        # define event handling methods
+        self.connect(self.ui.pushButton_selectScans, QtCore.SIGNAL('clicked()'),
+                     self.do_select_scans)
+        self.connect(self.ui.pushButton_revertCurrentSelection, QtCore.SIGNAL('clicked()'),
+                     self.do_revert_selection)
+        self.connect(self.ui.pushButton_exportSelectedScans, QtCore.SIGNAL('clicked()'),
+                     self.do_export_selected_scans)
+
+        self.connect(self.ui.pushButton_quit, QtCore.SIGNAL('clicked()'),
+                     self.do_quit)
+
+        return
+
+    def do_quit(self):
+        """
+
+        :return:
+        """
+        self.close()
+
+        return
+
+    def do_export_selected_scans(self):
+        """
+        export selected scans to a file
+        :return:
+        """
+        # get the scans
+        scans_list = self._myParent.ub_matrix_processing_table.get_selected_scans()
+        scans_list.sort()
+
+        # form the output string
+        output_str = '# Exp = {0}.\n'.format(self._myParent.current_exp_number)
+        for scan in scans_list:
+            output_str += '{0}, '.format(scan)
+
+        # trim the last
+        output_str = output_str[:-2]
+
+        # get the output file name
+        file_filter = 'Text Files (*.dat);;All Files (*.*)'
+        file_name = str(QtGui.QFileDialog.getSaveFileName(self, 'File to export selected scans',
+                        self._myParent.working_directory, file_filter))
+
+        # write file
+        out_file = open(file_name, 'w')
+        out_file.write(output_str)
+        out_file.close()
+
+        return
+
+    def do_revert_selection(self):
+        """
+        revert the current selection of the UB table
+        :return:
+        """
+        self._myParent.ub_matrix_processing_table.revert_selection()
+
+        return
+
+    def do_select_scans(self):
+        """
+
+        :return:
+        """
+        # get check box
+        if self.ui.checkBox_selectAllPeaks.isChecked():
+            self._myParent.select_ub_scans(select_all=True)
+
+        else:
+            select_args = dict()
+
+            if self.ui.checkBox_selectNuclearPeaks.isChecked():
+                status, ret_obj = guiutility.parse_float_editors([self.ui.lineEdit_nuclearPeaksTolerance],
+                                                                 allow_blank=False)
+                if not status:
+                    raise RuntimeError(ret_obj)
+                hkl_tol = ret_obj[0]
+                select_args['nuclear_peaks'] = True
+                select_args['hkl_tolerance'] = hkl_tol
+
+            if self.ui.checkBox_wavelength.isChecked():
+                # wave length selection
+                status, ret_obj = guiutility.parse_float_editors([self.ui.lineEdit_wavelength,
+                                                                  self.ui.lineEdit_wavelengthTolerance],
+                                                                 allow_blank=False)
+                if status:
+                    wave_length, wave_length_tol = ret_obj
+                    select_args['wavelength'] = wave_length
+                    select_args['wavelength_tolerance'] = wave_length_tol
+                else:
+                    select_args['wavelength'] = None
+
+            # select with filters
+            self._myParent.ub_matrix_processing_table.select_scans(**select_args)
+        # END-IF-ELSE
+
+        return
diff --git a/scripts/HFIR_4Circle_Reduction/MainWindow.ui b/scripts/HFIR_4Circle_Reduction/MainWindow.ui
index 3f4c6aa6b59885cb43f09566a4c33eff1fbb64e2..8d8c0e94c60d05cd64fe56367869dd1aea5571ce 100644
--- a/scripts/HFIR_4Circle_Reduction/MainWindow.ui
+++ b/scripts/HFIR_4Circle_Reduction/MainWindow.ui
@@ -6,8 +6,8 @@
    <rect>
     <x>0</x>
     <y>0</y>
-    <width>1536</width>
-    <height>918</height>
+    <width>1568</width>
+    <height>1116</height>
    </rect>
   </property>
   <property name="windowTitle">
@@ -19,6 +19,41 @@
      <layout class="QVBoxLayout" name="verticalLayout_3">
       <item>
        <layout class="QHBoxLayout" name="horizontalLayout_General">
+        <item>
+         <widget class="QLabel" name="label_71">
+          <property name="font">
+           <font>
+            <weight>75</weight>
+            <bold>true</bold>
+           </font>
+          </property>
+          <property name="text">
+           <string>IPTS</string>
+          </property>
+         </widget>
+        </item>
+        <item>
+         <widget class="QLineEdit" name="lineEdit_iptsNumber">
+          <property name="sizePolicy">
+           <sizepolicy hsizetype="Maximum" vsizetype="Fixed">
+            <horstretch>0</horstretch>
+            <verstretch>0</verstretch>
+           </sizepolicy>
+          </property>
+          <property name="minimumSize">
+           <size>
+            <width>40</width>
+            <height>0</height>
+           </size>
+          </property>
+          <property name="maximumSize">
+           <size>
+            <width>60</width>
+            <height>16777215</height>
+           </size>
+          </property>
+         </widget>
+        </item>
         <item>
          <widget class="QLabel" name="label_exp">
           <property name="font">
@@ -48,6 +83,16 @@
           </property>
          </widget>
         </item>
+        <item>
+         <widget class="QComboBox" name="comboBox_expNumberList">
+          <property name="maximumSize">
+           <size>
+            <width>16777215</width>
+            <height>60</height>
+           </size>
+          </property>
+         </widget>
+        </item>
         <item>
          <widget class="QPushButton" name="pushButton_setExp">
           <property name="text">
@@ -168,7 +213,7 @@
            <bool>true</bool>
           </property>
           <property name="currentIndex">
-           <number>0</number>
+           <number>2</number>
           </property>
           <widget class="QWidget" name="tab">
            <attribute name="title">
@@ -848,6 +893,34 @@ p, li { white-space: pre-wrap; }
                  </property>
                 </widget>
                </item>
+               <item row="1" column="8">
+                <widget class="QLabel" name="label_70">
+                 <property name="text">
+                  <string>Detector Size</string>
+                 </property>
+                </widget>
+               </item>
+               <item row="1" column="10">
+                <widget class="QPushButton" name="pushButton_applyDetectorSize">
+                 <property name="text">
+                  <string>Apply</string>
+                 </property>
+                </widget>
+               </item>
+               <item row="1" column="9">
+                <widget class="QComboBox" name="comboBox_detectorSize">
+                 <item>
+                  <property name="text">
+                   <string>256 x 256</string>
+                  </property>
+                 </item>
+                 <item>
+                  <property name="text">
+                   <string>512 x 512</string>
+                  </property>
+                 </item>
+                </widget>
+               </item>
               </layout>
              </widget>
             </item>
@@ -855,7 +928,7 @@ p, li { white-space: pre-wrap; }
           </widget>
           <widget class="QWidget" name="tab_survey">
            <attribute name="title">
-            <string>Information</string>
+            <string>List Scans</string>
            </attribute>
            <layout class="QGridLayout" name="gridLayout_8">
             <item row="0" column="0">
@@ -1038,6 +1111,33 @@ p, li { white-space: pre-wrap; }
                     </property>
                    </widget>
                   </item>
+                  <item>
+                   <widget class="QPushButton" name="pushButton_viewRawSpice">
+                    <property name="font">
+                     <font>
+                      <pointsize>12</pointsize>
+                     </font>
+                    </property>
+                    <property name="text">
+                     <string>View Spice File</string>
+                    </property>
+                   </widget>
+                  </item>
+                  <item>
+                   <widget class="QPushButton" name="pushButton_viewSurveyPeak">
+                    <property name="font">
+                     <font>
+                      <pointsize>12</pointsize>
+                     </font>
+                    </property>
+                    <property name="toolTip">
+                     <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Set the selected peak to view and switch to tab 'View Raw Data'&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                    </property>
+                    <property name="text">
+                     <string>View Peak</string>
+                    </property>
+                   </widget>
+                  </item>
                   <item>
                    <spacer name="verticalSpacer_9">
                     <property name="orientation">
@@ -1055,29 +1155,71 @@ p, li { white-space: pre-wrap; }
                    </spacer>
                   </item>
                   <item>
-                   <widget class="QPushButton" name="pushButton_viewRawSpice">
-                    <property name="text">
-                     <string>View Spice File</string>
+                   <widget class="Line" name="line_23">
+                    <property name="orientation">
+                     <enum>Qt::Horizontal</enum>
                     </property>
                    </widget>
                   </item>
                   <item>
-                   <widget class="QPushButton" name="pushButton_viewSurveyPeak">
+                   <widget class="QPushButton" name="pushButton_addPeaksToRefine">
+                    <property name="font">
+                     <font>
+                      <pointsize>11</pointsize>
+                      <weight>75</weight>
+                      <bold>true</bold>
+                     </font>
+                    </property>
                     <property name="toolTip">
-                     <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Set the selected peak to view and switch to tab 'View Raw Data'&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                     <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Add all the selected peaks to the table in tab 'CalculateUB and switch to tab 'CalculateUB;&lt;/p&gt;&lt;p&gt;All the Pts. in each scan will be merged and &lt;span style=&quot; font-style:italic;&quot;&gt;FindPeaks()&lt;/span&gt; will be operated on each such merged data.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                     </property>
                     <property name="text">
-                     <string>View Peak</string>
+                     <string>Add to Calculate UB</string>
                     </property>
                    </widget>
                   </item>
                   <item>
-                   <widget class="QPushButton" name="pushButton_addPeaksToRefine">
+                   <widget class="QComboBox" name="comboBox_maskNamesSurvey">
+                    <property name="font">
+                     <font>
+                      <pointsize>11</pointsize>
+                     </font>
+                    </property>
                     <property name="toolTip">
-                     <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Add all the selected peaks to the table in tab 'CalculateUB and switch to tab 'CalculateUB;&lt;/p&gt;&lt;p&gt;All the Pts. in each scan will be merged and &lt;span style=&quot; font-style:italic;&quot;&gt;FindPeaks()&lt;/span&gt; will be operated on each such merged data.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                     <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:13pt;&quot;&gt;List of region of interest that can be applied to the selected scans that are added to tab 'Calculate UB' for UB matrix calculation.&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                    </property>
+                   </widget>
+                  </item>
+                  <item>
+                   <spacer name="verticalSpacer_30">
+                    <property name="orientation">
+                     <enum>Qt::Vertical</enum>
+                    </property>
+                    <property name="sizeType">
+                     <enum>QSizePolicy::Preferred</enum>
+                    </property>
+                    <property name="sizeHint" stdset="0">
+                     <size>
+                      <width>20</width>
+                      <height>40</height>
+                     </size>
+                    </property>
+                   </spacer>
+                  </item>
+                  <item>
+                   <widget class="QPushButton" name="pushButton_mergeScansSurvey">
+                    <property name="font">
+                     <font>
+                      <pointsize>11</pointsize>
+                      <weight>75</weight>
+                      <bold>true</bold>
+                     </font>
+                    </property>
+                    <property name="toolTip">
+                     <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Add selected scans to tab &amp;quot;Scans Processing&amp;quot; for merging or peak integrtoin&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                     </property>
                     <property name="text">
-                     <string>Add Peaks</string>
+                     <string>Add to Integrate/Merge</string>
                     </property>
                    </widget>
                   </item>
@@ -1791,7 +1933,7 @@ p, li { white-space: pre-wrap; }
                    <string>Region of Interest</string>
                   </property>
                   <layout class="QGridLayout" name="gridLayout_18">
-                   <item row="3" column="0">
+                   <item row="5" column="0">
                     <widget class="QPushButton" name="pushButton_cancelROI">
                      <property name="font">
                       <font>
@@ -1842,6 +1984,31 @@ p, li { white-space: pre-wrap; }
                      </property>
                     </widget>
                    </item>
+                   <item row="3" column="0">
+                    <widget class="QPushButton" name="pushButton_integrateROI">
+                     <property name="font">
+                      <font>
+                       <pointsize>10</pointsize>
+                      </font>
+                     </property>
+                     <property name="text">
+                      <string>Integrate</string>
+                     </property>
+                    </widget>
+                   </item>
+                   <item row="4" column="0">
+                    <spacer name="verticalSpacer_31">
+                     <property name="orientation">
+                      <enum>Qt::Vertical</enum>
+                     </property>
+                     <property name="sizeHint" stdset="0">
+                      <size>
+                       <width>20</width>
+                       <height>40</height>
+                      </size>
+                     </property>
+                    </spacer>
+                   </item>
                   </layout>
                  </widget>
                 </item>
@@ -1888,254 +2055,19 @@ p, li { white-space: pre-wrap; }
             <string>Calculate UB</string>
            </attribute>
            <layout class="QVBoxLayout" name="verticalLayout_19">
-            <item>
-             <widget class="QGroupBox" name="groupBox_7">
-              <property name="title">
-               <string>Add Peak</string>
-              </property>
-              <layout class="QGridLayout" name="gridLayout_2">
-               <item row="0" column="8">
-                <widget class="QLineEdit" name="lineEdit_H">
-                 <property name="maximumSize">
-                  <size>
-                   <width>60</width>
-                   <height>40</height>
-                  </size>
-                 </property>
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="6">
-                <spacer name="horizontalSpacer_4">
-                 <property name="orientation">
-                  <enum>Qt::Horizontal</enum>
-                 </property>
-                 <property name="sizeType">
-                  <enum>QSizePolicy::Preferred</enum>
-                 </property>
-                 <property name="sizeHint" stdset="0">
-                  <size>
-                   <width>40</width>
-                   <height>20</height>
-                  </size>
-                 </property>
-                </spacer>
-               </item>
-               <item row="0" column="0">
-                <widget class="QLabel" name="label_scanNo">
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                 <property name="text">
-                  <string>Scan Number</string>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="4">
-                <widget class="QPushButton" name="pushButton_findPeak">
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                   <weight>75</weight>
-                   <bold>true</bold>
-                  </font>
-                 </property>
-                 <property name="text">
-                  <string>Find Peak</string>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="7">
-                <widget class="QLabel" name="label_31">
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                 <property name="text">
-                  <string>Miller Index</string>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="1">
-                <widget class="QLineEdit" name="lineEdit_scanNumber">
-                 <property name="sizePolicy">
-                  <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                   <horstretch>0</horstretch>
-                   <verstretch>0</verstretch>
-                  </sizepolicy>
-                 </property>
-                 <property name="maximumSize">
-                  <size>
-                   <width>60</width>
-                   <height>16777215</height>
-                  </size>
-                 </property>
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="18">
-                <widget class="QPushButton" name="pushButton_viewScan3D">
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                   <weight>50</weight>
-                   <bold>false</bold>
-                  </font>
-                 </property>
-                 <property name="toolTip">
-                  <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:11pt;&quot;&gt;View peak in 3D&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
-                 </property>
-                 <property name="text">
-                  <string>View Peak</string>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="11">
-                <widget class="QLineEdit" name="lineEdit_K">
-                 <property name="maximumSize">
-                  <size>
-                   <width>60</width>
-                   <height>16777215</height>
-                  </size>
-                 </property>
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="12">
-                <widget class="QLineEdit" name="lineEdit_L">
-                 <property name="maximumSize">
-                  <size>
-                   <width>60</width>
-                   <height>16777215</height>
-                  </size>
-                 </property>
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="5">
-                <widget class="QCheckBox" name="checkBox_loadHKLfromFile">
-                 <property name="font">
-                  <font>
-                   <pointsize>8</pointsize>
-                   <italic>true</italic>
-                  </font>
-                 </property>
-                 <property name="text">
-                  <string>Load HKL from Spice file</string>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="13">
-                <widget class="QLabel" name="label_7">
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                 <property name="text">
-                  <string> Q-Sample </string>
-                 </property>
-                 <property name="alignment">
-                  <set>Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter</set>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="14">
-                <widget class="QLineEdit" name="lineEdit_sampleQx">
-                 <property name="enabled">
-                  <bool>false</bool>
-                 </property>
-                 <property name="maximumSize">
-                  <size>
-                   <width>120</width>
-                   <height>16777215</height>
-                  </size>
-                 </property>
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="16">
-                <widget class="QLineEdit" name="lineEdit_sampleQz">
-                 <property name="enabled">
-                  <bool>false</bool>
-                 </property>
-                 <property name="maximumSize">
-                  <size>
-                   <width>120</width>
-                   <height>16777215</height>
-                  </size>
-                 </property>
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="17">
-                <widget class="QPushButton" name="pushButton_addPeakToCalUB">
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                   <weight>75</weight>
-                   <bold>true</bold>
-                  </font>
-                 </property>
-                 <property name="text">
-                  <string>Add Peak</string>
-                 </property>
-                </widget>
-               </item>
-               <item row="0" column="15">
-                <widget class="QLineEdit" name="lineEdit_sampleQy">
-                 <property name="enabled">
-                  <bool>false</bool>
-                 </property>
-                 <property name="maximumSize">
-                  <size>
-                   <width>120</width>
-                   <height>16777215</height>
-                  </size>
-                 </property>
-                 <property name="font">
-                  <font>
-                   <pointsize>10</pointsize>
-                  </font>
-                 </property>
-                </widget>
-               </item>
-              </layout>
-             </widget>
-            </item>
             <item>
              <layout class="QHBoxLayout" name="horizontalLayout_7">
               <item>
                <layout class="QVBoxLayout" name="verticalLayout_4">
                 <item>
-                 <widget class="UBMatrixPeakTable" name="tableWidget_peaksCalUB"/>
+                 <widget class="UBMatrixPeakTable" name="tableWidget_peaksCalUB">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Expanding" vsizetype="Expanding">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                 </widget>
                 </item>
                 <item>
                  <layout class="QHBoxLayout" name="horizontalLayout_10">
@@ -2153,6 +2085,22 @@ p, li { white-space: pre-wrap; }
                     </property>
                    </widget>
                   </item>
+                  <item>
+                   <spacer name="horizontalSpacer_38">
+                    <property name="orientation">
+                     <enum>Qt::Horizontal</enum>
+                    </property>
+                    <property name="sizeType">
+                     <enum>QSizePolicy::Preferred</enum>
+                    </property>
+                    <property name="sizeHint" stdset="0">
+                     <size>
+                      <width>40</width>
+                      <height>20</height>
+                     </size>
+                    </property>
+                   </spacer>
+                  </item>
                   <item>
                    <widget class="QPushButton" name="pushButton_setHKL2Int">
                     <property name="font">
@@ -2165,6 +2113,9 @@ p, li { white-space: pre-wrap; }
                     </property>
                    </widget>
                   </item>
+                  <item>
+                   <widget class="QComboBox" name="comboBox_hklType"/>
+                  </item>
                   <item>
                    <widget class="QPushButton" name="pushButton_undoSetToInteger">
                     <property name="font">
@@ -2234,25 +2185,16 @@ p, li { white-space: pre-wrap; }
               <item>
                <layout class="QVBoxLayout" name="verticalLayout_8">
                 <item>
-                 <widget class="QPushButton" name="pushButton_selectAllPeaks">
-                  <property name="text">
-                   <string>Select All</string>
-                  </property>
-                 </widget>
-                </item>
-                <item>
-                 <widget class="QCheckBox" name="checkBox_ubNuclearPeaks">
-                  <property name="enabled">
-                   <bool>true</bool>
-                  </property>
+                 <widget class="QPushButton" name="pushButton_addUBScans">
                   <property name="font">
                    <font>
-                    <pointsize>9</pointsize>
-                    <italic>true</italic>
+                    <pointsize>10</pointsize>
+                    <weight>75</weight>
+                    <bold>true</bold>
                    </font>
                   </property>
                   <property name="text">
-                   <string>nuclear peaks</string>
+                   <string>Add Peaks</string>
                   </property>
                  </widget>
                 </item>
@@ -2273,13 +2215,10 @@ p, li { white-space: pre-wrap; }
                  </spacer>
                 </item>
                 <item>
-                 <spacer name="verticalSpacer_8">
+                 <spacer name="verticalSpacer_13">
                   <property name="orientation">
                    <enum>Qt::Vertical</enum>
                   </property>
-                  <property name="sizeType">
-                   <enum>QSizePolicy::Preferred</enum>
-                  </property>
                   <property name="sizeHint" stdset="0">
                    <size>
                     <width>20</width>
@@ -2289,29 +2228,92 @@ p, li { white-space: pre-wrap; }
                  </spacer>
                 </item>
                 <item>
-                 <widget class="QPushButton" name="pushButton_plotSelectedData">
-                  <property name="minimumSize">
-                   <size>
-                    <width>120</width>
-                    <height>0</height>
-                   </size>
-                  </property>
-                  <property name="maximumSize">
-                   <size>
-                    <width>120</width>
-                    <height>16777215</height>
-                   </size>
-                  </property>
-                  <property name="text">
-                   <string>Plot Scan</string>
+                 <widget class="QGroupBox" name="groupBox_24">
+                  <property name="title">
+                   <string>Peaks Filter</string>
                   </property>
+                  <layout class="QVBoxLayout" name="verticalLayout_11">
+                   <item>
+                    <widget class="QRadioButton" name="radioButton_ubSelectAllScans">
+                     <property name="font">
+                      <font>
+                       <pointsize>10</pointsize>
+                       <weight>75</weight>
+                       <bold>true</bold>
+                      </font>
+                     </property>
+                     <property name="text">
+                      <string>Select all Peaks</string>
+                     </property>
+                    </widget>
+                   </item>
+                   <item>
+                    <widget class="QRadioButton" name="radioButton_ubSelectNoScan">
+                     <property name="font">
+                      <font>
+                       <pointsize>10</pointsize>
+                       <weight>75</weight>
+                       <bold>true</bold>
+                      </font>
+                     </property>
+                     <property name="text">
+                      <string>No Peak</string>
+                     </property>
+                    </widget>
+                   </item>
+                   <item>
+                    <widget class="QRadioButton" name="radioButton_ubAdvancedSelection">
+                     <property name="font">
+                      <font>
+                       <pointsize>10</pointsize>
+                       <weight>75</weight>
+                       <bold>true</bold>
+                      </font>
+                     </property>
+                     <property name="text">
+                      <string>Advanced Setup</string>
+                     </property>
+                    </widget>
+                   </item>
+                   <item>
+                    <spacer name="verticalSpacer_10">
+                     <property name="orientation">
+                      <enum>Qt::Vertical</enum>
+                     </property>
+                     <property name="sizeType">
+                      <enum>QSizePolicy::Ignored</enum>
+                     </property>
+                     <property name="sizeHint" stdset="0">
+                      <size>
+                       <width>20</width>
+                       <height>40</height>
+                      </size>
+                     </property>
+                    </spacer>
+                   </item>
+                   <item>
+                    <widget class="QCheckBox" name="checkBox_hideUnselectedUBPeaks">
+                     <property name="font">
+                      <font>
+                       <pointsize>9</pointsize>
+                      </font>
+                     </property>
+                     <property name="text">
+                      <string>Hide Unselected</string>
+                     </property>
+                    </widget>
+                   </item>
+                  </layout>
                  </widget>
                 </item>
                 <item>
-                 <spacer name="verticalSpacer_11">
+                 <spacer name="verticalSpacer_8">
                   <property name="orientation">
                    <enum>Qt::Vertical</enum>
                   </property>
+                  <property name="sizeType">
+                   <enum>QSizePolicy::Preferred</enum>
+                  </property>
                   <property name="sizeHint" stdset="0">
                    <size>
                     <width>20</width>
@@ -2321,26 +2323,32 @@ p, li { white-space: pre-wrap; }
                  </spacer>
                 </item>
                 <item>
-                 <widget class="QPushButton" name="pushButton_deleteUBPeak">
-                  <property name="minimumSize">
-                   <size>
-                    <width>120</width>
-                    <height>0</height>
-                   </size>
+                 <widget class="Line" name="line_7">
+                  <property name="orientation">
+                   <enum>Qt::Horizontal</enum>
                   </property>
-                  <property name="maximumSize">
-                   <size>
-                    <width>120</width>
-                    <height>16777215</height>
-                   </size>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QPushButton" name="pushButton_2">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
                   </property>
                   <property name="text">
-                   <string>Delete</string>
+                   <string>Plot Scan 2D</string>
                   </property>
                  </widget>
                 </item>
                 <item>
-                 <widget class="QPushButton" name="pushButton_clearUBPeakTable">
+                 <widget class="QPushButton" name="pushButton_plotSelectedData">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
                   <property name="minimumSize">
                    <size>
                     <width>120</width>
@@ -2349,25 +2357,56 @@ p, li { white-space: pre-wrap; }
                   </property>
                   <property name="maximumSize">
                    <size>
-                    <width>120</width>
+                    <width>200</width>
                     <height>16777215</height>
                    </size>
                   </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="text">
+                   <string>Plot Scan 3D</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QPushButton" name="pushButton_viewScan3D">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                    <weight>50</weight>
+                    <bold>false</bold>
+                   </font>
+                  </property>
                   <property name="toolTip">
-                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Clear the peak table on the left&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:11pt;&quot;&gt;View peak in 3D&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                   </property>
                   <property name="text">
-                   <string>Clear</string>
+                   <string>View Peak</string>
                   </property>
                  </widget>
                 </item>
                 <item>
-                 <spacer name="verticalSpacer_12">
+                 <widget class="QPushButton" name="pushButton">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="text">
+                   <string>Show Spice File</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <spacer name="verticalSpacer_11">
                   <property name="orientation">
                    <enum>Qt::Vertical</enum>
                   </property>
                   <property name="sizeType">
-                   <enum>QSizePolicy::Ignored</enum>
+                   <enum>QSizePolicy::Preferred</enum>
                   </property>
                   <property name="sizeHint" stdset="0">
                    <size>
@@ -2378,9 +2417,138 @@ p, li { white-space: pre-wrap; }
                  </spacer>
                 </item>
                 <item>
-                 <spacer name="verticalSpacer_7">
+                 <widget class="Line" name="line_9">
                   <property name="orientation">
-                   <enum>Qt::Vertical</enum>
+                   <enum>Qt::Horizontal</enum>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QPushButton" name="pushButton_reCalPeakCenter">
+                  <property name="enabled">
+                   <bool>true</bool>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>11</pointsize>
+                   </font>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:12pt;&quot;&gt;Re-calculate selected peaks' centers with region of interest (ROI)&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                  <property name="text">
+                   <string>Calculate Peak Center</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QComboBox" name="comboBox_maskNamesUB">
+                  <property name="enabled">
+                   <bool>true</bool>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>11</pointsize>
+                   </font>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <spacer name="verticalSpacer_29">
+                  <property name="orientation">
+                   <enum>Qt::Vertical</enum>
+                  </property>
+                  <property name="sizeType">
+                   <enum>QSizePolicy::Preferred</enum>
+                  </property>
+                  <property name="sizeHint" stdset="0">
+                   <size>
+                    <width>20</width>
+                    <height>40</height>
+                   </size>
+                  </property>
+                 </spacer>
+                </item>
+                <item>
+                 <widget class="Line" name="line_22">
+                  <property name="orientation">
+                   <enum>Qt::Horizontal</enum>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QPushButton" name="pushButton_deleteUBPeak">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="minimumSize">
+                   <size>
+                    <width>120</width>
+                    <height>0</height>
+                   </size>
+                  </property>
+                  <property name="maximumSize">
+                   <size>
+                    <width>200</width>
+                    <height>16777215</height>
+                   </size>
+                  </property>
+                  <property name="text">
+                   <string>Delete</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QPushButton" name="pushButton_clearUBPeakTable">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="minimumSize">
+                   <size>
+                    <width>120</width>
+                    <height>0</height>
+                   </size>
+                  </property>
+                  <property name="maximumSize">
+                   <size>
+                    <width>200</width>
+                    <height>16777215</height>
+                   </size>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Clear the peak table on the left&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                  <property name="text">
+                   <string>Clear</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <spacer name="verticalSpacer_12">
+                  <property name="orientation">
+                   <enum>Qt::Vertical</enum>
+                  </property>
+                  <property name="sizeType">
+                   <enum>QSizePolicy::Ignored</enum>
+                  </property>
+                  <property name="sizeHint" stdset="0">
+                   <size>
+                    <width>20</width>
+                    <height>40</height>
+                   </size>
+                  </property>
+                 </spacer>
+                </item>
+                <item>
+                 <spacer name="verticalSpacer_7">
+                  <property name="orientation">
+                   <enum>Qt::Vertical</enum>
                   </property>
                   <property name="sizeHint" stdset="0">
                    <size>
@@ -2463,7 +2631,22 @@ p, li { white-space: pre-wrap; }
                       <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:11pt;&quot;&gt;Refine UB matrix by indexed peaks&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                      </property>
                      <property name="text">
-                      <string>Refine by Indexed</string>
+                      <string>Refine by SPICE HKL</string>
+                     </property>
+                    </widget>
+                   </item>
+                   <item>
+                    <widget class="QPushButton" name="pushButton_refineUBCalIndex">
+                     <property name="font">
+                      <font>
+                       <pointsize>8</pointsize>
+                      </font>
+                     </property>
+                     <property name="toolTip">
+                      <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:11pt;&quot;&gt;Refine UB matrix by indexed peaks&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                     </property>
+                     <property name="text">
+                      <string>Refine by Calculated HKL</string>
                      </property>
                     </widget>
                    </item>
@@ -2483,7 +2666,7 @@ p, li { white-space: pre-wrap; }
                      </property>
                      <property name="maximumSize">
                       <size>
-                       <width>120</width>
+                       <width>200</width>
                        <height>100</height>
                       </size>
                      </property>
@@ -3412,7 +3595,7 @@ p, li { white-space: pre-wrap; }
           </widget>
           <widget class="QWidget" name="tab_advsetup">
            <attribute name="title">
-            <string>Merge Scan</string>
+            <string>Scans Processing</string>
            </attribute>
            <layout class="QGridLayout" name="gridLayout_4">
             <item row="4" column="0">
@@ -3424,23 +3607,32 @@ p, li { white-space: pre-wrap; }
             </item>
             <item row="1" column="0">
              <layout class="QGridLayout" name="gridLayout_10">
-              <item row="0" column="6">
-               <spacer name="horizontalSpacer_12">
-                <property name="orientation">
-                 <enum>Qt::Horizontal</enum>
+              <item row="0" column="3">
+               <widget class="QLabel" name="label_peakIndexType">
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                 </font>
                 </property>
-                <property name="sizeType">
-                 <enum>QSizePolicy::Preferred</enum>
+                <property name="text">
+                 <string>Peaks indexed by</string>
                 </property>
-                <property name="sizeHint" stdset="0">
+               </widget>
+              </item>
+              <item row="0" column="9">
+               <widget class="QLabel" name="label_30">
+                <property name="maximumSize">
                  <size>
-                  <width>40</width>
-                  <height>20</height>
+                  <width>100</width>
+                  <height>16777215</height>
                  </size>
                 </property>
-               </spacer>
+                <property name="text">
+                 <string>Scans List</string>
+                </property>
+               </widget>
               </item>
-              <item row="0" column="2">
+              <item row="0" column="5">
                <spacer name="horizontalSpacer_10">
                 <property name="orientation">
                  <enum>Qt::Horizontal</enum>
@@ -3456,27 +3648,7 @@ p, li { white-space: pre-wrap; }
                 </property>
                </spacer>
               </item>
-              <item row="0" column="3">
-               <widget class="QLabel" name="label_29">
-                <property name="toolTip">
-                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;For MDEventsWorkspace with merged runs&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;p&gt;For example:&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
-                </property>
-                <property name="text">
-                 <string>Base Workspace Name</string>
-                </property>
-               </widget>
-              </item>
-              <item row="0" column="4">
-               <widget class="QLineEdit" name="lineEdit_baseMergeMDName">
-                <property name="sizePolicy">
-                 <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                  <horstretch>0</horstretch>
-                  <verstretch>0</verstretch>
-                 </sizepolicy>
-                </property>
-               </widget>
-              </item>
-              <item row="0" column="1">
+              <item row="0" column="10">
                <widget class="QLineEdit" name="lineEdit_listScansSliceView">
                 <property name="sizePolicy">
                  <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
@@ -3489,14 +3661,35 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-              <item row="0" column="0">
-               <widget class="QLabel" name="label_30">
+              <item row="0" column="7">
+               <widget class="QPushButton" name="pushButton_showIntegrateDetails">
+                <property name="font">
+                 <font>
+                  <pointsize>9</pointsize>
+                 </font>
+                </property>
                 <property name="text">
-                 <string>Scans List</string>
+                 <string>Show Integrate Details</string>
                 </property>
                </widget>
               </item>
               <item row="0" column="8">
+               <spacer name="horizontalSpacer_39">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Preferred</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>40</width>
+                  <height>20</height>
+                 </size>
+                </property>
+               </spacer>
+              </item>
+              <item row="0" column="6">
                <widget class="QPushButton" name="pushButton_showUB">
                 <property name="font">
                  <font>
@@ -3508,25 +3701,38 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-              <item row="0" column="5">
-               <widget class="QCheckBox" name="checkBox_useDefaultMergedName">
+              <item row="0" column="0">
+               <widget class="QLabel" name="label_29">
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                 </font>
+                </property>
                 <property name="text">
-                 <string>default</string>
+                 <string>Scan Merging &amp; Integration Table</string>
                 </property>
                </widget>
               </item>
-              <item row="0" column="9">
-               <widget class="QPushButton" name="pushButton_refreshMerged">
-                <property name="enabled">
-                 <bool>false</bool>
+              <item row="0" column="2">
+               <spacer name="horizontalSpacer_12">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
-                <property name="font">
-                 <font>
-                  <pointsize>10</pointsize>
-                 </font>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Ignored</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>40</width>
+                  <height>20</height>
+                 </size>
                 </property>
+               </spacer>
+              </item>
+              <item row="0" column="4">
+               <widget class="QLabel" name="label_peaksIndexedBy">
                 <property name="text">
-                 <string>Refresh</string>
+                 <string/>
                 </property>
                </widget>
               </item>
@@ -3544,6 +3750,11 @@ p, li { white-space: pre-wrap; }
             </item>
             <item row="3" column="0">
              <widget class="ProcessTableWidget" name="tableWidget_mergeScans">
+              <property name="font">
+               <font>
+                <pointsize>9</pointsize>
+               </font>
+              </property>
               <property name="toolTip">
                <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;? columns: &lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;p&gt;1. Scan number&lt;/p&gt;&lt;p&gt;2. Number of Pts.&lt;/p&gt;&lt;p&gt;3. Status: &lt;/p&gt;&lt;p&gt;(a) done&lt;/p&gt;&lt;p&gt;(b) error with error message&lt;/p&gt;&lt;p&gt;(c) on-going&lt;/p&gt;&lt;p&gt;(d) empty as not yet&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
               </property>
@@ -3596,21 +3807,76 @@ p, li { white-space: pre-wrap; }
                </widget>
               </item>
               <item>
-               <widget class="Line" name="line_8">
-                <property name="orientation">
-                 <enum>Qt::Horizontal</enum>
+               <widget class="QPushButton" name="pushButton_refreshMerged">
+                <property name="enabled">
+                 <bool>true</bool>
+                </property>
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                 </font>
+                </property>
+                <property name="text">
+                 <string>Refresh Table</string>
                 </property>
                </widget>
               </item>
               <item>
-               <widget class="QGroupBox" name="groupBox_20">
+               <widget class="QPushButton" name="pushButton_toggleIntegrateType">
                 <property name="font">
                  <font>
-                  <pointsize>10</pointsize>
+                  <pointsize>9</pointsize>
                  </font>
                 </property>
-                <property name="title">
-                 <string>Single Scan</string>
+                <property name="text">
+                 <string>Toggle Int Type</string>
+                </property>
+               </widget>
+              </item>
+              <item>
+               <widget class="QPushButton" name="pushButton_exportSelectedPeaks">
+                <property name="font">
+                 <font>
+                  <pointsize>9</pointsize>
+                 </font>
+                </property>
+                <property name="text">
+                 <string>Export Selection</string>
+                </property>
+               </widget>
+              </item>
+              <item>
+               <spacer name="verticalSpacer_24">
+                <property name="orientation">
+                 <enum>Qt::Vertical</enum>
+                </property>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Preferred</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>20</width>
+                  <height>40</height>
+                 </size>
+                </property>
+               </spacer>
+              </item>
+              <item>
+               <widget class="Line" name="line_8">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+               </widget>
+              </item>
+              <item>
+               <widget class="QGroupBox" name="groupBox_20">
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                 </font>
+                </property>
+                <property name="title">
+                 <string>Single Scan</string>
                 </property>
                 <layout class="QVBoxLayout" name="verticalLayout_20">
                  <item>
@@ -3630,6 +3896,21 @@ p, li { white-space: pre-wrap; }
                    </property>
                   </widget>
                  </item>
+                 <item>
+                  <widget class="QPushButton" name="pushButton_setupPeakIntegration">
+                   <property name="font">
+                    <font>
+                     <pointsize>10</pointsize>
+                    </font>
+                   </property>
+                   <property name="toolTip">
+                    <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Switch to tab 'Peak Integrate' to set up options for peak integration&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                   </property>
+                   <property name="text">
+                    <string>Single Pt. Set up</string>
+                   </property>
+                  </widget>
+                 </item>
                  <item>
                   <widget class="QPushButton" name="pushButton_convertMerged2HKL">
                    <property name="font">
@@ -3660,13 +3941,6 @@ p, li { white-space: pre-wrap; }
                 </layout>
                </widget>
               </item>
-              <item>
-               <widget class="Line" name="line_2">
-                <property name="orientation">
-                 <enum>Qt::Horizontal</enum>
-                </property>
-               </widget>
-              </item>
               <item>
                <spacer name="verticalSpacer_23">
                 <property name="orientation">
@@ -3683,6 +3957,13 @@ p, li { white-space: pre-wrap; }
                 </property>
                </spacer>
               </item>
+              <item>
+               <widget class="Line" name="line_2">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+               </widget>
+              </item>
               <item>
                <widget class="QGroupBox" name="groupBox_22">
                 <property name="font">
@@ -3894,7 +4175,7 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-              <item row="1" column="0">
+              <item row="2" column="0">
                <widget class="QPushButton" name="pushButton_exportPeaks">
                 <property name="font">
                  <font>
@@ -3908,28 +4189,44 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
+              <item row="1" column="0">
+               <widget class="Line" name="line_14">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+               </widget>
+              </item>
              </layout>
             </item>
             <item row="5" column="0">
              <layout class="QGridLayout" name="gridLayout_22">
-              <item row="0" column="10">
-               <spacer name="horizontalSpacer_19">
-                <property name="orientation">
-                 <enum>Qt::Horizontal</enum>
+              <item row="0" column="8">
+               <widget class="QLabel" name="label_39">
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                  <weight>75</weight>
+                  <bold>true</bold>
+                 </font>
                 </property>
-                <property name="sizeType">
-                 <enum>QSizePolicy::Preferred</enum>
+                <property name="text">
+                 <string>Scale Factor</string>
                 </property>
-                <property name="sizeHint" stdset="0">
+               </widget>
+              </item>
+              <item row="0" column="9">
+               <widget class="QLineEdit" name="lineEdit_scaleFactor">
+                <property name="maximumSize">
                  <size>
-                  <width>40</width>
-                  <height>20</height>
+                  <width>180</width>
+                  <height>16777215</height>
                  </size>
                 </property>
-               </spacer>
-              </item>
-              <item row="0" column="8">
-               <widget class="QLineEdit" name="lineEdit_scaleFactor">
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                 </font>
+                </property>
                 <property name="toolTip">
                  <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Peak intensity scale factor&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                 </property>
@@ -3949,7 +4246,7 @@ p, li { white-space: pre-wrap; }
                 </item>
                </widget>
               </item>
-              <item row="1" column="0">
+              <item row="2" column="0">
                <widget class="QLabel" name="label_52">
                 <property name="font">
                  <font>
@@ -3962,8 +4259,14 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-              <item row="1" column="2">
-               <widget class="QComboBox" name="comboBox_kVectors"/>
+              <item row="2" column="2">
+               <widget class="QComboBox" name="comboBox_kVectors">
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                 </font>
+                </property>
+               </widget>
               </item>
               <item row="0" column="0">
                <widget class="QLabel" name="label_47">
@@ -3978,25 +4281,16 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-              <item row="0" column="7">
-               <widget class="QLabel" name="label_39">
+              <item row="0" column="2">
+               <widget class="QComboBox" name="comboBox_mergePeakNormType">
                 <property name="font">
                  <font>
                   <pointsize>10</pointsize>
-                  <weight>75</weight>
-                  <bold>true</bold>
                  </font>
                 </property>
-                <property name="text">
-                 <string>Scale Factor</string>
-                </property>
-               </widget>
-              </item>
-              <item row="0" column="2">
-               <widget class="QComboBox" name="comboBox_mergePeakNormType">
                 <item>
                  <property name="text">
-                  <string>Absolute</string>
+                  <string>Normalized by Time</string>
                  </property>
                 </item>
                 <item>
@@ -4006,7 +4300,7 @@ p, li { white-space: pre-wrap; }
                 </item>
                 <item>
                  <property name="text">
-                  <string>Normalized by Time</string>
+                  <string>Absolute</string>
                  </property>
                 </item>
                </widget>
@@ -4027,14 +4321,7 @@ p, li { white-space: pre-wrap; }
                 </property>
                </spacer>
               </item>
-              <item row="0" column="6">
-               <widget class="QLineEdit" name="lineEdit_numPt4BackgroundRight">
-                <property name="toolTip">
-                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Number of background points take for the right side if it is different from that for left side&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
-                </property>
-               </widget>
-              </item>
-              <item row="1" column="3">
+              <item row="2" column="3">
                <widget class="QPushButton" name="pushButton_applyKShift">
                 <property name="font">
                  <font>
@@ -4049,36 +4336,92 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-              <item row="0" column="9">
-               <widget class="QPushButton" name="pushButton_setupPeakIntegration">
+              <item row="0" column="6">
+               <widget class="QLabel" name="label_48">
                 <property name="font">
                  <font>
                   <pointsize>10</pointsize>
+                  <weight>50</weight>
+                  <bold>false</bold>
                  </font>
                 </property>
+                <property name="text">
+                 <string>Background</string>
+                </property>
+               </widget>
+              </item>
+              <item row="2" column="9">
+               <widget class="QComboBox" name="comboBox_kVectorToExport">
                 <property name="toolTip">
-                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Switch to tab 'Peak Integrate' to set up options for peak integration&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;select the K-vector to export.  Option includes&lt;/p&gt;&lt;p&gt;1. nuclear peaks: k-vector = (0, 0, 0)&lt;/p&gt;&lt;p&gt;2. magnetic peaks with certain k-vector&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                 </property>
-                <property name="text">
-                 <string>Single Pt. Set up</string>
+               </widget>
+              </item>
+              <item row="1" column="10">
+               <widget class="Line" name="line_17">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
                </widget>
               </item>
-              <item row="0" column="5">
-               <widget class="QLineEdit" name="lineEdit_numPt4Background">
-                <property name="sizePolicy">
-                 <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                  <horstretch>0</horstretch>
-                  <verstretch>0</verstretch>
-                 </sizepolicy>
+              <item row="1" column="8">
+               <widget class="Line" name="line_11">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
-                <property name="toolTip">
-                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Number of Pt. to calculate background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+               </widget>
+              </item>
+              <item row="0" column="7">
+               <layout class="QHBoxLayout" name="horizontalLayout_23">
+                <item>
+                 <widget class="QLineEdit" name="lineEdit_numPt4Background">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="maximumSize">
+                   <size>
+                    <width>60</width>
+                    <height>16777215</height>
+                   </size>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Number of Pt. to calculate background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QLineEdit" name="lineEdit_numPt4BackgroundRight">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="maximumSize">
+                   <size>
+                    <width>60</width>
+                    <height>16777215</height>
+                   </size>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Number of background points take for the right side if it is different from that for left side&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                 </widget>
+                </item>
+               </layout>
+              </item>
+              <item row="1" column="0">
+               <widget class="Line" name="line_10">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
                </widget>
               </item>
               <item row="0" column="4">
-               <widget class="QLabel" name="label_48">
+               <widget class="QCheckBox" name="checkBox">
                 <property name="font">
                  <font>
                   <pointsize>10</pointsize>
@@ -4086,15 +4429,28 @@ p, li { white-space: pre-wrap; }
                   <bold>true</bold>
                  </font>
                 </property>
+                <property name="toolTip">
+                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;If True, then the peak intensity will be calculated for a Gaussian function, which is fit to a Pt. - Counts curve.&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;p&gt;Function to fit is a Gaussian plus linear background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                </property>
                 <property name="text">
-                 <string>Background</string>
+                 <string>Fit By Gaussian</string>
                 </property>
                </widget>
               </item>
-              <item row="1" column="9">
-               <widget class="QComboBox" name="comboBox_kVectorToExport"/>
+              <item row="2" column="7">
+               <widget class="QLabel" name="label_63">
+                <property name="font">
+                 <font>
+                  <weight>75</weight>
+                  <bold>true</bold>
+                 </font>
+                </property>
+                <property name="text">
+                 <string>Export Setup</string>
+                </property>
+               </widget>
               </item>
-              <item row="1" column="8">
+              <item row="2" column="8">
                <widget class="QCheckBox" name="checkBox_exportAbsorptionToFP">
                 <property name="font">
                  <font>
@@ -4110,92 +4466,148 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-             </layout>
-            </item>
-           </layout>
-          </widget>
-          <widget class="QWidget" name="tab_indexPeak">
-           <attribute name="title">
-            <string>Peak Integration</string>
-           </attribute>
-           <layout class="QVBoxLayout" name="verticalLayout_11">
-            <item>
-             <layout class="QGridLayout" name="gridLayout_13">
-              <item row="0" column="5">
-               <spacer name="horizontalSpacer_17">
+              <item row="1" column="7">
+               <widget class="Line" name="line_12">
                 <property name="orientation">
                  <enum>Qt::Horizontal</enum>
                 </property>
-                <property name="sizeType">
-                 <enum>QSizePolicy::Expanding</enum>
-                </property>
-                <property name="sizeHint" stdset="0">
-                 <size>
-                  <width>40</width>
-                  <height>20</height>
-                 </size>
-                </property>
-               </spacer>
+               </widget>
               </item>
-              <item row="0" column="10">
-               <widget class="QLineEdit" name="lineEdit_background">
-                <property name="sizePolicy">
-                 <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                  <horstretch>0</horstretch>
-                  <verstretch>0</verstretch>
-                 </sizepolicy>
+              <item row="2" column="5">
+               <widget class="Line" name="line_15">
+                <property name="orientation">
+                 <enum>Qt::Vertical</enum>
                 </property>
                </widget>
               </item>
-              <item row="0" column="1">
-               <widget class="QLineEdit" name="lineEdit_scanIntegratePeak">
-                <property name="sizePolicy">
-                 <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
-                  <horstretch>0</horstretch>
-                  <verstretch>0</verstretch>
-                 </sizepolicy>
+              <item row="1" column="9">
+               <widget class="Line" name="line_16">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+               </widget>
+              </item>
+              <item row="0" column="10">
+               <spacer name="horizontalSpacer_19">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Preferred</enum>
                 </property>
-                <property name="readOnly">
-                 <bool>false</bool>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>42</width>
+                  <height>26</height>
+                 </size>
+                </property>
+               </spacer>
+              </item>
+              <item row="1" column="6">
+               <widget class="Line" name="line_13">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
                </widget>
               </item>
-              <item row="0" column="0">
-               <widget class="QLabel" name="label_5">
-                <property name="text">
-                 <string>Scan Number</string>
+              <item row="1" column="2">
+               <widget class="Line" name="line_18">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
                </widget>
               </item>
-              <item row="0" column="2">
-               <widget class="QPushButton" name="pushButton_nextScanIntegrate">
-                <property name="text">
-                 <string>Next</string>
+              <item row="1" column="1">
+               <widget class="Line" name="line_19">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
                </widget>
               </item>
-              <item row="0" column="6">
-               <widget class="QLabel" name="label_34">
-                <property name="text">
-                 <string>Peak Radius</string>
+              <item row="1" column="3">
+               <widget class="Line" name="line_20">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
                 </property>
                </widget>
               </item>
-              <item row="0" column="11">
-               <widget class="QPushButton" name="pushButton_integratePeak">
-                <property name="font">
-                 <font>
-                  <weight>50</weight>
-                  <bold>false</bold>
-                 </font>
+              <item row="1" column="4">
+               <widget class="Line" name="line_21">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+               </widget>
+              </item>
+             </layout>
+            </item>
+           </layout>
+          </widget>
+          <widget class="QWidget" name="tab_indexPeak">
+           <attribute name="title">
+            <string>Peak Integration</string>
+           </attribute>
+           <layout class="QGridLayout" name="gridLayout_13">
+            <item row="0" column="3">
+             <layout class="QHBoxLayout" name="horizontalLayout_21">
+              <item>
+               <layout class="QHBoxLayout" name="horizontalLayout_20">
+                <item>
+                 <widget class="QLabel" name="label_34">
+                  <property name="text">
+                   <string>Scan Number</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <widget class="QLineEdit" name="lineEdit_scanIntegratePeak">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="readOnly">
+                   <bool>false</bool>
+                  </property>
+                 </widget>
+                </item>
+               </layout>
+              </item>
+              <item>
+               <widget class="QPushButton" name="pushButton_integratePt">
+                <property name="toolTip">
+                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Function to fit Pt.&lt;/p&gt;&lt;p&gt;  A * exp( - (x-x0)**2/(2 * sigma**2) )+B&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                 </property>
                 <property name="text">
                  <string>Integrate Peak</string>
                 </property>
                </widget>
               </item>
-              <item row="0" column="7">
-               <widget class="QLineEdit" name="lineEdit_peakRadius">
+              <item>
+               <spacer name="horizontalSpacer_17">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Ignored</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>40</width>
+                  <height>20</height>
+                 </size>
+                </property>
+               </spacer>
+              </item>
+              <item>
+               <widget class="QLabel" name="label_69">
+                <property name="text">
+                 <string>Scale Factor</string>
+                </property>
+               </widget>
+              </item>
+              <item>
+               <widget class="QLineEdit" name="lineEdit_scaleFactorScan">
                 <property name="sizePolicy">
                  <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
                   <horstretch>0</horstretch>
@@ -4204,156 +4616,655 @@ p, li { white-space: pre-wrap; }
                 </property>
                </widget>
               </item>
-              <item row="0" column="9">
-               <widget class="QLabel" name="label_46">
+              <item>
+               <widget class="QLabel" name="label_64">
                 <property name="text">
-                 <string>Background</string>
+                 <string>Background Pts.</string>
                 </property>
                </widget>
               </item>
-             </layout>
-            </item>
-            <item>
-             <widget class="QLabel" name="label_peakIntegraeInfo">
-              <property name="text">
-               <string>TextLabel</string>
-              </property>
-             </widget>
-            </item>
-            <item>
-             <layout class="QGridLayout" name="gridLayout_15">
-              <item row="0" column="0">
-               <widget class="PeakIntegrationTableWidget" name="tableWidget_peakIntegration">
+              <item>
+               <widget class="QLineEdit" name="lineEdit_backgroundPts">
                 <property name="sizePolicy">
-                 <sizepolicy hsizetype="Preferred" vsizetype="Expanding">
+                 <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
                   <horstretch>0</horstretch>
                   <verstretch>0</verstretch>
                  </sizepolicy>
                 </property>
+               </widget>
+              </item>
+              <item>
+               <widget class="QComboBox" name="comboBox_ptCountType"/>
+              </item>
+              <item>
+               <widget class="QComboBox" name="comboBox_maskNames2">
+                <item>
+                 <property name="text">
+                  <string>No Mask</string>
+                 </property>
+                </item>
+               </widget>
+              </item>
+              <item>
+               <widget class="QCheckBox" name="checkBox_use3algorithms">
+                <property name="text">
+                 <string>3 algorithms</string>
+                </property>
+               </widget>
+              </item>
+              <item>
+               <spacer name="horizontalSpacer_20">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Preferred</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>40</width>
+                  <height>20</height>
+                 </size>
+                </property>
+               </spacer>
+              </item>
+              <item>
+               <widget class="QPushButton" name="pushButton_showIntPeakDetails">
+                <property name="enabled">
+                 <bool>true</bool>
+                </property>
                 <property name="toolTip">
-                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Table for integrating Pts belogned to same scan.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                 <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Fit by Gaussian for background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                </property>
+                <property name="text">
+                 <string>See Details</string>
                 </property>
                </widget>
               </item>
-              <item row="1" column="1">
-               <layout class="QGridLayout" name="gridLayout_20">
-                <item row="1" column="3">
-                 <spacer name="horizontalSpacer_20">
-                  <property name="orientation">
-                   <enum>Qt::Horizontal</enum>
+             </layout>
+            </item>
+            <item row="2" column="3">
+             <widget class="IntegratedPeakView" name="graphicsView_integratedPeakView">
+              <property name="sizePolicy">
+               <sizepolicy hsizetype="Expanding" vsizetype="Expanding">
+                <horstretch>0</horstretch>
+                <verstretch>0</verstretch>
+               </sizepolicy>
+              </property>
+              <property name="font">
+               <font>
+                <pointsize>10</pointsize>
+               </font>
+              </property>
+             </widget>
+            </item>
+            <item row="3" column="3">
+             <layout class="QHBoxLayout" name="horizontalLayout_22">
+              <item>
+               <layout class="QGridLayout" name="gridLayout_15">
+                <item row="1" column="4">
+                 <widget class="QLabel" name="label_68">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
                   </property>
-                  <property name="sizeType">
-                   <enum>QSizePolicy::Expanding</enum>
+                  <property name="text">
+                   <string>Pt. Range</string>
                   </property>
-                  <property name="sizeHint" stdset="0">
-                   <size>
-                    <width>40</width>
-                    <height>20</height>
-                   </size>
+                 </widget>
+                </item>
+                <item row="2" column="1">
+                 <widget class="QLineEdit" name="lineEdit_gaussianPeakIntensity">
+                  <property name="enabled">
+                   <bool>true</bool>
                   </property>
-                 </spacer>
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="dragEnabled">
+                   <bool>true</bool>
+                  </property>
+                  <property name="readOnly">
+                   <bool>true</bool>
+                  </property>
+                 </widget>
+                </item>
+                <item row="0" column="0">
+                 <widget class="QLabel" name="label_32">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-size:12pt;&quot;&gt;Intensity calculated by &lt;/span&gt;&lt;span style=&quot; font-size:12pt; font-weight:600;&quot;&gt;simple summation&lt;/span&gt;&lt;span style=&quot; font-size:12pt;&quot;&gt; with &lt;/span&gt;&lt;span style=&quot; font-size:12pt; font-weight:600;&quot;&gt;averaged background&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                  <property name="text">
+                   <string>Intensity 1</string>
+                  </property>
+                 </widget>
+                </item>
+                <item row="0" column="1">
+                 <widget class="QLineEdit" name="lineEdit_rawSinglePeakIntensity">
+                  <property name="enabled">
+                   <bool>true</bool>
+                  </property>
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Intensity calculated by &lt;span style=&quot; font-weight:600;&quot;&gt;simple summation&lt;/span&gt; with &lt;span style=&quot; font-weight:600;&quot;&gt;averaged background&lt;/span&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                  <property name="dragEnabled">
+                   <bool>true</bool>
+                  </property>
+                  <property name="readOnly">
+                   <bool>true</bool>
+                  </property>
+                 </widget>
+                </item>
+                <item row="0" column="4">
+                 <widget class="QLabel" name="label_46">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="text">
+                   <string>Background</string>
+                  </property>
+                 </widget>
+                </item>
+                <item row="0" column="5">
+                 <widget class="QLineEdit" name="lineEdit_avgBackground">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="readOnly">
+                   <bool>true</bool>
+                  </property>
+                 </widget>
+                </item>
+                <item row="1" column="0">
+                 <widget class="QLabel" name="label_65">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Intensity calculated by &lt;span style=&quot; font-weight:600;&quot;&gt;simple summation&lt;/span&gt; with &lt;span style=&quot; font-weight:600;&quot;&gt;fitted flat background&lt;/span&gt;&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;p&gt;Fit function: Gaussian + Flat background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                  <property name="text">
+                   <string>Intensity 2</string>
+                  </property>
+                 </widget>
+                </item>
+                <item row="1" column="1">
+                 <widget class="QLineEdit" name="lineEdit_intensity2">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="toolTip">
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Intensity calculated by &lt;span style=&quot; font-weight:600;&quot;&gt;simple summation&lt;/span&gt; with &lt;span style=&quot; font-weight:600;&quot;&gt;fitted flat background&lt;/span&gt;&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;p&gt;Fit function: Gaussian + Flat background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                  </property>
+                  <property name="dragEnabled">
+                   <bool>true</bool>
+                  </property>
+                  <property name="readOnly">
+                   <bool>true</bool>
+                  </property>
+                 </widget>
+                </item>
+                <item row="0" column="3">
+                 <widget class="QLineEdit" name="lineEdit_errorIntensity1">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                 </widget>
+                </item>
+                <item row="2" column="0">
+                 <widget class="QLabel" name="label_45">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="text">
+                   <string>Intensity 3</string>
+                  </property>
+                 </widget>
+                </item>
+                <item row="2" column="4">
+                 <widget class="QLabel" name="label_66">
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                  <property name="text">
+                   <string>Gaussian B</string>
+                  </property>
+                 </widget>
                 </item>
                 <item row="1" column="5">
-                 <widget class="QPushButton" name="pushButton_handPickBkgd">
+                 <widget class="QLineEdit" name="lineEdit_ptRange">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                 </widget>
+                </item>
+                <item row="1" column="3">
+                 <widget class="QLineEdit" name="lineEdit_errorIntensity2">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
+                  </property>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
+                  </property>
+                 </widget>
+                </item>
+                <item row="2" column="5">
+                 <widget class="QLineEdit" name="lineEdit_peakBackground">
                   <property name="enabled">
-                   <bool>false</bool>
+                   <bool>true</bool>
+                  </property>
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
                   </property>
                   <property name="font">
                    <font>
-                    <pointsize>11</pointsize>
+                    <pointsize>10</pointsize>
                    </font>
                   </property>
                   <property name="toolTip">
-                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;User hand-pick background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Background value from fitted Gaussian&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
                   </property>
-                  <property name="text">
-                   <string>Customize Bkgd</string>
+                  <property name="dragEnabled">
+                   <bool>true</bool>
+                  </property>
+                  <property name="readOnly">
+                   <bool>true</bool>
                   </property>
                  </widget>
                 </item>
-                <item row="0" column="5">
-                 <widget class="QPushButton" name="pushButton_fitBkgd">
-                  <property name="enabled">
-                   <bool>false</bool>
-                  </property>
-                  <property name="toolTip">
-                   <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Fit by Gaussian for background&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+                <item row="2" column="3">
+                 <widget class="QLineEdit" name="lineEdit_errorIntensity3">
+                  <property name="sizePolicy">
+                   <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                    <horstretch>0</horstretch>
+                    <verstretch>0</verstretch>
+                   </sizepolicy>
                   </property>
-                  <property name="text">
-                   <string>Fit Background</string>
+                  <property name="font">
+                   <font>
+                    <pointsize>10</pointsize>
+                   </font>
                   </property>
                  </widget>
                 </item>
-                <item row="0" column="1">
-                 <widget class="QComboBox" name="comboBox_ptCountType">
-                  <item>
-                   <property name="text">
-                    <string>Absolute</string>
-                   </property>
-                  </item>
-                  <item>
-                   <property name="text">
-                    <string>Normalized by Time</string>
-                   </property>
-                  </item>
-                  <item>
-                   <property name="text">
-                    <string>Normalized by Monitor</string>
-                   </property>
-                  </item>
+                <item row="0" column="2">
+                 <widget class="QLabel" name="label_5">
+                  <property name="text">
+                   <string>+/-</string>
+                  </property>
                  </widget>
                 </item>
-                <item row="1" column="1">
-                 <widget class="QComboBox" name="comboBox_maskNames2">
-                  <item>
-                   <property name="text">
-                    <string>No Mask</string>
-                   </property>
-                  </item>
+                <item row="1" column="2">
+                 <widget class="QLabel" name="label_7">
+                  <property name="text">
+                   <string>+/-</string>
+                  </property>
                  </widget>
                 </item>
-                <item row="0" column="6">
-                 <widget class="QCheckBox" name="checkBox_peakIntTabLorentzCorr">
+                <item row="2" column="2">
+                 <widget class="QLabel" name="label_31">
                   <property name="text">
-                   <string>Lorentz Correction</string>
+                   <string>+/-</string>
                   </property>
                  </widget>
                 </item>
                </layout>
               </item>
-              <item row="1" column="0">
-               <layout class="QGridLayout" name="gridLayout_21">
-                <item row="1" column="0">
-                 <widget class="QPushButton" name="pushButton_calBkgd">
+              <item>
+               <spacer name="horizontalSpacer_40">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Preferred</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>40</width>
+                  <height>20</height>
+                 </size>
+                </property>
+               </spacer>
+              </item>
+              <item>
+               <layout class="QVBoxLayout" name="verticalLayout_23">
+                <item>
+                 <widget class="QLabel" name="label_53">
+                  <property name="maximumSize">
+                   <size>
+                    <width>16777215</width>
+                    <height>40</height>
+                   </size>
+                  </property>
                   <property name="font">
                    <font>
-                    <pointsize>11</pointsize>
+                    <pointsize>10</pointsize>
                    </font>
                   </property>
                   <property name="text">
-                   <string>Calculate Background</string>
+                   <string>Fitted Parameters</string>
                   </property>
                  </widget>
                 </item>
-                <item row="0" column="0">
-                 <widget class="QPushButton" name="pushButton_integratePt">
-                  <property name="text">
-                   <string>Integrate Pt</string>
-                  </property>
-                 </widget>
+                <item>
+                 <layout class="QGridLayout" name="gridLayout_20">
+                  <item row="1" column="3">
+                   <widget class="QLineEdit" name="lineEdit_guassX0">
+                    <property name="sizePolicy">
+                     <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                      <horstretch>0</horstretch>
+                      <verstretch>0</verstretch>
+                     </sizepolicy>
+                    </property>
+                    <property name="maximumSize">
+                     <size>
+                      <width>80</width>
+                      <height>16777215</height>
+                     </size>
+                    </property>
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                   </widget>
+                  </item>
+                  <item row="0" column="3">
+                   <widget class="QLineEdit" name="lineEdit_gaussSigma">
+                    <property name="sizePolicy">
+                     <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                      <horstretch>0</horstretch>
+                      <verstretch>0</verstretch>
+                     </sizepolicy>
+                    </property>
+                    <property name="maximumSize">
+                     <size>
+                      <width>80</width>
+                      <height>16777215</height>
+                     </size>
+                    </property>
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                   </widget>
+                  </item>
+                  <item row="0" column="2">
+                   <widget class="QLabel" name="label_61">
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                    <property name="text">
+                     <string>Sigma</string>
+                    </property>
+                   </widget>
+                  </item>
+                  <item row="0" column="0">
+                   <widget class="QLabel" name="label_54">
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                    <property name="text">
+                     <string>A</string>
+                    </property>
+                   </widget>
+                  </item>
+                  <item row="1" column="0">
+                   <widget class="QLabel" name="label_62">
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                    <property name="text">
+                     <string>B  </string>
+                    </property>
+                   </widget>
+                  </item>
+                  <item row="1" column="2">
+                   <widget class="QLabel" name="label_67">
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                    <property name="text">
+                     <string>X0</string>
+                    </property>
+                   </widget>
+                  </item>
+                  <item row="1" column="1">
+                   <widget class="QLineEdit" name="lineEdit_gaussB">
+                    <property name="sizePolicy">
+                     <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                      <horstretch>0</horstretch>
+                      <verstretch>0</verstretch>
+                     </sizepolicy>
+                    </property>
+                    <property name="maximumSize">
+                     <size>
+                      <width>80</width>
+                      <height>16777215</height>
+                     </size>
+                    </property>
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                   </widget>
+                  </item>
+                  <item row="0" column="1">
+                   <widget class="QLineEdit" name="lineEdit_gaussA">
+                    <property name="sizePolicy">
+                     <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+                      <horstretch>0</horstretch>
+                      <verstretch>0</verstretch>
+                     </sizepolicy>
+                    </property>
+                    <property name="maximumSize">
+                     <size>
+                      <width>80</width>
+                      <height>16777215</height>
+                     </size>
+                    </property>
+                    <property name="font">
+                     <font>
+                      <pointsize>10</pointsize>
+                     </font>
+                    </property>
+                   </widget>
+                  </item>
+                 </layout>
                 </item>
                </layout>
               </item>
-              <item row="0" column="1">
-               <widget class="IntegratedPeakView" name="graphicsView_integratedPeakView">
+              <item>
+               <spacer name="horizontalSpacer_37">
+                <property name="orientation">
+                 <enum>Qt::Horizontal</enum>
+                </property>
+                <property name="sizeType">
+                 <enum>QSizePolicy::Ignored</enum>
+                </property>
+                <property name="sizeHint" stdset="0">
+                 <size>
+                  <width>40</width>
+                  <height>20</height>
+                 </size>
+                </property>
+               </spacer>
+              </item>
+              <item>
+               <widget class="MatrixTable" name="tableWidget_covariance">
                 <property name="sizePolicy">
-                 <sizepolicy hsizetype="Expanding" vsizetype="Expanding">
+                 <sizepolicy hsizetype="Expanding" vsizetype="Preferred">
                   <horstretch>0</horstretch>
                   <verstretch>0</verstretch>
                  </sizepolicy>
                 </property>
+                <property name="font">
+                 <font>
+                  <pointsize>10</pointsize>
+                 </font>
+                </property>
+                <row>
+                 <property name="text">
+                  <string>X0</string>
+                 </property>
+                </row>
+                <row>
+                 <property name="text">
+                  <string>sigma</string>
+                 </property>
+                </row>
+                <row>
+                 <property name="text">
+                  <string>A</string>
+                 </property>
+                </row>
+                <row>
+                 <property name="text">
+                  <string>B</string>
+                 </property>
+                </row>
+                <column>
+                 <property name="text">
+                  <string>x0</string>
+                 </property>
+                </column>
+                <column>
+                 <property name="text">
+                  <string>sigma</string>
+                 </property>
+                </column>
+                <column>
+                 <property name="text">
+                  <string>A</string>
+                 </property>
+                </column>
+                <column>
+                 <property name="text">
+                  <string>B</string>
+                 </property>
+                </column>
+                <item row="0" column="0">
+                 <property name="text">
+                  <string/>
+                 </property>
+                 <property name="font">
+                  <font>
+                   <pointsize>10</pointsize>
+                  </font>
+                 </property>
+                </item>
                </widget>
               </item>
+              <item>
+               <layout class="QVBoxLayout" name="verticalLayout_24">
+                <item>
+                 <widget class="QPushButton" name="pushButton_clearPeakIntFigure">
+                  <property name="text">
+                   <string>Clear Canvas</string>
+                  </property>
+                 </widget>
+                </item>
+                <item>
+                 <spacer name="verticalSpacer_16">
+                  <property name="orientation">
+                   <enum>Qt::Vertical</enum>
+                  </property>
+                  <property name="sizeType">
+                   <enum>QSizePolicy::Preferred</enum>
+                  </property>
+                  <property name="sizeHint" stdset="0">
+                   <size>
+                    <width>20</width>
+                    <height>40</height>
+                   </size>
+                  </property>
+                 </spacer>
+                </item>
+               </layout>
+              </item>
              </layout>
             </item>
            </layout>
@@ -4833,7 +5744,7 @@ p, li { white-space: pre-wrap; }
     <rect>
      <x>0</x>
      <y>0</y>
-     <width>1536</width>
+     <width>1568</width>
      <height>25</height>
     </rect>
    </property>
@@ -4999,11 +5910,6 @@ p, li { white-space: pre-wrap; }
    <extends>QTableWidget</extends>
    <header>hfctables.h</header>
   </customwidget>
-  <customwidget>
-   <class>PeakIntegrationTableWidget</class>
-   <extends>QTableWidget</extends>
-   <header>hfctables.h</header>
-  </customwidget>
   <customwidget>
    <class>IntegratedPeakView</class>
    <extends>QGraphicsView</extends>
@@ -5019,6 +5925,11 @@ p, li { white-space: pre-wrap; }
    <extends>QTableWidget</extends>
    <header>hfctables.h</header>
   </customwidget>
+  <customwidget>
+   <class>MatrixTable</class>
+   <extends>QTableWidget</extends>
+   <header>hfctables.h</header>
+  </customwidget>
  </customwidgets>
  <resources/>
  <connections/>
diff --git a/scripts/HFIR_4Circle_Reduction/NTableWidget.py b/scripts/HFIR_4Circle_Reduction/NTableWidget.py
index e8ffda0c330e0a777e2784aa40da0dbca955ec60..e8e65036197ad3487e5bd1f67116502c3d1d9be5 100644
--- a/scripts/HFIR_4Circle_Reduction/NTableWidget.py
+++ b/scripts/HFIR_4Circle_Reduction/NTableWidget.py
@@ -1,6 +1,6 @@
 #pylint: disable=C0103,R0904
 # N(DAV)TableWidget
-
+import csv
 from PyQt4 import QtGui, QtCore
 
 try:
@@ -31,6 +31,7 @@ class NTableWidget(QtGui.QTableWidget):
         self._editableList = list()
 
         self._statusColName = 'Status'
+        self._colIndexSelect = None
 
         return
 
@@ -90,6 +91,47 @@ class NTableWidget(QtGui.QTableWidget):
 
         return
 
+    def export_table_csv(self, csv_file_name):
+        """
+
+        :return:
+        """
+        # get title as header
+        col_names = self._myColumnNameList[:]
+        # col_names_str = '{0}'.format(col_names)
+        # col_names_str = col_names_str.replace(', ', ' ')
+        # headeder = col_names_str
+
+        num_columns = self.columnCount()
+
+        num_rows = self.rowCount()
+        content_line_list = list()
+        for i_row in range(num_rows):
+            line_items = list()
+            for j_col in range(num_columns):
+                item_value = self.get_cell_value(i_row, j_col)
+                if isinstance(item_value, str):
+                    # remove tab because tab will be used as delimiter
+                    item_value = item_value.replace('\t', '')
+                elif item_value is None:
+                    item_value = ''
+                line_items.append(item_value)
+            # END-FOR
+            content_line_list.append(line_items)
+        # END-FOR (row)
+
+        with open(csv_file_name, 'w') as csv_file:
+            csv_writer = csv.writer(csv_file, delimiter=' ', quoting=csv.QUOTE_MINIMAL)
+            # write header
+            csv_writer.writerow(col_names)
+            # write content
+            for line_items in content_line_list:
+                csv_writer.writerow(line_items)
+            # END-FOR
+        # END-WITH
+
+        return
+
     def get_cell_value(self, row_index, col_index):
         """
         Purpose: Get cell value
@@ -120,7 +162,9 @@ class NTableWidget(QtGui.QTableWidget):
             assert isinstance(item_i_j, QtGui.QTableWidgetItem)
 
             return_value = str(item_i_j.text())
-            if cell_data_type == 'int':
+            if return_value == 'None':
+                return_value = None
+            elif cell_data_type == 'int':
                 return_value = int(return_value)
             elif cell_data_type == 'float' or cell_data_type == 'double':
                 return_value = float(return_value)
@@ -266,6 +310,23 @@ class NTableWidget(QtGui.QTableWidget):
 
         return error_message
 
+    def revert_selection(self):
+        """
+        revert the selection of rows
+        :return:
+        """
+        # check
+        if self._colIndexSelect is None:
+            raise RuntimeError('Column for selection is not defined yet. Unable to revert selection')
+
+        num_rows = self.rowCount()
+        for i_row in range(num_rows):
+            curr_selection = self.get_cell_value(i_row, self._colIndexSelect)
+            self.update_cell_value(i_row, self._colIndexSelect, not curr_selection)
+        # END-FOR
+
+        return
+
     def select_all_rows(self, status):
         """
         Purpose: select or deselect all rows in the table if applied
@@ -316,6 +377,41 @@ class NTableWidget(QtGui.QTableWidget):
 
         return
 
+    def select_rows_by_column_value(self, column_index, target_value, value_tolerance,
+                                    keep_current_selection):
+        """
+        select row
+        :param column_index:
+        :param target_value:
+        :param value_tolerance:
+        :param keep_current_selection:
+        :return:
+        """
+        # check inputs
+        assert isinstance(column_index, int) and 0 <= column_index < self.columnCount(),\
+            'Column index {0} must be an integer (now {1}) and in range (0, {2}]' \
+            ''.format(column_index, type(column_index), self.columnCount())
+        if self._colIndexSelect is None:
+            raise RuntimeError('Column for selection is never set up.')
+
+        # loop over lines
+        num_rows = self.rowCount()
+        for i_row in range(num_rows):
+            if keep_current_selection and self.get_cell_value(i_row, self._colIndexSelect) is False:
+                # in case to keep and based on current selection, and this row is not selected, skip
+                continue
+
+            value_i = self.get_cell_value(i_row, column_index)
+            if isinstance(target_value, str) and value_i == target_value:
+                # in case of string
+                self.update_cell_value(i_row, self._colIndexSelect, True)
+            elif (isinstance(target_value, float) or isinstance(target_value, int)) and abs(value_i - target_value) < value_tolerance:
+                # in case of integer or float, then test with consideration of tolerance
+                self.update_cell_value(i_row, self._colIndexSelect, True)
+        # END-FOR
+
+        return
+
     def set_check_box(self, row, col, state):
         """ function to add a new select checkbox to a cell in a table row
         won't add a new checkbox if one already exists
@@ -352,11 +448,16 @@ class NTableWidget(QtGui.QTableWidget):
         # check
         assert isinstance(name, str), 'Given status column name must be an integer,' \
                                       'but not %s.' % str(type(name))
-        assert name in self._myColumnNameList
+        if name not in self._myColumnNameList:
+            raise RuntimeError('Input selection/status name {0} is not in column names list {1}.'
+                               ''.format(name, self._myColumnNameList))
 
         # set value
         self._statusColName = name
 
+        # set the column index
+        self._colIndexSelect = self._myColumnNameList.index(name)
+
         return
 
     def set_value_cell(self, row, col, value=''):
diff --git a/scripts/HFIR_4Circle_Reduction/PeakIntegrationDialog.ui b/scripts/HFIR_4Circle_Reduction/PeakIntegrationDialog.ui
new file mode 100644
index 0000000000000000000000000000000000000000..5133f2667b926acf33e160dc7e1bea213441204e
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/PeakIntegrationDialog.ui
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>410</width>
+    <height>317</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>Dialog</string>
+  </property>
+  <layout class="QGridLayout" name="gridLayout">
+   <item row="0" column="0">
+    <widget class="QLabel" name="label_34">
+     <property name="text">
+      <string>Peak Radius</string>
+     </property>
+    </widget>
+   </item>
+   <item row="0" column="1">
+    <widget class="QLineEdit" name="lineEdit_peakRadius">
+     <property name="sizePolicy">
+      <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+       <horstretch>0</horstretch>
+       <verstretch>0</verstretch>
+      </sizepolicy>
+     </property>
+    </widget>
+   </item>
+   <item row="1" column="0">
+    <widget class="QLabel" name="label_46">
+     <property name="text">
+      <string>Background</string>
+     </property>
+    </widget>
+   </item>
+   <item row="1" column="1">
+    <widget class="QLineEdit" name="lineEdit_background">
+     <property name="sizePolicy">
+      <sizepolicy hsizetype="Preferred" vsizetype="Fixed">
+       <horstretch>0</horstretch>
+       <verstretch>0</verstretch>
+      </sizepolicy>
+     </property>
+    </widget>
+   </item>
+   <item row="2" column="1">
+    <widget class="QPushButton" name="pushButton_integratePeak">
+     <property name="font">
+      <font>
+       <weight>50</weight>
+       <bold>false</bold>
+      </font>
+     </property>
+     <property name="text">
+      <string>Integrate Peak</string>
+     </property>
+    </widget>
+   </item>
+  </layout>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/scripts/HFIR_4Circle_Reduction/PeakIntegrationSpreadSheet.ui b/scripts/HFIR_4Circle_Reduction/PeakIntegrationSpreadSheet.ui
new file mode 100644
index 0000000000000000000000000000000000000000..52b42acd4794a7881ad1f193fa38529654dde1b2
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/PeakIntegrationSpreadSheet.ui
@@ -0,0 +1,78 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>1318</width>
+    <height>613</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>Dialog</string>
+  </property>
+  <layout class="QVBoxLayout" name="verticalLayout">
+   <item>
+    <widget class="PeaksIntegrationSpreadSheet" name="tableWidget_spreadsheet"/>
+   </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout">
+     <item>
+      <spacer name="horizontalSpacer">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QPushButton" name="pushButton_exportTable">
+       <property name="text">
+        <string>Export Table</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_2">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeType">
+        <enum>QSizePolicy::Preferred</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QPushButton" name="pushButton_quit">
+       <property name="text">
+        <string>Quit</string>
+       </property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+  </layout>
+ </widget>
+ <customwidgets>
+  <customwidget>
+   <class>PeaksIntegrationSpreadSheet</class>
+   <extends>QTableWidget</extends>
+   <header>hfctables.h</header>
+  </customwidget>
+ </customwidgets>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/scripts/HFIR_4Circle_Reduction/PeaksIntegrationReport.py b/scripts/HFIR_4Circle_Reduction/PeaksIntegrationReport.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5f13cd30770defd2d6bc40553934c2efd92fa51
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/PeaksIntegrationReport.py
@@ -0,0 +1,105 @@
+import os
+
+from PyQt4 import QtGui, QtCore
+import ui_PeakIntegrationSpreadSheet
+
+
+class PeaksIntegrationReportDialog(QtGui.QDialog):
+    """
+    Dialog to report the details of peaks integration
+    """
+    def __init__(self, parent):
+        """
+        initialization
+        :param parent:
+        """
+        super(PeaksIntegrationReportDialog, self).__init__(parent)
+
+        # set up UI
+        self.ui = ui_PeakIntegrationSpreadSheet.Ui_Dialog()
+        self.ui.setupUi(self)
+
+        # initialize widget
+        self.ui.tableWidget_spreadsheet.setup()
+
+        # set up handlers
+        self.connect(self.ui.pushButton_exportTable, QtCore.SIGNAL('clicked()'),
+                     self.do_export_table)
+
+        self.connect(self.ui.pushButton_quit, QtCore.SIGNAL('clicked()'),
+                     self.do_quit)
+
+        return
+
+    def do_export_table(self):
+        """
+
+        :return:
+        """
+        default_dir = os.getcwd()
+        output_file = str(QtGui.QFileDialog.getSaveFileName(self, 'Export table to csv file', default_dir,
+                                                            'Data Files (*.dat);;All  Files (*.*)'))
+
+        # return if cancelled
+        if len(output_file) == 0:
+            return
+
+        # write
+        self.ui.tableWidget_spreadsheet.export_table_csv(output_file)
+
+        return
+
+    def do_quit(self):
+        """
+
+        :return:
+        """
+        self.close()
+
+        return
+
+    def set_report(self, peak_integration_summary):
+        """
+
+        :param peak_integration_summary: dictionary of dictionary; key is scan number
+        :return:
+        """
+        # check input
+        assert isinstance(peak_integration_summary, dict)
+
+        if len(peak_integration_summary) == 0:
+            print '[WARNING] There is no peak integration summary given for the report.'
+            return
+
+        scan_number_list = sorted(peak_integration_summary.keys())
+        for scan_number in scan_number_list:
+            print '[DB...BAT] Scan {0} Peak integration report keys: {1}' \
+                  ''.format(scan_number, peak_integration_summary[scan_number].keys())
+
+            spice_hkl = peak_integration_summary[scan_number]['SPICE HKL']
+            calculated_hkl = peak_integration_summary[scan_number]['Mantid HKL']
+            mask_name = peak_integration_summary[scan_number]['Mask']
+            intensity1 = peak_integration_summary[scan_number]['Raw Intensity']
+            error1 = peak_integration_summary[scan_number]['Raw Intensity Error']
+            intensity2 = peak_integration_summary[scan_number]['Intensity 2']
+            error2 = peak_integration_summary[scan_number]['Intensity 2 Error']
+            intensity3 = peak_integration_summary[scan_number]['Gauss Intensity']
+            error3 = peak_integration_summary[scan_number]['Gauss Error']
+            lorentz_factor = peak_integration_summary[scan_number]['Lorentz']
+            estimated_bkgd = peak_integration_summary[scan_number]['Estimated Background']
+            gauss_bkgd = peak_integration_summary[scan_number]['Fitted Background']
+            gauss_a = peak_integration_summary[scan_number]['Fitted A']
+            gauss_sigma = peak_integration_summary[scan_number]['Fitted Sigma']
+            motor_name = peak_integration_summary[scan_number]['Motor']
+            motor_step = peak_integration_summary[scan_number]['Motor Step']
+            k_shift = peak_integration_summary[scan_number]['K-vector']
+            absorption_correction = peak_integration_summary[scan_number]['Absorption Correction']
+
+            self.ui.tableWidget_spreadsheet.add_scan_information(scan_number, spice_hkl, calculated_hkl,
+                                                                 mask_name, intensity1, error1, intensity2, error2,
+                                                                 intensity3, error3, lorentz_factor, estimated_bkgd,
+                                                                 gauss_bkgd, gauss_sigma, gauss_a, motor_name,
+                                                                 motor_step, k_shift, absorption_correction)
+        # END-FOR
+
+        return
diff --git a/scripts/HFIR_4Circle_Reduction/UBSelectPeaksDialog.ui b/scripts/HFIR_4Circle_Reduction/UBSelectPeaksDialog.ui
new file mode 100644
index 0000000000000000000000000000000000000000..edf65f4801fb1558ad521c3b0b1f3fab185b7b7e
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/UBSelectPeaksDialog.ui
@@ -0,0 +1,440 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="windowModality">
+   <enum>Qt::WindowModal</enum>
+  </property>
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>676</width>
+    <height>303</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>Dialog</string>
+  </property>
+  <property name="modal">
+   <bool>true</bool>
+  </property>
+  <layout class="QVBoxLayout" name="verticalLayout">
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout">
+     <item>
+      <widget class="QCheckBox" name="checkBox_selectAllPeaks">
+       <property name="text">
+        <string>Select All Peaks</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_3">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+    </layout>
+   </item>
+   <item>
+    <spacer name="verticalSpacer">
+     <property name="orientation">
+      <enum>Qt::Vertical</enum>
+     </property>
+     <property name="sizeType">
+      <enum>QSizePolicy::Ignored</enum>
+     </property>
+     <property name="sizeHint" stdset="0">
+      <size>
+       <width>20</width>
+       <height>40</height>
+      </size>
+     </property>
+    </spacer>
+   </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_2">
+     <item>
+      <widget class="QCheckBox" name="checkBox_selectNuclearPeaks">
+       <property name="text">
+        <string>Nuclear Peaks</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_4">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+    </layout>
+   </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_3">
+     <item>
+      <spacer name="horizontalSpacer">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeType">
+        <enum>QSizePolicy::Fixed</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>80</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QLabel" name="label">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Preferred">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>90</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>90</width>
+         <height>16777215</height>
+        </size>
+       </property>
+       <property name="text">
+        <string>Tolerance</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QLineEdit" name="lineEdit_nuclearPeaksTolerance">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>160</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>160</width>
+         <height>16777215</height>
+        </size>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QLabel" name="label_3">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Preferred">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>80</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>80</width>
+         <height>16777215</height>
+        </size>
+       </property>
+       <property name="text">
+        <string>HKL from</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QComboBox" name="comboBox_hklType">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>160</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>160</width>
+         <height>16777215</height>
+        </size>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_5">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+    </layout>
+   </item>
+   <item>
+    <spacer name="verticalSpacer_2">
+     <property name="orientation">
+      <enum>Qt::Vertical</enum>
+     </property>
+     <property name="sizeType">
+      <enum>QSizePolicy::Ignored</enum>
+     </property>
+     <property name="sizeHint" stdset="0">
+      <size>
+       <width>20</width>
+       <height>40</height>
+      </size>
+     </property>
+    </spacer>
+   </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_4">
+     <item>
+      <widget class="QCheckBox" name="checkBox_wavelength">
+       <property name="text">
+        <string>Wave Length</string>
+       </property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_6">
+     <item>
+      <spacer name="horizontalSpacer_2">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeType">
+        <enum>QSizePolicy::Fixed</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>80</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QLabel" name="label_2">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Preferred">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>90</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>90</width>
+         <height>16777215</height>
+        </size>
+       </property>
+       <property name="text">
+        <string>Wave length</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QLineEdit" name="lineEdit_wavelength">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>160</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>160</width>
+         <height>16777215</height>
+        </size>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QLabel" name="label_4">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Preferred">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>80</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>80</width>
+         <height>16777215</height>
+        </size>
+       </property>
+       <property name="text">
+        <string>Tolerance</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QLineEdit" name="lineEdit_wavelengthTolerance">
+       <property name="sizePolicy">
+        <sizepolicy hsizetype="Fixed" vsizetype="Fixed">
+         <horstretch>0</horstretch>
+         <verstretch>0</verstretch>
+        </sizepolicy>
+       </property>
+       <property name="minimumSize">
+        <size>
+         <width>160</width>
+         <height>0</height>
+        </size>
+       </property>
+       <property name="maximumSize">
+        <size>
+         <width>160</width>
+         <height>16777215</height>
+        </size>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <spacer name="horizontalSpacer_6">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+    </layout>
+   </item>
+   <item>
+    <spacer name="verticalSpacer_3">
+     <property name="orientation">
+      <enum>Qt::Vertical</enum>
+     </property>
+     <property name="sizeHint" stdset="0">
+      <size>
+       <width>20</width>
+       <height>40</height>
+      </size>
+     </property>
+    </spacer>
+   </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout_5">
+     <item>
+      <widget class="QPushButton" name="pushButton_selectScans">
+       <property name="font">
+        <font>
+         <pointsize>12</pointsize>
+        </font>
+       </property>
+       <property name="text">
+        <string>Select</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QPushButton" name="pushButton_revertCurrentSelection">
+       <property name="font">
+        <font>
+         <pointsize>12</pointsize>
+        </font>
+       </property>
+       <property name="text">
+        <string>Revert</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QPushButton" name="pushButton_exportSelectedScans">
+       <property name="font">
+        <font>
+         <pointsize>12</pointsize>
+        </font>
+       </property>
+       <property name="toolTip">
+        <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Export selected peaks to a file&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
+       </property>
+       <property name="text">
+        <string>Export</string>
+       </property>
+      </widget>
+     </item>
+     <item>
+      <widget class="QPushButton" name="pushButton_quit">
+       <property name="font">
+        <font>
+         <pointsize>12</pointsize>
+        </font>
+       </property>
+       <property name="text">
+        <string>Quit</string>
+       </property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+  </layout>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/scripts/HFIR_4Circle_Reduction/detector2dview.py b/scripts/HFIR_4Circle_Reduction/detector2dview.py
index f7ce4af9f5bbf16b8028aef75b05c27a6196ea78..b2bdd80c789faa0b530b6db397c1750c691c9f2f 100644
--- a/scripts/HFIR_4Circle_Reduction/detector2dview.py
+++ b/scripts/HFIR_4Circle_Reduction/detector2dview.py
@@ -1,7 +1,7 @@
 #pylint: disable=W0403,R0902,R0903,R0904,W0212
-import mpl2dgraphicsview
-
+import os
 import numpy as np
+import mpl2dgraphicsview
 
 
 class Detector2DView(mpl2dgraphicsview.Mpl2dGraphicsView):
@@ -92,6 +92,70 @@ class Detector2DView(mpl2dgraphicsview.Mpl2dGraphicsView):
 
         return
 
+    def integrate_roi_linear(self, exp_number, scan_number, pt_number, output_dir):
+        """
+        integrate the 2D data inside region of interest along both axis-0 and axis-1 individually.
+        and the result (as 1D data) will be saved to ascii file.
+        the X values will be the corresponding pixel index either along axis-0 or axis-1
+        :return:
+        """
+        def save_to_file(base_file_name, axis, array1d, start_index):
+            """
+            save the result (1D data) to an ASCII file
+            :param base_file_name:
+            :param axis:
+            :param array1d:
+            :param start_index:
+            :return:
+            """
+            file_name = '{0}_axis_{1}.dat'.format(base_file_name, axis)
+
+            wbuf = ''
+            vec_x = np.array(range(len(array1d))) + start_index
+            for i in range(len(array1d)):
+                wbuf += '{0} \t{1}\n'.format(vec_x[i], array1d[i])
+
+            ofile = open(file_name, 'w')
+            ofile.write(wbuf)
+            ofile.close()
+
+            return
+
+        matrix = self.array2d
+        assert isinstance(matrix, np.ndarray), 'A matrix must be an ndarray but not {0}.'.format(type(matrix))
+
+        # get region of interest
+        if self._roiStart is None:
+            self._roiStart = (0, 0)
+        if self._roiEnd is None:
+            self._roiEnd = matrix.shape
+
+        ll_row = min(self._roiStart[0], self._roiEnd[0])
+        ll_col = min(self._roiStart[1], self._roiEnd[1])
+
+        ur_row = max(self._roiStart[0], self._roiEnd[0])
+        ur_col = max(self._roiStart[1], self._roiEnd[1])
+
+        # print 'Row: {0} : {1}  Col: {2} : {3}'.format(ll_row, ur_row, ll_col, ur_col)
+
+        roi_matrix = matrix[ll_col:ur_col, ll_row:ur_row]
+
+        sum_0 = roi_matrix.sum(0)
+        #  print sum_0
+        sum_1 = roi_matrix.sum(1)
+        #  print sum_1
+        print '[SUM 0] Dimension: {0}'.format(sum_0.shape)
+        print '[SUM 1] Dimension: {0}'.format(sum_1.shape)
+
+        # write to file
+        base_name = os.path.join(output_dir, 'Exp{0}_Scan{1}_Pt{2}'.format(exp_number, scan_number, pt_number))
+        save_to_file(base_name, 0, sum_0, ll_row)
+        save_to_file(base_name, 1, sum_1, ll_col)
+
+        message = 'Integrated values are saved to {0}...'.format(base_name)
+
+        return message
+
     def get_roi(self):
         """
         :return: A list for polygon0
@@ -149,6 +213,9 @@ class Detector2DView(mpl2dgraphicsview.Mpl2dGraphicsView):
             # FUTURE-TO-DO: this should be replaced by some update() method of canvas
             self._myCanvas._flush()
 
+            self._roiStart = None
+            self._roiEnd = None
+
         return
 
     def on_mouse_motion(self, event):
@@ -260,7 +327,7 @@ class Detector2DView(mpl2dgraphicsview.Mpl2dGraphicsView):
         :param parent_window:
         :return:
         """
-        assert parent_window is not None, 'blabla'
+        assert parent_window is not None, 'Parent window cannot be None'
 
         self._myParentWindow = parent_window
 
@@ -273,8 +340,8 @@ class Detector2DView(mpl2dgraphicsview.Mpl2dGraphicsView):
         :return:
         """
         # check
-        assert isinstance(cursor_x, float)
-        assert isinstance(cursor_y, float)
+        assert isinstance(cursor_x, float), 'Cursor x coordination {0} must be a float.'.format(cursor_x)
+        assert isinstance(cursor_y, float), 'Cursor y coordination {0} must be a float.'.format(cursor_y)
 
         # remove the original polygon
         if self._myPolygon is not None:
diff --git a/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py b/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py
index 32a8c46649cb4b8b9520316aedae1b917f8ab2c5..2043bfe2316793c9cc36d58f4756f5a184d20ace 100644
--- a/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py
+++ b/scripts/HFIR_4Circle_Reduction/fourcircle_utility.py
@@ -72,7 +72,7 @@ def convert_to_wave_length(m1_position):
     return wave_length
 
 
-def generate_mask_file(file_path, ll_corner, ur_corner, rectangular=True):
+def generate_mask_file(file_path, ll_corner, ur_corner, rectangular=True, num_det_row=None):
     """ Generate a Mantid RIO/Mask XML file
     Requirements:
     1. file_path is writable;
@@ -85,7 +85,12 @@ def generate_mask_file(file_path, ll_corner, ur_corner, rectangular=True):
     """
     # check
     assert isinstance(file_path, str), 'File path must be a string but not a %s.' % str(type(file_path))
-    assert len(ll_corner) == 2 and len(ur_corner) == 2
+    assert len(ll_corner) == 2 and len(ur_corner) == 2,\
+        'Left corner and right corner coordinates must be 2-tuple but not of size {0} or {1}' \
+        ''.format(len(ll_corner), len(ur_corner))
+
+    if num_det_row is None:
+        num_det_row = NUM_DET_ROW
 
     if rectangular is False:
         raise RuntimeError('Non-rectangular detector is not supported yet.')
@@ -99,20 +104,42 @@ def generate_mask_file(file_path, ll_corner, ur_corner, rectangular=True):
     xml_str += '          <detids>'
 
     # part 2: all the masked detectors
-    start_row = int(ll_corner[0])
-    start_col = int(ll_corner[1])
+    mpl_start_row = int(ll_corner[0])
+    mpl_start_col = int(ll_corner[1])
+
+    mpl_end_row = int(ur_corner[0])
+    mpl_end_col = int(ur_corner[1])
+
+    # correction:
+    if False:
+        start_row = mpl_start_col
+        start_col = 256 - mpl_end_row
 
-    end_row = int(ur_corner[0])
-    end_col = int(ur_corner[1])
+        end_row = mpl_end_col
+        end_col = mpl_start_row
+    else:
+        start_row = mpl_start_row
+        start_col = mpl_start_col
+        end_row =   mpl_end_row
+        end_col =   mpl_end_col
 
-    assert start_col < end_col
+    assert start_col < end_col, 'Start column {0} cannot be smaller than end column {1}.'.format(start_col, end_col)
 
     det_sub_xml = ''
-    for col_number in xrange(start_col, end_col+1):
-        start_det_id = 1 + col_number * NUM_DET_ROW + start_row
-        end_det_id = 1 + col_number * NUM_DET_ROW + end_row
-        det_sub_xml += '%d-%d,' % (start_det_id, end_det_id)
+    if False:
+        for col_number in xrange(start_col, end_col+1):
+            start_det_id = 1 + col_number * NUM_DET_ROW + start_row
+            end_det_id = 1 + col_number * NUM_DET_ROW + end_row
+            det_sub_xml += '%d-%d,' % (start_det_id, end_det_id)
+    else:
+        # print '[DB...BAT] Row numbers from {0} to {1}'.format(start_row, end_row)
+        # print '[DB...BAT] Col numbers from {0} to {1}'.format(start_col, end_col)
+        for row_number in range(start_row, end_row+1):
+            start_det_id = 1 + row_number * num_det_row + start_col
+            end_det_id = 1 + row_number * num_det_row + end_col
+            det_sub_xml += '{0}-{1},'.format(start_det_id, end_det_id)
     # END-FOR
+
     # remove last ','
     det_sub_xml = det_sub_xml[:-1]
     # add to xml string
@@ -136,8 +163,8 @@ def get_hb3a_wavelength(m1_motor_pos):
     :param m1_motor_pos:
     :return: wavelength.  None for no mapping
     """
-    assert isinstance(m1_motor_pos, float), 'Motor m1\'s position %s must be a float but not %s.' \
-                                            '' % (str(m1_motor_pos), type(m1_motor_pos))
+    assert isinstance(m1_motor_pos, float) or isinstance(m1_motor_pos, int),\
+        'Motor m1\'s position {0} must be a float but not {1}.'.format(m1_motor_pos, type(m1_motor_pos))
 
     # hard-coded HB3A m1 position and wavelength mapping
     m1_pos_list = [(-25.870, 1.003),
diff --git a/scripts/HFIR_4Circle_Reduction/fputility.py b/scripts/HFIR_4Circle_Reduction/fputility.py
index e893e95e190d50d7d1dbdf2ea2acdc533f200951..2c94f404153d299bd950d05c5c2edb8c4bdfebf3 100644
--- a/scripts/HFIR_4Circle_Reduction/fputility.py
+++ b/scripts/HFIR_4Circle_Reduction/fputility.py
@@ -230,7 +230,11 @@ def write_scd_fullprof_kvector(user_header, wave_length, k_vector_dict, peak_dic
         # END-IF-ELSE
 
         # peak intensity and sigma
-        part3 = '%8.2f%8.2f%4d' % (peak_dict['intensity'], peak_dict['sigma'], 1)
+        try:
+            part3 = '%8.2f%8.2f%4d' % (peak_dict['intensity'], peak_dict['sigma'], 1)
+        except TypeError as type_err:
+            raise RuntimeError('In writing Fullprof file, unable to convert intensity {0} and/or sigma {1} to '
+                               'floats. FYI: {2}'.format(peak_dict['intensity'], peak_dict['sigma'], type_err))
 
         peak_line = part1 + part2 + part3
 
@@ -275,7 +279,7 @@ def main(argv):
     """
     # get input
     if len(argv) < 4:
-        print 'Calculate the difference of two measuremnts:\n'
+        print 'Calculate the difference of two measurements:\n'
         print '> %s [intensity file 1]  [intensity file 2]  [output intensity file]' % argv[0]
         return
     else:
diff --git a/scripts/HFIR_4Circle_Reduction/guiutility.py b/scripts/HFIR_4Circle_Reduction/guiutility.py
index 3c7dea5d4b332044c74c543439eadc036b81806f..9ee8918d52e23719041b8200e1415d6cd9e381ce 100644
--- a/scripts/HFIR_4Circle_Reduction/guiutility.py
+++ b/scripts/HFIR_4Circle_Reduction/guiutility.py
@@ -3,6 +3,7 @@
 #
 import math
 import numpy
+import os
 from PyQt4 import QtGui, QtCore
 
 
@@ -41,6 +42,46 @@ def convert_str_to_matrix(matrix_str, matrix_shape):
     return matrix
 
 
+def import_scans_text_file(file_name):
+    """
+    import a plain text file containing a list of scans
+    :param file_name:
+    :return:
+    """
+    # check inputs
+    assert isinstance(file_name, str), 'File name {0} must be a string but not of type {1}.' \
+                                       ''.format(file_name, type(file_name))
+    if os.path.exists(file_name) is False:
+        raise RuntimeError('File {0} does not exist.'.format(file_name))
+
+    # import file
+    scan_file = open(file_name, 'r')
+    raw_lines = scan_file.readline()
+    scan_file.close()
+
+    # parse
+    scans_str = ''
+    for raw_line in raw_lines:
+        # get a clean line and skip empty line
+        line = raw_line.strip()
+        if len(line) == 0:
+            continue
+
+        # skip comment line
+        if line.startswith('#'):
+            continue
+
+        # form the string
+        scans_str += line
+    # END-FOR
+
+    # convert scans (in string) to list of integers
+    scan_list = parse_integer_list(scans_str)
+    scan_list.sort()
+
+    return scan_list
+
+
 def map_to_color(data_array, base_color, change_color_flag):
     """ Map 1-D data to color list
     :param data_array:
@@ -149,13 +190,18 @@ def parse_float_array(array_str):
     return True, float_list
 
 
-def parse_integer_list(array_str):
+def parse_integer_list(array_str, expected_size=None):
     """ Parse a string to an array of integer separated by ','
     also, the format as 'a-b' is supported too
+    :exception: RuntimeError
     :param array_str:
-    :return: boolean, list of floats/error message
+    :param expected_size
+    :return: list of floats/error message
     """
-    assert isinstance(array_str, str)
+    # check input type
+    assert isinstance(array_str, str), 'Input {0} must be a string but not a {1}'.format(array_str, type(array_str))
+
+    # remove space, tab and \n
     array_str = array_str.replace(' ', '')
     array_str = array_str.replace('\n', '')
     array_str = array_str.replace('\t ', '')
@@ -163,7 +209,6 @@ def parse_integer_list(array_str):
     int_str_list = array_str.split(',')
     integer_list = list()
     for int_str in int_str_list:
-
         try:
             int_value = int(int_str)
             integer_list.append(int_value)
@@ -198,6 +243,10 @@ def parse_integer_list(array_str):
             integer_list.extend(xrange(start_value, end_value+1))
     # END-FOR
 
+    # check size
+    if expected_size is not None and len(integer_list) != expected_size:
+        raise RuntimeError('It is required to have {0} integers given in {1}.'.format(expected_size, array_str))
+
     return integer_list
 
 
diff --git a/scripts/HFIR_4Circle_Reduction/hfctables.py b/scripts/HFIR_4Circle_Reduction/hfctables.py
index 4269976ed3b050608d85c7fa7908e6444c4dd3de..16fb17171b4b5a9199e73ae5b7ab1aaee71b9f07 100644
--- a/scripts/HFIR_4Circle_Reduction/hfctables.py
+++ b/scripts/HFIR_4Circle_Reduction/hfctables.py
@@ -2,6 +2,7 @@
 import numpy
 import sys
 import fourcircle_utility
+import guiutility
 
 import NTableWidget as tableBase
 
@@ -77,12 +78,207 @@ class KShiftTableWidget(tableBase.NTableWidget):
         return
 
 
-class PeakIntegrationTableWidget(tableBase.NTableWidget):
+class MatrixTable(tableBase.NTableWidget):
     """
-    Extended table widget for studying peak integration of scan on various Pts.
+
     """
+    def __init__(self, parent):
+        """
+
+        :param parent:
+        """
+        super(MatrixTable, self).__init__(parent)
+
+        return
+
+    def setup(self, num_rows, num_cols):
+        """
+        set up a table for matrix
+        :param num_rows:
+        :param num_cols:
+        :return:
+        """
+        # check inputs
+        assert isinstance(num_rows, int) and num_rows > 0, 'Number of rows larger than 0.'
+        assert isinstance(num_cols, int) and num_cols > 0, 'Number of columns larger than 0.'
+
+        # think of reset
+        if self.rowCount() != num_rows or self.columnCount() != num_cols:
+            raise RuntimeError('ASAP')
+
+        return
+
+    def set_matrix(self, matrix):
+        """
 
-    # UB peak information table
+        :param matrix:
+        :return:
+        """
+        # check inputs
+        assert isinstance(matrix, numpy.ndarray) and matrix.shape == (4, 4), 'Matrix {0} must be ndarray with {1}.' \
+                                                                             ''.format(matrix, matrix.shape)
+        for i in range(matrix.shape[0]):
+            for j in range(matrix.shape[1]):
+                self.set_value_cell(i, j, matrix[i, j])
+
+        return
+
+
+class PeaksIntegrationSpreadSheet(tableBase.NTableWidget):
+    """
+    Detailed peaks integration information table. Each row is for a peak measured in a scan containing multiple Pts.
+    It can be converted to a csv file for user to check the integration details.
+    Note: all the intensities shown below are corrected by by Lorentzian and absorption if either of them is
+          calculated and applied.
+    """
+    Table_Setup = [('Scan', 'int'),
+                   ('HKL (S)', 'str'),
+                   ('HKL (C)', 'str'),
+                   ('Mask', 'str'),
+                   ('Intensity (R)', 'float'),
+                   ('Error (R)', 'float'),
+                   ('Intensity 2', 'float'),
+                   ('Error (2)', 'float'),
+                   ('Intensity (G)', 'float'),
+                   ('Error (G)', 'float'),
+                   ('Lorentz', 'float'),
+                   ('Bkgd (E)', 'float'),
+                   ('Bkgd (G)', 'float'),
+                   ('Sigma', 'float'),
+                   ('A', 'float'),
+                   ('Motor Name', 'str'),
+                   ('Motor Step', 'float'),
+                   ('K-shift', 'str'),
+                   ('Absorption', 'float')
+                   ]
+
+    def __init__(self, parent):
+        """
+        initialization
+        :param parent:
+        """
+        super(PeaksIntegrationSpreadSheet, self).__init__(parent)
+
+        # define column indexes
+        self._colIndexScan = None
+        self._colIndexSpiceHKL = None
+        self._colIndexMantidHKL = None
+        self._colIndexMask = None
+        self._colIndexRawIntensity = None
+        self._colIndexRawError = None
+        self._colIndexIntensity2 = None
+        self._colIndexError2 = None
+        self._colIndexIntensity3 = None
+        self._colIndexError3 = None
+        self._colIndexBkgdE = None
+        self._colIndexBkgdG = None
+        self._colIndexMotorName = None
+        self._colIndexMotorStep = None
+        self._colIndexAbsorption = None
+        self._colIndexKShift = None
+        self._colIndexLorentz = None
+        self._colIndexSigma = None
+        self._colIndexA = None
+
+        return
+
+    def add_scan_information(self, scan_number, s_hkl, m_hkl, mask, raw_intensity, raw_error, intensity2, error2,
+                             intensity3, error3, lorentz, bkgd_e, bkgd_g, gauss_s, gauss_a, motor_name, motor_step,
+                             k_shift, absorption):
+        """
+        add the detailed integrating information to table
+        :param scan_number:
+        :param s_hkl:
+        :param m_hkl:
+        :param mask:
+        :param raw_intensity:
+        :param raw_error:
+        :param intensity2:
+        :param error2:
+        :param intensity3:
+        :param error3:
+        :param lorentz:
+        :param bkgd_e:
+        :param bkgd_g:
+        :param gauss_s:
+        :param gauss_a:
+        :param motor_name:
+        :param motor_step:
+        :param k_shift:
+        :param absorption:
+        :return:
+        """
+        # append an empty row
+        row_list = [None] * len(self.Table_Setup)
+        status, msg = self.append_row(row_list)
+        if not status:
+            print '[ERROR] Unable to append a new row due to {0}.'.format(msg)
+        else:
+            row_list[0] = 123
+            row_list[1] = ''
+            row_list[2] = ''
+        last_row_number = self.rowCount() - 1
+
+        # set value
+        self.update_cell_value(last_row_number, self._colIndexScan, scan_number)
+        self.update_cell_value(last_row_number, self._colIndexSpiceHKL, s_hkl)
+        self.update_cell_value(last_row_number, self._colIndexMantidHKL, m_hkl)
+        self.update_cell_value(last_row_number, self._colIndexMask, mask)
+        self.update_cell_value(last_row_number, self._colIndexRawIntensity, raw_intensity)
+        self.update_cell_value(last_row_number, self._colIndexRawError, raw_error)
+        self.update_cell_value(last_row_number, self._colIndexIntensity2, intensity2)
+        self.update_cell_value(last_row_number, self._colIndexIntensity3, intensity3)
+        self.update_cell_value(last_row_number, self._colIndexError2, error2)
+        self.update_cell_value(last_row_number, self._colIndexError3, error3)
+        self.update_cell_value(last_row_number, self._colIndexLorentz, lorentz)
+        self.update_cell_value(last_row_number, self._colIndexBkgdE, bkgd_e)
+        self.update_cell_value(last_row_number, self._colIndexBkgdG, bkgd_g)
+        self.update_cell_value(last_row_number, self._colIndexSigma, gauss_s)
+        self.update_cell_value(last_row_number, self._colIndexA, gauss_a)
+        self.update_cell_value(last_row_number, self._colIndexKShift, k_shift)
+        self.update_cell_value(last_row_number, self._colIndexAbsorption, absorption)
+        self.update_cell_value(last_row_number, self._colIndexMotorName, motor_name)
+        self.update_cell_value(last_row_number, self._colIndexMotorStep, motor_step)
+
+        return
+
+    def setup(self):
+        """
+        Init setup
+        :return:
+        """
+        self.init_setup(self.Table_Setup)
+
+        # get column names
+        col_name_list = self._myColumnNameList
+
+        self._colIndexScan = col_name_list.index('Scan')
+        self._colIndexSpiceHKL = self.Table_Setup.index(('HKL (S)', 'str'))
+        self._colIndexMantidHKL = self.Table_Setup.index(('HKL (C)', 'str'))
+        self._colIndexMask = self.Table_Setup.index(('Mask', 'str'))
+        self._colIndexRawIntensity = self.Table_Setup.index(('Intensity (R)', 'float'))
+        self._colIndexRawError = self.Table_Setup.index(('Error (R)', 'float'))
+        self._colIndexIntensity2 = self.Table_Setup.index(('Intensity 2', 'float'))
+        self._colIndexError2 = self.Table_Setup.index(('Error (2)', 'float'))
+        self._colIndexIntensity3 = self.Table_Setup.index(('Intensity (G)', 'float'))
+        self._colIndexError3 = self.Table_Setup.index(('Error (G)', 'float'))
+        self._colIndexLorentz = self.Table_Setup.index(('Lorentz', 'float'))
+        self._colIndexBkgdE = self.Table_Setup.index(('Bkgd (E)', 'float'))
+        self._colIndexBkgdG = self.Table_Setup.index(('Bkgd (G)', 'float'))
+        self._colIndexMotorName = self.Table_Setup.index(('Motor Name', 'str'))
+        self._colIndexMotorStep = self.Table_Setup.index(('Motor Step', 'float'))
+        self._colIndexKShift = self.Table_Setup.index(('K-shift', 'str'))
+        self._colIndexAbsorption = self.Table_Setup.index(('Absorption', 'float'))
+        self._colIndexSigma = self.Table_Setup.index(('Sigma', 'float'))
+        self._colIndexA = self.Table_Setup.index(('A', 'float'))
+
+        return
+
+
+class PeakIntegrationTableWidget(tableBase.NTableWidget):
+    """
+    Extended table widget for studying peak integration of a single scan on various Pts.
+    """
     Table_Setup = [('Pt', 'int'),
                    ('Raw', 'float'),
                    ('Masked', 'float'),
@@ -97,7 +293,8 @@ class PeakIntegrationTableWidget(tableBase.NTableWidget):
         self._expNumber = -1
         self._scanNumber = -1
 
-        self._intensityColIndex = None
+        self._rawIntensityColIndex = None
+        self._maskedIntensityColIndex = None
 
         return
 
@@ -110,9 +307,11 @@ class PeakIntegrationTableWidget(tableBase.NTableWidget):
         :return: 2-tuple as boolean and error message
         """
         # check requirements
-        assert isinstance(pt_number, int)
-        assert isinstance(raw_signal, int) or isinstance(raw_signal, float)
-        assert isinstance(masked_signal, float)
+        assert isinstance(pt_number, int), 'Error 920X'
+        assert isinstance(raw_signal, int) or isinstance(raw_signal, float) or raw_signal is None,\
+            'Error 920A'
+        assert isinstance(masked_signal, float) or isinstance(masked_signal, int) or masked_signal is None,\
+            'Error 920B'
 
         # form a new row and append
         status, msg = self.append_row([pt_number, raw_signal, masked_signal, False])
@@ -128,6 +327,36 @@ class PeakIntegrationTableWidget(tableBase.NTableWidget):
         """
         return self._expNumber, self._scanNumber
 
+    def sum_raw_intensity(self):
+        """
+        sum raw intensities of all Pts.
+        :return:
+        """
+        num_rows = self.rowCount()
+
+        count_sum = 0.
+        for i_row in range(num_rows):
+            pt_count = self.get_cell_value(i_row, self._rawIntensityColIndex)
+            count_sum += pt_count
+        # END-FOR
+
+        return count_sum
+
+    def sum_masked_intensity(self):
+        """
+        sum masked intensities of all Pts.
+        :return:
+        """
+        num_rows = self.rowCount()
+
+        count_sum = 0.
+        for i_row in range(num_rows):
+            pt_count = self.get_cell_value(i_row, self._maskedIntensityColIndex)
+            count_sum += pt_count
+        # END-FOR
+
+        return count_sum
+
     def setup(self):
         """
         Init setup
@@ -144,7 +373,8 @@ class PeakIntegrationTableWidget(tableBase.NTableWidget):
         self.setColumnWidth(3, 90)
 
         # Set others...
-        self._intensityColIndex = self._myColumnNameList.index('Masked')
+        self._rawIntensityColIndex = self._myColumnNameList.index('Raw')
+        self._maskedIntensityColIndex = self._myColumnNameList.index('Masked')
 
         return
 
@@ -218,7 +448,7 @@ class PeakIntegrationTableWidget(tableBase.NTableWidget):
         # Integrate
         sum_intensity = 0.
         for i_row in range(self.rowCount()):
-            intensity_i = self.get_cell_value(i_row, self._intensityColIndex)
+            intensity_i = self.get_cell_value(i_row, self._maskedIntensityColIndex)
             sum_intensity += intensity_i - background
 
         return sum_intensity
@@ -339,15 +569,12 @@ class UBMatrixTable(tableBase.NTableWidget):
 # UB peak information table
 UB_Peak_Table_Setup = [('Scan', 'int'),
                        ('Pt', 'int'),
-                       ('H', 'float'),
-                       ('K', 'float'),
-                       ('L', 'float'),
-                       ('Q_x', 'float'),
-                       ('Q_y', 'float'),
-                       ('Q_z', 'float'),
+                       ('Spice HKL', 'str'),
+                       ('Calculated HKL', 'str'),
+                       ('Q-Sample', 'str'),
                        ('Selected', 'checkbox'),
                        ('m1', 'float'),
-                       ('lambda', 'float'),  # wave length
+                       ('Wavelength', 'float'),  # wave length
                        ('Error', 'float')]
 
 
@@ -365,10 +592,62 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
         tableBase.NTableWidget.__init__(self, parent)
 
         # define class variables
-        self._storedHKL = dict()
+        self._cachedSpiceHKL = dict()
+
+        # class variables for column indexes
+        self._colIndexScan = None
+        self._colIndexSpiceHKL = None
+        self._colIndexCalculatedHKL = None
+        self._colIndexQSample = None
+        self._colIndexWavelength = None
+        self._colIndexError = None
 
         return
 
+    def add_peak(self, scan_number, spice_hkl, q_sample, m1, wave_length):
+        """
+
+        :param scan_number:
+        :param spice_hkl:
+        :param q_sample:
+        :param m1:
+        :param wave_length:
+        :return:
+        """
+        # check inputs
+        assert isinstance(scan_number, int), 'Scan number integer'
+        assert len(spice_hkl) == 3, 'Spice HKL'
+        assert len(q_sample) == 3, 'Q-sample'
+        assert isinstance(m1, float) or m1 is None, 'm1'
+        assert isinstance(wave_length, float) or wave_length is None, 'wave length'
+
+        # spice_hkl_str = '{0:.4f}, {1:.4f}, {2:.4f}'.format(spice_hkl[0], spice_hkl[1], spice_hkl[2])
+        # q_sample_str = '{0:.4f}, {1:.4f}, {2:.4f}'.format(q_sample[0], q_sample[1], q_sample[2])
+        spice_hkl_str = self.format_array(spice_hkl)
+        q_sample_str = self.format_array(q_sample)
+        self.append_row([scan_number, -1, spice_hkl_str, '', q_sample_str, False, m1, wave_length, ''])
+
+        return True, ''
+
+    @staticmethod
+    def format_array(array):
+        """
+        output a formatted array with limited precision of float
+        :param array:
+        :return:
+        """
+        format_str = ''
+        for index, number in enumerate(array):
+            if index > 0:
+                format_str += ', '
+            if isinstance(number, float):
+                format_str += '{0:.4f}'.format(number)
+            else:
+                format_str += '{0}'.format(number)
+        # END-FOR
+
+        return format_str
+
     def get_exp_info(self, row_index):
         """
         Get experiment information from a row
@@ -384,21 +663,31 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
 
         return scan_number, pt_number
 
-    def get_hkl(self, row_index):
+    def get_hkl(self, row_index, is_spice_hkl):
         """
         Get reflection's miller index
         :param row_index:
-        :return:
+        :param is_spice_hkl:
+        :return: 3-tuple as H, K, L
         """
-        assert isinstance(row_index, int)
+        # check input
+        assert isinstance(row_index, int), 'Row index {0} must be an integer but not a {1}.' \
+                                           ''.format(row_index, type(row_index))
 
-        m_h = self.get_cell_value(row_index, 2)
-        m_k = self.get_cell_value(row_index, 3)
-        m_l = self.get_cell_value(row_index, 4)
-
-        assert isinstance(m_h, float)
-        assert isinstance(m_k, float)
-        assert isinstance(m_l, float)
+        # get the HKL either parsed from SPICE file or from calculation
+        if is_spice_hkl:
+            hkl_str = self.get_cell_value(row_index, self._colIndexSpiceHKL)
+        else:
+            hkl_str = self.get_cell_value(row_index, self._colIndexCalculatedHKL)
+
+        # convert the recorded string to HKL
+        status, ret_obj = guiutility.parse_float_array(hkl_str)
+        if not status:
+            raise RuntimeError(ret_obj)
+        elif len(ret_obj) != 3:
+            raise RuntimeError('Unable to parse array "{0}" to 3 floating points.'.format(hkl_str))
+        else:
+            m_h, m_k, m_l = ret_obj
 
         return m_h, m_k, m_l
 
@@ -413,6 +702,20 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
 
         return scan_number, pt_number
 
+    def get_selected_scans(self):
+        """
+        get the scan numbers that are selected
+        :return:
+        """
+        selected_rows = self.get_selected_rows(True)
+
+        scan_list = list()
+        for i_row in selected_rows:
+            scan_number = self.get_cell_value(i_row, self._colIndexScan)
+            scan_list.append(scan_number)
+
+        return scan_list
+
     def is_selected(self, row_index):
         """ Check whether a row is selected.
         :param row_index:
@@ -431,14 +734,28 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
         :return:
         """
         self.init_setup(UB_Peak_Table_Setup)
-        self._statusColName = 'Selected'
+        self.set_status_column_name('Selected')
+
+        # define all the _colIndex
+        self._colIndexScan = self._myColumnNameList.index('Scan')
+        self._colIndexSpiceHKL = self._myColumnNameList.index('Spice HKL')
+        self._colIndexCalculatedHKL = self._myColumnNameList.index('Calculated HKL')
+        self._colIndexQSample = self._myColumnNameList.index('Q-Sample')
+        self._colIndexWavelength = self._myColumnNameList.index('Wavelength')
+        self._colIndexError = self._myColumnNameList.index('Error')
+
+        # set up the width of some columns
+        self.setColumnWidth(self._colIndexSpiceHKL, 240)
+        self.setColumnWidth(self._colIndexCalculatedHKL, 240)
+        self.setColumnWidth(4, 240)
 
         return
 
-    def select_all_nuclear_peaks(self):
+    def select_nuclear_peak_rows(self, tolerance):
         """
         select all nuclear peaks, i.e., set the flag on on 'select' for all rows if their HKL indicates that
         they are nuclear peaks
+        :param tolerance:
         :return: string as error message
         """
         num_rows = self.rowCount()
@@ -447,8 +764,8 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
         for row_index in range(num_rows):
             # get the reading of HKL
             try:
-                hkl_tuple = self.get_hkl(row_index)
-                if fourcircle_utility.is_peak_nuclear(hkl_tuple[0], hkl_tuple[1], hkl_tuple[2]):
+                hkl_tuple = self.get_hkl(row_index, is_spice_hkl=True)
+                if fourcircle_utility.is_peak_nuclear(hkl_tuple[0], hkl_tuple[1], hkl_tuple[2], tolerance):
                     self.select_row(row_index, status=True)
             except RuntimeError as error:
                 error_message += 'Unable to parse HKL of line %d due to %s.' % (row_index, str(error))
@@ -456,15 +773,43 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
 
         return error_message
 
-    def set_hkl(self, i_row, hkl, error=None):
+    def select_scans(self, select_all=False, nuclear_peaks=False, hkl_tolerance=None,
+                     wave_length=None, wave_length_tolerance=None):
         """
-        Set HKL to table
+        select scans in the UB matrix table
+        :param select_all:
+        :param nuclear_peaks:
+        :param hkl_tolerance:
+        :param wave_length:
+        :param wave_length_tolerance:
+        :return:
+        """
+        if select_all:
+            # select all
+            self.select_all_rows(True)
+
+        elif nuclear_peaks or wave_length_tolerance is not None:
+            # using filters
+            if nuclear_peaks:
+                self.select_nuclear_peak_rows(hkl_tolerance)
+            if wave_length_tolerance is not None:
+                self.select_rows_by_column_value(self._colIndexWavelength, wave_length, wave_length_tolerance,
+                                                 keep_current_selection=True)
+        else:
+            raise RuntimeError('Must pick up one option to do filter.')
+
+        return
+
+    def set_hkl(self, i_row, hkl, is_spice_hkl, error=None):
+        """
+        Set HKL to a row in the table. Show H/K/L with 4 decimal pionts
         :param i_row:
         :param hkl: HKL is a list of tuple
+        :param is_spice_hkl: If true, then set input to cell for SPICE-imported HKL. Otherwise to calculated HKL.
         :param error: error of HKL
         """
         # Check
-        assert isinstance(i_row, int), 'Row number (index) must be integer but not %s.' % type(i_row)
+        assert isinstance(i_row, int), 'Row number (index) must be integer but not %s.'.format(type(i_row))
 
         if isinstance(hkl, list) or isinstance(hkl, tuple):
             assert len(hkl) == 3, 'In case HKL is list of tuple, its size must be equal to 3 but not %d.' \
@@ -475,39 +820,41 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
         else:
             raise AssertionError('HKL of type %s is not supported. Supported types include list, tuple '
                                  'and numpy array.' % type(hkl))
+        assert isinstance(is_spice_hkl, bool), 'Flag {0} for SPICE-HKL must be a boolean but not a {1}.' \
+                                               ''.format(is_spice_hkl, type(is_spice_hkl))
 
-        # get columns
-        i_col_h = UB_Peak_Table_Setup.index(('H', 'float'))
-        i_col_k = UB_Peak_Table_Setup.index(('K', 'float'))
-        i_col_l = UB_Peak_Table_Setup.index(('L', 'float'))
+        # convert to a string with 4 decimal points
+        hkl_str = '%.4f, %.4f, %.4f' % (hkl[0], hkl[1], hkl[2])
 
-        self.update_cell_value(i_row, i_col_h, hkl[0])
-        self.update_cell_value(i_row, i_col_k, hkl[1])
-        self.update_cell_value(i_row, i_col_l, hkl[2])
+        if is_spice_hkl:
+            self.update_cell_value(i_row, self._colIndexSpiceHKL, hkl_str)
+        else:
+            self.update_cell_value(i_row, self._colIndexCalculatedHKL, hkl_str)
 
+        # set error
         if error is not None:
             i_col_error = UB_Peak_Table_Setup.index(('Error', 'float'))
             self.update_cell_value(i_row, i_col_error, error)
 
         return
 
-    def restore_cached_indexing(self):
+    def restore_cached_indexing(self, is_spice=True):
         """
         Restore the previously saved value to HKL
         :return:
         """
         # check first such that all the stored value are to be
-        stored_line_index = sorted(self._storedHKL.keys())
+        stored_line_index = sorted(self._cachedSpiceHKL.keys())
         assert len(stored_line_index) == self.rowCount(), 'The current rows and cached row counts do not match.'
 
         # restore
         for row_index in stored_line_index:
-            hkl = self._storedHKL[row_index]
-            self.set_hkl(row_index, hkl)
+            hkl = self._cachedSpiceHKL[row_index]
+            self.set_hkl(row_index, hkl, is_spice_hkl=is_spice)
         # END-FOR
 
         # clear
-        self._storedHKL.clear()
+        self._cachedSpiceHKL.clear()
 
         return
 
@@ -517,13 +864,13 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
         :return:
         """
         # clear the previous value
-        self._storedHKL.clear()
+        self._cachedSpiceHKL.clear()
 
         # store
         num_rows = self.rowCount()
         for row_index in range(num_rows):
-            peak_indexing = self.get_hkl(row_index)
-            self._storedHKL[row_index] = peak_indexing
+            peak_indexing = self.get_hkl(row_index, is_spice_hkl=True)
+            self._cachedSpiceHKL[row_index] = peak_indexing
         # END-FOR
 
         return
@@ -535,30 +882,29 @@ class UBMatrixPeakTable(tableBase.NTableWidget):
         :param k:
         :param l:
         """
-        assert isinstance(i_row, int)
+        assert isinstance(i_row, int), 'row number {0} must be an integer but not a {1}.' \
+                                       ''.format(i_row, type(i_row))
 
-        self.update_cell_value(i_row, 2, h)
-        self.update_cell_value(i_row, 3, k)
-        self.update_cell_value(i_row, 4, l)
+        self.update_cell_value(i_row, self._colIndexCalculatedHKL, self.format_array([h, k, l]))
 
         return
 
 
 class ProcessTableWidget(tableBase.NTableWidget):
     """
-    Extended table for peaks used to calculate UB matrix
+    Extended table for peaks used to process scans including peak integration, scan merging and etc.
     """
     TableSetup = [('Scan', 'int'),
-                  ('Intensity', 'float'),
-                  ('Corrected', 'float'),
                   ('Status', 'str'),
-                  ('Peak', 'str'),  # peak center can be either HKL or Q depending on the unit
+                  ('Intensity', 'float'),
+                  ('Corrected', 'float'),  # Lorenzian corrected
+                  ('Error', 'float'),
+                  ('Integrate', 'str'),  # integration type, Gaussian fit / simple summation
+                  ('Mask', 'str'),  # '' for no mask
                   ('HKL', 'str'),
-                  ('Index From', 'str'),  # source of HKL index, either SPICE or calculation
                   ('Motor', 'str'),
                   ('Motor Step', 'str'),
                   ('Wavelength', 'float'),
-                  ('Workspace', 'str'),
                   ('K-Index', 'int'),
                   ('Select', 'checkbox')]
 
@@ -574,16 +920,22 @@ class ProcessTableWidget(tableBase.NTableWidget):
         self._colIndexScan = None
         self._colIndexIntensity = None
         self._colIndexCorrInt = None
+        self._colIndexErrorBar = None
+        self._colIndexMask = None
+        self._colIndexIntType = None
         self._colIndexHKL = None
         self._colIndexStatus = None
         self._colIndexPeak = None
-        self._colIndexIndexFrom = None
+        # self._colIndexIndexFrom = None
         self._colIndexMotor = None
         self._colIndexMotorStep = None
         self._colIndexWaveLength = None
         self._colIndexKIndex = None
         self._colIndexWorkspace = None
 
+        # cache dictionaries
+        self._workspaceCacheDict = dict()
+
         return
 
     @staticmethod
@@ -591,7 +943,6 @@ class ProcessTableWidget(tableBase.NTableWidget):
         """ Generate a list for empty row with scan number
         :param scan_number:
         :param status:
-        :param frame: HKL or QSample
         :param ws_name
         :return:
         """
@@ -601,15 +952,16 @@ class ProcessTableWidget(tableBase.NTableWidget):
 
         intensity = None
         corr_int = None
+        error = None
+        mask = ''
+        integrate_type = 'sum'
         motor_name = None
         motor_step = None
         wave_length = 0
-        peak_center = ''
         hkl = ''
-        hkl_from = ''
 
-        new_row = [scan_number, intensity, corr_int, status, peak_center, hkl, hkl_from,
-                   motor_name, motor_step, wave_length, ws_name, 0, False]
+        new_row = [scan_number, status, intensity, corr_int, error, integrate_type, mask,  # peak_center,
+                   hkl, motor_name, motor_step, wave_length, 0, False]
 
         return new_row
 
@@ -622,9 +974,12 @@ class ProcessTableWidget(tableBase.NTableWidget):
         :return:
         """
         # check
-        assert isinstance(exp_number, int)
-        assert isinstance(scan_number, int)
-        assert isinstance(ws_name, str)
+        assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}.' \
+                                            ''.format(exp_number, type(exp_number))
+        assert isinstance(scan_number, int), 'Scan number {0} must be an integer but not a {1}.' \
+                                             ''.format(scan_number, type(scan_number))
+        assert isinstance(ws_name, str), 'Workspace name {0} must be a string but not a {1}.' \
+                                         ''.format(ws_name, type(ws_name))
 
         # construct a row
         new_row = self._generate_empty_row(scan_number, ws_name=ws_name)
@@ -665,6 +1020,18 @@ class ProcessTableWidget(tableBase.NTableWidget):
 
         return
 
+    def get_integration_type(self):
+        """
+        get the peak integration type
+        :return:
+        """
+        if self.rowCount() == 0:
+            raise RuntimeError('Empty table!')
+
+        integrate_type = self.get_cell_value(0, self._colIndexIntType)
+
+        return integrate_type
+
     def get_row_by_scan(self, scan_number):
         """
         get the row number for a gien scan
@@ -696,7 +1063,8 @@ class ProcessTableWidget(tableBase.NTableWidget):
         :return:
         """
         # Check
-        assert isinstance(target_state, str), 'blabla'
+        assert isinstance(target_state, str), 'State {0} must be a string but not a {1}.' \
+                                              ''.format(target_state, type(target_state))
 
         # Loop around to check
         return_list = list()
@@ -761,7 +1129,8 @@ class ProcessTableWidget(tableBase.NTableWidget):
         :param i_row:
         :return:
         """
-        return self.get_cell_value(i_row, self._colIndexWorkspace)
+        #  return self.get_cell_value(i_row, self._colIndexWorkspace)
+        return self._workspaceCacheDict[i_row]
 
     def get_scan_list(self, output_row_number=True):
         """
@@ -845,9 +1214,6 @@ class ProcessTableWidget(tableBase.NTableWidget):
         hkl_str = '%.3f, %.3f, %.3f' % (hkl[0], hkl[1], hkl[2])
         self.update_cell_value(row_number, self._colIndexHKL, hkl_str)
 
-        if hkl_source is not None:
-            self.update_cell_value(row_number, self._colIndexIndexFrom, hkl_source)
-
         return
 
     def set_k_shift_index(self, row_number, k_index):
@@ -908,24 +1274,31 @@ class ProcessTableWidget(tableBase.NTableWidget):
 
         return
 
-    def set_peak_intensity(self, row_number, peak_intensity, lorentz_corrected=False):
-        """ Set peak intensity to a row or scan
-        Requirement: Either row number or scan number must be given
+    def set_peak_intensity(self, row_number, peak_intensity, corrected_intensity, standard_error, integrate_method):
+        """
+        Set peak intensity to a row in the table
         Guarantees: peak intensity is set
         :param row_number:
         :param peak_intensity:
-        :param lorentz_corrected:
+        :param corrected_intensity:
+        :param standard_error:
+        :param integrate_method: must be '', simple or gaussian for simple counts summation or Gaussian fit, respectively
         :return:
         """
         # check requirements
         assert isinstance(peak_intensity, float), 'Peak intensity must be a float.'
+        assert isinstance(integrate_method, str), 'Integrated method {0} must be a string but not {1}.' \
+                                                  ''.format(integrate_method, type(integrate_method))
+        if integrate_method not in ['', 'simple', 'mixed', 'gaussian']:
+            raise RuntimeError('Peak integration {0} not in list. Method must be in ["" (Not defined), "simple"'
+                               ', "gaussian"]'.format(integrate_method))
 
-        if lorentz_corrected:
-            col_index = self._colIndexCorrInt
-        else:
-            col_index = self._colIndexIntensity
+        self.update_cell_value(row_number, self._colIndexIntensity, peak_intensity)
+        self.update_cell_value(row_number, self._colIndexIntType, integrate_method)
+        self.update_cell_value(row_number, self._colIndexCorrInt, corrected_intensity)
+        self.update_cell_value(row_number, self._colIndexErrorBar, standard_error)
 
-        return self.update_cell_value(row_number, col_index, peak_intensity)
+        return
 
     def set_status(self, row_number, status):
         """
@@ -965,7 +1338,9 @@ class ProcessTableWidget(tableBase.NTableWidget):
         # Check
         assert isinstance(merged_md_name, str), 'Merged MDWorkspace name must be a string.'
 
-        self.update_cell_value(row_number, self._colIndexWorkspace, merged_md_name)
+        #  self.update_cell_value(row_number, self._colIndexWorkspace, merged_md_name)
+
+        self._workspaceCacheDict[row_number] = merged_md_name
 
         return
 
@@ -981,15 +1356,18 @@ class ProcessTableWidget(tableBase.NTableWidget):
         self._colIndexScan = ProcessTableWidget.TableSetup.index(('Scan', 'int'))
         self._colIndexIntensity = self.TableSetup.index(('Intensity', 'float'))
         self._colIndexCorrInt = self.TableSetup.index(('Corrected', 'float'))
+        self._colIndexMask = self.TableSetup.index(('Mask', 'str'))
+        self._colIndexIntType = self.TableSetup.index(('Integrate', 'str'))
         self._colIndexStatus = self.TableSetup.index(('Status', 'str'))
         self._colIndexHKL = ProcessTableWidget.TableSetup.index(('HKL', 'str'))
-        self._colIndexPeak = self.TableSetup.index(('Peak', 'str'))
-        self._colIndexIndexFrom = self.TableSetup.index(('Index From', 'str'))
+        # self._colIndexPeak = self.TableSetup.index(('Peak', 'str'))
+        # self._colIndexIndexFrom = self.TableSetup.index(('Index From', 'str'))
         self._colIndexMotor = ProcessTableWidget.TableSetup.index(('Motor', 'str'))
         self._colIndexMotorStep = ProcessTableWidget.TableSetup.index(('Motor Step', 'str'))
         self._colIndexWaveLength = self.TableSetup.index(('Wavelength', 'float'))
         self._colIndexKIndex = self.TableSetup.index(('K-Index', 'int'))
-        self._colIndexWorkspace = self.TableSetup.index(('Workspace', 'str'))
+        self._colIndexErrorBar = self.TableSetup.index(('Error', 'float'))
+        # self._colIndexWorkspace = self.TableSetup.index(('Workspace', 'str'))
 
         return
 
@@ -1137,24 +1515,42 @@ class ScanSurveyTable(tableBase.NTableWidget):
 
         return scan_list
 
-    def get_selected_run_surveyed(self):
+    def get_selected_run_surveyed(self, required_size=1):
         """
         Purpose: Get selected pt number and run number that is set as selected
         Requirements: there must be one and only one run that is selected
         Guarantees: a 2-tuple for integer for return as scan number and Pt. number
-        :return: a 2-tuple of integer
+        :param required_size: if specified as an integer, then if the number of selected rows is different,
+                              an exception will be thrown.
+        :return: a 2-tuple of integer if required size is 1 (as old implementation) or a list of 2-tuple of integer
         """
+        # check required size?
+        assert isinstance(required_size, int) or required_size is None, 'Required number of runs {0} must be None ' \
+                                                                        'or an integer but not a {1}.' \
+                                                                        ''.format(required_size, type(required_size))
+
         # get the selected row indexes and check
         row_index_list = self.get_selected_rows(True)
-        assert len(row_index_list) == 1, 'There must be exactly one run that is selected. Now' \
-                                         'there are %d runs that are selected' % len(row_index_list)
 
-        # get scan and pt
-        row_index = row_index_list[0]
-        scan_number = self.get_cell_value(row_index, 0)
-        pt_number = self.get_cell_value(row_index, 1)
-
-        return scan_number, pt_number
+        if required_size is not None and required_size != len(row_index_list):
+            raise RuntimeError('It is required to have {0} runs selected, but now there are {1} runs that are '
+                               'selected.'.format(required_size, row_index_list))
+
+        # get all the scans and rows that are selected
+        scan_run_list = list()
+        for i_row in row_index_list:
+            # get scan and pt.
+            scan_number = self.get_cell_value(i_row, 0)
+            pt_number = self.get_cell_value(i_row, 1)
+            scan_run_list.append((scan_number, pt_number))
+
+        # special case for only 1 run that is selected
+        if len(row_index_list) == 1 and required_size is not None:
+            # get scan and pt
+            return scan_run_list[0]
+        # END-IF
+
+        return scan_run_list
 
     def show_reflections(self, num_rows):
         """
diff --git a/scripts/HFIR_4Circle_Reduction/integratedpeakview.py b/scripts/HFIR_4Circle_Reduction/integratedpeakview.py
index 7c737b52ff192a603b004ed2865a882a2adca911..6ea8f631586722766915b949116b8c8fe62ac0f0 100644
--- a/scripts/HFIR_4Circle_Reduction/integratedpeakview.py
+++ b/scripts/HFIR_4Circle_Reduction/integratedpeakview.py
@@ -1,4 +1,5 @@
 #pylint: disable=W0403,R0904,R0903
+import numpy
 import mplgraphicsview
 
 
@@ -31,6 +32,10 @@ class IntegratedPeakView(mplgraphicsview.MplGraphicsView):
         self._currX = 0.
         self._currY = 0.
 
+        # data managing
+        self._rawDataID = None
+        self._modelDataID = None
+
         return
 
     def add_background_indictor(self):
@@ -42,6 +47,20 @@ class IntegratedPeakView(mplgraphicsview.MplGraphicsView):
 
         return
 
+    def get_raw_data(self):
+        """
+        :exception: RuntimeError if no plot on canvas
+        :return: 2-tuple as vec_x and vec_y
+        """
+        if self._rawDataID is None:
+            raise RuntimeError('There is no raw data plot on the canvas')
+
+        data_set = self.canvas().get_data(self._rawDataID)
+        vec_x = data_set[0]
+        vec_y = data_set[1]
+
+        return vec_x, vec_y
+
     def on_mouse_motion(self, event):
         """
 
@@ -73,7 +92,69 @@ class IntegratedPeakView(mplgraphicsview.MplGraphicsView):
         """
         self._mousePressed = self.MousePress.RELEASED
 
-        print event.y, event.ydata
+        return
+
+    def plot_raw_data(self, vec_x, vec_y):
+        """
+        plot raw data, which will be recorded by _rawDataID
+        :param vec_x:
+        :param vec_y:
+        :return:
+        """
+        # plot data
+        self._rawDataID = self.add_plot_1d(vec_x, vec_y,  color='blue')
+        self.set_smart_y_limit(vec_y)
+
+        return
+
+    def plot_model(self, vec_x, model_vec_y, title=None):
+        """
+        plot model data which will be recorded by
+        :param vec_x:
+        :param model_vec_y:
+        :param title:
+        :return:
+        """
+        # plot data
+        self._modelDataID = self.add_plot_1d(vec_x, model_vec_y)
+        if title is not None:
+            self.set_title(title)
+        self.set_smart_y_limit(model_vec_y)
+
+        self.setXYLimit(xmin=vec_x[0] - 1., xmax=vec_x[-1] + 1.)
+
+        return
+
+    def remove_model(self):
+        """
+        remove the plot for model
+        :return:
+        """
+        if self._modelDataID is None:
+            raise RuntimeError('There is no model plot on canvas')
+
+        # reset title
+        self.set_title('')
+        self.remove_line(self._modelDataID)
+
+        self._modelDataID = None
+
+        return
+
+    def reset(self):
+        """
+        reset the canvas and thus the handler to the plots
+        :return:
+        """
+        # clear all lines
+        self.clear_all_lines()
+
+        # reset handlers
+        self._rawDataID = None
+        self._modelDataID = None
+
+        # reset title
+        self.set_title('')
 
         return
 
@@ -85,11 +166,14 @@ class IntegratedPeakView(mplgraphicsview.MplGraphicsView):
         :return:
         """
         # check
-        assert len(vec_y) > 0
+        assert isinstance(vec_y, numpy.ndarray) and len(vec_y) > 0, 'Vector Y must be a numpy array and not empty.'
 
         # find y's minimum and maximum
-        min_y = min(vec_y)
-        max_y = max(vec_y)
+        try:
+            min_y = numpy.min(vec_y)
+            max_y = numpy.max(vec_y)
+        except ValueError as value_err:
+            raise RuntimeError(str(value_err))
 
         d_y = max_y - min_y
 
diff --git a/scripts/HFIR_4Circle_Reduction/message_dialog.py b/scripts/HFIR_4Circle_Reduction/message_dialog.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e1d7385e1cc40282e117f65547f9c549ccf5cde
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/message_dialog.py
@@ -0,0 +1,72 @@
+# Dialog for message
+from PyQt4 import QtGui, QtCore
+
+import ui_messagebox
+
+
+class MessageDialog(QtGui.QDialog):
+    """
+    extension of QDialog
+    """
+    def __init__(self, parent):
+        """
+        initialization of customized dialog box
+        :param parent:
+        :return:
+        """
+        super(MessageDialog, self).__init__(parent)
+
+        # set up UI
+        self.ui = ui_messagebox.Ui_Dialog()
+        self.ui.setupUi(self)
+
+        # define operation
+        self.connect(self.ui.pushButton_close, QtCore.SIGNAL('clicked()'),
+                     self.do_quit)
+
+        return
+
+    def do_quit(self):
+        """
+
+        :return:
+        """
+        self.close()
+
+    def set_text(self, text):
+        """
+        set text to the text editor
+        :param text:
+        :return:
+        """
+        assert isinstance(text, str), 'Input text of type {0} must be a string.'.format(type(text))
+        self.ui.plainTextEdit_message.setPlainText(text)
+
+        return
+
+    def set_peak_integration_details(self, motor_pos_vec, pt_intensity_vec):
+        """
+        set the  details information of integrated peak including the peak intensities of
+        each Pt.
+        :param motor_pos_vec:
+        :param pt_intensity_vec:
+        :return:
+        """
+        text = '# Pt. \tIntensity \tMotor Position\n'
+        num_loops = max(len(motor_pos_vec), len(pt_intensity_vec))
+
+        for pt in range(num_loops):
+            text += '{0} \t'.format(pt+1)
+            if pt < len(pt_intensity_vec):
+                text += '{0} \t'.format(pt_intensity_vec[pt])
+            else:
+                text += '     \t'
+            if pt < len(motor_pos_vec):
+                text += '{0}\n'.format(motor_pos_vec[pt])
+            else:
+                text += '   \n'
+        # END-FOR
+
+        self.set_text(text)
+
+        return
diff --git a/scripts/HFIR_4Circle_Reduction/messagebox.ui b/scripts/HFIR_4Circle_Reduction/messagebox.ui
new file mode 100644
index 0000000000000000000000000000000000000000..397cab0b38e171c43a45809b390ad247c801dd7e
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/messagebox.ui
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>865</width>
+    <height>1001</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>Dialog</string>
+  </property>
+  <layout class="QGridLayout" name="gridLayout">
+   <item row="0" column="0">
+    <widget class="QPlainTextEdit" name="plainTextEdit_message"/>
+   </item>
+   <item row="1" column="0">
+    <layout class="QHBoxLayout" name="horizontalLayout">
+     <item>
+      <spacer name="horizontalSpacer">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QPushButton" name="pushButton_close">
+       <property name="text">
+        <string>Close</string>
+       </property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+  </layout>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/scripts/HFIR_4Circle_Reduction/mpl2dgraphicsview.py b/scripts/HFIR_4Circle_Reduction/mpl2dgraphicsview.py
index 1a53ba0ffe3a39ca6306371c6509464c2e9a6dc2..f49c1e9235fdc23a40511bd6b1caca53d3ba8e61 100644
--- a/scripts/HFIR_4Circle_Reduction/mpl2dgraphicsview.py
+++ b/scripts/HFIR_4Circle_Reduction/mpl2dgraphicsview.py
@@ -37,6 +37,14 @@ class Mpl2dGraphicsView(QtGui.QWidget):
 
         return
 
+    @property
+    def array2d(self):
+        """
+        return the matrix (2d-array) plot on the canvas
+        :return:
+        """
+        return self._myCanvas.array2d
+
     def add_plot_2d(self, array2d, x_min, x_max, y_min, y_max, hold_prev_image=True, y_tick_label=None):
         """
         Add a 2D image to canvas
@@ -148,6 +156,9 @@ class Qt4Mpl2dCanvas(FigureCanvas):
         # polygon
         self._myPolygon = None
 
+        # Buffer of data
+        self._currentArray2D = None
+
         # image management data structure
         self._currIndex = 0
         self._imagePlotDict = dict()
@@ -158,6 +169,14 @@ class Qt4Mpl2dCanvas(FigureCanvas):
 
         return
 
+    @property
+    def array2d(self):
+        """
+        get the matrix plot now
+        :return:
+        """
+        return self._currentArray2D
+
     def add_2d_plot(self, array2d, x_min, x_max, y_min, y_max, hold_prev, yticklabels=None):
         """ Add a 2D plot
         Requirements:
@@ -187,6 +206,7 @@ class Qt4Mpl2dCanvas(FigureCanvas):
         img_plot = self.axes.imshow(array2d,
                                     extent=[x_min, x_max, y_min, y_max],
                                     interpolation='none')
+        self._currentArray2D = array2d
 
         # set y ticks as an option:
         if yticklabels is not None:
diff --git a/scripts/HFIR_4Circle_Reduction/mplgraphicsview.py b/scripts/HFIR_4Circle_Reduction/mplgraphicsview.py
index 83115051cad2cfe50bfaa7b031a88466ee7ff105..a9ba39c9dc4e98f96e022d47581a88bb54fce0da 100644
--- a/scripts/HFIR_4Circle_Reduction/mplgraphicsview.py
+++ b/scripts/HFIR_4Circle_Reduction/mplgraphicsview.py
@@ -1,8 +1,9 @@
-#pylint: disable=invalid-name,too-many-public-methods,too-many-arguments,non-parent-init-called,R0901,R0902,too-many-branches,C0302,W0231
+#pylint: disable=invalid-name,too-many-public-methods,too-many-arguments,non-parent-init-called,R0902,too-many-branches,C0302
 import os
 import numpy as np
 
 from PyQt4 import QtGui
+from PyQt4.QtCore import pyqtSignal
 
 from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
 from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar2
@@ -55,7 +56,6 @@ class IndicatorManager(object):
     - 1: vertical. moving along X-direction. [x, x], [y_min, y_max];
     - 2: 2-way. moving in any direction. [x_min, x_max], [y, y], [x, x], [y_min, y_max].
     """
-
     def __init__(self):
         """
 
@@ -143,6 +143,16 @@ class IndicatorManager(object):
 
         return this_id
 
+    def delete(self, indicator_id):
+        """
+        Delete indicator
+        """
+        del self._lineManager[indicator_id]
+        del self._canvasLineKeyDict[indicator_id]
+        del self._indicatorTypeDict[indicator_id]
+
+        return
+
     def get_canvas_line_index(self, indicator_id):
         """
         Get a line's ID (on canvas) from an indicator ID
@@ -171,8 +181,8 @@ class IndicatorManager(object):
         :param line_id:
         :return:
         """
-        assert line_id in self._indicatorTypeDict
-        assert self._indicatorTypeDict[line_id] == 2
+        assert line_id in self._indicatorTypeDict, 'blabla'
+        assert self._indicatorTypeDict[line_id] == 2, 'blabla'
 
         vec_set = [self._lineManager[line_id][0:2], self._lineManager[line_id][2:4]]
 
@@ -233,13 +243,13 @@ class IndicatorManager(object):
         """
         return sorted(self._lineManager.keys())
 
-    def get_marker(self):
+    @staticmethod
+    def get_marker():
         """
         Get the marker a line
-        :param line_id:
         :return:
         """
-        return 'o'
+        return '.'
 
     def get_next_color(self):
         """
@@ -346,7 +356,6 @@ class MplGraphicsView(QtGui.QWidget):
 
     Note: Merged with HFIR_Powder_Reduction.MplFigureCAnvas
     """
-
     def __init__(self, parent):
         """ Initialization
         """
@@ -357,6 +366,11 @@ class MplGraphicsView(QtGui.QWidget):
         self._myCanvas = Qt4MplCanvas(self)
         self._myToolBar = MyNavigationToolbar(self, self._myCanvas)
 
+        # state of operation
+        self._isZoomed = False
+        # X and Y limit with home button
+        self._homeXYLimit = None
+
         # set up layout
         self._vBox = QtGui.QVBoxLayout(self)
         self._vBox.addWidget(self._myCanvas)
@@ -367,12 +381,19 @@ class MplGraphicsView(QtGui.QWidget):
         self._myLineMarkerColorIndex = 0
         self.setAutoLineMarkerColorCombo()
 
+        # records for all the lines that are plot on the canvas
+        self._my1DPlotDict = dict()
+
         # Declaration of class variables
         self._indicatorKey = None
 
         # Indicator manager
         self._myIndicatorsManager = IndicatorManager()
 
+        # some statistic recorder for convenient operation
+        self._statDict = dict()
+        self._statRightPlotDict = dict()
+
         return
 
     def add_arrow(self, start_x, start_y, stop_x, stop_y):
@@ -407,14 +428,27 @@ class MplGraphicsView(QtGui.QWidget):
 
         return key_list
 
-    def add_plot_1d(self, vec_x, vec_y, y_err=None, color=None, label="", x_label=None, y_label=None,
-                    marker=None, line_style=None, line_width=1):
-        """ Add a new plot
+    def add_plot_1d(self, vec_x, vec_y, y_err=None, color=None, label='', x_label=None, y_label=None,
+                    marker=None, line_style=None, line_width=1, show_legend=True):
         """
-        line_key = self._myCanvas.add_plot_1d(vec_x, vec_y, y_err, color, label, x_label, y_label, marker, line_style,
-                                              line_width)
+        Add a 1-D plot to canvas
+        :param vec_x:
+        :param vec_y:
+        :param y_err:
+        :param color:
+        :param label:
+        :param x_label:
+        :param y_label:
+        :param marker:
+        :param line_style:
+        :param line_width:
+        :param show_legend:
+        :return: line ID (key to the line)
+        """
+        line_id = self._myCanvas.add_plot_1d(vec_x, vec_y, y_err, color, label, x_label, y_label, marker, line_style,
+                                             line_width, show_legend)
 
-        return line_key
+        return line_id
 
     def add_plot_1d_right(self, vec_x, vec_y, color=None, label='', marker=None, line_style=None, line_width=1):
         """
@@ -432,6 +466,8 @@ class MplGraphicsView(QtGui.QWidget):
                                                     color=color, marker=marker,
                                                     linestyle=line_style, linewidth=line_width)
 
+        self._statRightPlotDict[line_key] = (min(vec_x), max(vec_x), min(vec_y), max(vec_y))
+
         return line_key
 
     def add_2way_indicator(self, x=None, y=None, color=None, master_line=None):
@@ -505,12 +541,13 @@ class MplGraphicsView(QtGui.QWidget):
 
         return my_id
 
-    def add_vertical_indicator(self, x=None, color=None):
+    def add_vertical_indicator(self, x=None, color=None, style=None, line_width=1):
         """
         Add a vertical indicator line
         Guarantees: an indicator is plot and its ID is returned
         :param x: None as the automatic mode using default from middle of canvas
         :param color: None as the automatic mode using default
+        :param style:
         :return: indicator ID
         """
         # For indicator line's position
@@ -528,6 +565,10 @@ class MplGraphicsView(QtGui.QWidget):
         else:
             assert isinstance(color, str)
 
+        # style
+        if style is None:
+            style = self._myIndicatorsManager.get_line_style()
+
         # Form
         my_id = self._myIndicatorsManager.add_vertical_indicator(x, y_min, y_max, color)
         vec_x, vec_y = self._myIndicatorsManager.get_data(my_id)
@@ -579,9 +620,27 @@ class MplGraphicsView(QtGui.QWidget):
         """
         self._myCanvas.clear_all_1d_plots()
 
+        self._statRightPlotDict.clear()
+        self._statDict.clear()
+        self._my1DPlotDict.clear()
+
+        # about zoom
+        self._isZoomed = False
+        self._homeXYLimit = None
+
+        return
+
     def clear_canvas(self):
         """ Clear canvas
         """
+        # clear all the records
+        self._statDict.clear()
+        self._my1DPlotDict.clear()
+
+        # about zoom
+        self._isZoomed = False
+        self._homeXYLimit = None
+
         return self._myCanvas.clear_canvas()
 
     def draw(self):
@@ -589,6 +648,21 @@ class MplGraphicsView(QtGui.QWidget):
         """
         return self._myCanvas.draw()
 
+    def evt_toolbar_home(self):
+        """
+
+        Parameters
+        ----------
+
+        Returns
+        -------
+
+        """
+        # turn off zoom mode
+        self._isZoomed = False
+
+        return
+
     def evt_view_updated(self):
         """ Event handling as canvas size updated
         :return:
@@ -606,6 +680,24 @@ class MplGraphicsView(QtGui.QWidget):
 
         return
 
+    def evt_zoom_released(self):
+        """
+        event for zoom is release
+        Returns
+        -------
+
+        """
+        # record home XY limit if it is never zoomed
+        if self._isZoomed is False:
+            self._homeXYLimit = list(self.getXLimit())
+            self._homeXYLimit.extend(list(self.getYLimit()))
+        # END-IF
+
+        # set the state of being zoomed
+        self._isZoomed = True
+
+        return
+
     def getPlot(self):
         """
         """
@@ -627,6 +719,38 @@ class MplGraphicsView(QtGui.QWidget):
         """
         return self._myCanvas.getYLimit()
 
+    def get_y_min(self):
+        """
+        Get the minimum Y value of the plots on canvas
+        :return:
+        """
+        if len(self._statDict) == 0:
+            return 1E10
+
+        line_id_list = self._statDict.keys()
+        min_y = self._statDict[line_id_list[0]][2]
+        for i_plot in range(1, len(line_id_list)):
+            if self._statDict[line_id_list[i_plot]][2] < min_y:
+                min_y = self._statDict[line_id_list[i_plot]][2]
+
+        return min_y
+
+    def get_y_max(self):
+        """
+        Get the maximum Y value of the plots on canvas
+        :return:
+        """
+        if len(self._statDict) == 0:
+            return -1E10
+
+        line_id_list = self._statDict.keys()
+        max_y = self._statDict[line_id_list[0]][3]
+        for i_plot in range(1, len(line_id_list)):
+            if self._statDict[line_id_list[i_plot]][3] > max_y:
+                max_y = self._statDict[line_id_list[i_plot]][3]
+
+        return max_y
+
     def move_indicator(self, line_id, dx, dy):
         """
         Move the indicator line in horizontal
@@ -661,6 +785,7 @@ class MplGraphicsView(QtGui.QWidget):
         #
         plot_id = self._myIndicatorsManager.get_canvas_line_index(indicator_key)
         self._myCanvas.remove_plot_1d(plot_id)
+        self._myIndicatorsManager.delete(indicator_key)
 
         return
 
@@ -669,8 +794,18 @@ class MplGraphicsView(QtGui.QWidget):
         :param line_id:
         :return:
         """
+        # remove line
         self._myCanvas.remove_plot_1d(line_id)
 
+        # remove the records
+        if line_id in self._statDict:
+            del self._statDict[line_id]
+            del self._my1DPlotDict[line_id]
+        else:
+            del self._statRightPlotDict[line_id]
+
+        return
+
     def set_indicator_position(self, line_id, pos_x, pos_y):
         """ Set the indicator to new position
         :param line_id:
@@ -702,9 +837,27 @@ class MplGraphicsView(QtGui.QWidget):
         """
         return self._myCanvas.remove_plot_1d(ikey)
 
-    def updateLine(self, ikey, vecx, vecy, linestyle=None, linecolor=None, marker=None, markercolor=None):
+    def updateLine(self, ikey, vecx=None, vecy=None, linestyle=None, linecolor=None, marker=None, markercolor=None):
         """
+        update a line's set up
+        Parameters
+        ----------
+        ikey
+        vecx
+        vecy
+        linestyle
+        linecolor
+        marker
+        markercolor
+
+        Returns
+        -------
+
         """
+        # check
+        assert isinstance(ikey, int), 'Line key must be an integer.'
+        assert ikey in self._my1DPlotDict, 'Line with ID %d is not on canvas. ' % ikey
+
         return self._myCanvas.updateLine(ikey, vecx, vecy, linestyle, linecolor, marker, markercolor)
 
     def update_indicator(self, i_key, color):
@@ -730,6 +883,28 @@ class MplGraphicsView(QtGui.QWidget):
 
         return
 
+    def get_canvas(self):
+        """
+        get canvas
+        Returns:
+
+        """
+        return self._myCanvas
+
+    def get_current_plots(self):
+        """
+        Get the current plots on canvas
+        Returns
+        -------
+        list of 2-tuple: integer (plot ID) and string (label)
+        """
+        tuple_list = list()
+        line_id_list = sorted(self._my1DPlotDict.keys())
+        for line_id in line_id_list:
+            tuple_list.append((line_id, self._my1DPlotDict[line_id]))
+
+        return tuple_list
+
     def get_indicator_key(self, x, y):
         """ Get the key of the indicator with given position
         :param picker_pos:
@@ -800,12 +975,23 @@ class MplGraphicsView(QtGui.QWidget):
 
         return marker, color
 
-    def resetLineColorStyle(self):
+    def reset_line_color_marker_index(self):
         """ Reset the auto index for line's color and style
         """
         self._myLineMarkerColorIndex = 0
         return
 
+    def set_title(self, title, color='black'):
+        """
+        set title to canvas
+        :param title:
+        :param color:
+        :return:
+        """
+        self._myCanvas.set_title(title, color)
+
+        return
+
     def setXYLimit(self, xmin=None, xmax=None, ymin=None, ymax=None):
         """ Set X-Y limit automatically
         """
@@ -817,12 +1003,12 @@ class MplGraphicsView(QtGui.QWidget):
         return
 
     def setAutoLineMarkerColorCombo(self):
+        """ Set the default/auto line marker/color combination list
         """
-        """
-        self._myLineMarkerColorList = []
+        self._myLineMarkerColorList = list()
         for marker in MplLineMarkers:
             for color in MplBasicColors:
-                self._myLineMarkerColorList.append( (marker, color) )
+                self._myLineMarkerColorList.append((marker, color))
 
         return
 
@@ -838,7 +1024,6 @@ class Qt4MplCanvas(FigureCanvas):
     """  A customized Qt widget for matplotlib figure.
     It can be used to replace GraphicsView of QtGui
     """
-
     def __init__(self, parent):
         """  Initialization
         """
@@ -851,7 +1036,8 @@ class Qt4MplCanvas(FigureCanvas):
         self.fig.patch.set_facecolor('white')
 
         if True:
-            self.axes = self.fig.add_subplot(111) # return: matplotlib.axes.AxesSubplot
+            self.axes = self.fig.add_subplot(111)  # return: matplotlib.axes.AxesSubplot
+            self.fig.subplots_adjust(bottom=0.15)
             self.axes2 = None
         else:
             self.axes = self.fig.add_host_subplot(111)
@@ -870,9 +1056,20 @@ class Qt4MplCanvas(FigureCanvas):
 
         # legend and color bar
         self._colorBar = None
+        self._isLegendOn = False
+        self._legendFontSize = 8
 
         return
 
+    @property
+    def is_legend_on(self):
+        """
+        check whether the legend is shown or hide
+        Returns:
+        boolean
+        """
+        return self._isLegendOn
+
     def add_arrow(self, start_x, start_y, stop_x, stop_y):
         """
         0, 0, 0.5, 0.5, head_width=0.05, head_length=0.1, fc='k', ec='k')
@@ -889,7 +1086,7 @@ class Qt4MplCanvas(FigureCanvas):
         return
 
     def add_plot_1d(self, vec_x, vec_y, y_err=None, color=None, label="", x_label=None, y_label=None,
-                    marker=None, line_style=None, line_width=1):
+                    marker=None, line_style=None, line_width=1, show_legend=True):
         """
 
         :param vec_x: numpy array X
@@ -902,11 +1099,13 @@ class Qt4MplCanvas(FigureCanvas):
         :param marker:
         :param line_style:
         :param line_width:
+        :param show_legend:
         :return: new key
         """
         # Check input
         if isinstance(vec_x, np.ndarray) is False or isinstance(vec_y, np.ndarray) is False:
-            raise NotImplementedError('Input vec_x or vec_y for addPlot() must be numpy.array.')
+            raise NotImplementedError('Input vec_x {0} or vec_y {1} for addPlot() must be numpy.array, but they are '
+                                      '{2} and {3}.'.format(vec_x, vec_y, type(vec_x), type(vec_y)))
         plot_error = y_err is not None
         if plot_error is True:
             if isinstance(y_err, np.ndarray) is False:
@@ -920,19 +1119,25 @@ class Qt4MplCanvas(FigureCanvas):
         # Hold previous data
         self.axes.hold(True)
 
+        # set x-axis and y-axis label
+        if x_label is not None:
+            self.axes.set_xlabel(x_label, fontsize=16)
+        if y_label is not None:
+            self.axes.set_ylabel(y_label, fontsize=16)
+
         # process inputs and defaults
         if color is None:
             color = (0, 1, 0, 1)
         if marker is None:
-            marker = 'o'
+            marker = 'None'
         if line_style is None:
             line_style = '-'
 
         # color must be RGBA (4-tuple)
         if plot_error is False:
-            r = self.axes.plot(vec_x, vec_y, color=color, marker=marker, linestyle=line_style,
-                               label=label, linewidth=line_width)
             # return: list of matplotlib.lines.Line2D object
+            r = self.axes.plot(vec_x, vec_y, color=color, marker=marker, markersize=1, linestyle=line_style,
+                               label=label, linewidth=line_width)
         else:
             r = self.axes.errorbar(vec_x, vec_y, yerr=y_err, color=color, marker=marker, linestyle=line_style,
                                    label=label, linewidth=line_width)
@@ -946,15 +1151,25 @@ class Qt4MplCanvas(FigureCanvas):
             self.axes.set_ylabel(y_label, fontsize=20)
 
         # set/update legend
-        self._setupLegend()
+        if show_legend:
+            self._setup_legend()
 
         # Register
         line_key = self._lineIndex
-        if len(r) == 1:
+        if plot_error:
+            msg = 'Return from plot is a {0}-tuple: {1} with plot error is {2}\n'.format(len(r), r, plot_error)
+            for i_r in range(len(r)):
+                msg += 'r[%d] = %s\n' % (i_r, str(r[i_r]))
+            raise NotImplementedError(msg)
+        else:
+            assert len(r) > 0, 'There must be at least 1 figure returned'
             self._lineDict[line_key] = r[0]
             self._lineIndex += 1
-        else:
-            print "Impoooooooooooooooosible!  Return from plot is a %d-tuple. " % (len(r))
+
+            for i_r in range(1, len(r)):
+                # remove the un-defined extra lines
+                self.axes.lines.remove(r[i_r])
+        # END-IF-ELSE
 
         # Flush/commit
         self.draw()
@@ -998,7 +1213,7 @@ class Qt4MplCanvas(FigureCanvas):
             self.axes2.set_ylabel(ylabel, fontsize=20)
 
         # set/update legend
-        self._setupLegend()
+        self._setup_legend()
 
         # Register
         line_key = -1
@@ -1029,7 +1244,9 @@ class Qt4MplCanvas(FigureCanvas):
         # self.axes.set_yticks(yticks)
 
         # show image
-        imgplot = self.axes.imshow(array2d, extent=[xmin,xmax,ymin,ymax], interpolation='none')
+        imgplot = self.axes.imshow(array2d, extent=[xmin, xmax, ymin, ymax], interpolation='none')
+
+        # TODO/ISSUE/55: how to make this part more powerful
         # set y ticks as an option:
         if yticklabels is not None:
             # it will always label the first N ticks even image is zoomed in
@@ -1052,6 +1269,51 @@ class Qt4MplCanvas(FigureCanvas):
 
         return
 
+    def add_contour_plot(self, vec_x, vec_y, matrix_z):
+        """
+
+        :param vec_x:
+        :param vec_y:
+        :param matrix_z:
+        :return:
+        """
+        # create mesh grid
+        grid_x, grid_y = np.meshgrid(vec_x, vec_y)
+
+        # check size
+        assert grid_x.shape == matrix_z.shape, 'Size of X (%d) and Y (%d) must match size of Z (%s).' \
+                                               '' % (len(vec_x), len(vec_y), matrix_z.shape)
+
+        # Release the current image
+        self.axes.hold(False)
+
+        # Do plot
+        contour_plot = self.axes.contourf(grid_x, grid_y, matrix_z, 100)
+
+        labels = [item.get_text() for item in self.axes.get_yticklabels()]
+        print '[DB...BAT] Number of Y labels = ', len(labels), ', Number of Y = ', len(vec_y)
+
+        # TODO/ISSUE/55: how to make this part more powerful
+        if len(labels) == 2*len(vec_y) - 1:
+            new_labels = [''] * len(labels)
+            for i in range(len(vec_y)):
+                new_labels[i*2] = '%d' % int(vec_y[i])
+            self.axes.set_yticklabels(new_labels)
+
+        # explicitly set aspect ratio of the image
+        self.axes.set_aspect('auto')
+
+        # Set color bar.  plt.colorbar() does not work!
+        if self._colorBar is None:
+            # set color map type
+            contour_plot.set_cmap('spectral')
+            self._colorBar = self.fig.colorbar(contour_plot)
+        else:
+            self._colorBar.update_bruteforce(contour_plot)
+
+        # Flush...
+        self._flush()
+
     def addImage(self, imagefilename):
         """ Add an image by file
         """
@@ -1090,7 +1352,7 @@ class Qt4MplCanvas(FigureCanvas):
                 except ValueError as e:
                     print "[Error] Plot %s is not in axes.lines which has %d lines. Error mesage: %s" % (
                         str(plot), len(self.axes.lines), str(e))
-                self._lineDict[ikey] = None
+                del self._lineDict[ikey]
             else:
                 # error bar
                 plot[0].remove()
@@ -1098,11 +1360,11 @@ class Qt4MplCanvas(FigureCanvas):
                     line.remove()
                 for line in plot[2]:
                     line.remove()
-                self._lineDict[ikey] = None
+                del self._lineDict[ikey]
             # ENDIF(plot)
         # ENDFOR
 
-        self._setupLegend()
+        self._setup_legend()
 
         self.draw()
 
@@ -1127,12 +1389,30 @@ class Qt4MplCanvas(FigureCanvas):
             self.fig.clear()
             # Re-create subplot
             self.axes = self.fig.add_subplot(111)
+            self.fig.subplots_adjust(bottom=0.15)
 
         # flush/commit
         self._flush()
 
         return
 
+    def decrease_legend_font_size(self):
+        """
+        reset the legend with the new font size
+        Returns:
+
+        """
+        # minimum legend font size is 2! return if it already uses the smallest font size.
+        if self._legendFontSize <= 2:
+            return
+
+        self._legendFontSize -= 1
+        self._setup_legend(font_size=self._legendFontSize)
+
+        self.draw()
+
+        return
+
     def getLastPlotIndexKey(self):
         """ Get the index/key of the last added line
         """
@@ -1153,6 +1433,35 @@ class Qt4MplCanvas(FigureCanvas):
         """
         return self.axes.get_ylim()
 
+    def hide_legend(self):
+        """
+        hide the legend if it is not None
+        Returns:
+
+        """
+        if self.axes.legend() is not None:
+            # set visible to be False and re-draw
+            self.axes.legend().set_visible(False)
+            self.draw()
+
+        self._isLegendOn = False
+
+        return
+
+    def increase_legend_font_size(self):
+        """
+        reset the legend with the new font size
+        Returns:
+
+        """
+        self._legendFontSize += 1
+
+        self._setup_legend(font_size=self._legendFontSize)
+
+        self.draw()
+
+        return
+
     def setXYLimit(self, xmin, xmax, ymin, ymax):
         """
         """
@@ -1179,6 +1488,27 @@ class Qt4MplCanvas(FigureCanvas):
 
         return
 
+    def set_title(self, title, color, location='center'):
+        """
+        set title to the figure (canvas) with default location at center
+        :param title:
+        :param color:
+        :param location
+        :return:
+        """
+        # check input
+        assert isinstance(title, str), 'Title {0} must be a string but not a {1}.'.format(title, type(title))
+        assert isinstance(color, str) and len(color) > 0, 'Color {0} must be a non-empty string but not a {1}.' \
+                                                          ''.format(color, type(color))
+        assert isinstance(location, str) and len(location) > 0, 'Location {0} must be a non-empty string but not a {1}.' \
+                                                                ''.format(location, type(location))
+
+        # set title and re-draw to apply
+        self.axes.set_title(title, loc=location, color=color)
+        self.draw()
+
+        return
+
     def remove_plot_1d(self, plot_key):
         """ Remove the line with its index as key
         :param plot_key:
@@ -1186,23 +1516,63 @@ class Qt4MplCanvas(FigureCanvas):
         """
         # Get all lines in list
         lines = self.axes.lines
-        assert isinstance(lines, list)
+        assert isinstance(lines, list), 'Lines must be list'
 
         if plot_key in self._lineDict:
-            self.axes.lines.remove(self._lineDict[plot_key])
-            self._lineDict[plot_key] = None
+            try:
+                self.axes.lines.remove(self._lineDict[plot_key])
+            except ValueError as r_error:
+                error_message = 'Unable to remove to 1D line %s (ID=%d) due to %s.' % (str(self._lineDict[plot_key]),
+                                                                                       plot_key, str(r_error))
+                raise RuntimeError(error_message)
+            # remove the plot key from dictionary
+            del self._lineDict[plot_key]
         else:
             raise RuntimeError('Line with ID %s is not recorded.' % plot_key)
 
+        self._setup_legend(location='best', font_size=self._legendFontSize)
+
         # Draw
         self.draw()
 
         return
 
-    def updateLine(self, ikey, vecx, vecy, linestyle=None, linecolor=None, marker=None, markercolor=None):
+    def show_legend(self):
+        """
+        show the legend if the legend is not None
+        Returns:
+
+        """
+        if self.axes.legend() is not None:
+            # set visible to be True and re-draw
+            # self.axes.legend().set_visible(True)
+            self._setup_legend(font_size=self._legendFontSize)
+            self.draw()
+
+            # set flag on
+            self._isLegendOn = True
+
+        return
+
+    def updateLine(self, ikey, vecx=None, vecy=None, linestyle=None, linecolor=None, marker=None, markercolor=None):
         """
+        Update a plot line or a series plot line
+        Args:
+            ikey:
+            vecx:
+            vecy:
+            linestyle:
+            linecolor:
+            marker:
+            markercolor:
+
+        Returns:
+
         """
         line = self._lineDict[ikey]
+        if line is None:
+            print '[ERROR] Line (key = %d) is None. Unable to update' % ikey
+            return
 
         if vecx is not None and vecy is not None:
             line.set_xdata(vecx)
@@ -1223,13 +1593,30 @@ class Qt4MplCanvas(FigureCanvas):
         oldlabel = line.get_label()
         line.set_label(oldlabel)
 
-        self.axes.legend()
+        self._setup_legend()
 
         # commit
         self.draw()
 
         return
 
+    def get_data(self, line_id):
+        """
+        Get vecX and vecY from line object in matplotlib
+        :param line_id:
+        :return: 2-tuple as vector X and vector Y
+        """
+        # check
+        if line_id not in self._lineDict:
+            raise KeyError('Line ID %s does not exist.' % str(line_id))
+
+        # get line
+        line = self._lineDict[line_id]
+        if line is None:
+            raise RuntimeError('Line ID %s has been removed.' % line_id)
+
+        return line.get_xdata(), line.get_ydata()
+
     def getLineStyleList(self):
         """
         """
@@ -1249,19 +1636,19 @@ class Qt4MplCanvas(FigureCanvas):
         """ Get a list of line/marker color and marker style combination
         as default to add more and more line to plot
         """
-        combolist = []
-        nummarkers = len(MplLineMarkers)
-        numcolors = len(MplBasicColors)
+        combo_list = list()
+        num_markers = len(MplLineMarkers)
+        num_colors = len(MplBasicColors)
 
-        for i in xrange(nummarkers):
+        for i in xrange(num_markers):
             marker = MplLineMarkers[i]
-            for j in xrange(numcolors):
+            for j in xrange(num_colors):
                 color = MplBasicColors[j]
-                combolist.append( (marker, color) )
+                combo_list.append((marker, color))
             # ENDFOR (j)
         # ENDFOR(i)
 
-        return combolist
+        return combo_list
 
     def _flush(self):
         """ A dirty hack to flush the image
@@ -1272,12 +1659,18 @@ class Qt4MplCanvas(FigureCanvas):
 
         return
 
-    def _setupLegend(self, location='best'):
-        """ Set up legend
-        self.axes.legend()
-        Handler is a Line2D object. Lable maps to the line object
+    def _setup_legend(self, location='best', font_size=10):
+        """
+        Set up legend
+        self.axes.legend(): Handler is a Line2D object. Lable maps to the line object
+        Args:
+            location:
+            font_size:
+
+        Returns:
+
         """
-        loclist = [
+        allowed_location_list = [
             "best",
             "upper right",
             "upper left",
@@ -1291,11 +1684,13 @@ class Qt4MplCanvas(FigureCanvas):
             "center"]
 
         # Check legend location valid or not
-        if location not in loclist:
+        if location not in allowed_location_list:
             location = 'best'
 
         handles, labels = self.axes.get_legend_handles_labels()
-        self.axes.legend(handles, labels, loc=location)
+        self.axes.legend(handles, labels, loc=location, fontsize=font_size)
+
+        self._isLegendOn = True
 
         return
 
@@ -1315,21 +1710,46 @@ class MyNavigationToolbar(NavigationToolbar2):
     NAVIGATION_MODE_PAN = 1
     NAVIGATION_MODE_ZOOM = 2
 
+    # This defines a signal called 'home_button_pressed' that takes 1 boolean
+    # argument for being in zoomed state or not
+    home_button_pressed = pyqtSignal()
+
+    # This defines a signal called 'canvas_zoom_released'
+    canvas_zoom_released = pyqtSignal()
+
     def __init__(self, parent, canvas):
         """ Initialization
+        built-in methods
+        - drag_zoom(self, event): triggered during holding the mouse and moving
         """
         NavigationToolbar2.__init__(self, canvas, canvas)
 
+        # parent
         self._myParent = parent
-        self._navigationMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
+        # tool bar mode
+        self._myMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
+
+        # connect the events to parent
+        self.home_button_pressed.connect(self._myParent.evt_toolbar_home)
+        self.canvas_zoom_released.connect(self._myParent.evt_zoom_released)
 
         return
 
+    @property
+    def is_zoom_mode(self):
+        """
+        check whether the tool bar is in zoom mode
+        Returns
+        -------
+
+        """
+        return self._myMode == MyNavigationToolbar.NAVIGATION_MODE_ZOOM
+
     def get_mode(self):
         """
         :return: integer as none/pan/zoom mode
         """
-        return self._navigationMode
+        return self._myMode
 
     # Overriding base's methods
     def draw(self):
@@ -1343,6 +1763,25 @@ class MyNavigationToolbar(NavigationToolbar2):
 
         return
 
+    def home(self, *args):
+        """
+
+        Parameters
+        ----------
+        args
+
+        Returns
+        -------
+
+        """
+        # call super's home() method
+        NavigationToolbar2.home(self, args)
+
+        # send a signal to parent class for further operation
+        self.home_button_pressed.emit()
+
+        return
+
     def pan(self, *args):
         """
 
@@ -1351,12 +1790,14 @@ class MyNavigationToolbar(NavigationToolbar2):
         """
         NavigationToolbar2.pan(self, args)
 
-        if self._navigationMode == MyNavigationToolbar.NAVIGATION_MODE_PAN:
+        if self._myMode == MyNavigationToolbar.NAVIGATION_MODE_PAN:
             # out of pan mode
-            self._navigationMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
+            self._myMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
         else:
             # into pan mode
-            self._navigationMode = MyNavigationToolbar.NAVIGATION_MODE_PAN
+            self._myMode = MyNavigationToolbar.NAVIGATION_MODE_PAN
+
+        print 'PANNED'
 
         return
 
@@ -1368,12 +1809,29 @@ class MyNavigationToolbar(NavigationToolbar2):
         """
         NavigationToolbar2.zoom(self, args)
 
-        if self._navigationMode == MyNavigationToolbar.NAVIGATION_MODE_ZOOM:
+        if self._myMode == MyNavigationToolbar.NAVIGATION_MODE_ZOOM:
             # out of zoom mode
-            self._navigationMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
+            self._myMode = MyNavigationToolbar.NAVIGATION_MODE_NONE
         else:
             # into zoom mode
-            self._navigationMode = MyNavigationToolbar.NAVIGATION_MODE_ZOOM
+            self._myMode = MyNavigationToolbar.NAVIGATION_MODE_ZOOM
+
+        return
+
+    def release_zoom(self, event):
+        """
+        override zoom released method
+        Parameters
+        ----------
+        event
+
+        Returns
+        -------
+
+        """
+        self.canvas_zoom_released.emit()
+
+        NavigationToolbar2.release_zoom(self, event)
 
         return
 
diff --git a/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py b/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py
index 7bc322f9e753afe495ab9c611587a7018b788509..5fd1e648ba1c729c9026be73b734131da09c6a5a 100644
--- a/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py
+++ b/scripts/HFIR_4Circle_Reduction/multi_threads_helpers.py
@@ -1,9 +1,9 @@
 #pylint: disable=W0403,R0913,R0902
-
 from PyQt4 import QtCore
 from PyQt4.QtCore import QThread
 
 import reduce4circleControl as r4c
+import peak_integration_utility
 
 
 class AddPeaksThread(QThread):
@@ -115,7 +115,7 @@ class IntegratePeaksThread(QThread):
     mergeMsgSignal = QtCore.pyqtSignal(int, int, int, str)
 
     def __init__(self, main_window, exp_number, scan_tuple_list, mask_det, mask_name, norm_type, num_pt_bg_left,
-                 num_pt_bg_right):
+                 num_pt_bg_right, scale_factor=1.000):
         """
 
         :param main_window:
@@ -140,8 +140,12 @@ class IntegratePeaksThread(QThread):
         assert isinstance(mask_name, str), 'Name of mask must be a string but not %s.' % str(type(mask_name))
         assert isinstance(norm_type, str), 'Normalization type must be a string but not %s.' \
                                            '' % str(type(norm_type))
-        assert isinstance(num_pt_bg_left, int) and num_pt_bg_left >= 0
-        assert isinstance(num_pt_bg_right, int) and num_pt_bg_right >= 0
+        assert isinstance(num_pt_bg_left, int) and num_pt_bg_left >= 0,\
+            'Number of Pt at left for background {0} must be non-negative integers but not of type {1}.' \
+            ''.format(num_pt_bg_left, type(num_pt_bg_left))
+        assert isinstance(num_pt_bg_right, int) and num_pt_bg_right >= 0,\
+            'Number of Pt at right for background {0} must be non-negative integers but not of type {1}.' \
+            ''.format(num_pt_bg_right, type(num_pt_bg_right))
 
         # set values
         self._mainWindow = main_window
@@ -152,6 +156,7 @@ class IntegratePeaksThread(QThread):
         self._selectedMaskName = mask_name
         self._numBgPtLeft = num_pt_bg_left
         self._numBgPtRight = num_pt_bg_right
+        self._scaleFactor = scale_factor
 
         # link signals
         self.peakMergeSignal.connect(self._mainWindow.update_merge_value)
@@ -238,8 +243,9 @@ class IntegratePeaksThread(QThread):
             # check given mask workspace
             if self._maskDetector:
                 self._mainWindow.controller.check_generate_mask_workspace(self._expNumber, scan_number,
-                                                                          self._selectedMaskName)
+                                                                          self._selectedMaskName, check_throw=True)
 
+            bkgd_pt_list = (self._numBgPtLeft, self._numBgPtRight)
             # integrate peak
             try:
                 status, ret_obj = self._mainWindow.controller.integrate_scan_peaks(exp=self._expNumber,
@@ -249,33 +255,39 @@ class IntegratePeaksThread(QThread):
                                                                                    merge_peaks=False,
                                                                                    use_mask=self._maskDetector,
                                                                                    normalization=self._normalizeType,
-                                                                                   mask_ws_name=self._selectedMaskName)
+                                                                                   mask_ws_name=self._selectedMaskName,
+                                                                                   scale_factor=self._scaleFactor,
+                                                                                   background_pt_tuple=bkgd_pt_list)
             except ValueError as val_err:
                 status = False
                 ret_obj = 'Unable to integrate scan {0} due to {1}.'.format(scan_number, str(val_err))
+            except RuntimeError as run_err:
+                status = False
+                ret_obj = 'Unable to integrate scan {0}: {1}.'.format(scan_number, run_err)
 
             # handle integration error
             if status:
                 # get PT dict
                 pt_dict = ret_obj
+                assert isinstance(pt_dict, dict), 'dictionary must'
+                self.set_integrated_peak_info(scan_number, pt_dict)
+                # information setup include
+                # - lorentz correction factor
+                # - peak integration dictionary
+                # - motor information: peak_info_obj.set_motor(motor_name, motor_step, motor_std_dev)
             else:
                 # integration failed
                 error_msg = str(ret_obj)
                 self.mergeMsgSignal.emit(self._expNumber, scan_number, 0, error_msg)
                 continue
 
-            # calculate background value
-            background_pt_list = pt_number_list[:self._numBgPtLeft] + pt_number_list[-self._numBgPtRight:]
-            avg_bg_value = self._mainWindow.controller.estimate_background(pt_dict, background_pt_list)
-
-            # correct intensity by background value
-            intensity_i = self._mainWindow.controller.simple_integrate_peak(pt_dict, avg_bg_value)
+            intensity1 = pt_dict['simple intensity']
             peak_centre = self._mainWindow.controller.get_peak_info(self._expNumber, scan_number).get_peak_centre()
 
             # emit signal to main app for peak intensity value
             mode = 1
             # center_i
-            self.peakMergeSignal.emit(self._expNumber, scan_number, float(intensity_i), list(peak_centre), mode)
+            self.peakMergeSignal.emit(self._expNumber, scan_number, float(intensity1), list(peak_centre), mode)
         # END-FOR
 
         # terminate the process
@@ -284,3 +296,43 @@ class IntegratePeaksThread(QThread):
         # self._mainWindow.ui.tableWidget_mergeScans.select_all_rows(False)
 
         return
+
+    def set_integrated_peak_info(self, scan_number, peak_integration_dict):
+        """
+        set the integrated peak information including
+        * calculate Lorentz correction
+        * add the integration result dictionary
+        * add motor step information
+        :return:
+        """
+        # print '[DB...BAT] Set Integrated Peak Info is called for exp {0} scan {1}.' \
+        #       ''.format(self._expNumber, scan_number)
+
+        # get peak information
+        peak_info_obj = self._mainWindow.controller.get_peak_info(self._expNumber, scan_number)
+
+        # get Q-vector of the peak center and calculate |Q| from it
+        peak_center_q = peak_info_obj.get_peak_centre_v3d().norm()
+        # get wave length
+        wavelength = self._mainWindow.controller.get_wave_length(self._expNumber, [scan_number])
+
+        # get motor step (choose from omega, phi and chi)
+        try:
+            motor_move_tup = self._mainWindow.controller.get_motor_step(self._expNumber, scan_number)
+            motor_name, motor_step, motor_std_dev = motor_move_tup
+        except RuntimeError as run_err:
+            return str(run_err)
+        except AssertionError as ass_err:
+            return str(ass_err)
+
+        # calculate lorentz correction
+        lorentz_factor = peak_integration_utility.calculate_lorentz_correction_factor(peak_center_q, wavelength,
+                                                                                      motor_step)
+
+        peak_info_obj.lorentz_correction_factor = lorentz_factor
+        # set motor
+        peak_info_obj.set_motor(motor_name, motor_step, motor_std_dev)
+        # set peak integration dictionary
+        peak_info_obj.set_integration(peak_integration_dict)
+
+        return
diff --git a/scripts/HFIR_4Circle_Reduction/peak_integration_info.ui b/scripts/HFIR_4Circle_Reduction/peak_integration_info.ui
new file mode 100644
index 0000000000000000000000000000000000000000..45065d1f824ff19a1644405d5e1c280fadc4933a
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/peak_integration_info.ui
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ui version="4.0">
+ <class>Dialog</class>
+ <widget class="QDialog" name="Dialog">
+  <property name="geometry">
+   <rect>
+    <x>0</x>
+    <y>0</y>
+    <width>1073</width>
+    <height>785</height>
+   </rect>
+  </property>
+  <property name="windowTitle">
+   <string>Dialog</string>
+  </property>
+  <layout class="QVBoxLayout" name="verticalLayout">
+   <item>
+    <widget class="QTableWidget" name="tableWidget"/>
+   </item>
+   <item>
+    <layout class="QHBoxLayout" name="horizontalLayout">
+     <item>
+      <spacer name="horizontalSpacer">
+       <property name="orientation">
+        <enum>Qt::Horizontal</enum>
+       </property>
+       <property name="sizeHint" stdset="0">
+        <size>
+         <width>40</width>
+         <height>20</height>
+        </size>
+       </property>
+      </spacer>
+     </item>
+     <item>
+      <widget class="QPushButton" name="pushButton">
+       <property name="text">
+        <string>Hide</string>
+       </property>
+      </widget>
+     </item>
+    </layout>
+   </item>
+  </layout>
+ </widget>
+ <resources/>
+ <connections/>
+</ui>
diff --git a/scripts/HFIR_4Circle_Reduction/peak_integration_utility.py b/scripts/HFIR_4Circle_Reduction/peak_integration_utility.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef5464087af3f60d518061adb381bbdb609df17d
--- /dev/null
+++ b/scripts/HFIR_4Circle_Reduction/peak_integration_utility.py
@@ -0,0 +1,772 @@
+# Utility methods to do peak integration
+import numpy
+import math
+from scipy.optimize import curve_fit
+import mantid.simpleapi as mantidsimple
+from mantid.api import AnalysisDataService
+
+
+def apply_lorentz_correction(peak_intensity, q, wavelength, step_omega):
+    """ Apply lorentz correction to intensity """
+    # calculate theta
+    sin_theta = q * wavelength / (4 * numpy.pi)
+    theta = math.asin(sin_theta)
+    corrected_intensity = peak_intensity * numpy.sin(2 * theta) * step_omega
+
+    return corrected_intensity
+
+
+def calculate_lorentz_correction_factor(q_sample, wavelength, motor_step):
+    """
+
+    :param q_sample:
+    :param wavelength:
+    :param motor_step:
+    :return:
+    """
+    sin_theta = q_sample * wavelength / (4 * numpy.pi)
+    theta = math.asin(sin_theta)
+    factor = numpy.sin(2 * theta) * motor_step
+
+    # print '[DB...BAT Lorentz] Q-sample = {0}, wavelength = {1}, motor step = {2}, theta = {3} --> factor = {4}.' \
+    #       ''.format(q_sample, wavelength, motor_step, theta, factor)
+
+    return factor
+
+
+def calculate_motor_step(motor_pos_array, motor_step_tolerance=0.5):
+    """
+    calculate the motor steps
+    :param motor_step_tolerance:
+    :param motor_pos_array:
+    :return:
+    """
+    assert isinstance(motor_pos_array, numpy.ndarray), 'Motor positions {0} must be given as a numpy array but not ' \
+                                                       'a {1}.'.format(motor_pos_array, type(motor_pos_array))
+    # need to check somewhere: assert len(pt_motor_dict) == len(pt_intensity_dict), 'blabla 3'
+
+    motor_step_vector = motor_pos_array[1:] - motor_pos_array[:-1]
+
+    motor_step = numpy.average(motor_step_vector)
+    motor_step_std = motor_step_vector.std()
+
+    if motor_step_std > motor_step_tolerance:
+        raise RuntimeError('Step deviation too large. Cannot use average!')
+
+    return motor_step
+
+
+def convert_motor_pos_intensity(integrated_pt_dict, motor_pos_dict):
+    """
+    :except: raise RuntimeError if
+    :param integrated_pt_dict:
+    :param motor_pos_dict:
+    :return: motor_pos_vec, pt_intensity_vec
+    """
+    pt_list = sorted(integrated_pt_dict.keys())
+
+    if len(motor_pos_dict) != len(pt_list):
+        raise RuntimeError('Integrated Pt intensities does not match motor positions')
+
+    pt_intensity_vec = numpy.ndarray(shape=(len(pt_list), ), dtype='float')
+    motor_pos_vec = numpy.ndarray(shape=(len(pt_list), ), dtype='float')
+
+    for i_pt, pt in enumerate(pt_list):
+        pt_intensity_vec[i_pt] = integrated_pt_dict[pt]
+        motor_pos_vec[i_pt] = motor_pos_dict[pt]
+
+    return motor_pos_vec, pt_intensity_vec
+
+
+def calculate_penalty(model_vec_y, exp_vec_y):
+    """
+    calculate the penalty/cost of the model to experimental data
+    say: error = 1/(N-1)sqrt(\sum(y_i - m_i)**2)
+    :param model_vec_y:
+    :param exp_vec_y:
+    :return:
+    """
+    # check inputs
+    assert isinstance(model_vec_y, numpy.ndarray), 'Model vec Y cannot be {0}.' \
+                                                   ''.format(type(model_vec_y))
+    assert isinstance(exp_vec_y, numpy.ndarray), 'Experimental vec Y cannot be {0}.' \
+                                                 ''.format(type(exp_vec_y))
+    if model_vec_y.shape != exp_vec_y.shape or len(model_vec_y) <= 1:
+        raise RuntimeError('Model and experimental data array do not match! Or too short!')
+
+    # calculate
+    diff_y = model_vec_y - exp_vec_y
+
+    diff_y2 = numpy.ndarray(shape=(len(diff_y),), dtype='float')
+    numpy.power(diff_y, 2, out=diff_y2)
+
+    cost = numpy.sqrt(diff_y2.sum()) / (len(model_vec_y) - 1.)
+
+    return cost
+
+
+def estimate_background(pt_intensity_dict, bg_pt_list):
+    """
+    Estimate background value by average the integrated counts of some Pt.
+    :param pt_intensity_dict:
+    :param bg_pt_list: Pt. for the first N and last M Pt.
+    :return:
+    """
+    # Check
+    assert isinstance(pt_intensity_dict, dict), 'Peak (Pt) intensities {0} must be given by dictionary but not {1}.' \
+                                                ''.format(pt_intensity_dict, type(pt_intensity_dict))
+    assert (isinstance(bg_pt_list, tuple) or isinstance(bg_pt_list, list)) and len(bg_pt_list) > 0,\
+        'background points {0} must be a 2-element tuple or list but not a {1}.'.format(bg_pt_list, type(bg_pt_list))
+
+    # from bg_pt_list
+    bg_sum = 0.
+    background_points = list()
+    pt_list = sorted(pt_intensity_dict.keys())
+    left_bgs = pt_list[0:bg_pt_list[0]]
+    background_points.extend(left_bgs)
+    right_bgs = pt_list[-bg_pt_list[1]:]
+    background_points.extend(right_bgs)
+
+    for bg_pt in background_points:
+        assert bg_pt in pt_intensity_dict, 'Pt. %d is not calculated.' % bg_pt
+        bg_sum += pt_intensity_dict[bg_pt]
+
+    avg_bg = float(bg_sum) / len(bg_pt_list)
+
+    return avg_bg
+
+
+def find_gaussian_start_values_by_observation(vec_x, vec_y):
+    """
+    find out the starting values of a gaussian + linear background with observation
+    :param vec_x:
+    :param vec_y:
+    :return: must be the same order as gaussian linear background function as x0, sigma, a, b
+    """
+    # assume that it is a quasi-ideal Gaussian
+    # find out the maximum Y with x
+    max_y_index = vec_y.argmax()
+
+    x0 = vec_x[max_y_index]
+    max_y = vec_y[max_y_index]
+    est_background = 0.5 * (vec_y[0] + vec_y[-1])
+    est_sigma = (vec_x[-1] - vec_x[0]) * 0.1
+    est_a = max(1.0, max_y - est_background)
+
+    return [x0, est_sigma, est_a, est_background]
+
+
+def fit_gaussian_linear_background(vec_x, vec_y, vec_e, start_value_list=None, find_start_value_by_fit=False):
+    """
+    Fit a curve with Gaussian + linear background
+    The starting value can be
+    1. specified by caller
+    2. guessed by fitting a pure gaussian to the data
+    3. guessed by observing the data
+    :param vec_x:
+    :param vec_y:
+    :param vec_e:
+    :param start_value_list: if not None, then it must have 4 elements:  x0, sigma, A, and b (for background)
+    :param find_start_value_by_fit: if it is True, then fit the curve with a Gaussian without background
+    :return: 3-tuple (1) float as error, (2) list/tuple as x0, sigma, a, b , (3) 4 x 4 covariance matrix
+    """
+    # check input
+    assert isinstance(vec_x, numpy.ndarray), 'Input vec_x must be a numpy.ndarray but not a {0}.'.format(vec_x)
+    assert isinstance(vec_y, numpy.ndarray), 'Input vec_y must be a numpy.ndarray but not a {0}.'.format(vec_y)
+    assert isinstance(vec_e, numpy.ndarray), 'Input vec_e must be a numpy.ndarray but not a {0}.'.format(vec_e)
+
+    # print '[DB] Vec X: ', vec_x
+    # print '[DB] Vec Y: ', vec_y
+    # print '[DB] Vec e: ', vec_e
+    # print '[DB] Start values: ', start_value_list
+    # print '[DB] Find start value by fit: ', find_start_value_by_fit
+
+    # starting value
+    if isinstance(start_value_list, list):
+        assert len(start_value_list) == 4, 'If specified, there must be 4 values: a, x0, sigma and b but not {0}.' \
+                                           ''.format(start_value_list)
+    elif find_start_value_by_fit:
+        # find out the starting value by fit a Gaussian without background
+        fit1_coeff, fit1_cov_matrix = curve_fit(gaussian, vec_x, vec_y)
+        start_x0, start_sigma, start_a = fit1_coeff
+        # get result
+        start_value_list = [start_x0, start_sigma, start_a, 0.0]
+
+        # print '[DB] Start value by fit: ', start_value_list
+
+    else:
+        # guess starting value via observation
+        start_value_list = find_gaussian_start_values_by_observation(vec_x, vec_y)
+
+        # print '[DB] Start value by observation: ', start_value_list
+    # END-IF-ELSE
+
+    """
+    [DB] Start values:  None
+    [DB] Find start value by fit:  False
+    [DB] Start value by observation:  [21, 19.0, 10.5, 100.0]: should be
+    """
+
+    # do second round fit
+    assert isinstance(start_value_list, list) and len(start_value_list) == 4, 'Starting value list must have 4 elements'
+    fit2_coeff, fit2_cov_matrix = curve_fit(gaussian_linear_background, vec_x, vec_y,  sigma=vec_e, p0=start_value_list)
+    # take sigma=vec_e,  out as it increases unstable
+
+    # calculate the model
+    x0, sigma, a, b = fit2_coeff
+    model_vec_y = gaussian_linear_background(vec_x, x0, sigma, a, b)
+
+    print 'Covariance matrix: ', fit2_cov_matrix
+
+    cost = calculate_penalty(model_vec_y, vec_y)
+
+    return cost, fit2_coeff, fit2_cov_matrix
+
+
+def fit_motor_intensity_model(motor_pos_dict, integrated_pt_dict):
+    """
+    construct a data as motor position vs counts, and do the fit with Gaussian + flat background
+    :param motor_pos_dict:
+    :param integrated_pt_dict:
+    :return: 3-tuple: dictionary for fitted parameter, dictionary for fitting error, covariance matrix
+    """
+    # check inputs
+    assert isinstance(motor_pos_dict, dict), 'Input motor position {0} must be a dictionary but not a {1}.' \
+                                             ''.format(motor_pos_dict, type(motor_pos_dict))
+    assert isinstance(integrated_pt_dict, dict), 'Input integrated Pt. intensity {0} must be a dictionary but not a ' \
+                                                 '{1}.'.format(integrated_pt_dict, type(integrated_pt_dict))
+
+    # construct the data
+    list_motor_pos = list()
+    list_intensity = list()
+
+    pt_list = motor_pos_dict.keys()
+    pt_list.sort()
+
+    for pt in pt_list:
+        if pt not in integrated_pt_dict:
+            raise RuntimeError('Pt. {0} does not exist in integrated intensity dictionary {1}'
+                               ''.format(pt, integrated_pt_dict))
+        list_motor_pos.append(motor_pos_dict[pt])
+        list_intensity.append(integrated_pt_dict[pt])
+    # END-FOR
+
+    vec_x = numpy.array(list_motor_pos)
+    vec_y = numpy.array(list_intensity)
+    # try to avoid negative Y value
+    vec_e = numpy.ndarray(shape=(len(vec_x),), dtype='float')
+    for index in range(len(vec_y)):
+        if vec_y[index] > 1.:
+            vec_e[index] = numpy.sqrt(vec_y[index])
+        else:
+            vec_e[index] = 1.
+    # END-FOR
+
+    # fit
+    gauss_error, gauss_parameters, cov_matrix = fit_gaussian_linear_background(vec_x, vec_y, vec_e)
+    # print '[DB] Overall Gaussian error = ', gauss_error
+    # print '[DB] Gaussian fitted parameters = ', gauss_parameters
+    # print '[DB] Gaussian covariance matrix = ', cov_matrix
+
+    # function parameters (in order): x0, sigma, a, b
+    # construct parameter dictionary and error dictionary
+    gauss_parameter_dict = dict()
+    gauss_error_dict = dict()
+
+    gauss_parameter_dict['x0'] = gauss_parameters[0]
+    gauss_parameter_dict['s'] = gauss_parameters[1]
+    gauss_parameter_dict['A'] = gauss_parameters[2]
+    gauss_parameter_dict['B'] = gauss_parameters[3]
+
+    if str(cov_matrix).count('inf') > 0:
+        # gaussian fit fails
+        cov_matrix = None
+    else:
+        # good
+        assert isinstance(cov_matrix, numpy.ndarray), 'Covarance matrix must be a numpy array'
+        gauss_error_dict['x02'] = cov_matrix[0, 0]
+        gauss_error_dict['s2'] = cov_matrix[1, 1]
+        gauss_error_dict['A2'] = cov_matrix[2, 2]
+        gauss_error_dict['B2'] = cov_matrix[3, 3]
+        gauss_error_dict['s_A'] = cov_matrix[1, 2]
+        gauss_error_dict['A_s'] = cov_matrix[2, 1]
+
+    return gauss_parameter_dict, gauss_error_dict, cov_matrix
+
+
+def get_motor_step_for_intensity(motor_pos_dict):
+    """
+    get the motor step for each measurement Pts.
+    if it is the first or last Pt. then use the difference between this Pt and its nearest Pts as motor step
+    else use 1/2 as the motor step to its previous one and 1/2 as the motor step to its following one.
+    :param motor_pos_dict:
+    :return: dictionary of motor steps for calculating intensity
+    """
+    # check
+    assert isinstance(motor_pos_dict, dict), 'Input motor position must in dictionary.'
+
+    # get Pt list
+    pt_list = motor_pos_dict.keys()
+    pt_list.sort()
+    if len(pt_list) < 2:
+        raise RuntimeError('Motor position dictionary has too few Pt (FYI: Motor positions: {0}'
+                           ''.format(motor_pos_dict))
+
+    # get step dictionary
+    motor_step_dict = dict()
+
+    for i_pt in range(len(pt_list)):
+        if i_pt == 0:
+            # first motor position
+            motor_step = motor_pos_dict[pt_list[1]] - motor_pos_dict[pt_list[0]]
+        elif i_pt == len(pt_list) - 1:
+            # last motor position
+            motor_step = motor_pos_dict[pt_list[-1]] - motor_pos_dict[pt_list[-2]]
+        else:
+            # regular
+            motor_step = 0.5 * (motor_pos_dict[pt_list[i_pt+1]] - motor_pos_dict[pt_list[i_pt-1]])
+        pt = pt_list[i_pt]
+        motor_step_dict[pt] = motor_step
+
+    return motor_step_dict
+
+
+def get_moving_motor_information(spice_table_name):
+    """
+
+    :param spice_table_name:
+    :return:
+    """
+    table = AnalysisDataService.retrieve(spice_table_name)
+
+    col_names = table.getColumnNames()
+    pt_index = col_names.index('Pt.')
+    omega_index = col_names.index('omega')
+    chi_index = col_names.index('chi')
+    phi_index = col_names.index('phi')
+
+    col_tup_dict = {'omega': omega_index, 'phi': phi_index, 'chi': chi_index}
+
+    std_list = list()
+    motor_vector_dict = dict()
+    for motor in col_tup_dict:
+        motor_index = col_tup_dict[motor]
+        motor_vector = numpy.array(table.column(motor_index))
+        motor_vector_dict[motor] = motor_vector
+        std_list.append((motor_vector.std(), motor))
+    std_list.sort()
+    moving_motor = std_list[-1][1]
+    pt_list = table.column(pt_index)
+
+    motor_pos_dict = dict()
+    for i_m in range(len(pt_list)):
+        motor_pos_dict[pt_list[i_m]] = motor_vector_dict[moving_motor][i_m]
+
+    return moving_motor, motor_pos_dict
+
+
+def gaussian_linear_background(x, x0, sigma, a, b):
+    """
+    Gaussian + linear background: y = a * exp( -(x-x0)**2/2*sigma**2 ) + b
+    :param x:
+    :param x0:
+    :param sigma:
+    :param a: maximum value
+    :param b: linear background
+    :return:
+    """
+    # gaussian + linear background
+
+    # print '[DB] Input x0 = ', x0, ', sigma = ', sigma, ', a = ', a, ', b = ', b
+    return a * numpy.exp(-(x - x0) ** 2 / (2. * sigma ** 2)) + b
+
+
+def gaussian(x, a, b, c):
+    # pure gaussian
+    return c*numpy.exp(-(x-a)**2/(2. * b * b))
+
+
+def gaussian_peak_intensity(parameter_dict, error_dict):
+    """
+    calculate peak intensity as a Gaussian
+    the equation to calculate Gaussian from -infinity to +infinity is
+    I = A\times s\times\sqrt{2\pi}
+    :param parameter_dict:
+    :param error_dict:
+    :return:
+    """
+    # check input
+    assert isinstance(parameter_dict, dict), 'Parameters {0} must be given as a dictionary but not a {1}.' \
+                                             ''.format(parameter_dict, type(parameter_dict))
+    assert isinstance(error_dict, dict), 'Errors {0} must be given as a dictionary but not a {1}.' \
+                                         ''.format(error_dict, type(error_dict))
+
+    # get the parameters from the dictionary
+    try:
+        gauss_a = parameter_dict['A']
+        gauss_sigma = parameter_dict['s']
+    except KeyError as key_err:
+        raise RuntimeError('Parameter dictionary must have "A", "s" (for sigma) but now only {0}. Error message: {1}'
+                           ''.format(parameter_dict.keys(), key_err))
+
+    # I = A\times s\times\sqrt{2 pi}
+    peak_intensity = gauss_a * gauss_sigma * numpy.sqrt(2. * numpy.pi)
+    # print '[DB] Gaussian Peak Intensity: A * S * sqrt(2 Pi) == ', gauss_a, gauss_sigma, ' --> peak intensity = ', peak_intensity
+
+    # calculate error
+    # \sigma_I^2 = 2\pi (A^2\cdot \sigma_s^2 + \sigma_A^2\cdot s^2 + 2\cdot A\cdot s\cdot \sigma_{As})
+    try:
+        error_a_sq = error_dict['A2']
+        error_s_sq = error_dict['s2']
+        error_a_s = error_dict['A_s']
+    except KeyError as key_err:
+        raise RuntimeError('Error dictionary must have "A2", "s2", "A_s" but not only found {0}. FYI: {1}'
+                           ''.format(error_dict.keys(), key_err))
+    intensity_error = numpy.sqrt(2/numpy.pi * (gauss_a**2 * error_s_sq + error_a_sq * gauss_sigma**2 +
+                                               2 * gauss_a * gauss_sigma * error_a_s))
+
+    return peak_intensity, intensity_error
+
+
+def calculate_peak_intensity_gauss(gauss_a, gauss_sigma, error_a_sq=None, error_sigma_sq=None,
+                                   error_a_sigma=None):
+    """
+    calculate the peak intensity, which is the area under the peak
+    if sigma == 1, then the integral is sqrt(pi);
+    then the value is sqrt(pi) * e^{-1/(2.*sigma**2)}
+    :param gauss_a:
+    :param gauss_sigma:
+    :param error_a_sq: error(a)**2
+    :param error_sigma_sq: error(sigma)**2
+    :param error_a_sigma: correlated error for a and sigma
+    :return:
+    """
+    integral = numpy.sqrt(2. * numpy.pi) * gauss_a * gauss_sigma
+
+    if error_a_sq is not None:
+        # calculate integral intensity error by propagation
+        # check
+        assert isinstance(error_a_sq, float), 'Error(a)**2 must be a float but not a {0}.'.format(type(error_a_sq))
+        assert isinstance(error_sigma_sq, float), 'Error(sigma)**2 must be a float but not a {0}.' \
+                                                  ''.format(type(error_sigma_sq))
+        assert isinstance(error_a_sigma, float), 'Error(a,sigma) must be a float but not a {0}.' \
+                                                 ''.format(type(error_a_sigma))
+        # calculate
+        error2 = gauss_a**2 * error_sigma_sq + error_a_sq * gauss_sigma**2 + 2. * gauss_a * gauss_sigma * error_a_sigma
+        error = numpy.sqrt(error2)
+    else:
+        error = numpy.sqrt(integral)
+
+    return integral, error
+
+
+def get_finer_grid(vec_x, factor):
+    """
+    insert values to a vector (grid) to make it finer
+    :param vec_x:
+    :param factor:
+    :return:
+    """
+    assert isinstance(factor, int), 'Insertion factor {0} must be an integer but not a {1}'.format(factor, type(factor))
+
+    orig_size = len(vec_x)
+    new_list = list()
+    for i in range(orig_size-1):
+        d_x = vec_x[i+1] - vec_x[i]
+        for j in range(factor):
+            temp_x = vec_x[i] + d_x * float(j) / float(factor)
+            new_list.append(temp_x)
+        # END-FOR
+    # END-FOR
+
+    # don't forget the last
+    new_list.append(vec_x[-1])
+
+    new_vector = numpy.array(new_list)
+
+    return new_vector
+
+
+def integrate_single_scan_peak(merged_scan_workspace_name, integrated_peak_ws_name,
+                               peak_radius, peak_centre,
+                               merge_peaks=True,
+                               normalization='', mask_ws_name=None,
+                               scale_factor=1):
+
+    """ Integrate the peak in a single scan with merged Pt.
+    :param merged_scan_workspace_name: MDEventWorkspace with merged Pts.
+    :param integrated_peak_ws_name: output PeaksWorkspace for integrated peak
+    :param peak_radius:
+    :param peak_centre:  a float radius or None for not using
+    :param merge_peaks: If selected, merged all the Pts can return 1 integrated peak's value;
+                        otherwise, integrate peak for each Pt.
+    :param normalization: normalization set up (by time or ...)
+    :param mask_ws_name: mask workspace name or None
+    :param scale_factor: integrated peaks' scaling factor
+    :return: dictionary of Pts.
+    """
+    # check
+    # assert isinstance(exp, int)
+    # assert isinstance(scan, int)
+    assert isinstance(peak_radius, float) or peak_radius is None, 'Peak radius {0} must be of type float but not ' \
+                                                                  '{1}.'.format(peak_radius, type(peak_radius))
+    assert len(peak_centre) == 3, 'Peak center {0} of type {1} must have 3 elements but not {2}.' \
+                                  ''.format(peak_centre, type(peak_centre), len(peak_centre))
+    assert isinstance(merge_peaks, bool), 'Flag to merge peak must be a boolean but not {0}.'.format(type(merge_peaks))
+
+    try:
+        peak_centre_str = '%f, %f, %f' % (peak_centre[0], peak_centre[1],
+                                          peak_centre[2])
+    except IndexError:
+        raise RuntimeError('Peak center {0} must have 3 elements.'.format(peak_centre))
+    except ValueError:
+        raise RuntimeError('Peak center {0} must have floats.'.format(peak_centre))
+
+    # normalization
+    norm_by_mon = False
+    norm_by_time = False
+    if normalization == 'time':
+        norm_by_time = True
+    elif normalization == 'monitor':
+        norm_by_mon = True
+
+    # integrate peak of a scan
+    mantidsimple.IntegratePeaksCWSD(InputWorkspace=merged_scan_workspace_name,
+                                    OutputWorkspace=integrated_peak_ws_name,
+                                    PeakRadius=peak_radius,
+                                    PeakCentre=peak_centre_str,
+                                    MergePeaks=merge_peaks,
+                                    NormalizeByMonitor=norm_by_mon,
+                                    NormalizeByTime=norm_by_time,
+                                    MaskWorkspace=mask_ws_name,
+                                    ScaleFactor=scale_factor)
+
+    # process the output workspace
+    pt_dict = dict()
+    out_peak_ws = AnalysisDataService.retrieve(integrated_peak_ws_name)
+    num_peaks = out_peak_ws.rowCount()
+
+    for i_peak in xrange(num_peaks):
+        peak_i = out_peak_ws.getPeak(i_peak)
+        run_number_i = peak_i.getRunNumber() % 1000
+        intensity_i = peak_i.getIntensity()
+        pt_dict[run_number_i] = intensity_i
+    # END-FOR
+
+    # # store the data into peak info
+    # if (exp, scan) not in self._myPeakInfoDict:
+    #     raise RuntimeError('Exp %d Scan %d is not recorded in PeakInfo-Dict' % (exp, scan))
+    # self._myPeakInfoDict[(exp, scan)].set_pt_intensity(pt_dict)
+
+    return True, pt_dict
+
+
+def integrate_peak_full_version(scan_md_ws_name, spice_table_name, output_peak_ws_name,
+                                peak_center, mask_workspace_name, norm_type,
+                                intensity_scale_factor, background_pt_tuple):
+    """
+    Integrate peak with the full version including
+    1. simple summation
+    2. simple summation with gaussian fit
+    3. integrate with fitted gaussian
+    :return: peak integration result in dictionary
+    """
+    def create_peak_integration_dict():
+        """
+        create a standard dictionary for recording peak integration result
+        keys are
+         - simple intensity
+         - simple error
+         - simple background
+         - intensity 2
+         - error 2
+         - gauss intensity
+         - gauss error
+         - gauss background
+         - gauss parameters
+         - motor positions: numpy array of motor positions
+         - pt intensities: numpy array of integrated intensities per Pt.
+        :return:
+        """
+        info_dict = {'simple intensity': 0.,
+                     'simple error': 0.,
+                     'simple background': 0.,
+                     'intensity 2': 0.,
+                     'error 2': 0.,
+                     'pt_range': '',
+                     'gauss intensity': 0.,
+                     'gauss error': 0.,
+                     'gauss background': 0.,
+                     'gauss parameters': None,
+                     'gauss errors': None,
+                     'motor positions': None,
+                     'pt intensities': None,
+                     'covariance matrix': None
+                     }
+
+        return info_dict
+    # END-DEF: create_peak_integration_dict()
+
+    # integrate the peak in MD workspace
+    try:
+        status, ret_obj = integrate_single_scan_peak(merged_scan_workspace_name=scan_md_ws_name,
+                                                     integrated_peak_ws_name=output_peak_ws_name,
+                                                     peak_radius=1.0,
+                                                     peak_centre=peak_center,
+                                                     merge_peaks=False,
+                                                     mask_ws_name=mask_workspace_name,
+                                                     normalization=norm_type,
+                                                     scale_factor=intensity_scale_factor)
+    except RuntimeError as run_err:
+        raise RuntimeError('Failed to integrate peak at {0} due to {1}'.format(scan_md_ws_name, run_err))
+    except Exception as run_err:
+        raise RuntimeError('Failed (2) to integrate peak at {0} due to {1}'.format(scan_md_ws_name, run_err))
+
+    # result due to error
+    if status is False:
+        error_message = ret_obj
+        raise RuntimeError('Unable to integrate peak of workspace {0} due to {1}.'
+                           ''.format(scan_md_ws_name, error_message))
+    else:
+        # process result
+        integrated_pt_dict = ret_obj
+        assert isinstance(integrated_pt_dict, dict), 'Returned masked Pt dict must be a dictionary'
+
+    # create output dictionary
+    peak_int_dict = create_peak_integration_dict()
+
+    # get moving motor information. candidates are 2theta, phi and chi
+    motor, motor_pos_dict = get_moving_motor_information(spice_table_name)
+
+    # check motor position dictionary and integrated per Pt. peak intensity
+    motor_pos_vec, pt_intensity_vec = convert_motor_pos_intensity(integrated_pt_dict, motor_pos_dict)
+    peak_int_dict['motor positions'] = motor_pos_vec
+    peak_int_dict['pt intensities'] = pt_intensity_vec
+    peak_int_dict['mask'] = mask_workspace_name
+
+    # get motor step per pt.
+    try:
+        motor_step_dict = get_motor_step_for_intensity(motor_pos_dict)
+    except RuntimeError as run_err:
+        raise RuntimeError('Unable to integrate workspace {0} due to {1}.'.format(scan_md_ws_name, run_err))
+
+    # calculate the intensity with background removed and correct intensity by background value
+    averaged_background = estimate_background(integrated_pt_dict, background_pt_tuple)
+    simple_intensity, simple_intensity_error, pt_range = simple_integrate_peak(integrated_pt_dict, averaged_background,
+                                                                               motor_step_dict)
+    peak_int_dict['simple background'] = averaged_background
+    peak_int_dict['simple intensity'] = simple_intensity
+    peak_int_dict['simple error'] = simple_intensity_error
+    peak_int_dict['simple background'] = averaged_background
+
+    # fit gaussian + flat background
+    parameters, errors, covariance_matrix = fit_motor_intensity_model(motor_pos_dict, integrated_pt_dict)
+    peak_int_dict['gauss parameters'] = parameters
+    peak_int_dict['gauss errors'] = errors
+    peak_int_dict['covariance matrix'] = covariance_matrix
+
+    if covariance_matrix is None or parameters['B'] < 0.:
+        # gaussian fit fails or output result is not correct
+        peak_int_dict['intensity 2'] = None
+        peak_int_dict['error 2'] = None
+
+        peak_int_dict['gauss intensity'] = None
+        peak_int_dict['gauss error'] = None
+
+    else:
+        # calculate intensity with method 2
+        motor_pos_center = parameters['x0']
+        motor_pos_sigma = parameters['s']
+        intensity_m2, error_m2, pt_range = simple_integrate_peak(integrated_pt_dict, parameters['B'],
+                                                                 motor_step_dict,
+                                                                 peak_center=motor_pos_center,
+                                                                 peak_sigma=motor_pos_sigma,
+                                                                 motor_pos_dict=motor_pos_dict,
+                                                                 sigma_range=2.)
+
+        peak_int_dict['intensity 2'] = intensity_m2
+        peak_int_dict['error 2'] = error_m2
+        peak_int_dict['pt_range'] = pt_range
+
+        # calculate gaussian (method 3)
+        intensity_gauss, intensity_gauss_error = gaussian_peak_intensity(parameters, errors)
+        peak_int_dict['gauss intensity'] = intensity_gauss
+        peak_int_dict['gauss error'] = intensity_gauss_error
+    # END-IF-ELSE
+
+    return peak_int_dict
+
+
+def simple_integrate_peak(pt_intensity_dict, bg_value, motor_step_dict, peak_center=None,
+                          peak_sigma=None, motor_pos_dict=None, sigma_range=2.):
+    """
+    A simple approach to integrate peak in a cuboid with background removed.
+    :param pt_intensity_dict:
+    :param bg_value:
+    :param motor_step_dict:
+    :param peak_center:
+    :param peak_sigma:
+    :param motor_pos_dict:
+    :param sigma_range:
+    :return:
+    """
+    # check
+    assert isinstance(pt_intensity_dict, dict), 'Pt. intensities {0} should be a dictionary but not a {1}.' \
+                                                ''.format(pt_intensity_dict, type(pt_intensity_dict))
+    assert isinstance(bg_value, float) and bg_value >= 0., 'Background value {0} must be a non-negative float.' \
+                                                           ''.format(bg_value)
+    assert isinstance(motor_step_dict, dict), 'Motor steps {0} must be given as a dictionary of Pt but not a {1}.' \
+                                              ''.format(motor_step_dict, type(motor_step_dict))
+
+    if peak_center is not None:
+        assert peak_sigma is not None and motor_pos_dict is not None and sigma_range is not None,\
+            'Must be specified'
+
+    pt_list = pt_intensity_dict.keys()
+    pt_list.sort()
+
+    # loop over Pt. to sum for peak's intensity
+    sum_intensity = 0.
+    error_2 = 0.
+    used_pt_list = list()
+
+    # raw intensity
+    sum_raw_int = 0.
+
+    motor_step = 0.
+    for pt in pt_list:
+        # check the motor position if required
+        if peak_center is not None:
+            motor_pos = motor_pos_dict[pt]
+            if abs(motor_pos - peak_center) > sigma_range * peak_sigma:
+                # peak is out of range
+                continue
+        # END-IF
+
+        intensity = pt_intensity_dict[pt]
+        motor_step_i = motor_step_dict[pt]
+        sum_intensity += (intensity - bg_value) * motor_step_i
+        motor_step = motor_step_i
+
+        if 0:
+            pass
+            # error_2 += numpy.sqrt(intensity) * motor_step_i
+        else:
+            sum_raw_int += intensity
+
+        used_pt_list.append(pt)
+
+        # print '[DB...BAT] Motor step size {0} = {1}'.format(pt, motor_step_i)
+    # END-FOR
+
+    # error = sqrt(sum I_i) * delta
+    error_2 = numpy.sqrt(sum_raw_int) * motor_step
+
+    # convert the Pt to list
+    if len(used_pt_list) > 0:
+        used_pt_list.sort()
+        pt_list_range = '{0} - {1}'.format(used_pt_list[0], used_pt_list[-1])
+    else:
+        pt_list_range = 'N/A'
+
+    return sum_intensity, error_2, pt_list_range
diff --git a/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py b/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py
index c35242b3627d25d1b0bb1eacd947b2ef477dda8a..a418d0158086db36f200cd9e31a9054a0c1c8743 100644
--- a/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py
+++ b/scripts/HFIR_4Circle_Reduction/peakprocesshelper.py
@@ -1,5 +1,6 @@
 #pylint: disable=W0403,R0902
-import numpy
+import time
+import random
 from fourcircle_utility import *
 from mantid.api import AnalysisDataService
 from mantid.kernel import V3D
@@ -26,7 +27,6 @@ class PeakProcessRecord(object):
                                                             'exist.' % peak_ws_name
 
         # set
-        self._isCurrentUserHKL = True
         self._myExpNumber = exp_number
         self._myScanNumber = scan_number
         self._myPeakWorkspaceName = peak_ws_name
@@ -36,20 +36,38 @@ class PeakProcessRecord(object):
 
         # Define class variable
         # HKL list
-        self._userHKL = None    # user specified HKL
+        self._calculatedHKL = None    # user specified HKL
         self._spiceHKL = None                        # spice HKL
         self._prevHKL = numpy.array([0., 0., 0.])    # previous HKL
 
+        # magnetic peak set up
+        self._kShiftVector = [0, 0, 0]
+        self._absorptionCorrection = 1.
+
+        # peak center and PeaksWorkspace
         self._avgPeakCenter = None
         self._myPeakWSKey = (None, None, None)
         self._myPeakIndex = None
-        self._ptIntensityDict = None
 
         self._myLastPeakUB = None
 
-        self._myIntensity = 0.
-        self._mySigma = 0.
+        self._myIntensity = None
+        self._gaussIntensity = 0.
+        self._gaussStdDev = 0.
+        self._lorenzFactor = None
+
+        # peak integration result
+        self._integrationDict = None
+        self._ptIntensityDict = None
+
+        # some motor/goniometer information for further correction
+        self._movingMotorTuple = None
+
+        # Figure print
+        self._fingerPrint = '{0:.7f}.{1}'.format(time.time(), random.randint(0, 10000000))
 
+        # print '[DB...BAT] Create PeakProcessRecord for Exp {0} Scan {1} ({2} | {3}).' \
+        #       ''.format(self._myExpNumber, self._myScanNumber, self._fingerPrint, hex(id(self)))
         return
 
     def calculate_peak_center(self, allow_bad_monitor=True):
@@ -115,6 +133,112 @@ class PeakProcessRecord(object):
 
         return
 
+    def generate_integration_report(self):
+        """
+        generate a dictionary for this PeakInfo
+        :return:
+        """
+        # print '[DB...BAT] PeakInfo (Scan: {0}, ID: {1}) generate report.  Spice HKL: {2}' \
+        #       ''.format(self._myScanNumber, hex(id(self)), self._spiceHKL)
+
+        report = dict()
+
+        if self._spiceHKL is not None:
+            report['SPICE HKL'] = str_format(self._spiceHKL)
+        else:
+            report['SPICE HKL'] = ''
+        if self._calculatedHKL is not None:
+            report['Mantid HKL'] = str_format(self._calculatedHKL)
+        else:
+            report['Mantid HKL'] = None
+        if self._integrationDict:
+            report['Mask'] = self._integrationDict['mask']
+            report['Raw Intensity'] = self._integrationDict['simple intensity']
+            report['Raw Intensity Error'] = self._integrationDict['simple error']
+            report['Intensity 2'] = self._integrationDict['intensity 2']
+            report['Intensity 2 Error'] = self._integrationDict['error 2']
+            report['Gauss Intensity'] = self._integrationDict['gauss intensity']
+            report['Gauss Error'] = self._integrationDict['gauss error']
+            report['Estimated Background'] = self._integrationDict['simple background']
+            if 'gauss parameters' in self._integrationDict:
+                report['Fitted Background'] = self._integrationDict['gauss parameters']['B']
+                report['Fitted A'] = self._integrationDict['gauss parameters']['A']
+                report['Fitted Sigma'] = self._integrationDict['gauss parameters']['s']
+            else:
+                report['Fitted Background'] = ''
+                report['Fitted A'] = ''
+                report['Fitted Sigma'] = ''
+        else:
+            report['Raw Intensity'] = ''
+            report['Raw Intensity Error'] = ''
+            report['Intensity 2'] = ''
+            report['Intensity 2 Error'] = ''
+            report['Gauss Intensity'] = ''
+            report['Gauss Error'] = ''
+            report['Lorentz'] = ''
+            report['Estimated Background'] = ''
+            report['Fitted Background'] = ''
+            report['Fitted A'] = ''
+            report['Fitted Sigma'] = ''
+            report['Mask'] = ''
+
+        report['Lorentz'] = self._lorenzFactor
+        if self._movingMotorTuple is None:
+            report['Motor'] = ''
+            report['Motor Step'] = None
+        else:
+            report['Motor'] = self._movingMotorTuple[0]
+            report['Motor Step'] = self._movingMotorTuple[1]
+        report['K-vector'] = self._kShiftVector
+        report['Absorption Correction'] = self._absorptionCorrection
+
+        return report
+
+    def get_intensity(self, algorithm_type, lorentz_corrected):
+        """
+        get the integrated intensity with specified integration algorithm and whether
+        the result should be corrected by Lorentz correction factor
+        :param algorithm_type:
+        :param lorentz_corrected:
+        :return:
+        """
+        # check
+        if self._integrationDict is None and self._myIntensity is None:
+            raise RuntimeError('PeakInfo of Exp {0} Scan {1} ({2} | {3}) has not integrated setup.'
+                               ''.format(self._myExpNumber, self._myScanNumber, self._fingerPrint, hex(id(self))))
+        elif self._myIntensity is not None:
+            # return ZERO intensity due to previously found error
+            return self._myIntensity, 0.
+
+        try:
+            if algorithm_type == 0 or algorithm_type.startswith('simple'):
+                # simple
+                intensity = self._integrationDict['simple intensity']
+                std_dev = self._integrationDict['simple error']
+            elif algorithm_type == 1 or algorithm_type.count('mixed') > 0:
+                # intensity 2: mixed simple and gaussian
+                intensity = self._integrationDict['intensity 2']
+                std_dev = self._integrationDict['error 2']
+            elif algorithm_type == 2 or algorithm_type.count('gauss') > 0:
+                # gaussian
+                intensity = self._integrationDict['gauss intensity']
+                std_dev = self._integrationDict['gauss error']
+            else:
+                raise RuntimeError('Type {0} not supported yet.')
+        except KeyError as key_err:
+            err_msg = 'Some key(s) does not exist in dictionary with keys {0}. FYI: {1}' \
+                      ''.format(self._integrationDict.keys(), key_err)
+            raise RuntimeError(err_msg)
+
+        if intensity is None:
+            intensity = 0.
+            std_dev = 0.
+        elif lorentz_corrected:
+            intensity *= self._lorenzFactor
+            std_dev *= self._lorenzFactor
+
+        return intensity, std_dev
+
     def get_peak_centre(self):
         """ get weighted peak centre
         :return: Qx, Qy, Qz (3-double-tuple)
@@ -148,16 +272,37 @@ class PeakProcessRecord(object):
         """
         if user_hkl:
             # return user-specified HKL
-            assert self._userHKL is not None, 'User HKL is None (not set up yet)'
-            ret_hkl = self._userHKL
+            assert self._calculatedHKL is not None, 'User HKL is None (not set up yet)'
+            ret_hkl = self._calculatedHKL
         else:
             # get HKL from SPICE file
             # if self._spiceHKL is None:
             self.retrieve_hkl_from_spice_table()
             ret_hkl = self._spiceHKL
 
+            # print '[DB...BAT] PeakInfo (Scan: {0}, ID: {1}) SPICE HKL: {2}' \
+            #       ''.format(self._myScanNumber, hex(id(self)), self._spiceHKL)
+
         return ret_hkl
 
+    def get_experiment_info(self):
+        """
+
+        :return: 2-tuple of integer as experiment number
+        """
+        return self._myExpNumber, self._myScanNumber
+
+    def get_sample_frame_q(self, peak_index):
+        """
+        Get Q in sample frame
+        :return: 3-tuple of floats as Qx, Qy, Qz
+        """
+        peak_ws = AnalysisDataService.retrieve(self._myPeakWorkspaceName)
+        peak = peak_ws.getPeak(peak_index)
+        q_sample = peak.getQSampleFrame()
+
+        return q_sample.getX(), q_sample.getY(), q_sample.getZ()
+
     def get_weighted_peak_centres(self):
         """ Get the peak centers found in peak workspace.
         Guarantees: the peak centers and its weight (detector counts) are exported
@@ -184,6 +329,61 @@ class PeakProcessRecord(object):
 
         return peak_center_list, peak_intensity_list
 
+    def set_k_vector(self, k_vector):
+        """
+
+        :param k_vector:
+        :return:
+        """
+        # check input
+        assert not isinstance(k_vector, str) and len(k_vector) == 3, 'K-vector {0} must have 3 items.'.format(k_vector)
+
+        self._kShiftVector = k_vector[:]
+
+        return
+
+    @property
+    def lorentz_correction_factor(self):
+        """
+
+        :return:
+        """
+        if self._lorenzFactor is None:
+            raise RuntimeError('Lorentz factor has not been calculated for Exp {0} Scan {1} ({2} | {3}).'
+                               ''.format(self._myExpNumber, self._myScanNumber, self._fingerPrint, hex(id(self))))
+        return self._lorenzFactor
+
+    @lorentz_correction_factor.setter
+    def lorentz_correction_factor(self, factor):
+        """
+        get lorenz factor
+        :param factor:
+        :return:
+        """
+        assert isinstance(factor, float), 'Lorentz correction factor'
+        self._lorenzFactor = factor
+
+        # print '[DB...BAT] Exp {0} Scan {1}  ({2} | {3}) has Lorentz factor set up.' \
+        #       ''.format(self._myExpNumber, self._myScanNumber, self._fingerPrint, hex(id(self)))
+
+        return
+
+    @property
+    def md_workspace(self):
+        """
+        give out MDEventWorkspace name for merged scan
+        :return:
+        """
+        return self._myDataMDWorkspaceName
+
+    @property
+    def peaks_workspace(self):
+        """
+        give out PeaksWorkspace
+        :return:
+        """
+        return self._myPeakWorkspaceName
+
     def retrieve_hkl_from_spice_table(self):
         """ Get averaged HKL from SPICE table
         HKL will be averaged from SPICE table by assuming the value in SPICE might be right
@@ -191,7 +391,7 @@ class PeakProcessRecord(object):
         """
         # get SPICE table
         spice_table_name = get_spice_table_name(self._myExpNumber, self._myScanNumber)
-        assert AnalysisDataService.doesExist(spice_table_name), 'Spice table for exp %d scan %d cannot be found.' \
+        assert AnalysisDataService.doesExist(spice_table_name), 'Spice table for Exp %d Scan %d cannot be found.' \
                                                                 '' % (self._myExpNumber, self._myScanNumber)
 
         spice_table_ws = AnalysisDataService.retrieve(spice_table_name)
@@ -216,6 +416,19 @@ class PeakProcessRecord(object):
 
         return
 
+    def set_absorption_factor(self, abs_factor):
+        """
+        set absorption correction factor
+        :return:
+        """
+        # check
+        assert isinstance(abs_factor, float) or isinstance(abs_factor, int),\
+            'Absorption correction {0} must be an integer but not {1}.'.format(abs_factor, type(abs_factor))
+
+        self._absorptionCorrection = abs_factor
+
+        return
+
     def set_data_ws_name(self, md_ws_name):
         """ Set the name of MDEventWorkspace with merged Pts.
         :param md_ws_name:
@@ -238,9 +451,9 @@ class PeakProcessRecord(object):
         assert hkl.shape == (3,), 'HKL must be a 3-element 1-D array but not %s.' % str(hkl.shape)
 
         # store the HKL
-        if self._userHKL is not None:
-            self._prevHKL = self._userHKL[:]
-        self._userHKL = hkl
+        if self._calculatedHKL is not None:
+            self._prevHKL = self._calculatedHKL[:]
+        self._calculatedHKL = hkl
 
         return
 
@@ -262,83 +475,75 @@ class PeakProcessRecord(object):
             mi_l = float(mi_l)
         # END-IF
 
-        if self._userHKL is None:
+        if self._calculatedHKL is None:
             # init HKL
-            self._userHKL = numpy.ndarray(shape=(3,), dtype='float')
+            self._calculatedHKL = numpy.ndarray(shape=(3,), dtype='float')
         else:
             # save previous HKL
-            self._prevHKL = self._userHKL[:]
+            self._prevHKL = self._calculatedHKL[:]
 
         # set current
-        self._userHKL[0] = mi_h
-        self._userHKL[1] = mi_k
-        self._userHKL[2] = mi_l
+        self._calculatedHKL[0] = mi_h
+        self._calculatedHKL[1] = mi_k
+        self._calculatedHKL[2] = mi_l
 
         return
 
-    def get_intensity(self):
-        """ Get current peak intensity
-        :return:
+    def set_motor(self, motor_name, motor_step, motor_std_dev):
         """
-        return self._myIntensity
-
-    def set_intensity(self, peak_intensity):
-        """ Set peak intensity
-        :param peak_intensity:
+        set motor step information
+        :param motor_name:
+        :param motor_step:
+        :param motor_std_dev:
         :return:
         """
-        assert isinstance(peak_intensity, float), 'Input peak intensity %s is not a float.' % str(peak_intensity)
-        assert peak_intensity >= -0., 'Input peak intensity %f is negative.' % peak_intensity
+        assert isinstance(motor_name, str), 'Motor name {0} must be a string but not {1}.' \
+                                            ''.format(motor_name, type(motor_name))
+        assert isinstance(motor_step, float), 'Motor float {0} must be a string but not {1}.' \
+                                              ''.format(motor_step, type(motor_step))
+        assert isinstance(motor_std_dev, float), 'Standard deviation type must be float'
 
-        self._myIntensity = peak_intensity
+        self._movingMotorTuple = (motor_name, motor_step, motor_std_dev)
 
         return
 
-    def set_pt_intensity(self, pt_intensity_dict):
+    def set_integration(self, peak_integration_dict):
         """
-        Set Pt. intensity
-        :param pt_intensity_dict:
+        set the integration result by information stored in a dictionary
+        :param peak_integration_dict:
         :return:
         """
-        assert isinstance(pt_intensity_dict, dict)
+        assert isinstance(peak_integration_dict, dict),\
+            'Integrated peak information {0} must be given by a dictionary but not a {1}.' \
+            ''.format(peak_integration_dict, type(peak_integration_dict))
 
-        self._ptIntensityDict = pt_intensity_dict
+        # print '[DB...BAT] Exp {0} Scan {1}  ({2} | {3}) has integrated dictionary set up.' \
+        #       ''.format(self._myExpNumber, self._myScanNumber, self._fingerPrint, hex(id(self)))
+
+        self._integrationDict = peak_integration_dict
 
         return
 
-    def get_sigma(self):
-        """ Get peak intensity's sigma
-        :return:
+    def set_intensity_to_zero(self):
         """
-        return self._mySigma
-
-    def set_sigma(self, sigma):
-        """ set peak intensity's sigma
+        if peak integration is wrong, then set the intensity to zero
         :return:
         """
-        assert isinstance(sigma, float) and sigma > -0.
-
-        self._mySigma = sigma
+        self._myIntensity = -0.
 
         return
 
-    def get_experiment_info(self):
+    def set_pt_intensity(self, pt_intensity_dict):
         """
-
-        :return: 2-tuple of integer as experiment number
+        Set Pt. intensity
+        :param pt_intensity_dict:
+        :return:
         """
-        return self._myExpNumber, self._myScanNumber
+        assert isinstance(pt_intensity_dict, dict)
 
-    def get_sample_frame_q(self, peak_index):
-        """
-        Get Q in sample frame
-        :return: 3-tuple of floats as Qx, Qy, Qz
-        """
-        peak_ws = AnalysisDataService.retrieve(self._myPeakWorkspaceName)
-        peak = peak_ws.getPeak(peak_index)
-        q_sample = peak.getQSampleFrame()
+        self._ptIntensityDict = pt_intensity_dict
 
-        return q_sample.getX(), q_sample.getY(), q_sample.getZ()
+        return
 
 
 def build_pt_spice_table_row_map(spice_table_ws):
@@ -356,3 +561,22 @@ def build_pt_spice_table_row_map(spice_table_ws):
         pt_spice_row_dict[pt_number] = i_row
 
     return pt_spice_row_dict
+
+
+def str_format(float_items):
+    """
+
+    :param float_items:
+    :return:
+    """
+    format_str = ''
+    for index, value in enumerate(float_items):
+        if index > 0:
+            format_str += ', '
+        if isinstance(value, float):
+            format_str += '{0:.4f}'.format(value)
+        else:
+            format_str += '{0}'.format(value)
+    # END-FOR
+
+    return format_str
diff --git a/scripts/HFIR_4Circle_Reduction/project_manager.py b/scripts/HFIR_4Circle_Reduction/project_manager.py
index 0d2463d4ed5d5a709458d7e20b4d1d7e8f742f72..656a158e8e6d92b81944520aa3553752e3a51ed8 100644
--- a/scripts/HFIR_4Circle_Reduction/project_manager.py
+++ b/scripts/HFIR_4Circle_Reduction/project_manager.py
@@ -82,7 +82,14 @@ class ProjectManager(object):
         for ws_name in self._wsList:
             md_file_name = os.path.join(self._wsDir, ws_name + '.nxs')
             if overwrite or not os.path.exists(md_file_name):
-                mantidsimple.SaveMD(InputWorkspace=ws_name, Filename=md_file_name)
+                try:
+                    mantidsimple.SaveMD(InputWorkspace=ws_name, Filename=md_file_name)
+                except RuntimeError as run_err:
+                    print '[ERROR] Unable to save {0} due to RuntimeError {1}.'.format(ws_name, run_err)
+                except Exception as arb_err:
+                    print '[ERROR] Unable to save {0} due to arbitrary exception {1}.'.format(ws_name, arb_err)
+            # END-IF
+        # END-FOR (ws_name)
 
         with open(self._projectPath, 'w') as pickle_file:
             pickle.dump(self._variableDict, pickle_file, pickle.HIGHEST_PROTOCOL)
@@ -103,7 +110,14 @@ class ProjectManager(object):
         # load data
         for ws_name in self._wsList:
             md_file_path = os.path.join(self._wsDir, ws_name + '.nxs')
-            mantidsimple.LoadMD(Filename=md_file_path, OutputWorkspace=ws_name)
+            try:
+                mantidsimple.LoadMD(Filename=md_file_path, OutputWorkspace=ws_name)
+            except RuntimeError as run_err:
+                print '[DB] Unable to load file {0} due to RuntimeError {1}.'.format(md_file_path, run_err)
+            except OSError as run_err:
+                print '[DB] Unable to load file {0} due to OSError {1}.'.format(md_file_path, run_err)
+            except IOError as run_err:
+                print '[DB] Unable to load file {0} due to IOError {1}.'.format(md_file_path, run_err)
         # END-FOR
 
         return
diff --git a/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py b/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py
index 998e74ac3ec2a3f19ccd7270b96007e807dccfe2..0df9b5082a500e2c32d2e7dca38ac2cdba5d6029 100644
--- a/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py
+++ b/scripts/HFIR_4Circle_Reduction/reduce4circleControl.py
@@ -16,6 +16,8 @@ from fourcircle_utility import *
 from peakprocesshelper import PeakProcessRecord
 import fputility
 import project_manager
+import peak_integration_utility
+import absorption
 
 import mantid
 import mantid.simpleapi as mantidsimple
@@ -25,9 +27,8 @@ from mantid.kernel import V3D
 
 DebugMode = True
 
-# TODO - changed without configuration
-DET_X_SIZE = 512
-DET_Y_SIZE = 512
+# DET_X_SIZE = 512
+# DET_Y_SIZE = 512
 
 MAX_SCAN_NUMBER = 100000
 
@@ -107,6 +108,12 @@ class CWSCDReductionControl(object):
         self._detSampleDistanceDict = dict()
         self._detCenterDict = dict()
 
+        # detector geometry: initialized to unphysical value
+        self._detectorSize = [-1, -1]
+
+        # reference workspace for LoadMask
+        self._refWorkspaceForMask = None
+
         # register startup
         mantid.UsageService.registerFeatureUsage("Interface","4-Circle Reduction",False)
 
@@ -142,9 +149,9 @@ class CWSCDReductionControl(object):
         :return: k_index of the (k_x, k_y, k_z)
         """
         # check
-        assert isinstance(k_x, float)
-        assert isinstance(k_y, float)
-        assert isinstance(k_z, float)
+        assert isinstance(k_x, float), 'Kx is wrong'
+        assert isinstance(k_y, float), 'Ky is wrong'
+        assert isinstance(k_z, float), 'Kz is wrong'
 
         k_shift_vector = (k_x, k_y, k_z)
         self._kShiftDict[self._kVectorIndex] = [k_shift_vector, []]
@@ -189,16 +196,6 @@ class CWSCDReductionControl(object):
 
         return
 
-    @staticmethod
-    def apply_lorentz_correction(peak_intensity, q, wavelength, step_omega):
-        """ Apply lorentz correction to intensity """
-        # calculate theta
-        sin_theta = q * wavelength/(4*math.pi)
-        theta = math.asin(sin_theta)
-        corrected_intensity = peak_intensity * math.sin(2*theta) * step_omega
-
-        return corrected_intensity
-
     def find_peak(self, exp_number, scan_number, pt_number_list=None):
         """ Find 1 peak in sample Q space for UB matrix
         :param exp_number:
@@ -230,7 +227,8 @@ class CWSCDReductionControl(object):
                                  PeakDistanceThreshold=5.,
                                  DensityThresholdFactor=0.1,
                                  OutputWorkspace=peak_ws_name)
-        assert AnalysisDataService.doesExist(peak_ws_name)
+        assert AnalysisDataService.doesExist(peak_ws_name), 'PeaksWorkspace {0} does not exist in ADS.' \
+                                                            ''.format(peak_ws_name)
 
         # add peak to UB matrix workspace to manager
         self._set_peak_info(exp_number, scan_number, peak_ws_name, merged_ws_name)
@@ -242,6 +240,29 @@ class CWSCDReductionControl(object):
 
         return True, peak_center
 
+    @staticmethod
+    def find_detector_size(exp_directory, exp_number):
+        """
+        find detector size from experiment directory
+        :param exp_directory:
+        :param exp_number
+        :return:
+        """
+        # guess the file name
+        first_xm_file = os.path.join(exp_directory, 'HB3A_Exp{0}_Scan0001_00001.xml'.format(exp_number))
+        if os.path.exists(first_xm_file):
+            file_size = os.path.getsize(first_xm_file)
+            if file_size < 136132 * 2:
+                det_size = 256, 256
+            elif file_size < 529887 * 2:
+                det_size = 512, 512
+            else:
+                raise RuntimeError('File size is over {0}.  It is not supported.')
+
+            return True, det_size
+
+        return False, 'Unable to find first Pt file {0}'.format(first_xm_file)
+
     def calculate_ub_matrix(self, peak_info_list, a, b, c, alpha, beta, gamma):
         """
         Calculate UB matrix
@@ -420,20 +441,28 @@ class CWSCDReductionControl(object):
 
         return True, error_message
 
-    def check_generate_mask_workspace(self, exp_number, scan_number, mask_tag):
+    def check_generate_mask_workspace(self, exp_number, scan_number, mask_tag, check_throw):
         """
-        Check whether a workspace does exist.
+        Check whether a MaskWorkspace exists according to the tag
         If it does not, then generate one according to the tag
+
+        A MaskWorkspace's name is exactly the same as the tag of the mask specified by user in
+        reduction GUI.
+
         :param exp_number:
         :param scan_number:
-        :param mask_tag:
+        :param mask_tag: string as the tag of the mask.
+        :param check_throw
         :return:
         """
         # Check
-        assert isinstance(exp_number, int)
-        assert isinstance(scan_number, int)
-        assert isinstance(mask_tag, str)
+        assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}.' \
+                                            ''.format(exp_number, type(exp_number))
+        assert isinstance(scan_number, int), 'Scan number {0} ({1}) must be an integer.' \
+                                             ''.format(scan_number, type(scan_number))
+        assert isinstance(mask_tag, str), 'Mask tag {0} ({1}) must be a string.'.format(mask_tag, type(mask_tag))
 
+        # MaskWorkspace's name is same as mask's tag
         mask_ws_name = mask_tag
 
         if AnalysisDataService.doesExist(mask_ws_name) is False:
@@ -444,7 +473,11 @@ class CWSCDReductionControl(object):
             ur = region_of_interest[1]
             self.generate_mask_workspace(exp_number, scan_number, ll, ur, mask_ws_name)
 
-        return
+        if check_throw:
+            assert AnalysisDataService.doesExist(mask_ws_name), 'MaskWorkspace %s does not exist.' \
+                                                                 '' % mask_ws_name
+
+        return mask_ws_name
 
     def does_file_exist(self, exp_number, scan_number, pt_number=None):
         """
@@ -553,8 +586,10 @@ class CWSCDReductionControl(object):
         :return:
         """
         # check
-        assert isinstance(exp_number, int)
-        assert isinstance(scan_number, int)
+        assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}.' \
+                                            ''.format(exp_number, type(scan_number))
+        assert isinstance(scan_number, int), 'Scan number {0} must be an integer but not a {1}.' \
+                                             ''.format(scan_number, type(scan_number))
 
         # get SPICE table
         spice_table_name = get_spice_table_name(exp_number, scan_number)
@@ -625,49 +660,65 @@ class CWSCDReductionControl(object):
         assert len(scan_kindex_dict) == 0 or len(scan_kindex_dict) >= len(scan_number_list), error_message
 
         # form peaks
-        peaks = list()
         no_shift = len(scan_kindex_dict) == 0
 
         # get ub matrix
         ub_matrix = self.get_ub_matrix(exp_number)
 
-        for scan_number in scan_number_list:
-            peak_dict = dict()
-            try:
-                peak_dict['hkl'] = self._myPeakInfoDict[(exp_number, scan_number)]. get_hkl(user_hkl=True)
-            except RuntimeError as run_err:
-                return False, str('Peak index error: %s.' % run_err)
+        for algorithm_type in ['simple', 'mixed', 'gauss']:
+            # set list of peaks for exporting
+            peaks = list()
+            for scan_number in scan_number_list:
+                peak_dict = dict()
+                try:
+                    peak_dict['hkl'] = self._myPeakInfoDict[(exp_number, scan_number)].get_hkl(user_hkl=True)
+                except RuntimeError as run_err:
+                    return False, str('Peak index error: %s.' % run_err)
 
-            peak_dict['intensity'] = self._myPeakInfoDict[(exp_number, scan_number)].get_intensity()
-            peak_dict['sigma'] = self._myPeakInfoDict[(exp_number, scan_number)].get_sigma()
-            if no_shift:
-                peak_dict['kindex'] = 0
-            else:
-                peak_dict['kindex'] = scan_kindex_dict[scan_number]
+                intensity, std_dev = self._myPeakInfoDict[(exp_number, scan_number)].get_intensity(
+                    algorithm_type, lorentz_corrected=True)
 
-            if export_absorption:
-                # calculate absorption correction
-                import absorption
+                if intensity < std_dev:
+                    # error is huge, very likely bad gaussian fit
+                    print '[INFO] Integration Type {0}: Scan {1} Intensity {2} < Std Dev {2} Excluded from exporting.' \
+                          ''.format(algorithm_type, scan_number, intensity, std_dev)
+                    continue
+                # END-IF
 
-                spice_ub = convert_mantid_ub_to_spice(ub_matrix)
-                up_cart, us_cart = absorption.calculate_absorption_correction_2(
-                    exp_number, scan_number, spice_ub)
-                peak_dict['up'] = up_cart
-                peak_dict['us'] = us_cart
+                peak_dict['intensity'] = intensity
+                peak_dict['sigma'] = std_dev
+                if no_shift:
+                    peak_dict['kindex'] = 0
+                else:
+                    peak_dict['kindex'] = scan_kindex_dict[scan_number]
+
+                if export_absorption:
+                    # calculate absorption correction
+                    spice_ub = convert_mantid_ub_to_spice(ub_matrix)
+                    up_cart, us_cart = absorption.calculate_absorption_correction_2(
+                        exp_number, scan_number, spice_ub)
+                    peak_dict['up'] = up_cart
+                    peak_dict['us'] = us_cart
+
+                # append peak (in dict) to peaks
+                peaks.append(peak_dict)
+            # END-FOR (scan_number)
 
-            # append peak (in dict) to peaks
-            peaks.append(peak_dict)
-        # END-FOR (scan_number)
+            # get file name for this type
+            this_file_name = fullprof_file_name.split('.')[0] + '_' + algorithm_type + '.dat'
 
-        try:
-            file_content = fputility.write_scd_fullprof_kvector(
-                user_header=user_header, wave_length=exp_wave_length,
-                k_vector_dict=k_shift_dict, peak_dict_list=peaks,
-                fp_file_name=fullprof_file_name, with_absorption=export_absorption)
-        except AssertionError as error:
-            return False, 'AssertionError: %s.' % str(error)
-        except RuntimeError as error:
-            return False, 'RuntimeError: %s.' % str(error)
+            try:
+                file_content = fputility.write_scd_fullprof_kvector(
+                    user_header=user_header, wave_length=exp_wave_length,
+                    k_vector_dict=k_shift_dict, peak_dict_list=peaks,
+                    fp_file_name=this_file_name, with_absorption=export_absorption)
+            except AssertionError as error:
+                return False, 'AssertionError: %s.' % str(error)
+            except RuntimeError as error:
+                return False, 'RuntimeError: %s.' % str(error)
+
+            continue
+        # END-FOR
 
         return True, file_content
 
@@ -750,13 +801,13 @@ class CWSCDReductionControl(object):
         raw_ws = self.get_raw_data_workspace(exp_no, scan_no, pt_no)
         if raw_ws is None:
             return False, 'Raw data for Exp %d Scan %d Pt %d is not loaded.' % (exp_no, scan_no, pt_no)
-        print '[DB...BAT] Raw workspace size: ', raw_ws.getNumberHistograms()
 
         # Convert to numpy array
-        array2d = numpy.ndarray(shape=(DET_X_SIZE, DET_Y_SIZE), dtype='float')
-        for i in xrange(DET_X_SIZE):
-            for j in xrange(DET_Y_SIZE):
-                array2d[i][j] = raw_ws.readY(j * DET_X_SIZE + i)[0]
+        det_shape = (self._detectorSize[0], self._detectorSize[1])
+        array2d = numpy.ndarray(shape=det_shape, dtype='float')
+        for i in xrange(det_shape[0]):
+            for j in xrange(det_shape[1]):
+                array2d[i][j] = raw_ws.readY(j * det_shape[0] + i)[0]
 
         # Flip the 2D array to look detector from sample
         array2d = numpy.flipud(array2d)
@@ -777,11 +828,12 @@ class CWSCDReductionControl(object):
         """ Get region of interest
         :param exp_number:
         :param scan_number:
-        :return:
+        :return: region of interest
         """
         # check
-        assert isinstance(exp_number, int)
-        assert isinstance(scan_number, int) or scan_number is None
+        assert isinstance(exp_number, int), 'Experiment number {0} must be an integer.'.format(exp_number)
+        assert isinstance(scan_number, int) or scan_number is None, 'Scan number {0} must be either an integer or None.' \
+                                                                    ''.format(scan_number)
 
         if (exp_number, scan_number) in self._roiDict:
             # able to find region of interest for this scan
@@ -872,6 +924,9 @@ class CWSCDReductionControl(object):
                                                                 'it is of type %s now.' % (str(pt_number),
                                                                                            type(pt_number))
 
+        # print '[DB...BAT] Retrieve: Exp {0} Scan {1} Peak Info Object. Current keys are {0}.' \
+        #       ''.format(exp_number, scan_number, self._myPeakInfoDict.keys())
+
         # construct key
         if pt_number is None:
             p_key = (exp_number, scan_number)
@@ -881,6 +936,8 @@ class CWSCDReductionControl(object):
         # Check for existence
         if p_key in self._myPeakInfoDict:
             ret_value = self._myPeakInfoDict[p_key]
+            # print '[DB...BAT] Retrieved: Exp {0} Scan {1} Peak Info Object {2}.'.format(exp_number, scan_number,
+            #                                                                             hex(id(ret_value)))
         else:
             ret_value = None
 
@@ -937,8 +994,10 @@ class CWSCDReductionControl(object):
         :return:
         """
         # assert ...
-        assert isinstance(exp_number, int)
-        assert isinstance(scan_number, int)
+        assert isinstance(exp_number, int), 'Experiment number {0} ({1}) must be an integer.' \
+                                            ''.format(exp_number, type(exp_number))
+        assert isinstance(scan_number, int), 'Scan number {0} ({1}) must be an integer.' \
+                                             ''.format(scan_number, type(scan_number))
 
         # create an xml file
         mask_file_name = get_mask_xml_temp(self._workDir, exp_number, scan_number)
@@ -954,9 +1013,14 @@ class CWSCDReductionControl(object):
             # use given name
             mask_ws_name = str(mask_tag)
 
+        if self._refWorkspaceForMask is None:
+            return False, 'There is no reference workspace. Plot a Pt. first!'
+        elif AnalysisDataService.doesExist(self._refWorkspaceForMask) is False:
+            return False, 'Previous reference workspace has been deleted. Plot a Pt. first'
         mantidsimple.LoadMask(Instrument='HB3A',
                               InputFile=mask_file_name,
-                              OutputWorkspace=mask_ws_name)
+                              OutputWorkspace=mask_ws_name,
+                              RefWorkspace=self._refWorkspaceForMask)
         mantidsimple.InvertMask(InputWorkspace=mask_ws_name,
                                 OutputWorkspace=mask_ws_name)
 
@@ -1117,7 +1181,7 @@ class CWSCDReductionControl(object):
         temp_index_ws = AnalysisDataService.retrieve(temp_index_ws_name)
 
         if num_peak_index == 0:
-            return False, 'No peak can be indexed.'
+            return False, 'No peak can be indexed: {0}.'.format(error)
         elif num_peak_index > 1:
             raise RuntimeError('Case for PeaksWorkspace containing more than 1 peak is not '
                                'considered. Contact developer for this issue.')
@@ -1133,10 +1197,70 @@ class CWSCDReductionControl(object):
 
         return True, (hkl, error)
 
+    def integrate_scan_peak(self, exp_number, scan_number, peak_centre, mask_name, normalization,
+                            scale_factor, background_pt_tuple):
+        """
+        new way to integrate a peak in a scan
+        Note: it is going to replace "integrate_scan_peaks()"
+        :param exp_number:
+        :param scan_number:
+        :param peak_centre:
+        :param mask_name:
+        :param normalization:
+        :param scale_factor:
+        :param background_pt_tuple:
+        :return:
+        """
+        # check inputs
+        assert isinstance(exp_number, int), 'Experiment number {0} must be an integer but not a {1}.' \
+                                            ''.format(exp_number, type(exp_number))
+        assert isinstance(scan_number, int), 'Scan number {0} must be an integer but not a {1}.' \
+                                             ''.format(scan_number, type(scan_number))
+        assert isinstance(mask_name, str), 'Mask name {0} must be a string but not a {1}.' \
+                                           ''.format(mask_name, type(mask_name))
+        assert isinstance(normalization, str), 'Normalization type {0} must be a string but not a {1}.' \
+                                               ''.format(normalization, type(normalization))
+        assert isinstance(scale_factor, float) or isinstance(scale_factor, int),\
+            'Scale factor {0} must be a float or integer but not a {1}.'.format(scale_factor, type(scale_factor))
+        assert len(peak_centre) == 3, 'Peak center {0} must have 3 elements for (Qx, Qy, Qz).'.format(peak_centre)
+        # print '[DB...BAT] Background tuple {0} is of type {1}.'.format(background_pt_tuple, type(background_pt_tuple))
+        assert len(background_pt_tuple) == 2, 'Background tuple {0} must be of length 2.'.format(background_pt_tuple)
+
+        # get input MDEventWorkspace name for merged scan
+        status, ret_obj = self.get_pt_numbers(exp_number, scan_number)
+        if status:
+            pt_list = ret_obj
+        else:
+            raise RuntimeError('Unable to get Pt. list from Exp {0} Scan {1} due to {2}'
+                               ''.format(exp_number,scan_number, ret_obj))
+        md_ws_name = get_merged_md_name(self._instrumentName, exp_number, scan_number, pt_list)
+
+        # get the TableWorkspace name for Spice
+        spice_table_ws = get_spice_table_name(exp_number, scan_number)
+
+        # output PeaksWorkspace name and MaskWorkspace
+        if len(mask_name) > 0:
+            mask_ws_name = self.check_generate_mask_workspace(exp_number, scan_number, mask_name, check_throw=True)
+        else:
+            mask_ws_name = None
+        peak_ws_name = get_integrated_peak_ws_name(exp_number, scan_number, pt_list, mask_name)
+
+        # peak center
+        int_peak_dict = peak_integration_utility.integrate_peak_full_version(scan_md_ws_name=md_ws_name,
+                                                                             spice_table_name=spice_table_ws,
+                                                                             output_peak_ws_name=peak_ws_name,
+                                                                             peak_center=peak_centre,
+                                                                             mask_workspace_name=mask_ws_name,
+                                                                             norm_type=normalization,
+                                                                             intensity_scale_factor=scale_factor,
+                                                                             background_pt_tuple=background_pt_tuple)
+
+        return int_peak_dict
+
     def integrate_scan_peaks(self, exp, scan, peak_radius, peak_centre,
                              merge_peaks=True, use_mask=False,
                              normalization='', mask_ws_name=None,
-                             scale_factor=1):
+                             scale_factor=1.00, background_pt_tuple=None):
         """
         :param exp:
         :param scan:
@@ -1148,7 +1272,7 @@ class CWSCDReductionControl(object):
         :param normalization: normalization set up (by time or ...)
         :param mask_ws_name: mask workspace name or None
         :param scale_factor: integrated peaks' scaling factor
-        :return:
+        :return: dictionary of Pts.
         """
         # check
         assert isinstance(exp, int)
@@ -1157,208 +1281,58 @@ class CWSCDReductionControl(object):
         assert len(peak_centre) == 3
         assert isinstance(merge_peaks, bool)
 
-        # VZ-FUTURE - combine the download and naming for common use
-        # get spice file
-        spice_table_name = get_spice_table_name(exp, scan)
-        if AnalysisDataService.doesExist(spice_table_name) is False:
-            self.download_spice_file(exp, scan, False)
-            self.load_spice_scan_file(exp, scan)
-
-        # get MD workspace name
-        status, pt_list = self.get_pt_numbers(exp, scan)
-        assert status, str(pt_list)
-        md_ws_name = get_merged_md_name(self._instrumentName, exp, scan, pt_list)
-
-        peak_centre_str = '%f, %f, %f' % (peak_centre[0], peak_centre[1],
-                                          peak_centre[2])
-
-        # mask workspace
-        if use_mask:
-            if mask_ws_name is None:
-                # get default mask workspace name
-                mask_ws_name = get_mask_ws_name(exp, scan)
-            elif not AnalysisDataService.doesExist(mask_ws_name):
-                # the appointed mask workspace has not been loaded
-                # then load it from saved mask
-                self.check_generate_mask_workspace(exp, scan, mask_ws_name)
-
-            assert AnalysisDataService.doesExist(mask_ws_name), 'MaskWorkspace %s does not exist.' \
-                                                                '' % mask_ws_name
-
-            integrated_peak_ws_name = get_integrated_peak_ws_name(exp, scan, pt_list, use_mask)
-        else:
-            mask_ws_name = ''
-            integrated_peak_ws_name = get_integrated_peak_ws_name(exp, scan, pt_list)
-
-        # normalization
-        norm_by_mon = False
-        norm_by_time = False
-        if normalization == 'time':
-            norm_by_time = True
-        elif normalization == 'monitor':
-            norm_by_mon = True
-
-        # integrate peak of a scan
-        mantidsimple.IntegratePeaksCWSD(InputWorkspace=md_ws_name,
-                                        OutputWorkspace=integrated_peak_ws_name,
-                                        PeakRadius=peak_radius,
-                                        PeakCentre=peak_centre_str,
-                                        MergePeaks=merge_peaks,
-                                        NormalizeByMonitor=norm_by_mon,
-                                        NormalizeByTime=norm_by_time,
-                                        MaskWorkspace=mask_ws_name,
-                                        ScaleFactor=scale_factor)
-
-        # process the output workspace
-        pt_dict = dict()
-        out_peak_ws = AnalysisDataService.retrieve(integrated_peak_ws_name)
-        num_peaks = out_peak_ws.rowCount()
-
-        for i_peak in xrange(num_peaks):
-            peak_i = out_peak_ws.getPeak(i_peak)
-            run_number_i = peak_i.getRunNumber() % 1000
-            intensity_i = peak_i.getIntensity()
-            pt_dict[run_number_i] = intensity_i
-        # END-FOR
+        peak_int_dict = self.integrate_scan_peak(exp_number=exp, scan_number=scan, peak_centre=peak_centre,
+                                                 mask_name=mask_ws_name, normalization=normalization,
+                                                 scale_factor=scale_factor, background_pt_tuple=background_pt_tuple)
 
+        #
         # store the data into peak info
         if (exp, scan) not in self._myPeakInfoDict:
             raise RuntimeError('Exp %d Scan %d is not recorded in PeakInfo-Dict' % (exp, scan))
-        self._myPeakInfoDict[(exp, scan)].set_pt_intensity(pt_dict)
-
-        return True, pt_dict
-
-    def integrate_peaks_q(self, exp_no, scan_no):
-        """
-        Integrate peaks in Q-space
-        :param exp_no:
-        :param scan_no:
-        :return:
-        """
-        # Check inputs
-        assert isinstance(exp_no, int)
-        assert isinstance(scan_no, int)
-
-        # Get the SPICE file
-        spice_table_name = get_spice_table_name(exp_no, scan_no)
-        if AnalysisDataService.doesExist(spice_table_name) is False:
-            self.download_spice_file(exp_no, scan_no, False)
-            self.load_spice_scan_file(exp_no, scan_no)
-
-        # Find peaks & get the peak centers
-        spice_table = AnalysisDataService.retrieve(spice_table_name)
-        num_rows = spice_table.rowCount()
-
-        sum_peak_center = [0., 0., 0.]
-        sum_bin_counts = 0.
-
-        for i_row in xrange(num_rows):
-            pt_no = spice_table.cell(i_row, 0)
-            self.download_spice_xml_file(scan_no, pt_no, exp_no)
-            # self.load_spice_xml_file(exp_no, scan_no, pt_no)
-            self.find_peak(exp_no, scan_no, pt_no)
-            peak_ws_name = get_peak_ws_name(exp_no, scan_no, pt_no)
-            peak_ws = AnalysisDataService.retrieve(peak_ws_name)
-            if peak_ws.getNumberPeaks() == 1:
-                peak = peak_ws.getPeak(0)
-                peak_center = peak.getQSampleFrame()
-                bin_count = peak.getBinCount()
-
-                sum_peak_center[0] += bin_count * peak_center.X()
-                sum_peak_center[1] += bin_count * peak_center.Y()
-                sum_peak_center[2] += bin_count * peak_center.Z()
-
-                sum_bin_counts += bin_count
-
-            elif peak_ws.getNumberPeaks() > 1:
-                raise NotImplementedError('More than 1 peak???')
-        # END-FOR
-
-        final_peak_center = [0., 0., 0.]
-        for i in xrange(3):
-            final_peak_center[i] = sum_peak_center[i] * (1./sum_bin_counts)
-        #final_peak_center = sum_peak_center * (1./sum_bin_counts)
-
-        print '[INFO] Avg peak center = ', final_peak_center, 'Total counts = ', sum_bin_counts
-
-        # Integrate peaks
-        total_intensity = 0.
-        for i_row in xrange(num_rows):
-            pt_no = spice_table.cell(i_row, 0)
-            md_ws_name = get_single_pt_md_name(exp_no, scan_no, pt_no)
-            peak_ws_name = get_peak_ws_name(exp_no, scan_no, pt_no)
-            out_ws_name = peak_ws_name + '_integrated'
-            mantidsimple.IntegratePeaksCWSD(InputWorkspace=md_ws_name,
-                                            PeaksWorkspace=peak_ws_name,
-                                            OutputWorkspace=out_ws_name)
-            out_peak_ws = AnalysisDataService.retrieve(out_ws_name)
-            peak = out_peak_ws.getPeak(0)
-            intensity = peak.getIntensity()
-            total_intensity += intensity
-        # END-FOR
+        self._myPeakInfoDict[(exp, scan)].set_pt_intensity(peak_int_dict)
 
-        return total_intensity
+        return True, peak_int_dict
 
-    def integrate_peaks(self, exp_no, scan_no, pt_list, md_ws_name,
-                        peak_radius, bkgd_inner_radius, bkgd_outer_radius,
-                        is_cylinder):
+    @staticmethod
+    def gauss_correction_peak_intensity(pt_dict):
         """
-        Integrate peaks
-        :return: Boolean as successful or failed
+        fit a peak along Pt. with Gaussian and thus calculate background automatically
+        :param pt_dict:
+        :return: 3-tuple (intensity, background and information string)
         """
-        # Check input
-        if is_cylinder is True:
-            raise RuntimeError('Cylinder peak shape has not been implemented yet!')
-
-        if exp_no is None:
-            exp_no = self._expNumber
-        assert isinstance(exp_no, int)
-        assert isinstance(scan_no, int)
-        assert isinstance(peak_radius, float)
-        assert isinstance(bkgd_inner_radius, float)
-        assert isinstance(bkgd_outer_radius, float)
-        assert bkgd_inner_radius >= peak_radius
-        assert bkgd_outer_radius >= bkgd_inner_radius
-
-        # NEXT - Need to re-write this method according to documentation of IntegratePeaksCWSD()
-
-        # Get MD WS
-        if md_ws_name is None:
-            raise RuntimeError('Implement how to locate merged MD workspace name from '
-                               'Exp %d Scan %d Pt %s' % (exp_no, scan_no, str(pt_list)))
-        # Peak workspace
-        # create an empty peak workspace
-        if AnalysisDataService.doesExist('spicematrixws') is False:
-            raise RuntimeError('Workspace spicematrixws does not exist.')
-        mantidsimple.LoadInstrument(Workspace='', InstrumentName='HB3A')
-        target_peak_ws_name = 'MyPeakWS'
-        mantidsimple.CreatePeaksWorkspace(InstrumentWorkspace='spicematrixws', OutputWorkspace=target_peak_ws_name)
-        target_peak_ws = AnalysisDataService.retrieve(target_peak_ws_name)
-        # copy a peak
-        temp_peak_ws_name = 'peak1'
-        mantidsimple.FindPeaksMD(InputWorkspace='MergedSan0017_QSample',
-                                 PeakDistanceThreshold=0.5,
-                                 MaxPeaks=10,
-                                 DensityThresholdFactor=100,
-                                 OutputWorkspace=temp_peak_ws_name)
-
-        src_peak_ws = AnalysisDataService.retrieve(temp_peak_ws_name)
-        centre_peak = src_peak_ws.getPeak(0)
-        target_peak_ws.addPeak(centre_peak)
-        target_peak_ws.removePeak(0)
-
-        # Integrate peak
-        mantidsimple.IntegratePeaksMD(InputWorkspace='MergedSan0017_QSample',
-                                      PeakRadius=1.5,
-                                      BackgroundInnerRadius=1.5,
-                                      BackgroundOuterRadius=3,
-                                      PeaksWorkspace=target_peak_ws_name,
-                                      OutputWorkspace='SinglePeak1',
-                                      IntegrateIfOnEdge=False,
-                                      AdaptiveQBackground=True,
-                                      Cylinder=False)
-
-        raise RuntimeError('Implement ASAP!')
+        # check
+        assert isinstance(pt_dict, dict), 'Input must be a dictionary but not {0}'.format(type(pt_dict))
+
+        # convert to vector
+        tup_list = list()
+        for pt in pt_dict.keys():
+            tup_list.append((pt, pt_dict[pt]))
+        tup_list.sort()
+        list_x = list()
+        list_y = list()
+        for tup in tup_list:
+            list_x.append(float(tup[0]))
+            list_y.append(float(tup[1]))
+        vec_x = numpy.array(list_x)
+        vec_y = numpy.array(list_y)
+        vec_e = numpy.sqrt(vec_y)
+
+        # do fit
+        error, gauss_params, model_vec_y = peak_integration_utility.fit_gaussian_linear_background(vec_x, vec_y, vec_e)
+        x0, gauss_sigma, gauss_a, gauss_bkgd = gauss_params
+        if not (0 < x0 < vec_x[-1]):
+            raise RuntimeError('Fitted center of the peak {0} is out of range, which is not correct'.format(x0))
+        if gauss_a <= 0.:
+            raise RuntimeError('Fitted peak height {0} is negative!'.format(gauss_a))
+
+        # calculate the peak intensity
+        peak_intensity = peak_integration_utility.calculate_peak_intensity_gauss(gauss_a, gauss_sigma)
+
+        # information
+        info_str = 'Fit error = {0}: a = {1}, x0 = {2}, sigma = {3}, b = {4}'.format(error, gauss_a, x0, gauss_sigma,
+                                                                                     gauss_bkgd)
+
+        return peak_intensity, gauss_bkgd, info_str
 
     @staticmethod
     def load_scan_survey_file(csv_file_name):
@@ -1475,29 +1449,25 @@ class CWSCDReductionControl(object):
 
         # load SPICE Pt.  detector file
         pt_ws_name = get_raw_data_workspace_name(exp_no, scan_no, pt_no)
-        # new_idf_name = '/home/wzz/Projects/HB3A/NewDetector/HB3A_ND_Definition.xml'
-        new_idf_name = '/SNS/users/wzz/Projects/HB3A/HB3A_ND_Definition.xml'
-        if os.path.exists(new_idf_name) is False:
-            raise RuntimeError('Instrument file {0} cannot be found!'.format(new_idf_name))
         try:
             mantidsimple.LoadSpiceXML2DDet(Filename=xml_file_name,
                                            OutputWorkspace=pt_ws_name,
-                                           # FIXME - Need UI input
-                                           DetectorGeometry='512,512',
-                                           InstrumentFilename=new_idf_name,
                                            SpiceTableWorkspace=spice_table_name,
                                            PtNumber=pt_no)
+            if self._refWorkspaceForMask is None or AnalysisDataService.doesExist(pt_ws_name) is False:
+                self._refWorkspaceForMask = pt_ws_name
         except RuntimeError as run_err:
             return False, str(run_err)
 
         # Add data storage
-        assert AnalysisDataService.doesExist(pt_ws_name), 'blabla'
+        assert AnalysisDataService.doesExist(pt_ws_name), 'Unable to locate workspace {0}.'.format(pt_ws_name)
         raw_matrix_ws = AnalysisDataService.retrieve(pt_ws_name)
         self._add_raw_workspace(exp_no, scan_no, pt_no, raw_matrix_ws)
 
         return True, pt_ws_name
 
-    def merge_multiple_scans(self, scan_md_ws_list, scan_peak_centre_list, merged_ws_name):
+    @staticmethod
+    def merge_multiple_scans(scan_md_ws_list, scan_peak_centre_list, merged_ws_name):
         """
         Merge multiple scans
         :param scan_md_ws_list: List of MDWorkspace, each of which is for a scan.
@@ -1720,13 +1690,6 @@ class CWSCDReductionControl(object):
                 if exp_no in self._userWavelengthDict:
                     alg_args['UserDefinedWavelength'] = self._userWavelengthDict[exp_no]
 
-                # TODO/FIXME/NOW - Should get a flexible way to define IDF or no IDF
-                # new_idf_name = '/home/wzz/Projects/HB3A/NewDetector/HB3A_ND_Definition.xml'
-                new_idf_name = '/SNS/users/wzz/Projects/HB3A/HB3A_ND_Definition.xml'
-                if os.path.exists(new_idf_name) is False:
-                    raise RuntimeError('Instrument file {0} cannot be found!'.format(new_idf_name))
-                alg_args['InstrumentFilename'] = new_idf_name
-
                 # call:
                 mantidsimple.ConvertCWSDExpToMomentum(**alg_args)
 
@@ -1801,7 +1764,7 @@ class CWSCDReductionControl(object):
         ur_x = int(upper_right_corner[0])
         ur_y = int(upper_right_corner[1])
         assert ll_x < ur_x and ll_y < ur_y, 'Lower left corner (%.5f, %.5f) vs. upper right corner ' \
-                                            '(%.5f, %.5f)' % (ll_x, ll_y, ur_x, ur_y)
+                                            '(%.5f, %.5f) ' % (ll_x, ll_y, ur_x, ur_y)
 
         # Add to dictionary.  Because usually one ROI is defined for all scans in an experiment,
         # then it is better and easier to support client to search this ROI by experiment number
@@ -1834,6 +1797,24 @@ class CWSCDReductionControl(object):
 
         return
 
+    def set_detector_geometry(self, size_x, size_y):
+        """
+        set the detector's geometry, i.e., size
+        :param size_x:
+        :param size_y:
+        :return:
+        """
+        # check inputs
+        assert isinstance(size_x, int) and size_x > 0, 'Input detector size-X {0} must be a positive integer.' \
+                                                       ''.format(size_x)
+        assert isinstance(size_y, int) and size_y > 0, 'Input detector size-Y {0} must be a positive integer.' \
+                                                       ''.format(size_y)
+
+        self._detectorSize[0] = size_x
+        self._detectorSize[1] = size_y
+
+        return
+
     def set_detector_sample_distance(self, exp_number, sample_det_distance):
         """
         set instrument's detector - sample distance
@@ -2304,6 +2285,10 @@ class CWSCDReductionControl(object):
             if k_index > 0 and scan_number not in self._kShiftDict[k_index][1]:
                 self._kShiftDict[k_index][1].append(scan_number)
 
+            # add to the peak info
+            peak_info = self.get_peak_info(self._expNumber, scan_number)
+            peak_info.set_k_vector(self._kShiftDict[k_index][0])
+
             # remove from the previous placeholder
             for k_i in self._kShiftDict.keys():
                 # skip current one
@@ -2352,10 +2337,22 @@ class CWSCDReductionControl(object):
         :return: (boolean, PeakInfo/string)
         """
         # check
-        assert isinstance(exp_number, int)
-        assert isinstance(scan_number, int)
-        assert isinstance(peak_ws_name, str)
-        assert isinstance(md_ws_name, str)
+        assert isinstance(exp_number, int), 'Experiment number must be an integer.'
+        assert isinstance(scan_number, int), 'Scan number must an be integer.'
+        assert isinstance(peak_ws_name, str), 'PeaksWorkspace must be a string.'
+        assert isinstance(md_ws_name, str), 'MDEventWorkspace name must be a string.'
+
+        # check whether there is a redundant creation of PeakProcessRecord for the same (exp, scan) combination
+        if (exp_number, scan_number) in self._myPeakInfoDict:
+            peak_info = self._myPeakInfoDict[(exp_number, scan_number)]
+            print '[ERROR] PeakProcessRecord for Exp {0} Scan {1} shall not be created twice!' \
+                  ''.format(exp_number, scan_number)
+            print '[CONTINUE] New PeaksWorkspace = {0} vs Existing PeaksWorkspace = {1}.' \
+                  ''.format(peak_ws_name, peak_info.peaks_workspace)
+            print '[CONTINUE] New MDEventWorkspace = {0} vs Existing MDEventWorkspace = {1}.' \
+                  ''.format(md_ws_name, peak_info.md_workspace)
+            return False, peak_info
+        # END-IF
 
         # create a PeakInfo instance if it does not exist
         peak_info = PeakProcessRecord(exp_number, scan_number, peak_ws_name)
@@ -2422,7 +2419,7 @@ class CWSCDReductionControl(object):
 
         return ptlist
 
-    def set_peak_intensity(self, exp_number, scan_number, intensity):
+    def set_zero_peak_intensity(self, exp_number, scan_number):
         """
         Set peak intensity to a scan and set to PeakInfo
         :param exp_number:
@@ -2433,7 +2430,6 @@ class CWSCDReductionControl(object):
         # check
         assert isinstance(exp_number, int)
         assert isinstance(scan_number, int)
-        assert isinstance(intensity, float)
 
         # get dictionary item
         err_msg = 'Exp %d Scan %d does not exist in peak information' \
@@ -2442,17 +2438,7 @@ class CWSCDReductionControl(object):
         peak_info = self._myPeakInfoDict[(exp_number, scan_number)]
 
         # set intensity
-        try:
-            peak_info.set_intensity(intensity)
-        except AssertionError as ass_error:
-            return False, 'Unable to set peak intensity due to %s.' % str(ass_error)
-
-        # calculate sigma by simple square root
-        if intensity > 0:
-            sigma = math.sqrt(intensity)
-        else:
-            sigma = 1.
-        peak_info.set_sigma(sigma)
+        peak_info.set_intensity_to_zero()
 
         return True, ''
 
@@ -2496,7 +2482,7 @@ class CWSCDReductionControl(object):
         error_message = ''
 
         # Download and
-        for scan_number in xrange(start_scan, end_scan):
+        for scan_number in range(start_scan, end_scan+1):
             # check whether file exists
             if self.does_file_exist(exp_number, scan_number) is False:
                 # SPICE file does not exist in data directory. Download!
@@ -2570,6 +2556,7 @@ class CWSCDReductionControl(object):
                 wavelength = get_hb3a_wavelength(m1)
                 if wavelength is None:
                     q_range = 0.
+                    print '[ERROR] Scan number {0} has invalid m1 for wavelength.'.format(scan_number)
                 else:
                     q_range = 4.*math.pi*math.sin(two_theta/180.*math.pi*0.5)/wavelength
 
@@ -2591,7 +2578,7 @@ class CWSCDReductionControl(object):
 
         return True, scan_sum_list, error_message
 
-    def export_project(self, project_file_name, ui_dict):
+    def save_project(self, project_file_name, ui_dict):
         """ Export project
         - the data structure and information will be written to a ProjectManager file
         :param project_file_name:
diff --git a/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py b/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py
index 87709d85f007ed6b9a26b1db984fb43bb2cbe55c..d40ab566cd03f5ce842531c0021bbe674096083c 100644
--- a/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py
+++ b/scripts/HFIR_4Circle_Reduction/reduce4circleGUI.py
@@ -11,8 +11,6 @@ import time
 import datetime
 import random
 import numpy
-from scipy.optimize import curve_fit
-
 
 from PyQt4 import QtCore, QtGui
 try:
@@ -35,10 +33,14 @@ import plot3dwindow
 from multi_threads_helpers import *
 import optimizelatticewindow as ol_window
 import viewspicedialog
+import peak_integration_utility
+import FindUBUtility
+import message_dialog
 
 # import line for the UI python class
 from ui_MainWindow import Ui_MainWindow
 
+
 # define constants
 IndexFromSpice = 'From Spice (pre-defined)'
 IndexFromUB = 'From Calculation By UB'
@@ -51,7 +53,8 @@ class MainWindow(QtGui.QMainWindow):
     TabPage = {'View Raw Data': 2,
                'Calculate UB': 3,
                'UB Matrix': 4,
-               'Peak Integration': 6}
+               'Peak Integration': 6,
+               'Scans Processing': 5}
 
     def __init__(self, parent=None):
         """ Initialization and set up
@@ -63,6 +66,15 @@ class MainWindow(QtGui.QMainWindow):
         self.ui = Ui_MainWindow()
         self.ui.setupUi(self)
 
+        # children windows
+        self._my3DWindow = None
+        self._refineConfigWindow = None
+        self._peakIntegrationInfoWindow = None
+        self._addUBPeaksDialog = None
+        self._spiceViewer = None
+        self._mySinglePeakIntegrationDialog = None
+        self._singlePeakIntegrationDialogBuffer = ''
+
         # Make UI scrollable
         if NO_SCROLL is False:
             self._scrollbars = MantidQt.API.WidgetScrollbarDecorator(self)
@@ -103,6 +115,37 @@ class MainWindow(QtGui.QMainWindow):
                      self.do_set_user_detector_center)
         self.connect(self.ui.pushButton_applyUserWavelength, QtCore.SIGNAL('clicked()'),
                      self.do_set_user_wave_length)
+        self.connect(self.ui.pushButton_applyDetectorSize, QtCore.SIGNAL('clicked()'),
+                     self.do_set_detector_size)
+
+        # Tab survey
+        self.connect(self.ui.pushButton_survey, QtCore.SIGNAL('clicked()'),
+                     self.do_survey)
+        self.connect(self.ui.pushButton_saveSurvey, QtCore.SIGNAL('clicked()'),
+                     self.do_save_survey)
+        self.connect(self.ui.pushButton_loadSurvey, QtCore.SIGNAL('clicked()'),
+                     self.do_load_survey)
+        self.connect(self.ui.pushButton_viewSurveyPeak, QtCore.SIGNAL('clicked()'),
+                     self.do_view_survey_peak)
+        self.connect(self.ui.pushButton_addPeaksToRefine, QtCore.SIGNAL('clicked()'),
+                     self.do_add_peaks_for_ub)
+        self.connect(self.ui.pushButton_mergeScansSurvey, QtCore.SIGNAL('clicked()'),
+                     self.do_merge_scans_survey)
+        self.connect(self.ui.pushButton_selectAllSurveyPeaks, QtCore.SIGNAL('clicked()'),
+                     self.do_select_all_survey)
+        self.connect(self.ui.pushButton_sortInfoTable, QtCore.SIGNAL('clicked()'),
+                     self.do_filter_sort_survey_table)
+        self.connect(self.ui.pushButton_clearSurvey, QtCore.SIGNAL('clicked()'),
+                     self.do_clear_survey)
+        self.connect(self.ui.pushButton_viewRawSpice, QtCore.SIGNAL('clicked()'),
+                     self.do_show_spice_file)
+
+        self.connect(self.ui.lineEdit_numSurveyOutput, QtCore.SIGNAL('editingFinished()'),
+                     self.evt_show_survey)
+        self.connect(self.ui.lineEdit_numSurveyOutput, QtCore.SIGNAL('returnPressed()'),
+                     self.evt_show_survey)
+        self.connect(self.ui.lineEdit_numSurveyOutput, QtCore.SIGNAL('textEdited(const QString&)'),
+                     self.evt_show_survey)
 
         # Tab 'View Raw Data'
         self.connect(self.ui.pushButton_setScanInfo, QtCore.SIGNAL('clicked()'),
@@ -129,12 +172,14 @@ class MainWindow(QtGui.QMainWindow):
                      self.do_mask_pt_2d)
         self.connect(self.ui.pushButton_saveMask, QtCore.SIGNAL('clicked()'),
                      self.do_save_roi)
+        self.connect(self.ui.pushButton_integrateROI, QtCore.SIGNAL('clicked()'),
+                     self.do_integrate_roi)
 
         # Tab 'calculate ub matrix'
-        self.connect(self.ui.pushButton_findPeak, QtCore.SIGNAL('clicked()'),
-                     self.do_find_peak)
-        self.connect(self.ui.pushButton_addPeakToCalUB, QtCore.SIGNAL('clicked()'),
-                     self.do_add_ub_peak)
+        self.connect(self.ui.pushButton_addUBScans, QtCore.SIGNAL('clicked()'),
+                     self.do_add_ub_peaks)
+        # self.connect(self.ui.pushButton_addPeakToCalUB, QtCore.SIGNAL('clicked()'),
+        #              self.do_add_ub_peak)
         self.connect(self.ui.pushButton_calUB, QtCore.SIGNAL('clicked()'),
                      self.do_cal_ub_matrix)
         self.connect(self.ui.pushButton_acceptUB, QtCore.SIGNAL('clicked()'),
@@ -147,8 +192,8 @@ class MainWindow(QtGui.QMainWindow):
                      self.do_clear_ub_peaks)
         self.connect(self.ui.pushButton_resetPeakHKLs, QtCore.SIGNAL('clicked()'),
                      self.do_reset_ub_peaks_hkl)
-        self.connect(self.ui.pushButton_selectAllPeaks, QtCore.SIGNAL('clicked()'),
-                     self.do_select_all_peaks)
+        # self.connect(self.ui.pushButton_selectAllPeaks, QtCore.SIGNAL('clicked()'),
+        #              self.do_select_all_peaks)
         self.connect(self.ui.pushButton_viewScan3D, QtCore.SIGNAL('clicked()'),
                      self.do_view_data_3d)
         self.connect(self.ui.pushButton_plotSelectedData, QtCore.SIGNAL('clicked()'),
@@ -162,11 +207,21 @@ class MainWindow(QtGui.QMainWindow):
 
         self.connect(self.ui.pushButton_refineUB, QtCore.SIGNAL('clicked()'),
                      self.do_refine_ub_indexed_peaks)
+        self.connect(self.ui.pushButton_refineUBCalIndex, QtCore.SIGNAL('clicked()'),
+                     self.do_refine_ub_cal_indexed_peaks)
+
         self.connect(self.ui.pushButton_refineUBFFT, QtCore.SIGNAL('clicked()'),
                      self.do_refine_ub_fft)
         self.connect(self.ui.pushButton_findUBLattice, QtCore.SIGNAL('clicked()'),
                      self.do_refine_ub_lattice)
 
+        self.connect(self.ui.radioButton_ubAdvancedSelection, QtCore.SIGNAL('toggled(bool)'),
+                     self.do_select_all_peaks)
+        self.connect(self.ui.radioButton_ubSelectAllScans, QtCore.SIGNAL('toggled(bool)'),
+                     self.do_select_all_peaks)
+        self.connect(self.ui.radioButton_ubSelectNoScan, QtCore.SIGNAL('toggled(bool)'),
+                     self.do_select_all_peaks)
+
         # Tab 'Setup'
         self.connect(self.ui.pushButton_useDefaultDir, QtCore.SIGNAL('clicked()'),
                      self.do_setup_dir_default)
@@ -185,7 +240,7 @@ class MainWindow(QtGui.QMainWindow):
         self.connect(self.ui.pushButton_saveUB, QtCore.SIGNAL('clicked()'),
                      self.do_save_ub)
 
-        # Tab 'Merge'
+        # Tab 'Scans Processing'
         self.connect(self.ui.pushButton_addScanSliceView, QtCore.SIGNAL('clicked()'),
                      self.do_add_scans_merge)
         self.connect(self.ui.pushButton_mergeScans, QtCore.SIGNAL('clicked()'),
@@ -216,50 +271,22 @@ class MainWindow(QtGui.QMainWindow):
                      self.do_convert_merged_to_hkl)
         self.connect(self.ui.pushButton_showScanWSInfo, QtCore.SIGNAL('clicked()'),
                      self.do_show_workspaces)
-
-        # Tab 'Integrate Peaks'
+        self.connect(self.ui.pushButton_showIntegrateDetails, QtCore.SIGNAL('clicked()'),
+                     self.do_show_integration_details)
+        self.connect(self.ui.pushButton_toggleIntegrateType, QtCore.SIGNAL('clicked()'),
+                     self.do_toggle_table_integration)
+        self.connect(self.ui.pushButton_exportSelectedPeaks, QtCore.SIGNAL('clicked()'),
+                     self.do_export_selected_peaks_to_integrate)
+
+        # Tab 'Integrate (single) Peaks'
         self.connect(self.ui.pushButton_integratePt, QtCore.SIGNAL('clicked()'),
-                     self.do_integrate_per_pt)
+                     self.do_integrate_single_scan)
         self.connect(self.ui.comboBox_ptCountType, QtCore.SIGNAL('currentIndexChanged(int)'),
-                     self.do_plot_pt_peak)
-
-        self.connect(self.ui.pushButton_integratePeak, QtCore.SIGNAL('clicked()'),
-                     self.do_integrate_peak)
-
-        self.connect(self.ui.pushButton_fitBkgd, QtCore.SIGNAL('clicked()'),
-                     self.do_fit_bkgd)
-        self.connect(self.ui.pushButton_handPickBkgd, QtCore.SIGNAL('clicked()'),
-                     self.do_manual_bkgd)
-        self.connect(self.ui.pushButton_calBkgd, QtCore.SIGNAL('clicked()'),
-                     self.do_cal_background)
-
-        # Tab survey
-        self.connect(self.ui.pushButton_survey, QtCore.SIGNAL('clicked()'),
-                     self.do_survey)
-        self.connect(self.ui.pushButton_saveSurvey, QtCore.SIGNAL('clicked()'),
-                     self.do_save_survey)
-        self.connect(self.ui.pushButton_loadSurvey, QtCore.SIGNAL('clicked()'),
-                     self.do_load_survey)
-        self.connect(self.ui.pushButton_viewSurveyPeak, QtCore.SIGNAL('clicked()'),
-                     self.do_view_survey_peak)
-        self.connect(self.ui.pushButton_addPeaksToRefine, QtCore.SIGNAL('clicked()'),
-                     self.do_add_peaks_for_ub)
-        self.connect(self.ui.pushButton_selectAllSurveyPeaks, QtCore.SIGNAL('clicked()'),
-                     self.do_select_all_survey)
-        self.connect(self.ui.pushButton_sortInfoTable, QtCore.SIGNAL('clicked()'),
-                     self.do_filter_sort_survey_table)
-        self.connect(self.ui.pushButton_clearSurvey, QtCore.SIGNAL('clicked()'),
-                     self.do_clear_survey)
-
-        self.connect(self.ui.lineEdit_numSurveyOutput, QtCore.SIGNAL('editingFinished()'),
-                     self.evt_show_survey)
-        self.connect(self.ui.lineEdit_numSurveyOutput, QtCore.SIGNAL('returnPressed()'),
-                     self.evt_show_survey)
-        self.connect(self.ui.lineEdit_numSurveyOutput, QtCore.SIGNAL('textEdited(const QString&)'),
-                     self.evt_show_survey)
-
-        self.connect(self.ui.pushButton_viewRawSpice, QtCore.SIGNAL('clicked()'),
-                     self.do_show_spice_file)
+                     self.evt_change_normalization)  # calculate the normalized data again
+        self.connect(self.ui.pushButton_showIntPeakDetails, QtCore.SIGNAL('clicked()'),
+                     self.do_show_single_peak_integration)
+        self.connect(self.ui.pushButton_clearPeakIntFigure, QtCore.SIGNAL('clicked()'),
+                     self.do_clear_peak_integration_canvas)
 
         # Tab k-shift vector
         self.connect(self.ui.pushButton_addKShift, QtCore.SIGNAL('clicked()'),
@@ -287,6 +314,9 @@ class MainWindow(QtGui.QMainWindow):
         # Validator ... (NEXT)
 
         # Declaration of class variable
+        # IPTS number
+        self._iptsNumber = None
+
         # some configuration
         self._homeSrcDir = os.getcwd()
         self._homeDir = os.getcwd()
@@ -298,9 +328,10 @@ class MainWindow(QtGui.QMainWindow):
         self._surveyTableFlag = True
         self._ubPeakTableFlag = True
 
+        # set the detector geometry
+        self.do_set_detector_size()
+
         # Sub window
-        self._my3DWindow = None
-        self._refineConfigWindow = None
         self._baseTitle = 'Title is not initialized'
 
         # Timing and thread 'global'
@@ -310,9 +341,6 @@ class MainWindow(QtGui.QMainWindow):
         # QSettings
         self.load_settings()
 
-        # pre-define child windows
-        self._spiceViewer = None
-
         return
 
     @property
@@ -331,13 +359,15 @@ class MainWindow(QtGui.QMainWindow):
         :return:
         """
         self._baseTitle = str(self.windowTitle())
-        self.setWindowTitle('%s: No Experiment Is Set' % self._baseTitle)
+        self.setWindowTitle('No Experiment Is Set')
+
+        # detector geometry (set to 256 x 256)
+        self.ui.comboBox_detectorSize.setCurrentIndex(0)
 
         # Table widgets
         self.ui.tableWidget_peaksCalUB.setup()
         self.ui.tableWidget_ubMatrix.setup()
         self.ui.tableWidget_surveyTable.setup()
-        self.ui.tableWidget_peakIntegration.setup()
         self.ui.tableWidget_mergeScans.setup()
         self.ui.tableWidget_ubInUse.setup()
         self.ui.tableWidget_kShift.setup()
@@ -363,14 +393,24 @@ class MainWindow(QtGui.QMainWindow):
         self.ui.comboBox_indexFrom.addItem('By calculation')
         self.ui.comboBox_indexFrom.addItem('From SPICE')
 
+        self.ui.comboBox_hklType.clear()
+        self.ui.comboBox_hklType.addItem('SPICE')
+        self.ui.comboBox_hklType.addItem('Calculated')
+
+        # normalization to peak
+        self.ui.comboBox_ptCountType.clear()
+        self.ui.comboBox_ptCountType.addItem('Time')
+        self.ui.comboBox_ptCountType.addItem('Monitor')
+        self.ui.comboBox_ptCountType.addItem('Absolute')
+
         # tab
         self.ui.tabWidget.setCurrentIndex(0)
 
         self.ui.radioButton_ubMantidStyle.setChecked(True)
         self.ui.lineEdit_numSurveyOutput.setText('50')
-        self.ui.checkBox_loadHKLfromFile.setChecked(True)
         self.ui.checkBox_sortDescending.setChecked(False)
         self.ui.radioButton_sortByCounts.setChecked(True)
+        self.ui.radioButton_ubSelectNoScan.setChecked(True)
 
         # Tab 'Access'
         self.ui.lineEdit_url.setText('http://neutron.ornl.gov/user_data/hb3a/')
@@ -385,9 +425,12 @@ class MainWindow(QtGui.QMainWindow):
         # check boxes
         self.ui.graphicsView_detector2dPlot.set_parent_window(self)
 
+        # background points
+        self.ui.lineEdit_backgroundPts.setText('1, 1')
+
         return
 
-    def _build_peak_info_list(self, zero_hkl):
+    def _build_peak_info_list(self, zero_hkl, is_spice=True):
         """ Build a list of PeakInfo to build peak workspace
         peak HKL can be set to zero or from table
         :return: list of peak information, which is a PeakProcessRecord instance
@@ -414,10 +457,13 @@ class MainWindow(QtGui.QMainWindow):
             if zero_hkl:
                 # set HKL to zero
                 peak_info.set_hkl(0., 0., 0.)
-            else:
+            elif is_spice:
                 # set from table
-                miller_index = self.ui.tableWidget_peaksCalUB.get_hkl(i_row)
-                peak_info.set_hkl_np_array(numpy.array(miller_index))
+                spice_hkl = self.ui.tableWidget_peaksCalUB.get_hkl(i_row, True)
+                peak_info.set_hkl_np_array(numpy.array(spice_hkl))
+            else:
+                calculated_hkl = self.ui.tableWidget_peaksCalUB.get_hkl(i_row, False)
+                peak_info.set_hkl_np_array(numpy.array(calculated_hkl))
             # END-IF-ELSE
 
             peak_info_list.append(peak_info)
@@ -482,17 +528,36 @@ class MainWindow(QtGui.QMainWindow):
         ui_dict['survey start'] = str(self.ui.lineEdit_surveyStartPt.text())
         ui_dict['survey stop'] = str(self.ui.lineEdit_surveyEndPt.text())
 
-        # export/save project
-        self._myControl.export_project(project_file_name, ui_dict)
+        # detector-sample distance
+        det_distance_str = str(self.ui.lineEdit_infoDetSampleDistance.text()).strip()
+        if len(det_distance_str) > 0:
+            ui_dict['det_sample_distance'] = det_distance_str
+
+        # wave length
+        wave_length_str = str(self.ui.lineEdit_infoWavelength.text()).strip()
+        if len(wave_length_str) > 0:
+            ui_dict['wave_length'] = wave_length_str
+
+        # calibrated detector center
+        det_center_str = str(self.ui.lineEdit_infoDetCenter.text())
+        if len(det_center_str) > 0:
+            ui_dict['det_center'] = det_center_str
 
         # register and make it as a queue for last n opened/saved project
         last_1_path = str(self.ui.label_last1Path.text())
         if last_1_path != project_file_name:
             self.ui.label_last3Path.setText(self.ui.label_last2Path.text())
             self.ui.label_last2Path.setText(self.ui.label_last1Path.text())
-            self.ui.label_last1Path.setText(last_1_path)
+            self.ui.label_last1Path.setText(project_file_name)
         # END-IF
 
+        self._myControl.save_project(project_file_name, ui_dict)
+
+        # TODO/NOW/TODAY - Implement a pop-up dialog for this
+        information = 'Project has been saved to {0}\n'.format(project_file_name),
+        information += 'Including dictionary keys: {0}'.format(ui_dict)
+        print '[INFO]\n{0}'.format(information)
+
         return
 
     def action_load_project(self):
@@ -501,6 +566,9 @@ class MainWindow(QtGui.QMainWindow):
         :return:
         """
         project_file_name = str(QtGui.QFileDialog.getOpenFileName(self, 'Choose Project File', os.getcwd()))
+        if len(project_file_name) == 0:
+            # return if cancelled
+            return
 
         # make it as a queue for last n opened/saved project
         last_1_path = str(self.ui.label_last1Path.text())
@@ -514,34 +582,6 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def load_project(self, project_file_name):
-        """
-        Load a saved project
-        :param project_file_name:
-        :return:
-        """
-        assert isinstance(project_file_name, str), 'Project file name %s must be a string but not %s.' \
-                                                   '' % (str(project_file_name), type(project_file_name))
-        assert os.path.exists(project_file_name), 'Project file "%s" cannot be found.' % project_file_name
-
-        # load project
-        ui_dict = self._myControl.load_project(project_file_name)
-
-        # set the UI parameters to GUI
-        try:
-            self.ui.lineEdit_localSpiceDir.setText(ui_dict['local spice dir'])
-            self.ui.lineEdit_workDir.setText(ui_dict['work dir'])
-            self.ui.lineEdit_surveyStartPt.setText(ui_dict['survey start'])
-            self.ui.lineEdit_surveyEndPt.setText(ui_dict['survey stop'])
-
-            # now try to call some actions
-            self.do_apply_setup()
-            self.do_set_experiment()
-        except KeyError:
-            print '[Error] Some field cannot be found.'
-
-        return
-
     def action_load_last_project(self):
         """
         Load last project
@@ -626,6 +666,32 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
+    def add_scans_ub_table(self, scan_list):
+        """
+
+        :param scan_list:
+        :return:
+        """
+        # TODO/FIXME/ISSUE/NOW - consider to refactor with do_add_peaks_for_ub() and
+        # get experiment number
+        status, exp_number = gutil.parse_integers_editors(self.ui.lineEdit_exp)
+        if not status:
+            self.pop_one_button_dialog('Unable to get experiment number\n  due to %s.' % str(exp_number))
+            return
+
+        # switch to tab-3
+        # self.ui.tabWidget.setCurrentIndex(MainWindow.TabPage['Calculate UB'])
+
+        # prototype for a new thread
+        self.ui.progressBar_add_ub_peaks.setRange(0, len(scan_list))
+        self._addUBPeaksThread = AddPeaksThread(self, exp_number, scan_list)
+        self._addUBPeaksThread.start()
+
+        # set the flag/notification where the indexing (HKL) from
+        self.ui.lineEdit_peaksIndexedBy.setText(IndexFromSpice)
+
+        return
+
     def do_add_roi(self):
         """ Add region of interest to 2D image
         :return:
@@ -659,51 +725,136 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def do_add_ub_peak(self):
-        """ Add current to ub peaks
+    def do_add_ub_peaks(self):
+        """
+        Launch dialog to add UB peaks
+        :return:
+        """
+        if self._addUBPeaksDialog is None:
+            self._addUBPeaksDialog = FindUBUtility.AddScansForUBDialog(self)
+
+        self._addUBPeaksDialog.show()
+
+        return
+
+    # def do_add_ub_peak(self):
+    #     """ Add current to ub peaks
+    #     :return:
+    #     """
+    #     # TODO/FIXME/ISSUE/NOW - Find out whether this method is still needed
+    #     # Add peak
+    #     status, int_list = gutil.parse_integers_editors([self.ui.lineEdit_exp,
+    #                                                      self.ui.lineEdit_scanNumber])
+    #     if status is False:
+    #         self.pop_one_button_dialog(int_list)
+    #         return
+    #     exp_no, scan_no = int_list
+    #
+    #     # Get HKL from GUI
+    #     status, float_list = gutil.parse_float_editors([self.ui.lineEdit_H,
+    #                                                     self.ui.lineEdit_K,
+    #                                                     self.ui.lineEdit_L])
+    #     if status is False:
+    #         err_msg = float_list
+    #         self.pop_one_button_dialog(err_msg)
+    #         return
+    #     h, k, l = float_list
+    #
+    #     try:
+    #         peak_info_obj = self._myControl.get_peak_info(exp_no, scan_no)
+    #     except AssertionError as ass_err:
+    #         self.pop_one_button_dialog(str(ass_err))
+    #         return
+    #
+    #     assert isinstance(peak_info_obj, r4c.PeakProcessRecord)
+    #     peak_info_obj.set_hkl(h, k, l)
+    #     self.set_ub_peak_table(peak_info_obj)
+    #
+    #     # Clear
+    #     self.ui.lineEdit_scanNumber.setText('')
+    #
+    #     self.ui.lineEdit_sampleQx.setText('')
+    #     self.ui.lineEdit_sampleQy.setText('')
+    #     self.ui.lineEdit_sampleQz.setText('')
+    #
+    #     self.ui.lineEdit_H.setText('')
+    #     self.ui.lineEdit_K.setText('')
+    #     self.ui.lineEdit_L.setText('')
+    #
+    #     # set the flag/notification where the indexing (HKL) from
+    #     self.ui.lineEdit_peaksIndexedBy.setText(IndexFromSpice)
+    #
+    #     return
+
+    def do_add_k_shift_vector(self):
+        """ Add a k-shift vector
         :return:
         """
-        # Add peak
-        status, int_list = gutil.parse_integers_editors([self.ui.lineEdit_exp,
-                                                         self.ui.lineEdit_scanNumber])
+        # parse the k-vector
+        status, ret_obj = gutil.parse_float_editors([self.ui.lineEdit_kX, self.ui.lineEdit_kY, self.ui.lineEdit_kZ],
+                                                    allow_blank=False)
         if status is False:
-            self.pop_one_button_dialog(int_list)
+            error_message = ret_obj
+            self.pop_one_button_dialog(error_message)
             return
-        exp_no, scan_no = int_list
+        else:
+            k_x, k_y, k_z = ret_obj
 
-        # Get HKL from GUI
-        status, float_list = gutil.parse_float_editors([self.ui.lineEdit_H,
-                                                        self.ui.lineEdit_K,
-                                                        self.ui.lineEdit_L])
-        if status is False:
-            err_msg = float_list
-            self.pop_one_button_dialog(err_msg)
-            return
-        h, k, l = float_list
+        # add to controller
+        k_index = self._myControl.add_k_shift_vector(k_x, k_y, k_z)
 
-        try:
-            peak_info_obj = self._myControl.get_peak_info(exp_no, scan_no)
-        except AssertionError as ass_err:
-            self.pop_one_button_dialog(str(ass_err))
-            return
+        # add to table and combo-box
+        self.ui.tableWidget_kShift.add_k_vector(k_index, k_x, k_y, k_z)
+
+        combo_message = '%d: (%.5f, %.5f, %.5f)' % (k_index, k_x, k_y, k_z)
+        self.ui.comboBox_kVectors.addItem(combo_message)
+
+        return
+
+    def do_apply_k_shift(self):
+        """ Apply k-shift to selected reflections
+        :return:
+        """
+        # get the selected scans
+        scan_list = list()
+        selected_row_numbers = self.ui.tableWidget_mergeScans.get_selected_rows(True)
+        for row_index in selected_row_numbers:
+            scan_number = self.ui.tableWidget_mergeScans.get_scan_number(row_index)
+            scan_list.append(scan_number)
 
-        assert isinstance(peak_info_obj, r4c.PeakProcessRecord)
-        peak_info_obj.set_hkl(h, k, l)
-        self.set_ub_peak_table(peak_info_obj)
+        # get the k-vector
+        k_shift_message = str(self.ui.comboBox_kVectors.currentText())
+        k_index = int(k_shift_message.split(':')[0])
 
-        # Clear
-        self.ui.lineEdit_scanNumber.setText('')
+        # set to controller
+        self._myControl.set_k_shift(scan_list, k_index)
 
-        self.ui.lineEdit_sampleQx.setText('')
-        self.ui.lineEdit_sampleQy.setText('')
-        self.ui.lineEdit_sampleQz.setText('')
+        # set k-shift to table
+        # exp_number = int(self.ui.lineEdit_exp.text())
+        for row_index in selected_row_numbers:
+            self.ui.tableWidget_mergeScans.set_k_shift_index(row_index, k_index)
+            # scan_number = self.ui.tableWidget_mergeScans.get_scan_number(row_index)
 
-        self.ui.lineEdit_H.setText('')
-        self.ui.lineEdit_K.setText('')
-        self.ui.lineEdit_L.setText('')
+        return
 
-        # set the flag/notification where the indexing (HKL) from
-        self.ui.lineEdit_peaksIndexedBy.setText(IndexFromSpice)
+    def do_apply_roi(self):
+        """ Save current selection of region of interest
+        :return:
+        """
+        lower_left_c, upper_right_c = self.ui.graphicsView_detector2dPlot.get_roi()
+        # at the very beginning, the lower left and upper right are same
+        if lower_left_c[0] == upper_right_c[0] or lower_left_c[1] == upper_right_c[1]:
+            return
+
+        status, par_val_list = gutil.parse_integers_editors([self.ui.lineEdit_exp, self.ui.lineEdit_run])
+        assert status, str(par_val_list)
+        exp_number = par_val_list[0]
+        scan_number = par_val_list[1]
+
+        try:
+            self._myControl.set_roi(exp_number, scan_number, lower_left_c, upper_right_c)
+        except AssertionError as ass_err:
+            print '[ERROR] Unable to set ROI due to {0}.'.format(ass_err)
 
         return
 
@@ -824,29 +975,6 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def do_cal_background(self):
-        """
-        calculate background
-        algorithm 1: average the selected pt's intensity.
-        :return:
-        """
-        # get the selected rows in table
-        background_rows = self.ui.tableWidget_peakIntegration.get_selected_rows(True)
-
-        # loop through the selected rows and do the average
-        intensity_sum = 0.
-        for i_row in background_rows:
-            tmp_intensity = self.ui.tableWidget_peakIntegration.get_cell_value(i_row, 2)
-            intensity_sum += tmp_intensity
-
-        # calculate background value
-        background = intensity_sum / float(len(background_rows))
-
-        # set the value
-        self.ui.lineEdit_background.setText('%.7f' % background)
-
-        return
-
     def do_cal_ub_matrix(self):
         """ Calculate UB matrix by 2 or 3 reflections
         """
@@ -932,7 +1060,9 @@ class MainWindow(QtGui.QMainWindow):
         """
         num_rows = self.ui.tableWidget_peaksCalUB.rowCount()
         for i_row in range(num_rows):
-            self.ui.tableWidget_peaksCalUB.set_hkl(i_row, [0., 0., 0.])
+            self.ui.tableWidget_peaksCalUB.set_hkl(i_row, [0., 0., 0.], is_spice_hkl=False)
+
+        return
 
     def do_clear_merge_table(self):
         """
@@ -942,6 +1072,22 @@ class MainWindow(QtGui.QMainWindow):
         # clear
         self.ui.tableWidget_mergeScans.remove_all_rows()
 
+    def do_clear_peak_integration_canvas(self):
+        """
+        clear the peak integration canvas and the integrated values
+        :return:
+        """
+        self.ui.graphicsView_integratedPeakView.clear_all_lines()
+
+        self.ui.lineEdit_rawSinglePeakIntensity.setText('')
+        self.ui.lineEdit_intensity2.setText('')
+        self.ui.lineEdit_gaussianPeakIntensity.setText('')
+        self.ui.lineEdit_errorIntensity1.setText('')
+        self.ui.lineEdit_errorIntensity2.setText('')
+        self.ui.lineEdit_errorIntensity3.setText('')
+
+        return
+
     def do_clear_survey(self):
         """
         Clear survey and survey table.
@@ -1058,85 +1204,68 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def do_find_peak(self):
+    def find_peak_in_scan(self , scan_number, load_spice_hkl):
         """ Find peak in a given scan and record it
         """
         # Get experiment, scan and pt
-        status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp,
-                                                        self.ui.lineEdit_scanNumber])
+        status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp])
         if status is True:
-            exp_no, scan_no = ret_obj
+            exp_no = ret_obj
         else:
             self.pop_one_button_dialog(ret_obj)
             return
 
         # merge peak if necessary
-        if self._myControl.has_merged_data(exp_no, scan_no) is False:
-            status, err_msg = self._myControl.merge_pts_in_scan(exp_no, scan_no, [])
+        if self._myControl.has_merged_data(exp_no, scan_number) is False:
+            status, err_msg = self._myControl.merge_pts_in_scan(exp_no, scan_number, [])
             if status is False:
                 self.pop_one_button_dialog(err_msg)
 
         # Find peak
-        status, err_msg = self._myControl.find_peak(exp_no, scan_no)
+        status, err_msg = self._myControl.find_peak(exp_no, scan_number)
         if status is False:
             self.pop_one_button_dialog(ret_obj)
             return
 
         # Get information from the latest (integrated) peak
-        if self.ui.checkBox_loadHKLfromFile.isChecked() is True:
-            # This is the first time that in the workflow to get HKL from MD workspace
-            peak_info = self._myControl.get_peak_info(exp_no, scan_no)
-            assert peak_info is not None, 'Unable to locate PeakProcessRecord (peak info).'
-            # try:
-            #     peak_info.retrieve_hkl_from_spice_table()
-            # except RuntimeError as run_err:
-            #     self.pop_one_button_dialog('Unable to locate peak info due to %s.' % str(run_err))
-        # END-IF
+        # if load_spice_hkl:
+        #     # This is the first time that in the workflow to get HKL from MD workspace
+        #     peak_info = self._myControl.get_peak_info(exp_no, scan_number)
+        #     assert peak_info is not None, 'Unable to locate PeakProcessRecord (peak info).'
+        # # END-IF
 
         # Set up correct values to table tableWidget_peaksCalUB
-        peak_info = self._myControl.get_peak_info(exp_no, scan_no)
-        h, k, l = peak_info.get_spice_hkl()
-        self.ui.lineEdit_H.setText('%.2f' % h)
-        self.ui.lineEdit_K.setText('%.2f' % k)
-        self.ui.lineEdit_L.setText('%.2f' % l)
+        peak_info = self._myControl.get_peak_info(exp_no, scan_number)
+        assert peak_info is not None, 'Unable to locate PeakProcessRecord (peak info).'
+
+        if load_spice_hkl:
+            h, k, l = peak_info.get_hkl()
+            hkl = (h, k, l)
+        else:
+            hkl = ()
 
         q_x, q_y, q_z = peak_info.get_peak_centre()
-        self.ui.lineEdit_sampleQx.setText('%.5E' % q_x)
-        self.ui.lineEdit_sampleQy.setText('%.5E' % q_y)
-        self.ui.lineEdit_sampleQz.setText('%.5E' % q_z)
+        vec_q = (q_x, q_y, q_z)
 
-        return
+        return hkl, vec_q
 
-    def do_fit_bkgd(self):
-        """ Purpose: fit the Pt.-integrated peak intensity curve with Gaussian to find out the background
+    def do_export_selected_peaks_to_integrate(self):
+        """
+        export (to file or just print out) the scans that are selected for integration
+        :param self:
         :return:
         """
-        def gauss(x, a, b, c):
-            return c*numpy.exp(-(x-a)**2/b)
-
-        def gauss4(x, a, b, c, d):
-            return c*numpy.exp(-(x-a)**2/b)+d
-
-        # get the curve
-        vec_x, vec_y, vec_e = self.ui.graphicsView_integratedPeakView.get_xye()
-
-        # fit Gaussian for starting value of a, b and c
-        fit_result1 = curve_fit(gauss, vec_x, vec_y)
-        popt = fit_result1[0]  # popt, pcov
-        # gauss_fit = gauss(vec_x, popt[0], popt[1], popt[2])
-
-        # fit Gaussian again including background
-        p0 = [popt[0], popt[1], popt[2], 0.]
-        fit_result2 = curve_fit(gauss4, vec_x, vec_y, sigma=vec_e,  p0=p0)
-        popt2 = fit_result2[0]  # popt2, pcov2
-        gauss_fit4 = gauss4(vec_x, popt2[0], popt2[1], popt2[2], popt2[3])
+        # get selected rows' scan numbers
+        scan_tuple_list = self.ui.tableWidget_mergeScans.get_selected_scans()
+        scan_number_list = list()
+        for tup in scan_tuple_list:
+            scan_number_list.append(tup[0])
+        scan_number_list.sort()
 
-        # plot the result
-        self.ui.graphicsView_integratedPeakView.add_plot_1d(vec_x, gauss_fit4, color='red', marker='-')
+        info_str = '# Selected scans: \n'
+        info_str += '{0}'.format(scan_number_list)
 
-        # write out the result
-        background_value = popt2[3]
-        self.ui.lineEdit_background.setText('%.7f' % background_value)
+        print '[TEMP] Selected scans:\n{0}'.format(info_str)
 
         return
 
@@ -1166,15 +1295,6 @@ class MainWindow(QtGui.QMainWindow):
         # write
         user_header = str(self.ui.lineEdit_fpHeader.text())
         try:
-            # # get lattice parameters from UB tab
-            # a = float(self.ui.lineEdit_aUnitCell.text())
-            # b = float(self.ui.lineEdit_bUnitCell.text())
-            # c = float(self.ui.lineEdit_cUnitCell.text())
-            # alpha = float(self.ui.lineEdit_alphaUnitCell.text())
-            # beta = float(self.ui.lineEdit_betaUnitCell.text())
-            # gamma = float(self.ui.lineEdit_gammaUnitCell.text())
-            # lattice = absorption.Lattice(a, b, c, alpha, beta, gamma)
-
             export_absorption = self.ui.checkBox_exportAbsorptionToFP.isChecked()
 
             status, file_content = self._myControl.export_to_fullprof(exp_number, scan_number_list,
@@ -1188,6 +1308,8 @@ class MainWindow(QtGui.QMainWindow):
         except AssertionError as a_err:
             self.pop_one_button_dialog(str(a_err))
             return
+        except KeyError as key_err:
+            self.pop_one_button_dialog(str(key_err))
 
         return
 
@@ -1255,55 +1377,13 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def do_integrate_peak(self):
-        """ Integrate a peak in tab peak integration
-        :return:
+    def do_integrate_single_scan(self):
         """
-        # only support the simple cuboid counts summing algorithm
-
-        # get experiment number and scan number
-        status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp, self.ui.lineEdit_scanIntegratePeak],
-                                                       allow_blank=False)
-        if status is False:
-            err_msg = ret_obj
-            self.pop_one_button_dialog(err_msg)
-            return
-
-        # check table
-        table_exp, table_scan = self.ui.tableWidget_peakIntegration.get_exp_info()
-        if (table_exp, table_scan) != tuple(ret_obj):
-            err_msg = 'Table has value of a different experiment/scan (%d/%d vs %d/%d). Integrate Pt. first!' \
-                      '' % (table_exp, table_scan, ret_obj[0], ret_obj[1])
-            self.pop_one_button_dialog(err_msg)
-            return
-
-        # integrate by take account of background value
-        status, ret_obj = gutil.parse_float_editors(self.ui.lineEdit_background, allow_blank=True)
-        assert status, ret_obj
-        if ret_obj is None:
-            background = 0.
-        else:
-            background = ret_obj
-        peak_intensity = self.ui.tableWidget_peakIntegration.simple_integrate_peak(background)
-
-        # write result to label
-        norm_type = str(self.ui.comboBox_ptCountType.currentText())
-        label_str = 'Experiment %d Scan %d: Peak intensity = %.7f, Normalized by %s, Background = %.7f.' \
-                    '' % (table_exp, table_scan, peak_intensity, norm_type, background)
-        self.ui.label_peakIntegraeInfo.setText(label_str)
-
-        # set value to previous table
-        self.ui.tableWidget_mergeScans.set_peak_intensity(None, table_scan, peak_intensity)
-
-        return
-
-    def do_integrate_per_pt(self):
-        """
-        Integrate and plot per Pt.
+        integrate a single scan in 'Peak Integration' tab
+        Note: this is an experimenntal replacement for do_integrate_per_pt
         :return:
         """
-        # VZ-FUTURE: consider to compare and merge with method do_plot_pt_peak()
-        # get experiment and scan number
+        # parse experiment and scan number
         status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp,
                                                         self.ui.lineEdit_scanIntegratePeak])
         if not status:
@@ -1312,6 +1392,7 @@ class MainWindow(QtGui.QMainWindow):
         else:
             exp_number, scan_number = ret_obj
 
+        # parse normalization type
         normalization = str(self.ui.comboBox_ptCountType.currentText())
         if normalization.count('Time') > 0:
             norm_type = 'time'
@@ -1320,7 +1401,13 @@ class MainWindow(QtGui.QMainWindow):
         else:
             norm_type = ''
 
-        # get peak center (weighted)
+        # parse scale factor
+        try:
+            intensity_scale_factor = float(self.ui.lineEdit_scaleFactorScan.text())
+        except ValueError:
+            intensity_scale_factor = 1.
+
+        # calculate peak center (weighted)
         status, ret_obj = self._myControl.find_peak(exp_number, scan_number)
         if status is False:
             error_message = ret_obj
@@ -1329,69 +1416,131 @@ class MainWindow(QtGui.QMainWindow):
         else:
             this_peak_centre = ret_obj
 
-        # scale factor
+        # mask workspace
+        mask_name = str(self.ui.comboBox_maskNames2.currentText())
+        if mask_name.lower() == 'no mask':
+            mask_name = ''
+
+        # ui.lineEdit_backgroundPts and set default in init_widgets
+        bkgd_pt_tuple = gutil.parse_integer_list(str(self.ui.lineEdit_backgroundPts.text()), 2)
+
+        # integrate peak
         try:
-            intensity_scale_factor = float(self.ui.lineEdit_scaleFactor.text())
-        except ValueError:
-            intensity_scale_factor = 1.
+            int_peak_dict = self._myControl.integrate_scan_peak(exp_number=exp_number,
+                                                                scan_number=scan_number,
+                                                                peak_centre=this_peak_centre,
+                                                                mask_name=mask_name,
+                                                                normalization=norm_type,
+                                                                scale_factor=intensity_scale_factor,
+                                                                background_pt_tuple=bkgd_pt_tuple)
+        except RuntimeError as run_error:
+            self.pop_one_button_dialog('Unable to integrate peak for scan {0} due to {1}.'
+                                       ''.format(scan_number, run_error))
+            return
+
+        # plot calculated motor position (or Pt.) - integrated intensity per Pts.
+        motor_pos_vec = int_peak_dict['motor positions']
+        pt_intensity_vec = int_peak_dict['pt intensities']
+        # print '[DB...BAT] motor position vector: {0} of type {1}'.format(motor_pos_vec, type(motor_pos_vec))
+        motor_std = motor_pos_vec.std()
+        if motor_std > 0.005:
+            self.ui.graphicsView_integratedPeakView.plot_raw_data(motor_pos_vec, pt_intensity_vec)
+        else:
+            # motor position fixed
+            # KEEP-IN-MIND:  Make this an option from
+            self.ui.graphicsView_integratedPeakView.plot_raw_data(numpy.array(range(1, len(pt_intensity_vec)+1)),
+                                                                  pt_intensity_vec)
 
-        # get masked workspace
-        mask_name = str(self.ui.comboBox_maskNames2.currentText())
-        if mask_name.startswith('No Mask'):
-            mask_name = None
-        # mask workspace?
-        mask_detectors = mask_name is not None
-
-        status, ret_obj = self._myControl.integrate_scan_peaks(exp=exp_number,
-                                                               scan=scan_number,
-                                                               peak_radius=1.0,
-                                                               peak_centre=this_peak_centre,
-                                                               merge_peaks=False,
-                                                               use_mask=mask_detectors,
-                                                               mask_ws_name=mask_name,
-                                                               normalization=norm_type,
-                                                               scale_factor=intensity_scale_factor)
-
-        # result due to error
-        if status is False:
-            error_message = ret_obj
-            self.pop_one_button_dialog(error_message)
-            return
+        if self._mySinglePeakIntegrationDialog is None:
+            self._mySinglePeakIntegrationDialog = message_dialog.MessageDialog(self)
+        self._mySinglePeakIntegrationDialog.set_peak_integration_details(motor_pos_vec, pt_intensity_vec)
+
+        # set calculated values
+        try:
+            self.ui.lineEdit_rawSinglePeakIntensity.setText('{0:.7f}'.format(int_peak_dict['simple intensity']))
+            self.ui.lineEdit_errorIntensity1.setText('{0:.7f}'.format(int_peak_dict['simple error']))
+            self.ui.lineEdit_avgBackground.setText('{0:.7f}'.format(int_peak_dict['simple background']))
+            self.ui.lineEdit_intensity2.setText('{0:.7f}'.format(int_peak_dict['intensity 2']))
+            if int_peak_dict['error 2'] is None:
+                self.ui.lineEdit_errorIntensity2.setText('inf')
+            else:
+                self.ui.lineEdit_errorIntensity2.setText('{0:.7f}'.format(int_peak_dict['error 2']))
+            self.ui.lineEdit_ptRange.setText('{0}'.format(int_peak_dict['pt_range']))
+            self.ui.lineEdit_gaussianPeakIntensity.setText('{0:.7f}'.format(int_peak_dict['gauss intensity']))
+            if int_peak_dict['gauss error'] is None:
+                self.ui.lineEdit_errorIntensity3.setText('inf')
+            else:
+                self.ui.lineEdit_errorIntensity3.setText('{0:.7f}'.format(int_peak_dict['gauss error']))
+            self.ui.tableWidget_covariance.set_matrix(int_peak_dict['covariance matrix'])
+
+            fit_param_dict = int_peak_dict['gauss parameters']
+            # {'A': 1208.4097237325959, 'x0': 32.175524426773507, 'B': 23.296505385975976, 's': 0.47196665622701633}
+            self.ui.lineEdit_peakBackground.setText('{0:.4f}'.format(fit_param_dict['B']))
+            self.ui.lineEdit_gaussA.setText('{0:.4f}'.format(fit_param_dict['A']))
+            self.ui.lineEdit_gaussSigma.setText('{0:.4f}'.format(fit_param_dict['s']))
+            self.ui.lineEdit_gaussB.setText('{0:.4f}'.format(fit_param_dict['B']))
+
+            # plot fitted Gaussian
+            fit_gauss_dict = int_peak_dict['gauss parameters']
+        except KeyError as key_err:
+            raise RuntimeError('Peak integration result dictionary has keys {0}. Error is caused by {1}.'
+                               ''.format(int_peak_dict.keys(), key_err))
+        except ValueError as value_err:
+            print '[ERROR] Unable to fit by Gaussian due to {0}.'.format(value_err)
+        else:
+            self.plot_model_data(motor_pos_vec, fit_gauss_dict)
 
-        # process result
-        pt_dict = ret_obj
-        assert isinstance(pt_dict, dict)
+        return
 
-        # clear table
-        if self.ui.tableWidget_peakIntegration.rowCount() > 0:
-            self.ui.tableWidget_peakIntegration.remove_all_rows()
+    def plot_model_data(self, vec_x, params):
+        """
+        calculate the Y value by the model and plot them.
+        the sparse X values will be expanded
+        :return:
+        """
+        # check inputs
+        assert isinstance(vec_x, numpy.ndarray), 'vec X {0} must be a numpy.ndarray but not a {1}.' \
+                                                 ''.format(vec_x, type(vec_x))
+        assert isinstance(params, dict), 'Model parameters {0} must be given by a dictionary but not by a {1}.' \
+                                         ''.format(params, type(params))
 
-        # Append new lines
-        pt_list = sorted(pt_dict.keys())
-        intensity_list = list()
-        for pt in pt_list:
-            pt_intensity = pt_dict[pt]
-            intensity_list.append(pt_intensity)
-            status, msg = self.ui.tableWidget_peakIntegration.append_pt(pt, -1, pt_intensity)
-            if not status:
-                error_msg = '[Error!] Unable to add Pt %d due to %s.' % (pt, msg)
-                self.pop_one_button_dialog(error_msg)
+        # get parameters
+        x0 = params['x0']
+        gauss_sigma = params['s']
+        gauss_a = params['A']
+        background = params['B']
+        info_str = 'Gaussian fit'
 
-        # Set up the experiment information to table
-        self.ui.tableWidget_peakIntegration.set_exp_info(exp_number, scan_number)
+        # plot the data
+        # make modelX and modelY for more fine grids
+        model_x = peak_integration_utility.get_finer_grid(vec_x, 10)
+        model_y = peak_integration_utility.gaussian_linear_background(model_x, x0, gauss_sigma,
+                                                                      gauss_a, background)
 
-        # Clear previous line and plot the Pt.
-        self.ui.graphicsView_integratedPeakView.clear_all_lines()
-        x_array = numpy.array(pt_list)
-        y_array = numpy.array(intensity_list)
-        self.ui.graphicsView_integratedPeakView.add_plot_1d(x_array, y_array,
-                                                            color='blue')
-        self.ui.graphicsView_integratedPeakView.set_smart_y_limit(y_array)
+        # plot the model
+        self.ui.graphicsView_integratedPeakView.plot_model(model_x, model_y, title=info_str)
+
+        return
+
+    def do_integrate_roi(self):
+        """
+        integrate the detector counts in the region of interest (2D) along axis-0 and axis-1 respectively.
+        and save the result (1D data) to file
+        :return:
+        """
+        exp_number = str(self.ui.lineEdit_exp.text())
+        scan_number = str(self.ui.lineEdit_run.text())
+        pt_number = str(self.ui.lineEdit_rawDataPtNo.text())
+        working_dir = str(self.ui.lineEdit_workDir.text())
+
+        msg = self.ui.graphicsView_detector2dPlot.integrate_roi_linear(exp_number, scan_number, pt_number, working_dir)
+
+        self.pop_one_button_dialog(msg)
 
         return
 
     def do_integrate_peaks(self):
-        """ Integrate selected peaks tab-merged scans.
+        """ Integrate selected peaks tab-'scan processing'.
         If any scan is not merged, then it will merge the scan first.
         Integrate peaks from the table of merged peak.
         It will so the simple cuboid integration with region of interest and background subtraction.
@@ -1445,8 +1594,9 @@ class MainWindow(QtGui.QMainWindow):
             if status is False:
                 error_msg = 'Unable to get Pt. of experiment %d scan %d due to %s.' % (exp_number, scan_number,
                                                                                        str(pt_number_list))
-                self.controller.set_peak_intensity(exp_number, scan_number, 0.)
-                self.ui.tableWidget_mergeScans.set_peak_intensity(row_number, scan_number, 0., False)
+                self.controller.set_zero_peak_intensity(exp_number, scan_number)
+                self.ui.tableWidget_mergeScans.set_peak_intensity(row_number, scan_number, 0., False,
+                                                                  integrate_method='')
                 self.ui.tableWidget_mergeScans.set_status(scan_number, error_msg)
                 continue
 
@@ -1499,7 +1649,8 @@ class MainWindow(QtGui.QMainWindow):
             if status is True:
                 hkl_value = ret_obj[0]
                 hkl_error = ret_obj[1]
-                self.ui.tableWidget_peaksCalUB.set_hkl(i_peak, hkl_value, hkl_error)
+                self.ui.tableWidget_peaksCalUB.set_hkl(i_peak, hkl_value, is_spice_hkl=False,
+                                                       error=hkl_error)
             else:
                 err_msg += ret_obj + '\n'
         # END-FOR
@@ -1512,8 +1663,8 @@ class MainWindow(QtGui.QMainWindow):
         self.ui.lineEdit_peaksIndexedBy.setText(IndexFromUB)
 
         # enable/disable push buttons
-        self.ui.pushButton_setHKL2Int.setEnabled(True)
-        self.ui.pushButton_undoSetToInteger.setEnabled(False)
+        # self.ui.pushButton_setHKL2Int.setEnabled(True)
+        # self.ui.pushButton_undoSetToInteger.setEnabled(True)
 
         return
 
@@ -1602,49 +1753,15 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def do_plot_pt_peak(self):
+    def evt_change_normalization(self):
         """
         Integrate Pt. vs integrated intensity of detectors of that Pt. if it is not calculated before
         and then plot pt vs. integrated intensity on
         :return:
         """
-        # Find out the current condition including (1) absolute (2) normalized by time
-        # (3) normalized by monitor counts
-        be_norm_str = str(self.ui.comboBox_ptCountType.currentText())
-
-        norm_by_time = False
-        norm_by_monitor = False
-
-        if be_norm_str.startswith('Absolute'):
-            # no normalization
-            pass
-        elif be_norm_str.count('Time') > 0:
-            # norm by time
-            norm_by_time = True
-        elif be_norm_str.count('Monitor') > 0:
-            # norm by monitor counts
-            norm_by_monitor = True
-        else:
-            # exception!
-            raise RuntimeError('Normalization mode %s is not supported.' % be_norm_str)
-
-        # Integrate peak if the integrated peak workspace does not exist
-        # get experiment number and scan number from the table
-        exp_number, scan_number = self.ui.tableWidget_peakIntegration.get_exp_info()
-
-        mask_name = str(self.ui.comboBox_maskNames2.currentText())
-        masked = not mask_name.startswith('No Mask')
-        has_integrated = self._myControl.has_integrated_peak(exp_number, scan_number, pt_list=None,
-                                                             normalized_by_monitor=norm_by_monitor,
-                                                             normalized_by_time=norm_by_time,
-                                                             masked=masked)
-
-        # integrate and/or plot
-        if has_integrated:
-            self.plot_pt_intensity()
-        else:
-            # VZ-FUTURE: implement this new method!
-            self.do_integrate_per_pt()
+        # integrate any how
+        # self.do_integrate_per_pt()
+        self.do_integrate_single_scan()
 
         return
 
@@ -1795,34 +1912,6 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def do_manual_bkgd(self):
-        """ Select background by moving indicator manually
-        :return:
-        """
-        if str(self.ui.pushButton_handPickBkgd.text()) == 'Customize Bkgd':
-            # get into customize background mode.  add an indicator to the line and make it movable
-            self.ui.graphicsView_integratedPeakView.add_background_indictor()
-
-            # modify the push buttons status
-            self.ui.pushButton_handPickBkgd.setText('Done')
-
-        elif str(self.ui.pushButton_handPickBkgd.text()) == 'Done':
-            # get out from the customize-background mode.  get the vertical indicator's position as background
-            background_value = self.ui.graphicsView_integratedPeakView.get_indicator_position(self._bkgdIndicatorKey)
-
-            # set the ground value to UI
-            self._myControl.set_background_value(background_value)
-            self.ui.lineEdit_bkgdValue.setText('%.7f' % background_value)
-
-            # modify the push button status
-            self.ui.pushButton_handPickBkgd.setText('Customize Bkgd')
-
-        else:
-            raise RuntimeError('Push button in state %s is not supported.' %
-                               str(self.ui.pushButton_handPickBkgd.text()))
-
-        return
-
     def do_mask_pt_2d(self):
         """ Mask a Pt and re-plot
         :return:
@@ -1840,11 +1929,14 @@ class MainWindow(QtGui.QMainWindow):
         # get the mask
         status, ret_obj = self._myControl.get_region_of_interest(exp, scan)
         if status is False:
+            # unable to get region of interest
             self.pop_one_button_dialog(ret_obj)
             return
+        else:
+            corner1, corner2 = ret_obj
 
         # create mask workspace
-        status, error = self._myControl.generate_mask_workspace(exp, scan, ret_obj[0], ret_obj[1])
+        status, error = self._myControl.generate_mask_workspace(exp, scan, corner1, corner2)
         if status is False:
             self.pop_one_button_dialog(error)
             return
@@ -1914,7 +2006,7 @@ class MainWindow(QtGui.QMainWindow):
         self._myControl.set_ub_matrix(exp_number=None, ub_matrix=ub_matrix)
 
         # Warning
-        self.pop_one_button_dialog('Data processing is long. Be patient!')
+        self.pop_one_button_dialog('Merging scans can take long time. Please be patient!')
 
         # Process
         row_number_list = self.ui.tableWidget_mergeScans.get_selected_rows(True)
@@ -1940,10 +2032,6 @@ class MainWindow(QtGui.QMainWindow):
                                                                 pt_num_list=[])
             # find peaks too
             status, ret_obj = self._myControl.find_peak(exp_number, scan_number)
-            if status:
-                peak_centre = ret_obj
-            else:
-                peak_centre = None
 
             # process output
             if status:
@@ -1954,15 +2042,38 @@ class MainWindow(QtGui.QMainWindow):
                 merge_status = 'Failed. Reason: %s' % ret_tup
                 merged_name = 'x'
 
-            # update table
-            self.ui.tableWidget_mergeScans.set_status(row_number, merge_status)
-            self.ui.tableWidget_mergeScans.set_ws_name(row_number, merged_name)
-            if peak_centre is not None:
-                self.ui.tableWidget_mergeScans.set_peak_centre(row_number, peak_centre)
+            # update table
+            self.ui.tableWidget_mergeScans.set_status(row_number, merge_status)
+            self.ui.tableWidget_mergeScans.set_ws_name(row_number, merged_name)
+            # if peak_centre is not None:
+            #     self.ui.tableWidget_mergeScans.set_peak_centre(row_number, peak_centre)
+
+            # Sleep for a while
+            time.sleep(0.1)
+        # END-FOR
+
+        return
+
+    def do_merge_scans_survey(self):
+        """
+        Merge each selected scans in the 'List Scans' tab to Q-sample space
+        :return:
+        """
+        # get the selected scans
+        scan_run_list = self.ui.tableWidget_surveyTable.get_selected_run_surveyed(required_size=None)
+        if len(scan_run_list) == 0:
+            self.pop_one_button_dialog('There is no run that is selected.')
+
+        # start to add scan/run to table
+        # Set table
+        scan_list = list()
+        for scan, pt in scan_run_list:
+            scan_list.append(scan)
+        scan_list.sort()
+        self.ui.tableWidget_mergeScans.append_scans(scans=scan_list, allow_duplicate_scans=False)
 
-            # Sleep for a while
-            time.sleep(0.1)
-        # END-FOR
+        # switch tab
+        self.ui.tabWidget.setCurrentIndex(MainWindow.TabPage['Scans Processing'])
 
         return
 
@@ -1986,6 +2097,26 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
+    def do_refine_ub_cal_indexed_peaks(self):
+        """
+        refine UB matrix by indexed peaks with HKL calculated
+        :return:
+        """
+        # refine UB matrix by indexed peak
+        peak_info_list = self._build_peak_info_list(zero_hkl=False, is_spice=False)
+
+        # Refine UB matrix
+        try:
+            self._myControl.refine_ub_matrix_indexed_peaks(peak_info_list)
+        except AssertionError as error:
+            self.pop_one_button_dialog(str(error))
+            return
+
+        # show result
+        self._show_refined_ub_result()
+
+        return
+
     def do_refine_ub_lattice(self):
         """
         Calculate UB matrix constrained by lattice parameters
@@ -2000,6 +2131,65 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
+    def load_project(self, project_file_name):
+        """
+        Load a saved project with all the setup loaded to memory
+        :param project_file_name:
+        :return:
+        """
+        assert isinstance(project_file_name, str), 'Project file name %s must be a string but not %s.' \
+                                                   '' % (str(project_file_name), type(project_file_name))
+        assert os.path.exists(project_file_name), 'Project file "%s" cannot be found.' % project_file_name
+
+        # load project
+        ui_dict = self._myControl.load_project(project_file_name)
+
+        # get experiment number and IPTS number
+        exp_number = int(ui_dict['exp number'])
+        self.ui.lineEdit_exp.setText(str(exp_number))
+        if 'ipts' in ui_dict and ui_dict['ipts'] is not None:
+            self.ui.lineEdit_iptsNumber.setText(str(ui_dict['ipts']))
+
+        # set the UI parameters to GUI
+        try:
+            self.ui.lineEdit_localSpiceDir.setText(ui_dict['local spice dir'])
+            self.ui.lineEdit_workDir.setText(ui_dict['work dir'])
+            self.ui.lineEdit_surveyStartPt.setText(ui_dict['survey start'])
+            self.ui.lineEdit_surveyEndPt.setText(ui_dict['survey stop'])
+
+            # now try to call some actions
+            self.do_apply_setup()
+            self.do_set_experiment()
+
+        except KeyError:
+            print '[Error] Some field cannot be found.'
+
+        # set experiment configurations
+        # set sample distance
+        if 'det_sample_distance' in ui_dict and ui_dict['det_sample_distance'] is not None:
+            det_sample_distance = float(ui_dict['det_sample_distance'])
+            self.ui.lineEdit_infoDetSampleDistance.setText(str(det_sample_distance))
+            self._myControl.set_default_detector_sample_distance(det_sample_distance)
+
+        # set user-specified wave length
+        if 'wave_length' in ui_dict and ui_dict['wave_length'] is not None:
+            wave_length = float(ui_dict['wave_length'])
+            self.ui.lineEdit_infoWavelength.setText(str(wave_length))
+            self._myControl.set_user_wave_length(exp_number, wave_length)
+
+        if 'det_center' in ui_dict and ui_dict['det_center'] is not None:
+            det_center_str = ui_dict['det_center'].strip()
+            terms = det_center_str.split(',')
+            center_row = int(terms[0].strip())
+            center_col = int(terms[1].strip())
+            self.ui.lineEdit_infoDetCenter.setText('{0}, {1}'.format(center_row, center_col))
+            self._myControl.set_detector_center(exp_number, center_row, center_col)
+
+        # TODO/ISSUE/NOW/TODAY - Shall pop out a dialog to notify the completion
+        print '[INFO] Project from file {0} is loaded.'.format(project_file_name)
+
+        return
+
     # add slot for UB refinement configuration window's signal to connect to
     @QtCore.pyqtSlot(int)
     def refine_ub_lattice(self, val):
@@ -2122,8 +2312,10 @@ class MainWindow(QtGui.QMainWindow):
         """
         # get experiment number
         status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp])
-        assert status, ret_obj
-        exp_number = ret_obj[0]
+        if not status:
+            raise RuntimeError(ret_obj)
+        else:
+            exp_number = ret_obj[0]
 
         # reset all rows back to SPICE HKL
         num_rows = self.ui.tableWidget_peaksCalUB.rowCount()
@@ -2172,73 +2364,6 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
-    def do_add_k_shift_vector(self):
-        """ Add a k-shift vector
-        :return:
-        """
-        # parse the k-vector
-        status, ret_obj = gutil.parse_float_editors([self.ui.lineEdit_kX, self.ui.lineEdit_kY, self.ui.lineEdit_kZ],
-                                                    allow_blank=False)
-        if status is False:
-            error_message = ret_obj
-            self.pop_one_button_dialog(error_message)
-            return
-        else:
-            k_x, k_y, k_z = ret_obj
-
-        # add to controller
-        k_index = self._myControl.add_k_shift_vector(k_x, k_y, k_z)
-
-        # add to table and combo-box
-        self.ui.tableWidget_kShift.add_k_vector(k_index, k_x, k_y, k_z)
-
-        combo_message = '%d: (%.5f, %.5f, %.5f)' % (k_index, k_x, k_y, k_z)
-        self.ui.comboBox_kVectors.addItem(combo_message)
-
-        return
-
-    def do_apply_k_shift(self):
-        """ Apply k-shift to selected reflections
-        :return:
-        """
-        # get the selected scans
-        scan_list = list()
-        selected_row_numbers = self.ui.tableWidget_mergeScans.get_selected_rows(True)
-        for row_index in selected_row_numbers:
-            scan_number = self.ui.tableWidget_mergeScans.get_scan_number(row_index)
-            scan_list.append(scan_number)
-
-        # get the k-vector
-        k_shift_message = str(self.ui.comboBox_kVectors.currentText())
-        k_index = int(k_shift_message.split(':')[0])
-
-        # set to controller
-        self._myControl.set_k_shift(scan_list, k_index)
-
-        # set to table
-        for row_index in selected_row_numbers:
-            self.ui.tableWidget_mergeScans.set_k_shift_index(row_index, k_index)
-
-        return
-
-    def do_apply_roi(self):
-        """ Save current selection of region of interest
-        :return:
-        """
-        lower_left_c, upper_right_c = self.ui.graphicsView_detector2dPlot.get_roi()
-        # at the very beginning, the lower left and upper right are same
-        if lower_left_c[0] == upper_right_c[0] or lower_left_c[1] == upper_right_c[1]:
-            return
-
-        status, par_val_list = gutil.parse_integers_editors([self.ui.lineEdit_exp, self.ui.lineEdit_run])
-        assert status, str(par_val_list)
-        exp_number = par_val_list[0]
-        scan_number = par_val_list[1]
-
-        self._myControl.set_roi(exp_number, scan_number, lower_left_c, upper_right_c)
-
-        return
-
     def do_save_survey(self):
         """
         Save the survey to a file
@@ -2291,19 +2416,33 @@ class MainWindow(QtGui.QMainWindow):
         Purpose: select all peaks in table tableWidget_peaksCalUB
         :return:
         """
-        if not self._ubPeakTableFlag:
-            # turn to deselect all
-            self.ui.tableWidget_peaksCalUB.select_all_rows(self._ubPeakTableFlag)
-        elif self.ui.checkBox_ubNuclearPeaks.isChecked() is False:
-            # all peaks are subjected to select
-            self.ui.tableWidget_peaksCalUB.select_all_rows(self._ubPeakTableFlag)
-        else:
-            # only nuclear peaks to get selected
-            self.ui.tableWidget_peaksCalUB.select_all_nuclear_peaks()
-        # END-IF-ELSE
+        if self.ui.radioButton_ubSelectAllScans.isChecked() and self._ubPeakTableFlag != 0:
+            self.ui.tableWidget_peaksCalUB.select_all_rows(True)
+            self._ubPeakTableFlag = 0
+        elif self.ui.radioButton_ubSelectNoScan.isChecked() and self._ubPeakTableFlag != 1:
+            self.ui.tableWidget_peaksCalUB.select_all_rows(False)
+            self._ubPeakTableFlag = 1
+        elif self.ui.radioButton_ubAdvancedSelection.isChecked() and self._ubPeakTableFlag != 2:
+            # advanced
+            import FindUBUtility
+            self._selectUBScanDialog = FindUBUtility.SelectUBMatrixScansDialog(self)
+            self._selectUBScanDialog.show()
+            self._ubPeakTableFlag = 2
+        # END-IF
 
-        # revert the flag
-        self._ubPeakTableFlag = not self._ubPeakTableFlag
+        # if not self._ubPeakTableFlag:
+        #     # turn to deselect all
+        #     self.ui.tableWidget_peaksCalUB.select_all_rows(self._ubPeakTableFlag)
+        # elif self.ui.checkBox_ubNuclearPeaks.isChecked() is False:
+        #     # all peaks are subjected to select
+        #     self.ui.tableWidget_peaksCalUB.select_all_rows(self._ubPeakTableFlag)
+        # else:
+        #     # only nuclear peaks to get selected
+        #     self.ui.tableWidget_peaksCalUB.select_all_nuclear_peaks()
+        # # END-IF-ELSE
+        #
+        # # revert the flag
+        # self._ubPeakTableFlag = not self._ubPeakTableFlag
 
         return
 
@@ -2354,10 +2493,64 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
+    def do_set_detector_size(self):
+        """
+        set the detector size to controller
+        :return:
+        """
+        det_size_str = str(self.ui.comboBox_detectorSize.currentText())
+
+        if det_size_str.count('256') > 0:
+            # 256 by 256 pixels
+            det_size_row = 256
+            det_size_col = 256
+        elif det_size_str.count('512') > 0:
+            # 512 x 512
+            det_size_row = 512
+            det_size_col = 512
+        else:
+            # unsupported case yet
+            raise NotImplementedError('Detector with size {0} is not supported yet.'.format(det_size_str))
+
+        # set to controller
+        self._myControl.set_detector_geometry(det_size_row, det_size_col)
+
+        return
+
+    def do_set_ipts_number(self):
+        """
+        set IPTS number
+        :return:
+        """
+        # get IPTS number
+        status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_iptsNumber])
+        if status:
+            # a valid IPTS number
+
+            raise NotImplementedError('The following section commented out now will be implemented when IPTS is ready.')
+            # ipts_number = ret_obj[0]
+            # search archive for available experiment number under this IPTS
+            # status, ret_obj = self._myControl.check_ipts(ipts_number=ipts_number)
+            # if status:
+            #     exp_number_list = ret_obj
+            #     self._iptsNumber = ipts_number
+            #     self.ui.comboBox_expInIPTS.clear()
+            #     for exp_number in exp_number_list:
+            #         self.ui.comboBox_expInIPTS.addItem(str(exp_number))
+            # else:
+            #     self.pop_one_button_dialog('Unable to locate IPTS {0} due to {1}'.format(ipts_number, ret_obj))
+            #     return
+        else:
+            # error
+            self.pop_one_button_dialog('User specified IPTS number {0} is not correct.'
+                                       ''.format(str(self.ui.lineEdit_iptsNumber.text())))
+            return
+
     def do_set_experiment(self):
         """ Set experiment
         :return:
         """
+        # get exp number
         status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp])
         if status:
             # new experiment number
@@ -2370,12 +2563,18 @@ class MainWindow(QtGui.QMainWindow):
             # set the new experiment number
             self._myControl.set_exp_number(exp_number)
             self.ui.lineEdit_exp.setStyleSheet('color: black')
-            self.setWindowTitle('%s: Experiment %d' % (self._baseTitle, exp_number))
+            self.setWindowTitle('Experiment %d' % exp_number)
 
             # try to set the default
-            default_data_dir = '/HFIR/HB3A/exp%d/Datafiles' % exp_number
+            if self._iptsNumber is not None:
+                default_data_dir = '/HFIR/HB3A/IPTS-{0}/exp{1}/Datafiles'.format(self._iptsNumber, exp_number)
+            else:
+                default_data_dir = '/HFIR/HB3A/exp{0}/Datafiles'.format(exp_number)
             if os.path.exists(default_data_dir):
+                # set the directory in
                 self.ui.lineEdit_localSpiceDir.setText(default_data_dir)
+                # find out the detector type
+                status, ret_obj = self._myControl.find_detector_size(default_data_dir, exp_number)
 
         else:
             err_msg = ret_obj
@@ -2418,19 +2617,62 @@ class MainWindow(QtGui.QMainWindow):
         Change to these HKL values is only related to GUI, i.e., the table
         :return:
         """
+        # get the current index source
+        hkl_type_str = str(self.ui.comboBox_hklType.currentText())
+        if hkl_type_str.lower().count('spice') > 0:
+            # set spice HKL to integer
+            is_spice = True
+        else:
+            is_spice = False
+
         # store the current value
         self.ui.tableWidget_peaksCalUB .store_current_indexing()
 
         # set the index to integer
         num_rows = self.ui.tableWidget_peaksCalUB.rowCount()
         for row_index in range(num_rows):
-            m_h, m_l, m_k = self.ui.tableWidget_peaksCalUB.get_hkl(row_index)
-            peak_indexing, round_error = hb3a_util.convert_hkl_to_integer(m_h, m_l, m_k, MAGNETIC_TOL)
-            self.ui.tableWidget_peaksCalUB.set_hkl(row_index, peak_indexing, round_error)
+            try:
+                m_h, m_l, m_k = self.ui.tableWidget_peaksCalUB.get_hkl(row_index, is_spice_hkl=is_spice)
+                peak_indexing, round_error = hb3a_util.convert_hkl_to_integer(m_h, m_l, m_k, MAGNETIC_TOL)
+                self.ui.tableWidget_peaksCalUB.set_hkl(row_index, peak_indexing, is_spice, round_error)
+            except RuntimeError as run_err:
+                scan_number, pt_number = self.ui.tableWidget_peaksCalUB.get_scan_pt(row_index)
+                print '[ERROR] Unable to convert HKL to integer for scan {0} due to {1}.' \
+                      ''.format(scan_number, run_err)
+        # END-FOR
 
         # disable the set to integer button and enable the revert/undo button
-        self.ui.pushButton_setHKL2Int.setEnabled(False)
-        self.ui.pushButton_undoSetToInteger.setEnabled(True)
+        # self.ui.pushButton_setHKL2Int.setEnabled(False)
+        # self.ui.pushButton_undoSetToInteger.setEnabled(True)
+
+        return
+
+    def do_toggle_table_integration(self):
+        """
+        change the type
+        :return:
+        """
+        exp_number = int(self.ui.lineEdit_exp.text())
+
+        integrate_type = self.ui.tableWidget_mergeScans.get_integration_type()
+        if integrate_type == 'simple':
+            integrate_type = 'mixed'
+        elif integrate_type == 'mixed':
+            integrate_type = 'gaussian'
+        else:
+            integrate_type = 'simple'
+
+        for i_row in range(self.ui.tableWidget_mergeScans.rowCount()):
+            scan_number = self.ui.tableWidget_mergeScans.get_scan_number(i_row)
+            peak_info_obj = self._myControl.get_peak_info(exp_number, scan_number)
+            try:
+                intensity1, error1 = peak_info_obj.get_intensity(integrate_type, False)
+                intensity2, error2 = peak_info_obj.get_intensity(integrate_type, True)
+                self.ui.tableWidget_mergeScans.set_peak_intensity(i_row, intensity1, intensity2, error2, integrate_type)
+            except RuntimeError as run_err:
+                print '[ERROR] Unable to get peak intensity of scan {0} due to {1}.' \
+                      ''.format(self.ui.tableWidget_mergeScans.get_scan_number(i_row), run_err)
+        # END-FOR
 
         return
 
@@ -2439,12 +2681,19 @@ class MainWindow(QtGui.QMainWindow):
         After the peaks' indexing are set to integer, undo the action (i.e., revert to the original value)
         :return:
         """
+        # get the column
+        hkl_type = str(self.ui.comboBox_hklType.currentText())
+        if hkl_type.lower().count('spice') > 0:
+            is_spice = True
+        else:
+            is_spice = False
+
         # restore the value
-        self.ui.tableWidget_peaksCalUB.restore_cached_indexing()
+        self.ui.tableWidget_peaksCalUB.restore_cached_indexing(is_spice)
 
         # enable and disable the buttons
-        self.ui.pushButton_setHKL2Int.setEnabled(True)
-        self.ui.pushButton_undoSetToInteger.setEnabled(False)
+        # self.ui.pushButton_setHKL2Int.setEnabled(True)
+        # self.ui.pushButton_undoSetToInteger.setEnabled(False)
 
         return
 
@@ -2638,6 +2887,38 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
+    def do_show_integration_details(self):
+        """
+        show the details (in table) about the integration of scans
+        :return:
+        """
+        import PeaksIntegrationReport
+
+        # check whether the integration information table
+        if self._peakIntegrationInfoWindow is None:
+            self._peakIntegrationInfoWindow = PeaksIntegrationReport.PeaksIntegrationReportDialog(self)
+
+        # show
+        self._peakIntegrationInfoWindow.show()
+
+        # report
+        report_dict = self.generate_peaks_integration_report()
+        self._peakIntegrationInfoWindow.set_report(report_dict)
+
+        return
+
+    def do_show_single_peak_integration(self):
+        """
+        pop out a dialog box to show the detailed integration information
+        :return:
+        """
+        if self._mySinglePeakIntegrationDialog is None:
+            self._mySinglePeakIntegrationDialog = message_dialog.MessageDialog(self)
+
+        self._mySinglePeakIntegrationDialog.show()
+
+        return
+
     def do_show_spice_file(self):
         """
         Show SPICE file in a window
@@ -2679,8 +2960,9 @@ class MainWindow(QtGui.QMainWindow):
         try:
             scan_number = int(str(self.ui.lineEdit_run.text()))
         except ValueError as val_err:
-            self.pop_one_button_dialog('Scan number %s in raw-data-view-tab is invalid. Error: %s'
-                                       '' % str(self.ui.lineEdit_run.text()), str(val_err))
+            error_msg = 'Scan number {0} in raw-data-view-tab is invalid. FYI: {1}.' \
+                        ''.format(self.ui.lineEdit_run.text(), val_err)
+            self.pop_one_button_dialog(error_msg)
             return
 
         # get spice file
@@ -2874,10 +3156,11 @@ class MainWindow(QtGui.QMainWindow):
 
     def do_view_data_3d(self):
         """
-        View merged scan data in 3D after FindPeaks
+        View merged scan data in 3D after FindPeaks.
         :return:
         """
         # get experiment and scan number
+        scan_number = self.ui.tableWidget_peaksCalUB.get_selected_scans()
         status, ret_obj = gutil.parse_integers_editors([self.ui.lineEdit_exp,
                                                         self.ui.lineEdit_scanNumber])
         if status:
@@ -3012,6 +3295,10 @@ class MainWindow(QtGui.QMainWindow):
             return
 
         max_number = int(self.ui.lineEdit_numSurveyOutput.text())
+
+        # ignore the situation that this line edit is cleared
+        if max_number <= 0:
+            return
         if max_number != self.ui.tableWidget_surveyTable.rowCount():
             # re-show survey
             self.ui.tableWidget_surveyTable.remove_all_rows()
@@ -3019,6 +3306,30 @@ class MainWindow(QtGui.QMainWindow):
 
         return
 
+    def generate_peaks_integration_report(self):
+        """
+        generate a report for all integrated peaks
+        :return:
+        """
+        # get experiment number
+        exp_number = int(self.ui.lineEdit_exp.text())
+
+        # get all the selected peaks from table
+        row_number_list = self.ui.tableWidget_mergeScans.get_selected_rows()
+
+        # collection all the information
+        report_dict = dict()
+        print '[DB] Selected rows: {0}'.format(row_number_list)
+        for row_number in row_number_list:
+            scan_number = self.ui.tableWidget_mergeScans.get_scan_number(row_number)
+            peak_info = self._myControl.get_peak_info(exp_number, scan_number)
+            peak_integrate_dict = peak_info.generate_integration_report()
+            report_dict[scan_number] = peak_integrate_dict
+            print '[DB] Report Scan {0}. Keys: {1}'.format(scan_number, peak_integrate_dict.keys())
+        # END-FOR
+
+        return report_dict
+
     def get_ub_from_text(self):
         """ Purpose: Set UB matrix in use from plain text edit plainTextEdit_ubInput.
         Requirements:
@@ -3089,58 +3400,7 @@ class MainWindow(QtGui.QMainWindow):
         self._myControl.set_working_directory(str(self.ui.lineEdit_workDir.text()))
         self._myControl.set_server_url(str(self.ui.lineEdit_url.text()))
 
-        return
-
-    def ui_apply_lorentz_correction_mt(self):
-        """
-        Apply Lorentz corrections to the integrated peak intensities of all the selected peaks
-        at the UI level
-        :return:
-        """
-        # get experiment number
-        exp_number = int(self.ui.lineEdit_exp.text())
-
-        # select rows
-        selected_rows = self.ui.tableWidget_mergeScans.get_selected_rows(True)
-
-        # apply for each row selected for Lorentz correction
-        error_message = ''
-        for row_number in selected_rows:
-            # get scan number
-            scan_number = self.ui.tableWidget_mergeScans.get_scan_number(row_number)
-            # get peak information object
-            peak_info_obj = self._myControl.get_peak_info(exp_number, scan_number)
-            if peak_info_obj is None:
-                error_message += 'Unable to get peak information from scan %d\n' % scan_number
-                continue
-            # get intensity
-            peak_intensity = peak_info_obj.get_intensity()
-            # get Q-vector of the peak center and calculate |Q| from it
-            q = peak_info_obj.get_peak_centre_v3d().norm()
-            # get wave length
-            wavelength = self._myControl.get_wave_length(exp_number, [scan_number])
-            self.ui.tableWidget_mergeScans.set_wave_length(row_number, wavelength)
-            # get motor step (choose from omega, phi and chi)
-            try:
-                motor_move_tup = self._myControl.get_motor_step(exp_number, scan_number)
-            except RuntimeError as run_err:
-                self.ui.tableWidget_mergeScans.set_status(row_number, str(run_err))
-                continue
-            except AssertionError as ass_err:
-                self.ui.tableWidget_mergeScans.set_status(row_number, str(ass_err))
-                continue
-            # set motor information (the moving motor)
-            self.ui.tableWidget_mergeScans.set_motor_info(row_number, motor_move_tup)
-            motor_step = motor_move_tup[1]
-            # apply the Lorentz correction to the intensity
-            corrected = self._myControl.apply_lorentz_correction(peak_intensity, q, wavelength, motor_step)
-
-            self.ui.tableWidget_mergeScans.set_peak_intensity(row_number, corrected, lorentz_corrected=True)
-            self._myControl.set_peak_intensity(exp_number, scan_number, corrected)
-        # END-FOR (row_number)
-
-        if len(error_message) > 0:
-            self.pop_one_button_dialog(error_message)
+        print '[INFO] Session {0} has been loaded.'.format(filename)
 
         return
 
@@ -3181,7 +3441,6 @@ class MainWindow(QtGui.QMainWindow):
 
         # Experiment
         save_dict['lineEdit_exp'] = str(self.ui.lineEdit_exp.text())
-        save_dict['lineEdit_scanNumber'] = self.ui.lineEdit_scanNumber.text()
 
         # Lattice
         save_dict['lineEdit_a'] = str(self.ui.lineEdit_a.text())
@@ -3193,7 +3452,6 @@ class MainWindow(QtGui.QMainWindow):
 
         # Merge scan
         save_dict['lineEdit_listScansSliceView'] = str(self.ui.lineEdit_listScansSliceView.text())
-        save_dict['lineEdit_baseMergeMDName'] = str(self.ui.lineEdit_baseMergeMDName.text())
 
         # Save to csv file
         if filename is None:
@@ -3269,11 +3527,12 @@ class MainWindow(QtGui.QMainWindow):
         h, k, l = peak_info.get_hkl(user_hkl=False)
         q_x, q_y, q_z = peak_info.get_peak_centre()
         m1 = self._myControl.get_sample_log_value(exp_number, scan_number, 1, '_m1')
+        # TODO/ISSUE/NOW consider user specified
         wave_length = hb3a_util.convert_to_wave_length(m1_position=m1)
 
         # Set to table
-        status, err_msg = self.ui.tableWidget_peaksCalUB.append_row(
-            [scan_number, -1, h, k, l, q_x, q_y, q_z, False, m1, wave_length, ''])
+        status, err_msg = self.ui.tableWidget_peaksCalUB.add_peak(scan_number, (h, k, l), (q_x, q_y, q_z), m1,
+                                                                  wave_length)
         if status is False:
             self.pop_one_button_dialog(err_msg)
 
@@ -3310,6 +3569,19 @@ class MainWindow(QtGui.QMainWindow):
         lattice_gamma = str(self.ui.lineEdit_gamma.text())
         settings.setValue('gamma', lattice_gamma)
 
+        # calibrated instrument configurations
+        user_wave_length = str(self.ui.lineEdit_userWaveLength.text())
+        settings.setValue('wave_length', user_wave_length)
+
+        det_row_center = str(self.ui.lineEdit_detCenterPixHorizontal.text())
+        settings.setValue('row_center', det_row_center)
+
+        det_col_center = str(self.ui.lineEdit_detCenterPixVertical.text())
+        settings.setValue('col_center', det_col_center)
+
+        det_sample_distance_str = str(self.ui.lineEdit_userDetSampleDistance.text())
+        settings.setValue('det_sample_distance', det_sample_distance_str)
+
         # last project
         last_1_project_path = str(self.ui.label_last1Path.text())
         settings.setValue('last1path', last_1_project_path)
@@ -3348,6 +3620,19 @@ class MainWindow(QtGui.QMainWindow):
             lattice_gamma = settings.value('gamma')
             self.ui.lineEdit_gamma.setText(str(lattice_gamma))
 
+            # calibrated instrument configurations
+            user_wave_length = settings.value('wave_length')
+            self.ui.lineEdit_userWaveLength.setText(user_wave_length)
+
+            det_row_center = settings.value('row_center')
+            self.ui.lineEdit_detCenterPixHorizontal.setText(det_row_center)
+
+            det_col_center = settings.value('col_center')
+            self.ui.lineEdit_detCenterPixVertical.setText(det_col_center)
+
+            det_sample_distance = settings.value('det_sample_distance')
+            self.ui.lineEdit_userDetSampleDistance.setText(det_sample_distance)
+
             # last project
             last_1_project_path = str(settings.value('last1path'))
             self.ui.label_last1Path.setText(last_1_project_path)
@@ -3417,8 +3702,15 @@ class MainWindow(QtGui.QMainWindow):
         raw_det_data = self._myControl.get_raw_detector_counts(exp_no, scan_no, pt_no)
         # raw_det_data = numpy.rot90(raw_det_data, 1)
         self.ui.graphicsView_detector2dPlot.clear_canvas()
-        # TODO/FIXME - changed to 512 from 256 as prototype.  Should be via configuration
-        self.ui.graphicsView_detector2dPlot.add_plot_2d(raw_det_data, x_min=0, x_max=512, y_min=0, y_max=512,
+        # get the configuration of detector from GUI
+        #  FIXME/TODO/ISSUE/NOW/TODAY - use the detector size wrong!
+        if 0:
+            ret_obj = gutil.parse_integer_list(str(self.ui.lineEdit_detectorGeometry.text()), expected_size=2)
+            x_max, y_max = ret_obj
+        else:
+            x_max, y_max = 256, 256
+
+        self.ui.graphicsView_detector2dPlot.add_plot_2d(raw_det_data, x_min=0, x_max=x_max, y_min=0, y_max=y_max,
                                                         hold_prev_image=False)
         status, roi = self._myControl.get_region_of_interest(exp_no, scan_number=None)
         if status:
@@ -3481,53 +3773,53 @@ class MainWindow(QtGui.QMainWindow):
 
         elif mode == 1:
             # receive signal from the end of processing one peak: complete the row
-            # get row number
+            # get the peak object
+            peak_info_obj = self._myControl.get_peak_info(exp_number, scan_number)
+
+            # get row number to set up the table
             try:
                 row_number = self.ui.tableWidget_mergeScans.get_row_by_scan(scan_number)
             except RuntimeError as run_err:
-                self.pop_one_button_dialog(str(run_err))
-                return
+                raise RuntimeError('Unable to find scan {0} in Peak-Processing table due to {1}.'
+                                   ''.format(scan_number, run_err))
 
-            # gather values for updating
-            intensity = sig_value
+            # get peak: simple summation intensity
+            intensity, int_std_dev = peak_info_obj.get_intensity('simple intensity', False)
 
             # check intensity value
             is_error = False
             if intensity < 0:
                 # set to status
-                error_msg = 'Negative intensity: %.3f' % intensity
+                error_msg = 'Negative intensity (simple): %.3f' % intensity
                 self.ui.tableWidget_mergeScans.set_status(row_number=row_number, status=error_msg)
                 # reset intensity to 0.
                 intensity = 0.
                 is_error = True
-
-            if len(peak_centre) != 3:
-                self.pop_one_button_dialog('Peak centre %s is not correct.' % str(peak_centre))
-                return
-
-            # set the calculated peak intensity to _peakInfoDict
-            status, error_msg = self._myControl.set_peak_intensity(exp_number, scan_number, intensity)
-            if status:
-                # set the value to table
-                self.ui.tableWidget_mergeScans.set_peak_intensity(row_number=row_number,
-                                                                  peak_intensity=intensity,
-                                                                  lorentz_corrected=False)
-                self.ui.tableWidget_mergeScans.set_peak_centre(row_number=row_number,
-                                                               peak_centre=peak_centre)
-                if is_error:
-                    self.ui.tableWidget_mergeScans.set_status(row_number, 'Intensity Error')
-                else:
-                    self.ui.tableWidget_mergeScans.set_status(row_number, 'Good')
-
+                int_std_dev = 0.
+
+            # get the corrected value
+            corrected_intensity = intensity * peak_info_obj.lorentz_correction_factor
+            corrected_std_dev = int_std_dev * peak_info_obj.lorentz_correction_factor
+
+            # status, error_msg = self._myControl.set_peak_intensity(exp_number, scan_number, intensity)
+            # if status:
+            #     # set the value to table
+            self.ui.tableWidget_mergeScans.set_peak_intensity(row_number=row_number,
+                                                              peak_intensity=intensity,
+                                                              corrected_intensity=corrected_intensity,
+                                                              standard_error=corrected_std_dev,
+                                                              integrate_method='simple')
+
+            if is_error:
+                self.ui.tableWidget_mergeScans.set_status(row_number, 'Integration Error')
             else:
-                self._errorMessageEnsemble += error_msg + '\n'
-                self.ui.tableWidget_mergeScans.set_status(row_number, error_msg)
+                self.ui.tableWidget_mergeScans.set_status(row_number, 'Integrated')
 
         elif mode == 2:
             # get signal from the end of all selected scans being integrated
 
             # apply Lorentz correction
-            self.ui_apply_lorentz_correction_mt()
+            # self.ui_apply_lorentz_correction_mt()
 
             # set progress bar
             progress = int(sig_value+0.5)
@@ -3581,12 +3873,12 @@ class MainWindow(QtGui.QMainWindow):
         # set intensity, state to table
         if mode == 0:
             # error message
-            self.ui.tableWidget_mergeScans.set_peak_intensity(row_number=row_number, peak_intensity=0.,
-                                                              lorentz_corrected=False)
+            self.ui.tableWidget_mergeScans.set_peak_intensity(row_number, peak_intensity=0., corrected_intensity=0.,
+                                                              standard_error=0., integrate_method='simple')
             self.ui.tableWidget_mergeScans.set_status(row_number=row_number, status=message)
 
             # set peak value
-            status, ret_message = self._myControl.set_peak_intensity(exp_number, scan_number, 0.)
+            status, ret_message = self._myControl.set_zero_peak_intensity(exp_number, scan_number)
             if not status:
                 self.pop_one_button_dialog(ret_message)
 
@@ -3615,12 +3907,23 @@ class MainWindow(QtGui.QMainWindow):
         peak_info = self._myControl.get_peak_info(exp_number, scan_number)
         assert isinstance(peak_info, r4c.PeakProcessRecord)
 
-        # retrieve and set HKL from spice table
-        # peak_info.retrieve_hkl_from_spice_table()
-
         # add to table
         self.set_ub_peak_table(peak_info)
 
         return
 
-    # END-OF-DEFINITION (MainWindow)
+    @property
+    def ub_matrix_processing_table(self):
+        """
+        return the handler to the UB matrix
+        :return:
+        """
+        return self.ui.tableWidget_peaksCalUB
+
+    @property
+    def working_directory(self):
+        """
+        return the current working directory
+        :return:
+        """
+        return self._myControl._workDir
diff --git a/scripts/Inelastic/CrystalField/fitting.py b/scripts/Inelastic/CrystalField/fitting.py
index 99d37f2799514a017c6fdac467ddbbcd71b11e60..14ea524248f6cbffa1a2b328ac1cd6e279ad396a 100644
--- a/scripts/Inelastic/CrystalField/fitting.py
+++ b/scripts/Inelastic/CrystalField/fitting.py
@@ -113,50 +113,53 @@ class CrystalField(object):
                                           Note that physical properties datasets should follow inelastic spectra
                                           See the Crystal Field Python Interface help page for more details.
         """
-        from .function import PeaksFunction
+
+        self._background = None
+
+        if 'Temperature' in kwargs:
+            temperature = kwargs['Temperature']
+            del kwargs['Temperature']
+        else:
+            temperature = -1
+
+        # Create self.function attribute
+        self._makeFunction(Ion, Symmetry, temperature)
+        self.Temperature = temperature
         self.Ion = Ion
-        self._symmetry = Symmetry
-        self._toleranceEnergy = 1e-10
-        self._toleranceIntensity = 1e-1
-        self._fieldParameters = {}
-        self._fieldTies = {}
-        self._fieldConstraints = []
-        self._temperature = None
-        self._FWHM = None
-        self._intensityScaling = None
+        self.Symmetry = Symmetry
         self._resolutionModel = None
-        self._fwhmVariation = None
-        self._fixAllPeaks = False
         self._physprop = None
 
+        free_parameters = []
         for key in kwargs:
             if key == 'ToleranceEnergy':
-                self._toleranceEnergy = kwargs[key]
+                self.ToleranceEnergy = kwargs[key]
             elif key == 'ToleranceIntensity':
-                self._toleranceIntensity = kwargs[key]
+                self.ToleranceIntensity = kwargs[key]
             elif key == 'IntensityScaling':
-                self._intensityScaling = kwargs[key]
+                self.IntensityScaling = kwargs[key]
             elif key == 'FWHM':
-                self._FWHM = kwargs[key]
+                self.FWHM = kwargs[key]
             elif key == 'ResolutionModel':
                 self.ResolutionModel = kwargs[key]
-            elif key == 'Temperature':
-                self._temperature = kwargs[key]
+            elif key == 'NPeaks':
+                self.NPeaks = kwargs[key]
             elif key == 'FWHMVariation':
-                self._fwhmVariation = kwargs[key]
+                self.FWHMVariation = kwargs[key]
             elif key == 'FixAllPeaks':
-                self._fixAllPeaks = kwargs[key]
+                self.FixAllPeaks = kwargs[key]
             elif key == 'PhysicalProperty':
-                self._physprop = kwargs[key]
+                self.PhysicalProperty = kwargs[key]
             else:
                 # Crystal field parameters
-                self._fieldParameters[key] = kwargs[key]
+                self.function.setParameter(key, kwargs[key])
+                free_parameters.append(key)
 
-        if isinstance(self._temperature, list) or isinstance(self._temperature, np.ndarray):
-            self.peaks = [PeaksFunction(firstIndex=1) for _ in self._temperature]
-        else:
-            self.peaks = PeaksFunction()
-        self.background = None
+        for param in CrystalField.field_parameter_names:
+            if param not in free_parameters:
+                self.function.fixParameter(param)
+
+        self._setPeaks()
 
         # Eigensystem
         self._dirty_eigensystem = True
@@ -173,67 +176,70 @@ class CrystalField(object):
         self._spectra = {}
         self._plot_window = {}
 
-        self._setDefaultTies()
+        # self._setDefaultTies()
         self.chi2 = None
 
+    def _makeFunction(self, ion, symmetry, temperature):
+        from mantid.simpleapi import FunctionFactory
+        if temperature is not None and islistlike(temperature) and len(temperature) > 1:
+            self.function = FunctionFactory.createFunction('CrystalFieldMultiSpectrum')
+            self._isMultiSpectrum = True
+            tempStr = 'Temperatures'
+        else:
+            self.function = FunctionFactory.createFunction('CrystalFieldSpectrum')
+            self._isMultiSpectrum = False
+            tempStr = 'Temperature'
+        self.function.setAttributeValue('Ion', ion)
+        self.function.setAttributeValue('Symmetry', symmetry)
+        if temperature:
+            temperature = [float(val) for val in temperature] if islistlike(temperature) else float(temperature)
+            self.function.setAttributeValue(tempStr, temperature)
+
+    def _remakeFunction(self, temperature):
+        """Redefines the internal function, e.g. when `Temperature` (number of datasets) change"""
+        fieldParams = self._getFieldParameters()
+        self._makeFunction(self.Ion, self.Symmetry, temperature)
+        for item in fieldParams.items():
+            self.function.setParameter(item[0], item[1])
+        for param in CrystalField.field_parameter_names:
+            if param not in fieldParams.keys():
+                self.function.fixParameter(param)
+
+    def _setPeaks(self):
+        from .function import PeaksFunction
+        if self._isMultiSpectrum:
+            self._peaks = []
+            for i in range(self.NumberOfSpectra):
+                self._peaks.append(PeaksFunction(self.crystalFieldFunction, 'f%s.' % i, 1))
+        else:
+            self._peaks = PeaksFunction(self.crystalFieldFunction, '', 0)
+
+    @property
+    def crystalFieldFunction(self):
+        if not self._isMultiSpectrum and self.background is not None:
+            return self.function[1]
+        else:
+            return self.function
+
     def makePeaksFunction(self, i):
         """Form a definition string for the CrystalFieldPeaks function
         @param i: Index of a spectrum.
         """
         temperature = self._getTemperature(i)
-        out = 'name=CrystalFieldPeaks,Ion=%s,Symmetry=%s,Temperature=%s' % (self._ion, self._symmetry, temperature)
-        out += ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self._toleranceEnergy, self._toleranceIntensity)
-        out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
+        out = 'name=CrystalFieldPeaks,Ion=%s,Symmetry=%s,Temperature=%s' % (self.Ion, self.Symmetry, temperature)
+        out += ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self.ToleranceEnergy, self.ToleranceIntensity)
+        out += ',%s' % ','.join(['%s=%s' % item for item in self._getFieldParameters().items()])
         return out
 
     def makeSpectrumFunction(self, i=0):
         """Form a definition string for the CrystalFieldSpectrum function
         @param i: Index of a spectrum.
         """
-        from .function import Background
-        temperature = self._getTemperature(i)
-        out = 'name=CrystalFieldSpectrum,Ion=%s,Symmetry=%s,Temperature=%s' % (self._ion, self._symmetry, temperature)
-        out += ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self._toleranceEnergy, self._toleranceIntensity)
-        out += ',FixAllPeaks=%s' % (1 if self._fixAllPeaks else 0)
-        out += ',PeakShape=%s' % self.getPeak(i).name
-        if self._intensityScaling is not None:
-            out += ',IntensityScaling=%s' % self._getIntensityScaling(i)
-        if self._FWHM is not None:
-            out += ',FWHM=%s' % self._getFWHM(i)
-        if len(self._fieldParameters) > 0:
-            out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
-        if self._resolutionModel is not None:
-            if self._resolutionModel.multi:
-                model = self._resolutionModel.model[i]
-            else:
-                model = self._resolutionModel.model
-            out += ',FWHMX=%s,FWHMY=%s' % tuple(map(tuple, model))
-            if self._fwhmVariation is not None:
-                out += ',FWHMVariation=%s' % self._fwhmVariation
-
-        peaks = self.getPeak(i)
-        params = peaks.paramString('', 0)
-        if len(params) > 0:
-            out += ',%s' % params
-        ties = peaks.tiesString()
-        if len(ties) > 0:
-            out += ',%s' % ties
-        constraints = peaks.constraintsString()
-        if len(constraints) > 0:
-            out += ',%s' % constraints
-        if self.background is not None:
-            if isinstance(self.background, Background):
-                bgOut = self.background.toString()
-            else:
-                bgOut = self.background[i].toString()
-            out = '%s;%s' % (bgOut, out)
-        ties = self.getFieldTies()
-        if len(ties) > 0:
-            out += ',ties=(%s)' % ties
-        constraints = self.getFieldConstraints()
-        if len(constraints) > 0:
-            out += ',constraints=(%s)' % constraints
-        return out
+        if not self._isMultiSpectrum:
+            return str(self.function)
+        else:
+            funs = self.function.createEquivalentFunctions()
+            return str(funs[i])
 
     def makePhysicalPropertiesFunction(self, i=0):
         """Form a definition string for one of the crystal field physical properties functions
@@ -249,132 +255,21 @@ class CrystalField(object):
                 out = ppobj.toString()
             else:
                 return ''
-        out += ',Ion=%s,Symmetry=%s' % (self._ion, self._symmetry)
-        if len(self._fieldParameters) > 0:
-            out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
-        ties = self.getFieldTies()
+        out += ',Ion=%s,Symmetry=%s' % (self.Ion, self.Symmetry)
+        fieldParams = self._getFieldParameters()
+        if len(fieldParams) > 0:
+            out += ',%s' % ','.join(['%s=%s' % item for item in fieldParams.items()])
+        ties = self._getFieldTies()
         if len(ties) > 0:
             out += ',ties=(%s)' % ties
-        constraints = self.getFieldConstraints()
+        constraints = self._getFieldConstraints()
         if len(constraints) > 0:
             out += ',constraints=(%s)' % constraints
         return out
 
-    def _makeMultiAttributes(self):
-        """
-        Make the main attribute part of the function string for makeMultiSpectrumFunction()
-        """
-        # Handles physical properties (PP). self._temperature applies only for INS datasets. But the
-        # C++ CrystalFieldMultiSpectrum uses it to count number of datasets, so we need to set it here
-        # as a concatenation of the INS (self._temperature and self._FWHM) and PP (self._physprop)
-        if self._temperature is None:
-            if self._physprop is None:
-                errmsg = 'Cannot run fit: No temperature (INS spectrum) or physical properties defined.'
-                raise RuntimeError(errmsg)
-            physprop = []
-            temperature = []
-            FWHM = []
-        else:
-            physprop = (len(self._temperature) if islistlike(self._temperature) else 1) * [None]
-            temperature = self._temperature if islistlike(self._temperature) else [self._temperature]
-            FWHM = self._FWHM if islistlike(self._FWHM) else [self._FWHM]
-        if self._physprop is not None:
-            for pp in (self._physprop if islistlike(self._physprop) else [self._physprop]):
-                temperature.append(pp.Temperature if (pp.Temperature is not None) else 0.)
-                FWHM.append(0.)
-                physprop.append(pp)
-            ppid = [0 if pp is None else pp.TypeID for pp in physprop]
-            ppenv = [pp.envString(i) for i, pp in enumerate(physprop) if pp is not None]
-            ppenv = filter(None, ppenv)
-        out = ',ToleranceEnergy=%s,ToleranceIntensity=%s' % (self._toleranceEnergy, self._toleranceIntensity)
-        out += ',PeakShape=%s' % self.getPeak().name
-        out += ',FixAllPeaks=%s' % (1 if self._fixAllPeaks else 0)
-        if self.background is not None:
-            out += ',Background=%s' % self.background[0].nameString()
-        out += ',Temperatures=(%s)' % ','.join(map(str, temperature))
-        if self._physprop is not None:
-            out += ',PhysicalProperties=(%s)' % ','.join(map(str, ppid))
-            out += ',%s' % ','.join(map(str, ppenv))
-        if self._FWHM is not None:
-            out += ',FWHMs=(%s)' % ','.join(map(str, FWHM))
-        if self._intensityScaling is not None:
-            for i in range(len(self._intensityScaling)):
-                out += ',IntensityScaling%s=%s' % (i, self._intensityScaling[i])
-        return out
-
-    def _makeMultiResolutionModel(self):
-        """
-        Make the resolution model part of the function string for makeMultiSpectrumFunction()
-        """
-        out = ''
-        if self._resolutionModel is not None:
-            i = 0
-            for model in self._resolutionModel.model:
-                out += ',FWHMX{0}={1},FWHMY{0}={2}'.format(i, tuple(model[0]), tuple(model[1]))
-                i += 1
-            if self._fwhmVariation is not None:
-                out += ',FWHMVariation=%s' % self._fwhmVariation
-        return out
-
-    def _makeMultiPeaks(self):
-        """
-        Make the peaks part of the function string for makeMultiSpectrumFunction()
-        """
-        out = ''
-        i = 0
-        for peaks in (self.peaks if islistlike(self.peaks) else [self.peaks]):
-            parOut = peaks.paramString('f%s.' % i, 1)
-            if len(parOut) > 0:
-                out += ',%s' % parOut
-            tiesOut = peaks.tiesString('f%s.' % i)
-            if len(tiesOut) > 0:
-                out += ',%s' % tiesOut
-            constraintsOut = peaks.constraintsString('f%s.' % i)
-            if len(constraintsOut) > 0:
-                out += ',%s' % constraintsOut
-            i += 1
-        return out
-
-    # pylint: disable=too-many-public-branches
     def makeMultiSpectrumFunction(self):
-        """Form a definition string for the CrystalFieldMultiSpectrum function"""
-        out = 'name=CrystalFieldMultiSpectrum,Ion=%s,Symmetry=%s' % (self._ion, self._symmetry)
-        out += self._makeMultiAttributes()
-        out += ',%s' % ','.join(['%s=%s' % item for item in self._fieldParameters.items()])
-
-        tieList = []
-        constraintsList = []
-        if self.background is not None:
-            i = 0
-            for background in self.background:
-                prefix = 'f%s.f0.' % i
-                bgOut = background.paramString(prefix)
-                if len(bgOut) > 0:
-                    out += ',%s' % bgOut
-                tieOut = background.tiesString(prefix)
-                if len(tieOut) > 0:
-                    tieList.append(tieOut)
-                constraintsOut = background.constraintsString(prefix)
-                if len(constraintsOut) > 0:
-                    constraintsList.append(constraintsOut)
-                i += 1
-        if self._temperature is not None:
-            out += self._makeMultiResolutionModel()
-            out += self._makeMultiPeaks()
-
-        ties = self.getFieldTies()
-        if len(ties) > 0:
-            tieList.append(ties)
-        ties = ','.join(tieList)
-        if len(ties) > 0:
-            out += ',ties=(%s)' % ties
-        constraints = self.getFieldConstraints()
-        if len(constraints) > 0:
-            constraintsList.append(constraints)
-        constraints = ','.join(constraintsList)
-        if len(constraints) > 0:
-            out += ',constraints=(%s)' % constraints
-        return out
+        import re
+        return re.sub(r'FWHM[X|Y]\d+=\(\),', '', str(self.function))
 
     @property
     def Ion(self):
@@ -384,7 +279,7 @@ class CrystalField(object):
         ...
         ion = cf.Ion
         """
-        return self._ion
+        return self.crystalFieldFunction.getAttributeValue('Ion')
 
     @Ion.setter
     def Ion(self, value):
@@ -395,19 +290,10 @@ class CrystalField(object):
         cf.Ion = 'Pr'
         """
         if value not in self.ion_nre_map.keys():
-            msg = 'Value %s is not allowed for attribute Ion.\nList of allowed values: %s' %\
+            msg = 'Value %s is not allowed for attribute Ion.\nList of allowed values: %s' % \
                   (value, ', '.join(list(self.ion_nre_map.keys())))
-            arbitraryJ = re.match('[SJsj]([0-9\.]+)', value)
-            if arbitraryJ and (float(arbitraryJ.group(1)) % 0.5) == 0:
-                value = arbitraryJ.group(0)
-                self._nre = int(-float(arbitraryJ.group(1)) * 2.)
-                if self._nre < -99:
-                    raise RuntimeError('J value ' + str(-self._nre / 2) + ' is too large.')
-            else:
-                raise RuntimeError(msg+', S<n>, J<n>')
-        else:
-            self._nre = self.ion_nre_map[value]
-        self._ion = value
+            raise RuntimeError(msg)
+        self.crystalFieldFunction.setAttributeValue('Ion', value)
         self._dirty_eigensystem = True
         self._dirty_peaks = True
         self._dirty_spectra = True
@@ -420,7 +306,7 @@ class CrystalField(object):
         ...
         symm = cf.Symmetry
         """
-        return self._symmetry
+        return self.crystalFieldFunction.getAttributeValue('Symmetry')
 
     @Symmetry.setter
     def Symmetry(self, value):
@@ -434,7 +320,7 @@ class CrystalField(object):
             msg = 'Value %s is not allowed for attribute Symmetry.\nList of allowed values: %s' % \
                   (value, ', '.join(self.allowed_symmetries))
             raise RuntimeError(msg)
-        self._symmetry = value
+        self.crystalFieldFunction.setAttributeValue('Symmetry', value)
         self._dirty_eigensystem = True
         self._dirty_peaks = True
         self._dirty_spectra = True
@@ -442,67 +328,119 @@ class CrystalField(object):
     @property
     def ToleranceEnergy(self):
         """Get energy tolerance"""
-        return self._toleranceEnergy
+        return self.crystalFieldFunction.getAttributeValue('ToleranceEnergy')
 
     @ToleranceEnergy.setter
     def ToleranceEnergy(self, value):
         """Set energy tolerance"""
-        self._toleranceEnergy = value
+        self.crystalFieldFunction.setAttributeValue('ToleranceEnergy', float(value))
         self._dirty_peaks = True
         self._dirty_spectra = True
 
     @property
     def ToleranceIntensity(self):
         """Get intensity tolerance"""
-        return self._toleranceIntensity
+        return self.crystalFieldFunction.getAttributeValue('ToleranceIntensity')
 
     @ToleranceIntensity.setter
     def ToleranceIntensity(self, value):
         """Set intensity tolerance"""
-        self._toleranceIntensity = value
+        self.crystalFieldFunction.setAttributeValue('ToleranceIntensity', float(value))
         self._dirty_peaks = True
         self._dirty_spectra = True
 
     @property
     def IntensityScaling(self):
-        return self._intensityScaling
+        if not self._isMultiSpectrum:
+            return self.crystalFieldFunction.getParameterValue('IntensityScaling')
+        iscaling = []
+        for i in range(self.NumberOfSpectra):
+            paramName = 'IntensityScaling%s' % i
+            iscaling.append(self.crystalFieldFunction.getParameterValue(paramName))
+        return iscaling
 
     @IntensityScaling.setter
     def IntensityScaling(self, value):
-        self._intensityScaling = value
+        if not self._isMultiSpectrum:
+            if islistlike(value):
+                if len(value) == 1:
+                    value = value[0]
+                else:
+                    raise ValueError('IntensityScaling is expected to be a single floating point value')
+            self.crystalFieldFunction.setParameter('IntensityScaling', value)
+        else:
+            n = self.NumberOfSpectra
+            if not islistlike(value) or len(value) != n:
+                raise ValueError('IntensityScaling is expected to be a list of %s values' % n)
+            for i in range(n):
+                paramName = 'IntensityScaling%s' % i
+                self.crystalFieldFunction.setParameter(paramName, value[i])
+
         self._dirty_peaks = True
         self._dirty_spectra = True
 
     @property
     def Temperature(self):
-        return self._temperature
+        attrName = 'Temperatures' if self._isMultiSpectrum else 'Temperature'
+        return self.crystalFieldFunction.getAttributeValue(attrName)
 
     @Temperature.setter
     def Temperature(self, value):
-        lenval = len(value) if islistlike(value) else 1
-        lentemp = len(self._temperature) if islistlike(self._temperature) else 1
-        self._temperature = value
+        if islistlike(value) and len(value) == 1:
+            value = value[0]
+        if self._isMultiSpectrum:
+            if not islistlike(value):
+                # Try to keep current set of field parameters.
+                self._remakeFunction(float(value))
+                return
+            self.crystalFieldFunction.setAttributeValue('Temperatures', value)
+        else:
+            if islistlike(value):
+                self._remakeFunction(value)
+                return
+            self.crystalFieldFunction.setAttributeValue('Temperature', float(value))
         self._dirty_peaks = True
         self._dirty_spectra = True
-        if lenval != lentemp:
-            peakname = self.peaks[0].name if isinstance(self.peaks, list) else self.peaks.name
-            self.setPeaks(peakname)
 
     @property
     def FWHM(self):
-        return self._FWHM
+        attrName = 'FWHMs' if self._isMultiSpectrum else 'FWHM'
+        fwhm = self.crystalFieldFunction.getAttributeValue(attrName)
+        if self._isMultiSpectrum:
+            nDatasets = len(self.Temperature)
+            if len(fwhm) != nDatasets:
+                return list(fwhm) * nDatasets
+        return fwhm
 
     @FWHM.setter
     def FWHM(self, value):
-        self._FWHM = value
+        if islistlike(value) and len(value) == 1:
+            value = value[0]
+        if self._isMultiSpectrum:
+            if not islistlike(value):
+                value = [value] * self.NumberOfSpectra
+            self.crystalFieldFunction.setAttributeValue('FWHMs', value)
+        else:
+            if islistlike(value):
+                raise ValueError('FWHM is expected to be a single floating point value')
+            self.crystalFieldFunction.setAttributeValue('FWHM', float(value))
+        self._dirty_spectra = True
+
+    @property
+    def FWHMVariation(self):
+        return self.crystalFieldFunction.getAttributeValue('FWHMVariation')
+
+    @FWHMVariation.setter
+    def FWHMVariation(self, value):
+        self.crystalFieldFunction.setAttributeValue('FWHMVariation', float(value))
         self._dirty_spectra = True
 
     def __getitem__(self, item):
-        return self._fieldParameters[item]
+        return self.crystalFieldFunction.getParameterValue(item)
 
     def __setitem__(self, key, value):
         self._dirty_spectra = True
-        self._fieldParameters[key] = value
+        self.crystalFieldFunction.setParameter(key, value)
 
     @property
     def ResolutionModel(self):
@@ -515,18 +453,93 @@ class CrystalField(object):
             self._resolutionModel = value
         else:
             self._resolutionModel = ResolutionModel(value)
+        if self._isMultiSpectrum:
+            if not self._resolutionModel.multi or self._resolutionModel.NumberOfSpectra != self.NumberOfSpectra:
+                raise RuntimeError('Resolution model is expected to have %s functions, found %s' %
+                                   (self.NumberOfSpectra, self._resolutionModel.NumberOfSpectra))
+            for i in range(self.NumberOfSpectra):
+                model = self._resolutionModel.model[i]
+                self.crystalFieldFunction.setAttributeValue('FWHMX%s' % i, model[0])
+                self.crystalFieldFunction.setAttributeValue('FWHMY%s' % i, model[1])
+        else:
+            model = self._resolutionModel.model
+            self.crystalFieldFunction.setAttributeValue('FWHMX', model[0])
+            self.crystalFieldFunction.setAttributeValue('FWHMY', model[1])
 
     @property
     def FixAllPeaks(self):
-        return self._fixAllPeaks
+        return self.crystalFieldFunction.getAttributeValue('FixAllPeaks')
 
     @FixAllPeaks.setter
     def FixAllPeaks(self, value):
-        self._fixAllPeaks = value
+        self.crystalFieldFunction.setAttributeValue('FixAllPeaks', value)
+
+    @property
+    def PeakShape(self):
+        return self.crystalFieldFunction.getAttributeValue('PeakShape')
+
+    @PeakShape.setter
+    def PeakShape(self, value):
+        self.crystalFieldFunction.setAttributeValue('PeakShape', value)
 
     @property
     def NumberOfSpectra(self):
-        return len(self._temperature)
+        return self.crystalFieldFunction.getNumberDomains()
+
+    @property
+    def NPeaks(self):
+        return self.crystalFieldFunction.getAttributeValue('NPeaks')
+
+    @NPeaks.setter
+    def NPeaks(self, value):
+        self.crystalFieldFunction.setAttributeValue('NPeaks', value)
+
+    @property
+    def peaks(self):
+        return self._peaks
+
+    @property
+    def background(self):
+        return self._background
+
+    @background.setter
+    def background(self, value):
+        """
+        Define the background function.
+        Args:
+            value: an instance of function.Background class or a list of instances
+                in a multi-spectrum case
+        """
+        from .function import Background
+        from mantid.simpleapi import FunctionFactory
+        if self._background is not None:
+            raise ValueError('Background has been set already')
+        if not isinstance(value, Background):
+            raise TypeError('Expected a Background object, found %s' % str(value))
+        if not self._isMultiSpectrum:
+            fun_str = value.toString() + ';' + str(self.function)
+            self.function = FunctionFactory.createInitialized(fun_str)
+            self._background = self._makeBackgroundObject(value)
+            self._setPeaks()
+        else:
+            self.function.setAttributeValue("Background", value.toString())
+            self._background = []
+            for ispec in range(self.NumberOfSpectra):
+                prefix = 'f%s.' % ispec
+                self._background.append(self._makeBackgroundObject(value, prefix))
+
+    def _makeBackgroundObject(self, value, prefix=''):
+        from .function import Background, Function
+        if value.peak is not None and value.background is not None:
+            peak = Function(self.function, prefix=prefix + 'f0.f0.')
+            background = Function(self.function, prefix=prefix + 'f0.f1.')
+        elif value.peak is not None:
+            peak = Function(self.function, prefix=prefix + 'f0.')
+            background = None
+        elif value.background is not None:
+            peak = None
+            background = Function(self.function, prefix=prefix + 'f0.')
+        return Background(peak=peak, background=background)
 
     @property
     def PhysicalProperty(self):
@@ -537,21 +550,37 @@ class CrystalField(object):
         from .function import PhysicalProperties
         vlist = value if islistlike(value) else [value]
         if all([isinstance(pp, PhysicalProperties) for pp in vlist]):
+            nOldPP = len(self._physprop) if islistlike(self._physprop) else (0 if self._physprop is None else 1)
             self._physprop = value
         else:
             errmsg = 'PhysicalProperty input must be a PhysicalProperties'
             errmsg += ' instance or a list of such instances'
             raise ValueError(errmsg)
+        # If a spectrum (temperature) is already defined, or multiple physical properties
+        # given, redefine the CrystalFieldMultiSpectrum function.
+        if not self.isPhysicalPropertyOnly or islistlike(self.PhysicalProperty):
+            tt = self.Temperature if islistlike(self.Temperature) else [self.Temperature]
+            ww = list(self.FWHM) if islistlike(self.FWHM) else [self.FWHM]
+            # Last n-set of temperatures correspond to PhysicalProperties
+            if nOldPP > 0:
+                tt = tt[:-nOldPP]
+            # Removes 'negative' temperature, which is a flag for no INS dataset
+            tt = [val for val in tt if val > 0]
+            pptt = [0 if val.Temperature is None else val.Temperature for val in vlist]
+            self._remakeFunction(list(tt)+pptt)
+            if len(tt) > 0 and len(pptt) > 0:
+                ww += [0] * len(pptt)
+            self.FWHM = ww
+            ppids = [pp.TypeID for pp in vlist]
+            self.function.setAttributeValue('PhysicalProperties', [0]*len(tt)+ppids)
+            for attribs in [pp.getAttributes(i+len(tt)) for i, pp in enumerate(vlist)]:
+                for item in attribs.items():
+                    self.function.setAttributeValue(item[0], item[1])
 
     @property
     def isPhysicalPropertyOnly(self):
-        return self.Temperature is None and self.PhysicalProperty
-
-    @property
-    def numPhysicalPropertyData(self):
-        if self._physprop:
-            return len(self._physprop) if islistlike(self._physprop) else 1
-        return 0
+        return (not islistlike(self.Temperature) and self.Temperature < 0
+                and self.PhysicalProperty is not None)
 
     def ties(self, **kwargs):
         """Set ties on the field parameters.
@@ -561,7 +590,7 @@ class CrystalField(object):
                 tie(B20 = 0.1, IB23 = '2*B23')
         """
         for tie in kwargs:
-            self._fieldTies[tie] = kwargs[tie]
+            self.crystalFieldFunction.tie(tie, str(kwargs[tie]))
 
     def constraints(self, *args):
         """
@@ -570,28 +599,7 @@ class CrystalField(object):
         @param args: A list of constraints. For example:
                 constraints('B00 > 0', '0.1 < B43 < 0.9')
         """
-        self._fieldConstraints += args
-
-    def setPeaks(self, name):
-        """Define the shape of the peaks and create PeakFunction instances."""
-        from .function import PeaksFunction
-        if self._temperature is None or not isinstance(self._temperature, list):
-            self.peaks = PeaksFunction(name, firstIndex=0)
-        else:
-            self.peaks = [PeaksFunction(name, firstIndex=1) for _ in self._temperature]
-
-    def setBackground(self, peak=None, background=None):
-        from .function import Background
-        if isinstance(self._temperature, list):
-            self.background = len(self._temperature) * Background(peak=peak, background=background)
-        else:
-            self.background = Background(peak=peak, background=background)
-
-    def getPeak(self, i=0):
-        if isinstance(self.peaks, list):
-            return self.peaks[i]
-        else:
-            return self.peaks
+        self.crystalFieldFunction.addConstraints(','.join(args))
 
     def getEigenvalues(self):
         self._calcEigensystem()
@@ -647,6 +655,9 @@ class CrystalField(object):
             wksp = i
             i = 0
 
+        if (self.Temperature[i] if islistlike(self.Temperature) else self.Temperature) < 0:
+            raise RuntimeError('You must first define a temperature for the spectrum')
+
         # Workspace is given, always calculate
         if wksp is None:
             xArray = None
@@ -731,7 +742,6 @@ class CrystalField(object):
         # _calcSpectrum updates parameters and susceptibility has a 'Lambda' parameter which other
         # CF functions don't have. This causes problems if you want to calculate another quantity after
         x, y = self._getPhysProp(PhysicalProperties('chi', *args, **kwargs), workspace, ws_index)
-        self._fieldParameters.pop('Lambda', None)
         return x, y
 
     def getMagneticMoment(self, *args, **kwargs):
@@ -819,7 +829,7 @@ class CrystalField(object):
         createWS.initialize()
 
         xArray, yArray = self.getSpectrum(i, workspace, ws_index)
-        ws_name = name if name is not None else 'CrystalField_%s' % self._ion
+        ws_name = name if name is not None else 'CrystalField_%s' % self.Ion
 
         if isinstance(i, int):
             if workspace is None:
@@ -848,97 +858,19 @@ class CrystalField(object):
             createWS.execute()
             plotSpectrum(ws_name, 0)
 
-    def _setDefaultTies(self):
-        for name in self.field_parameter_names:
-            if name not in self._fieldParameters:
-                self._fieldTies[name] = '0'
-
-    def getFieldTies(self):
-        ties = ['%s=%s' % item for item in self._fieldTies.items()]
-        return ','.join(ties)
-
-    def getFieldConstraints(self):
-        return ','.join(self._fieldConstraints)
-
-    def updateParameters(self, func):
-        """
-        Update values of the field and peaks parameters.
-        @param func: A IFunction object containing new parameter values.
-        """
-        for i in range(func.nParams()):
-            par = func.parameterName(i)
-            value = func.getParameterValue(i)
-            if par == 'IntensityScaling':
-                self._intensityScaling = value
-            else:
-                match = re.match(FN_PATTERN, par)
-                if match:
-                    i = int(match.group(1))
-                    par = match.group(2)
-                    self.peaks.param[i][par] = value
-                else:
-                    self._fieldParameters[par] = value
-
     def update(self, func):
         """
         Update values of the fitting parameters.
         @param func: A IFunction object containing new parameter values.
         """
-        from mantid.api import CompositeFunction
-        if isinstance(func, CompositeFunction):
-            nFunc = len(func)
-            if nFunc == 3:
-                self.background.update(func[0], func[1])
-                self.updateParameters(func[2])
-            elif nFunc == 2:
-                self.background.update(func[0])
-                self.updateParameters(func[1])
+        self.function = func
+        if self._background is not None:
+            if isinstance(self._background, list):
+                for background in self._background:
+                    background.update(func)
             else:
-                raise RuntimeError('CompositeFunuction cannot have more than 3 components.')
-        else:
-            self.updateParameters(func)
-
-    def update_multi(self, func):
-        """
-        Update values of the fitting parameters in case of a multi-spectrum function.
-        @param func: A IFunction object containing new parameter values.
-        """
-        from .function import Function
-        for i in range(func.nParams()):
-            par = func.parameterName(i)
-            value = func.getParameterValue(i)
-            match = re.match(FN_MS_PATTERN, par)
-            if match:
-                ispec = int(match.group(1))
-                ipeak = int(match.group(2))
-                par = match.group(3)
-                if ipeak == 0:
-                    if self.background is None:
-                        self.setBackground(background=Function(self.default_background))
-                    background = (self.background[ispec]
-                                  if islistlike(self.background) else self.background)
-                    bgMatch = re.match(FN_PATTERN, par)
-                    if bgMatch:
-                        i = int(bgMatch.group(1))
-                        par = bgMatch.group(2)
-                        if i == 0:
-                            background.peak.param[par] = value
-                        else:
-                            background.background.param[par] = value
-                    else:
-                        if background.peak is not None:
-                            background.peak.param[par] = value
-                        elif background.background is not None:
-                            background.background.param[par] = value
-                        else:
-                            raise RuntimeError('Background is undefined in CrystalField instance.')
-                else:
-                    if islistlike(self.peaks):
-                        self.peaks[ispec].param[ipeak - 1][par] = value
-                    else:
-                        self.peaks.param[ipeak - 1][par] = value
-            else:
-                self._fieldParameters[par] = value
+                self._background.update(func)
+        self._setPeaks()
 
     def calc_xmin_xmax(self, i):
         """Calculate the x-range containing interesting features of a spectrum (for plotting)
@@ -956,58 +888,6 @@ class CrystalField(object):
         x_max += deltaX
         return x_min, x_max
 
-    def check_consistency(self):
-        """ Checks that list input variables are consistent """
-        if not self._temperature:
-            return 0
-        # Number of datasets is implied by temperature.
-        nDataset = len(self._temperature) if islistlike(self._temperature) else 1
-        nFWHM = len(self._FWHM) if islistlike(self._FWHM) else 1
-        nIntensity = len(self._intensityScaling) if islistlike(self._intensityScaling) else 1
-        nPeaks = len(self.peaks) if islistlike(self.peaks) else 1
-        # Consistent if temperature, FWHM, intensityScale are lists with same len
-        # Or if FWHM, intensityScale are 1-element list or scalar
-        if (nFWHM != nDataset and nFWHM != 1) or (nIntensity != nDataset and nIntensity != 1):
-            errmsg = 'The Temperature, FWHM, and IntensityScaling properties have different '
-            errmsg += 'number of elements implying different number of spectra.'
-            raise ValueError(errmsg)
-        # This should not occur, but may do if the user changes the temperature(s) after
-        # initialisation. In which case, we reset the peaks, giving a warning.
-        if nPeaks != nDataset:
-            from .function import PeaksFunction
-            errmsg = 'Internal inconsistency between number of spectra and list of '
-            errmsg += 'temperatures. Changing number of spectra to match temperature. '
-            errmsg += 'This may reset some peaks constraints / limits'
-            warnings.warn(errmsg, RuntimeWarning)
-            if len(self.peaks) > nDataset:           # Truncate
-                self.peaks = self.peaks[0:nDataset]
-            else:                                    # Append empty PeaksFunctions
-                for i in range(len(self.peaks), nDataset):
-                    self.peaks.append(PeaksFunction(self.peaks[0].name(), firstIndex=0))
-        # Convert to all scalars if only one dataset
-        if nDataset == 1:
-            if islistlike(self._temperature) and self._temperature is not None:
-                self._temperature = self._temperature[0]
-                if islistlike(self.peaks):
-                    self.peaks = self.peaks[0]
-            if islistlike(self._FWHM) and self._FWHM is not None:
-                self._FWHM = self._FWHM[0]
-            if islistlike(self._intensityScaling) and self._intensityScaling is not None:
-                self._intensityScaling = self._intensityScaling[0]
-        # Convert to list of same size if multidatasets
-        else:
-            if nFWHM == 1 and self._FWHM is not None:
-                if islistlike(self._FWHM):
-                    self._FWHM *= nDataset
-                else:
-                    self._FWHM = nDataset * [self._FWHM]
-            if nIntensity == 1 and self._intensityScaling is not None:
-                if islistlike(self._intensityScaling):
-                    self._intensityScaling *= nDataset
-                else:
-                    self._intensityScaling = nDataset * [self._intensityScaling]
-        return nDataset
-
     def __add__(self, other):
         if isinstance(other, CrystalFieldMulti):
             return other.__radd__(self)
@@ -1025,32 +905,28 @@ class CrystalField(object):
 
     def _getTemperature(self, i):
         """Get temperature value for i-th spectrum."""
-        if self._temperature is None:
-            raise RuntimeError('Temperature must be set.')
-        if isinstance(self._temperature, float) or isinstance(self._temperature, int):
+        if not self._isMultiSpectrum:
             if i != 0:
                 raise RuntimeError('Cannot evaluate spectrum %s. Only 1 temperature is given.' % i)
-            return float(self._temperature)
+            return float(self.Temperature)
         else:
-            nTemp = len(self._temperature)
+            temperatures = self.Temperature
+            nTemp = len(temperatures)
             if -nTemp <= i < nTemp:
-                return float(self._temperature[i])
+                return float(temperatures[i])
             else:
                 raise RuntimeError('Cannot evaluate spectrum %s. Only %s temperatures are given.' % (i, nTemp))
 
     def _getFWHM(self, i):
         """Get default FWHM value for i-th spectrum."""
-        if self._FWHM is None:
-            raise RuntimeError('Default FWHM must be set.')
-        if isinstance(self._FWHM, float) or isinstance(self._FWHM, int):
+        if not self._isMultiSpectrum:
             # if i != 0 assume that value for all spectra
-            return float(self._FWHM)
+            return float(self.FWHM)
         else:
-            nFWHM = len(self._FWHM)
-            if i >= -nFWHM and i < nFWHM:
-                return float(self._FWHM[i])
-            elif nFWHM == 1:
-                return self._FWHM[0]
+            fwhm = self.FWHM
+            nFWHM = len(fwhm)
+            if -nFWHM <= i < nFWHM:
+                return float(fwhm[i])
             else:
                 raise RuntimeError('Cannot get FWHM for spectrum %s. Only %s FWHM are given.' % (i, nFWHM))
 
@@ -1068,6 +944,29 @@ class CrystalField(object):
             return self.peaks[i]
         return self.peaks
 
+    def _getFieldParameters(self):
+        """
+        Get the values of non-zero field parameters.
+        Returns:
+            a dict with name: value pairs.
+        """
+        params = {}
+        for name in self.field_parameter_names:
+            value = self.crystalFieldFunction.getParameterValue(name)
+            if value != 0.0:
+                params[name] = value
+        return params
+
+    def _getFieldTies(self):
+        import re
+        ties = re.match('ties=\((.*?)\)', str(self.crystalFieldFunction))
+        return ties.group(1) if ties else ''
+
+    def _getFieldConstraints(self):
+        import re
+        constraints = re.match('constraints=\((.*?)\)', str(self.crystalFieldFunction))
+        return constraints.group(1) if constraints else ''
+
     def _getPhysProp(self, ppobj, workspace, ws_index):
         """
         Returns a physical properties calculation
@@ -1101,9 +1000,9 @@ class CrystalField(object):
         """
         if self._dirty_eigensystem:
             import CrystalField.energies as energies
-            if self._nre < -99:
-                raise RuntimeError('J value ' + str(-self._nre / 2) + ' is too large.')
-            self._eigenvalues, self._eigenvectors, self._hamiltonian = energies.energies(self._nre, **self._fieldParameters)
+            nre = self.ion_nre_map[self.Ion]
+            self._eigenvalues, self._eigenvectors, self._hamiltonian = \
+                energies.energies(nre, **self._getFieldParameters())
             self._dirty_eigensystem = False
 
     def _calcPeaksList(self, i):
@@ -1135,16 +1034,13 @@ class CrystalField(object):
         alg.setProperty('WorkspaceIndex', ws_index)
         alg.setProperty('OutputWorkspace', 'dummy')
         alg.execute()
-        fun = alg.getProperty('Function').value
-        if not self._isMultiSpectra():
-            self.update(fun)
         out = alg.getProperty('OutputWorkspace').value
         # Create copies of the x and y because `out` goes out of scope when this method returns
         # and x and y get deallocated
         return np.array(out.readX(0)), np.array(out.readY(1))
 
-    def _isMultiSpectra(self):
-        return islistlike(self._temperature)
+    def isMultiSpectrum(self):
+        return self._isMultiSpectrum
 
 
 class CrystalFieldSite(object):
@@ -1221,6 +1117,11 @@ class CrystalFieldMulti(object):
         return ','.join(ties)
 
     def getSpectrum(self, i=0, workspace=None, ws_index=0):
+        tt = []
+        for site in self.sites:
+            tt = tt + (list(site.Temperature) if islistlike(site.Temperature) else [site.Temperature])
+        if any([val < 0 for val in tt]):
+            raise RuntimeError('You must first define a temperature for the spectrum')
         largest_abundance= max(self.abundances)
         if workspace is not None:
             xArray, yArray = self.sites[0].getSpectrum(i, workspace, ws_index)
@@ -1313,23 +1214,29 @@ class CrystalFieldMulti(object):
             a.PhysicalProperty = value
 
     @property
-    def numPhysicalPropertyData(self):
-        num_spec = []
-        for a in self.sites:
-            num_spec.append(a.numPhysicalPropertyData)
-        if len(set(num_spec)) > 1:
-            raise ValueError('Number of physical properties datasets for each site not consistent')
-        return num_spec[0]
-
-    def check_consistency(self):
-        """ Checks that list input variables are consistent """
+    def NumberOfSpectra(self):
+        """ Returns the number of expected workspaces """
         num_spec = []
         for site in self.sites:
-            num_spec.append(site.check_consistency())
+            num_spec.append(site.NumberOfSpectra)
         if len(set(num_spec)) > 1:
             raise ValueError('Number of spectra for each site not consistent with each other')
         return num_spec[0]
 
+    @property
+    def Temperature(self):
+        tt = []
+        for site in self.sites:
+            tt.append([val for val in (site.Temperature if islistlike(site.Temperature) else [site.Temperature])])
+        if len(set([tuple(val) for val in tt])) > 1:
+            raise ValueError('Temperatures of spectra for each site not consistent with each other')
+        return tt[0]
+
+    @Temperature.setter
+    def Temperature(self, value):
+        for site in self.sites:
+            site.Temperature = value
+
     def __add__(self, other):
         if isinstance(other, CrystalFieldMulti):
             cfm = CrystalFieldMulti()
@@ -1462,6 +1369,10 @@ class CrystalFieldFit(object):
         """
         from mantid.api import AlgorithmManager
         fun = self.model.makeSpectrumFunction()
+        if 'CrystalFieldMultiSpectrum' in fun:
+            # Hack to ensure that 'PhysicalProperties' attribute is first
+            # otherwise it won't set up other attributes properly
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('EstimateFitParameters')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1481,6 +1392,8 @@ class CrystalFieldFit(object):
         """
         from mantid.api import AlgorithmManager
         fun = self.model.makeMultiSpectrumFunction()
+        if 'CrystalFieldMultiSpectrum' in fun:
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('EstimateFitParameters')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1493,7 +1406,7 @@ class CrystalFieldFit(object):
             alg.setProperty(param, kwargs[param])
         alg.execute()
         function = alg.getProperty('Function').value
-        self.model.update_multi(function)
+        self.model.update(function)
         self._function = function
 
     def _fit_single(self):
@@ -1508,6 +1421,8 @@ class CrystalFieldFit(object):
                 fun = self.model.makeSpectrumFunction()
         else:
             fun = str(self._function)
+        if 'CrystalFieldMultiSpectrum' in fun:
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('Fit')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1525,6 +1440,8 @@ class CrystalFieldFit(object):
         """
         from mantid.api import AlgorithmManager
         fun = self.model.makeMultiSpectrumFunction()
+        if 'CrystalFieldMultiSpectrum' in fun:
+            fun = re.sub(r'(name=.*?,)(.*?)(PhysicalProperties=\(.*?\),)',r'\1\3\2', fun)
         alg = AlgorithmManager.createUnmanaged('Fit')
         alg.initialize()
         alg.setProperty('Function', fun)
@@ -1537,7 +1454,7 @@ class CrystalFieldFit(object):
         self._set_fit_properties(alg)
         alg.execute()
         function = alg.getProperty('Function').value
-        self.model.update_multi(function)
+        self.model.update(function)
         self.model.chi2 = alg.getProperty('OutputChi2overDoF').value
 
     def _set_fit_properties(self, alg):
@@ -1546,7 +1463,7 @@ class CrystalFieldFit(object):
 
     def check_consistency(self):
         """ Checks that list input variables are consistent """
-        num_ws = self.model.check_consistency() + self.model.numPhysicalPropertyData
+        num_ws = self.model.NumberOfSpectra
         errmsg = 'Number of input workspaces not consistent with model'
         if islistlike(self._input_workspace):
             if num_ws != len(self._input_workspace):
@@ -1556,3 +1473,7 @@ class CrystalFieldFit(object):
                 self._input_workspace = self._input_workspace[0]
         elif num_ws != 1:
             raise ValueError(errmsg)
+        if not self.model.isPhysicalPropertyOnly:
+            tt = self.model.Temperature
+            if any([val < 0 for val in (tt if islistlike(tt) else [tt])]):
+                raise RuntimeError('You must first define a temperature for the spectrum')
diff --git a/scripts/Inelastic/CrystalField/function.py b/scripts/Inelastic/CrystalField/function.py
index 0f802a985b255fcb74d1193a3daa7c823687bfc0..5d6f24e20778cd609b540287efacb76ca709709d 100644
--- a/scripts/Inelastic/CrystalField/function.py
+++ b/scripts/Inelastic/CrystalField/function.py
@@ -5,10 +5,46 @@ from six import string_types
 parNamePattern = re.compile(r'([a-zA-Z][\w.]+)')
 
 
+class FunctionParameters(object):
+    """
+    A helper class that simplifies access to parameters of nested composite fitting functions.
+    """
+    def __init__(self, function, prefix=''):
+        self.function = function
+        self.prefix = prefix
+
+    def __getitem__(self, name):
+        return self.function.getParameterValue(self.prefix + name)
+
+    def __setitem__(self, name, value):
+        self.function.setParameter(self.prefix + name, value)
+
+    def update(self, function):
+        self.function = function
+
+
+class FunctionAttributes(object):
+    """
+    A helper class that simplifies access to attributes of nested composite fitting functions.
+    """
+    def __init__(self, function, prefix=''):
+        self.function = function
+        self.prefix = prefix
+
+    def __getitem__(self, name):
+        return self.function.getAttributeValue(self.prefix + name)
+
+    def __setitem__(self, name, value):
+        self.function.setAttributeValue(self.prefix + name, value)
+
+    def update(self, function):
+        self.function = function
+
+
 class Function(object):
     """A helper object that simplifies getting and setting parameters of a simple named function."""
 
-    def __init__(self, name, **kwargs):
+    def __init__(self, name_or_function, **kwargs):
         """
         Initialise new instance.
         @param name: A valid name registered with the FunctionFactory.
@@ -17,37 +53,24 @@ class Function(object):
                     f = Function('TabulatedFunction', Scaling=2.0)
                     f.attr['Workspace'] = 'workspace_with_data'
         """
-        self._name = name
+        from mantid.simpleapi import FunctionFactory
+        if isinstance(name_or_function, str):
+            self.function = FunctionFactory.createFunction(name_or_function)
+        else:
+            self.function = name_or_function
+        if 'prefix' in kwargs:
+            self.prefix = kwargs['prefix']
+            del kwargs['prefix']
+        else:
+            self.prefix = ''
         # Function attributes.
-        self._attrib = {}
+        self._attrib = FunctionAttributes(self.function, self.prefix)
         # Function parameters.
-        self._params = {}
+        self._params = FunctionParameters(self.function, self.prefix)
+        # The rest of kw arguments are treated as function parameters
         for param in kwargs:
             self._params[param] = kwargs[param]
 
-        self._ties = {}
-        self._constraints = []
-
-    def copyFrom(self, attrib, params, ties, constraints):
-        """Make shallow copies of the member collections"""
-        from copy import copy
-        self._attrib = copy(attrib)
-        self._params = copy(params)
-        self._ties = copy(ties)
-        self._constraints = copy(constraints)
-
-    def clone(self):
-        """Make a copy of self."""
-        function = Function(self._name)
-        # Make shallow copies of the member collections
-        function.copyFrom(self._attrib, self._params, self._ties, self._constraints)
-        return function
-
-    @property
-    def name(self):
-        """Read only name of this function"""
-        return self._name
-
     @property
     def attr(self):
         return self._attrib
@@ -63,8 +86,8 @@ class Function(object):
             the value is a tie string or a number. For example:
                 tie(A0 = 0.1, A1 = '2*A0')
         """
-        for tie in kwargs:
-            self._ties[tie] = kwargs[tie]
+        for param in kwargs:
+            self.function.tie(self.prefix + param, str(kwargs[param]))
 
     def constraints(self, *args):
         """
@@ -73,196 +96,128 @@ class Function(object):
         @param args: A list of constraints. For example:
                 constraints('A0 > 0', '0.1 < A1 < 0.9')
         """
-        self._constraints += args
+        for arg in args:
+            constraint = re.sub(parNamePattern, '%s\\1' % self.prefix, arg)
+            self.function.addConstraints(constraint)
 
     def toString(self):
         """Create function initialisation string"""
-        attrib = ['%s=%s' % item for item in self._attrib.items()] + \
-                 ['%s=%s' % item for item in self._params.items()]
-        if len(attrib) > 0:
-            out = 'name=%s,%s' % (self._name, ','.join(attrib))
-        else:
-            out = 'name=%s' % self._name
-        ties = ','.join(['%s=%s' % item for item in self._ties.items()])
-        if len(ties) > 0:
-            out += ',ties=(%s)' % ties
-        constraints = ','.join(self._constraints)
-        if len(constraints) > 0:
-            out += ',constraints=(%s)' % constraints
-        return out
-
-    def paramString(self, prefix):
-        """Create a string with only parameters and attributes settings.
-            The prefix is prepended to all attribute names.
-        """
-        attrib = ['%s%s=%s' % ((prefix,) + item) for item in self._attrib.items()] + \
-                 ['%s%s=%s' % ((prefix,) + item) for item in self._params.items()]
-        return ','.join(attrib)
-
-    def tiesString(self, prefix):
-        """Create a string with only ties settings.
-            The prefix is prepended to all parameter names.
-        """
-        ties = ['%s%s=%s' % ((prefix,) + item) for item in self._ties.items()]
-        return ','.join(ties)
+        if self.prefix != '':
+            raise RuntimeError('Cannot convert to string a part of function')
+        return str(self.function)
 
-    def constraintsString(self, prefix):
-        """Create a string with only constraints settings.
-            The prefix is prepended to all parameter names.
-        """
-        if len(prefix) > 0:
-            constraints = []
-            for constraint in self._constraints:
-                constraint = re.sub(parNamePattern, prefix + '\\1', constraint)
-                constraints.append(constraint)
-        else:
-            constraints = self._constraints
-        return ','.join(constraints)
-
-    def update(self, func):
+    def update(self, function):
         """
         Update values of the fitting parameters.
         @param func: A IFunction object containing new parameter values.
         """
-        for i in range(func.nParams()):
-            par = func.parameterName(i)
-            self._params[par] = func.getParameterValue(i)
+        self._attrib.update(function)
+        self._params.update(function)
 
 
 class CompositeProperties(object):
     """
-    A dictionary of dictionaries of function properties: attributes or parameters.
-    It mimics properties of a CompositeFunction: the key is a function index and the value
-    id a map 'param_name' -> param_value.
-
-    Example:
-        {
-          0: {'Height': 100, 'Sigma': 1.0}, # Parameters of the first function
-          1: {'Height': 120, 'Sigma': 2.0}, # Parameters of the second function
-          5: {'Height': 300, 'Sigma': 3.0}, # Parameters of the sixth function
-          ...
-        }
+    A helper class that simplifies access of attributes and parameters of a composite function.
     """
+    def __init__(self, function, prefix, kind, first_index):
+        """
+        Constructor.
+        Args:
+            function: a function that this object provides access to
+            prefix: a prefix that is prepended to properties names. This makes it easier to access parameters
+                    of a nested composite function.
+            kind: a kind of properties accessed: 'attributes' or 'parameters'
+            firstIndex: shifts the index of a member function
+        """
+        self.function = function
+        self.prefix = prefix
+        self.PropertyType = FunctionAttributes if kind == 'attributes' else FunctionParameters
+        self.first_index = first_index
+
+    def __getitem__(self, i):
+        """
+        Get a FunctionParameters or FunctionAttributes object that give access to properties of the i-th
+        member function (shifted by self.firstIndex).
+
+        For example:
+            function = FunctionFactory.createInitialized('name=Gaussian,Sigma=1;name=Gaussian,Sigma=2')
+            params = CompositeProperties(function, '', 'parameters', 0)
+            assert params[0]['Sigma'] == 1
+            assert params[1]['Sigma'] == 2
+            params[1]['Sigma'] = 3
+            assert params[1]['Sigma'] == 3
+        Args:
+            i: index of a member function to get/set parameters
+        Returns:
+            FunctionParameters or FunctionAttributes object.
+        """
+        return self.PropertyType(self.function, self.prefix + 'f%s.' % (i + self.first_index))
+
+    def update(self, function):
+        self.function = function
+
+    def ties(self, ties_dict):
+        """Set ties on the parameters.
 
-    def __init__(self):
-        self._properties = {}
-
-    def __getitem__(self, item):
-        """Get a map of properties for a function number <item>"""
-        if item not in self._properties:
-            self._properties[item] = {}
-        return self._properties[item]
-
-    def getSize(self):
-        """Get number of maps (functions) defined here"""
-        keys = list(self._properties.keys())
-        if len(keys) > 0:
-            return max(keys) + 1
-        return 0
-
-    def toStringList(self):
-        """Format all properties into a list of strings where each string is a comma-separated
-        list of name=value pairs.
+        :param ties_dict: Ties as name=value pairs: name is a parameter name,
+            the value is a tie string or a number. For example:
+                tie({'A0': 0.1, 'A1': '2*A0'})
         """
-        prop_list = []
-        for i in range(self.getSize()):
-            if i in self._properties:
-                props = self._properties[i]
-                prop_list.append(','.join(['%s=%s' % item for item in sorted(props.items())]))
-            else:
-                prop_list.append('')
-        return prop_list
+        for param, tie in ties_dict.items():
+            tie = re.sub(parNamePattern, '%s\\1' % self.prefix, tie)
+            self.function.tie(self.prefix + param, tie)
 
-    def toCompositeString(self, prefix, shift=0):
-        """Format all properties as a comma-separated list of name=value pairs where name is formatted
-        in the CompositeFunction style.
+    def constraints(self, *args):
+        """
+        Set constraints for the parameters.
 
-        Example:
-            'f0.Height=100,f0.Sigma=1.0,f1.Height=120,f1.Sigma=2.0,f5.Height=300,f5.Sigma=3.0'
+        @param args: A list of constraints. For example:
+                constraints('A0 > 0', '0.1 < A1 < 0.9')
         """
-        out = ''
-        for i in self._properties:
-            fullPrefix = '%sf%s.' % (prefix, i + shift)
-            props = self._properties[i]
-            if len(out) > 0:
-                out += ','
-            out += ','.join(['%s%s=%s' % ((fullPrefix,) + item) for item in sorted(props.items())])
-        return out[:]
+        for arg in args:
+            constraint = re.sub(parNamePattern, '%s\\1' % self.prefix, arg)
+            self.function.addConstraints(constraint)
 
 
 class PeaksFunction(object):
     """A helper object that simplifies getting and setting parameters of a composite function
-    containing multiple peaks of the same type.
-
-    The object of this class has no access to the C++ fit function it represents. It means that
-    it doesn't know what attributes or parameters the function defines and relies on the user
-    to provide correct information.
-
-    @param name: A name of the individual peak function, such as 'Lorentzian' or 'Gaussian'.
-        If None then the default function is used (currently 'Lorentzian')
+    containing multiple peaks of the same spectrum.
     """
 
-    def __init__(self, name=None, firstIndex=0):
+    def __init__(self, function, prefix, first_index):
         """
         Constructor.
-
-        @param name: The name of the function of each peak.  E.g. Gaussian
-
-        @param firstIndex: Index of the first peak in the function. For a single spectrum
-                function it is 0, in a multi-spectral case it's 1.
+        :param function: A CrystalField function who's peaks we want to access.
+        :param prefix: a prefix of the parameters of the spectrum we want to access.
+        :param first_index: Index of the first peak
         """
-        # Name of the peaks
-        self._name = name if name is not None else 'Lorentzian'
         # Collection of all attributes
-        self._attrib = CompositeProperties()
+        self._attrib = CompositeProperties(function, prefix, 'attributes', first_index)
         # Collection of all parameters
-        self._params = CompositeProperties()
-        # Ties
-        self._ties = []
-        # Constraints
-        self._constraints = []
-        # Index of the first peak
-        self._firstIndex = firstIndex
-
-    @property
-    def name(self):
-        """Read only name of the peak function"""
-        return self._name
+        self._params = CompositeProperties(function, prefix, 'parameters', first_index)
 
     @property
     def attr(self):
-        """Get a dict of all currently set attributes.
-        Use this property to set or get an attribute.
-        You can only get an attribute that has been previously set via this property.
+        """Get or set the function attributes.
+        Returns a FunctionAttributes object that accesses the peaks' attributes.
         """
         return self._attrib
 
     @property
     def param(self):
-        """Get a dict of all currently set parameters
-        Use this property to set or get a parameter.
-        You can only get a parameter that has been previously set via this property.
-        Example:
-
-            fun = PeaksFunction('Gaussian')
-            # Set Sigma parameter of the second peak
-            peaks.param[1]['Sigma'] = 0.1
-            ...
-            # Get the value of a previously set parameter
-            sigma = peaks.param[1]['Sigma']
-            ...
-            # Trying to get a value that wasn't set results in an error
-            height = peaks[1]['Height'] # error
+        """Get or set the function parameters.
+        Returns a FunctionParameters object that accesses the peaks' parameters.
         """
         return self._params
 
-    def ties(self, *ties):
+    def ties(self, ties_dict):
         """Set ties on the peak parameters.
 
-        @param ties: A list of ties. For example:
-                ties('f1.Sigma=0.1', 'f2.Sigma=2*f0.Sigma')
+        :param ties_dict: Ties as name=value pairs: name is a parameter name,
+              the value is a tie string or a number. For example:
+              ties({'f1.Sigma': '0.1', 'f2.Sigma': '2*f0.Sigma'})
         """
-        self._ties += ties
+        self._params.ties(ties_dict)
 
     def constraints(self, *constraints):
         """
@@ -271,7 +226,7 @@ class PeaksFunction(object):
         @param constraints: A list of constraints. For example:
                 constraints('f0.Sigma > 0', '0.1 < f1.Sigma < 0.9')
         """
-        self._constraints += constraints
+        self._params.constraints(*constraints)
 
     def tieAll(self, tie, iFirstN, iLast=-1):
         """
@@ -290,11 +245,13 @@ class PeaksFunction(object):
             start = iFirstN
             end = iLast + 1
         else:
-            start = self._firstIndex
-            end = iFirstN + self._firstIndex
-        pattern = 'f%s.' + tie
-        ties = [pattern % i for i in range(start, end)]
-        self.ties(*ties)
+            start = self._params.first_index
+            end = iFirstN + self._params.first_index
+        name, expr = tuple(tie.split('='))
+        name = 'f%s.' + name.strip()
+        expr = expr.strip()
+        ties = {(name % i): expr for i in range(start, end)}
+        self.ties(ties)
 
     def constrainAll(self, constraint, iFirstN, iLast=-1):
         """
@@ -313,74 +270,12 @@ class PeaksFunction(object):
             start = iFirstN
             end = iLast + 1
         else:
-            start = self._firstIndex
-            end = iFirstN + self._firstIndex
+            start = self._params.first_index
+            end = iFirstN + self._params.first_index
 
         pattern = re.sub(parNamePattern, 'f%s.\\1', constraint)
         self.constraints(*[pattern % i for i in range(start, end)])
 
-    def nPeaks(self):
-        """Get the number of peaks"""
-        numPeaks = max(self._attrib.getSize(), self._params.getSize())
-        if numPeaks == 0:
-            raise RuntimeError('PeaksFunction has no defined parameters or attributes.')
-        return numPeaks
-
-    def toString(self):
-        """Create function initialisation string"""
-        numPeaks = self.nPeaks()
-        attribs = self._attrib.toStringList()
-        params = self._params.toStringList()
-        if len(attribs) < numPeaks:
-            attribs += [''] * (numPeaks - len(attribs))
-        if len(params) < numPeaks:
-            params += [''] * (numPeaks - len(params))
-        peaks = []
-        for i in range(numPeaks):
-            attrib = attribs[i]
-            param = params[i]
-            if len(attrib) != 0 or len(param) != 0:
-                if len(attrib) == 0:
-                    peaks.append('name=%s,%s' % (self._name, param))
-                elif len(param) == 0:
-                    peaks.append('name=%s,%s' % (self._name, attrib))
-                else:
-                    peaks.append('name=%s,%s,%s' % (self._name, attrib,param))
-            else:
-                peaks.append('name=%s' % self._name)
-        out = ';'.join(peaks)
-        if len(self._ties) > 0:
-            out += ';%s' % self.tiesString()
-        return out
-
-    def paramString(self, prefix='', shift=0):
-        """Format a comma-separated list of all peaks attributes and parameters in a CompositeFunction
-        style.
-        """
-        numAttributes = self._attrib.getSize()
-        numParams = self._params.getSize()
-        if numAttributes == 0 and numParams == 0:
-            return ''
-        elif numAttributes == 0:
-            return self._params.toCompositeString(prefix, shift)
-        elif numParams == 0:
-            return self._attrib.toCompositeString(prefix, shift)
-        else:
-            return '%s,%s' % (self._attrib.toCompositeString(prefix, shift),
-                              self._params.toCompositeString(prefix, shift))
-
-    def tiesString(self, prefix=''):
-        if len(self._ties) > 0:
-            ties = ','.join(self._ties)
-            return 'ties=(%s)' % re.sub(parNamePattern, prefix + '\\1', ties)
-        return ''
-
-    def constraintsString(self, prefix=''):
-        if len(self._constraints) > 0:
-            constraints = ','.join(self._constraints)
-            return 'constraints=(%s)' % re.sub(parNamePattern, prefix + '\\1', constraints)
-        return ''
-
 
 class Background(object):
     """Object representing spectrum background: a sum of a central peak and a
@@ -405,16 +300,6 @@ class Background(object):
             aCopy.background = self.background.clone()
         return aCopy
 
-    def __mul__(self, nCopies):
-        """Make expressions like Background(...) * 8 return a list of 8 identical backgrounds."""
-        copies = [self] * nCopies
-        return list(map(Background.clone, copies))
-        # return [self.clone() for i in range(nCopies)]
-
-    def __rmul__(self, nCopies):
-        """Make expressions like 2 * Background(...) return a list of 2 identical backgrounds."""
-        return self.__mul__(nCopies)
-
     def toString(self):
         if self.peak is None and self.background is None:
             return ''
@@ -422,57 +307,7 @@ class Background(object):
             return self.background.toString()
         if self.background is None:
             return self.peak.toString()
-        return '%s;%s' % (self.peak.toString(), self.background.toString())
-
-    def nameString(self):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.name
-        if self.background is None:
-            return self.peak.name
-        return '"name=%s;name=%s"' % (self.peak.name, self.background.name)
-
-    def paramString(self, prefix):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.paramString(prefix)
-        if self.background is None:
-            return self.peak.paramString(prefix)
-        return '%s,%s' % (self.peak.paramString(prefix + 'f0.'), self.background.paramString(prefix + 'f1.'))
-
-    def tiesString(self, prefix):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.tiesString(prefix)
-        if self.background is None:
-            return self.peak.tiesString(prefix)
-        peakString = self.peak.tiesString(prefix + 'f0.')
-        backgroundString = self.background.tiesString(prefix + 'f1.')
-        if len(peakString) == 0:
-            return backgroundString
-        elif len(backgroundString) == 0:
-            return peakString
-        else:
-            return '%s,%s' % (peakString, backgroundString)
-
-    def constraintsString(self, prefix):
-        if self.peak is None and self.background is None:
-            return ''
-        if self.peak is None:
-            return self.background.constraintsString(prefix)
-        if self.background is None:
-            return self.peak.constraintsString(prefix)
-        peakString = self.peak.constraintsString(prefix + 'f0.')
-        backgroundString = self.background.constraintsString(prefix + 'f1.')
-        if len(peakString) == 0:
-            return backgroundString
-        elif len(backgroundString) == 0:
-            return peakString
-        else:
-            return '%s,%s' % (peakString, backgroundString)
+        return '(%s;%s)' % (self.peak.toString(), self.background.toString())
 
     def update(self, func1, func2=None):
         """
@@ -538,6 +373,13 @@ class ResolutionModel:
         self._checkModel(model)
         self.model = model
 
+    @property
+    def NumberOfSpectra(self):
+        if not self.multi:
+            return 1
+        else:
+            return len(self.model)
+
     def _checkModel(self, model):
         if not isinstance(model, tuple):
             raise RuntimeError('Resolution model must be a tuple of two arrays of floats.\n'
@@ -793,19 +635,20 @@ class PhysicalProperties(object):
                     out += ',Lambda=%s' % (self._lambda)
         return out
 
-    def envString(self, dataset=0):
-        """Create environment string for multidataset fitting"""
-        dataset = str(dataset)
-        out = ''
+    def getAttributes(self, dataset=None):
+        """Returns a dictionary of PhysicalProperties attributes for use with IFunction"""
+        dataset = '' if dataset is None else str(dataset)
+        out = {}
         if self._typeid > 1:
-            out += 'Unit%s=%s' % (dataset, self._physpropUnit)
+            out['Unit%s' % (dataset)] = self._physpropUnit
             if 'powder' in self._hdir:
-                out += ',powder%s=1' % (dataset)
+                out['powder%s' % (dataset)] = 1
             else:
-                out += ',Hdir%s=(%s)' % (dataset, ','.join([str(hh) for hh in self._hdir]))
+                out['Hdir%s' % (dataset)] = [float(hh) for hh in self._hdir] # needs to be list
             if self._typeid != 3:  # either susceptibility or M(T)
-                out += ',inverse%s=%s' % (dataset, 1 if self._suscInverseFlag else 0)
-                out += (',Hmag%s=%s' % (dataset, self._hmag)) if self._typeid==3 else ''
+                out['inverse%s' % (dataset)] = 1 if self._suscInverseFlag else 0
+                if self._typeid==3:
+                    out['Hmag%s' % (dataset)] = self._hmag
                 if self._typeid == 2 and self._lambda != 0:
-                    out += ',Lambda%s=%s' % (dataset, self._lambda)
+                    out['Lambda%s=' % (dataset)] = self._lambda
         return out
diff --git a/scripts/Inelastic/IndirectReductionCommon.py b/scripts/Inelastic/IndirectReductionCommon.py
index 84ce8f7e568571ddb5120dfeb084b96c5a5defac..81266cf0a52563a025034d9a76474ea56aed8e26 100644
--- a/scripts/Inelastic/IndirectReductionCommon.py
+++ b/scripts/Inelastic/IndirectReductionCommon.py
@@ -629,7 +629,7 @@ def plot_reduction(workspace_name, plot_type):
         from mantidplot import plotSpectrum
         num_spectra = mtd[workspace_name].getNumberHistograms()
         try:
-            plotSpectrum(workspace_name, range(0, num_spectra), error_bars=True)
+            plotSpectrum(workspace_name, range(0, num_spectra))
         except RuntimeError:
             logger.notice('Spectrum plotting canceled by user')
 
diff --git a/scripts/Inelastic/vesuvio/commands.py b/scripts/Inelastic/vesuvio/commands.py
index 46d71cd3b24d0f66b51844e0de9e6f64ef378090..fd1982b85a02b3161020359b90ea07c742e3ac57 100644
--- a/scripts/Inelastic/vesuvio/commands.py
+++ b/scripts/Inelastic/vesuvio/commands.py
@@ -1,4 +1,4 @@
-#pylint: disable=too-many-arguments,invalid-name,too-many-locals,too-many-branches
+# pylint: disable=too-many-arguments,invalid-name,too-many-locals,too-many-branches
 """
 Defines functions and classes to start the processing of Vesuvio data.
 The main entry point that most users should care about is fit_tof().
@@ -56,7 +56,7 @@ def fit_tof(runs, flags, iterations=1, convergence_threshold=None):
 
     exit_iteration = 0
 
-    for iteration in range(1, iterations+1):
+    for iteration in range(1, iterations + 1):
         iteration_flags = copy.deepcopy(flags)
         iteration_flags['iteration'] = iteration
 
@@ -82,7 +82,7 @@ def fit_tof(runs, flags, iterations=1, convergence_threshold=None):
 
         last_results = results
 
-    return (last_results[0], last_results[2], last_results[3], exit_iteration)
+    return last_results[0], last_results[2], last_results[3], exit_iteration
 
 
 def fit_tof_iteration(sample_data, container_data, runs, flags):
@@ -112,10 +112,14 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
     num_spec = sample_data.getNumberHistograms()
     pre_correct_pars_workspace = None
     pars_workspace = None
+    fit_workspace = None
     max_fit_iterations = flags.get('max_fit_iterations', 5000)
 
     output_groups = []
     chi2_values = []
+    data_workspaces = []
+    result_workspaces = []
+    group_name = runs + '_result'
     for index in range(num_spec):
         if isinstance(profiles_strs, list):
             profiles = profiles_strs[index]
@@ -148,7 +152,7 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
         ms.DeleteWorkspace(corrections_fit_name)
         corrections_args['FitParameters'] = pre_correction_pars_name
 
-        # Add the mutiple scattering arguments
+        # Add the multiple scattering arguments
         corrections_args.update(flags['ms_flags'])
 
         corrected_data_name = runs + "_tof_corrected" + suffix
@@ -199,6 +203,9 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
         if pars_workspace is None:
             pars_workspace = _create_param_workspace(num_spec, mtd[pars_name])
 
+        if fit_workspace is None:
+            fit_workspace = _create_param_workspace(num_spec, mtd[linear_correction_fit_params_name])
+
         spec_num_str = str(sample_data.getSpectrum(index).getSpectrumNo())
         current_spec = 'spectrum_' + spec_num_str
 
@@ -208,34 +215,56 @@ def fit_tof_iteration(sample_data, container_data, runs, flags):
         _update_fit_params(pars_workspace, index,
                            mtd[pars_name], current_spec)
 
+        _update_fit_params(fit_workspace, index, mtd[linear_correction_fit_params_name], current_spec)
+
         ms.DeleteWorkspace(pre_correction_pars_name)
         ms.DeleteWorkspace(pars_name)
+        ms.DeleteWorkspace(linear_correction_fit_params_name)
 
         # Process spectrum group
         # Note the ordering of operations here gives the order in the WorkspaceGroup
-        group_name = runs + suffix
-        output_workspaces = [fit_ws_name, linear_correction_fit_params_name]
+        output_workspaces = []
+        data_workspaces.append(fit_ws_name)
         if flags.get('output_verbose_corrections', False):
             output_workspaces += mtd[corrections_args["CorrectionWorkspaces"]].getNames()
             output_workspaces += mtd[corrections_args["CorrectedWorkspaces"]].getNames()
             ms.UnGroupWorkspace(corrections_args["CorrectionWorkspaces"])
             ms.UnGroupWorkspace(corrections_args["CorrectedWorkspaces"])
 
-        output_groups.append(ms.GroupWorkspaces(InputWorkspaces=output_workspaces,
-                                                OutputWorkspace=group_name))
+            for workspace in output_workspaces:
+
+                group_name = runs + '_iteration_' + str(flags.get('iteration', None))
+                name = group_name + '_' + workspace.split('_')[1] + '_' + workspace.split('_')[-1]
+                result_workspaces.append(name)
+                if index == 0:
+                    ms.RenameWorkspace(InputWorkspace=workspace, OutputWorkspace=name)
+                else:
+                    ms.ConjoinWorkspaces(InputWorkspace1=name, InputWorkspace2=workspace)
 
         # Output the parameter workspaces
         params_pre_corr = runs + "_params_pre_correction_iteration_" + str(flags['iteration'])
         params_name = runs + "_params_iteration_" + str(flags['iteration'])
+        fit_name = runs + "_correction_fit_scale_iteration_" + str(flags['iteration'])
         AnalysisDataService.Instance().addOrReplace(params_pre_corr, pre_correct_pars_workspace)
         AnalysisDataService.Instance().addOrReplace(params_name, pars_workspace)
+        AnalysisDataService.Instance().addOrReplace(fit_name, fit_workspace)
+
+    if result_workspaces:
+        output_groups.append(ms.GroupWorkspaces(InputWorkspaces=result_workspaces,
+                                                OutputWorkspace=group_name))
+
+    if data_workspaces:
+        output_groups.append(ms.GroupWorkspaces(InputWorkspaces=data_workspaces,
+                                                OutputWorkspace=group_name + '_data'))
+    else:
+        output_groups.append(fit_ws_name)
 
     if len(output_groups) > 1:
         result_ws = output_groups
     else:
         result_ws = output_groups[0]
 
-    return (result_ws, pre_correct_pars_workspace, pars_workspace, chi2_values)
+    return result_ws, pre_correct_pars_workspace, pars_workspace, chi2_values
 
 
 def load_and_crop_data(runs, spectra, ip_file, diff_mode='single',
@@ -292,6 +321,7 @@ def load_and_crop_data(runs, spectra, ip_file, diff_mode='single',
 
     return tof_data
 
+
 # --------------------------------------------------------------------------------
 # Private Functions
 # --------------------------------------------------------------------------------
@@ -299,7 +329,7 @@ def load_and_crop_data(runs, spectra, ip_file, diff_mode='single',
 
 def _update_masses_from_params(old_masses, param_ws):
     """
-    Update the massses flag based on the results of a fit.
+    Update the masses flag based on the results of a fit.
 
     @param old_masses The existing masses dictionary
     @param param_ws The workspace to update from
@@ -372,7 +402,7 @@ def _create_tof_workspace_suffix(runs, spectra):
 
 def _create_fit_workspace_suffix(index, tof_data, fit_mode, spectra, iteration=None):
     if fit_mode == "bank":
-        suffix = "_" + spectra + "_bank_" + str(index+1)
+        suffix = "_" + spectra + "_bank_" + str(index + 1)
     else:
         spectrum = tof_data.getSpectrum(index)
         suffix = "_spectrum_" + str(spectrum.getSpectrumNo())
@@ -426,12 +456,12 @@ def _create_background_str(background_flags):
 def _create_intensity_constraint_str(intensity_constraints):
     """
     Create a string suitable for the algorithms out of the intensity constraint flags
-    :param inten_constr_flags: A list of lists for the constraints (can be None)
+    :param intensity_constraints: A list of lists for the constraints (can be None)
     :return: A string to pass to the algorithm
     """
     if intensity_constraints:
         if not isinstance(intensity_constraints[0], list):
-            intensity_constraints = [intensity_constraints,]
+            intensity_constraints = [intensity_constraints]
         # Make each element a string and then join them together
         intensity_constraints = [str(c) for c in intensity_constraints]
         intensity_constraints_str = ";".join(intensity_constraints)
@@ -451,11 +481,11 @@ def _create_user_defined_ties_str(masses):
     for index, mass in enumerate(masses):
         if 'ties' in mass:
             ties = mass['ties'].split(',')
-            function_indentifier = 'f' + str(index) + '.'
+            function_identifier = 'f' + str(index) + '.'
             for t in ties:
-                tie_str = function_indentifier + t
+                tie_str = function_identifier + t
                 equal_pos = tie_str.index('=') + 1
-                tie_str = tie_str[:equal_pos] + function_indentifier + tie_str[equal_pos:]
+                tie_str = tie_str[:equal_pos] + function_identifier + tie_str[equal_pos:]
                 user_defined_ties.append(tie_str)
     user_defined_ties = ','.join(user_defined_ties)
     return user_defined_ties
diff --git a/scripts/Interface/ui/reflectometer/refl_gui.py b/scripts/Interface/ui/reflectometer/refl_gui.py
index 6d275c51f9a88da8c5ffa5d46b7383d7af68b9e8..8de9d8c0aeeb0baf9c40641ffcb4dfa09f8eec92 100644
--- a/scripts/Interface/ui/reflectometer/refl_gui.py
+++ b/scripts/Interface/ui/reflectometer/refl_gui.py
@@ -784,13 +784,14 @@ class ReflGui(QtGui.QMainWindow, ui_refl_window.Ui_windowRefl):
                         # Populate runlist
                         first_wq = None
                         for i in range(0, len(runno)):
-                            theta, qmin, qmax, _wlam, _wqBinned, wq = self._do_run(runno[i], row, i)
+                            theta, qmin, qmax, _wlam, wqBinnedAndScaled, _wqUnBinnedAndUnScaled = \
+                                self._do_run(runno[i], row, i)
                             if not first_wq:
-                                first_wq = wq # Cache the first Q workspace
+                                first_wq = wqBinnedAndScaled # Cache the first Q workspace
                             theta = round(theta, 3)
                             qmin = round(qmin, 3)
                             qmax = round(qmax, 3)
-                            wksp.append(wq.name())
+                            wksp.append(wqBinnedAndScaled.name())
                             if self.tableMain.item(row, i * 5 + 1).text() == '':
                                 item = QtGui.QTableWidgetItem()
                                 item.setText(str(theta))
diff --git a/scripts/SANS/ISISCommandInterface.py b/scripts/SANS/ISISCommandInterface.py
index d49aac99a9f589efdd6058f3d3903d31040b1c7e..1980786e8fe636a271faf18e7fdc73bd762be275 100644
--- a/scripts/SANS/ISISCommandInterface.py
+++ b/scripts/SANS/ISISCommandInterface.py
@@ -1767,14 +1767,13 @@ def is_current_workspace_an_angle_workspace():
     return is_angle
 
 
-def MatchIDFInReducerAndWorkspace(file_name):
-    '''
-    This method checks if the IDF which gets loaded with the workspace associated
-    with the file name and the current instrument in the reducer singleton refer
-    to the same IDF. If not then switch the IDF in the reducer.
-    '''
-    is_matched = True
+def _get_idf_path_for_run(file_name):
+    """
+    This method finds the full file location for a run number
 
+    :param file_name: the file name or run number
+    :return: the full path to the corresponding IDF
+    """
     # Get measurement time from file
     measurement_time = su.get_measurement_time_from_file(file_name)
 
@@ -1783,16 +1782,30 @@ def MatchIDFInReducerAndWorkspace(file_name):
 
     # Get the path to the instrument definition file
     idf_path_workspace = ExperimentInfo.getInstrumentFilename(instrument_name, measurement_time)
-    idf_path_workspace = os.path.normpath(idf_path_workspace)
+    return os.path.normpath(idf_path_workspace)
+
+
+def get_idf_path_for_run(file_name):
+    idf_path_workspace = _get_idf_path_for_run(file_name)
+    print(idf_path_workspace)
+    return idf_path_workspace
+
+
+def MatchIDFInReducerAndWorkspace(file_name):
+    '''
+    This method checks if the IDF which gets loaded with the workspace associated
+    with the file name and the current instrument in the reducer singleton refer
+    to the same IDF. If not then switch the IDF in the reducer.
+    '''
+
+    # Get the IDF path
+    idf_path_workspace = _get_idf_path_for_run(file_name)
 
     # Get the idf from the reducer
     idf_path_reducer = get_current_idf_path_in_reducer()
 
-    if ((idf_path_reducer == idf_path_workspace) and
-            su.are_two_files_identical(idf_path_reducer, idf_path_reducer)):
-        is_matched = True
-    else:
-        is_matched = False
+    is_matched = ((idf_path_reducer == idf_path_workspace) and
+                  su.are_two_files_identical(idf_path_reducer, idf_path_reducer))
 
     return is_matched
 
diff --git a/scripts/SANS/SANSBatchMode.py b/scripts/SANS/SANSBatchMode.py
index ab0ab26821c34ea2a47016c5a47f1823df563aa7..533a1a271fa929fe2041c94610ca1400129691ed 100644
--- a/scripts/SANS/SANSBatchMode.py
+++ b/scripts/SANS/SANSBatchMode.py
@@ -226,6 +226,10 @@ def BatchReduce(filename, format, plotresults=False, saveAlgs={'SaveRKH':'txt'},
     original_user_file = ReductionSingleton().user_settings.filename
     current_user_file = original_user_file
 
+    # Store the original combineDet which was set either by the input. this should be used whenever we are using the
+    # original user file
+    original_combine_det = combineDet
+
     # Now loop over all the lines and do a reduction (hopefully) for each
     for run in runinfo:
         # Set the user file, if it is required
@@ -235,12 +239,16 @@ def BatchReduce(filename, format, plotresults=False, saveAlgs={'SaveRKH':'txt'},
                                                        original_user_file=original_user_file,
                                                        original_settings = settings,
                                                        original_prop_man_settings = prop_man_settings)
-            # When we set a new user file, that means that the combineDet feature could be invalid,
-            # ie if the detector under investigation changed in the user file. We need to change this
-            # here too. But only if it is not None.
-            if combineDet is not None:
-                new_combineDet = ReductionSingleton().instrument.get_detector_selection()
-                combineDet = su.get_correct_combinDet_setting(ins_name, new_combineDet)
+
+            if current_user_file == original_user_file:
+                combineDet = original_combine_det
+            else:
+                # When we set a new user file, that means that the combineDet feature could be invalid,
+                # ie if the detector under investigation changed in the user file. We need to change this
+                # here too. But only if it is not None.
+                if combineDet is not None:
+                    new_combineDet = ReductionSingleton().instrument.get_detector_selection()
+                    combineDet = su.get_correct_combinDet_setting(ins_name, new_combineDet)
         except (RuntimeError, ValueError) as e:
             sanslog.warning("Error in Batchmode user files: Could not reset the specified user file %s. More info: %s" %(
                 str(run['user_file']), str(e)))
diff --git a/scripts/SANS/SANSUtility.py b/scripts/SANS/SANSUtility.py
index 784a9277e78b23ff7c9b2bb2f2bc3058554eaacc..14c223bfdda4d35ebeabe4e22dbafe5e4d130cb5 100644
--- a/scripts/SANS/SANSUtility.py
+++ b/scripts/SANS/SANSUtility.py
@@ -1997,7 +1997,7 @@ def get_correct_combinDet_setting(instrument_name, detector_selection):
     detector_selection = detector_selection.upper()
     # If we are dealing with LOQ, then the correct combineDet selection is
     if instrument_name == "LOQ":
-        if detector_selection == "MAIN":
+        if detector_selection == "MAIN" or detector_selection == "MAIN-DETECTOR-BANK":
             new_combine_detector_selection = 'rear'
         elif detector_selection == "HAB":
             new_combine_detector_selection = 'front'
@@ -2012,9 +2012,9 @@ def get_correct_combinDet_setting(instrument_name, detector_selection):
 
     # If we are dealing with SANS2D, then the correct combineDet selection is
     if instrument_name == "SANS2D":
-        if detector_selection == "REAR":
+        if detector_selection == "REAR" or detector_selection == "REAR-DETECTOR":
             new_combine_detector_selection = 'rear'
-        elif detector_selection == "FRONT":
+        elif detector_selection == "FRONT" or detector_selection == "FRONT-DETECTOR":
             new_combine_detector_selection = 'front'
         elif detector_selection == "MERGED":
             new_combine_detector_selection = 'merged'
diff --git a/scripts/SANS/sans/algorithm_detail/batch_execution.py b/scripts/SANS/sans/algorithm_detail/batch_execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1d432c8d0516cbf2cffd879c227c1b691b3626b
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/batch_execution.py
@@ -0,0 +1,957 @@
+from __future__ import (absolute_import, division, print_function)
+from copy import deepcopy
+from mantid.api import AnalysisDataService
+
+from sans.common.general_functions import (create_managed_non_child_algorithm, create_unmanaged_algorithm,
+                                           get_output_name, get_base_name_from_multi_period_name)
+from sans.common.enums import (SANSDataType, SaveType, OutputMode, ISISReductionMode)
+from sans.common.constants import (TRANS_SUFFIX, SANS_SUFFIX, ALL_PERIODS,
+                                   LAB_CAN_SUFFIX, LAB_CAN_COUNT_SUFFIX, LAB_CAN_NORM_SUFFIX,
+                                   HAB_CAN_SUFFIX, HAB_CAN_COUNT_SUFFIX, HAB_CAN_NORM_SUFFIX,
+                                   REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION,
+                                   REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+from sans.common.file_information import (get_extension_for_file_type, SANSFileInformationFactory)
+from sans.state.data import StateData
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for the execution of a single batch iteration
+# ----------------------------------------------------------------------------------------------------------------------
+def single_reduction_for_batch(state, use_optimizations, output_mode):
+    """
+    Runs a single reduction.
+
+    This function creates reduction packages which essentially contain information for a single valid reduction, run it
+    and store the results according to the user specified setting (output_mode). Although this is considered a single
+    reduction it can contain still several reductions since the SANSState object can at this point contain slice
+    settings which require on reduction per time slice.
+    :param state: a SANSState object
+    :param use_optimizations: if true then the optimizations of child algorithms are enabled.
+    :param output_mode: the output mode
+    """
+    # ------------------------------------------------------------------------------------------------------------------
+    # Load the data
+    # ------------------------------------------------------------------------------------------------------------------
+    workspace_to_name = {SANSDataType.SampleScatter: "SampleScatterWorkspace",
+                         SANSDataType.SampleTransmission: "SampleTransmissionWorkspace",
+                         SANSDataType.SampleDirect: "SampleDirectWorkspace",
+                         SANSDataType.CanScatter: "CanScatterWorkspace",
+                         SANSDataType.CanTransmission: "CanTransmissionWorkspace",
+                         SANSDataType.CanDirect: "CanDirectWorkspace"}
+
+    workspace_to_monitor = {SANSDataType.SampleScatter: "SampleScatterMonitorWorkspace",
+                            SANSDataType.CanScatter: "CanScatterMonitorWorkspace"}
+
+    workspaces, monitors = provide_loaded_data(state, use_optimizations, workspace_to_name, workspace_to_monitor)
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Get reduction settings
+    # Split into individual bundles which can be reduced individually. We split here if we have multiple periods or
+    # sliced times for example.
+    # ------------------------------------------------------------------------------------------------------------------
+    reduction_packages = get_reduction_packages(state, workspaces, monitors)
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Run reductions (one at a time)
+    # ------------------------------------------------------------------------------------------------------------------
+    single_reduction_name = "SANSSingleReduction"
+    single_reduction_options = {"UseOptimizations": use_optimizations}
+    reduction_alg = create_managed_non_child_algorithm(single_reduction_name, **single_reduction_options)
+    reduction_alg.setChild(False)
+    # Perform the data reduction
+    for reduction_package in reduction_packages:
+        # -----------------------------------
+        # Set the properties on the algorithm
+        # -----------------------------------
+        set_properties_for_reduction_algorithm(reduction_alg, reduction_package,
+                                               workspace_to_name, workspace_to_monitor)
+
+        # -----------------------------------
+        #  Run the reduction
+        # -----------------------------------
+        reduction_alg.execute()
+
+        # -----------------------------------
+        # Get the output of the algorithm
+        # -----------------------------------
+        reduction_package.reduced_lab = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceLAB")
+        reduction_package.reduced_hab = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceHAB")
+        reduction_package.reduced_merged = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceMerged")
+
+        reduction_package.reduced_lab_can = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceLABCan")
+        reduction_package.reduced_lab_can_count = get_workspace_from_algorithm(reduction_alg,
+                                                                               "OutputWorkspaceLABCanCount")
+        reduction_package.reduced_lab_can_norm = get_workspace_from_algorithm(reduction_alg,
+                                                                              "OutputWorkspaceLABCanNorm")
+        reduction_package.reduced_hab_can = get_workspace_from_algorithm(reduction_alg, "OutputWorkspaceHABCan")
+        reduction_package.reduced_hab_can_count = get_workspace_from_algorithm(reduction_alg,
+                                                                               "OutputWorkspaceHABCanCount")
+        reduction_package.reduced_hab_can_norm = get_workspace_from_algorithm(reduction_alg,
+                                                                              "OutputWorkspaceHABCanNorm")
+
+        # -----------------------------------
+        # The workspaces are already on the ADS, but should potentially be grouped
+        # -----------------------------------
+        group_workspaces_if_required(reduction_package)
+
+    # --------------------------------
+    # Perform output of all workspaces
+    # --------------------------------
+    # We have three options here
+    # 1. PublishToADS:
+    #    * This means we can leave it as it is
+    # 2. SaveToFile:
+    #    * This means we need to save out the reduced data
+    #    * Then we need to delete the reduced data from the ADS
+    # 3. Both:
+    #    * This means that we need to save out the reduced data
+    #    * The data is already on the ADS, so do nothing
+
+    if output_mode is OutputMode.SaveToFile:
+        save_to_file(reduction_packages)
+        delete_reduced_workspaces(reduction_packages)
+    elif output_mode is OutputMode.Both:
+        save_to_file(reduction_packages)
+
+    # -----------------------------------------------------------------------
+    # Clean up other workspaces if the optimizations have not been turned on.
+    # -----------------------------------------------------------------------
+    if not use_optimizations:
+        delete_optimization_workspaces(reduction_packages)
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for Data Loading
+# ----------------------------------------------------------------------------------------------------------------------
+def get_expected_workspace_names(file_information, is_transmission, period, get_base_name_only=False):
+    """
+    Creates the expected names for SANS workspaces.
+
+    SANS scientists expect the load workspaces to have certain, typical names. For example, the file SANS2D00022024.nxs
+    which is used as a transmission workspace translates into 22024_trans_nxs.
+    :param file_information: a file information object
+    :param is_transmission: if the file information is for a transmission or not
+    :param period: the period of interest
+    :param get_base_name_only: if we only want the base name and not the name with the period information
+    :return: a list of workspace names
+    """
+    suffix_file_type = get_extension_for_file_type(file_information)
+    if is_transmission:
+        suffix_data = TRANS_SUFFIX
+    else:
+        suffix_data = SANS_SUFFIX
+
+    run_number = file_information.get_run_number()
+
+    # Three possibilities:
+    #  1. No period data => 22024_sans_nxs
+    #  2. Period data, but wants all => 22025p1_sans_nxs,  22025p2_sans_nxs, ...
+    #  3. Period data, select particular period => 22025p3_sans_nxs
+    if file_information.get_number_of_periods() == 1:
+        workspace_name = "{0}_{1}_{2}".format(run_number, suffix_data, suffix_file_type)
+        names = [workspace_name]
+    elif file_information.get_number_of_periods() > 1 and period is StateData.ALL_PERIODS:
+        workspace_names = []
+        if get_base_name_only:
+            workspace_names.append("{0}_{1}_{2}".format(run_number, suffix_data, suffix_file_type))
+        else:
+            for period in range(1, file_information.get_number_of_periods() + 1):
+                workspace_names.append("{0}p{1}_{2}_{3}".format(run_number, period, suffix_data, suffix_file_type))
+        names = workspace_names
+    elif file_information.get_number_of_periods() > 1 and period is not StateData.ALL_PERIODS:
+        workspace_name = "{0}p{1}_{2}_{3}".format(run_number, period, suffix_data, suffix_file_type)
+        names = [workspace_name]
+    else:
+        raise RuntimeError("SANSLoad: Cannot create workspace names.")
+    return names
+
+
+def set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options, load_workspace_name, file_name, period,
+                                                                  is_transmission, file_info_factory,
+                                                                  load_monitor_name=None):
+    file_info = file_info_factory.create_sans_file_information(file_name)
+    workspace_names = get_expected_workspace_names(file_info, is_transmission=is_transmission, period=period,
+                                                   get_base_name_only=True)
+    count = 0
+    # Now we set the load options if we are dealing with multi-period data, then we need to
+    for workspace_name in workspace_names:
+        if count == 0:
+            load_options.update({load_workspace_name: workspace_name})
+            if load_monitor_name is not None:
+                monitor_name = workspace_name + "_monitors"
+                load_options.update({load_monitor_name: monitor_name})
+        else:
+            load_workspace_name_for_period = load_workspace_name + "_" + str(count)
+            load_options.update({load_workspace_name_for_period: workspace_name})
+            if load_monitor_name is not None:
+                load_monitor_name_for_period = load_monitor_name + "_" + str(count)
+                monitor_name = workspace_name + "_monitors"
+                load_options.update({load_monitor_name_for_period: monitor_name})
+        count += 1
+
+
+def set_output_workspaces_on_load_algorithm(load_options, state):
+    data = state.data
+    file_information_factory = SANSFileInformationFactory()
+
+    # SampleScatter and SampleScatterMonitor
+    set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                  load_workspace_name="SampleScatterWorkspace",
+                                                                  file_name=data.sample_scatter,
+                                                                  period=data.sample_scatter_period,
+                                                                  is_transmission=False,
+                                                                  file_info_factory=file_information_factory,
+                                                                  load_monitor_name="SampleScatterMonitorWorkspace")
+
+    # SampleTransmission
+    sample_transmission = data.sample_transmission
+    if sample_transmission:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="SampleTransmissionWorkspace",
+                                                                      file_name=sample_transmission,
+                                                                      period=data.sample_transmission_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+    # SampleDirect
+    sample_direct = data.sample_direct
+    if sample_direct:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="SampleDirectWorkspace",
+                                                                      file_name=sample_direct,
+                                                                      period=data.sample_direct_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+
+    # CanScatter + CanMonitor
+    can_scatter = data.can_scatter
+    if can_scatter:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="CanScatterWorkspace",
+                                                                      file_name=can_scatter,
+                                                                      period=data.can_scatter_period,
+                                                                      is_transmission=False,
+                                                                      file_info_factory=file_information_factory,
+                                                                      load_monitor_name="CanScatterMonitorWorkspace")
+
+    # CanTransmission
+    can_transmission = data.can_transmission
+    if can_transmission:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="CanTransmissionWorkspace",
+                                                                      file_name=can_transmission,
+                                                                      period=data.can_transmission_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+    # CanDirect
+    can_direct = data.can_direct
+    if can_direct:
+        set_output_workspace_on_load_algorithm_for_one_workspace_type(load_options=load_options,
+                                                                      load_workspace_name="CanDirectWorkspace",
+                                                                      file_name=can_direct,
+                                                                      period=data.can_direct_period,
+                                                                      is_transmission=True,
+                                                                      file_info_factory=file_information_factory)
+
+
+def provide_loaded_data(state, use_optimizations, workspace_to_name, workspace_to_monitor):
+    """
+    Provide the data for reduction.
+
+
+    :param state: a SANSState object.
+    :param use_optimizations: if optimizations are enabled, then the load mechanism will search for workspaces on the
+                              ADS.
+    :param workspace_to_name: a map of SANSDataType vs output-property name of SANSLoad for workspaces
+    :param workspace_to_monitor: a map of SANSDataType vs output-property name of SANSLoad for monitor workspaces
+    :return: a list fo workspaces and a list of monitor workspaces
+    """
+    # Load the data
+    state_serialized = state.property_manager
+    load_name = "SANSLoad"
+    load_options = {"SANSState": state_serialized,
+                    "PublishToCache": use_optimizations,
+                    "UseCached": use_optimizations,
+                    "MoveWorkspace": False}
+
+    # Set the output workspaces
+    set_output_workspaces_on_load_algorithm(load_options, state)
+
+    load_alg = create_managed_non_child_algorithm(load_name, **load_options)
+    load_alg.execute()
+
+    # Retrieve the data
+    workspace_to_count = {SANSDataType.SampleScatter: "NumberOfSampleScatterWorkspaces",
+                          SANSDataType.SampleTransmission: "NumberOfSampleTransmissionWorkspaces",
+                          SANSDataType.SampleDirect: "NumberOfSampleDirectWorkspaces",
+                          SANSDataType.CanScatter: "NumberOfCanScatterWorkspaces",
+                          SANSDataType.CanTransmission: "NumberOfCanTransmissionWorkspaces",
+                          SANSDataType.CanDirect: "NumberOfCanDirectWorkspaces"}
+
+    workspaces = get_workspaces_from_load_algorithm(load_alg, workspace_to_count, workspace_to_name)
+    monitors = get_workspaces_from_load_algorithm(load_alg, workspace_to_count, workspace_to_monitor)
+    return workspaces, monitors
+
+
+def add_loaded_workspace_to_ads(load_alg, workspace_property_name, workspace):
+    """
+    Adds a workspace with the name that was set on the output of the load algorithm to the ADS
+
+
+    :param load_alg: a handle to the load algorithm
+    :param workspace_property_name: the workspace property name
+    :param workspace: the workspace
+    """
+    workspace_name = load_alg.getProperty(workspace_property_name).valueAsStr
+    AnalysisDataService.addOrReplace(workspace_name, workspace)
+
+
+def get_workspaces_from_load_algorithm(load_alg, workspace_to_count, workspace_name_dict):
+    """
+    Reads the workspaces from SANSLoad
+
+    :param load_alg: a handle to the load algorithm
+    :param workspace_to_count: a map from SANSDataType to the output-number property name of SANSLoad for workspaces
+    :param workspace_name_dict: a map of SANSDataType vs output-property name of SANSLoad for (monitor) workspaces
+    :return: a map of SANSDataType vs list of workspaces (to handle multi-period data)
+    """
+    workspace_output = {}
+    for workspace_type, workspace_name in list(workspace_name_dict.items()):
+        count_id = workspace_to_count[workspace_type]
+        number_of_workspaces = load_alg.getProperty(count_id).value
+        workspaces = []
+        if number_of_workspaces > 1:
+            workspaces = get_multi_period_workspaces(load_alg, workspace_name_dict[workspace_type],
+                                                     number_of_workspaces)
+        else:
+            workspace_id = workspace_name_dict[workspace_type]
+            workspace = get_workspace_from_algorithm(load_alg, workspace_id)
+            if workspace is not None:
+                workspaces.append(workspace)
+        # Add the workspaces to the to the output
+        workspace_output.update({workspace_type: workspaces})
+    return workspace_output
+
+
+def get_multi_period_workspaces(load_alg, workspace_name, number_of_workspaces):
+    # Create an output name for each workspace and retrieve it from the load algorithm
+    workspaces = []
+    workspace_names = []
+    for index in range(1, number_of_workspaces + 1):
+        output_property_name = workspace_name + "_" + str(index)
+        output_workspace_name = load_alg.getProperty(output_property_name).valueAsStr
+        workspace_names.append(output_workspace_name)
+        workspace = get_workspace_from_algorithm(load_alg, output_property_name)
+        workspaces.append(workspace)
+
+    # Group the workspaces
+    base_name = get_base_name_from_multi_period_name(workspace_names[0])
+    group_name = "GroupWorkspaces"
+    group_options = {"InputWorkspaces": workspace_names,
+                     "OutputWorkspace": base_name}
+    group_alg = create_unmanaged_algorithm(group_name, **group_options)
+    group_alg.setChild(False)
+    group_alg.execute()
+    return workspaces
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for reduction packages
+# ----------------------------------------------------------------------------------------------------------------------
+def get_reduction_packages(state, workspaces, monitors):
+    """
+    This function creates a set of reduction packages which contain the necessary state for a single reduction
+    as well as the required workspaces.
+
+    There are several reasons why a state can (and should) split up:
+    1. Multi-period files were loaded. This means that we need to perform one reduction per (loaded) period
+    2. Event slices were specified. This means that we need to perform one reduction per event slice.
+
+    :param state: A single state which potentially needs to be split up into several states
+    :param workspaces: The workspaces contributing to the reduction
+    :param monitors: The monitors contributing to the reduction
+    :return: A set of "Reduction packages" where each reduction package defines a single reduction.
+    """
+    # First: Split the state on a per-period basis
+    reduction_packages = create_initial_reduction_packages(state, workspaces, monitors)
+
+    # Second: Split resulting reduction packages on a per-event-slice basis
+    # Note that at this point all reduction packages will have the same state information. They only differ in the
+    # workspaces that they use.
+    if reduction_packages_require_splitting_for_event_slices(reduction_packages):
+        reduction_packages = split_reduction_packages_for_event_slice_packages(reduction_packages)
+
+    # TODO: Third: Split resulting reduction packages on a per-wave-length-range basis
+    return reduction_packages
+
+
+def reduction_packages_require_splitting_for_event_slices(reduction_packages):
+    """
+    Creates reduction packages from a list of reduction packages by splitting up event slices.
+
+    The SANSSingleReduction algorithm can handle only a single time slice. For each time slice, we require an individual
+    reduction. Hence we split the states up at this point.
+    :param reduction_packages: a list of reduction packages.
+    :return: a list of reduction packages which has at leaset the same length as the input
+    """
+    # Determine if the event slice sub-state object contains multiple event slice requests. This is given
+    # by the number of elements in start_tof
+    reduction_package = reduction_packages[0]
+    state = reduction_package.state
+    slice_event_info = state.slice
+    start_time = slice_event_info.start_time
+    if start_time is not None and len(start_time) > 1:
+        requires_split = True
+    else:
+        requires_split = False
+    return requires_split
+
+
+def split_reduction_packages_for_event_slice_packages(reduction_packages):
+    """
+    Splits a reduction package object into several reduction package objects if it contains several event slice settings
+
+    We want to split this up here since each event slice is a full reduction cycle in itself.
+    :param reduction_packages: a list of reduction packages
+    :return: a list of reduction packages where each reduction setting contains only one event slice.
+    """
+    # Since the state is the same for all reduction packages at this point we only need to create the split state once
+    # for the first package and the apply to all the other packages. If we have 5 reduction packages and the user
+    # requests 6 event slices, then we end up with 60 reductions!
+    reduction_package = reduction_packages[0]
+    state = reduction_package.state
+    slice_event_info = state.slice
+    start_time = slice_event_info.start_time
+    end_time = slice_event_info.end_time
+
+    states = []
+    for start, end in zip(start_time, end_time):
+        state_copy = deepcopy(state)
+        slice_event_info = state_copy.slice
+        slice_event_info.start_time = [start]
+        slice_event_info.end_time = [end]
+        states.append(state_copy)
+
+    # Now that we have all the states spread them across the packages
+    reduction_packages_split = []
+    for reduction_package in reduction_packages:
+        workspaces = reduction_package.workspaces
+        monitors = reduction_package.monitors
+        is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+        for state in states:
+            new_state = deepcopy(state)
+            new_reduction_package = ReductionPackage(state=new_state,
+                                                     workspaces=workspaces,
+                                                     monitors=monitors,
+                                                     is_part_of_multi_period_reduction=is_part_of_multi_period_reduction,
+                                                     is_part_of_event_slice_reduction=True)
+            reduction_packages_split.append(new_reduction_package)
+    return reduction_packages_split
+
+
+def create_initial_reduction_packages(state, workspaces, monitors):
+    """
+    This provides the initial split of the workspaces.
+
+    If the data stems from multi-period data, then we need to split up the workspaces. The state object is valid
+    for each one of these workspaces. Hence we need to create a deep copy of them for each reduction package.
+
+    The way multi-period files are handled over the different workspaces input types is:
+    1. The sample scatter period determines all other periods, i.e. if the sample scatter workspace is has only
+       one period, but the sample transmission has two, then only the first period is used.
+    2. If the sample scatter period is not available on an other workspace type, then the last period on that
+       workspace type is used.
+
+    For the cases where the periods between the different workspaces types does not match, an information is logged.
+
+    :param state: A single state which potentially needs to be split up into several states
+    :param workspaces: The workspaces contributing to the reduction
+    :param monitors: The monitors contributing to the reduction
+    :return: A set of "Reduction packages" where each reduction package defines a single reduction.
+    """
+    # For loaded peri0d we create a package
+    packages = []
+
+    data_info = state.data
+    sample_scatter_period = data_info.sample_scatter_period
+    requires_new_period_selection = len(workspaces[SANSDataType.SampleScatter]) > 1 \
+                                    and sample_scatter_period == ALL_PERIODS  # noqa
+
+    is_multi_period = len(workspaces[SANSDataType.SampleScatter]) > 1
+
+    for index in range(0, len(workspaces[SANSDataType.SampleScatter])):
+        workspaces_for_package = {}
+        # For each workspace type, i.e sample scatter, can transmission, etc. find the correct workspace
+        for workspace_type, workspace_list in list(workspaces.items()):
+            workspace = get_workspace_for_index(index, workspace_list)
+            workspaces_for_package.update({workspace_type: workspace})
+
+        # For each monitor type, find the correct workspace
+        monitors_for_package = {}
+        for workspace_type, workspace_list in list(monitors.items()):
+            workspace = get_workspace_for_index(index, workspace_list)
+            monitors_for_package.update({workspace_type: workspace})
+        state_copy = deepcopy(state)
+
+        # Set the period on the state
+        if requires_new_period_selection:
+            state_copy.data.sample_scatter_period = index + 1
+        packages.append(ReductionPackage(state=state_copy,
+                                         workspaces=workspaces_for_package,
+                                         monitors=monitors_for_package,
+                                         is_part_of_multi_period_reduction=is_multi_period,
+                                         is_part_of_event_slice_reduction=False))
+    return packages
+
+
+def get_workspace_for_index(index, workspace_list):
+    """
+    Extracts the workspace from the list of workspaces. The index is set by the nth ScatterSample workspace.
+
+    There might be situation where there is no corresponding CanXXX workspace or SampleTransmission workspace etc,
+    since they are optional.
+
+    :param index: The index of the workspace from which to extract.
+    :param workspace_list: A list of workspaces.
+    :return: The workspace corresponding to the index or None
+    """
+    if workspace_list:
+        if index < len(workspace_list):
+            workspace = workspace_list[index]
+        else:
+            workspace = None
+    else:
+        workspace = None
+    return workspace
+
+
+def set_properties_for_reduction_algorithm(reduction_alg, reduction_package, workspace_to_name, workspace_to_monitor):
+    """
+    Sets up everything necessary on the reduction algorithm.
+
+    :param reduction_alg: a handle to the reduction algorithm
+    :param reduction_package: a reduction package object
+    :param workspace_to_name: the workspace to name map
+    :param workspace_to_monitor: a workspace to monitor map
+    """
+    def _set_output_name(_reduction_alg, _reduction_package, _is_group, _reduction_mode, _property_name,
+                         _attr_out_name, _atrr_out_name_base, _suffix=None):
+        _out_name, _out_name_base = get_output_name(_reduction_package.state, _reduction_mode, _is_group)
+
+        if _suffix is not None:
+            _out_name += _suffix
+            _out_name_base += _suffix
+
+        _reduction_alg.setProperty(_property_name, _out_name)
+        setattr(_reduction_package, _attr_out_name, _out_name)
+        setattr(_reduction_package, _atrr_out_name_base, _out_name_base)
+
+    def _set_lab(_reduction_alg, _reduction_package, _is_group):
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLABCan", "reduced_lab_can_name", "reduced_lab_can_base_name",
+                         LAB_CAN_SUFFIX)
+
+        # Lab Can Count workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLABCanCount", "reduced_lab_can_count_name", "reduced_lab_can_count_base_name",
+                         LAB_CAN_COUNT_SUFFIX)
+
+        # Lab Can Norm workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLABCanNorm", "reduced_lab_can_norm_name", "reduced_lab_can_norm_base_name",
+                         LAB_CAN_NORM_SUFFIX)
+
+    def _set_hab(_reduction_alg, _reduction_package, _is_group):
+        # Hab Can Workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHABCan", "reduced_hab_can_name", "reduced_hab_can_base_name",
+                         HAB_CAN_SUFFIX)
+
+        # Hab Can Count workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHABCanCount", "reduced_hab_can_count_name", "reduced_hab_can_count_base_name",
+                         HAB_CAN_COUNT_SUFFIX)
+
+        # Hab Can Norm workspace - this is a partial workspace
+        _set_output_name(_reduction_alg, _reduction_package, _is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHABCanNorm", "reduced_hab_can_norm_name", "reduced_hab_can_norm_base_name",
+                         HAB_CAN_NORM_SUFFIX)
+
+    # Go through the elements of the reduction package and set them on the reduction algorithm
+    # Set the SANSState
+    state = reduction_package.state
+    state_dict = state.property_manager
+    reduction_alg.setProperty("SANSState", state_dict)
+
+    # Set the input workspaces
+    workspaces = reduction_package.workspaces
+    for workspace_type, workspace in list(workspaces.items()):
+        if workspace is not None:
+            reduction_alg.setProperty(workspace_to_name[workspace_type], workspace)
+
+    # Set the monitors
+    monitors = reduction_package.monitors
+    for workspace_type, monitor in list(monitors.items()):
+        if monitor is not None:
+            reduction_alg.setProperty(workspace_to_monitor[workspace_type], monitor)
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Set the output workspaces for LAB, HAB and Merged
+    # ------------------------------------------------------------------------------------------------------------------
+    is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+    is_part_of_event_slice_reduction = reduction_package.is_part_of_event_slice_reduction
+    is_group = is_part_of_multi_period_reduction or is_part_of_event_slice_reduction
+
+    reduction_mode = reduction_package.reduction_mode
+    if reduction_mode is ISISReductionMode.Merged:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.Merged,
+                         "OutputWorkspaceMerged", "reduced_merged_name", "reduced_merged_base_name")
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLAB", "reduced_lab_name", "reduced_lab_base_name", "_lab")
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHAB", "reduced_hab_name", "reduced_hab_base_name", "_hab")
+    elif reduction_mode is ISISReductionMode.LAB:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLAB", "reduced_lab_name", "reduced_lab_base_name")
+    elif reduction_mode is ISISReductionMode.HAB:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHAB", "reduced_hab_name", "reduced_hab_base_name")
+    elif reduction_mode is ISISReductionMode.Both:
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.LAB,
+                         "OutputWorkspaceLAB", "reduced_lab_name", "reduced_lab_base_name")
+        _set_output_name(reduction_alg, reduction_package, is_group, ISISReductionMode.HAB,
+                         "OutputWorkspaceHAB", "reduced_hab_name", "reduced_hab_base_name")
+    else:
+        raise RuntimeError("The reduction mode {0} is not known".format(reduction_mode))
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # Set the output workspaces for the can reduction and the partial can reductions
+    # ------------------------------------------------------------------------------------------------------------------
+    # Set the output workspaces for the can reductions -- note that these will only be set if optimizations
+    # are enabled
+    # Lab Can Workspace
+    if reduction_mode is ISISReductionMode.Merged:
+        _set_lab(reduction_alg, reduction_package, is_group)
+        _set_hab(reduction_alg, reduction_package, is_group)
+    elif reduction_mode is ISISReductionMode.LAB:
+        _set_lab(reduction_alg, reduction_package, is_group)
+    elif reduction_mode is ISISReductionMode.HAB:
+        _set_hab(reduction_alg, reduction_package, is_group)
+    elif reduction_mode is ISISReductionMode.Both:
+        _set_lab(reduction_alg, reduction_package, is_group)
+        _set_hab(reduction_alg, reduction_package, is_group)
+    else:
+        raise RuntimeError("The reduction mode {0} is not known".format(reduction_mode))
+
+
+def get_workspace_from_algorithm(alg, output_property_name):
+    """
+    Gets the output workspace from an algorithm. Since we don't run this as a child we need to get it from the
+    ADS.
+
+    :param alg: a handle to the algorithm from which we want to take the output workspace property.
+    :param output_property_name: the name of the output property.
+    :return the workspace or None
+    """
+    output_workspace_name = alg.getProperty(output_property_name).valueAsStr
+
+    if not output_workspace_name:
+        return None
+
+    if AnalysisDataService.doesExist(output_workspace_name):
+        return AnalysisDataService.retrieve(output_workspace_name)
+    else:
+        return None
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Functions for outputs to the ADS and saving the file
+# ----------------------------------------------------------------------------------------------------------------------
+def group_workspaces_if_required(reduction_package):
+    """
+    The output workspaces have already been published to the ADS by the algorithm. Now we might have to
+    bundle them into a group if:
+    * They are part of a multi-period workspace or a sliced reduction
+    * They are reduced LAB and HAB workspaces of a Merged reduction
+    * They are can workspaces - they are all grouped into a single group
+    :param reduction_package: a list of reduction packages
+    """
+    is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+    is_part_of_event_slice_reduction = reduction_package.is_part_of_event_slice_reduction
+    requires_grouping = is_part_of_multi_period_reduction or is_part_of_event_slice_reduction
+
+    reduced_lab = reduction_package.reduced_lab
+    reduced_hab = reduction_package.reduced_hab
+    reduced_merged = reduction_package.reduced_merged
+
+    is_merged_reduction = reduced_merged is not None
+
+    # Add the reduced workspaces to groups if they require this
+    if is_merged_reduction:
+        if requires_grouping:
+            add_to_group(reduced_merged, reduction_package.reduced_merged_base_name)
+            add_to_group(reduced_lab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+            add_to_group(reduced_hab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+        else:
+            add_to_group(reduced_lab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+            add_to_group(reduced_hab, REDUCED_HAB_AND_LAB_WORKSPACE_FOR_MERGED_REDUCTION)
+    else:
+        if requires_grouping:
+            add_to_group(reduced_lab, reduction_package.reduced_lab_base_name)
+            add_to_group(reduced_hab, reduction_package.reduced_hab_base_name)
+
+    # Add the can workspaces (used for optimizations) to a Workspace Group (if they exist)
+    add_to_group(reduction_package.reduced_lab_can, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_lab_can_count, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_lab_can_norm, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+
+    add_to_group(reduction_package.reduced_hab_can, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_hab_can_count, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+    add_to_group(reduction_package.reduced_hab_can_norm, REDUCED_CAN_AND_PARTIAL_CAN_FOR_OPTIMIZATION)
+
+
+def add_to_group(workspace, name_of_group_workspace):
+    """
+    Creates a group workspace with the base name for the workspace
+
+    :param workspace: the workspace to add to the WorkspaceGroup
+    :param name_of_group_workspace: the name of the WorkspaceGroup
+    """
+    if workspace is None:
+        return
+    name_of_workspace = workspace.name()
+    if AnalysisDataService.doesExist(name_of_group_workspace):
+        group_workspace = AnalysisDataService.retrieve(name_of_group_workspace)
+        group_workspace.add(name_of_workspace)
+    else:
+        group_name = "GroupWorkspaces"
+        group_options = {"InputWorkspaces": [name_of_workspace],
+                         "OutputWorkspace": name_of_group_workspace}
+        group_alg = create_unmanaged_algorithm(group_name, **group_options)
+        # At this point we are dealing with the ADS, hence we need to make sure that this is not called as
+        # a child algorithm
+        group_alg.setChild(False)
+        group_alg.execute()
+
+
+def save_to_file(reduction_packages):
+    """
+    Extracts all workspace names which need to be saved and saves them into a file.
+
+    @param reduction_packages: a list of reduction packages which contain all the relevant information for saving
+    """
+    workspaces_names_to_save = get_all_names_to_save(reduction_packages)
+
+    state = reduction_packages[0].state
+    save_info = state.save
+    file_formats = save_info.file_format
+    for name_to_save in workspaces_names_to_save:
+        save_workspace_to_file(name_to_save, file_formats)
+
+
+def delete_reduced_workspaces(reduction_packages):
+    """
+    Deletes all workspaces which would have been generated from a list of reduction packages.
+
+    @param reduction_packages: a list of reduction package
+    """
+    def _delete_workspaces(_delete_alg, _workspaces):
+        for _workspace in _workspaces:
+            if _workspace is not None:
+                _delete_alg.setProperty("Workspace", _workspace.name())
+                _delete_alg.execute()
+    # Get all names which were saved out to workspaces
+    # Delete each workspace
+    delete_name = "DeleteWorkspace"
+    delete_options = {}
+    delete_alg = create_unmanaged_algorithm(delete_name, **delete_options)
+
+    for reduction_package in reduction_packages:
+        reduced_lab = reduction_package.reduced_lab
+        reduced_hab = reduction_package.reduced_hab
+        reduced_merged = reduction_package.reduced_merged
+        _delete_workspaces(delete_alg, [reduced_lab, reduced_hab, reduced_merged])
+
+
+def delete_optimization_workspaces(reduction_packages):
+    """
+    Deletes all workspaces which are used for optimizations. This can be loaded workspaces or can optimizations
+
+    :param reduction_packages: a list of reductioin packages.
+    """
+    def _delete_workspaces(_delete_alg, _workspaces):
+        _workspace_names_to_delete = set([_workspace.name() for _workspace in _workspaces if _workspace is not None])
+        for _workspace_name_to_delete in _workspace_names_to_delete:
+            if _workspace_name_to_delete:
+                _delete_alg.setProperty("Workspace", _workspace_name_to_delete)
+                _delete_alg.execute()
+    delete_name = "DeleteWorkspace"
+    delete_options = {}
+    delete_alg = create_unmanaged_algorithm(delete_name, **delete_options)
+
+    for reduction_package in reduction_packages:
+        # Delete loaded workspaces
+        workspaces_to_delete = list(reduction_package.workspaces.values())
+        _delete_workspaces(delete_alg, workspaces_to_delete)
+
+        # Delete loaded monitors
+        monitors_to_delete = list(reduction_package.monitors.values())
+        _delete_workspaces(delete_alg, monitors_to_delete)
+
+        # Delete can optimizations
+        optimizations_to_delete = [reduction_package.reduced_lab_can,
+                                   reduction_package.reduced_lab_can_count,
+                                   reduction_package.reduced_lab_can_norm,
+                                   reduction_package.reduced_hab_can,
+                                   reduction_package.reduced_hab_can_count,
+                                   reduction_package.reduced_hab_can_norm]
+        _delete_workspaces(delete_alg, optimizations_to_delete)
+
+
+def get_all_names_to_save(reduction_packages):
+    """
+    Extracts all the output names from a list of reduction packages. The main
+
+    @param reduction_packages: a list of reduction packages
+    @return: a list of workspace names to save.
+    """
+    names_to_save = []
+    for reduction_package in reduction_packages:
+        is_part_of_multi_period_reduction = reduction_package.is_part_of_multi_period_reduction
+        is_part_of_event_slice_reduction = reduction_package.is_part_of_event_slice_reduction
+        is_group = is_part_of_multi_period_reduction or is_part_of_event_slice_reduction
+
+        reduced_lab = reduction_package.reduced_lab
+        reduced_hab = reduction_package.reduced_hab
+        reduced_merged = reduction_package.reduced_merged
+
+        # If we have merged reduction then store the
+        if reduced_merged:
+            if is_group:
+                names_to_save.append(reduction_package.reduced_merged_base_name)
+            else:
+                names_to_save.append(reduced_merged.name())
+        else:
+            if reduced_lab:
+                if is_group:
+                    names_to_save.append(reduction_package.reduced_lab_base_name)
+                else:
+                    names_to_save.append(reduced_lab.name())
+
+            if reduced_hab:
+                if is_group:
+                    names_to_save.append(reduction_package.reduced_hab_base_name)
+                else:
+                    names_to_save.append(reduced_hab.name())
+
+    # We might have some workspaces as duplicates (the group workspaces), so make them unique
+    return set(names_to_save)
+
+
+def save_workspace_to_file(output_name, file_formats):
+    """
+    Saves the workspace to the different file formats specified in the state object.
+
+    :param output_name: the name of the output workspace and also the name of the file
+    :param file_formats: a list of file formats to save
+    """
+    save_name = "SANSSave"
+    save_options = {"InputWorkspace": output_name}
+    save_options.update({"Filename": output_name})
+
+    if SaveType.Nexus in file_formats:
+        save_options.update({"Nexus": True})
+    if SaveType.CanSAS in file_formats:
+        save_options.update({"CanSAS": True})
+    if SaveType.NXcanSAS in file_formats:
+        save_options.update({"NXcanSAS": True})
+    if SaveType.NistQxy in file_formats:
+        save_options.update({"NistQxy": True})
+    if SaveType.RKH in file_formats:
+        save_options.update({"RKH": True})
+    if SaveType.CSV in file_formats:
+        save_options.update({"CSV": True})
+
+    save_alg = create_unmanaged_algorithm(save_name, **save_options)
+    save_alg.execute()
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Container classes
+# ----------------------------------------------------------------------------------------------------------------------
+class ReducedDataType(object):
+    class Merged(object):
+        pass
+
+    class LAB(object):
+        pass
+
+    class HAB(object):
+        pass
+
+
+class ReductionPackage(object):
+    """
+    The reduction package is a mutable store for
+    1. The state object which defines our reductions.
+    2. A dictionary with input_workspace_type vs input_workspace
+    3. A dictionary with input_monitor_workspace_type vs input_monitor_workspace
+    4. A flag which indicates if the reduction is part of a multi-period reduction
+    5. A flag which indicates if the reduction is part of a sliced reduction
+    6. The reduced workspaces (not all need to exist)
+    7. The reduced can and the reduced partial can workspaces (non have to exist, this is only for optimizations)
+    """
+    def __init__(self, state, workspaces, monitors, is_part_of_multi_period_reduction=False,
+                 is_part_of_event_slice_reduction=False):
+        super(ReductionPackage, self).__init__()
+        # -------------------------------------------------------
+        # General Settings
+        # -------------------------------------------------------
+        self.state = state
+        self.workspaces = workspaces
+        self.monitors = monitors
+        self.is_part_of_multi_period_reduction = is_part_of_multi_period_reduction
+        self.is_part_of_event_slice_reduction = is_part_of_event_slice_reduction
+        self.reduction_mode = state.reduction.reduction_mode
+
+        # -------------------------------------------------------
+        # Reduced workspaces
+        # -------------------------------------------------------
+        self.reduced_lab = None
+        self.reduced_hab = None
+        self.reduced_merged = None
+
+        # -------------------------------------------------------
+        # Reduced partial can workspaces (and partial workspaces)
+        # -------------------------------------------------------
+        self.reduced_lab_can = None
+        self.reduced_lab_can_count = None
+        self.reduced_lab_can_norm = None
+
+        self.reduced_hab_can = None
+        self.reduced_hab_can_count = None
+        self.reduced_hab_can_norm = None
+
+        # -------------------------------------------------------
+        # Output names and base names
+        # -------------------------------------------------------
+        self.reduced_lab_name = None
+        self.reduced_lab_base_name = None
+        self.reduced_hab_name = None
+        self.reduced_hab_base_name = None
+        self.reduced_merged_name = None
+        self.reduced_merged_base_name = None
+
+        # Partial reduced can workspace names
+        self.reduced_lab_can_name = None
+        self.reduced_lab_can_base_name = None
+        self.reduced_lab_can_count_name = None
+        self.reduced_lab_can_count_base_name = None
+        self.reduced_lab_can_norm_name = None
+        self.reduced_lab_can_norm_base_name = None
+
+        self.reduced_hab_can_name = None
+        self.reduced_hab_can_base_name = None
+        self.reduced_hab_can_count_name = None
+        self.reduced_hab_can_count_base_name = None
+        self.reduced_hab_can_norm_name = None
+        self.reduced_hab_can_norm_base_name = None
diff --git a/scripts/SANS/sans/algorithm_detail/merge_reductions.py b/scripts/SANS/sans/algorithm_detail/merge_reductions.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b550c2a3424b543d4b898caa28525f62c6f798d
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/merge_reductions.py
@@ -0,0 +1,173 @@
+""" Merges two reduction types to single reduction"""
+
+from __future__ import (absolute_import, division, print_function)
+from abc import (ABCMeta, abstractmethod)
+from six import with_metaclass
+from sans.common.general_functions import create_child_algorithm
+from sans.common.enums import (SANSInstrument, DataType, FitModeForMerge)
+from sans.algorithm_detail.bundles import MergeBundle
+
+
+class Merger(with_metaclass(ABCMeta, object)):
+    """ Merger interface"""
+
+    @abstractmethod
+    def merge(self, reduction_mode_vs_output_bundles, parent_alg=None):
+        pass
+
+
+class ISIS1DMerger(Merger):
+    """
+    Class which handles ISIS-style merges.
+    """
+    def __init__(self):
+        super(ISIS1DMerger, self).__init__()
+
+    def merge(self, reduction_mode_vs_output_bundles, parent_alg=None):
+        """
+        Merges two partial reductions to obtain a merged reduction.
+
+        :param reduction_mode_vs_output_bundles: a ReductionMode vs OutputBundle map
+        :param parent_alg: a handle to the parent algorithm.
+        :return: a MergeBundle with the merged which contains the merged workspace.
+        """
+        # Get the primary and secondary detectors for stitching. This is normally LAB and HAB, but in other scenarios
+        # there might be completely different detectors. This approach allows future adjustments to the stitching
+        # configuration. The data from the secondary detector will be stitched to the data from the primary detector.
+
+        primary_detector, secondary_detector = get_detectors_for_merge(reduction_mode_vs_output_bundles)
+        sample_count_primary, sample_norm_primary, sample_count_secondary, sample_norm_secondary = \
+            get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_sample)
+
+        # Get the relevant workspaces from the reduction settings. For this we need to first understand what the
+        can_count_primary, can_norm_primary, can_count_secondary, can_norm_secondary = \
+            get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_can)
+
+        # Get fit parameters
+        shift_factor, scale_factor, fit_mode = get_shift_and_scale_parameter(reduction_mode_vs_output_bundles)
+        fit_mode_as_string = FitModeForMerge.to_string(fit_mode)
+
+        # We need to convert NoFit to None.
+        if fit_mode_as_string == "NoFit":
+            fit_mode_as_string = "None"
+
+        # Run the SANSStitch algorithm
+        stitch_name = "SANSStitch"
+        stitch_options = {"HABCountsSample": sample_count_secondary,
+                          "HABNormSample": sample_norm_secondary,
+                          "LABCountsSample": sample_count_primary,
+                          "LABNormSample": sample_norm_primary,
+                          "ProcessCan": False,
+                          "Mode": fit_mode_as_string,
+                          "ScaleFactor": scale_factor,
+                          "ShiftFactor": shift_factor,
+                          "OutputWorkspace": "dummy"}
+
+        if can_count_primary is not None and can_norm_primary is not None \
+                and can_count_secondary is not None and can_norm_secondary is not None:
+            stitch_options_can = {"HABCountsCan": can_count_secondary,
+                                  "HABNormCan": can_norm_secondary,
+                                  "LABCountsCan": can_count_primary,
+                                  "LABNormCan": can_norm_primary,
+                                  "ProcessCan": True}
+            stitch_options.update(stitch_options_can)
+
+        stitch_alg = create_child_algorithm(parent_alg, stitch_name, **stitch_options)
+        stitch_alg.execute()
+
+        # Get the fit values
+        shift_from_alg = stitch_alg.getProperty("OutShiftFactor").value
+        scale_from_alg = stitch_alg.getProperty("OutScaleFactor").value
+        merged_workspace = stitch_alg.getProperty("OutputWorkspace").value
+
+        # Return a merge bundle with the merged workspace and the fitted scale and shift factor (they are good
+        # diagnostic tools which are desired by the instrument scientists.
+        return MergeBundle(merged_workspace=merged_workspace, shift=shift_from_alg, scale=scale_from_alg)
+
+
+class NullMerger(Merger):
+    def __init__(self):
+        super(NullMerger, self).__init__()
+
+    def merge(self, reduction_mode_vs_output_bundles, parent_alg=None):
+        pass
+
+
+class MergeFactory(object):
+    def __init__(self):
+        super(MergeFactory, self).__init__()
+
+    @staticmethod
+    def create_merger(state):
+        # The selection depends on the facility/instrument
+        data_info = state.data
+        instrument = data_info.instrument
+
+        if instrument is SANSInstrument.LARMOR or instrument is SANSInstrument.LOQ or \
+           instrument is SANSInstrument.SANS2D:
+            merger = ISIS1DMerger()
+        else:
+            merger = NullMerger()
+            RuntimeError("MergeFactory: The merging for your selection has not been implemented yet.")
+        return merger
+
+
+def get_detectors_for_merge(output_bundles):
+    """
+    Extracts the merge strategy from the output bundles. This is the name of the primary and the secondary detector.
+
+    The merge strategy will let us know which two detectors are to be merged. This abstraction might be useful in the
+    future if we are dealing with more than two detector banks.
+    :param output_bundles: a ReductionMap vs OutputBundle map
+    :return: the primary detector and the secondary detector.
+    """
+    reduction_settings_collection = next(iter(list(output_bundles.values())))
+    state = reduction_settings_collection[0].state
+    reduction_info = state.reduction
+    return reduction_info.get_merge_strategy()
+
+
+def get_partial_workspaces(primary_detector, secondary_detector, reduction_mode_vs_output_bundles, is_data_type):
+    """
+    Get the partial workspaces for the primary and secondary detectors.
+
+    :param primary_detector: the primary detector (now normally ISISReductionMode.LAB)
+    :param secondary_detector: the secondary detector (now normally ISISReductionMode.HAB)
+    :param reduction_mode_vs_output_bundles: a ReductionMode vs OutputBundles map
+    :param is_data_type: the data type, i.e. if can or sample
+    :return: the primary count workspace, the primary normalization workspace, the secondary count workspace and the
+             secondary normalization workspace.
+    """
+    # Get primary reduction information for specified data type, i.e. sample or can
+    primary = reduction_mode_vs_output_bundles[primary_detector]
+    primary_for_data_type = next((setting for setting in primary if is_data_type(setting)), None)
+    primary_count = primary_for_data_type.output_workspace_count
+    primary_norm = primary_for_data_type.output_workspace_norm
+
+    # Get secondary reduction information for specified data type, i.e. sample or can
+    secondary = reduction_mode_vs_output_bundles[secondary_detector]
+    secondary_for_data_type = next((setting for setting in secondary if is_data_type(setting)), None)
+    secondary_count = secondary_for_data_type.output_workspace_count
+    secondary_norm = secondary_for_data_type.output_workspace_norm
+    return primary_count, primary_norm, secondary_count, secondary_norm
+
+
+def get_shift_and_scale_parameter(reduction_mode_vs_output_bundles):
+    """
+    Gets the shfit and scale parameter from a set of OutputBundles
+
+    :param reduction_mode_vs_output_bundles: a ReductionMode vs OutputBundle map
+    :return: the shift, scale and fit mode.
+    """
+    reduction_settings_collection = next(iter(list(reduction_mode_vs_output_bundles.values())))
+    state = reduction_settings_collection[0].state
+    reduction_info = state.reduction
+    return reduction_info.merge_shift, reduction_info.merge_scale, reduction_info.merge_fit_mode
+
+
+def is_sample(x):
+    return x.data_type is DataType.Sample
+
+
+def is_can(x):
+    return x.data_type is DataType.Can
diff --git a/scripts/SANS/sans/algorithm_detail/single_execution.py b/scripts/SANS/sans/algorithm_detail/single_execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ec5f48ac09a16659dc11b20743ac4902aa982cc
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/single_execution.py
@@ -0,0 +1,256 @@
+from __future__ import (absolute_import, division, print_function)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import (create_child_algorithm,
+                                           write_hash_into_reduced_can_workspace,
+                                           get_reduced_can_workspace_from_ads)
+from sans.common.enums import (ISISReductionMode, DetectorType, DataType, OutputParts)
+from sans.algorithm_detail.strip_end_nans_and_infs import strip_end_nans
+from sans.algorithm_detail.merge_reductions import (MergeFactory, is_sample, is_can)
+from sans.algorithm_detail.bundles import (OutputBundle, OutputPartsBundle)
+
+
+def run_core_reduction(reduction_alg, reduction_setting_bundle):
+    """
+    This function runs a core reduction. This is essentially half a reduction (either smaple or can).
+
+    :param reduction_alg: a handle to the reduction algorithm.
+    :param reduction_setting_bundle: a ReductionSettingBundle tuple
+    :return: an OutputBundle and an OutputPartsBundle
+    """
+
+    # Get component to reduce
+    component = get_component_to_reduce(reduction_setting_bundle)
+    # Set the properties on the reduction algorithms
+    serialized_state = reduction_setting_bundle.state.property_manager
+    reduction_alg.setProperty("SANSState", serialized_state)
+    reduction_alg.setProperty("Component", component)
+    reduction_alg.setProperty("ScatterWorkspace", reduction_setting_bundle.scatter_workspace)
+    reduction_alg.setProperty("ScatterMonitorWorkspace", reduction_setting_bundle.scatter_monitor_workspace)
+    reduction_alg.setProperty("DataType", DataType.to_string(reduction_setting_bundle.data_type))
+
+    if reduction_setting_bundle.transmission_workspace is not None:
+        reduction_alg.setProperty("TransmissionWorkspace", reduction_setting_bundle.transmission_workspace)
+
+    if reduction_setting_bundle.direct_workspace is not None:
+        reduction_alg.setProperty("DirectWorkspace", reduction_setting_bundle.direct_workspace)
+
+    reduction_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+    reduction_alg.setProperty("SumOfCounts", EMPTY_NAME)
+    reduction_alg.setProperty("SumOfNormFactors", EMPTY_NAME)
+
+    # Run the reduction core
+    reduction_alg.execute()
+
+    # Get the results
+    output_workspace = reduction_alg.getProperty("OutputWorkspace").value
+    output_workspace_count = reduction_alg.getProperty("SumOfCounts").value
+    output_workspace_norm = reduction_alg.getProperty("SumOfNormFactors").value
+
+    # Pull the result out of the workspace
+    output_bundle = OutputBundle(state=reduction_setting_bundle.state,
+                                 data_type=reduction_setting_bundle.data_type,
+                                 reduction_mode=reduction_setting_bundle.reduction_mode,
+                                 output_workspace=output_workspace)
+
+    output_parts_bundle = OutputPartsBundle(state=reduction_setting_bundle.state,
+                                            data_type=reduction_setting_bundle.data_type,
+                                            reduction_mode=reduction_setting_bundle.reduction_mode,
+                                            output_workspace_count=output_workspace_count,
+                                            output_workspace_norm=output_workspace_norm)
+    return output_bundle, output_parts_bundle
+
+
+def get_final_output_workspaces(output_bundles, parent_alg):
+    """
+    This function provides the final steps for the data reduction.
+
+    The final steps are:
+    1. Can Subtraction (if required)
+    2. Data clean up (if required)
+    :param output_bundles: A set of outputBundles
+    :param parent_alg: a handle to the parent algorithm.
+    :return: a map of ReductionMode vs final output workspaces.
+    """
+
+    reduction_mode_vs_output_bundles = get_reduction_mode_vs_output_bundles(output_bundles)
+
+    # For each reduction mode, we need to perform a can subtraction (and potential cleaning of the workspace)
+    final_output_workspaces = {}
+    for reduction_mode, output_bundles in reduction_mode_vs_output_bundles.items():
+        # Find the sample and the can in the data collection
+        output_sample_workspace = next((output_bundle.output_workspace for output_bundle in output_bundles
+                                        if is_sample(output_bundle)), None)
+        output_can_workspace = next((output_bundle.output_workspace for output_bundle in output_bundles
+                                     if is_can(output_bundle)), None)
+        # Perform the can subtraction
+        if output_can_workspace is not None:
+            final_output_workspace = perform_can_subtraction(output_sample_workspace, output_can_workspace, parent_alg)
+        else:
+            final_output_workspace = output_sample_workspace
+
+        # Tidy up the workspace by removing start/end-NANs and start/end-INFs
+        final_output_workspace = strip_end_nans(final_output_workspace, parent_alg)
+        final_output_workspaces.update({reduction_mode: final_output_workspace})
+
+    # Finally add sample log information
+    # TODO: Add log information
+
+    return final_output_workspaces
+
+
+def perform_can_subtraction(sample, can, parent_alg):
+    """
+    Subtracts the can from the sample workspace.
+
+    We need to manually take care of the q resolution issue here.
+    :param sample: the sample workspace
+    :param can: the can workspace.
+    :param parent_alg: a handle to the parent algorithm.
+    :return: the subtracted workspace.
+    """
+    subtraction_name = "Minus"
+    subtraction_options = {"LHSWorkspace": sample,
+                           "RHSWorkspace": can,
+                           "OutputWorkspace": EMPTY_NAME}
+    subtraction_alg = create_child_algorithm(parent_alg, subtraction_name, **subtraction_options)
+    subtraction_alg.execute()
+    output_workspace = subtraction_alg.getProperty("OutputWorkspace").value
+
+    # If the workspace is 1D and contains Q resolution (i.e. DX values), then we need to make sure that the
+    # resulting output workspace contains the correct values
+    correct_q_resolution_for_can(sample, can, output_workspace)
+
+    return output_workspace
+
+
+def correct_q_resolution_for_can(sample_workspace, can_workspace, subtracted_workspace):
+    """
+    Sets the correct Q resolution on a can-subtracted workspace.
+
+    We need to transfer the Q resolution from the original workspaces to the subtracted
+    workspace. Richard wants us to ignore potential DX values for the CAN workspace (they
+    would be very small any way). The Q resolution functionality only exists currently
+    for 1D, ie when only one spectrum is present.
+    """
+    _ = can_workspace  # noqa
+    if sample_workspace.getNumberHistograms() == 1 and sample_workspace.hasDx(0):
+        subtracted_workspace.setDx(0, sample_workspace.dataDx(0))
+
+
+def get_merge_bundle_for_merge_request(output_bundles, parent_alg):
+    """
+    Create a merge bundle for the reduction outputs and perform stitching if required
+    :param output_bundles: a list of output_bundles
+    :param parent_alg: a handle to the parent algorithm
+    """
+    # Order the reductions. This leaves us with a dict mapping from the reduction type (i.e. HAB, LAB) to
+    # a list of reduction settings which contain the information for sample and can.
+    reduction_mode_vs_output_bundles = get_reduction_mode_vs_output_bundles(output_bundles)
+
+    # Get the underlying state from one of the elements
+    state = output_bundles[0].state
+
+    merge_factory = MergeFactory()
+    merger = merge_factory.create_merger(state)
+
+    # Run the merger and return the merged output workspace
+    return merger.merge(reduction_mode_vs_output_bundles, parent_alg)
+
+
+def get_reduction_mode_vs_output_bundles(output_bundles):
+    """
+    Groups the reduction information by the reduction mode, e.g. all information regarding HAB is collated, similarly
+    for LAB.
+    """
+    outputs = {}
+    # Pair up the different reduction modes
+    for output_bundle in output_bundles:
+        key = output_bundle.reduction_mode
+        if key in outputs:
+            outputs[key].append(output_bundle)
+        else:
+            outputs.update({key: [output_bundle]})
+    return outputs
+
+
+def get_component_to_reduce(reduction_setting_bundle):
+    """
+    Gets the component to reduce as string. Currently we encode this as LAB or HAB.
+
+    :param reduction_setting_bundle: a ReductionSettingBundle tuple.
+    :return: the reduction mode as a string.
+    """
+    # Get the reduction mode
+    reduction_mode = reduction_setting_bundle.reduction_mode
+
+    if reduction_mode is ISISReductionMode.LAB:
+        reduction_mode_setting = DetectorType.to_string(DetectorType.LAB)
+    elif reduction_mode is ISISReductionMode.HAB:
+        reduction_mode_setting = DetectorType.to_string(DetectorType.HAB)
+    else:
+        raise RuntimeError("SingleExecution: An unknown reduction mode was selected: {}. "
+                           "Currently only HAB and LAB are supported.".format(reduction_mode))
+    return reduction_mode_setting
+
+
+def run_optimized_for_can(reduction_alg, reduction_setting_bundle):
+    """
+    Check if the state can reduction already exists, and if so, use it else reduce it and add it to the ADS.
+
+    @param reduction_alg: a handle to the SANSReductionCore algorithm
+    @param reduction_setting_bundle: a ReductionSettingBundle tuple.
+    @return: a reduced workspace, a partial output workspace for the counts, a partial workspace for the normalization.
+    """
+    state = reduction_setting_bundle.state
+    output_parts = reduction_setting_bundle.output_parts
+    reduction_mode = reduction_setting_bundle.reduction_mode
+    data_type = reduction_setting_bundle.data_type
+    reduced_can_workspace, reduced_can_workspace_count, reduced_can_workspace_norm = \
+        get_reduced_can_workspace_from_ads(state, output_parts, reduction_mode)
+    # Set the results on the output bundle
+    output_bundle = OutputBundle(state=state, data_type=data_type, reduction_mode=reduction_mode,
+                                 output_workspace=reduced_can_workspace)
+    output_parts_bundle = OutputPartsBundle(state=state, data_type=data_type, reduction_mode=reduction_mode,
+                                            output_workspace_count=reduced_can_workspace_count,
+                                            output_workspace_norm=reduced_can_workspace_norm)
+    # The logic table for the recalculation of the partial outputs is:
+    # | output_parts | reduced_can_workspace_count is None |  reduced_can_workspace_norm is None | Recalculate |
+    # ----------------------------------------------------------------------------------------------------------
+    # |  False       |        True                         |           True                      |    False    |
+    # |  False       |        True                         |           False                     |    False    |
+    # |  False       |        False                        |           True                      |    False    |
+    # |  False       |        False                        |           False                     |    False    |
+    # |  True        |        True                         |           True                      |    False    |
+    # |  True        |        True                         |           False                     |    True     |
+    # |  True        |        False                        |           True                      |    True     |
+    # |  True        |        False                        |           False                     |    False    |
+
+    is_invalid_partial_workspaces = ((output_parts_bundle.output_workspace_count is None and
+                                     output_parts_bundle.output_workspace_norm is not None) or
+                                     (output_parts_bundle.output_workspace_count is not None and
+                                     output_parts_bundle.output_workspace_norm is None))
+    partial_output_require_reload = output_parts and is_invalid_partial_workspaces
+
+    if output_bundle.output_workspace is None or partial_output_require_reload:
+        output_bundle, output_parts_bundle = run_core_reduction(reduction_alg, reduction_setting_bundle)
+
+        # Now we need to tag the workspaces and add it to the ADS
+        if output_bundle.output_workspace is not None:
+            write_hash_into_reduced_can_workspace(state=output_bundle.state,
+                                                  workspace=output_bundle.output_workspace,
+                                                  partial_type=None,
+                                                  reduction_mode=reduction_mode)
+
+        if (output_parts_bundle.output_workspace_count is not None and
+           output_parts_bundle.output_workspace_norm is not None):
+            write_hash_into_reduced_can_workspace(state=output_parts_bundle.state,
+                                                  workspace=output_parts_bundle.output_workspace_count,
+                                                  partial_type=OutputParts.Count,
+                                                  reduction_mode=reduction_mode)
+
+            write_hash_into_reduced_can_workspace(state=output_parts_bundle.state,
+                                                  workspace=output_parts_bundle.output_workspace_norm,
+                                                  partial_type=OutputParts.Norm,
+                                                  reduction_mode=reduction_mode)
+
+    return output_bundle, output_parts_bundle
diff --git a/scripts/SANS/sans/algorithm_detail/strip_end_nans_and_infs.py b/scripts/SANS/sans/algorithm_detail/strip_end_nans_and_infs.py
new file mode 100644
index 0000000000000000000000000000000000000000..08ffc8bf566c8392ad382f49869f86045556d492
--- /dev/null
+++ b/scripts/SANS/sans/algorithm_detail/strip_end_nans_and_infs.py
@@ -0,0 +1,57 @@
+from __future__ import (absolute_import, division, print_function)
+from math import (isinf, isnan)
+from sans.common.constants import EMPTY_NAME
+from sans.common.general_functions import create_child_algorithm
+
+
+def strip_end_nans(workspace, parent_alg=None):
+    """
+    This function removes the INFs and NANs from the start and end of a 1D workspace.
+
+    :param workspace: The workspace which is about to be
+    :param parent_alg: a handle to the parent algorithm
+    :return: A trimmed NAN- and INF-trimmed workspace
+    """
+    # If the workspace is larger than 1D, then there is nothing we can do
+    if workspace.getNumberHistograms() > 1:
+        return workspace
+    data = workspace.readY(0)
+    # Find the index at which the first legal value appears
+
+    start_index = next((index for index in range(len(data)) if is_valid_data(data[index])), None)
+    end_index = next((index for index in range(len(data)-1, -1, -1) if is_valid_data(data[index])), None)
+
+    # If an index was not found then we return the current workspace. This means that all entries are either INFs
+    # or NANs.
+
+    if start_index is None or end_index is None:
+        return workspace
+
+    # Get the corresponding Q values
+    q_values = workspace.readX(0)
+
+    start_q = q_values[start_index]
+
+    # Make sure we're inside the bin that we want to crop. This is part of the old framework. It looks like a bug fix,
+    # hence we leave it in here for now. In general this is risky, and it should be a fraction of a bin width by which
+    # we increase the end value
+    is_point_data = len(workspace.dataX(0)) == len(workspace.dataY(0))
+    if is_point_data:
+        end_q = 1.001 * q_values[end_index]
+    else:
+        end_q = 1.001 * q_values[end_index + 1]
+
+    # Crop the workspace in place
+    crop_name = "CropWorkspace"
+    crop_options = {"InputWorkspace": workspace,
+                    "XMin": start_q,
+                    "XMax": end_q}
+    crop_alg = create_child_algorithm(parent_alg, crop_name, **crop_options)
+    crop_alg.setProperty("OutputWorkspace", EMPTY_NAME)
+    crop_alg.execute()
+    ws = crop_alg.getProperty("OutputWorkspace").value
+    return ws
+
+
+def is_valid_data(value):
+    return not isinf(value) and not isnan(value)
diff --git a/scripts/SANS/sans/command_interface/ISISCommandInterface.py b/scripts/SANS/sans/command_interface/ISISCommandInterface.py
new file mode 100644
index 0000000000000000000000000000000000000000..7487245ad7f125b953c827212a496199a6e67cae
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/ISISCommandInterface.py
@@ -0,0 +1,1070 @@
+from __future__ import (absolute_import, division, print_function)
+import re
+import inspect
+from six import types
+from mantid.kernel import config
+from mantid.api import (AnalysisDataService, WorkspaceGroup)
+from SANSadd2 import add_runs
+from sans.sans_batch import SANSBatchReduction
+from sans.command_interface.command_interface_functions import (print_message, warning_message)
+from sans.command_interface.command_interface_state_director import (CommandInterfaceStateDirector, DataCommand,
+                                                                     DataCommandId, NParameterCommand, NParameterCommandId,
+                                                                     FitData)
+from sans.command_interface.batch_csv_file_parser import BatchCsvParser
+from sans.common.constants import ALL_PERIODS
+from sans.common.file_information import (find_sans_file, find_full_file_path)
+from sans.common.enums import (DetectorType, FitType, RangeStepType, ReductionDimensionality,
+                               ISISReductionMode, SANSFacility, SaveType, BatchReductionEntry, OutputMode)
+from sans.common.general_functions import (convert_bank_name_to_detector_type_isis, get_output_name,
+                                           is_part_of_reduced_output_workspace_group)
+
+# Disable plotting if running outside Mantidplot
+try:
+    import mantidplot
+except (Exception, Warning):
+    mantidplot = None
+    # this should happen when this is called from outside Mantidplot and only then,
+    # the result is that attempting to plot will raise an exception
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Globals
+# ----------------------------------------------------------------------------------------------------------------------
+DefaultTrans = True
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# CommandInterfaceStateDirector global instance
+# ----------------------------------------------------------------------------------------------------------------------
+director = CommandInterfaceStateDirector(SANSFacility.ISIS)
+
+
+def deprecated(obj):
+    """
+    Decorator to apply to functions or classes that we think are not being (or
+    should not be) used anymore.  Prints a warning to the log.
+    """
+    if inspect.isfunction(obj) or inspect.ismethod(obj):
+        if inspect.isfunction(obj):
+            obj_desc = "\"%s\" function" % obj.__name__
+        else:
+            obj_desc = "\"%s\" class" % obj.__self__.__class__.__name__
+
+        def print_warning_wrapper(*args, **kwargs):
+            warning_message("The {0} has been marked as deprecated and may be "
+                            "removed in a future version of Mantid. If you "
+                            "believe this to have been marked in error, please "
+                            "contact the member of the Mantid team responsible "
+                            "for ISIS SANS.".format(obj_desc))
+            return obj(*args, **kwargs)
+        return print_warning_wrapper
+
+    # Add a @deprecated decorator to each of the member functions in the class
+    # (by recursion).
+    if inspect.isclass(obj):
+        for name, fn in inspect.getmembers(obj):
+            if isinstance(fn, types.MethodType):
+                setattr(obj, name, deprecated(fn))
+        return obj
+
+    assert False, "Programming error.  You have incorrectly applied the "\
+                  "@deprecated decorator.  This is only for use with functions "\
+                  "or classes."
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Unnecessary commands
+# ----------------------------------------------------------------------------------------------------------------------
+def SetVerboseMode(state):
+    pass
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Setting instruments
+# ----------------------------------------------------------------------------------------------------------------------
+def SANS2D(idf_path=None):
+    config['default.instrument'] = 'SANS2D'
+
+
+def SANS2DTUBES():
+    config['default.instrument'] = 'SANS2D'
+
+
+def LOQ(idf_path='LOQ_Definition_20020226-.xml'):
+    config['default.instrument'] = 'LOQ'
+
+
+def LARMOR(idf_path = None):
+    config['default.instrument'] = 'LARMOR'
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Unused commands
+# ----------------------------------------------------------------------------------------------------------------------
+@deprecated
+def _SetWavelengthRange(start, end):
+    _ = start  # noqa
+    _ = end  # noqa
+    pass
+
+
+@deprecated
+def Reduce():
+    pass
+
+
+@deprecated
+def GetMismatchedDetList():
+    pass
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Currently not implemented commands
+# ----------------------------------------------------------------------------------------------------------------------
+def TransWorkspace(sample, can=None):
+    """
+        Use a given workpspace that contains pre-calculated transmissions
+        @param sample the workspace to use for the sample
+        @param can calculated transmission for the can
+    """
+    _, _ = sample, can  # noqa
+    raise NotImplementedError("The TransWorkspace command is not implemented in SANS v2.")
+
+
+def createColetteScript(inputdata, format, reduced, centreit, plotresults, csvfile='', savepath=''):
+    _, _, _, _, _, _, _ = inputdata, format, reduced, centreit, plotresults, csvfile, savepath  # noqa
+    raise NotImplementedError("The creatColleteScript command is not implemented in SANS v2.")
+
+
+def FindBeamCentre(rlow, rupp, MaxIter=10, xstart=None, ystart=None, tolerance=1.251e-4,  find_direction=None):
+    _, _, _, _, _, _, _ = rlow, rupp, MaxIter, xstart, ystart, tolerance, find_direction  # noqa
+    raise NotImplementedError("The FindBeamCentre command is not implemented in SANS v2.")
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Data related commands
+# ----------------------------------------------------------------------------------------------------------------------
+def AssignSample(sample_run, reload=True, period=ALL_PERIODS):
+    """
+    Sets the sample scatter data.
+
+    @param sample_run: run number to analysis e.g. SANS2D7777.nxs
+    @param reload: must be set to True
+    @param period: the period (entry) number to load, default is the first period
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period = int(period)
+    period = ALL_PERIODS if period == -1 else period
+
+    # Print the output
+    message = 'AssignSample("' + str(sample_run) + '"'
+    if period != ALL_PERIODS:
+        message += ', ' + str(period)
+    message += ')'
+    print_message(message)
+
+    # Get the full file name of the run
+    file_name = find_sans_file(sample_run)
+
+    # Set the command
+    data_command = DataCommand(command_id=DataCommandId.sample_scatter, file_name=file_name, period=period)
+    director.add_command(data_command)
+
+
+def AssignCan(can_run, reload=True, period=ALL_PERIODS):
+    """
+    Sets the can scatter data.
+
+    @param can_run: run number to analysis e.g. SANS2D7777.nxs
+    @param reload: must be set to True
+    @param period: the period (entry) number to load, default is the first period
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period = int(period)
+    period = ALL_PERIODS if period == -1 else period
+
+    # Print the output
+    message = 'AssignCan("' + str(can_run) + '"'
+    if period != ALL_PERIODS:
+        message += ', ' + str(period)
+    message += ')'
+    print_message(message)
+
+    # Get the full file name of the run
+    file_name = find_sans_file(can_run)
+
+    # Set the command
+    data_command = DataCommand(command_id=DataCommandId.can_scatter, file_name=file_name, period=period)
+    director.add_command(data_command)
+
+
+def TransmissionSample(sample, direct, reload=True,
+                       period_t=ALL_PERIODS, period_d=ALL_PERIODS):
+    """
+    Specify the transmission and direct runs for the sample.
+
+    @param sample: the transmission run
+    @param direct: direct run
+    @param reload: if to replace the workspace if it is already there
+    @param period_t: the entry number of the transmission run (default single entry file)
+    @param period_d: the entry number of the direct run (default single entry file)
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period_t = int(period_t)
+    period_d = int(period_d)
+    period_t = ALL_PERIODS if period_t == -1 else period_t
+    period_d = ALL_PERIODS if period_d == -1 else period_d
+
+    print_message('TransmissionSample("' + str(sample) + '","' + str(direct) + '")')
+
+    # Get the full file name of the run
+    trans_file_name = find_sans_file(sample)
+    direct_file_name = find_sans_file(direct)
+
+    # Set the command
+    trans_command = DataCommand(command_id=DataCommandId.sample_transmission, file_name=trans_file_name,
+                                period=period_t)
+    direct_command = DataCommand(command_id=DataCommandId.sample_direct, file_name=direct_file_name, period=period_d)
+    director.add_command(trans_command)
+    director.add_command(direct_command)
+
+
+def TransmissionCan(can, direct, reload=True, period_t=-1, period_d=-1):
+    """
+    Specify the transmission and direct runs for the can
+    @param can: the transmission run
+    @param direct: direct run
+    @param reload: if to replace the workspace if it is already there
+    @param period_t: the entry number of the transmission run (default single entry file)
+    @param period_d: the entry number of the direct run (default single entry file)
+    """
+    _ = reload  # noqa
+    # First of all the default for all periods used to be -1. If we encounter this then set periods to ALL_PERIODS
+    period_t = int(period_t)
+    period_d = int(period_d)
+    period_t = ALL_PERIODS if period_t == -1 else period_t
+    period_d = ALL_PERIODS if period_d == -1 else period_d
+
+    print_message('TransmissionCan("' + str(can) + '","' + str(direct) + '")')
+
+    # Get the full file name of the run
+    trans_file_name = find_sans_file(can)
+    direct_file_name = find_sans_file(direct)
+
+    # Set the command
+    trans_command = DataCommand(command_id=DataCommandId.can_transmission, file_name=trans_file_name, period=period_t)
+    direct_command = DataCommand(command_id=DataCommandId.can_direct, file_name=direct_file_name, period=period_d)
+    director.add_command(trans_command)
+    director.add_command(direct_command)
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# N parameter commands
+# ----------------------------------------------------------------------------------------------------------------------
+
+
+# ------------------------
+# Zero parameters
+# ------------------------
+def Clean():
+    """
+    Removes all previous settings.
+    """
+    clean_command = NParameterCommand(command_id=NParameterCommandId.clean, values=[])
+    director.add_command(clean_command)
+
+
+def Set1D():
+    """
+    Sets the reduction dimensionality to 1D
+    """
+    print_message('Set1D()')
+    set_1d_command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                       values=[ReductionDimensionality.OneDim])
+    director.add_command(set_1d_command)
+
+
+def Set2D():
+    """
+    Sets the reduction dimensionality to 2D
+    """
+    print_message('Set2D()')
+    set_2d_command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                       values=[ReductionDimensionality.TwoDim])
+    director.add_command(set_2d_command)
+
+
+def UseCompatibilityMode():
+    """
+    Sets the compatibility mode to True
+    """
+    set_2d_command = NParameterCommand(command_id=NParameterCommandId.compatibility_mode,
+                                       values=[True])
+    director.add_command(set_2d_command)
+
+
+# -------------------------
+# Single parameter commands
+# -------------------------
+def MaskFile(file_name):
+    """
+    Loads the user file (note that mask file is the legacy description user file)
+
+    @param file_name: path to the user file.
+    """
+    print_message('#Opening "' + file_name + '"')
+
+    # Get the full file path
+    file_name_full = find_full_file_path(file_name)
+    user_file_command = NParameterCommand(command_id=NParameterCommandId.user_file, values=[file_name_full])
+    director.add_command(user_file_command)
+
+
+def Mask(details):
+    """
+    Allows the user to specify a mask command as is done in the user file.
+
+    @param details: a string that specifies masking as it would appear in a mask file
+    """
+    print_message('Mask("' + details + '")')
+    mask_command = NParameterCommand(command_id=NParameterCommandId.mask, values=[details])
+    director.add_command(mask_command)
+
+
+def SetSampleOffset(value):
+    """
+    Set the sample offset.
+
+    @param value: the offset in mm
+    """
+    value = float(value)
+    sample_offset_command = NParameterCommand(command_id=NParameterCommandId.sample_offset, values=[value])
+    director.add_command(sample_offset_command)
+
+
+def Detector(det_name):
+    """
+    Sets the detector which is being used for the reduction.
+
+    Previous comment: Sets the detector bank to use for the reduction e.g. 'front-detector'. The main detector is
+     assumed if this line is not given
+    @param det_name: the detector's name
+    """
+    print_message('Detector("' + det_name + '")')
+    detector_type = convert_bank_name_to_detector_type_isis(det_name)
+    reduction_mode = ISISReductionMode.HAB if detector_type is DetectorType.HAB else ISISReductionMode.LAB
+    detector_command = NParameterCommand(command_id=NParameterCommandId.detector, values=[reduction_mode])
+    director.add_command(detector_command)
+
+
+def SetEventSlices(input_str):
+    """
+    Sets the events slices
+    """
+    event_slices_command = NParameterCommand(command_id=NParameterCommandId.event_slices, values=input_str)
+    director.add_command(event_slices_command)
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Double valued commands
+# ----------------------------------------------------------------------------------------------------------------------
+def SetMonitorSpectrum(specNum, interp=False):
+    """
+    Specifies the spectrum number of the spectrum that will be used to for monitor normalisation
+    @param specNum: a spectrum number (1 or greater)
+    @param interp: when rebinning the wavelength bins to match the main workspace, if use interpolation
+                   default no interpolation
+    """
+    specNum = int(specNum)
+    monitor_spectrum_command = NParameterCommand(command_id=NParameterCommandId.monitor_spectrum, values=[specNum,
+                                                                                                          interp])
+    director.add_command(monitor_spectrum_command)
+
+
+def SetTransSpectrum(specNum, interp=False):
+    """
+    Sets the spectrum number (of the incident monitor) and the interpolation configuration for transmission calculation.
+
+    @param specNum: a spectrum number (1 or greater)
+    @param interp: when rebinning the wavelength bins to match the main workspace, if use interpolation
+                   default no interpolation
+    """
+    specNum = int(specNum)
+    transmission_spectrum_command = NParameterCommand(command_id=NParameterCommandId.transmission_spectrum,
+                                                      values=[specNum, interp])
+    director.add_command(transmission_spectrum_command)
+
+
+def Gravity(flag, extra_length=0.0):
+    """
+    Allows the user to set the gravity correction for the q conversion.
+    @param flag: set to True if the correction should be used, else False.
+    @param extra_length: the extra length in meter.
+    @return:
+    """
+    extra_length = float(extra_length)
+    print_message('Gravity(' + str(flag) + ', ' + str(extra_length) + ')')
+    gravity_command = NParameterCommand(command_id=NParameterCommandId.gravity, values=[flag, extra_length])
+    director.add_command(gravity_command)
+
+
+def SetDetectorFloodFile(filename, detector_name="REAR"):
+    """
+    Sets the pixel correction file for a particular detector
+
+    @param filename: the name of the file.
+    @param detector_name: the name of the detector
+    """
+    file_name = find_full_file_path(filename)
+    detector_name = convert_bank_name_to_detector_type_isis(detector_name)
+    flood_command = NParameterCommand(command_id=NParameterCommandId.flood_file, values=[file_name, detector_name])
+    director.add_command(flood_command)
+
+
+def SetCorrectionFile(bank, filename):
+    # 10/03/15 RKH, create a new routine that allows change of "direct beam file" = correction file,
+    # for a given detector, this simplify the iterative process used to adjust it.
+    # Will still have to keep changing the name of the file
+    # for each iteratiom to avoid Mantid using a cached version, but can then use
+    # only a single user (=mask) file for each set of iterations.
+    # Modelled this on SetDetectorOffsets above ...
+    """
+        @param bank: Must be either 'front' or 'rear' (not case sensitive)
+        @param filename: self explanatory
+    """
+    print_message("SetCorrectionFile(" + str(bank) + ', ' + filename + ')')
+    detector_type = convert_bank_name_to_detector_type_isis(bank)
+    file_name = find_full_file_path(filename)
+    flood_command = NParameterCommand(command_id=NParameterCommandId.wavelength_correction_file,
+                                      values=[file_name, detector_type])
+    director.add_command(flood_command)
+
+
+# --------------------------
+# Three parameter commands
+# ---------------------------
+def SetCentre(xcoord, ycoord, bank='rear'):
+    """
+    Configure the Beam Center position. It support the configuration of the centre for
+    both detectors bank (low-angle bank and high-angle bank detectors)
+
+    It allows defining the position for both detector banks.
+    :param xcoord: X position of beam center in the user coordinate system.
+    :param ycoord: Y position of beam center in the user coordinate system.
+    :param bank: The selected bank ('rear' - low angle or 'front' - high angle)
+    Introduced #5942
+    """
+    xcoord = float(xcoord)
+    ycoord = float(ycoord)
+    print_message('SetCentre(' + str(xcoord) + ', ' + str(ycoord) + ')')
+    detector_type = convert_bank_name_to_detector_type_isis(bank)
+    centre_command = NParameterCommand(command_id=NParameterCommandId.centre, values=[xcoord, ycoord, detector_type])
+    director.add_command(centre_command)
+
+
+def SetPhiLimit(phimin, phimax, use_mirror=True):
+    """
+        Call this function to restrict the analyse segments of the detector. Phimin and
+        phimax define the limits of the segment where phi=0 is the -x axis and phi = 90
+        is the y-axis. Setting use_mirror to true includes a second segment to be included
+        it is the same as the first but rotated 180 degrees.
+        @param phimin: the minimum phi angle to include
+        @param phimax: the upper limit on phi for the segment
+        @param use_mirror: when True (default) another segment is included, rotated 180 degrees from the first
+    """
+    print_message("SetPhiLimit(" + str(phimin) + ', ' + str(phimax) + ',use_mirror=' + str(use_mirror) + ')')
+    # a beam centre of [0,0,0] makes sense if the detector has been moved such that beam centre is at [0,0,0]
+    phimin = float(phimin)
+    phimax = float(phimax)
+    centre_command = NParameterCommand(command_id=NParameterCommandId.phi_limit, values=[phimin, phimax, use_mirror])
+    director.add_command(centre_command)
+
+
+def set_save(save_algorithms, save_as_zero_error_free):
+    """
+    Mainly internally used by BatchMode. Provides the save settings.
+
+    @param save_algorithms: A list of SaveType enums.
+    @param save_as_zero_error_free: True if a zero error correction should be performed.
+    """
+    save_command = NParameterCommand(command_id=NParameterCommandId.save, values=[save_algorithms,
+                                                                                  save_as_zero_error_free])
+    director.add_command(save_command)
+
+
+# --------------------------
+# Four parameter commands
+# ---------------------------
+def TransFit(mode, lambdamin=None, lambdamax=None, selector='BOTH'):
+    """
+        Sets the fit method to calculate the transmission fit and the wavelength range
+        over which to do the fit. These arguments are passed to the algorithm
+        CalculateTransmission. If mode is set to 'Off' then the unfitted workspace is
+        used and lambdamin and max have no effect
+        @param mode: can be 'Logarithmic' ('YLOG', 'LOG') 'OFF' ('CLEAR') or 'LINEAR' (STRAIGHT', LIN'),
+                     'POLYNOMIAL2', 'POLYNOMIAL3', ...
+        @param lambdamin: the lowest wavelength to use in any fit
+        @param lambdamax: the end of the fit range
+        @param selector: define for which transmission this fit specification is valid (BOTH, SAMPLE, CAN)
+    """
+    def does_pattern_match(compiled_regex, line):
+        return compiled_regex.match(line) is not None
+
+    def extract_polynomial_order(line):
+        order = re.sub("POLYNOMIAL", "", line)
+        order = order.strip()
+        return int(order)
+
+    polynomial_pattern = re.compile("\\s*" + "POLYNOMIAL" + "\\s*[2-9]")
+    polynomial_order = None
+    # Get the fit mode
+    mode = str(mode).strip().upper()
+
+    if mode == "LINEAR" or mode == "STRAIGHT" or mode == "LIN":
+        fit_type = FitType.Linear
+    elif mode == "LOGARITHMIC" or mode == "LOG" or mode == "YLOG":
+        fit_type = FitType.Log
+    elif does_pattern_match(polynomial_pattern, mode):
+        fit_type = FitType.Polynomial
+        polynomial_order = extract_polynomial_order(mode)
+    else:
+        fit_type = FitType.NoFit
+
+    # Get the selected detector to which the fit settings apply
+    selector = str(selector).strip().upper()
+    if selector == "SAMPLE":
+        fit_data = FitData.Sample
+    elif selector == "CAN":
+        fit_data = FitData.Can
+    elif selector == "BOTH":
+        fit_data = FitData.Both
+    else:
+        raise RuntimeError("TransFit: The selected fit data {0} is not valid. You have to either SAMPLE, "
+                           "CAN or BOTH.".format(selector))
+
+    # Output message
+    message = mode
+    if lambdamin:
+        lambdamin = float(lambdamin)
+        message += ', ' + str(lambdamin)
+    if lambdamax:
+        lambdamax = float(lambdamax)
+        message += ', ' + str(lambdamax)
+    message += ', selector=' + selector
+    print_message("TransFit(\"" + message + "\")")
+
+    # Configure fit settings
+    polynomial_order = polynomial_order if polynomial_order is not None else 0
+    fit_command = NParameterCommand(command_id=NParameterCommandId.centre, values=[fit_data, lambdamin, lambdamax,
+                                                                                   fit_type, polynomial_order])
+    director.add_command(fit_command)
+
+
+def LimitsR(rmin, rmax, quiet=False, reducer=None):
+    """
+    Sets the radius limits
+
+    @param rmin: minimal radius in mm
+    @param rmax: maximal radius in mm
+    @param quiet: if True then no message will be logged.
+    @param reducer: legacy parameter
+    """
+    _ = reducer  # noqa
+    rmin = float(rmin)
+    rmax = float(rmax)
+    if not quiet:
+        print_message('LimitsR(' + str(rmin) + ', ' + str(rmax) + ')', reducer)
+    rmin /= 1000.
+    rmax /= 1000.
+    radius_command = NParameterCommand(command_id=NParameterCommandId.mask_radius, values=[rmin, rmax])
+    director.add_command(radius_command)
+
+
+def LimitsWav(lmin, lmax, step, bin_type):
+    """
+    Set the wavelength limits
+
+    @param lmin: the lower wavelength bound.
+    @param lmax: the upper wavelength bound.
+    @param step: the wavelength step.
+    @param bin_type: teh bin type, ie linear or logarithmic. Accepted strings are "LINEAR" and "LOGARITHMIC"
+    """
+    lmin = float(lmin)
+    lmax = float(lmax)
+    step = float(step)
+
+    print_message('LimitsWav(' + str(lmin) + ', ' + str(lmax) + ', ' + str(step) + ', ' + bin_type + ')')
+
+    rebin_string = bin_type.strip().upper()
+    rebin_type = RangeStepType.Log if rebin_string == "LOGARITHMIC" else RangeStepType.Lin
+
+    wavelength_command = NParameterCommand(command_id=NParameterCommandId.wavelength_limit,
+                                           values=[lmin, lmax, step, rebin_type])
+    director.add_command(wavelength_command)
+
+
+def LimitsQXY(qmin, qmax, step, type):
+    """
+        To set the bin parameters for the algorithm Qxy()
+        @param qmin: the first Q value to include
+        @param qmaz: the last Q value to include
+        @param step: bin width
+        @param type: pass LOG for logarithmic binning
+    """
+    qmin = float(qmin)
+    qmax = float(qmax)
+    step = float(step)
+
+    print_message('LimitsQXY(' + str(qmin) + ', ' + str(qmax) + ', ' + str(step) + ', ' + str(type) + ')')
+    step_type_string = type.strip().upper()
+    if step_type_string == "LOGARITHMIC" or step_type_string == "LOG":
+        step_type = RangeStepType.Log
+    else:
+        step_type = RangeStepType.Lin
+    qxy_command = NParameterCommand(command_id=NParameterCommandId.qxy_limit, values=[qmin, qmax, step, step_type])
+    director.add_command(qxy_command)
+
+
+# --------------------------
+# Six parameter commands
+# --------------------------
+def SetFrontDetRescaleShift(scale=1.0, shift=0.0, fitScale=False, fitShift=False, qMin=None, qMax=None):
+    """
+        Stores property about the detector which is used to rescale and shift
+        data in the bank after data have been reduced
+        @param scale: Default to 1.0. Value to multiply data with
+        @param shift: Default to 0.0. Value to add to data
+        @param fitScale: Default is False. Whether or not to try and fit this param
+        @param fitShift: Default is False. Whether or not to try and fit this param
+        @param qMin: When set to None (default) then for fitting use the overlapping q region of
+                     front and rear detectors
+        @param qMax: When set to None (default) then for fitting use the overlapping q region of
+                     front and rear detectors
+    """
+    scale = float(scale)
+    shift = float(shift)
+
+    if qMin:
+        qMin = float(qMin)
+    if qMax:
+        qMax = float(qMax)
+
+    print_message('Set front detector rescale/shift values to {0} and {1}'.format(scale, shift))
+    front_command = NParameterCommand(command_id=NParameterCommandId.front_detector_rescale, values=[scale, shift,
+                                                                                                     fitScale, fitShift,
+                                                                                                     qMin, qMax])
+    director.add_command(front_command)
+
+
+def SetDetectorOffsets(bank, x, y, z, rot, radius, side, xtilt=0.0, ytilt=0.0):
+    """
+        Adjust detector position away from position defined in IDF. On SANS2D the detector
+        banks can be moved around. This method allows fine adjustments of detector bank position
+        in the same way as the DET/CORR userfile command works. Hence please see
+        http://www.mantidproject.org/SANS_User_File_Commands#DET for details.
+
+        The comment below is not true any longer:
+            Note, for now, this command will only have an effect on runs loaded
+            after this command have been executed (because it is when runs are loaded
+            that components are moved away from the positions set in the IDF)
+
+
+        @param bank: Must be either 'front' or 'rear' (not case sensitive)
+        @param x: shift in mm
+        @param y: shift in mm
+        @param z: shift in mm
+        @param rot: shift in degrees
+        @param radius: shift in mm
+        @param side: shift in mm
+        @param xtilt: xtilt in degrees
+        @param ytilt: ytilt in degrees
+    """
+    x = float(x)
+    y = float(y)
+    z = float(z)
+    rot = float(rot)
+    radius = float(radius)
+    side = float(side)
+    xtilt = float(xtilt)
+    ytilt = float(ytilt)
+
+    print_message("SetDetectorOffsets(" + str(bank) + ', ' + str(x)
+                  + ',' + str(y) + ',' + str(z) + ',' + str(rot)
+                  + ',' + str(radius) + ',' + str(side) + ',' + str(xtilt) + ',' + str(ytilt) + ')')
+    detector_type = convert_bank_name_to_detector_type_isis(bank)
+    detector_offsets = NParameterCommand(command_id=NParameterCommandId.detector_offsets, values=[detector_type,
+                                                                                                  x, y, z,
+                                                                                                  rot, radius, side,
+                                                                                                  xtilt, ytilt])
+    director.add_command(detector_offsets)
+
+
+# --------------------------------------------
+# Commands which actually kick off a reduction
+# --------------------------------------------
+def WavRangeReduction(wav_start=None, wav_end=None, full_trans_wav=None, name_suffix=None, combineDet=None,
+                      resetSetup=True, out_fit_settings=None, output_name=None, output_mode=OutputMode.PublishToADS,
+                      use_reduction_mode_as_suffix=False):
+    """
+        Run reduction from loading the raw data to calculating Q. Its optional arguments allows specifics
+        details to be adjusted, and optionally the old setup is reset at the end. Note if FIT of RESCALE or SHIFT
+        is selected then both REAR and FRONT detectors are both reduced EXCEPT if only the REAR detector is selected
+        to be reduced
+
+        @param wav_start: the first wavelength to be in the output data
+        @param wav_end: the last wavelength in the output data
+        @param full_trans_wav: if to use a wide wavelength range, the instrument's default wavelength range,
+                               for the transmission correction, false by default
+        @param name_suffix: append the created output workspace with this
+        @param combineDet: combineDet can be one of the following:
+                           'rear'                (run one reduction for the 'rear' detector data)
+                           'front'               (run one reduction for the 'front' detector data, and
+                                                  rescale+shift 'front' data)
+                           'both'                (run both the above two reductions)
+                           'merged'              (run the same reductions as 'both' and additionally create
+                                                  a merged data workspace)
+                            None                 (run one reduction for whatever detector has been set as the
+                                                  current detector
+                                                  before running this method. If front apply rescale+shift)
+        @param resetSetup: if true reset setup at the end
+        @param out_fit_settings: An output parameter. It is used, specially when resetSetup is True, in order
+                                 to remember the 'scale and fit' of the fitting algorithm.
+        @param output_name: name of the output workspace/file, if none is specified then one is generated internally.
+        @param output_mode: the way the data should be put out: Can be PublishToADS, SaveToFile or Both
+        @param use_reduction_mode_as_suffix: If true then a second suffix will be used which is
+                                             based on the reduction mode.
+        @return Name of one of the workspaces created
+    """
+    print_message('WavRangeReduction(' + str(wav_start) + ', ' + str(wav_end) + ', ' + str(full_trans_wav) + ')')
+    _ = resetSetup
+    _ = out_fit_settings
+
+    # Set the provided parameters
+    if combineDet is None:
+        reduction_mode = None
+    elif combineDet == 'rear':
+        reduction_mode = ISISReductionMode.LAB
+    elif combineDet == "front":
+        reduction_mode = ISISReductionMode.HAB
+    elif combineDet == "merged":
+        reduction_mode = ISISReductionMode.Merged
+    elif combineDet == "both":
+        reduction_mode = ISISReductionMode.All
+    else:
+        raise RuntimeError("WavRangeReduction: The combineDet input parameter was given a value of {0}. rear, front,"
+                           " both, merged and no input are allowed".format(combineDet))
+
+    if wav_start is not None:
+        wav_start = float(wav_start)
+
+    if wav_end is not None:
+        wav_end = float(wav_end)
+
+    wavelength_command = NParameterCommand(command_id=NParameterCommandId.wavrange_settings,
+                                           values=[wav_start, wav_end, full_trans_wav, reduction_mode])
+    director.add_command(wavelength_command)
+
+    # Save options
+    if output_name is not None:
+        director.add_command(NParameterCommand(command_id=NParameterCommandId.user_specified_output_name,
+                                               values=[output_name]))
+    if name_suffix is not None:
+        director.add_command(NParameterCommand(command_id=NParameterCommandId.user_specified_output_name_suffix,
+                                               values=[name_suffix]))
+    if use_reduction_mode_as_suffix:
+        director.add_command(NParameterCommand(command_id=NParameterCommandId.use_reduction_mode_as_suffix,
+                                               values=[use_reduction_mode_as_suffix]))
+
+    # Get the states
+    state = director.process_commands()
+
+    # Run the reduction
+    batch_alg = SANSBatchReduction()
+    batch_alg(states=[state], use_optimizations=True, output_mode=output_mode)
+
+    # -----------------------------------------------------------
+    # Return the name fo the reduced workspace (or WorkspaceGroup)
+    # -----------------------------------------------------------
+    reduction_mode = state.reduction.reduction_mode
+    is_group = is_part_of_reduced_output_workspace_group(state)
+    _, output_workspace_base_name = get_output_name(state, reduction_mode, is_group)
+    return output_workspace_base_name
+
+
+def BatchReduce(filename, format, plotresults=False, saveAlgs=None, verbose=False,  # noqa
+                centreit=False, reducer=None, combineDet=None, save_as_zero_error_free=False):  # noqa
+    """
+        @param filename: the CSV file with the list of runs to analyse
+        @param format: type of file to load, nxs for Nexus, etc.
+        @param plotresults: if true and this function is run from Mantidplot a graph will be created for the results of each reduction
+        @param saveAlgs: this named algorithm will be passed the name of the results workspace and filename (default = 'SaveRKH').
+            Pass a tuple of strings to save to multiple file formats
+        @param verbose: set to true to write more information to the log (default=False)
+        @param centreit: do centre finding (default=False)
+        @param reducer: if to use the command line (default) or GUI reducer object
+        @param combineDet: that will be forward to WavRangeReduction (rear, front, both, merged, None)
+        @param save_as_zero_error_free: Should the reduced workspaces contain zero errors or not
+        @return final_setings: A dictionary with some values of the Reduction - Right Now:(scale, shift)
+    """
+    if saveAlgs is None:
+        saveAlgs = {'SaveRKH': 'txt'}
+
+    # From the old interface
+    _ = format
+    _ = reducer
+    _ = verbose
+
+    if centreit:
+        raise RuntimeError("The beam centre finder is currently not supported.")
+    if plotresults:
+        raise RuntimeError("Plotting the results is currenlty not supported.")
+
+    # Set up the save algorithms
+    save_algs = []
+
+    if saveAlgs:
+        for key, _ in list(saveAlgs.items()):
+            if key == "SaveRKH":
+                save_algs.append(SaveType.RKH)
+            elif key == "SaveNexus":
+                save_algs.append(SaveType.Nexus)
+            elif key == "SaveNistQxy":
+                save_algs.append(SaveType.NistQxy)
+            elif key == "SaveCanSAS" or key == "SaveCanSAS1D":
+                save_algs.append(SaveType.CanSAS)
+            elif key == "SaveCSV":
+                save_algs.append(SaveType.CSV)
+            elif key == "SaveNXcanSAS":
+                save_algs.append(SaveType.NXcanSAS)
+            else:
+                raise RuntimeError("The save format {0} is not known.".format(key))
+        output_mode = OutputMode.Both
+    else:
+        output_mode = OutputMode.PublishToADS
+
+    # Get the information from the csv file
+    batch_csv_parser = BatchCsvParser(filename)
+    parsed_batch_entries = batch_csv_parser.parse_batch_file()
+
+    # Get a state with all existing settings
+    for parsed_batch_entry in parsed_batch_entries:
+        # A new user file. If a new user file is provided then this will overwrite all other settings from,
+        # otherwise we might have cross-talk between user files.
+        if BatchReductionEntry.UserFile in list(parsed_batch_entry.keys()):
+            user_file = parsed_batch_entry[BatchReductionEntry.UserFile]
+            MaskFile(user_file)
+
+        # Sample scatter
+        sample_scatter = parsed_batch_entry[BatchReductionEntry.SampleScatter]
+        sample_scatter_period = parsed_batch_entry[BatchReductionEntry.SampleScatterPeriod]
+        AssignSample(sample_run=sample_scatter, period=sample_scatter_period)
+
+        # Sample transmission
+        if (BatchReductionEntry.SampleTransmission in list(parsed_batch_entry.keys()) and
+           BatchReductionEntry.SampleDirect in list(parsed_batch_entry.keys())):
+            sample_transmission = parsed_batch_entry[BatchReductionEntry.SampleTransmission]
+            sample_transmission_period = parsed_batch_entry[BatchReductionEntry.SampleTransmissionPeriod]
+            sample_direct = parsed_batch_entry[BatchReductionEntry.SampleDirect]
+            sample_direct_period = parsed_batch_entry[BatchReductionEntry.SampleDirectPeriod]
+            TransmissionSample(sample=sample_transmission, direct=sample_direct,
+                               period_t=sample_transmission_period, period_d=sample_direct_period)
+
+        # Can scatter
+        if BatchReductionEntry.CanScatter in list(parsed_batch_entry.keys()):
+            can_scatter = parsed_batch_entry[BatchReductionEntry.CanScatter]
+            can_scatter_period = parsed_batch_entry[BatchReductionEntry.CanScatterPeriod]
+            AssignCan(can_run=can_scatter, period=can_scatter_period)
+
+        # Can transmission
+        if (BatchReductionEntry.CanTransmission in list(parsed_batch_entry.keys()) and
+           BatchReductionEntry.CanDirect in list(parsed_batch_entry.keys())):
+            can_transmission = parsed_batch_entry[BatchReductionEntry.CanTransmission]
+            can_transmission_period = parsed_batch_entry[BatchReductionEntry.CanTransmissionPeriod]
+            can_direct = parsed_batch_entry[BatchReductionEntry.CanDirect]
+            can_direct_period = parsed_batch_entry[BatchReductionEntry.CanDirectPeriod]
+            TransmissionCan(can=can_transmission, direct=can_direct,
+                            period_t=can_transmission_period, period_d=can_direct_period)
+
+        # Name of the output. We need to modify the name according to the setup of the old reduction mechanism
+        output_name = parsed_batch_entry[BatchReductionEntry.Output]
+
+        # In addition to the output name the user can specify with combineDet an additional suffix (in addtion to the
+        # suffix that the user can set already -- was there previously, so we have to provide that)
+        use_reduction_mode_as_suffix = combineDet is not None
+
+        # Apply save options
+        if save_algs:
+            set_save(save_algorithms=save_algs, save_as_zero_error_free=save_as_zero_error_free)
+
+        # Run the reduction for a single
+        reduced_workspace_name = WavRangeReduction(combineDet=combineDet, output_name=output_name,
+                                                   output_mode=output_mode,
+                                                   use_reduction_mode_as_suffix=use_reduction_mode_as_suffix)
+
+        # Remove the settings which were very specific for this single reduction which are:
+        # 1. The last user file (if any was set)
+        # 2. The last scatter entry
+        # 3. The last scatter transmission and direct entry (if any were set)
+        # 4. The last can scatter ( if any was set)
+        # 5. The last can transmission and direct entry (if any were set)
+        if BatchReductionEntry.UserFile in list(parsed_batch_entry.keys()):
+            director.remove_last_user_file()
+        director.remove_last_scatter_sample()
+
+        if (BatchReductionEntry.SampleTransmission in list(parsed_batch_entry.keys()) and
+            BatchReductionEntry.SampleDirect in list(parsed_batch_entry.keys())):  # noqa
+            director.remove_last_sample_transmission_and_direct()
+
+        if BatchReductionEntry.CanScatter in list(parsed_batch_entry.keys()):
+            director.remove_last_scatter_can()
+
+        if (BatchReductionEntry.CanTransmission in list(parsed_batch_entry.keys()) and
+           BatchReductionEntry.CanDirect in list(parsed_batch_entry.keys())):
+            director.remove_last_can_transmission_and_direct()
+
+        # Plot the results if that was requested, the flag 1 is from the old version.
+        if plotresults == 1:
+            if AnalysisDataService.doesExist(reduced_workspace_name):
+                workspace = AnalysisDataService.retrieve(reduced_workspace_name)
+                if isinstance(workspace, WorkspaceGroup):
+                    for ws in workspace:
+                        PlotResult(ws.getName())
+                else:
+                    PlotResult(workspace.getName())
+
+
+def CompWavRanges(wavelens, plot=True, combineDet=None, resetSetup=True):
+    """
+        Compares the momentum transfer results calculated from different wavelength ranges. Given
+        the list of wave ranges [a, b, c] it reduces for wavelengths a-b, b-c and a-c.
+        @param wavelens: the list of wavelength ranges
+        @param plot: set this to true to plot the result (must be run in Mantid), default is true
+        @param combineDet: see description in WavRangeReduction
+        @param resetSetup: if true reset setup at the end
+    """
+
+    print_message('CompWavRanges( %s,plot=%s)' % (str(wavelens), plot))
+
+    if not isinstance(wavelens, list) or len(wavelens) < 2:
+        if not isinstance(wavelens, tuple):
+            raise RuntimeError(
+                'Error CompWavRanges() requires a list of wavelengths between which '
+                'reductions will be performed.')
+
+    # Perform a reduction over the full wavelength range which was specified
+    reduced_workspace_names = []
+
+    for index in range(len(wavelens)):
+        wavelens[index] = float(wavelens[index])
+
+    full_reduction_name = WavRangeReduction(wav_start=wavelens[0], wav_end=wavelens[- 1],
+                                            combineDet=combineDet, resetSetup=False)
+    reduced_workspace_names.append(full_reduction_name)
+
+    # Reduce each wavelength slice
+    for i in range(0, len(wavelens) - 1):
+        reduced_workspace_name = WavRangeReduction(wav_start=wavelens[i], wav_end=wavelens[i + 1],
+                                                   combineDet=combineDet, resetSetup=False)
+        reduced_workspace_names.append(reduced_workspace_name)
+
+    if plot and mantidplot:
+        mantidplot.plotSpectrum(reduced_workspace_names, 0)
+
+    # Return just the workspace name of the full range
+    return reduced_workspace_names[0]
+
+
+def PhiRanges(phis, plot=True):
+    """
+        Given a list of phi ranges [a, b, c, d] it reduces in the phi ranges a-b and c-d
+        @param phis: the list of phi ranges
+        @param plot: set this to true to plot the result (must be run in Mantid), default is true
+    """
+
+    print_message('PhiRanges( %s,plot=%s)' % (str(phis), plot))
+
+    # todo covert their string into Python array
+
+    if len(phis) % 2 != 0:
+        raise RuntimeError('Phi ranges must be given as pairs')
+
+    reduced_workspace_names = []
+    for i in range(0, len(phis), 2):
+        SetPhiLimit(phis[i], phis[i + 1])
+        reduced_workspace_name = WavRangeReduction()
+        reduced_workspace_names.append(reduced_workspace_name)
+
+    if plot and mantidplot:
+        mantidplot.plotSpectrum(reduced_workspace_names, 0)
+
+    # Return just the workspace name of the full range
+    return reduced_workspace_names[0]
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# General commands
+# ----------------------------------------------------------------------------------------------------------------------
+def PlotResult(workspace, canvas=None):
+    """
+        Draws a graph of the passed workspace. If the workspace is 2D (has many spectra
+        a contour plot is written
+        @param workspace: a workspace name or handle to plot
+        @param canvas: optional handle to an existing graph to write the plot to
+        @return: a handle to the graph that was written to
+    """
+    try:
+        import mantidplot
+        workspace = AnalysisDataService.retrieve(str(workspace))
+        number_of_spectra = workspace[0].getNumberHistograms() if isinstance(workspace, WorkspaceGroup) else\
+            workspace.getNumberHistograms()
+        graph = mantidplot.plotSpectrum(workspace, 0) if number_of_spectra == 1 else \
+            mantidplot.importMatrixWorkspace(workspace.getName()).plotGraph2D()
+
+        if canvas is not None:
+            # we were given a handle to an existing graph, use it
+            mantidplot.mergePlots(canvas, graph)
+            graph = canvas
+        return graph
+    except ImportError:
+        print_message('Plot functions are not available, is this being run from outside Mantidplot?')
+
+
+def AddRuns(runs, instrument='sans2d', saveAsEvent=False, binning="Monitors", isOverlay=False, time_shifts=None,
+            defType='.nxs', rawTypes=('.raw', '.s*', 'add', '.RAW'), lowMem=False):
+    '''
+    Method to expose the add_runs functionality for custom scripting.
+    @param runs: a list with the requested run numbers
+    @param instrument: the name of the selected instrument
+    @param saveAsEvent: when adding event-type data, then this can be stored as event-type data
+    @param binning: where to get the binnings from. This is relevant when adding Event-type data.
+                    The property can be set to "Monitors" in order to emulate the binning of the monitors or to a
+                    string list with the same format that is used for the Rebin algorithm. This property is ignored
+                    when saving as event data.
+    @param isOverlay: sets if the the overlay mechanism should be used when the saveAsEvent flag is set
+    @param time_shifts: provides additional time shifts if the isOverlay flag is specified. The time shifts are specifed
+                        in a string list. Either time_shifts is not used or a list with times in secomds. Note that there
+                        has to be one entry fewer than the number of workspaces to add.
+    @param defType: the file type
+    @param rawTypes: the raw types
+    @param lowMem: if the lowMem option should be used
+    @returns a success message
+    '''
+    # Need at least two runs to work
+    if len(runs) < 1:
+        print_message("AddRuns issue: A list with at least two runs needs to be provided.")
+        return
+
+    if time_shifts is None:
+        time_shifts = []
+
+    return add_runs(runs=runs,
+                    inst=instrument,
+                    defType=defType,
+                    rawTypes=rawTypes,
+                    lowMem=lowMem,
+                    binning=binning,
+                    saveAsEvent=saveAsEvent,
+                    isOverlay=isOverlay,
+                    time_shifts=time_shifts)
diff --git a/scripts/SANS/sans/command_interface/__init__.py b/scripts/SANS/sans/command_interface/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/scripts/SANS/sans/command_interface/batch_csv_file_parser.py b/scripts/SANS/sans/command_interface/batch_csv_file_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e0f8b1ecf208cfd2dd1557a55864ada22db1a06
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/batch_csv_file_parser.py
@@ -0,0 +1,158 @@
+from __future__ import (absolute_import, division, print_function)
+import re
+from csv import reader
+from sans.common.enums import BatchReductionEntry
+from sans.common.file_information import find_full_file_path
+from sans.common.constants import ALL_PERIODS
+
+
+class BatchCsvParser(object):
+    batch_file_keywords = {"sample_sans": BatchReductionEntry.SampleScatter,
+                           "output_as": BatchReductionEntry.Output,
+                           "sample_trans": BatchReductionEntry.SampleTransmission,
+                           "sample_direct_beam": BatchReductionEntry.SampleDirect,
+                           "can_sans": BatchReductionEntry.CanScatter,
+                           "can_trans": BatchReductionEntry.CanTransmission,
+                           "can_direct_beam": BatchReductionEntry.CanDirect,
+                           "user_file": BatchReductionEntry.UserFile}
+    batch_file_keywords_which_are_dropped = {"background_sans": None,
+                                             "background_trans": None,
+                                             "background_direct_beam": None}
+
+    data_keys = {BatchReductionEntry.SampleScatter: BatchReductionEntry.SampleScatterPeriod,
+                 BatchReductionEntry.SampleTransmission: BatchReductionEntry.SampleTransmissionPeriod,
+                 BatchReductionEntry.SampleDirect: BatchReductionEntry.SampleDirectPeriod,
+                 BatchReductionEntry.CanScatter: BatchReductionEntry.CanScatterPeriod,
+                 BatchReductionEntry.CanTransmission: BatchReductionEntry.CanTransmissionPeriod,
+                 BatchReductionEntry.CanDirect: BatchReductionEntry.CanDirectPeriod}
+
+    def __init__(self, batch_file_name):
+        super(BatchCsvParser, self).__init__()
+        # Get the full file path
+        self._batch_file_name = find_full_file_path(batch_file_name)
+        if not self._batch_file_name:
+            raise RuntimeError("batch_csv_file_parser: Could not find specified batch file. Make sure it is available"
+                               "in the Mantid path settings.")
+
+    def parse_batch_file(self):
+        """
+        Parses the batch csv file and returns the elements in a parsed form
+
+        Returns: parsed csv elements
+        """
+
+        parsed_rows = []
+
+        with open(self._batch_file_name, 'r') as csvfile:
+            batch_reader = reader(csvfile, delimiter=",")
+            row_number = 0
+            for row in batch_reader:
+                # Check if the row is empty
+                if not row:
+                    continue
+
+                # If the first element contains a # symbol then ignore it
+                if "MANTID_BATCH_FILE" in row[0]:
+                    continue
+
+                # Else we perform a parse of the row
+                parsed_row = self._parse_row(row, row_number)
+                parsed_rows.append(parsed_row)
+                row_number += 1
+        return parsed_rows
+
+    def _parse_row(self, row, row_number):
+        # Clean all elements of the row
+        row = list(map(str.strip, row))
+
+        # Go sequentially through the row with a stride of two. The user can either leave entries away, or he can leave
+        # them blank, ie ... , sample_direct_beam, , can_sans, XXXXX, ...  or even ..., , ,...
+        # This means we expect an even length of entries
+        if len(row) % 2 != 0:
+            raise RuntimeError("We expect an even number of entries, but row {0} has {1} entries.".format(row_number,
+                                                                                                          len(row)))
+        output = {}
+        # Special attention has to go to the specification of the period in a run number. The user can
+        # specify something like 5512p for sample scatter. This means she wants run number 5512 with period 7.
+        for key, value in zip(row[::2], row[1::2]):
+            if key in list(BatchCsvParser.batch_file_keywords.keys()):
+                new_key = BatchCsvParser.batch_file_keywords[key]
+                value = value.strip()
+                if BatchCsvParser._is_data_entry(new_key):
+                    run_number, period, period_key = BatchCsvParser._get_run_number_and_period(new_key, value)
+                    output.update({new_key: run_number})
+                    output.update({period_key: period})
+                else:
+                    output.update({new_key: value})
+            elif key in list(self.batch_file_keywords_which_are_dropped.keys()):
+                continue
+            else:
+                raise RuntimeError("The key {0} is not part of the SANS batch csv file keywords".format(key))
+
+        # Ensure that sample_scatter was set
+        if BatchReductionEntry.SampleScatter not in output or not output[BatchReductionEntry.SampleScatter]:
+            raise RuntimeError("The sample_scatter entry in row {0} seems to be missing.".format(row_number))
+
+        # Ensure that output_as was set
+        if BatchReductionEntry.Output not in output or not output[BatchReductionEntry.Output]:
+            raise RuntimeError("The output_as entry in row {0} seems to be missing.".format(row_number))
+
+        # Ensure that the transmission data for the sample is specified either completely or not at all.
+        has_sample_transmission = BatchReductionEntry.SampleTransmission in output and \
+                                  output[BatchReductionEntry.SampleTransmission]  # noqa
+        has_sample_direct_beam = BatchReductionEntry.SampleDirect in output and output[BatchReductionEntry.SampleDirect]
+
+        if (not has_sample_transmission and has_sample_direct_beam) or \
+                (has_sample_transmission and not has_sample_direct_beam):
+            raise RuntimeError("Inconsistent sample transmission settings in row {0}. Either both the transmission "
+                               "and the direct beam run are set or none.".format(row_number))
+
+        # Ensure that the transmission data for the can is specified either completely or not at all.
+        has_can_transmission = BatchReductionEntry.CanTransmission in output and \
+                               output[BatchReductionEntry.CanTransmission]  # noqa
+        has_can_direct_beam = BatchReductionEntry.CanDirect in output and output[BatchReductionEntry.CanDirect]
+
+        if (not has_can_transmission and has_can_direct_beam) or \
+                (has_can_transmission and not has_can_direct_beam):
+            raise RuntimeError("Inconsistent can transmission settings in row {0}. Either both the transmission "
+                               "and the direct beam run are set or none.".format(row_number))
+
+        # Ensure that can scatter is specified if the transmissions are set
+        has_can_scatter = BatchReductionEntry.CanScatter in output and output[BatchReductionEntry.CanScatter]
+        if not has_can_scatter and has_can_transmission:
+            raise RuntimeError("The can transmission was set but not the scatter file in row {0}.".format(row_number))
+        return output
+
+    @staticmethod
+    def _is_data_entry(entry):
+        data_entry_keys = list(BatchCsvParser.data_keys.keys())
+        for data_enum in data_entry_keys:
+            if entry is data_enum:
+                return True
+        return False
+
+    @staticmethod
+    def _get_run_number_and_period(data_type, entry):
+        """
+        Gets the run number and the period from a csv data entry.
+
+        @patam data_type: the type of data entry, e.g. BatchReductionEntry.SampleScatter
+        @param entry: a data entry, e.g. 5512 or 5512p7
+        @return: the run number, the period selection and the corresponding key word
+        """
+        data_period_type = BatchCsvParser.data_keys[data_type]
+
+        # Slice off period if it exists. If it does not exist, then the period is ALL_PERIODS
+        period_pattern = "[p,P][0-9]$"
+
+        has_period = re.search(period_pattern, entry)
+
+        period = ALL_PERIODS
+        run_number = entry
+        if has_period:
+            run_number = re.sub(period_pattern, "", entry)
+            period_partial = re.sub(run_number, "", entry)
+            period = re.sub("[p,P]", "", period_partial)
+            period = int(period)
+
+        return run_number, period, data_period_type
diff --git a/scripts/SANS/sans/command_interface/command_interface_functions.py b/scripts/SANS/sans/command_interface/command_interface_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..d38084b9ab244f730e6f2e1723782af2b41c3d9c
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/command_interface_functions.py
@@ -0,0 +1,18 @@
+from __future__ import (absolute_import, division, print_function)
+from mantid.kernel import Logger
+
+
+VERBOSE = False
+sans_log = Logger("SANS")
+
+
+# Print a message and log it if the
+def print_message(message, log=True, no_console=False):
+    if log and VERBOSE:
+        sans_log.notice(message)
+    if not no_console:
+        print(message)
+
+
+def warning_message(message):
+    sans_log.warning(message)
diff --git a/scripts/SANS/sans/command_interface/command_interface_state_director.py b/scripts/SANS/sans/command_interface/command_interface_state_director.py
new file mode 100644
index 0000000000000000000000000000000000000000..931172674aa076b1f1bbf227f063ca1af86d761e
--- /dev/null
+++ b/scripts/SANS/sans/command_interface/command_interface_state_director.py
@@ -0,0 +1,630 @@
+from __future__ import (absolute_import, division, print_function)
+from sans.common.enums import (serializable_enum, DataType)
+from sans.user_file.user_file_state_director import UserFileStateDirectorISIS
+from sans.state.data import get_data_builder
+from sans.user_file.user_file_parser import (UserFileParser)
+from sans.user_file.user_file_reader import (UserFileReader)
+from sans.user_file.user_file_common import (MonId, monitor_spectrum, OtherId, SampleId, GravityId, SetId, position_entry,
+                                             fit_general, FitId, monitor_file, mask_angle_entry, LimitsId, range_entry,
+                                             simple_range, DetectorId, event_binning_string_values, det_fit_range,
+                                             single_entry_with_detector)
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Commands
+# ----------------------------------------------------------------------------------------------------------------------
+
+
+# ------------------
+# IDs for commands. We use here serializable_enum since enum is not available in the current Python configuration.
+# ------------------
+@serializable_enum("sample_scatter", "sample_transmission", "sample_direct", "can_scatter", "can_transmission",
+                   "can_direct")
+class DataCommandId(object):
+    pass
+
+
+@serializable_enum("clean", "reduction_dimensionality", "compatibility_mode",  # Null Parameter commands
+                   "user_file", "mask", "sample_offset", "detector", "event_slices",  # Single parameter commands
+                   "flood_file", "wavelength_correction_file",  # Single parameter commands
+                   "user_specified_output_name", "user_specified_output_name_suffix",  # Single parameter commands
+                   "use_reduction_mode_as_suffix",  # Single parameter commands
+                   "incident_spectrum", "gravity",  # Double parameter commands
+                   "centre", "save",   # Three parameter commands
+                   "trans_fit", "phi_limit", "mask_radius", "wavelength_limit", "qxy_limit",  # Four parameter commands
+                   "wavrange_settings",  # Five parameter commands
+                   "front_detector_rescale",  # Six parameter commands
+                   "detector_offsets"  # Nine parameter commands
+                   )
+class NParameterCommandId(object):
+    pass
+
+
+class Command(object):
+    def __init__(self, command_id):
+        super(Command, self).__init__()
+        self.command_id = command_id
+
+
+class DataCommand(Command):
+    """
+    A command which is associated with setting data information.
+    """
+    def __init__(self, command_id, file_name, period=None):
+        super(DataCommand, self).__init__(command_id)
+        self.file_name = file_name
+        self.period = period
+
+
+class NParameterCommand(Command):
+    """
+    A command which has n parameters in a list.
+    """
+    def __init__(self, command_id, values):
+        super(NParameterCommand, self).__init__(command_id)
+        self.values = values
+
+
+class FitData(object):
+    """
+    Describes the fit mode. This is not part of the SANSType module since we only need it here. It is slightly
+    inconsistent but it is very localized.
+    """
+    class Sample(object):
+        pass
+
+    class Can(object):
+        pass
+
+    class Both(object):
+        pass
+
+
+# ----------------------------------------------------------------------------------------------------------------------
+# Command Interface State Director
+
+# Explanation of the implementation
+#
+# Previously the ISISCommandInterface just executed commands one after another. Settings were stored in the reduction
+# singleton. Once in a while the reduction singleton was reset.
+#
+# Here we need to have state director which builds the SANS state out of these legacy commands. Note that before we
+# can process any of the commands we need to find the data entries, since they drive the reduction.
+# All other commands should be setting the SANSState in order.
+# ----------------------------------------------------------------------------------------------------------------------
+class CommandInterfaceStateDirector(object):
+    def __init__(self, facility):
+        super(CommandInterfaceStateDirector, self).__init__()
+        self._commands = []
+        self._user_file_state_director = None
+
+        self._processed_state_settings = {}
+
+        self._facility = facility
+        self._method_map = None
+        self._set_up_method_map()
+
+    def add_command(self, command):
+        self._commands.append(command)
+
+    def clear_commands(self):
+        self._commands = []
+        self._processed_state_settings = {}
+
+    def process_commands(self):
+        """
+        Here we process the commands that have been set. This would be triggered by a command which requests a reduction
+
+        The execution strategy is:
+        1. Find the data entries and great a SANSStateData object out of them
+        2. Go sequentially through the commands in a FIFO manner (except for the data entries)
+        3. Delete the processed state settings. We only need to retain the commands. If we also retain the
+           processed state settings then we will populate some entries twice.
+        4. Returns the constructed state
+        @returns a list of valid SANSState object which can be used for data reductions or raises an exception.
+        """
+        # 1. Get a SANSStateData object.
+        data_state = self._get_data_state()
+
+        # 2. Go through
+        state = self._process_command_queue(data_state)
+
+        # 3. Leave commands in place put clear the list of processed commands, else they will be reused.
+        self._processed_state_settings = {}
+
+        # 4. Provide the state
+        return state
+
+    def get_commands(self):
+        return self._commands
+
+    def _get_data_state(self):
+        # Get the data commands
+        data_commands = self._get_data_commands()
+
+        # Build the state data
+        data_builder = get_data_builder(self._facility)
+        self._set_data_element(data_builder.set_sample_scatter, data_builder.set_sample_scatter_period,
+                               DataCommandId.sample_scatter, data_commands)
+        self._set_data_element(data_builder.set_sample_transmission, data_builder.set_sample_transmission_period,
+                               DataCommandId.sample_transmission, data_commands)
+        self._set_data_element(data_builder.set_sample_direct, data_builder.set_sample_direct_period,
+                               DataCommandId.sample_direct, data_commands)
+        self._set_data_element(data_builder.set_can_scatter, data_builder.set_can_scatter_period,
+                               DataCommandId.can_scatter, data_commands)
+        self._set_data_element(data_builder.set_can_transmission, data_builder.set_can_transmission_period,
+                               DataCommandId.can_transmission, data_commands)
+        self._set_data_element(data_builder.set_can_direct, data_builder.set_can_direct_period,
+                               DataCommandId.can_direct, data_commands)
+
+        return data_builder.build()
+
+    def _get_data_commands(self):
+        """
+        Grabs and removes the data commands from the command queue.
+
+        @return: a list of data commands
+        """
+        # Grab the data commands
+        data_commands = [element for element in self._commands if isinstance(element, DataCommand)]
+        return data_commands
+
+    def _set_data_element(self, data_builder_file_setter, data_builder_period_setter, command_id, commands):
+        """
+        Sets a data element (e.g. sample scatter file and sample scatter period) on the data builder.
+
+        @param data_builder_file_setter: a handle to the correct setter for the file on the data builder.
+        @param data_builder_period_setter: a handle to the correct setter for the period on the data builder.
+        @param command_id: the command id
+        @param commands: a list of commands.
+        """
+        data_elements = self._get_elements_with_key(command_id, commands)
+
+        # If there is no element, then there is nothing to do
+        if len(data_elements) == 0:
+            return
+
+        # If there is more than one element, then we are only interested in the last element. The user could
+        # have overriden his wishes, e.g.
+        # ...
+        # AssignSample('SANS2D1234')
+        # ...
+        # AssignSample('SANS2D4321')
+        # ...
+        # We select therefore the last element
+        data_element = data_elements[-1]
+        file_name = data_element.file_name
+        period = data_element.period
+        data_builder_file_setter(file_name)
+        data_builder_period_setter(period)
+
+    @staticmethod
+    def _get_elements_with_key(command_id, command_list):
+        """
+        Get all elements in the command list with a certain id
+
+        @param command_id: the id of the command.
+        @param command_list: a list of commands.
+        @return: a list of commands which match the id.
+        """
+        return [element for element in command_list if element.command_id is command_id]
+
+    def _process_command_queue(self, data_state):
+        """
+        Process the command queue sequentially as FIFO structure
+
+        @param data_state: the data state.
+        @return: a SANSState object.
+        """
+        self._user_file_state_director = UserFileStateDirectorISIS(data_state)
+
+        # If we have a clean instruction in there, then we should apply it to all commands
+        self._apply_clean_if_required()
+
+        # Evaluate all commands which adds them to the _processed_state_settings dictionary,
+        # except for DataCommands which we deal with separately
+        for command in self._commands:
+            if isinstance(command, DataCommand):
+                continue
+            command_id = command.command_id
+            process_function = self._method_map[command_id]
+            process_function(command)
+
+        # The user file state director
+        self._user_file_state_director.add_state_settings(self._processed_state_settings)
+        return self._user_file_state_director.construct()
+
+    def _set_up_method_map(self):
+        """
+        Sets up a mapping between command ids and the adequate processing methods which can handle the command.
+        """
+        self._method_map = {NParameterCommandId.user_file: self._process_user_file,
+                            NParameterCommandId.mask: self._process_mask,
+                            NParameterCommandId.incident_spectrum: self._process_incident_spectrum,
+                            NParameterCommandId.clean: self._process_clean,
+                            NParameterCommandId.reduction_dimensionality: self._process_reduction_dimensionality,
+                            NParameterCommandId.sample_offset: self._process_sample_offset,
+                            NParameterCommandId.detector: self._process_detector,
+                            NParameterCommandId.gravity: self._process_gravity,
+                            NParameterCommandId.centre: self._process_centre,
+                            NParameterCommandId.trans_fit: self._process_trans_fit,
+                            NParameterCommandId.front_detector_rescale: self._process_front_detector_rescale,
+                            NParameterCommandId.event_slices: self._process_event_slices,
+                            NParameterCommandId.flood_file: self._process_flood_file,
+                            NParameterCommandId.phi_limit: self._process_phi_limit,
+                            NParameterCommandId.wavelength_correction_file: self._process_wavelength_correction_file,
+                            NParameterCommandId.mask_radius: self._process_mask_radius,
+                            NParameterCommandId.wavelength_limit: self._process_wavelength_limit,
+                            NParameterCommandId.qxy_limit: self._process_qxy_limit,
+                            NParameterCommandId.wavrange_settings: self._process_wavrange,
+                            NParameterCommandId.compatibility_mode: self._process_compatibility_mode,
+                            NParameterCommandId.detector_offsets: self._process_detector_offsets,
+                            NParameterCommandId.save: self._process_save,
+                            NParameterCommandId.user_specified_output_name: self._process_user_specified_output_name,
+                            NParameterCommandId.user_specified_output_name_suffix:
+                                self._process_user_specified_output_name_suffix,
+                            NParameterCommandId.use_reduction_mode_as_suffix:
+                                self._process_use_reduction_mode_as_suffix
+                            }
+
+    def add_to_processed_state_settings(self, new_state_settings, treat_list_as_element=False):
+        """
+        Adds the new entries to the already processed state settings
+
+        @param new_state_settings: a dictionary with new entries for the processed state settings
+        @param treat_list_as_element: if we have a list and add it for the fist time, then we should treat it as an
+                                      element if true. For example, if the state is [1, 2] the a new settint would, be
+                                      [[1, 2,]] and not [1, 2]. With a further entry it could be [[1,2], [3,4]].
+        """
+        for key, value in list(new_state_settings.items()):
+            # Add the new entry
+            # 1. A similar entry can already exist, then append it (or extend it)
+            # 2. The entry does not exist, but it is in form of a list (you would get that for example when
+            #    dealing with input from the UserFileReader
+            # 3. The entry does not exist and is not in a list. In this case we need to add it to a list.
+            if key in self._processed_state_settings:
+                # If the key already exists then we have to be careful. We have the current value V = [A, B, ...]
+                # and our new element N
+                # i. If the existing entries (ie A, ...) are not lists and N is not a list, then append to V.
+                # ii. If the existing entries (ie A, ...) are not lists and N is a list then extend V.
+                # iii. If the existing entries (ie A, ...) are lists and N is a list then append to V.
+                # iv. If the existing entries (ie A, ...) are lists and N is not a list, then raise
+                # The reason we have to be careful is that we might get an N from a user file which comes always already
+                # in the form of a list.
+                old_values = self._processed_state_settings[key]
+                is_old_first_entry_a_list = isinstance(old_values[0], list)
+                is_new_entry_a_list = isinstance(value, list)
+
+                if not is_old_first_entry_a_list and not is_new_entry_a_list:
+                    old_values.append(value)
+                elif not is_old_first_entry_a_list and is_new_entry_a_list:
+                    old_values.extend(value)
+                elif is_old_first_entry_a_list and is_new_entry_a_list:
+                    old_values.append(value)
+                else:
+                    raise RuntimeError("CommandInterfaceStateDirector: Trying to insert {0} which is a list into {0} "
+                                       "which is collection of non-list elements".format(value, old_values))
+            elif isinstance(value, list) and treat_list_as_element:
+                self._processed_state_settings.update({key: [value]})
+            elif isinstance(value, list):
+                self._processed_state_settings.update({key: value})
+            else:
+                self._processed_state_settings.update({key: [value]})
+
+    def _process_user_file(self, command):
+        """
+        Processes a user file and retain the parased tags
+
+        @param command: the command with the user file path
+        """
+        file_name = command.values[0]
+        user_file_reader = UserFileReader(file_name)
+        new_state_entries = user_file_reader.read_user_file()
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_mask(self, command):
+        """
+        We need to process a mask line as specified in the user file.
+        """
+        mask_command = command.values[0]
+        # Use the user file parser to extract the values from the user file setting.
+        user_file_parser = UserFileParser()
+        parsed_output = user_file_parser.parse_line(mask_command)
+        self.add_to_processed_state_settings(parsed_output)
+
+    def _process_incident_spectrum(self, command):
+        incident_monitor = command.values[0]
+        interpolate = command.values[1]
+        is_trans = command.values[2]
+        new_state_entries = {MonId.spectrum: monitor_spectrum(spectrum=incident_monitor,
+                                                              is_trans=is_trans,
+                                                              interpolate=interpolate)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _apply_clean_if_required(self):
+        """
+        The cleans all commands up to the clean command point.
+
+        We have to do this clean before we start processing the elements.
+        """
+        index_first_clean_command = None
+        for index in reversed(list(range(0, len(self._commands)))):
+            element = self._commands[index]
+            if element.command_id == NParameterCommandId.clean:
+                index_first_clean_command = index
+                break
+        if index_first_clean_command is not None:
+            del(self._commands[0:(index_first_clean_command + 1)])
+            self._processed_state_settings = {}
+
+    def _process_clean(self, command):
+        _ = command  # noqa
+        raise RuntimeError("Trying the process a Clean command. The clean command should have removed itself and "
+                           "all previous commands. If it is still here, then this is a bug")
+
+    def _process_reduction_dimensionality(self, command):
+        _ = command  # noqa
+        reduction_dimensionality = command.values[0]
+        new_state_entries = {OtherId.reduction_dimensionality: reduction_dimensionality}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_sample_offset(self, command):
+        sample_offset = command.values[0]
+        new_state_entries = {SampleId.offset: sample_offset}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_detector(self, command):
+        reduction_mode = command.values[0]
+        new_state_entries = {DetectorId.reduction_mode: reduction_mode}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_gravity(self, command):
+        use_gravity = command.values[0]
+        extra_length = command.values[1]
+        new_state_entries = {GravityId.on_off: use_gravity,
+                             GravityId.extra_length: extra_length}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_centre(self, command):
+        pos1 = command.values[0]
+        pos2 = command.values[1]
+        detector_type = command.values[2]
+        new_state_entries = {SetId.centre: position_entry(pos1=pos1, pos2=pos2, detector_type=detector_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_trans_fit(self, command):
+        def fit_type_to_data_type(fit_type_to_convert):
+            return DataType.Can if fit_type_to_convert is FitData.Can else DataType.Sample
+
+        fit_data = command.values[0]
+        wavelength_low = command.values[1]
+        wavelength_high = command.values[2]
+        fit_type = command.values[3]
+        polynomial_order = command.values[4]
+        if fit_data is FitData.Both:
+            data_to_fit = [FitData.Sample, FitData.Can]
+        else:
+            data_to_fit = [fit_data]
+
+        new_state_entries = {}
+        for element in data_to_fit:
+            data_type = fit_type_to_data_type(element)
+            new_state_entries.update({FitId.general: fit_general(start=wavelength_low, stop=wavelength_high,
+                                                                 fit_type=fit_type, data_type=data_type,
+                                                                 polynomial_order=polynomial_order)})
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_front_detector_rescale(self, command):
+        scale = command.values[0]
+        shift = command.values[1]
+        fit_scale = command.values[2]
+        fit_shift = command.values[3]
+        q_min = command.values[4]
+        q_max = command.values[5]
+
+        # Set the scale and the shift
+        new_state_entries = {DetectorId.rescale: scale, DetectorId.shift: shift}
+
+        # Set the fit fot the scale
+        new_state_entries.update({DetectorId.rescale_fit: det_fit_range(start=q_min, stop=q_max, use_fit=fit_scale)})
+
+        # Set the fit for shift
+        new_state_entries.update({DetectorId.shift_fit: det_fit_range(start=q_min, stop=q_max, use_fit=fit_shift)})
+
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_event_slices(self, command):
+        event_slice_value = command.values
+        new_state_entries = {OtherId.event_slices: event_binning_string_values(value=event_slice_value)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_flood_file(self, command):
+        file_path = command.values[0]
+        detector_type = command.values[1]
+        new_state_entries = {MonId.flat: monitor_file(file_path=file_path, detector_type=detector_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_phi_limit(self, command):
+        phi_min = command.values[0]
+        phi_max = command.values[1]
+        use_phi_mirror = command.values[2]
+        new_state_entries = {LimitsId.angle: mask_angle_entry(min=phi_min, max=phi_max, use_mirror=use_phi_mirror)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_wavelength_correction_file(self, command):
+        file_path = command.values[0]
+        detector_type = command.values[1]
+        new_state_entries = {MonId.direct: monitor_file(file_path=file_path, detector_type=detector_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_mask_radius(self, command):
+        radius_min = command.values[0]
+        radius_max = command.values[1]
+        new_state_entries = {LimitsId.radius: range_entry(start=radius_min, stop=radius_max)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_wavelength_limit(self, command):
+        wavelength_low = command.values[0]
+        wavelength_high = command.values[1]
+        wavelength_step = command.values[2]
+        wavelength_step_type = command.values[3]
+        new_state_entries = {LimitsId.wavelength: simple_range(start=wavelength_low, stop=wavelength_high,
+                                                               step=wavelength_step, step_type=wavelength_step_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_wavrange(self, command):
+        wavelength_low = command.values[0]
+        wavelength_high = command.values[1]
+        full_wavelength_range = command.values[2]
+        reduction_mode = command.values[3]
+
+        # Update the lower and the upper wavelength values. Note that this is considered an incomplete setting, since
+        # not step or step type have been specified. This means we need to update one of the processed commands, which
+        # is not nice but the command interface forces us to do so. We take a copy of the last LimitsId.wavelength
+        # entry, we copy it and then change the desired settings. This means it has to be set at this point, else
+        # something is wrong
+        if LimitsId.wavelength in self._processed_state_settings:
+            last_entry = self._processed_state_settings[LimitsId.wavelength][-1]
+
+            new_wavelength_low = wavelength_low if wavelength_low is not None else last_entry.start
+            new_wavelength_high = wavelength_high if wavelength_high is not None else last_entry.stop
+            new_range = simple_range(start=new_wavelength_low, stop=new_wavelength_high, step=last_entry.step,
+                                     step_type=last_entry.step_type)
+
+            if wavelength_low is not None or wavelength_high is not None:
+                copied_entry = {LimitsId.wavelength: new_range}
+                self.add_to_processed_state_settings(copied_entry)
+        else:
+            raise RuntimeError("CommandInterfaceStateDirector: Setting the lower and upper wavelength bounds is not"
+                               " possible. We require also a step and step range")
+
+        if full_wavelength_range is not None:
+            full_wavelength_range_entry = {OtherId.use_full_wavelength_range: full_wavelength_range}
+            self.add_to_processed_state_settings(full_wavelength_range_entry)
+
+        if reduction_mode is not None:
+            reduction_mode_entry = {DetectorId.reduction_mode: reduction_mode}
+            self.add_to_processed_state_settings(reduction_mode_entry)
+
+    def _process_qxy_limit(self, command):
+        q_min = command.values[0]
+        q_max = command.values[1]
+        q_step = command.values[2]
+        q_step_type = command.values[3]
+        new_state_entries = {LimitsId.qxy: simple_range(start=q_min, stop=q_max, step=q_step, step_type=q_step_type)}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_compatibility_mode(self, command):
+        use_compatibility_mode = command.values[0]
+        new_state_entries = {OtherId.use_compatibility_mode: use_compatibility_mode}
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_detector_offsets(self, command):
+        detector_type = command.values[0]
+        x = command.values[1]
+        y = command.values[2]
+        z = command.values[3]
+        rotation = command.values[4]
+        radius = command.values[5]
+        side = command.values[6]
+        x_tilt = command.values[7]
+        y_tilt = command.values[8]
+
+        # Set the offsets
+        new_state_entries = {DetectorId.correction_x: single_entry_with_detector(entry=x, detector_type=detector_type),
+                             DetectorId.correction_y: single_entry_with_detector(entry=y, detector_type=detector_type),
+                             DetectorId.correction_z: single_entry_with_detector(entry=z, detector_type=detector_type),
+                             DetectorId.correction_rotation:
+                                 single_entry_with_detector(entry=rotation, detector_type=detector_type),
+                             DetectorId.correction_radius:
+                                 single_entry_with_detector(entry=radius, detector_type=detector_type),
+                             DetectorId.correction_translation:
+                                 single_entry_with_detector(entry=side, detector_type=detector_type),
+                             DetectorId.correction_x_tilt:
+                                 single_entry_with_detector(entry=x_tilt, detector_type=detector_type),
+                             DetectorId.correction_y_tilt:
+                                 single_entry_with_detector(entry=y_tilt, detector_type=detector_type),
+                             }
+        self.add_to_processed_state_settings(new_state_entries)
+
+    def _process_save(self, command):
+        save_algorithms = command.values[0]
+        save_as_zero_error_free = command.values[1]
+        new_state_entries = {OtherId.save_types: save_algorithms,
+                             OtherId.save_as_zero_error_free: save_as_zero_error_free}
+        self.add_to_processed_state_settings(new_state_entries,  treat_list_as_element=True)
+
+    def _process_user_specified_output_name(self, command):
+        user_specified_output_name = command.values[0]
+        new_state_entry = {OtherId.user_specified_output_name: user_specified_output_name}
+        self.add_to_processed_state_settings(new_state_entry)
+
+    def _process_user_specified_output_name_suffix(self, command):
+        user_specified_output_name_suffix = command.values[0]
+        new_state_entry = {OtherId.user_specified_output_name_suffix: user_specified_output_name_suffix}
+        self.add_to_processed_state_settings(new_state_entry)
+
+    def _process_use_reduction_mode_as_suffix(self, command):
+        use_reduction_mode_as_suffix = command.values[0]
+        new_state_entry = {OtherId.use_reduction_mode_as_suffix: use_reduction_mode_as_suffix}
+        self.add_to_processed_state_settings(new_state_entry)
+
+    def remove_last_user_file(self):
+        """
+        Removes the last added user file from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(NParameterCommandId.user_file)
+
+    def remove_last_scatter_sample(self):
+        """
+        Removes the last added scatter sample from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.sample_scatter)
+
+    def remove_last_sample_transmission_and_direct(self):
+        """
+        Removes the last added scatter transmission and direct from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.sample_transmission)
+        self._remove_last_element(DataCommandId.sample_direct)
+
+    def remove_last_scatter_can(self):
+        """
+        Removes the last added scatter can from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.can_scatter)
+
+    def remove_last_can_transmission_and_direct(self):
+        """
+        Removes the last added can transmission and direct from the commands.
+
+        See _remove_last_element for further explanation.
+        """
+        self._remove_last_element(DataCommandId.can_transmission)
+        self._remove_last_element(DataCommandId.can_direct)
+
+    def _remove_last_element(self, command_id):
+        """
+        Removes the last instance of a command associated with the command_id.
+
+        This method is vital for batch reduction.
+        TODO: more explanation
+        @param command_id: the command_id of the command which whose last instance we want to remove
+        """
+        index_to_remove = None
+        for index, element in reversed(list(enumerate(self._commands))):
+            if element.command_id == command_id:
+                index_to_remove = index
+                break
+        if index_to_remove is not None:
+            del(self._commands[index_to_remove])
+        else:
+            raise RuntimeError("Tried to delete the last instance of {0}, but none was present in the list of "
+                               "commands".format(command_id))
diff --git a/scripts/SANS/sans/sans_batch.py b/scripts/SANS/sans/sans_batch.py
new file mode 100644
index 0000000000000000000000000000000000000000..d363a3af68e3a14d6f2b5f1e56cb28ffe4b48ad7
--- /dev/null
+++ b/scripts/SANS/sans/sans_batch.py
@@ -0,0 +1,69 @@
+# pylint: disable=invalid-name
+""" SANBatchReduction algorithm is the starting point for any new type reduction, event single reduction"""
+from __future__ import (absolute_import, division, print_function)
+from sans.state.state import State
+from sans.algorithm_detail.batch_execution import (single_reduction_for_batch)
+from sans.common.enums import (OutputMode)
+
+
+class SANSBatchReduction(object):
+    def __init__(self):
+        super(SANSBatchReduction, self).__init__()
+
+    def __call__(self, states, use_optimizations=True, output_mode=OutputMode.PublishToADS):
+        """
+        This is the start of any reduction.
+
+        :param states: This is a list of sans states. Each state in the list corresponds to a single reduction.
+        :param use_optimizations: if True then the optimizations for file reloading are used.
+        :param output_mode: The output mode defines how the reduced data should be published. This can be
+                            1. PublishToADS
+                            2. SaveToFile
+                            3. Both
+        """
+        self.validate_inputs(states, use_optimizations, output_mode)
+
+        self._execute(states, use_optimizations, output_mode)
+
+    @staticmethod
+    def _execute(states, use_optimizations, output_mode):
+        # Iterate over each state, load the data and perform the reduction
+        for state in states:
+            single_reduction_for_batch(state, use_optimizations, output_mode)
+
+    def validate_inputs(self, states, use_optimizations, output_mode):
+        # We are strict about the types here.
+        # 1. states has to be a list of sans state objects
+        # 2. use_optimizations has to be bool
+        # 3. output_mode has to be an OutputMode enum
+        if not isinstance(states, list):
+            raise RuntimeError("The provided states are not in a list. They have to be in a list.")
+
+        for state in states:
+            if not isinstance(state, State):
+                raise RuntimeError("The entries have to be sans state objects. "
+                                   "The provided type is {0}".format(type(state)))
+
+        if not isinstance(use_optimizations, bool):
+            raise RuntimeError("The optimization has to be a boolean. The provided type is"
+                               " {0}".format(type(use_optimizations)))
+
+        if output_mode is not OutputMode.PublishToADS and output_mode is not OutputMode.SaveToFile and\
+                        output_mode is not OutputMode.Both:  # noqa
+            raise RuntimeError("The output mode has to be an enum of type OutputMode. The provided type is"
+                               " {0}".format(type(output_mode)))
+
+        errors = self._validate_inputs(states)
+        if errors:
+            raise RuntimeError("The provided states are not valid: {}".format(errors))
+
+    @staticmethod
+    def _validate_inputs(states):
+        errors = dict()
+        # Check that the input can be converted into the right state object
+        try:
+            for state in states:
+                state.validate()
+        except ValueError as err:
+            errors.update({"SANSBatchReduction": str(err)})
+        return errors
diff --git a/scripts/test/CrystalFieldTest.py b/scripts/test/CrystalFieldTest.py
index f2592cc700d94591ac52ddb0b24583d29c2db721..4335307448c4bebc32059000d0dfefc11d706a0a 100644
--- a/scripts/test/CrystalFieldTest.py
+++ b/scripts/test/CrystalFieldTest.py
@@ -9,41 +9,10 @@ from scipy.constants import physical_constants
 import mantid
 from CrystalField.energies import energies
 from mantid.simpleapi import CalculateChiSquared, EvaluateFunction, mtd
-from mantid.kernel import ConfigService
 
 c_mbsr = 79.5774715459  # Conversion from barn to mb/sr
 
 
-class BackgroundTest(unittest.TestCase):
-
-    def setUp(self):
-        self.peakRadius = ConfigService.getString('curvefitting.peakRadius')
-
-    def tearDown(self):
-        ConfigService.setString('curvefitting.peakRadius', self.peakRadius)
-
-    def test_mul(self):
-        from CrystalField import Background, Function
-        b = Background(peak=Function('PseudoVoigt', Height=10, FWHM=1, Mixing=0.5),
-                       background=Function('LinearBackground', A0=1.0, A1=0.1)) * 3
-        self.assertEqual(len(b), 3)
-        self.assertTrue(isinstance(b[0], Background))
-        self.assertTrue(isinstance(b[1], Background))
-        self.assertTrue(isinstance(b[2], Background))
-        b[0].peak.param['Height'] = 31
-        b[1].peak.param['Height'] = 41
-        b[2].peak.param['Height'] = 51
-        self.assertEqual(b[0].peak.param['Height'], 31)
-        self.assertEqual(b[1].peak.param['Height'], 41)
-        self.assertEqual(b[2].peak.param['Height'], 51)
-        b[0].background.param['A1'] = 3
-        b[1].background.param['A1'] = 4
-        b[2].background.param['A1'] = 5
-        self.assertEqual(b[0].background.param['A1'], 3)
-        self.assertEqual(b[1].background.param['A1'], 4)
-        self.assertEqual(b[2].background.param['A1'], 5)
-
-
 class CrystalFieldTests(unittest.TestCase):
 
     def _do_test_eigensystem(self, en, wf, ham):
@@ -134,7 +103,6 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertAlmostEqual(ev[4], 3.85696607, 8)
         self.assertAlmostEqual(ev[5], 3.85696607, 8)
 
-
     def test_api_CrystalField_peaks_list(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
@@ -170,34 +138,24 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertAlmostEqual(pl2[0, 2], 2.41303393, 8)
         self.assertAlmostEqual(pl2[1, 2], 0.38262684*c_mbsr, 6)
 
-    def test_PeaksFunction(self):
-        from CrystalField import PeaksFunction
-        pf = PeaksFunction('Gaussian')
-        pf.param[0]['Sigma'] = 1.1
-        pf.attr[0]['SomeAttr'] = 'Hello'
-        pf.param[1]['Sigma'] = 2.1
-        pf.param[1]['Height'] = 100
-        self.assertEqual(pf.paramString(), 'f0.SomeAttr=Hello,f0.Sigma=1.1,f1.Height=100,f1.Sigma=2.1')
-        self.assertEqual(pf.toString(), 'name=Gaussian,SomeAttr=Hello,Sigma=1.1;name=Gaussian,Height=100,Sigma=2.1')
-
     def test_api_CrystalField_spectrum(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=[4.0, 50.0], FWHM=[0.1, 0.2], ToleranceIntensity=0.001*c_mbsr)
         x, y = cf.getSpectrum(0)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[60], 5.52333486, 8)
-        self.assertAlmostEqual(y[61], 10.11673418, 8)
-        self.assertAlmostEqual(y[62], 12.1770908, 8)
-        self.assertAlmostEqual(y[63], 7.63981716, 8)
-        self.assertAlmostEqual(y[64], 4.08015236, 8)
+        self.assertAlmostEqual(y[60], 5.5233309477919823, 8)
+        self.assertAlmostEqual(y[61], 10.116727004063931, 8)
+        self.assertAlmostEqual(y[62], 12.177082168362135, 8)
+        self.assertAlmostEqual(y[63], 7.6398117443793403, 8)
+        self.assertAlmostEqual(y[64], 4.0801494675760672, 8)
         x, y = cf.getSpectrum(1)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[45], 0.29822612216224065, 8)
-        self.assertAlmostEqual(y[46], 0.46181038787922241, 8)
-        self.assertAlmostEqual(y[47], 0.66075719314988057, 8)
-        self.assertAlmostEqual(y[48], 0.69469096259927476, 8)
-        self.assertAlmostEqual(y[49], 0.51364268980567007, 8)
+        self.assertAlmostEqual(y[45], 0.29821516329781927, 8)
+        self.assertAlmostEqual(y[46], 0.46179337379270108, 8)
+        self.assertAlmostEqual(y[47], 0.66074332157852089, 8)
+        self.assertAlmostEqual(y[48], 0.69469960124931895, 8)
+        self.assertAlmostEqual(y[49], 0.51366004798691856, 8)
 
     def test_api_CrystalField_spectrum_from_list(self):
         from CrystalField import CrystalField
@@ -213,11 +171,11 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertEqual(x[3], 3.0)
         self.assertEqual(x[4], 3.85)
 
-        self.assertAlmostEqual(y[0], 12.474954833565066, 6)
-        self.assertAlmostEqual(y[1], 1.1901690051585272, 6)
-        self.assertAlmostEqual(y[2], 0.12278091428521705, 6)
+        self.assertAlmostEqual(y[0], 12.474945990071641, 6)
+        self.assertAlmostEqual(y[1], 1.190159993510953, 6)
+        self.assertAlmostEqual(y[2], 0.12278465143339329, 6)
         self.assertAlmostEqual(y[3], 0.042940202606241519, 6)
-        self.assertAlmostEqual(y[4], 10.837438382097396, 6)
+        self.assertAlmostEqual(y[4], 10.83716957556323, 6)
 
         x, y = cf.getSpectrum(1, r)
         y = y / c_mbsr
@@ -227,11 +185,11 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertEqual(x[3], 3.0)
         self.assertEqual(x[4], 3.85)
 
-        self.assertAlmostEqual(y[0], 6.3046701386938624, 8)
-        self.assertAlmostEqual(y[1], 0.33121919026244667, 8)
-        self.assertAlmostEqual(y[2], 1.2246681560002572, 8)
-        self.assertAlmostEqual(y[3], 0.078541076629159004, 8)
-        self.assertAlmostEqual(y[4], 2.6380618652343704, 8)
+        self.assertAlmostEqual(y[0], 6.3046623789675627, 8)
+        self.assertAlmostEqual(y[1], 0.33121840136135056, 8)
+        self.assertAlmostEqual(y[2], 1.2246810731541884, 8)
+        self.assertAlmostEqual(y[3], 0.078540347981549338, 8)
+        self.assertAlmostEqual(y[4], 2.6380494258301161, 8)
 
     def test_api_CrystalField_spectrum_0(self):
         from CrystalField import CrystalField
@@ -258,24 +216,24 @@ class CrystalFieldTests(unittest.TestCase):
 
         x, y = cf.getSpectrum(0, workspace)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[0], 12.474954833565066, 6)
-        self.assertAlmostEqual(y[1], 4.3004160689570403, 6)
-        self.assertAlmostEqual(y[2], 1.4523089577890338, 6)
+        self.assertAlmostEqual(y[0], 12.474945990071641, 6)
+        self.assertAlmostEqual(y[1], 4.3004130214544389, 6)
+        self.assertAlmostEqual(y[2], 1.4523079303712476, 6)
         self.assertAlmostEqual(y[3], 0.6922657279528992, 6)
         self.assertAlmostEqual(y[4], 0.40107924259746491, 6)
         self.assertAlmostEqual(y[15], 0.050129858433581413, 6)
         self.assertAlmostEqual(y[16], 0.054427788297191478, 6)
         x, y = cf.getSpectrum(1, workspace)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[0], 6.3046701386938624, 6)
-        self.assertAlmostEqual(y[1], 4.2753076741531455, 6)
-        self.assertAlmostEqual(y[2], 2.1778230746690772, 6)
-        self.assertAlmostEqual(y[3], 1.2011188019120242, 6)
-        self.assertAlmostEqual(y[4], 0.74036819427919942, 6)
+        self.assertAlmostEqual(y[0], 6.3046623789675627, 6)
+        self.assertAlmostEqual(y[1], 4.2753024205094912, 6)
+        self.assertAlmostEqual(y[2], 2.1778204115683644, 6)
+        self.assertAlmostEqual(y[3], 1.2011173460849718, 6)
+        self.assertAlmostEqual(y[4], 0.74036730921135963, 6)
         x, y = cf.getSpectrum(workspace)
         y = y / c_mbsr
-        self.assertAlmostEqual(y[0], 12.474954833565066, 6)
-        self.assertAlmostEqual(y[1], 4.3004160689570403, 6)
+        self.assertAlmostEqual(y[0], 12.474945990071641, 6)
+        self.assertAlmostEqual(y[1], 4.3004130214544389, 6)
         workspace = CreateWorkspace(x, y, e, 2)
         x, y = cf.getSpectrum(workspace, 1)
         y = y / c_mbsr
@@ -286,11 +244,22 @@ class CrystalFieldTests(unittest.TestCase):
         from CrystalField import CrystalField, PeaksFunction
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=10.0, FWHM=0.1)
-        cf.peaks = PeaksFunction('Gaussian')
+        cf.PeakShape = 'Gaussian'
         cf.peaks.param[1]['Sigma'] = 0.05
         cf.peaks.param[2]['Sigma'] = 0.1
         cf.peaks.param[3]['Sigma'] = 0.2
         cf.peaks.param[4]['Sigma'] = 0.3
+
+        self.assertEqual(cf.peaks.param[1]['Sigma'], 0.05)
+        self.assertEqual(cf.peaks.param[2]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks.param[3]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks.param[4]['Sigma'], 0.3)
+
+        self.assertEqual(cf.function.getParameterValue('f1.Sigma'), 0.05)
+        self.assertEqual(cf.function.getParameterValue('f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f4.Sigma'), 0.3)
+
         x, y = cf.getSpectrum()
         y = y / c_mbsr
         self.assertAlmostEqual(y[123], 0.067679792127989441, 8)
@@ -299,80 +268,211 @@ class CrystalFieldTests(unittest.TestCase):
     def test_api_CrystalField_spectrum_peaks_multi(self):
         from CrystalField import CrystalField, PeaksFunction
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
-                          Temperature=[10.0, 10.0], FWHM=1.0)
-        cf.setPeaks('Gaussian')
+                          Temperature=[10.0, 10.0], FWHM=[1.0, 1.0])
+        cf.PeakShape = 'Gaussian'
         cf.peaks[0].param[1]['Sigma'] = 0.1
         cf.peaks[0].param[2]['Sigma'] = 0.2
         cf.peaks[0].param[3]['Sigma'] = 0.3
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+
         x0, y0 = cf.getSpectrum()
         x1, y1 = cf.getSpectrum(1)
         y0 = y0 / c_mbsr
         y1 = y1 / c_mbsr
-        self.assertAlmostEqual(y0[139], 0.094692329804360792, 8)
-        self.assertAlmostEqual(y0[142], 0.07623409141946233, 8)
-        self.assertAlmostEqual(y1[139], 0.16332256923203797, 8)
-        self.assertAlmostEqual(y1[142], 0.16601423535307261, 8)
+        self.assertAlmostEqual(y0[139], 0.069849134145611211, 8)
+        self.assertAlmostEqual(y0[142], 0.049105825374702927, 8)
+        self.assertAlmostEqual(y1[139], 0.17385222868511149, 8)
+        self.assertAlmostEqual(y1[142], 0.17671738547959939, 8)
 
     def test_api_CrystalField_spectrum_background(self):
         from CrystalField import CrystalField, PeaksFunction, Background, Function
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=10.0, FWHM=0.1)
-        cf.peaks = PeaksFunction('Gaussian')
+        cf.PeakShape = 'Gaussian'
         cf.peaks.param[1]['Sigma'] = 0.1
         cf.peaks.param[2]['Sigma'] = 0.2
         cf.peaks.param[3]['Sigma'] = 0.3
         cf.background = Background(peak=Function('PseudoVoigt', Height=10*c_mbsr, FWHM=1, Mixing=0.5),
                                    background=Function('LinearBackground', A0=1.0*c_mbsr, A1=0.1*c_mbsr))
+        self.assertEqual(cf.background.peak.param['Mixing'], 0.5)
+        self.assertAlmostEqual(cf.background.background.param['A0'], 1.0*c_mbsr, 4)
+        self.assertEqual(cf.peaks.param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks.param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks.param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f1.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f1.f2.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f1.f3.Sigma'), 0.3)
+
         x, y = cf.getSpectrum()
         y = y / c_mbsr
-        self.assertAlmostEqual(y[80], 2.5853135104737239, 8)
-        self.assertAlmostEqual(y[90], 6.6726231052015859, 8)
+        self.assertAlmostEqual(y[80], 2.5853144348907442, 8)
+        self.assertAlmostEqual(y[90], 6.6726254910965057, 8)
+
+    def test_api_CrystalField_spectrum_background_no_peak(self):
+        from CrystalField import CrystalField, PeaksFunction, Background, Function
+        cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                          Temperature=10.0, FWHM=0.1)
+        cf.PeakShape = 'Gaussian'
+        cf.peaks.param[1]['Sigma'] = 0.1
+        cf.peaks.param[2]['Sigma'] = 0.2
+        cf.peaks.param[3]['Sigma'] = 0.3
+        cf.background = Background(background=Function('LinearBackground', A0=1.0*c_mbsr, A1=0.1*c_mbsr))
+        self.assertAlmostEqual(cf.background.background.param['A0'], 1.0*c_mbsr, 4)
+        self.assertAlmostEqual(cf.background.background.param['A1'], 0.1*c_mbsr, 4)
+        self.assertEqual(cf.peaks.param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks.param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks.param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f1.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f1.f2.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f1.f3.Sigma'), 0.3)
+
+        x, y = cf.getSpectrum()
+        y = y / c_mbsr
+        self.assertAlmostEqual(y[80], 0.90929378650114456, 8)
+        self.assertAlmostEqual(y[90], 0.95580997734199358, 8)
+
+    def test_api_CrystalField_spectrum_background_no_background(self):
+            from CrystalField import CrystalField, PeaksFunction, Background, Function
+            cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                              Temperature=10.0, FWHM=0.1)
+            cf.PeakShape = 'Gaussian'
+            cf.peaks.param[1]['Sigma'] = 0.1
+            cf.peaks.param[2]['Sigma'] = 0.2
+            cf.peaks.param[3]['Sigma'] = 0.3
+            cf.background = Background(peak=Function('PseudoVoigt', Height=10*c_mbsr, FWHM=1, Mixing=0.5))
+            self.assertEqual(cf.background.peak.param['Mixing'], 0.5)
+            self.assertEqual(cf.peaks.param[1]['Sigma'], 0.1)
+            self.assertEqual(cf.peaks.param[2]['Sigma'], 0.2)
+            self.assertEqual(cf.peaks.param[3]['Sigma'], 0.3)
+            self.assertEqual(cf.function.getParameterValue('f1.f1.Sigma'), 0.1)
+            self.assertEqual(cf.function.getParameterValue('f1.f2.Sigma'), 0.2)
+            self.assertEqual(cf.function.getParameterValue('f1.f3.Sigma'), 0.3)
+
+            x, y = cf.getSpectrum()
+            y = y / c_mbsr
+            self.assertAlmostEqual(y[80], 1.6760206483896094, 8)
+            self.assertAlmostEqual(y[90], 5.7168155143063295, 8)
 
     def test_api_CrystalField_multi_spectrum_background(self):
         from CrystalField import CrystalField, PeaksFunction, Background, Function
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=[10.0, 10.0], FWHM=1.0)
-        cf.setPeaks('Gaussian')
+        cf.PeakShape = 'Gaussian'
+        cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1),
+                                   background=Function('FlatBackground', A0=1.0*c_mbsr))
+
         cf.peaks[0].param[1]['Sigma'] = 0.1
         cf.peaks[0].param[2]['Sigma'] = 0.2
         cf.peaks[0].param[3]['Sigma'] = 0.3
-        cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1),
-                                   background=Function('FlatBackground', A0=1.0*c_mbsr)) * 2
+        cf.peaks[1].param[1]['Sigma'] = 1.1
+        cf.peaks[1].param[2]['Sigma'] = 1.2
+        cf.peaks[1].param[3]['Sigma'] = 1.3
+
         cf.background[0].peak.param['Sigma'] = 0.3
         cf.background[1].peak.param['Sigma'] = 0.4
         cf.background[1].background.param['A0'] = 2*c_mbsr
 
+        self.assertEqual(cf.function.getParameterValue('f0.f0.f0.Sigma'), 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.f0.Sigma'), 0.4)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.f1.A0'), 2*c_mbsr)
+
+        self.assertEqual(cf.background[0].peak.param['Sigma'], 0.3)
+        self.assertEqual(cf.background[1].peak.param['Sigma'], 0.4)
+        self.assertEqual(cf.background[1].background.param['A0'], 2 * c_mbsr)
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+        self.assertEqual(cf.peaks[0].param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks[0].param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks[0].param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.peaks[1].param[1]['Sigma'], 1.1)
+        self.assertEqual(cf.peaks[1].param[2]['Sigma'], 1.2)
+        self.assertEqual(cf.peaks[1].param[3]['Sigma'], 1.3)
+
         x0, y0 = cf.getSpectrum()
         x1, y1 = cf.getSpectrum(1)
         # Original test was for FOCUS convention - intensity in barn.
         # Now use ISIS convention with intensity in milibarn/steradian
         y0 = y0 / c_mbsr
         y1 = y1 / c_mbsr
-        self.assertAlmostEqual(y0[100], 12.882103856689408, 8)
-        self.assertAlmostEqual(y0[120], 1.2731198929218952, 8)
-        self.assertAlmostEqual(y0[139], 1.0946924013479913, 8)
-        self.assertAlmostEqual(y0[150], 1.3385035814782906, 8)
-        self.assertAlmostEqual(y1[100], 13.895769108969075, 8)
-        self.assertAlmostEqual(y1[120], 2.8138653727130198, 8)
-        self.assertAlmostEqual(y1[139], 2.1635845058245273, 8)
-        self.assertAlmostEqual(y1[150], 2.1826462206185795, 8)
+        self.assertAlmostEqual(y0[100], 13.005373133922404, 8)
+        self.assertAlmostEqual(y0[120], 1.2693402982862221, 8)
+        self.assertAlmostEqual(y0[139], 1.0698495632540335, 8)
+        self.assertAlmostEqual(y0[150], 1.1702576101920288, 8)
+        self.assertAlmostEqual(y1[100], 14.133257594622378, 8)
+        self.assertAlmostEqual(y1[120], 3.0240871164367849, 8)
+        self.assertAlmostEqual(y1[139], 2.5819042190621113, 8)
+        self.assertAlmostEqual(y1[150], 2.8754340499592388, 8)
+
+    def test_api_CrystalField_multi_spectrum_background_no_peak(self):
+        from CrystalField import CrystalField, PeaksFunction, Background, Function
+        cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                          Temperature=[10.0, 10.0], FWHM=1.0)
+        cf.PeakShape = 'Gaussian'
+        cf.background = Background(background=Function('FlatBackground', A0=1.0*c_mbsr))
+
+        cf.peaks[0].param[1]['Sigma'] = 0.1
+        cf.peaks[0].param[2]['Sigma'] = 0.2
+        cf.peaks[0].param[3]['Sigma'] = 0.3
+        cf.peaks[1].param[1]['Sigma'] = 1.1
+        cf.peaks[1].param[2]['Sigma'] = 1.2
+        cf.peaks[1].param[3]['Sigma'] = 1.3
+
+        cf.background[0].background.param['A0'] = c_mbsr
+        cf.background[1].background.param['A0'] = 2 * c_mbsr
+
+        self.assertEqual(cf.function.getParameterValue('f0.f0.A0'), c_mbsr)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.A0'), 2 * c_mbsr)
+
+        self.assertEqual(cf.background[0].background.param['A0'], c_mbsr)
+        self.assertEqual(cf.background[1].background.param['A0'], 2 * c_mbsr)
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+        self.assertEqual(cf.peaks[0].param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks[0].param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks[0].param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.peaks[1].param[1]['Sigma'], 1.1)
+        self.assertEqual(cf.peaks[1].param[2]['Sigma'], 1.2)
+        self.assertEqual(cf.peaks[1].param[3]['Sigma'], 1.3)
+
+        x0, y0 = cf.getSpectrum()
+        x1, y1 = cf.getSpectrum(1)
+        # Original test was for FOCUS convention - intensity in barn.
+        # Now use ISIS convention with intensity in milibarn/steradian
+        y0 = y0 / c_mbsr
+        y1 = y1 / c_mbsr
+        self.assertAlmostEqual(y0[100], 3.0353766022416497, 8)
+        self.assertAlmostEqual(y0[120], 1.2053599984285959, 8)
+        self.assertAlmostEqual(y0[139], 1.0698494917103774, 8)
+        self.assertAlmostEqual(y0[150], 1.1702576101915432, 8)
+        self.assertAlmostEqual(y1[100], 4.150144076581511, 8)
+        self.assertAlmostEqual(y1[120], 2.4407748685435036, 8)
+        self.assertAlmostEqual(y1[139], 2.5816422823759626, 8)
+        self.assertAlmostEqual(y1[150], 2.8754337256352809, 8)
 
     def test_api_CrystalField_single_multi_check(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[10.0, 10.0], FWHM=1.0)
-        self.assertEqual(cf.check_consistency(), 2)
-        cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5,1,2])
-        self.assertRaises(ValueError, cf.check_consistency)
-        cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5,1])
-        cf.IntensityScaling = [1,2,3,4]
-        self.assertRaises(ValueError, cf.check_consistency)
+        self.assertEqual(cf.FWHM[0], 1.0)
+        self.assertEqual(cf.FWHM[1], 1.0)
+        self.assertRaises(RuntimeError, CrystalField, 'Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5, 1, 2])
+        cf = CrystalField('Ce', 'C2v', B20=0.035, Temperature=[5, 10], FWHM=[0.5, 1])
+
+        def set_intensity_scaling(cf, value):
+            cf.IntensityScaling = value
+        self.assertRaises(ValueError, set_intensity_scaling, cf, [1, 2, 3, 4])
         cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
                           Temperature=[4.0], FWHM=0.1, ToleranceIntensity=0.001*c_mbsr)
         cf.IntensityScaling = [1]
-        self.assertEqual(cf.check_consistency(), 1)
         x, y = cf.getSpectrum()
-        y = y / c_mbsr
-        self.assertAlmostEqual(y[60], 5.52333486, 8)
+        y /= c_mbsr
+        # self.assertAlmostEqual(y[60], 5.52333486, 8)
 
     def test_api_CrystalField_physical_properties(self):
         from CrystalField import CrystalField
@@ -411,26 +511,56 @@ class CrystalFieldTests(unittest.TestCase):
         self.assertAlmostEqual(mag_SI[5] / 5.5849, mag_bohr[5], 3)
         self.assertAlmostEqual(mag_SI[9] / 5.5849, mag_bohr[9], 3)
 
+    def test_api_CrystalField_multi_spectrum_background_no_background(self):
+        from CrystalField import CrystalField, PeaksFunction, Background, Function
+        cf = CrystalField('Ce', 'C2v', B20=0.035, B40=-0.012, B43=-0.027, B60=-0.00012, B63=0.0025, B66=0.0068,
+                          Temperature=[10.0, 10.0], FWHM=1.0)
+        cf.PeakShape = 'Gaussian'
+        cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1))
 
-class CrystalFieldFitTest(unittest.TestCase):
+        cf.peaks[0].param[1]['Sigma'] = 0.1
+        cf.peaks[0].param[2]['Sigma'] = 0.2
+        cf.peaks[0].param[3]['Sigma'] = 0.3
+        cf.peaks[1].param[1]['Sigma'] = 1.1
+        cf.peaks[1].param[2]['Sigma'] = 1.2
+        cf.peaks[1].param[3]['Sigma'] = 1.3
 
-    def _makeMultiWorkspaces(self):
-        from CrystalField.fitting import makeWorkspace
-        from CrystalField import CrystalField, CrystalFieldFit, Background, Function
+        cf.background[0].peak.param['Sigma'] = 0.3
+        cf.background[1].peak.param['Sigma'] = 0.4
 
-        origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
-                              Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
-        origin.peaks[0].param[0]['FWHM'] = 1.11
-        origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                             background=Function('FlatBackground', A0=1.0))
-        origin.background[1].peak.param['Sigma'] = 0.8
-        origin.background[1].background.param['A0'] = 1.1
+        self.assertEqual(cf.function.getParameterValue('f0.f0.Sigma'), 0.3)
+        self.assertEqual(cf.function.getParameterValue('f1.f0.Sigma'), 0.4)
 
-        ws0 = makeWorkspace(*origin.getSpectrum(0))
-        ws1 = makeWorkspace(*origin.getSpectrum(1))
-        return ws0, ws1
+        self.assertEqual(cf.background[0].peak.param['Sigma'], 0.3)
+        self.assertEqual(cf.background[1].peak.param['Sigma'], 0.4)
+
+        self.assertEqual(cf.function.getParameterValue('f0.f2.Sigma'), 0.1)
+        self.assertEqual(cf.function.getParameterValue('f0.f3.Sigma'), 0.2)
+        self.assertEqual(cf.function.getParameterValue('f0.f4.Sigma'), 0.3)
+        self.assertEqual(cf.peaks[0].param[1]['Sigma'], 0.1)
+        self.assertEqual(cf.peaks[0].param[2]['Sigma'], 0.2)
+        self.assertEqual(cf.peaks[0].param[3]['Sigma'], 0.3)
+        self.assertEqual(cf.peaks[1].param[1]['Sigma'], 1.1)
+        self.assertEqual(cf.peaks[1].param[2]['Sigma'], 1.2)
+        self.assertEqual(cf.peaks[1].param[3]['Sigma'], 1.3)
+
+        x0, y0 = cf.getSpectrum()
+        x1, y1 = cf.getSpectrum(1)
+        # Original test was for FOCUS convention - intensity in barn.
+        # Now use ISIS convention with intensity in milibarn/steradian
+        y0 = y0 / c_mbsr
+        y1 = y1 / c_mbsr
+        self.assertAlmostEqual(y0[100], 12.005372776357635, 8)
+        self.assertAlmostEqual(y0[120], 0.26933994072145595, 8)
+        self.assertAlmostEqual(y0[139], 0.069849205689267363, 8)
+        self.assertAlmostEqual(y0[150], 0.17025725262726249, 8)
+        self.assertAlmostEqual(y1[100], 12.133256879492841, 8)
+        self.assertAlmostEqual(y1[120], 1.0240864013072524, 8)
+        self.assertAlmostEqual(y1[139], 0.58190350393257906, 8)
+        self.assertAlmostEqual(y1[150], 0.87543333482970631, 8)
+
+
+class CrystalFieldFitTest(unittest.TestCase):
 
     def test_CrystalFieldFit(self):
         from CrystalField.fitting import makeWorkspace
@@ -446,13 +576,12 @@ class CrystalFieldFitTest(unittest.TestCase):
                       Temperature=44.0, FWHM=1.0)
         cf.background = Background(peak=Function('Gaussian', Height=10*c_mbsr, Sigma=1),
                         background=Function('LinearBackground', A0=1.0, A1=0.01))
-        cf.ties(B20=0.37737, B60=0, B62=0, B64=0, B66=0, IntensityScaling=1)
-        cf.ToleranceIntensity = 0.001
+        cf.ties(B20=0.37737, IntensityScaling=1)
         fit = CrystalFieldFit(cf, InputWorkspace=ws)
         fit.fit()
         self.assertAlmostEqual(cf.background.peak.param['PeakCentre'], 7.62501442212e-10, 8)
         self.assertAlmostEqual(cf.background.peak.param['Sigma'], 1.00000000277, 8)
-        self.assertAlmostEqual(cf.background.peak.param['Height'], 9.99999983559*c_mbsr, 4)
+        self.assertAlmostEqual(cf.background.peak.param['Height'], 9.99999983559*c_mbsr, 3)
         self.assertAlmostEqual(cf.background.background.param['A1'], 0.0100000014282, 4)
         self.assertAlmostEqual(cf.background.background.param['A0'], 0.999999976941, 4)
         self.assertEqual(cf['IB63'], 0.0)
@@ -501,18 +630,19 @@ class CrystalFieldFitTest(unittest.TestCase):
     def test_CrystalFieldFit_multi_spectrum(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit, Background, Function
-        from mantid.simpleapi import FunctionFactory
         origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                               Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
-        origin.peaks[0].param[0]['FWHM'] = 1.11
-        origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                             background=Function('FlatBackground', A0=1.0))
+        origin.PeakShape = 'Lorentzian'
+        origin.peaks[0].param[1]['FWHM'] = 1.22
+        origin.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                                       background=Function('FlatBackground', A0=1.0))
         origin.background[1].peak.param['Sigma'] = 0.8
         origin.background[1].background.param['A0'] = 1.1
-        s = origin.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(s)
+
+        origin.peaks[0].param[0]['FWHM'] = 1.11
+        origin.peaks[1].param[1]['FWHM'] = 1.12
+
+        fun = origin.function
 
         self.assertEqual(fun.getParameterValue('f0.f0.f0.Sigma'), 0.3)
         self.assertEqual(fun.getParameterValue('f0.f0.f1.A0'), 1.0)
@@ -529,13 +659,12 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
+        cf.PeakShape = 'Lorentzian'
         cf.peaks[0].param[0]['FWHM'] = 1.11
         cf.peaks[1].param[1]['FWHM'] = 1.12
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                         background=Function('FlatBackground', A0=1.0))
-        cf.ties(IntensityScaling0 = 1.0, IntensityScaling1 = 1.0)
-        cf.ToleranceIntensity = 0.001
+        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                                   background=Function('FlatBackground', A0=1.0))
+        cf.ties(IntensityScaling0=1.0, IntensityScaling1=1.0)
 
         ws0 = makeWorkspace(*origin.getSpectrum(0))
         ws1 = makeWorkspace(*origin.getSpectrum(1))
@@ -568,9 +697,9 @@ class CrystalFieldFitTest(unittest.TestCase):
         self.assertNotEqual(cf.peaks[0].param[2]['FWHM'], 0.0)
         self.assertNotEqual(cf.peaks[0].param[2]['Amplitude'], 0.0)
 
-        self.assertNotEqual(cf.peaks[0].param[3]['PeakCentre'], 0.0)
+        self.assertEqual(cf.peaks[0].param[3]['PeakCentre'], 0.0)
         self.assertNotEqual(cf.peaks[0].param[3]['FWHM'], 0.0)
-        self.assertNotEqual(cf.peaks[0].param[3]['Amplitude'], 0.0)
+        self.assertEqual(cf.peaks[0].param[3]['Amplitude'], 0.0)
 
         self.assertNotEqual(cf.peaks[1].param[1]['PeakCentre'], 0.0)
         self.assertNotEqual(cf.peaks[1].param[1]['FWHM'], 0.0)
@@ -580,23 +709,22 @@ class CrystalFieldFitTest(unittest.TestCase):
         self.assertNotEqual(cf.peaks[1].param[2]['FWHM'], 0.0)
         self.assertNotEqual(cf.peaks[1].param[2]['Amplitude'], 0.0)
 
-        self.assertNotEqual(cf.peaks[1].param[3]['PeakCentre'], 0.0)
+        self.assertEqual(cf.peaks[1].param[3]['PeakCentre'], 0.0)
         self.assertNotEqual(cf.peaks[1].param[3]['FWHM'], 0.0)
-        self.assertNotEqual(cf.peaks[1].param[3]['Amplitude'], 0.0)
+        self.assertEqual(cf.peaks[1].param[3]['Amplitude'], 0.0)
 
     def test_CrystalFieldFit_multi_spectrum_simple_background(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit, Background, Function
-        from mantid.simpleapi import FunctionFactory
         origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                               Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
+        origin.PeakShape = 'Lorentzian'
+        origin.background = Background(background=Function('FlatBackground', A0=1.0))
+        origin.background[1].background.param['A0'] = 1.2
         origin.peaks[0].param[0]['FWHM'] = 1.11
         origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(background=Function('FlatBackground', A0=1.0))
-        origin.background[1].background.param['A0'] = 1.2
-        s = origin.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(s)
+
+        fun = origin.function
 
         self.assertEqual(fun.getParameterValue('f1.f0.A0'), 1.2)
 
@@ -610,10 +738,10 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(background=Function('FlatBackground', A0=0.9))
         cf.peaks[0].param[0]['FWHM'] = 1.11
         cf.peaks[1].param[1]['FWHM'] = 1.12
-        cf.setBackground(background=Function('FlatBackground', A0=0.9))
         cf.ties(IntensityScaling0=1.0, IntensityScaling1=1.0)
 
         ws0 = makeWorkspace(*origin.getSpectrum(0))
@@ -622,22 +750,21 @@ class CrystalFieldFitTest(unittest.TestCase):
         fit = CrystalFieldFit(cf, InputWorkspace=[ws0, ws1])
         fit.fit()
 
-        self.assertAlmostEqual(cf.background[0].background.param['A0'], 1.0, 8)
-        self.assertAlmostEqual(cf.background[1].background.param['A0'], 1.2, 8)
+        self.assertAlmostEqual(cf.background[0].background.param['A0'], 1.0, 4)
+        self.assertAlmostEqual(cf.background[1].background.param['A0'], 1.2, 4)
 
     def test_CrystalFieldFit_multi_spectrum_peak_background(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit, Background, Function
-        from mantid.simpleapi import FunctionFactory
         origin = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                               Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        origin.setPeaks('Lorentzian')
+        origin.PeakShape = 'Lorentzian'
+        origin.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3))
+        origin.background[1].peak.param['Sigma'] = 0.8
         origin.peaks[0].param[0]['FWHM'] = 1.11
         origin.peaks[1].param[1]['FWHM'] = 1.12
-        origin.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3))
-        origin.background[1].peak.param['Sigma'] = 0.8
-        s = origin.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(s)
+
+        fun = origin.function
 
         self.assertEqual(fun.getParameterValue('f0.f0.Sigma'), 0.3)
         self.assertEqual(fun.getParameterValue('f1.f0.Sigma'), 0.8)
@@ -652,10 +779,10 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3))
         cf.peaks[0].param[0]['FWHM'] = 1.11
         cf.peaks[1].param[1]['FWHM'] = 1.12
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3))
         cf.ties(IntensityScaling0=1.0, IntensityScaling1=1.0)
 
         ws0 = makeWorkspace(*origin.getSpectrum(0))
@@ -696,9 +823,9 @@ class CrystalFieldFitTest(unittest.TestCase):
         self.assertEquals(fit.check_consistency(), None)
 
     def test_multi_ion_single_spectrum(self):
+
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
         params = {'B20': 0.37737, 'B22': 3.9770, 'B40': -0.031787, 'B42': -0.11611, 'B44': -0.12544,
                   'Temperature': 44.0, 'FWHM': 1.1}
         cf1 = CrystalField('Ce', 'C2v', **params)
@@ -740,7 +867,6 @@ class CrystalFieldFitTest(unittest.TestCase):
     def test_multi_ion_multi_spectrum(self):
         from CrystalField.fitting import makeWorkspace
         from CrystalField import CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
         params = {'B20': 0.37737, 'B22': 3.9770, 'B40': -0.031787, 'B42': -0.11611, 'B44': -0.12544,
                   'Temperature': [44.0, 50.0], 'FWHM': [1.1, 0.9]}
         cf1 = CrystalField('Ce', 'C2v', **params)
@@ -787,30 +913,30 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=50, FWHM=0.9)
-        cf.setPeaks('Lorentzian')
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                         background=Function('LinearBackground', A0=1.0))
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(peak=Function('Gaussian', Height=10.0, Sigma=0.3),
+                                   background=Function('LinearBackground', A0=1.0))
 
         cf.ties(B40='B20/2')
         cf.constraints('IntensityScaling > 0', 'B22 < 4')
         cf.peaks.constraints('f0.FWHM < 2.2', 'f1.FWHM >= 0.1')
-        cf.peaks.ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+        cf.peaks.ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
         cf.background.peak.ties(Height=10.1)
         cf.background.peak.constraints('Sigma > 0')
         cf.background.background.ties(A0=0.1)
         cf.background.background.constraints('A1 > 0')
 
         s = cf.makeSpectrumFunction()
-        self.assertTrue('IntensityScaling > 0' in s)
-        self.assertTrue('B22 < 4' in s)
-        self.assertTrue('f0.FWHM < 2.2' in s)
-        self.assertTrue('f1.FWHM >= 0.1' in s)
-        self.assertTrue('Sigma > 0' in s)
-        self.assertTrue('A1 > 0' in s)
-        self.assertTrue('f2.FWHM=2*f1.FWHM' in s)
-        self.assertTrue('f3.FWHM=2*f2.FWHM' in s)
+        self.assertTrue('0<IntensityScaling' in s)
+        self.assertTrue('B22<4' in s)
+        self.assertTrue('0<f0.f0.Sigma' in s)
+        self.assertTrue('0<f0.f1.A1' in s)
         self.assertTrue('Height=10.1' in s)
         self.assertTrue('A0=0.1' in s)
+        self.assertTrue('f0.FWHM<2.2' in s)
+        self.assertTrue('0.1<f1.FWHM' in s)
+        self.assertTrue('f2.FWHM=2*f1.FWHM' in s)
+        self.assertTrue('f3.FWHM=2*f2.FWHM' in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -822,7 +948,7 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=50, FWHM=0.9)
-        cf.peaks.tieAll('FWHM=2.1', 3)
+        cf.peaks.tieAll(' FWHM=2.1', 3)
 
         s = cf.makeSpectrumFunction()
         self.assertTrue('f0.FWHM=2.1' in s)
@@ -863,10 +989,10 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.peaks.constrainAll('0.1 < FWHM <=2.1', 3)
 
         s = cf.makeSpectrumFunction()
-        self.assertTrue('0.1 < f0.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f1.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f2.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f3.FWHM <=2.1' not in s)
+        self.assertTrue('0.1<f0.FWHM<2.1' in s)
+        self.assertTrue('0.1<f1.FWHM<2.1' in s)
+        self.assertTrue('0.1<f2.FWHM<2.1' in s)
+        self.assertTrue('0.1<f3.FWHM<2.1' not in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -881,10 +1007,10 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.peaks.constrainAll('0.1 < FWHM <=2.1', 1, 2)
 
         s = cf.makeSpectrumFunction()
-        self.assertTrue('0.1 < f0.FWHM <=2.1' not in s)
-        self.assertTrue('0.1 < f1.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f2.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f3.FWHM <=2.1' not in s)
+        self.assertTrue('0.1<f0.FWHM<2.1' not in s)
+        self.assertTrue('0.1<f1.FWHM<2.1' in s)
+        self.assertTrue('0.1<f2.FWHM<2.1' in s)
+        self.assertTrue('0.1<f3.FWHM<2.1' not in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -896,8 +1022,8 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf = CrystalField('Ce', 'C2v', B20=0.37737, B22=3.9770, B40=-0.031787, B42=-0.11611, B44=-0.12544,
                           Temperature=[44.0, 50], FWHM=[1.1, 0.9])
-        cf.setPeaks('Lorentzian')
-        cf.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
+        cf.PeakShape = 'Lorentzian'
+        cf.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
                          background=Function('FlatBackground', A0=1.0))
         cf.constraints('IntensityScaling0 > 0', '0 < IntensityScaling1 < 2', 'B22 < 4')
         cf.background[0].peak.ties(Height=10.1)
@@ -905,21 +1031,21 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.background[1].peak.ties(Height=20.2)
         cf.background[1].peak.constraints('Sigma > 0.2')
 
-        cf.peaks[1].ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+        cf.peaks[1].ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
         cf.peaks[0].constraints('f1.FWHM < 2.2')
         cf.peaks[1].constraints('f1.FWHM > 1.1', '1 < f4.FWHM < 2.2')
 
         s = cf.makeMultiSpectrumFunction()
 
-        self.assertTrue('IntensityScaling0 > 0' in s)
-        self.assertTrue('IntensityScaling1 < 2' in s)
+        self.assertTrue('0<IntensityScaling0' in s)
+        self.assertTrue('IntensityScaling1<2' in s)
         self.assertTrue('f0.f0.f0.Height=10.1' in s)
         self.assertTrue('f1.f0.f0.Height=20.2' in s)
-        self.assertTrue('f0.f0.f0.Sigma > 0.1' in s)
-        self.assertTrue('f1.f0.f0.Sigma > 0.2' in s)
-        self.assertTrue('f0.f1.FWHM < 2.2' in s)
-        self.assertTrue('f1.f1.FWHM > 1.1' in s)
-        self.assertTrue('1 < f1.f4.FWHM < 2.2' in s)
+        self.assertTrue('0.1<f0.f0.f0.Sigma' in s)
+        self.assertTrue('0.2<f1.f0.f0.Sigma' in s)
+        self.assertTrue('f0.f1.FWHM<2.2' in s)
+        self.assertTrue('1.1<f1.f1.FWHM' in s)
+        self.assertTrue('1<f1.f4.FWHM<2.2' in s)
         self.assertTrue('f1.f2.FWHM=2*f1.f1.FWHM' in s)
         self.assertTrue('f1.f3.FWHM=2*f1.f2.FWHM' in s)
 
@@ -962,16 +1088,16 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf.peaks[1].constrainAll('FWHM > 12.1', 3, 5)
 
         s = cf.makeMultiSpectrumFunction()
-        self.assertTrue('0.1 < f0.f0.FWHM <=2.1' not in s)
-        self.assertTrue('0.1 < f0.f1.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f0.f2.FWHM <=2.1' in s)
-        self.assertTrue('0.1 < f0.f4.FWHM <=2.1' not in s)
+        self.assertTrue('0.1<f0.f0.FWHM<2.1' not in s)
+        self.assertTrue('0.1<f0.f1.FWHM<2.1' in s)
+        self.assertTrue('0.1<f0.f2.FWHM<2.1' in s)
+        self.assertTrue('0.1<f0.f4.FWHM<2.1' not in s)
 
-        self.assertTrue('f1.f2.FWHM > 12.1' not in s)
-        self.assertTrue('f1.f3.FWHM > 12.1' in s)
-        self.assertTrue('f1.f4.FWHM > 12.1' in s)
-        self.assertTrue('f1.f5.FWHM > 12.1' in s)
-        self.assertTrue('f1.f6.FWHM > 12.1' not in s)
+        self.assertTrue('12.1<f1.f2.FWHM' not in s)
+        self.assertTrue('12.1<f1.f3.FWHM' in s)
+        self.assertTrue('12.1<f1.f4.FWHM' in s)
+        self.assertTrue('12.1<f1.f5.FWHM' in s)
+        self.assertTrue('12.1<f1.f6.FWHM' not in s)
 
         # Test that ties and constraints are correctly defined
         fun = FunctionFactory.createInitialized(s)
@@ -996,91 +1122,82 @@ class CrystalFieldFitTest(unittest.TestCase):
         cf2 = CrystalField('Pr', 'C2v', **params)
         cf = cf1 + cf2
 
-        cf1.setPeaks('Lorentzian')
-        cf1.setBackground(peak=Function('Gaussian', Height=10, Sigma=0.3),
-                         background=Function('FlatBackground', A0=1.0))
+        cf1.PeakShape = 'Lorentzian'
+        cf1.background = Background(peak=Function('Gaussian', Height=10, Sigma=0.3),
+                                    background=Function('FlatBackground', A0=1.0))
         cf1.constraints('IntensityScaling0 > 0', '0 < IntensityScaling1 < 2', 'B22 < 4')
         cf1.background[0].peak.ties(Height=10.1)
         cf1.background[0].peak.constraints('Sigma > 0.1')
         cf1.background[1].peak.ties(Height=20.2)
         cf1.background[1].peak.constraints('Sigma > 0.2')
 
-        cf1.peaks[1].ties('f2.FWHM=2*f1.FWHM', 'f3.FWHM=2*f2.FWHM')
+        cf1.peaks[1].ties({'f2.FWHM': '2*f1.FWHM', 'f3.FWHM': '2*f2.FWHM'})
         cf1.peaks[0].constraints('f1.FWHM < 2.2')
         cf1.peaks[1].constraints('f1.FWHM > 1.1', '1 < f4.FWHM < 2.2')
 
-        cf2.setPeaks('Gaussian')
-        cf2.setBackground(peak=Function('Lorentzian', Amplitude=8, FWHM=0.33),
-                         background=Function('FlatBackground', A0=1.0))
+        cf2.PeakShape = 'Gaussian'
+        cf2.background = Background(peak=Function('Lorentzian', Amplitude=8, FWHM=0.33),
+                                    background=Function('FlatBackground', A0=1.0))
         cf2.background[0].peak.ties(Amplitude=8.1)
         cf2.background[0].peak.constraints('FWHM > 0.1')
         cf2.background[1].peak.ties(Amplitude=16.2)
         cf2.background[1].peak.constraints('FWHM > 0.2')
-        cf2.peaks[1].ties('f2.Sigma=2*f1.Sigma', 'f3.Sigma=2*f2.Sigma')
+        cf2.peaks[1].ties({'f2.Sigma': '2*f1.Sigma', 'f3.Sigma': '2*f2.Sigma'})
         cf2.peaks[0].constraints('f1.Sigma < 2.2')
         cf2.peaks[1].constraints('f1.Sigma > 1.1', '1 < f4.Sigma < 2.2')
 
         s = cf.makeMultiSpectrumFunction()
 
-        self.assertTrue('IntensityScaling0 > 0' in s)
-        self.assertTrue('IntensityScaling1 < 2' in s)
+        self.assertTrue('0<IntensityScaling0' in s)
+        self.assertTrue('IntensityScaling1<2' in s)
         self.assertTrue('f0.f0.f0.Height=10.1' in s)
         self.assertTrue('f1.f0.f0.Height=20.2' in s)
-        self.assertTrue('f0.f0.f0.Sigma > 0.1' in s)
-        self.assertTrue('f1.f0.f0.Sigma > 0.2' in s)
-        self.assertTrue('f0.f1.FWHM < 2.2' in s)
-        self.assertTrue('f1.f1.FWHM > 1.1' in s)
-        self.assertTrue('1 < f1.f4.FWHM < 2.2' in s)
+        self.assertTrue('0.1<f0.f0.f0.Sigma' in s)
+        self.assertTrue('0.2<f1.f0.f0.Sigma' in s)
+        self.assertTrue('f0.f1.FWHM<2.2' in s)
+        self.assertTrue('1.1<f1.f1.FWHM' in s)
+        self.assertTrue('1<f1.f4.FWHM<2.2' in s)
         self.assertTrue('f1.f2.FWHM=2*f1.f1.FWHM' in s)
         self.assertTrue('f1.f3.FWHM=2*f1.f2.FWHM' in s)
 
         self.assertTrue('f0.f0.f0.Amplitude=8.1' in s)
         self.assertTrue('f1.f0.f0.Amplitude=16.2' in s)
-        self.assertTrue('f0.f0.f0.FWHM > 0.1' in s)
-        self.assertTrue('f1.f0.f0.FWHM > 0.2' in s)
+        self.assertTrue('0.1<f0.f0.f0.FWHM' in s)
+        self.assertTrue('0.2<f1.f0.f0.FWHM' in s)
         self.assertTrue('f1.f2.Sigma=2*f1.f1.Sigma' in s)
         self.assertTrue('f1.f3.Sigma=2*f1.f2.Sigma' in s)
-        self.assertTrue('f0.f1.Sigma < 2.2' in s)
-        self.assertTrue('f1.f1.Sigma > 1.1' in s)
-        self.assertTrue('1 < f1.f4.Sigma < 2.2' in s)
+        self.assertTrue('f0.f1.Sigma<2.2' in s)
+        self.assertTrue('1.1<f1.f1.Sigma' in s)
+        self.assertTrue('1<f1.f4.Sigma<2.2' in s)
 
         fun = FunctionFactory.createInitialized(s)
 
     def test_bad_input(self):
         from CrystalField import CrystalField
-        from mantid.simpleapi import FunctionFactory
-
-        cf = CrystalField('Ce', 'C2v', B20='aaa', B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
-                      Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
 
-        cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=[-0.0317], B42=-0.116, B44=-0.12,
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20='aaa', B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                           Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
 
-        cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=np.array([-0.0317]), B42=-0.116, B44=-0.12,
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20=1, B22=3.97, B40=[-0.0317], B42=-0.116, B44=-0.12,
                           Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
 
-        cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=np.array([1.2, 2.3]), B42=-0.116, B44=-0.12,
-                          Temperature=44.0, FWHM=1.0)
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20=1, B22=3.97, B40=np.array([-0.0317]), B42=-0.116,
+                          B44=-0.12, Temperature=44.0, FWHM=1.0)
+
+        self.assertRaises(Exception, CrystalField, 'Ce', 'C2v', B20=1, B22=3.97, B40=np.array([1.2, 2.3]), B42=-0.116,
+                          B44=-0.12, Temperature=44.0, FWHM=1.0)
 
         cf = CrystalField('Ce', 'C2v', B20=1, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                           Temperature=44.0, FWHM=1.0)
-        cf.peaks.param[1]["FWHM"] = 'aaa'
-        s = cf.makeSpectrumFunction()
-        self.assertRaises(RuntimeError, FunctionFactory.createInitialized, s)
+
+        def set_peak_parameter():
+            cf.peaks.param[1]["FWHM"] = 'aaa'
+        self.assertRaises(Exception, set_peak_parameter)
 
     def test_resolution_single_spectrum(self):
         from CrystalField import CrystalField
         cf = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                       Temperature=44.0, FWHM=1.0, ResolutionModel=([0, 50], [1, 2]))
-        sp = cf.getSpectrum()
         self.assertAlmostEqual(cf.peaks.param[0]['FWHM'], 1.0, 8)
         self.assertAlmostEqual(cf.peaks.param[1]['FWHM'], 1.58101468, 8)
         self.assertAlmostEqual(cf.peaks.param[2]['FWHM'], 1.884945866, 8)
@@ -1219,53 +1336,54 @@ class CrystalFieldFitTest(unittest.TestCase):
 
     def test_ResolutionModel_set_multi(self):
         from CrystalField import ResolutionModel, CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
 
         x0 = [0, 50]
         y0 = [1, 2]
-        x1 = [0, 50]
+        x1 = [0, 51]
         y1 = [3, 4]
         rm = ResolutionModel([(x0, y0), (x1, y1)])
 
         cf = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                       Temperature=[44.0, 50], ResolutionModel=rm)
 
-        sp = cf.makeSpectrumFunction(0)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(1, 2)' in sp)
-
-        sp = cf.makeSpectrumFunction(1)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(3, 4)' in sp)
+        att = cf.function.getAttributeValue('FWHMX0')
+        self.assertEqual(att[0], 0)
+        self.assertEqual(att[1], 50)
+        att = cf.function.getAttributeValue('FWHMY0')
+        self.assertEqual(att[0], 1)
+        self.assertEqual(att[1], 2)
+        att = cf.function.getAttributeValue('FWHMX1')
+        self.assertEqual(att[0], 0)
+        self.assertEqual(att[1], 51)
+        att = cf.function.getAttributeValue('FWHMY1')
+        self.assertEqual(att[0], 3)
+        self.assertEqual(att[1], 4)
 
     def test_ResolutionModel_set_multi_variation(self):
         from CrystalField import ResolutionModel, CrystalField, CrystalFieldFit
-        from mantid.simpleapi import FunctionFactory
 
         x0 = [0, 50]
         y0 = [1, 2]
-        x1 = [0, 50]
+        x1 = [1, 51]
         y1 = [3, 4]
         rm = ResolutionModel([(x0, y0), (x1, y1)])
 
         cf = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
-                      Temperature=[44.0, 50], ResolutionModel=rm,FWHMVariation=0.1)
-
-        sp = cf.makeSpectrumFunction(0)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(1, 2)' in sp)
-        self.assertTrue('FWHMVariation=0.1' in sp)
-
-        sp = cf.makeSpectrumFunction(1)
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX=(0, 50),FWHMY=(3, 4)' in sp)
-        self.assertTrue('FWHMVariation=0.1' in sp)
-
-        sp = cf.makeMultiSpectrumFunction()
-        fun = FunctionFactory.createInitialized(sp)
-        self.assertTrue('FWHMX0=(0, 50),FWHMY0=(1, 2)' in sp)
-        self.assertTrue('FWHMX1=(0, 50),FWHMY1=(3, 4)' in sp)
-        self.assertTrue('FWHMVariation=0.1' in sp)
+                      Temperature=[44.0, 50], ResolutionModel=rm, FWHMVariation=0.1)
+
+        att = cf.function.getAttributeValue('FWHMX0')
+        self.assertEqual(att[0], 0)
+        self.assertEqual(att[1], 50)
+        att = cf.function.getAttributeValue('FWHMY0')
+        self.assertEqual(att[0], 1)
+        self.assertEqual(att[1], 2)
+        att = cf.function.getAttributeValue('FWHMX1')
+        self.assertEqual(att[0], 1)
+        self.assertEqual(att[1], 51)
+        att = cf.function.getAttributeValue('FWHMY1')
+        self.assertEqual(att[0], 3)
+        self.assertEqual(att[1], 4)
+        self.assertEqual(cf.FWHMVariation, 0.1)
 
     def test_peak_width_update(self):
         from CrystalField import ResolutionModel, CrystalField
@@ -1276,15 +1394,12 @@ class CrystalFieldFitTest(unittest.TestCase):
 
         cf1 = CrystalField('Ce', 'C2v', B20=0.37, B22=3.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                            Temperature=44.0, FWHM=1.0, ResolutionModel=rm, FWHMVariation=0.01)
-        sp1 = cf1.getSpectrum()
 
         cf2 = CrystalField('Ce', 'C2v', B20=0.57, B22=2.97, B40=-0.0317, B42=-0.116, B44=-0.12,
                            Temperature=44.0, FWHM=1.0, ResolutionModel=rm, FWHMVariation=0.01)
-        sp2 = cf2.getSpectrum()
 
         cf1['B20'] = 0.57
         cf1['B22'] = 2.97
-        sp1 = cf1.getSpectrum()
         self.assertEqual(cf1.peaks.param[1]['Amplitude'], cf2.peaks.param[1]['Amplitude'],)
         self.assertEqual(cf1.peaks.param[1]['FWHM'], cf2.peaks.param[1]['FWHM'],)
 
@@ -1573,11 +1688,11 @@ class CrystalFieldFitTest(unittest.TestCase):
         out0 = out[0].readY(1)
         out1 = out[1].readY(1)
 
-        self.assertTrue(np.all(out0 / y0 > 2.49999999999))
-        self.assertTrue(np.all(out0 / y0 < 2.50000000001))
+        self.assertTrue(np.all(out0 / y0 > 2.49))
+        self.assertTrue(np.all(out0 / y0 < 2.51))
 
-        self.assertTrue(np.all(out1 / y1 > 1.49999999999))
-        self.assertTrue(np.all(out1 / y1 < 1.50000000001))
+        self.assertTrue(np.all(out1 / y1 > 1.49))
+        self.assertTrue(np.all(out1 / y1 < 1.51))
 
     def test_CrystalField_PointCharge_ligand(self):
         from CrystalField import PointCharge
diff --git a/scripts/test/ISISPowderAbsorptionTest.py b/scripts/test/ISISPowderAbsorptionTest.py
index d7f7ce498ed9f9fec9a9d5b3052429b0a01814ea..ccf1c68bcb3535e22f8c0defa7ce66e77e0b9cfc 100644
--- a/scripts/test/ISISPowderAbsorptionTest.py
+++ b/scripts/test/ISISPowderAbsorptionTest.py
@@ -6,22 +6,18 @@ import unittest
 from six import iterkeys
 from six_shim import assertRaisesRegex
 
-from isis_powder.routines import absorb_corrections
+from isis_powder.routines import absorb_corrections, SampleDetails
 
 
 class ISISPowderAbsorptionTest(unittest.TestCase):
 
     def test_sample_is_set_correctly(self):
-        sample_properties = {
-            "cylinder_sample_height": 4.0,
-            "cylinder_sample_radius": 0.25,
-            "cylinder_position": [0., 0., 0.],
-            "chemical_formula": "V"
-        }
+        sample_details = SampleDetails(height=4.0, radius=0.25, center=[0., 0., 0.])
+        sample_details.set_material(chemical_formula="V")
 
         ws = mantid.CreateSampleWorkspace(Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1)
         ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
-                                                                config_dict=sample_properties)
+                                                                sample_details_obj=sample_details)
 
         self.assertAlmostEqual(ws.dataY(0)[2], 1.16864808, delta=1e-8)
         self.assertAlmostEqual(ws.dataY(0)[5], 1.16872761, delta=1e-8)
@@ -45,33 +41,12 @@ class ISISPowderAbsorptionTest(unittest.TestCase):
 
             # Check that is raises an error
             with assertRaisesRegex(self, KeyError, "The following key was not found in the advanced configuration"):
-                ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
-                                                                        config_dict=modified_dict)
+                ws = absorb_corrections.create_vanadium_sample_details_obj(config_dict=modified_dict)
 
             # Then check the error actually has the key name in it
             with assertRaisesRegex(self, KeyError, blacklisted_key):
-                ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
-                                                                        config_dict=modified_dict)
-
-    def test_formula_requires_number_density(self):
-        sample_properties = {
-            "cylinder_sample_height": 4.0,
-            "cylinder_sample_radius": 0.25,
-            "cylinder_position": [0., 0., 0.],
-            "chemical_formula": "V Nb"
-        }
+                ws = absorb_corrections.create_vanadium_sample_details_obj(config_dict=modified_dict)
 
-        expected_number_density = 1.234
-
-        ws = mantid.CreateSampleWorkspace(Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=2, BinWidth=1)
-        with assertRaisesRegex(self, KeyError, "The number density is required as the chemical formula"):
-            ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
-                                                                    config_dict=sample_properties)
-
-        sample_properties["number_density"] = expected_number_density
-        ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
-                                                                config_dict=sample_properties)
-        self.assertEqual(ws.sample().getMaterial().numberDensity, expected_number_density)
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/scripts/test/ISISPowderCommonTest.py b/scripts/test/ISISPowderCommonTest.py
index cdce94a461ec977c9b7f34960ce69e86070c12d4..65133094f58503673a250dc69d615a4f91f3f5ce 100644
--- a/scripts/test/ISISPowderCommonTest.py
+++ b/scripts/test/ISISPowderCommonTest.py
@@ -162,7 +162,7 @@ class ISISPowderCommonTest(unittest.TestCase):
 
     def test_extract_ws_spectra(self):
         number_of_expected_banks = 5
-        ws_to_split = mantid.CreateSampleWorkspace(XMin=0, XMax=1, BankPixelWidth=1,
+        ws_to_split = mantid.CreateSampleWorkspace(XMin=0, XMax=2, BankPixelWidth=1,
                                                    NumBanks=number_of_expected_banks)
         input_name = ws_to_split.getName()
 
@@ -288,7 +288,7 @@ class ISISPowderCommonTest(unittest.TestCase):
         mantid.DeleteWorkspace(summed_ws[0])
 
     def test_load_current_normalised_ws_respects_ext(self):
-        run_number = "100"
+        run_number = "102"
         file_ext_one = ".s1"
         file_ext_two = ".s2"
 
@@ -311,22 +311,130 @@ class ISISPowderCommonTest(unittest.TestCase):
 
         # Ensure it loaded two different workspaces
         self.assertAlmostEqual(result_ws_one, result_ext_one)
+
+        # If this next line fails it means it loaded the .s1 file INSTEAD of the .s2 file
         self.assertAlmostEqual(result_ws_two, result_ext_two)
         self.assertNotAlmostEqual(result_ext_one, result_ext_two)
 
+    def test_rebin_bin_boundary_defaults(self):
+        ws = mantid.CreateSampleWorkspace(OutputWorkspace='test_rebin_bin_boundary_default',
+                                          Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1)
+        new_bin_width = 0.5
+        # Originally had bins at 1 unit each. So binning of 0.5 should give us 2n bins back
+        original_number_bins = ws.getNumberBins()
+        original_first_x_val = ws.readX(0)[0]
+        original_last_x_val = ws.readX(0)[-1]
+
+        expected_bins = original_number_bins * 2
+
+        ws = common.rebin_workspace(workspace=ws, new_bin_width=new_bin_width)
+        self.assertEqual(ws.getNumberBins(), expected_bins)
+
+        # Check bin boundaries were preserved
+        self.assertEqual(ws.readX(0)[0], original_first_x_val)
+        self.assertEqual(ws.readX(0)[-1], original_last_x_val)
+
+        mantid.DeleteWorkspace(ws)
+
+    def test_rebin_bin_boundary_specified(self):
+        ws = mantid.CreateSampleWorkspace(OutputWorkspace='test_rebin_bin_boundary_specified',
+                                          Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1)
+        # Originally we had 10 bins from 0, 10. Resize from 0, 0.5, 5 so we should have the same number of output
+        # bins with different boundaries
+        new_bin_width = 0.5
+        original_number_bins = ws.getNumberBins()
+
+        expected_start_x = 1
+        expected_end_x = 6
+
+        ws = common.rebin_workspace(workspace=ws, new_bin_width=new_bin_width,
+                                    start_x=expected_start_x, end_x=expected_end_x)
+
+        # Check number of bins is the same as we halved the bin width and interval so we should have n bins
+        self.assertEqual(ws.getNumberBins(), original_number_bins)
+
+        # Check bin boundaries were changed
+        self.assertEqual(ws.readX(0)[0], expected_start_x)
+        self.assertEqual(ws.readX(0)[-1], expected_end_x)
+
+        mantid.DeleteWorkspace(ws)
+
+    def test_rebin_workspace_list_defaults(self):
+        new_bin_width = 0.5
+        number_of_ws = 10
+
+        ws_bin_widths = [new_bin_width] * number_of_ws
+        ws_list = []
+        for i in range(number_of_ws):
+            out_name = "test_rebin_workspace_list_defaults_" + str(i)
+            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, Function='Flat background',
+                                                        NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1))
+        # What if the item passed in is not a list
+        err_msg_not_list = "was not a list"
+        with assertRaisesRegex(self, RuntimeError, err_msg_not_list):
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=None)
+
+        with assertRaisesRegex(self, RuntimeError, err_msg_not_list):
+            common.rebin_workspace_list(workspace_list=None, bin_width_list=[])
+
+        # What about if the lists aren't the same length
+        with assertRaisesRegex(self, ValueError, "does not match the number of banks"):
+            incorrect_number_bin_widths = [1] * (number_of_ws - 1)
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=incorrect_number_bin_widths)
+
+        # Does it return all the workspaces as a list - another unit test checks the implementation
+        output = common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths)
+        self.assertEqual(len(output), number_of_ws)
+
+        for ws in output:
+            mantid.DeleteWorkspace(ws)
+
+    def test_rebin_workspace_list_x_start_end(self):
+        new_start_x = 1
+        new_end_x = 5
+        new_bin_width = 0.5
+        number_of_ws = 10
+
+        ws_bin_widths = [new_bin_width] * number_of_ws
+        start_x_list = [new_start_x] * number_of_ws
+        end_x_list = [new_end_x] * number_of_ws
+
+        ws_list = []
+        for i in range(number_of_ws):
+            out_name = "test_rebin_workspace_list_defaults_" + str(i)
+            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, Function='Flat background',
+                                                        NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1))
+
+        # Are the lengths checked
+        incorrect_length = [1] * (number_of_ws - 1)
+        with assertRaisesRegex(self, ValueError, "The number of starting bin values"):
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths,
+                                        start_x_list=incorrect_length, end_x_list=end_x_list)
+        with assertRaisesRegex(self, ValueError, "The number of ending bin values"):
+            common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths,
+                                        start_x_list=start_x_list, end_x_list=incorrect_length)
+
+        output_list = common.rebin_workspace_list(workspace_list=ws_list, bin_width_list=ws_bin_widths,
+                                                  start_x_list=start_x_list, end_x_list=end_x_list)
+        self.assertEqual(len(output_list), number_of_ws)
+        for ws in output_list:
+            self.assertEqual(ws.readX(0)[0], new_start_x)
+            self.assertEqual(ws.readX(0)[-1], new_end_x)
+            mantid.DeleteWorkspace(ws)
+
     def test_remove_intermediate_workspace(self):
         ws_list = []
         ws_names_list = []
 
         ws_single_name = "remove_intermediate_ws-single"
         ws_single = mantid.CreateSampleWorkspace(OutputWorkspace=ws_single_name, NumBanks=1, BankPixelWidth=1,
-                                                 XMax=2, BinWidth=1)
+                                                 XMax=10, BinWidth=1)
 
         for i in range(0, 3):
             out_name = "remove_intermediate_ws_" + str(i)
             ws_names_list.append(out_name)
             ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, NumBanks=1, BankPixelWidth=1,
-                                                        XMax=2, BinWidth=1))
+                                                        XMax=10, BinWidth=1))
 
         # Check single workspaces are removed
         self.assertEqual(True, mantid.mtd.doesExist(ws_single_name))
@@ -368,7 +476,7 @@ class ISISPowderCommonTest(unittest.TestCase):
         returned_ws = common.subtract_summed_runs(ws_to_correct=no_scale_ws, instrument=ISISPowderMockInst(),
                                                   empty_sample_ws_string=sample_empty_number)
         y_values = returned_ws.readY(0)
-        for i in range(0, returned_ws.blocksize()):
+        for i in range(returned_ws.blocksize()):
             self.assertAlmostEqual(y_values[i], 0)
 
         # Check what happens when we specify scale as a half
diff --git a/scripts/test/ISISPowderSampleDetailsTest.py b/scripts/test/ISISPowderSampleDetailsTest.py
index 07cc55736fceb28448f8e9f4f54a5125893fde60..f5113fa737daccfac560d46f05cdf592a5ed1083 100644
--- a/scripts/test/ISISPowderSampleDetailsTest.py
+++ b/scripts/test/ISISPowderSampleDetailsTest.py
@@ -2,6 +2,7 @@ from __future__ import (absolute_import, division, print_function)
 
 import mantid
 import io
+import six
 import sys
 import unittest
 
@@ -44,7 +45,8 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
         self.assertEqual(sample_details_obj_str.radius, float(height_radius_string))
         self.assertEqual(sample_details_obj_str.center, [2.0, 3.0, 5.0])
 
-    def test_constructor_non_numeric_input(self):
+
+    def test_constructor_non_number_input(self):
         good_input = 1.0
         good_center_input = [1.0, 2.0, 3.0]
         empty_input_value = ''
@@ -107,7 +109,7 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
             sample_details.SampleDetails(height=good_input, radius=good_input,
                                          center=[zero_value, good_input, good_input])
 
-    def test_set_material(self):
+        def test_set_material(self):
         sample_details_obj = sample_details.SampleDetails(height=1.0, radius=1.0, center=[2, 3, 4])
 
         # Check that we can only set a material once. We will test the underlying class elsewhere
@@ -144,7 +146,7 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
         chemical_formula_one_char_element = 'V'
         chemical_formula_two_char_element = 'Si'
         chemical_formula_complex = 'V Si'  # Yes, this isn't a sensible input but for our tests it will do
-        numeric_density_sample = 1.234
+        number_density_sample = 1.234
 
         material_obj_one_char = sample_details._Material(chemical_formula=chemical_formula_one_char_element)
         self.assertIsNotNone(material_obj_one_char)
@@ -156,26 +158,26 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
         self.assertIsNone(material_obj_one_char.scattering_cross_section)
         self.assertFalse(material_obj_one_char._is_material_props_set)
 
-        # Check if it accepts two character elements without numeric density
+        # Check if it accepts two character elements without number density
         material_obj_two_char = sample_details._Material(chemical_formula=chemical_formula_two_char_element)
         self.assertIsNotNone(material_obj_two_char)
         self.assertEqual(material_obj_two_char.chemical_formula, chemical_formula_two_char_element)
         self.assertIsNone(material_obj_two_char.number_density)
 
-        # Check it stores numeric density if passed
-        material_obj_numeric_density = sample_details._Material(chemical_formula=chemical_formula_two_char_element,
-                                                                numeric_density=numeric_density_sample)
-        self.assertEqual(material_obj_numeric_density.number_density, numeric_density_sample)
+        # Check it stores number density if passed
+        material_obj_number_density = sample_details._Material(chemical_formula=chemical_formula_two_char_element,
+                                                               number_density=number_density_sample)
+        self.assertEqual(material_obj_number_density.number_density, number_density_sample)
 
-        # Check that it raises an error if we have a non-elemental formula without numeric density
-        with assertRaisesRegex(self, ValueError, "A numeric density formula must be set on a chemical formula"):
+        # Check that it raises an error if we have a non-elemental formula without number density
+        with assertRaisesRegex(self, ValueError, "A number density formula must be set on a chemical formula"):
             sample_details._Material(chemical_formula=chemical_formula_complex)
 
-        # Check it constructs if it is given the numeric density too
+        # Check it constructs if it is given the number density too
         material_obj_num_complex_formula = sample_details._Material(chemical_formula=chemical_formula_complex,
-                                                                    numeric_density=numeric_density_sample)
+                                                                    number_density=number_density_sample)
         self.assertEqual(material_obj_num_complex_formula.chemical_formula, chemical_formula_complex)
-        self.assertEqual(material_obj_num_complex_formula.number_density, numeric_density_sample)
+        self.assertEqual(material_obj_num_complex_formula.number_density, number_density_sample)
 
     def test_material_set_properties(self):
         bad_absorb = '-1'
@@ -221,7 +223,7 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
         expected_number_density = 1.2345
 
         # Redirect std out to a capture object
-        std_out_buffer = io.BytesIO()
+        std_out_buffer = get_std_out_buffer_obj()
         sys.stdout = std_out_buffer
 
         sample_details_obj = sample_details.SampleDetails(height=expected_height, radius=expected_radius,
@@ -234,23 +236,23 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
         assertRegex(self, captured_std_out_default, "Center X:" + str(float(expected_center[0])))
         assertRegex(self, captured_std_out_default, "Material has not been set")
 
-        # Test with material set but not numeric density
-        sys.stdout = std_out_buffer = io.BytesIO()
+        # Test with material set but not number density
+        sys.stdout = std_out_buffer = get_std_out_buffer_obj()
         sample_details_obj.set_material(chemical_formula=chemical_formula)
         sample_details_obj.print_sample_details()
         captured_std_out_material_default = std_out_buffer.getvalue()
         assertRegex(self, captured_std_out_material_default, "Material properties:")
         assertRegex(self, captured_std_out_material_default, "Chemical formula: " + chemical_formula)
-        assertRegex(self, captured_std_out_material_default, "Numeric Density: Set from elemental properties")
+        assertRegex(self, captured_std_out_material_default, "Number Density: Set from elemental properties")
 
-        # Test with material and numeric density
-        sys.stdout = std_out_buffer = io.BytesIO()
+        # Test with material and number density
+        sys.stdout = std_out_buffer = get_std_out_buffer_obj()
         sample_details_obj.reset_sample_material()
         sample_details_obj.set_material(chemical_formula=chemical_formula_two, number_density=expected_number_density)
         sample_details_obj.print_sample_details()
         captured_std_out_material_set = std_out_buffer.getvalue()
         assertRegex(self, captured_std_out_material_set, "Chemical formula: " + chemical_formula_two)
-        assertRegex(self, captured_std_out_material_set, "Numeric Density: " + str(expected_number_density))
+        assertRegex(self, captured_std_out_material_set, "Number Density: " + str(expected_number_density))
 
         # Test with no material properties set - we can reuse buffer from previous test
         assertRegex(self, captured_std_out_material_default, "Absorption cross section: Calculated by Mantid")
@@ -261,7 +263,7 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
         expected_scattering_x_section = 5.32
 
         # Test with material set
-        sys.stdout = std_out_buffer = io.BytesIO()
+        sys.stdout = std_out_buffer = get_std_out_buffer_obj()
         sample_details_obj.set_material_properties(absorption_cross_section=expected_abs_x_section,
                                                    scattering_cross_section=expected_scattering_x_section)
         sample_details_obj.print_sample_details()
@@ -273,5 +275,15 @@ class ISISPowderSampleDetailsTest(unittest.TestCase):
         # Ensure std IO is restored. Do NOT remove this line as all std out will pipe into our buffer otherwise
         sys.stdout = sys.__stdout__
 
+def get_std_out_buffer_obj():
+    # Because of the way that strings and bytes
+    # have changed between Python 2/3 we need to
+    # return a buffer which is appropriate to the current version
+    if six.PY2:
+        return io.BytesIO()
+    elif six.PY3:
+        return io.StringIO()
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/scripts/test/SANS/CMakeLists.txt b/scripts/test/SANS/CMakeLists.txt
index 5d5c749db332d426e2c53f150ca9d531c813b7e2..5982d712d802e829a58ec14882f432892a584d6d 100644
--- a/scripts/test/SANS/CMakeLists.txt
+++ b/scripts/test/SANS/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_subdirectory(algorithm_detail)
+add_subdirectory(command_interface)
 add_subdirectory(common)
 add_subdirectory(state)
 add_subdirectory(user_file)
diff --git a/scripts/test/SANS/algorithm_detail/CMakeLists.txt b/scripts/test/SANS/algorithm_detail/CMakeLists.txt
index 5ddbc9ef715f0f977395bbb86c4634e3b2e12703..b1338d36883949399575d893faaafb87c79b9479 100644
--- a/scripts/test/SANS/algorithm_detail/CMakeLists.txt
+++ b/scripts/test/SANS/algorithm_detail/CMakeLists.txt
@@ -4,8 +4,10 @@
 
 set ( TEST_PY_FILES
   calculate_transmission_helper_test.py
+  merge_reductions_test.py
   q_resolution_calculator_test.py
   scale_helper_test.py
+  strip_end_nans_test.py
 )
 
 check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
diff --git a/scripts/test/SANS/algorithm_detail/merge_reductions_test.py b/scripts/test/SANS/algorithm_detail/merge_reductions_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..1899efca258601a3297be98f1abf7ac5996e72ee
--- /dev/null
+++ b/scripts/test/SANS/algorithm_detail/merge_reductions_test.py
@@ -0,0 +1,202 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+from sans.algorithm_detail.merge_reductions import (MergeFactory, ISIS1DMerger)
+from sans.algorithm_detail.bundles import OutputPartsBundle
+
+from sans.state.reduction_mode import StateReductionMode
+from sans.test_helper.test_director import TestDirector
+
+from sans.common.enums import (ISISReductionMode, ReductionDimensionality, FitModeForMerge)
+from sans.common.general_functions import create_unmanaged_algorithm
+from sans.common.constants import EMPTY_NAME
+from sans.common.enums import (DataType, ISISReductionMode)
+
+
+class MergeReductionsTest(unittest.TestCase):
+    @staticmethod
+    def create_1D_workspace(data_x, data_y):
+        create_name = "CreateWorkspace"
+        create_options = {'DataX': data_x,
+                          'DataY': data_y,
+                          'NSpec': 1,
+                          'UnitX': 'MomentumTransfer',
+                          "OutputWorkspace": EMPTY_NAME}
+        create_alg = create_unmanaged_algorithm(create_name, **create_options)
+        create_alg.execute()
+        return create_alg.getProperty('OutputWorkspace').value
+
+    @staticmethod
+    def _get_simple_state(fit_type=FitModeForMerge.NoFit, scale=1.0, shift=0.0):
+        # Set the reduction parameters
+        reduction_info = StateReductionMode()
+        reduction_info.reduction_mode = ISISReductionMode.Merged
+        reduction_info.dimensionality = ReductionDimensionality.TwoDim
+        reduction_info.merge_shift = shift
+        reduction_info.merge_scale = scale
+        reduction_info.merge_fit_mode = fit_type
+
+        # Get the sample state
+        test_director = TestDirector()
+        test_director.set_states(reduction_state=reduction_info)
+        return test_director.construct()
+
+    @staticmethod
+    def _create_workspaces(state, data_type, data_x_lab, data_y_lab_count, data_y_lab_norm,
+                           data_x_hab, data_y_hab_count, data_y_hab_norm):
+        lab_count = MergeReductionsTest.create_1D_workspace(data_x_lab, data_y_lab_count)
+        lab_norm = MergeReductionsTest.create_1D_workspace(data_x_lab, data_y_lab_norm)
+        lab_bundle = OutputPartsBundle(state=state, data_type=data_type, reduction_mode=ISISReductionMode.LAB,
+                                       output_workspace_count=lab_count, output_workspace_norm=lab_norm)
+
+        hab_count = MergeReductionsTest.create_1D_workspace(data_x_hab, data_y_hab_count)
+        hab_norm = MergeReductionsTest.create_1D_workspace(data_x_hab, data_y_hab_norm)
+        hab_bundle = OutputPartsBundle(state=state, data_type=data_type, reduction_mode=ISISReductionMode.HAB,
+                                       output_workspace_count=hab_count, output_workspace_norm=hab_norm)
+        return lab_bundle, hab_bundle
+
+    @staticmethod
+    def _provide_data(state):
+        # Create data for sample
+        data_x_lab = list(range(0, 10))
+        data_y_lab_count = [2.]*10
+        data_y_lab_norm = [1.] * 10
+
+        data_x_hab = list(range(0, 10))
+        data_y_hab_count = [3.] * 10
+        data_y_hab_norm = [4.] * 10
+        sample_lab, sample_hab = MergeReductionsTest._create_workspaces(state, DataType.Sample, data_x_lab,
+                                                                        data_y_lab_count, data_y_lab_norm,
+                                                                        data_x_hab, data_y_hab_count, data_y_hab_norm)
+
+        # Create data for can
+        data_x_lab = list(range(0, 10))
+        data_y_lab_count = [5.]*10
+        data_y_lab_norm = [6.] * 10
+
+        data_x_hab = list(range(0, 10))
+        data_y_hab_count = [7.] * 10
+        data_y_hab_norm = [8.] * 10
+        can_lab, can_hab = MergeReductionsTest._create_workspaces(state, DataType.Can, data_x_lab,
+                                                                  data_y_lab_count, data_y_lab_norm,
+                                                                  data_x_hab, data_y_hab_count, data_y_hab_norm)
+        return sample_lab, sample_hab, can_lab, can_hab
+
+    def test_that_correct_merger_is_generated(self):
+        # Arrange
+        state = self._get_simple_state()
+        merge_factory = MergeFactory()
+
+        # Act
+        merger = merge_factory.create_merger(state)
+
+        # Assert
+        self.assertTrue(isinstance(merger, ISIS1DMerger))
+
+    def test_that_can_merge_without_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.NoFit
+        scale_input = 32.0
+        shift_input = 12.65
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        scale = result.scale
+        shift = result.shift
+        self.assertTrue(abs(scale - scale_input) < 1e-4)
+        self.assertTrue(abs(shift - shift_input) < 1e-4)
+
+        # There is an overlap of two bins between HAB and LAB, the values are tested in SANSStitch
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+    def test_that_can_merge_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.Both
+        scale_input = 1.67
+        shift_input = 2.7
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+        scale = result.scale
+        shift = result.shift
+        self.assertTrue(scale != scale_input)
+        self.assertTrue(shift != shift_input)
+        self.assertTrue(abs(scale - (-15.0)) < 1e-4)
+        self.assertTrue(abs(shift - 0.0472222222222) < 1e-4)
+
+    def test_that_can_merge_with_shift_only_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.ShiftOnly
+        scale_input = 1.67
+        shift_input = 2.7
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+        scale = result.scale
+        shift = result.shift
+
+        self.assertTrue(shift != shift_input)
+        self.assertTrue(abs(scale - scale_input) < 1e-4)
+        self.assertTrue(abs(shift - 0.823602794411) < 1e-4)
+
+    def test_that_can_merge_with_scale_only_fitting(self):
+        # Arrange
+        fit_type = FitModeForMerge.ScaleOnly
+        scale_input = 1.67
+        shift_input = 2.7
+        state = self._get_simple_state(fit_type, scale_input, shift_input)
+        merge_factory = MergeFactory()
+        merger = merge_factory.create_merger(state)
+
+        sample_lab, sample_hab, can_lab, can_hab = self._provide_data(state)
+        bundles = {ISISReductionMode.LAB: [sample_lab, can_lab],
+                   ISISReductionMode.HAB: [sample_hab, can_hab]}
+
+        # Act
+        result = merger.merge(bundles)
+        merged_workspace = result.merged_workspace
+
+        self.assertTrue(merged_workspace.blocksize() == 10)
+
+        scale = result.scale
+        shift = result.shift
+
+        self.assertTrue(scale != scale_input)
+        self.assertTrue(abs(scale-1.0) < 1e-4)
+        self.assertTrue(abs(shift-shift_input) < 1e-4)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANS/algorithm_detail/strip_end_nans_test.py b/scripts/test/SANS/algorithm_detail/strip_end_nans_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..221797117d64bd426934cf7aa315cf69353b7e5c
--- /dev/null
+++ b/scripts/test/SANS/algorithm_detail/strip_end_nans_test.py
@@ -0,0 +1,44 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+from mantid.api import AlgorithmManager
+from sans.algorithm_detail.strip_end_nans_and_infs import strip_end_nans
+
+
+class StripEndNansTest(unittest.TestCase):
+    def _do_test(self, data_x, data_y):
+        # Arrange
+        alg_ws = AlgorithmManager.createUnmanaged("CreateWorkspace")
+        alg_ws.setChild(True)
+        alg_ws.initialize()
+        alg_ws.setProperty("OutputWorkspace", "test")
+
+        alg_ws.setProperty("DataX", data_x)
+        alg_ws.setProperty("DataY", data_y)
+        alg_ws.execute()
+        workspace = alg_ws.getProperty("OutputWorkspace").value
+
+        # Act
+        cropped_workspace = strip_end_nans(workspace)
+        # Assert
+        data_y = cropped_workspace.dataY(0)
+        self.assertTrue(len(data_y) == 5)
+        self.assertTrue(data_y[0] == 36.)
+        self.assertTrue(data_y[1] == 44.)
+        self.assertTrue(data_y[2] == 52.)
+        self.assertTrue(data_y[3] == 63.)
+        self.assertTrue(data_y[4] == 75.)
+
+    def test_that_can_strip_end_nans_and_infs_for_point_workspace(self):
+        data_x = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
+        data_y = [float("Nan"), float("Inf"), 36., 44., 52., 63., 75., float("Inf"), float("Nan"), float("Inf")]
+        self._do_test(data_x, data_y)
+
+    def test_that_can_strip_end_nans_and_infs_for_histo_workspace(self):
+        data_x = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.]
+        data_y = [float("Nan"), float("Inf"), 36., 44., 52., 63., 75., float("Inf"), float("Nan"), float("Inf")]
+        self._do_test(data_x, data_y)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANS/command_interface/CMakeLists.txt b/scripts/test/SANS/command_interface/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3826c64dc13722c4962608f374676c747c0b71f7
--- /dev/null
+++ b/scripts/test/SANS/command_interface/CMakeLists.txt
@@ -0,0 +1,13 @@
+##
+## Tests for SANS
+##
+
+set ( TEST_PY_FILES
+  batch_csv_file_parser_test.py
+  command_interface_state_director_test.py
+)
+
+check_tests_valid ( ${CMAKE_CURRENT_SOURCE_DIR} ${TEST_PY_FILES} )
+
+# Prefix for test name=PythonAlgorithms
+pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR} PythonAlgorithmsSANS ${TEST_PY_FILES} )
diff --git a/scripts/test/SANS/command_interface/batch_csv_file_parser_test.py b/scripts/test/SANS/command_interface/batch_csv_file_parser_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..98996ca7ef0d9d5ad2dd2a921cb744729a598207
--- /dev/null
+++ b/scripts/test/SANS/command_interface/batch_csv_file_parser_test.py
@@ -0,0 +1,144 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import os
+import mantid
+from sans.common.enums import BatchReductionEntry
+from sans.common.constants import ALL_PERIODS
+from sans.command_interface.batch_csv_file_parser import BatchCsvParser
+
+
+class BatchCsvParserTest(unittest.TestCase):
+
+    @staticmethod
+    def _save_to_csv(content):
+        test_file_path = os.path.join(mantid.config.getString('defaultsave.directory'), 'sans_batch_test_file.csv')
+        BatchCsvParserTest._remove_csv(test_file_path)
+
+        with open(test_file_path, 'w') as f:
+            f.write(content)
+        return test_file_path
+
+    @staticmethod
+    def _remove_csv(test_file_path):
+        if os.path.exists(test_file_path):
+            os.remove(test_file_path)
+
+    def test_that_raises_when_unknown_keyword_is_used(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,74044,output_as,test,new_key_word,test\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_raises_if_the_batch_file_contains_an_uneven_number_of_entries(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,74044,sample_trans,74024,sample_direct_beam,74014,can_sans,74019,can_trans,74020," \
+                   "can_direct_beam,output_as, first_eim\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_sample_scatter_is_missing(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,,output_as,test_file\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_output_is_missing(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_sample_transmission_is_specified_incompletely(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,test, sample_trans,test, sample_direct_beam,\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_can_transmission_is_specified_incompletely(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,test, can_trans,, can_direct_beam, test\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_raises_when_can_transmission_is_specified_but_no_can_scatter(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,test,output_as,test, can_trans,, can_direct_beam, test\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+        self.assertRaises(RuntimeError, parser.parse_batch_file)
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_parses_two_lines_correctly(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,1,sample_trans,2,sample_direct_beam,3,output_as,test_file,user_file,user_test_file\n" \
+                   "sample_sans,1,can_sans,2,output_as,test_file2\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+
+        # Act
+        output = parser.parse_batch_file()
+
+        # Assert
+        self.assertTrue(len(output) == 2)
+
+        first_line = output[0]
+        # Should have 5 user specified entries and 3 period entries
+        self.assertTrue(len(first_line) == 8)
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatter] == "1")
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatterPeriod] == ALL_PERIODS)
+        self.assertTrue(first_line[BatchReductionEntry.SampleTransmission] == "2")
+        self.assertTrue(first_line[BatchReductionEntry.SampleTransmissionPeriod] == ALL_PERIODS)
+        self.assertTrue(first_line[BatchReductionEntry.SampleDirect] == "3")
+        self.assertTrue(first_line[BatchReductionEntry.SampleDirectPeriod] == ALL_PERIODS)
+        self.assertTrue(first_line[BatchReductionEntry.Output] == "test_file")
+        self.assertTrue(first_line[BatchReductionEntry.UserFile] == "user_test_file")
+        second_line = output[1]
+
+        # Should have 3 user specified entries and 2 period entries
+        self.assertTrue(len(second_line) == 5)
+        self.assertTrue(second_line[BatchReductionEntry.SampleScatter] == "1")
+        self.assertTrue(second_line[BatchReductionEntry.SampleScatterPeriod] == ALL_PERIODS)
+        self.assertTrue(second_line[BatchReductionEntry.CanScatter] == "2")
+        self.assertTrue(second_line[BatchReductionEntry.CanScatterPeriod] == ALL_PERIODS)
+        self.assertTrue(second_line[BatchReductionEntry.Output] == "test_file2")
+
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+    def test_that_parses_period_selection(self):
+        content = "# MANTID_BATCH_FILE add more text here\n" \
+                   "sample_sans,1p7,can_sans,2P3,output_as,test_file2\n"
+        batch_file_path = BatchCsvParserTest._save_to_csv(content)
+        parser = BatchCsvParser(batch_file_path)
+
+        # Act
+        output = parser.parse_batch_file()
+
+        # Assert
+        self.assertTrue(len(output) == 1)
+
+        first_line = output[0]
+        # Should have 5 user specified entries and 3 period entries
+        self.assertTrue(len(first_line) == 5)
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatter] == "1")
+        self.assertTrue(first_line[BatchReductionEntry.SampleScatterPeriod] == 7)
+        self.assertTrue(first_line[BatchReductionEntry.CanScatter] == "2")
+        self.assertTrue(first_line[BatchReductionEntry.CanScatterPeriod] == 3)
+        self.assertTrue(first_line[BatchReductionEntry.Output] == "test_file2")
+
+        BatchCsvParserTest._remove_csv(batch_file_path)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANS/command_interface/command_interface_state_director_test.py b/scripts/test/SANS/command_interface/command_interface_state_director_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffcf56e17b9d1fc9dee733a5fbc8ba8bc1d1afd3
--- /dev/null
+++ b/scripts/test/SANS/command_interface/command_interface_state_director_test.py
@@ -0,0 +1,201 @@
+from __future__ import (absolute_import, division, print_function)
+import unittest
+import mantid
+from sans.command_interface.command_interface_state_director import (NParameterCommand, NParameterCommandId,
+                                                                     CommandInterfaceStateDirector, DataCommand,
+                                                                     DataCommandId, FitData)
+from sans.common.enums import (SANSFacility, RebinType, DetectorType, ReductionDimensionality,
+                               FitType, RangeStepType, ISISReductionMode, FitModeForMerge, DataType)
+
+
+class CommandInterfaceStateDirectorTest(unittest.TestCase):
+    def _assert_raises_nothing(self, func, parameter):
+        try:
+            func(parameter)
+        except:  # noqa
+            self.fail()
+
+    def test_can_set_commands_without_exceptions(self):
+        command_interface = CommandInterfaceStateDirector(SANSFacility.ISIS)
+
+        # User file
+        command = NParameterCommand(command_id=NParameterCommandId.user_file,
+                                    values=["test_user_file_sans2d.txt"])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Mask
+        command = NParameterCommand(command_id=NParameterCommandId.mask,
+                                    values=["MASK/ FRONT H197>H199"])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Monitor spectrum (incident monitor for monitor normalization)
+        command = NParameterCommand(command_id=NParameterCommandId.incident_spectrum,
+                                    values=[1, True, False])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Transmission spectrum (incident monitor for transmission calculation)
+        command = NParameterCommand(command_id=NParameterCommandId.incident_spectrum, values=[7, False, True])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Reduction Dimensionality One Dim
+        command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                    values=[ReductionDimensionality.OneDim])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Reduction Dimensionality Two Dim
+        command = NParameterCommand(command_id=NParameterCommandId.reduction_dimensionality,
+                                    values=[ReductionDimensionality.TwoDim])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Sample offset
+        command = NParameterCommand(command_id=NParameterCommandId.sample_offset, values=[23.6])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Sample scatter data
+        command = DataCommand(command_id=DataCommandId.sample_scatter, file_name="SANS2D00022024", period=3)
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Detector
+        command = NParameterCommand(command_id=NParameterCommandId.detector, values=[ISISReductionMode.HAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Gravity
+        command = NParameterCommand(command_id=NParameterCommandId.gravity, values=[True, 12.4])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Set centre
+        command = NParameterCommand(command_id=NParameterCommandId.centre, values=[12.4, 23.54, DetectorType.HAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # # Trans fit
+        command = NParameterCommand(command_id=NParameterCommandId.trans_fit, values=[FitData.Can, 10.4, 12.54,
+                                                                                      FitType.Log, 0])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Front detector rescale
+        command = NParameterCommand(command_id=NParameterCommandId.front_detector_rescale, values=[1.2, 2.4, True,
+                                                                                                   False, None, 7.2])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Event slices
+        command = NParameterCommand(command_id=NParameterCommandId.event_slices, values="1-23,55:3:65")
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Flood file
+        command = NParameterCommand(command_id=NParameterCommandId.flood_file, values=["test", DetectorType.LAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Phi limits
+        command = NParameterCommand(command_id=NParameterCommandId.phi_limit, values=[12.5, 123.6, False])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Wavelength correction file
+        command = NParameterCommand(command_id=NParameterCommandId.wavelength_correction_file,
+                                    values=["test", DetectorType.HAB])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Radius mask
+        command = NParameterCommand(command_id=NParameterCommandId.mask_radius,
+                                    values=[23.5, 234.7])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Wavelength limits
+        command = NParameterCommand(command_id=NParameterCommandId.wavelength_limit,
+                                    values=[1.23, 23., 1.1, RangeStepType.Lin])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # QXY Limits
+        command = NParameterCommand(command_id=NParameterCommandId.qxy_limit,
+                                    values=[1.23, 23., 1.1, RangeStepType.Lin])
+        self._assert_raises_nothing(command_interface.add_command, command)
+
+        # Process all commands
+        state = command_interface.process_commands()
+
+        # Assert
+        # We check here that the elements we set up above (except for from the user file) are being applied
+        self.assertTrue(state is not None)
+        self.assertTrue(state.mask.detectors[DetectorType.to_string(DetectorType.HAB)].range_horizontal_strip_start[-1]
+                        == 197)
+        self.assertTrue(state.mask.detectors[DetectorType.to_string(DetectorType.HAB)].range_horizontal_strip_stop[-1]
+                        == 199)
+        self.assertTrue(state.adjustment.normalize_to_monitor.incident_monitor == 1)
+        self.assertTrue(state.adjustment.normalize_to_monitor.rebin_type is RebinType.InterpolatingRebin)
+        self.assertTrue(state.adjustment.calculate_transmission.incident_monitor == 7)
+        self.assertTrue(state.adjustment.calculate_transmission.rebin_type is RebinType.Rebin)
+        self.assertTrue(state.reduction.reduction_dimensionality is ReductionDimensionality.TwoDim)
+        self.assertTrue(state.convert_to_q.reduction_dimensionality is ReductionDimensionality.TwoDim)
+        self.assertTrue(state.move.sample_offset == 23.6/1000.)
+        self.assertTrue(state.data.sample_scatter == "SANS2D00022024")
+        self.assertTrue(state.data.sample_scatter_period == 3)
+        self.assertTrue(state.reduction.reduction_mode is ISISReductionMode.HAB)
+        self.assertTrue(state.convert_to_q.use_gravity)
+        self.assertTrue(state.convert_to_q.gravity_extra_length == 12.4)
+        self.assertTrue(state.move.detectors[DetectorType.to_string(DetectorType.HAB)].sample_centre_pos1 == 12.4/1000.)
+        self.assertTrue(state.move.detectors[DetectorType.to_string(DetectorType.HAB)].sample_centre_pos2
+                        == 23.54/1000.)
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].fit_type
+                        is FitType.Log)
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].polynomial_order
+                        == 0)
+
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].wavelength_low
+                        == 10.4)
+        self.assertTrue(state.adjustment.calculate_transmission.fit[DataType.to_string(DataType.Can)].wavelength_high
+                        == 12.54)
+
+        self.assertTrue(state.reduction.merge_scale == 1.2)
+        self.assertTrue(state.reduction.merge_shift == 2.4)
+        self.assertTrue(state.reduction.merge_fit_mode is FitModeForMerge.ScaleOnly)
+        self.assertTrue(state.reduction.merge_range_min is None)
+        self.assertTrue(state.reduction.merge_range_max == 7.2)
+
+        # Event slices
+        start_values = state.slice.start_time
+        end_values = state.slice.end_time
+        expected_start_values = [1., 55., 58., 61., 64.]
+        expected_end_values = [23., 58., 61., 64., 65.]
+        for s1, e1, s2, e2 in zip(start_values, end_values, expected_start_values, expected_end_values):
+            self.assertTrue(s1 == s2)
+            self.assertTrue(e1 == e2)
+
+        self.assertTrue(state.adjustment.wavelength_and_pixel_adjustment.adjustment_files[
+                            DetectorType.to_string(DetectorType.LAB)].pixel_adjustment_file == "test")
+        self.assertTrue(state.mask.phi_min == 12.5)
+        self.assertTrue(state.mask.phi_max == 123.6)
+        self.assertFalse(state.mask.use_mask_phi_mirror)
+        self.assertTrue(state.adjustment.wavelength_and_pixel_adjustment.adjustment_files[
+                            DetectorType.to_string(DetectorType.HAB)].wavelength_adjustment_file == "test")
+        self.assertTrue(state.mask.radius_min == 23.5 / 1000.)
+        self.assertTrue(state.mask.radius_max == 234.7 / 1000.)
+        self.assertTrue(state.wavelength.wavelength_low == 1.23)
+        self.assertTrue(state.adjustment.normalize_to_monitor.wavelength_high == 23.)
+        self.assertTrue(state.adjustment.wavelength_and_pixel_adjustment.wavelength_step == 1.1)
+        self.assertTrue(state.adjustment.calculate_transmission.wavelength_step_type is RangeStepType.Lin)
+        self.assertTrue(state.convert_to_q.q_xy_max == 23.)
+        self.assertTrue(state.convert_to_q.q_xy_step == 1.1)
+        self.assertTrue(state.convert_to_q.q_xy_step_type is RangeStepType.Lin)
+
+    def test_that_can_remove_last_command(self):
+        # Arrange
+        command_interface = CommandInterfaceStateDirector(SANSFacility.ISIS)
+        command_interface.add_command(NParameterCommand(command_id=NParameterCommandId.user_file,
+                                                        values=["file_1.txt"]))
+        command_interface.add_command(NParameterCommand(command_id=NParameterCommandId.user_file,
+                                                        values=["file_2.txt"]))
+        command_interface.add_command(NParameterCommand(command_id=NParameterCommandId.user_file,
+                                                        values=["file_3.txt"]))
+        # Act
+        commands = command_interface.get_commands()
+        self.assertTrue(len(commands) == 3)
+
+        command_interface.remove_last_user_file()
+
+        # Assert
+        self.assertTrue(len(commands) == 2)
+        last_command = commands[-1]
+        self.assertTrue(last_command.values == ["file_2.txt"])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/scripts/test/SANSUtilityTest.py b/scripts/test/SANSUtilityTest.py
index 41c4f82cfd8e317b35902390e69285919933a434..796bc47d9d18d2c439d9737e7a12f0c2e609bd39 100644
--- a/scripts/test/SANSUtilityTest.py
+++ b/scripts/test/SANSUtilityTest.py
@@ -1598,10 +1598,13 @@ class TestSelectNewDetector(unittest.TestCase):
     def test_that_for_SANS2D_correct_settings_are_selected(self):
         self.assertTrue(su.get_correct_combinDet_setting("SANS2d", "rear") == "rear")
         self.assertTrue(su.get_correct_combinDet_setting("SANS2D", "FRONT") == "front")
+        self.assertTrue(su.get_correct_combinDet_setting("SANS2d", "rear-detector") == "rear")
+        self.assertTrue(su.get_correct_combinDet_setting("SANS2D", "FRONT-DETECTOR") == "front")
         self.assertTrue(su.get_correct_combinDet_setting("sAnS2d", "boTH") == "both")
         self.assertTrue(su.get_correct_combinDet_setting("sans2d", "merged") == "merged")
 
     def test_that_for_LOQ_correct_settings_are_selected(self):
+        self.assertTrue(su.get_correct_combinDet_setting("Loq", "main-detector-bank") == "rear")
         self.assertTrue(su.get_correct_combinDet_setting("Loq", "main") == "rear")
         self.assertTrue(su.get_correct_combinDet_setting("LOQ", "Hab") == "front")
         self.assertTrue(su.get_correct_combinDet_setting("lOQ", "boTH") == "both")