From 0ae726e1abeac41433ee9bf24d7df923ad2e4268 Mon Sep 17 00:00:00 2001
From: Marina Ganeva <m.ganeva@fz-juelich.de>
Date: Thu, 8 Oct 2015 11:32:17 +0200
Subject: [PATCH] Algorithm to merge TOFTOF runs.

---
 .../plugins/algorithms/TOFTOFMergeRuns.py     | 190 ++++++++++++++++++
 .../plugins/algorithms/mlzutils.py            |  66 +++++-
 .../python/plugins/algorithms/CMakeLists.txt  |   1 +
 .../plugins/algorithms/TOFTOFMergeRunsTest.py |  93 +++++++++
 docs/source/algorithms/TOFTOFMergeRuns-v1.rst | 150 ++++++++++++++
 5 files changed, 495 insertions(+), 5 deletions(-)
 create mode 100644 Framework/PythonInterface/plugins/algorithms/TOFTOFMergeRuns.py
 create mode 100644 Framework/PythonInterface/test/python/plugins/algorithms/TOFTOFMergeRunsTest.py
 create mode 100644 docs/source/algorithms/TOFTOFMergeRuns-v1.rst

diff --git a/Framework/PythonInterface/plugins/algorithms/TOFTOFMergeRuns.py b/Framework/PythonInterface/plugins/algorithms/TOFTOFMergeRuns.py
new file mode 100644
index 00000000000..8a78f121f11
--- /dev/null
+++ b/Framework/PythonInterface/plugins/algorithms/TOFTOFMergeRuns.py
@@ -0,0 +1,190 @@
+from mantid.kernel import Direction, StringArrayProperty, StringArrayLengthValidator
+from mantid.api import PythonAlgorithm, AlgorithmFactory, WorkspaceProperty, WorkspaceGroup
+import mantid.simpleapi as api
+import numpy as np
+from dateutil.parser import parse
+import mlzutils
+
+
+class TOFTOFMergeRuns(PythonAlgorithm):
+    """ Clean the Sample Logs of workspace after merging for TOFTOF instrument
+    """
+
+    # sample logs that must be identical in all input workspaces
+    mandatory_properties = ['channel_width', 'chopper_ratio', 'chopper_speed', 'Ei', 'wavelength', 'full_channels', 'EPP']
+    # sample logs that only produce a warning if they differ
+    optional_properties = ['temperature', 'run_title']
+    # sample logs that are merged: averaged, summed, min/max or listed
+    properties_to_merge = ['temperature', 'monitor_counts', 'duration', 'run_number', 'run_start', 'run_end']
+    # sample logs that every input workspace must contain
+    must_have_properties = ['monitor_counts', 'duration', 'run_number', 'run_start', 'run_end']
+
+    def __init__(self):
+        """
+        Init
+        """
+        PythonAlgorithm.__init__(self)
+        self.wsNames = []
+
+    def category(self):
+        """ Return category
+        """
+        return "PythonAlgorithms\\MLZ\\TOFTOF;Utility"
+
+    def name(self):
+        """ Return summary
+        """
+        return "TOFTOFMergeRuns"
+
+    def summary(self):
+        return "Merge runs and the sample logs."
+
+    def PyInit(self):
+        """ Declare properties
+        """
+        validator = StringArrayLengthValidator()
+        validator.setLengthMin(1)
+        self.declareProperty(StringArrayProperty(name="InputWorkspaces", direction=Direction.Input, validator=validator),
+                             doc="Comma separated list of workspaces or groups of workspaces.")
+        self.declareProperty(WorkspaceProperty("OutputWorkspace", "", direction=Direction.Output),
+                             doc="Name of the workspace that will contain the merged workspaces.")
+        return
+
+    def _validate_input(self):
+        """
+        Checks that the input is valid:
+            all given workspaces and/or workspace groups must exist;
+            collects the names of the workspaces contained in the groups.
+        """
+        workspaces = self.getProperty("InputWorkspaces").value
+        mlzutils.ws_exist(workspaces, self.log())
+        if len(workspaces) < 1:
+            message = "List of workspaces is empty. Nothing to merge."
+            self.log().error(message)
+            raise RuntimeError(message)
+        for wsname in workspaces:
+            wks = api.AnalysisDataService.retrieve(wsname)
+            if isinstance(wks, WorkspaceGroup):
+                self.wsNames.extend(wks.getNames())
+            else:
+                self.wsNames.append(wsname)
+
+    def _can_merge(self):
+        """
+        Checks whether given workspaces can be merged
+        """
+        # mandatory properties must be identical
+        mlzutils.compare_mandatory(self.wsNames, self.mandatory_properties, self.log())
+
+        # timing (x-axis binning) must match
+        # is it possible to use WorkspaceHelpers::matchingBins from python?
+        self.timingsMatch(self.wsNames)
+
+        # Check sample logs for must have properties
+        for wsname in self.wsNames:
+            wks = api.AnalysisDataService.retrieve(wsname)
+            run = wks.getRun()
+            for prop in self.must_have_properties:
+                if not run.hasProperty(prop):
+                    message = "Error: Workspace " + wsname + " does not have property " + prop +\
+                        ". Cannot merge."
+                    self.log().error(message)
+                    raise RuntimeError(message)
+
+        # a warning must be given if optional properties are not identical
+        ws1 = api.AnalysisDataService.retrieve(self.wsNames[0])
+        run1 = ws1.getRun()
+        for wsname in self.wsNames[1:]:
+            wks = api.AnalysisDataService.retrieve(wsname)
+            run = wks.getRun()
+            mlzutils.compare_properties(run1, run, self.optional_properties, self.log(), tolerance=0.01)
+        return True
+
+    def PyExec(self):
+        """ Main execution body
+        """
+        # get list of input workspaces
+        self._validate_input()
+        workspaceCount = len(self.wsNames)
+        self.log().information("Workspaces to merge " + str(workspaceCount))
+        wsOutput = self.getPropertyValue("OutputWorkspace")
+
+        if workspaceCount < 2:
+            api.CloneWorkspace(InputWorkspace=self.wsNames[0], OutputWorkspace=wsOutput)
+            self.log().warning("Cannot merge one workspace. Clone is produced.")
+            return
+
+        # check whether given workspaces can be merged
+        self._can_merge()
+
+        # delete output workspace if it exists
+        if api.mtd.doesExist(wsOutput):
+            api.DeleteWorkspace(Workspace=wsOutput)
+
+        #  Merge runs
+        api.MergeRuns(InputWorkspaces=self.wsNames, OutputWorkspace=wsOutput)
+
+        # Merge logs
+        # MergeRuns by default copies all logs from the first workspace
+        pdict = {}
+        for prop in self.properties_to_merge:
+            pdict[prop] = []
+
+        for wsname in self.wsNames:
+            wks = api.AnalysisDataService.retrieve(wsname)
+            run = wks.getRun()
+            for prop in self.properties_to_merge:
+                if run.hasProperty(prop):
+                    pdict[prop].append(run.getProperty(prop).value)
+
+        # take average for temperatures
+        nentries = len(pdict['temperature'])
+        if nentries > 0:
+            temps = [float(temp) for temp in pdict['temperature']]
+            tmean = sum(temps)/nentries
+            api.AddSampleLog(Workspace=wsOutput, LogName='temperature', LogText=str(tmean),
+                             LogType='Number', LogUnit='K')
+        # sum monitor counts
+        mcounts = [int(mco) for mco in pdict['monitor_counts']]
+        # check for zero monitor counts
+        zeros = np.where(np.array(mcounts) == 0)[0]
+        if len(zeros) > 0:
+            for index in zeros:
+                self.log().warning("Workspace " + self.wsNames[index] + " has zero monitor counts.")
+        # create sample log
+        api.AddSampleLog(Workspace=wsOutput, LogName='monitor_counts', LogText=str(sum(mcounts)),
+                         LogType='Number')
+        # sum durations
+        durations = [int(dur) for dur in pdict['duration']]
+        api.AddSampleLog(Workspace=wsOutput, LogName='duration', LogText=str(sum(durations)),
+                         LogType='Number', LogUnit='s')
+        # get minimal run_start
+        fmt = "%Y-%m-%dT%H:%M:%S%z"
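+        # run_start/run_end are ISO 8601 strings; parse them into datetime objects
+        # so that min()/max() select the earliest start and the latest end correctly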
+        run_start = [parse(entry) for entry in pdict['run_start']]
+        api.AddSampleLog(Workspace=wsOutput, LogName='run_start',
+                         LogText=min(run_start).strftime(fmt), LogType='String')
+        # get maximal run_end
+        run_end = [parse(entry) for entry in pdict['run_end']]
+        api.AddSampleLog(Workspace=wsOutput, LogName='run_end',
+                         LogText=max(run_end).strftime(fmt), LogType='String')
+        # list of run_numbers
+        api.AddSampleLog(Workspace=wsOutput, LogName='run_number',
+                         LogText=str(pdict['run_number']), LogType='String')
+
+        self.setProperty("OutputWorkspace", wsOutput)
+
+    def timingsMatch(self, wsNames):
+        """
+        Checks that the time-of-flight binning of all given workspaces matches:
+        same bin width and same first bin boundary.
+        :param wsNames: list of workspace names
+        :return: True if the binning matches, otherwise a RuntimeError is raised
+        """
+        # compare each workspace with its right neighbour in the list
+        for i in range(len(wsNames) - 1):
+            leftWorkspace = wsNames[i]
+            rightWorkspace = wsNames[i+1]
+            leftXData = api.mtd[leftWorkspace].dataX(0)
+            rightXData = api.mtd[rightWorkspace].dataX(0)
+            leftDeltaX = leftXData[1] - leftXData[0]
+            rightDeltaX = rightXData[1] - rightXData[0]
+            if abs(leftDeltaX - rightDeltaX) >= 1e-4 or abs(rightXData[0] - leftXData[0]) >= 1e-4:
+                message = "Timings of workspaces " + leftWorkspace + " and " + rightWorkspace + " do not match."
+                self.log().error(message)
+                raise RuntimeError(message)
+        return True
+
+# Register algorithm with Mantid.
+AlgorithmFactory.subscribe(TOFTOFMergeRuns)
diff --git a/Framework/PythonInterface/plugins/algorithms/mlzutils.py b/Framework/PythonInterface/plugins/algorithms/mlzutils.py
index 11929ab069a..3b69302522b 100644
--- a/Framework/PythonInterface/plugins/algorithms/mlzutils.py
+++ b/Framework/PythonInterface/plugins/algorithms/mlzutils.py
@@ -55,7 +55,7 @@ def ws_exist(wslist, logger):
     return True
 
 
-def compare_properties(lhs_run, rhs_run, plist, logger):
+def compare_properties(lhs_run, rhs_run, plist, logger, tolerance=5e-3):
     """
     checks whether properties match in the given runs, produces warnings
         @param lhs_run Left-hand-side run
@@ -65,11 +65,16 @@ def compare_properties(lhs_run, rhs_run, plist, logger):
     """
     lhs_title = ""
     rhs_title = ""
-    if lhs_run.hasProperty('run_title'):
+    if lhs_run.hasProperty('run_title') and rhs_run.hasProperty('run_title'):
         lhs_title = lhs_run.getProperty('run_title').value
-    if rhs_run.hasProperty('run_title'):
         rhs_title = rhs_run.getProperty('run_title').value
 
+    # for TOFTOF run_titles can be identical
+    if lhs_title == rhs_title:
+        if lhs_run.hasProperty('run_number') and rhs_run.hasProperty('run_number'):
+            lhs_title = str(lhs_run.getProperty('run_number').value)
+            rhs_title = str(rhs_run.getProperty('run_number').value)
+
     for property_name in plist:
         if lhs_run.hasProperty(property_name) and rhs_run.hasProperty(property_name):
             lhs_property = lhs_run.getProperty(property_name)
@@ -81,8 +86,8 @@ def compare_properties(lhs_run, rhs_run, plist, logger):
                             lhs_title + ": " + lhs_property.value + ", but " + \
                             rhs_title + ": " + rhs_property.value
                         logger.warning(message)
-                if lhs_property.type == 'number':
-                    if abs(lhs_property.value - rhs_property.value) > 5e-3:
+                elif lhs_property.type == 'number':
+                    if abs(lhs_property.value - rhs_property.value) > tolerance:
                         message = "Property " + property_name + " does not match! " + \
                             lhs_title + ": " + str(lhs_property.value) + ", but " + \
                             rhs_title + ": " + str(rhs_property.value)
@@ -98,3 +103,54 @@ def compare_properties(lhs_run, rhs_run, plist, logger):
                 lhs_title + " or " + rhs_title + " - skipping comparison."
             logger.warning(message)
     return
+
+
+def compare_mandatory(wslist, plist, logger, tolerance=0.01):
+    """
+    Compares properties which are required to be the same.
+    Produces error message and throws exception if difference is observed
+    or if one of the sample logs is not found.
+    Important: exits after the first difference is observed. No further check is performed.
+        @param wslist  List of workspaces
+        @param plist   List of properties to compare
+        @param logger  Logger self.log()
+        @param tolerance  Tolerance for comparison of the double values.
+    """
+    # retrieve the workspaces, form dictionary {wsname: run}
+    runs = {}
+    for wsname in wslist:
+        wks = api.AnalysisDataService.retrieve(wsname)
+        runs[wsname] = wks.getRun()
+
+    for prop in plist:
+        properties = []
+        for wsname in wslist:
+            run = runs[wsname]
+            if not run.hasProperty(prop):
+                message = "Workspace " + wsname + " does not have sample log " + prop
+                logger.error(message)
+                raise RuntimeError(message)
+
+            curprop = run.getProperty(prop)
+            if curprop.type == 'string':
+                properties.append(curprop.value)
+            elif curprop.type == 'number':
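+                # compare numbers on a coarse grid: values that fall into the same
+                # bin of width 'tolerance' are treated as equal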
+                properties.append(int(curprop.value/tolerance))
+            else:
+                message = "Unknown type " + str(curprop.type) + " for the sample log " +\
+                    prop + " in the workspace " + wsname
+                logger.error(message)
+                raise RuntimeError(message)
+        # this should never happen, but let's check
+        nprop = len(properties)
+        if nprop != len(wslist):
+            message = "Error. Number of properties " + str(nprop) + " for property " + prop +\
+                " is not equal to number of workspaces " + str(len(wslist))
+            logger.error(message)
+            raise RuntimeError(message)
+        pvalue = properties[0]
+        if properties.count(pvalue) != nprop:
+            message = "Sample log " + prop + " is not identical in the given list of workspaces. \n" +\
+                "Workspaces: " + ", ".join(wslist) + "\n Values: " + str(properties)
+            logger.error(message)
+            raise RuntimeError(message)
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
index bb68b040a2e..c12c040cddb 100644
--- a/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/CMakeLists.txt
@@ -75,6 +75,7 @@ set ( TEST_PY_FILES
   UpdatePeakParameterTableValueTest.py
   SANSSubtractTest.py
   TimeSliceTest.py
+  TOFTOFMergeRunsTest.py
   TOSCABankCorrectionTest.py
   TransformToIqtTest.py
   ExportSampleLogsToCSVFileTest.py
diff --git a/Framework/PythonInterface/test/python/plugins/algorithms/TOFTOFMergeRunsTest.py b/Framework/PythonInterface/test/python/plugins/algorithms/TOFTOFMergeRunsTest.py
new file mode 100644
index 00000000000..3e0b4dac93a
--- /dev/null
+++ b/Framework/PythonInterface/test/python/plugins/algorithms/TOFTOFMergeRunsTest.py
@@ -0,0 +1,93 @@
+import unittest
+from mantid.simpleapi import Load, DeleteWorkspace, AddSampleLogMultiple, \
+    DeleteLog
+from testhelpers import run_algorithm
+from mantid.api import AnalysisDataService
+
+
+class TOFTOFMergeRunsTest(unittest.TestCase):
+
+    def setUp(self):
+        input_ws = Load(Filename="TOFTOFTestdata.nxs")
+        self._input_ws_base = input_ws
+        self._input_good = input_ws
+        AddSampleLogMultiple(Workspace=self._input_good, LogNames=['run_number'], LogValues=[1])
+
+        self._input_bad_entry = input_ws+0
+        # remove a compulsory entry in Logs
+        DeleteLog(self._input_bad_entry, 'duration')
+
+        self._input_bad_value = input_ws+0
+        AddSampleLogMultiple(Workspace=self._input_bad_value, LogNames=['wavelength'], LogValues=[0.])
+
+    def test_success(self):
+        OutputWorkspaceName = "output_ws"
+        Inputws = "%s, %s" % (self._input_ws_base.name(), self._input_good.name())
+
+        alg_test = run_algorithm("TOFTOFMergeRuns",
+                                 InputWorkspaces=Inputws,
+                                 OutputWorkspace=OutputWorkspaceName)
+        self.assertTrue(alg_test.isExecuted())
+
+        wsoutput = AnalysisDataService.retrieve(OutputWorkspaceName)
+
+        run_out = wsoutput.getRun()
+        run_in = self._input_ws_base.getRun()
+        self.assertEqual(run_out.getLogData('wavelength').value, run_in.getLogData('wavelength').value)
+        self.assertEqual(run_out.getLogData('chopper_speed').value, run_in.getLogData('chopper_speed').value)
+        self.assertEqual(run_out.getLogData('chopper_ratio').value, run_in.getLogData('chopper_ratio').value)
+        self.assertEqual(run_out.getLogData('channel_width').value, run_in.getLogData('channel_width').value)
+        self.assertEqual(run_out.getLogData('Ei').value, run_in.getLogData('Ei').value)
+        self.assertEqual(run_out.getLogData('EPP').value, run_in.getLogData('EPP').value)
+        self.assertEqual(run_out.getLogData('proposal_number').value, run_in.getLogData('proposal_number').value)
+        self.assertEqual(run_out.getLogData('proposal_title').value, run_in.getLogData('proposal_title').value)
+        self.assertEqual(run_out.getLogData('mode').value, run_in.getLogData('mode').value)
+        self.assertEqual(run_out.getLogData('experiment_team').value, run_in.getLogData('experiment_team').value)
+
+        run_in_good = self._input_good.getRun()
+        self.assertEqual(run_out.getLogData('run_number').value,
+                         str([run_in.getLogData('run_number').value, run_in_good.getLogData('run_number').value]))
+
+        self.assertEqual(run_out.getLogData('temperature').value, float(run_in.getLogData('temperature').value))
+        self.assertEqual(run_out.getLogData('duration').value,
+                         float(run_in.getLogData('duration').value) + float(run_in_good.getLogData('duration').value))
+        self.assertEqual(run_out.getLogData('run_start').value, run_in.getLogData('run_start').value)
+        self.assertEqual(run_out.getLogData('run_end').value, run_in.getLogData('run_end').value)
+        self.assertEqual(run_out.getLogData('full_channels').value, run_in.getLogData('full_channels').value)
+        self.assertEqual(run_out.getLogData('monitor_counts').value, 2*int(run_in.getLogData('monitor_counts').value))
+        # Dimension output workspace
+        self.assertEqual(wsoutput.getNumberHistograms(), self._input_ws_base.getNumberHistograms())
+        self.assertEqual(wsoutput.blocksize(), self._input_ws_base.blocksize())
+        # check instrument
+        self.assertEqual(wsoutput.getInstrument().getName(), "TOFTOF")
+
+        AnalysisDataService.remove("output_ws")
+
+    def test_failed(self):
+        """
+        Tests that merging fails for missing sample logs or different values of mandatory logs.
+        """
+        OutputWorkspaceName = "output_ws"
+        Inputws_badvalue = "%s, %s" % (self._input_ws_base.name(), self._input_bad_value.name())
+        self.assertRaises(RuntimeError,
+                          run_algorithm, 'TOFTOFMergeRuns',
+                          InputWorkspaces=Inputws_badvalue,
+                          OutputWorkspace=OutputWorkspaceName,
+                          rethrow=True)
+
+        Inputws_badentry = "%s, %s" % (self._input_ws_base.name(), self._input_bad_entry.name())
+        self.assertRaises(RuntimeError,
+                          run_algorithm, 'TOFTOFMergeRuns',
+                          InputWorkspaces=Inputws_badentry,
+                          OutputWorkspace=OutputWorkspaceName,
+                          rethrow=True)
+
+        if "output_ws" is not None:
+            AnalysisDataService.remove("output_ws")
+
+    def tearDown(self):
+        if self._input_ws_base is not None:
+            DeleteWorkspace(self._input_ws_base)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/docs/source/algorithms/TOFTOFMergeRuns-v1.rst b/docs/source/algorithms/TOFTOFMergeRuns-v1.rst
new file mode 100644
index 00000000000..25aec61d5f1
--- /dev/null
+++ b/docs/source/algorithms/TOFTOFMergeRuns-v1.rst
@@ -0,0 +1,150 @@
+.. algorithm::
+
+.. summary::
+
+.. alias::
+
+.. properties::
+
+Description
+-----------
+
+Merges workspaces from the given list using the :ref:`algm-MergeRuns` algorithm. Sample logs are merged in the following way.
+
++---------++-------------------------------+
+| Type of || Parameter                     |
+| merging ||                               |
++=========++===============================+
+| Average || temperature                   |
++---------++-------------------------------+
+| Minimum || run_start                     |
++---------++-------------------------------+
+| Maximum || run_end                       |
++---------++-------------------------------+
+| Summed  || duration, monitor_counts      |
++---------++-------------------------------+
+| Listed  || run_number                    |
++---------++-------------------------------+
+
+Other sample logs are copied from the first workspace.
+
+**Valid input workspaces**
+
+The algorithm accepts both matrix workspaces and groups of matrix workspaces. Valid input workspaces
+
+- must have the following sample logs: *channel_width*, *chopper_ratio*, *chopper_speed*, *Ei*, *wavelength*, *full_channels*, *EPP*, *monitor_counts*, *duration*, *run_number*, *run_start*, *run_end*;
+- must have identical values of the following sample logs: *channel_width*, *chopper_ratio*, *chopper_speed*, *Ei*, *wavelength*, *full_channels*, *EPP* (the tolerance for the comparison of numerical values is 0.01);
+- must each have common binning for all of their spectra.
+
+If these conditions are not fulfilled, the algorithm terminates with an error message.
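+
+Whether a workspace fulfils these requirements can be checked beforehand, for example
+with a short sketch like the following (it assumes a workspace loaded with ``LoadMLZ``,
+as in the usage examples below; the list of log names is the one given above):
+
+.. code-block:: python
+
+    ws = LoadMLZ(Filename='TOFTOFTestdata.nxs')
+    # sample logs required by TOFTOFMergeRuns
+    required_logs = ['channel_width', 'chopper_ratio', 'chopper_speed', 'Ei', 'wavelength',
+                     'full_channels', 'EPP', 'monitor_counts', 'duration', 'run_number',
+                     'run_start', 'run_end']
+    run = ws.getRun()
+    # collect the names of required sample logs that are absent from the workspace
+    missing = [name for name in required_logs if not run.hasProperty(name)]
+    print "Missing sample logs:", missing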
+
+The sample log *temperature* is optional. If it is present in any of the input workspaces, the mean value is calculated. Otherwise, no *temperature* sample log is created in the output workspace.
+
+The algorithm produces a warning if
+
+- the *temperature* or *run_title* sample logs are missing or differ between the input workspaces,
+- some of the input workspaces have zero monitor counts.
+
+Usage
+-----
+
+**Example - Merge list of workspaces**
+
+.. testcode:: ExTOFTOFMergeRuns2ws
+
+    ws1 = LoadMLZ(Filename='TOFTOFTestdata.nxs')
+    ws2 = LoadMLZ(Filename='TOFTOFTestdata.nxs')
+
+    # change some sample logs of the second workspace; not needed for real data, where they differ anyway
+    lognames = 'temperature,run_start,run_end,monitor_counts,run_number'
+    logvalues = '296.15,2013-07-28T11:32:19+0053,2013-07-28T12:32:19+0053,145145,TOFTOFTestdata2'
+    AddSampleLogMultiple(ws2, lognames, logvalues)
+
+    # Input = list of workspaces
+    ws3 = TOFTOFMergeRuns('ws1,ws2')
+
+    # Temperature 
+    print "Temperature of experiment for 1st workspace (in K): ", ws1.getRun().getLogData('temperature').value
+    print "Temperature of experiment for 2nd workspace (in K): ", ws2.getRun().getLogData('temperature').value
+    print "Temperature of experiment for merged workspaces = average over workspaces (in K): ",  ws3.getRun().getLogData('temperature').value
+
+    # Duration
+    print "Duration of experiment for 1st workspace (in s): ",  ws1.getRun().getLogData('duration').value
+    print "Duration of experiment for 2nd workspace (in s): ",  ws2.getRun().getLogData('duration').value
+    print "Duration of experiment for merged workspaces = sum of all durations (in s): ",  ws3.getRun().getLogData('duration').value
+
+    # Run start 
+    print "Start of experiment for 1st workspace: ",  ws1.getRun().getLogData('run_start').value
+    print "Start of experiment for 2nd workspace: ",  ws2.getRun().getLogData('run_start').value
+    print "Start of experiment for merged workspaces = miminum of all workspaces: ",  ws3.getRun().getLogData('run_start').value
+
+    # Run end 
+    print "End of experiment for 1st workspace: ",  ws1.getRun().getLogData('run_end').value
+    print "End of experiment for 2nd workspace: ",  ws2.getRun().getLogData('run_end').value
+    print "End of experiment for merged workspaces = maximum of all workspaces: ",  ws3.getRun().getLogData('run_end').value
+    
+    # Run number 
+    print "Run number for 1st workspace: ",  ws1.getRun().getLogData('run_number').value
+    print "Run number for 2nd workspace: ",  ws2.getRun().getLogData('run_number').value
+    print "Run number for merged workspaces = list of all workspaces: ",  ws3.getRun().getLogData('run_number').value      
+ 
+    # Monitor counts
+    print "Monitor counts for 1st workspace: ",  ws1.getRun().getLogData('monitor_counts').value
+    print "Monitor counts for 2nd workspace: ",  ws2.getRun().getLogData('monitor_counts').value
+    print "Monitor counts for merged workspaces = sum over all workspaces: ",  ws3.getRun().getLogData('monitor_counts').value      
+   
+
+Output:
+
+.. testoutput:: ExTOFTOFMergeRuns2ws
+
+    Temperature of experiment for 1st workspace (in K):  294.149414
+    Temperature of experiment for 2nd workspace (in K):  296.15
+    Temperature of experiment for merged workspaces = average over workspaces (in K):  295.149707
+    Duration of experiment for 1st workspace (in s):  3601
+    Duration of experiment for 2nd workspace (in s):  3601
+    Duration of experiment for merged workspaces = sum of all durations (in s):  7202
+    Start of experiment for 1st workspace:  2013-07-28T10:32:19+0053
+    Start of experiment for 2nd workspace:  2013-07-28T11:32:19+0053
+    Start of experiment for merged workspaces = minimum of all workspaces:  2013-07-28T10:32:19+0053
+    End of experiment for 1st workspace:  2013-07-28T11:32:20+0053
+    End of experiment for 2nd workspace:  2013-07-28T12:32:19+0053
+    End of experiment for merged workspaces = maximum of all workspaces:  2013-07-28T12:32:19+0053
+    Run number for 1st workspace:  TOFTOFTestdata
+    Run number for 2nd workspace:  TOFTOFTestdata2
+    Run number for merged workspaces = list of all workspaces:  ['TOFTOFTestdata', 'TOFTOFTestdata2']
+    Monitor counts for 1st workspace:  136935
+    Monitor counts for 2nd workspace:  145145
+    Monitor counts for merged workspaces = sum over all workspaces:  282080
+
+**Example - Merge group of workspaces**
+
+.. testcode:: ExTOFTOFMergeRunsGroup
+
+    ws1 = LoadMLZ(Filename='TOFTOFTestdata.nxs')
+    ws2 = LoadMLZ(Filename='TOFTOFTestdata.nxs')
+
+    # change some sample logs of the second workspace; not needed for real data, where they differ anyway
+    lognames = 'temperature,run_start,run_end,monitor_counts,run_number'
+    logvalues = '296.15,2013-07-28T11:32:19+0053,2013-07-28T12:32:19+0053,145145,TOFTOFTestdata2'
+    AddSampleLogMultiple(ws2, lognames, logvalues)
+
+    group = GroupWorkspaces('ws1,ws2')
+    groupmerged = TOFTOFMergeRuns(group)
+    print "Monitor counts for 1st workspace: ",  ws1.getRun().getLogData('monitor_counts').value
+    print "Monitor counts for 2nd workspace: ",  ws2.getRun().getLogData('monitor_counts').value
+    print "Monitor counts for merged workspaces = sum over all workspaces: ",  groupmerged.getRun().getLogData('monitor_counts').value         
+
+Output:
+
+.. testoutput:: ExTOFTOFMergeRunsGroup
+
+    Monitor counts for 1st workspace:  136935
+    Monitor counts for 2nd workspace:  145145
+    Monitor counts for merged workspaces = sum over all workspaces:  282080
+
+.. categories::
+
+.. sourcelink::
+
-- 
GitLab