From 98041ca89fda676a096bd38e1a6e3a20961a52ba Mon Sep 17 00:00:00 2001
From: Martyn Gigg <martyn.gigg@stfc.ac.uk>
Date: Mon, 9 Feb 2015 10:49:15 +0000
Subject: [PATCH] Initial dump of system test supporting framework

Some modules were dropped from the other repository as they were
not used, namely
 - emailreporter.py
 - sqlresultreporter.py
 - StressTests directory.
Fixes for path changes etc will still be required for this to work.
Refs #10870
---
 .../lib/systemtests/algorithm_decorator.py    |   61 +
 .../lib/systemtests/stresstesting.py          | 1043 +++++++++++++++++
 .../lib/systemtests/xmlreporter.py            |   86 ++
 .../SystemTests/scripts/InstallerTests.py     |  127 ++
 .../SystemTests/scripts/mantidinstaller.py    |  242 ++++
 .../scripts/performance/README.txt            |    1 +
 .../scripts/performance/analysis.py           |  699 +++++++++++
 .../scripts/performance/make_report.py        |   84 ++
 .../scripts/performance/reporters.py          |  126 ++
 .../scripts/performance/sqlresults.py         |  327 ++++++
 .../scripts/performance/testresult.py         |  120 ++
 .../scripts/performance/xunit_to_sql.py       |  137 +++
 .../SystemTests/scripts/runSystemTests.py     |  118 ++
 .../tests/analysis/reference/README.md        |    1 +
 14 files changed, 3172 insertions(+)
 create mode 100644 Code/Mantid/Testing/SystemTests/lib/systemtests/algorithm_decorator.py
 create mode 100644 Code/Mantid/Testing/SystemTests/lib/systemtests/stresstesting.py
 create mode 100644 Code/Mantid/Testing/SystemTests/lib/systemtests/xmlreporter.py
 create mode 100644 Code/Mantid/Testing/SystemTests/scripts/InstallerTests.py
 create mode 100644 Code/Mantid/Testing/SystemTests/scripts/mantidinstaller.py
 create mode 100644 Code/Mantid/Testing/SystemTests/scripts/performance/README.txt
 create mode 100644 Code/Mantid/Testing/SystemTests/scripts/performance/analysis.py
 create mode 100755 Code/Mantid/Testing/SystemTests/scripts/performance/make_report.py
 create mode 100644 Code/Mantid/Testing/SystemTests/scripts/performance/reporters.py
 create mode 100644 Code/Mantid/Testing/SystemTests/scripts/performance/sqlresults.py
 create mode 100644 Code/Mantid/Testing/SystemTests/scripts/performance/testresult.py
 create mode 100755 Code/Mantid/Testing/SystemTests/scripts/performance/xunit_to_sql.py
 create mode 100755 Code/Mantid/Testing/SystemTests/scripts/runSystemTests.py
 create mode 100644 Code/Mantid/Testing/SystemTests/tests/analysis/reference/README.md

diff --git a/Code/Mantid/Testing/SystemTests/lib/systemtests/algorithm_decorator.py b/Code/Mantid/Testing/SystemTests/lib/systemtests/algorithm_decorator.py
new file mode 100644
index 00000000000..91c0fd54546
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/lib/systemtests/algorithm_decorator.py
@@ -0,0 +1,61 @@
+import inspect
+import re
+
+def make_decorator(algorithm_to_decorate):
+    """
+    Dynamically create a builder pattern style decorator around a Mantid algorithm.
+    This allows you to separate out setting algorithm parameters from the actual method execution. Parameters may be reset multiple times.
+    
+    Usage:
+     rebin = make_decorator(Rebin)
+     rebin.set_Params([0, 0.1, 1])
+     ....
+     rebin.execute()
+    
+    Arguments:
+     algorithm_to_decorate: The mantid.simpleapi algorithm to decorate.
+     
+     
+    
+    """
+    
+    class Decorator(object):
+        
+        def __init__(self, alg_subject):
+            self.__alg_subject = alg_subject
+            self.__parameters__ = dict()
+        
+        def execute(self, additional=None, verbose=False):
+            if verbose:
+                print "Algorithm Parameters:"
+                print self.__parameters__
+                print 
+            out = self.__alg_subject(**self.__parameters__)
+            return out
+        
+        def set_additional(self, additional):
+            self.__parameters__.update(**additional)
+
+    def add_getter_setter(type, name):
+        
+        def setter(self, x):
+            self.__parameters__[name] = x
+            
+        def getter(self):
+            return self.__parameters__[name]
+            
+        setattr(type, "set_" + name, setter)
+        setattr(type, "get_" + name, getter)
+
+
+    argspec = inspect.getargspec(algorithm_to_decorate)
+    for parameter in argspec.varargs.split(','):
+        m = re.search('(^\w+)', parameter) # Take the parameter key part from the defaults given as 'key=value'
+        if m:
+            parameter = m.group(0).strip()
+        m = re.search('\w+$', parameter) # strip off any leading numerical values produced by argspec
+        if m:
+            parameter = m.group(0).strip()
+        add_getter_setter(Decorator, parameter) # use the cleaned name: 'm' may be None here if the 2nd search failed
+
+    return Decorator(algorithm_to_decorate)
diff --git a/Code/Mantid/Testing/SystemTests/lib/systemtests/stresstesting.py b/Code/Mantid/Testing/SystemTests/lib/systemtests/stresstesting.py
new file mode 100644
index 00000000000..7527065812e
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/lib/systemtests/stresstesting.py
@@ -0,0 +1,1043 @@
+'''
+Mantid stress testing framework. This module contains all of the necessary code
+to run sets of stress tests on the Mantid framework by executing scripts directly
+or by importing them into MantidPlot.
+
+Copyright &copy; 2009 STFC Rutherford Appleton Laboratories
+
+This file is part of Mantid.
+
+Mantid is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+Mantid is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+File change history is stored at: <https://github.com/mantidproject/systemtests>.
+'''
+
+import sys
+import os
+import types
+import re
+import time
+import datetime
+import platform
+import subprocess
+import tempfile
+import imp
+import inspect
+import abc
+import numpy
+import unittest
+
+#########################################################################
+# The base test class.
+#########################################################################
+class MantidStressTest(unittest.TestCase):
+    '''Defines a base class for stress tests, providing functions
+    that should be overridden by inheriting classes to perform tests.
+    '''
+
+    # Define a delimiter when reporting results
+    DELIMITER = '|'
+    
+    # Define a prefix for reporting results
+    PREFIX = 'RESULT'
+
+    def __init__(self):
+        super(MantidStressTest, self).__init__()
+        # A list of things not to check when validating
+        self.disableChecking = []
+        # Whether or not to strip off whitespace when doing simple ascii diff
+        self.stripWhitespace = True
+        # Tolerance
+        self.tolerance = 0.00000001 # absolute tolerance passed to CheckWorkspacesMatch in validateWorkspaces()
+        # Store the resident memory of the system (in MB) before starting the test
+        import mantid.api
+        mantid.api.FrameworkManager.clear()
+        from mantid.kernel import MemoryStats
+        self.memory = MemoryStats().residentMem()/1024 # assumes residentMem() reports KB -- TODO confirm
+    
+    def runTest(self):
+        raise NotImplementedError('"runTest(self)" should be overridden in a derived class')
+    
+    def skipTests(self):
+        '''
+        Override this to return True when the tests should be skipped for some
+        reason. 
+        See also: requiredFiles() and requiredMemoryMB()
+        '''
+        return False
+
+    def validate(self):
+        '''
+        Override this to provide a pair of workspaces which should be checked for equality
+        by the doValidation method.
+        The overriding method should return a pair of strings. This could be two workspace
+        names, e.g. return 'workspace1','workspace2', or a workspace name and a nexus
+        filename (which must have nxs suffix), e.g. return 'workspace1','GEM00001.nxs'.
+        '''
+        return None
+
+    def requiredFiles(self):
+        '''
+        Override this method if you want to require files for the test.
+        Return a list of files.
+        '''
+        return []
+    
+    def requiredMemoryMB(self):
+        '''
+        Override this method to specify the amount of free memory,
+        in megabytes, that is required to run the test.
+        The test is skipped if there is not enough memory.
+        '''
+        return 0
+
+    def validateMethod(self):
+        '''
+        Override this to specify which validation method to use. Look at the validate* methods to 
+        see what allowed values are.
+        '''
+        return "WorkspaceToNeXus"
+
+    def maxIterations(self):
+        '''Override this to perform more than 1 iteration of the implemented test.'''
+        return 1
+
+    def reportResult(self, name, value):
+        '''
+        Send a result to be stored as a name,value pair
+        '''
+        print self.PREFIX + self.DELIMITER + name + self.DELIMITER + str(value) + '\n', # trailing comma + explicit '\n' = exactly one newline; line is parsed back by TestSuite.execute()
+        
+    def __verifyRequiredFile(self, filename):
+        '''Return True if the specified file name is findable by Mantid.'''
+        from mantid.api import FileFinder
+
+        # simple way is just getFullPath which never uses archive search
+        if os.path.exists(FileFinder.getFullPath(filename)):
+            return True
+
+        # try full findRuns which will use archive search if it is turned on
+        try:
+            candidates = FileFinder.findRuns(filename)
+            for item in candidates:
+                if os.path.exists(item):
+                    return True
+        except RuntimeError, e: # findRuns raises if nothing matches; treat as not found
+            return False
+                
+
+        # file was not found
+        return False
+
+    def __verifyRequiredFiles(self):
+        # first see if there is anything to do
+        reqFiles = self.requiredFiles()
+        if len(reqFiles) <= 0:
+            return
+
+        # by default everything is ok
+        foundAll = True
+
+        # initialize mantid so it can get the data directories to look in
+        import mantid
+        # check that all of the files exist
+        for filename in reqFiles:
+            if not self.__verifyRequiredFile(filename):
+                print "Missing required file: '%s'" % filename
+                foundAll = False
+
+        if not foundAll:
+            sys.exit(PythonTestRunner.SKIP_TEST) # special exit code -> runner marks the test 'skipped'
+            
+    def __verifyMemory(self):
+        """ Do we need to skip due to lack of memory? """
+        required = self.requiredMemoryMB()
+        if required <= 0:
+            return
+        
+        # Check if memory is available
+        from mantid.kernel import MemoryStats
+        MB_avail = MemoryStats().availMem()/(1024.) # assumes availMem() reports KB -- TODO confirm
+        if (MB_avail < required):
+            print "Insufficient memory available to run test! %g MB available, need %g MB." % (MB_avail,required)
+            sys.exit(PythonTestRunner.SKIP_TEST) # special exit code -> runner marks the test 'skipped'
+
+    def execute(self):
+        '''
+        Run the defined number of iterations of this test
+        '''
+        # Do we need to skip due to missing files?
+        self.__verifyRequiredFiles()
+        
+        self.__verifyMemory()
+        
+        # A custom check for skipping the tests for other reasons
+        if self.skipTests():
+            sys.exit(PythonTestRunner.SKIP_TEST)
+
+        # Start timer
+        start = time.time()
+        countmax = self.maxIterations() + 1 # range() is exclusive, so this runs maxIterations() times
+        for i in range(1, countmax):
+            istart = time.time()
+            self.runTest()
+            delta_t = time.time() - istart
+            self.reportResult('iteration time_taken', str(i) + ' %.2f' % delta_t)
+        delta_t = float(time.time() - start)
+        # Finish
+        #self.reportResult('time_taken', '%.2f' % delta_t)
+        
+    def __prepASCIIFile(self, filename):
+        """
+        Prepare an ascii file for comparison using difflib.
+        """
+        handle = open(filename, mode='r')
+        stuff = handle.readlines()
+        if self.stripWhitespace:
+            stuff = [line.strip() for line in stuff]
+        handle.close()
+        return stuff
+
+    def validateASCII(self):
+        """
+        Validate ASCII files using difflib.
+        """
+        (measured, expected) = self.validate()
+        measured = self.__prepASCIIFile(measured)
+        expected = self.__prepASCIIFile(expected)
+
+        # calculate the difference
+        import difflib
+        diff = difflib.Differ().compare(measured, expected)
+        result = []
+        for line in diff:
+            if line.startswith('+') or line.startswith('-') or line.startswith('?'):
+                result.append(line) # keep only lines that actually differ
+
+        # print the difference
+        if len(result) > 0:
+            if self.stripWhitespace:
+                msg = "(whitespace striped from ends)" # NOTE(review): "striped" typo in user-facing message
+            else:
+                msg = ""
+            print "******************* Difference in files", msg
+            print "\n".join(result)
+            print "*******************"
+            return False
+        else:
+            return True
+
+    def validateWorkspaceToNeXus(self):
+        '''
+        Assumes the second item from self.validate() is a nexus file and loads it 
+        to compare to the supplied workspace.
+        '''
+        valNames = list(self.validate())
+        from mantid.simpleapi import Load
+        numRezToCheck=len(valNames) # list of (workspace, reference-file) pairs flattened
+        mismatchName=None;
+
+        validationResult =True;
+        for ik in range(0,numRezToCheck,2): # check All results
+            workspace2 = valNames[ik+1]
+            if workspace2.endswith('.nxs'):
+                Load(Filename=workspace2,OutputWorkspace="RefFile")
+                workspace2 = "RefFile"
+            else:
+                raise RuntimeError("Should supply a NeXus file: %s" % workspace2)
+            valPair=(valNames[ik],"RefFile");
+            if numRezToCheck>2:
+                mismatchName = valNames[ik]; # disambiguate the mismatch dump when checking several pairs
+
+            if not(self.validateWorkspaces(valPair,mismatchName)):
+                validationResult = False;
+                print 'Workspace {0} not equal to its reference file'.format(valNames[ik]);
+        #end check All results
+
+        return validationResult;
+
+    def validateWorkspaceToWorkspace(self):
+        '''
+        Assumes the second item from self.validate() is an existing workspace
+        to compare to the supplied workspace.
+        '''
+        valNames = list(self.validate())
+        return self.validateWorkspaces(valNames)
+
+    def validateWorkspaces(self, valNames=None,mismatchName=None):
+        '''
+        Performs a check that two workspaces are equal using the CheckWorkspacesMatch
+        algorithm. Loads one workspace from a nexus file if appropriate.
+        Returns true if: the workspaces match 
+                      OR the validate method has not been overridden.
+        Returns false if the workspace do not match. The reason will be in the log.
+        '''
+        if valNames is None:
+            valNames = self.validate()
+
+        from mantid.simpleapi import SaveNexus, AlgorithmManager
+        checker = AlgorithmManager.create("CheckWorkspacesMatch")
+        checker.setLogging(True)
+        checker.setPropertyValue("Workspace1",valNames[0])
+        checker.setPropertyValue("Workspace2",valNames[1])
+        checker.setPropertyValue("Tolerance", str(self.tolerance))
+        if hasattr(self,'tolerance_is_reller') and self.tolerance_is_reller: # opt-in attribute set by subclasses
+           checker.setPropertyValue("ToleranceRelerr", "1")
+        for d in self.disableChecking:
+            checker.setPropertyValue("Check"+d,"0")
+        checker.execute()
+        if checker.getPropertyValue("Result") != 'Success!':
+            print self.__class__.__name__
+            if mismatchName: # dump the failing workspace to disk for post-mortem inspection
+                SaveNexus(InputWorkspace=valNames[0],Filename=self.__class__.__name__+mismatchName+'-mismatch.nxs')
+            else:
+                SaveNexus(InputWorkspace=valNames[0],Filename=self.__class__.__name__+'-mismatch.nxs')
+            return False
+                    
+        return True
+
+    def doValidation(self):
+        """
+        Perform validation. This selects which validation method to use by the result 
+        of validateMethod() and validate(). If validate() is not overridden this will
+        return True.
+        """
+        # if no validation is specified then it must be ok
+        validation = self.validate()
+        if validation is None:
+            return True
+        
+        # if a simple boolean then use this
+        if type(validation) == bool:
+            return validation
+        # or numpy boolean
+        if type(validation) == numpy.bool_:
+            return bool(validation)
+
+        # switch based on validation methods
+        method = self.validateMethod()
+        if method is None:
+            return True # don't validate
+        method = method.lower()
+        if "validateworkspacetonexus".endswith(method): # endswith() lets callers abbreviate, e.g. "WorkspaceToNeXus"
+            return self.validateWorkspaceToNeXus()
+        elif "validateworkspacetoworkspace".endswith(method):
+            return self.validateWorkspaceToWorkspace()
+        elif "validateascii".endswith(method):
+            return self.validateASCII()
+        else:
+            raise RuntimeError("invalid validation method '%s'" % self.validateMethod())
+    
+    def returnValidationCode(self,code):
+        """
+        Calls doValidation() and returns 0 in success and code if failed. This will be
+        used as return code from the calling python subprocess
+        """
+        if self.doValidation():
+            retcode = 0
+        else:
+            retcode = code
+        if retcode == 0:
+            self._success = True
+        else:
+            self._success = False
+        # Now the validation is complete we can clear out all the stored data and check memory usage
+        import mantid.api
+        mantid.api.FrameworkManager.clear()
+        # Get the resident memory again and work out how much it's gone up by (in MB)
+        from mantid.kernel import MemoryStats
+        memorySwallowed = MemoryStats().residentMem()/1024 - self.memory # baseline taken in __init__
+        # Store the result
+        self.reportResult('memory footprint increase', memorySwallowed )
+        return retcode
+
+    def succeeded(self):
+        """
+        Returns true if the test has been run and it succeeded, false otherwise
+        """
+        if hasattr(self, '_success'):
+            return self._success
+        else:
+            return False
+
+    def cleanup(self):
+        '''
+        This function is called after a test has completed and can be used to
+        clean up, i.e. remove workspaces etc
+        '''
+        pass
+    
+   
+    def assertDelta(self, value, expected, delta, msg=""):
+        """
+        Check that a value is within +- delta of the expected value
+        """
+        # Build the error message
+        if msg != "": msg += " "
+        msg += "Expected %g == %g within +- %g." % (value, expected, delta)
+        
+        if (value > expected+delta) or  (value < expected-delta):
+            raise Exception(msg)
+    
+    def assertLessThan(self, value, expected, msg=""):
+        """
+        Check that a value is < expected.
+        """
+        # Build the error message
+        if msg != "": msg += " "
+        msg += "Expected %g < %g " % (value, expected)
+        
+        if (value >= expected):
+            raise Exception(msg)
+    
+    def assertGreaterThan(self, value, expected, msg=""):
+        """
+        Check that a value is > expected.
+        """
+        # Build the error message
+        if msg != "": msg += " "
+        msg += "Expected %g > %g " % (value, expected)
+        
+        if (value <= expected):
+            raise Exception(msg)
+            
+    
+#########################################################################
+# A class to store the results of a test 
+#########################################################################
+class TestResult(object):
+    '''
+    Stores the results of each test so that they can be reported later.
+    '''
+    
+    def __init__(self):
+        self._results = [] # list of [name, value] pairs collected via addItem()
+        self.name = ''
+        self.filename = ''
+        self.date = ''
+        self.status = '' # e.g. 'success', 'failed validation', 'skipped' (set by TestSuite.execute)
+        self.time_taken = ''
+        self.total_time = ''
+        self.output = '' # captured stdout of the test subprocess
+        self.err = ''
+    
+    def addItem(self, item):
+        '''
+        Add an item to the store, this should be a list containing 2 entries: [Name, Value]
+        '''
+        self._results.append(item)
+        
+    def resultLogs(self):
+        '''
+        Get the map storing the results
+        '''
+        return self._results
+
+#########################################################################
+# A base class to support report results in an appropriate manner
+#########################################################################
+class ResultReporter(object):
+    '''
+    A base class for results reporting. In order to get the results in an
+    appropriate form, subclass this class and implement the dispatchResults 
+    method.
+    '''
+
+    def __init__(self):
+        '''Initialize a class instance, e.g. connect to a database'''
+        pass
+
+    def dispatchResults(self, result): # 'result' is a TestResult instance
+        raise NotImplementedError('"dispatchResults(self, result)" should be overridden in a derived class')
+
+#########################################################################
+# A class to report results as formatted text output
+#########################################################################
+class TextResultReporter(ResultReporter):
+    '''
+    Report the results of a test using standard out
+    '''
+    
+    def dispatchResults(self, result):
+        '''
+        Print the results to standard out
+        '''
+        nstars = 30 # width of the separator banner
+        print '*' * nstars
+        for t in result.resultLogs(): # each t is a [name, value] pair
+            print '\t' + str(t[0]).ljust(15) + '->  ', str(t[1])
+        print '*' * nstars
+
+#########################################################################
+# A class to report results as junit xml
+#########################################################################
+from xmlreporter import XmlResultReporter
+
+# A class to report results via email; emailreporter.py was dropped from
+# this repository (see commit message), so the import is guarded.
+try: from emailreporter import EmailResultReporter
+except ImportError: EmailResultReporter = None
+
+#########################################################################
+# A base class for a TestRunner
+#########################################################################
+class PythonTestRunner(object):
+    '''
+    A base class to serve as a wrapper to actually run the tests in a specific 
+    environment, i.e. console, gui
+    '''
+    SUCCESS_CODE = 0
+    GENERIC_FAIL_CODE = 1
+    SEGFAULT_CODE = 139 # 128 + SIGSEGV(11) as reported by the shell
+    VALIDATION_FAIL_CODE = 99
+    NOT_A_TEST = 98
+    SKIP_TEST = 97 # tests exit with this to request being marked 'skipped'
+
+    def __init__(self, need_escaping = False):
+        self._mtdpy_header = ''
+        self._test_dir = ''
+        # Get the path that this module resides in so that the tests know about it
+        self._framework_path = ''
+        for p in sys.path:
+            if 'Framework' in p:
+                self._framework_path =  os.path.abspath(p).replace('\\','/')
+        # A string to prefix the code with
+        self._code_prefix = ''
+        self._using_escape = need_escaping # True when the command string passes through a shell and quotes need escaping
+
+    def commandString(self, pycode):
+        '''
+        Return the appropriate command to pass to subprocess.Popen
+        '''
+        raise NotImplementedError('"commandString(self)" should be overridden in a derived class')
+
+    def setMantidDir(self, mtdheader_dir):
+        # Store the path to MantidPythonAPI
+        self._mtdpy_header = os.path.abspath(mtdheader_dir).replace('\\','/')
+
+    def setTestDir(self, test_dir):
+        self._test_dir = os.path.abspath(test_dir).replace('\\','/')
+
+    def createCodePrefix(self):
+        if self._using_escape == True:
+            esc = '\\'
+        else:
+            esc = ''
+
+        self._code_prefix = 'import sys, time;'
+        self._code_prefix += 'sys.path.insert(0, ' + esc + '"' + self._mtdpy_header + esc + '");' + \
+        'sys.path.append(' + esc + '"' + self._framework_path + esc + '");' + \
+        'sys.path.append(' + esc + '"' + self._test_dir + esc + '");'
+
+    def getCodePrefix(self):
+        '''
+        Return a prefix to the code that will be executed
+        '''
+        return self._code_prefix
+
+    def spawnSubProcess(self, cmd):
+        '''
+        Spawn a new process and run the given command within it
+        '''
+
+        proc = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize=-1)
+        std_out = ""
+        std_err = "" # stderr is merged into stdout above, so this stays empty
+        for line in proc.stdout: # echo output live while also capturing it
+            print line,
+            std_out += line
+        proc.wait()
+
+        return proc.returncode, std_out, std_err 
+    
+    def start(self, pycode):
+        '''
+        Run the given test code in a new subprocess
+        '''
+        raise NotImplementedError('"run(self, pycode)" should be overridden in a derived class')
+    
+#########################################################################
+# A runner class to execute the tests on using the command line interface
+#########################################################################
+class PythonConsoleRunner(PythonTestRunner):
+    '''
+    This class executes tests within a Mantid environment inside a standalone python
+    interpreter
+    '''
+    
+    def __init__(self):
+        PythonTestRunner.__init__(self, True) # need_escaping=True: the code is passed on a shell command line
+
+    def start(self, pycode):
+        '''
+        Run the code in a new instance of a python interpreter
+        '''
+        return self.spawnSubProcess(sys.executable + ' -c \"' + self.getCodePrefix() + pycode + '\"')
+
+#########################################################################
+# A runner class to execute the tests on using the command line interface
+#########################################################################
+class MantidPlotTestRunner(PythonTestRunner):
+    '''
+    This class executes tests within the Python scripting environment inside 
+    MantidPlot
+    '''
+    
+    def __init__(self, mtdplot_dir):
+        PythonTestRunner.__init__(self)
+        mtdplot_bin = mtdplot_dir + '/MantidPlot'
+        if os.name == 'nt':
+            mtdplot_bin += '.exe'
+        self._mtdplot_bin = os.path.abspath(mtdplot_bin).replace('\\','/')
+        
+    def start(self, pycode):
+        '''
+        Run the code in a new instance of the MantidPlot scripting environment
+        '''
+        # The code needs wrapping in a temporary file so that it can be passed
+        # to MantidPlot, along with the redirection of the scripting output to
+        # stdout
+        # On Windows, just using the file given back by tempfile doesn't work
+        # as the name is mangled to a short version where all characters after 
+        # a space are replace by ~. So on windows use put the file in the 
+        # current directory
+        if os.name == 'nt':
+            loc = '.'
+        else:
+            loc = '' # empty dir means tempfile's default temp location
+        # MG 11/09/2009: I tried the simple tempfile.NamedTemporaryFile() method
+        # but this didn't work on Windows so I had to be a little long winded
+        # about it
+        fd, tmpfilepath = tempfile.mkstemp(suffix = '.py', dir = loc, text=True)
+
+        os.write(fd, 'import sys\nsys.stdout = sys.__stdout__\n' + self.getCodePrefix() + pycode)
+        retcode, output, err = self.spawnSubProcess('"' +self._mtdplot_bin + '" -xq \'' + tmpfilepath + '\'') 
+        # Remove the temporary file
+        os.close(fd)
+        os.remove(tmpfilepath)
+        return retcode, output, err
+                
+#########################################################################
+# A class to tie together a test and its results
+#########################################################################
+class TestSuite(object):
+    '''
+    Tie together a test and its results.
+    '''
+    def __init__(self, modname, testname, filename = None):
+        self._modname = modname
+        self._fullname = modname
+        # A None testname indicates the source did not load properly
+        # It has come this far so that it gets reported as a proper failure
+        # by the framework
+        if testname is not None:
+            self._fullname += '.' + testname
+
+        self._result = TestResult()
+        # Add some results that are not linked to the actually test itself
+        self._result.name = self._fullname
+        if filename:
+            self._result.filename = filename
+        else:
+            self._result.filename = self._fullname
+        self._result.addItem(['test_name', self._fullname])
+        sysinfo = platform.uname()
+        self._result.addItem(['host_name', sysinfo[1]]) # uname()[1] is the network node name
+        self._result.addItem(['environment', self.envAsString()])
+        self._result.status = 'skipped' # the test has been skipped until it has been executed
+
+    name = property(lambda self: self._fullname)
+    status = property(lambda self: self._result.status)
+
+    def envAsString(self):
+        # Short platform tag for reporting, e.g. 'win32', an OS X version, or a Linux distro name
+        if os.name == 'nt':
+            system = platform.system().lower()[:3]
+            arch = platform.architecture()[0][:2]
+            env = system + arch
+        elif os.name == 'mac': # NOTE(review): os.name is 'posix' on OS X, so this branch may never run -- confirm
+            env = platform.mac_ver()[0]
+        else:
+            env = platform.dist()[0]
+        return env
+
+    def markAsSkipped(self, reason):
+        self.setOutputMsg(reason)
+        self._result.status = 'skipped'
+
+    def execute(self, runner):
+        # Build a one-line script that imports the test module, runs the test and
+        # exits with a status code understood by the retcode mapping below
+        print time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + ': Executing ' + self._fullname
+        pycode = 'import ' + self._modname + ';'\
+                 + 'systest = ' + self._fullname + '();'\
+                 + 'systest.execute();'\
+                 + 'retcode = systest.returnValidationCode('+str(PythonTestRunner.VALIDATION_FAIL_CODE)+');'\
+                 + 'systest.cleanup();'\
+                 + 'sys.exit(retcode)'
+        # Start the new process
+        self._result.date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        self._result.addItem(['test_date',self._result.date])
+        retcode, output, err = runner.start(pycode)
+        
+
+        if retcode == PythonTestRunner.SUCCESS_CODE:
+            status = 'success'
+        elif retcode == PythonTestRunner.GENERIC_FAIL_CODE:
+            # This is most likely an algorithm failure, but it's not certain
+            status = 'algorithm failure'
+        elif retcode == PythonTestRunner.VALIDATION_FAIL_CODE:
+            status = 'failed validation'
+        elif retcode == PythonTestRunner.SEGFAULT_CODE:
+            status = 'crashed'
+        elif retcode == PythonTestRunner.SKIP_TEST:
+            status = 'skipped'
+        elif retcode < 0: # negative return code means the process was killed by a signal
+            status = 'hung'
+        else:
+            status = 'unknown'
+
+        # Check return code and add result
+        self._result.status = status
+        self._result.addItem(['status', status])
+        # Dump std out so we know what happened
+        print output
+        self._result.output = output
+        all_lines = output.split('\n')
+        # Find the test results
+        for line in all_lines: # parse lines of the form RESULT|name|value emitted by MantidStressTest.reportResult
+            entries = line.split(MantidStressTest.DELIMITER)
+            if len(entries) == 3 and entries[0] == MantidStressTest.PREFIX:
+                self._result.addItem([entries[1], entries[2]])
+                
+    def setOutputMsg(self, msg=None):
+        if msg is not None:
+            self._result.output = msg
+
+    def reportResults(self, reporters):
+        for r in reporters:
+            r.dispatchResults(self._result)
+
+#########################################################################
+# The main API class
+#########################################################################
+class TestManager(object):
+    '''A manager class that is responsible for overseeing the testing process. 
+    This is the main interaction point for the framework.
+    '''
+
+    def __init__(self, test_loc, runner = PythonConsoleRunner(), output = [TextResultReporter()],
+                 testsInclude=None, testsExclude=None):
+        '''Initialize a class instance'''
+
+        # Check whether the MANTIDPATH variable is set
+        mtdheader_dir = os.getenv("MANTIDPATH")
+        if mtdheader_dir is None:
+            raise RuntimeError('MANTIDPATH variable not be found. Please ensure Mantid is installed correctly.')
+
+        # Runners and reporters    
+        self._runner = runner
+        self._reporters = output
+        
+        # Init mantid
+        sys.path.append(os.path.abspath(mtdheader_dir).replace('\\','/'))
+        runner.setMantidDir(mtdheader_dir)
+
+        # If given option is a directory
+        if os.path.isdir(test_loc) == True:
+            test_dir = os.path.abspath(test_loc).replace('\\','/')
+            sys.path.append(test_dir)
+            runner.setTestDir(test_dir)
+            self._tests = self.loadTestsFromDir(test_dir)
+        else:
+            if os.path.exists(test_loc) == False:
+                print 'Cannot find file ' + test_loc + '.py. Please check the path.'
+                exit(2)
+            test_dir = os.path.abspath(os.path.dirname(test_loc)).replace('\\','/')
+            sys.path.append(test_dir)
+            runner.setTestDir(test_dir)
+            self._tests = self.loadTestsFromModule(os.path.basename(test_loc))
+
+        if len(self._tests) == 0:
+            print 'No tests defined in ' + test_dir + '. Please ensure all test classes sub class stresstesting.MantidStressTest.'
+            exit(2)
+
+        self._passedTests = 0
+        self._skippedTests = 0
+        self._failedTests = 0
+        self._lastTestRun = 0
+
+        self._testsInclude = testsInclude
+        self._testsExclude = testsExclude
+
+        # Create a prefix to use when executing the code
+        runner.createCodePrefix()
+
+    totalTests = property(lambda self: len(self._tests))
+    skippedTests = property(lambda self: (self.totalTests - self._passedTests - self._failedTests))
+    passedTests = property(lambda self: self._passedTests)
+    failedTests = property(lambda self: self._failedTests)
+
+    def __shouldTest(self, suite):
+        if self._testsInclude is not None:
+            if not self._testsInclude in suite.name:
+                suite.markAsSkipped("NotIncludedTest")
+                return False
+        if self._testsExclude is not None:
+            if self._testsExclude in suite.name:
+                suite.markAsSkipped("ExcludedTest")
+                return False
+        return True
+
+    def executeTests(self):
+        # Get the defined tests
+        for suite in self._tests:
+            if self.__shouldTest(suite):
+                suite.execute(self._runner)
+            if suite.status == "success":
+                self._passedTests += 1
+            elif suite.status == "skipped":
+                self._skippedTests += 1
+            else:
+                self._failedTests += 1
+            suite.reportResults(self._reporters)
+            self._lastTestRun += 1
+
+    def markSkipped(self, reason=None):
+        for suite in self._tests[self._lastTestRun:]:
+            suite.setOutputMsg(reason)
+            suite.reportResults(self._reporters) # just let people know you were skipped
+         
+    def loadTestsFromDir(self, test_dir):
+        ''' Load all of the tests defined in the given directory'''
+        entries = os.listdir(test_dir)
+        tests = []
+        regex = re.compile('^.*\.py$', re.IGNORECASE)
+        for file in entries:
+            if regex.match(file) != None:
+                tests.extend(self.loadTestsFromModule(os.path.join(test_dir,file)))
+        return tests
+
+    def loadTestsFromModule(self, filename):
+        '''
+        Load test classes from the given module object which has been
+        imported with the __import__ statement
+        '''
+        modname = os.path.basename(filename)
+        modname = modname.split('.py')[0]
+        path = os.path.dirname(filename)
+        pyfile = open(filename, 'r')
+        tests = []
+        try:
+            mod = imp.load_module(modname, pyfile, filename, ("","",imp.PY_SOURCE))
+            mod_attrs = dir(mod)
+            for key in mod_attrs:
+                value = getattr(mod, key)
+                if key is "MantidStressTest" or not inspect.isclass(value):
+                    continue
+                if self.isValidTestClass(value):
+                    test_name = key
+                    tests.append(TestSuite(modname, test_name, filename))
+        except Exception:
+            # Error loading the source, add fake unnamed test so that an error
+            # will get generated when the tests are run and it will be counted properly
+            tests.append(TestSuite(modname, None, filename))
+        finally:
+            pyfile.close()
+        return tests
+
+    def isValidTestClass(self, class_obj):
+        """Returns true if the test is a valid test class. It is valid
+        if: the class subclassses MantidStressTest and has no abstract methods
+        """
+        if not issubclass(class_obj, MantidStressTest):
+            return False
+        # Check if the get_reference_file is abstract or not
+        if hasattr(class_obj, "__abstractmethods__"):
+            if len(class_obj.__abstractmethods__) == 0:
+                return True
+            else:
+                return False
+        else:
+            return True
+
+#########################################################################
+# Class to handle the environment
+#########################################################################
+class MantidFrameworkConfig:
+
+    def __init__(self, mantidDir=None, sourceDir=None,
+                 loglevel='information', archivesearch=False):
+        # force the environment variable
+        if mantidDir is not None:
+            if os.path.isfile(mantidDir):
+                mantidDir = os.path.split(mantidDir)[0]
+            os.environ['MANTIDPATH'] = mantidDir
+
+        # add it to the python path
+        directory = os.getenv("MANTIDPATH")
+        if directory is None:
+            raise RuntimeError("MANTIDPATH not found.")
+        else:
+            sys.path.append(directory)
+        if not os.path.isdir(os.path.join(directory, "mantid")):
+            raise RuntimeError("Did not find mantid package in %s" % directory)
+
+        self.__sourceDir = self.__locateSourceDir(sourceDir)
+
+        # add location of stress tests
+        self.__testDir = self.__locateTestsDir()
+
+        # add location of the analysis tests
+        sys.path.insert(0,self.__locateTestsDir())
+
+        # setup the rest of the magic directories
+        parentDir = os.path.split(self.__sourceDir)[0]
+        self.__saveDir = os.path.join(parentDir, "logs/").replace('\\','/')
+        self.__dataDirs = [os.path.join(parentDir, "SystemTests"),
+                os.path.join(parentDir, "SystemTests/AnalysisTests/ReferenceResults"),
+                os.path.join(parentDir, "Data"),
+                os.path.join(parentDir, "Data/LOQ"),
+                os.path.join(parentDir, "Data/SANS2D"),
+                os.path.join(parentDir, "Data/PEARL"),
+                self.__saveDir
+                ]
+
+        # set the log level
+        self.__loglevel = loglevel
+        self.__datasearch =  archivesearch
+
+    def __locateSourceDir(self, suggestion):
+        if suggestion is None:
+            loc = os.path.abspath(__file__)
+            suggestion = os.path.split(loc)[0] # get the directory
+        loc = os.path.abspath(suggestion)
+        loc = os.path.normpath(loc)
+
+        if os.path.isdir(loc):
+            return loc
+        else:
+            raise RuntimeError("Failed to find source directory")
+
+    def __locateTestsDir(self):
+        loc = os.path.join(self.__sourceDir, '../SystemTests/AnalysisTests')
+        loc = os.path.abspath(loc)
+        if os.path.isdir(loc):
+            return loc
+        else:
+            raise RuntimeError("'%s' is not a directory (AnalysisTests)" % loc)
+
+    def __getDataDirs(self):
+        # get the file of the python script
+        testDir = os.path.split(self.__sourceDir)[0]
+
+        # add things to the data search path
+        dirs =[]
+        dirs.append(os.path.join(testDir, "Data"))
+        dirs.append(os.path.join(testDir, "Data/LOQ"))
+        dirs.append(os.path.join(testDir, "Data/SANS2D"))
+        dirs.append(os.path.join(testDir, "Data/PEARL"))
+        dirs.append(os.path.join(testDir, "SystemTests"))
+        dirs.append(os.path.join(testDir, \
+                                 "SystemTests/AnalysisTests/ReferenceResults"))
+        dirs.append(os.path.abspath(os.getenv("MANTIDPATH")))
+
+        dirs = [os.path.normpath(item) for item in dirs]
+
+        return dirs
+
+    def __moveFile(self, src, dst):
+        if os.path.exists(src):
+            import shutil
+            shutil.move(src, dst)
+
+    def __copyFile(self, src, dst):
+        if os.path.exists(src):
+            import shutil
+            shutil.copyfile(src, dst)
+
+    saveDir = property(lambda self: self.__saveDir)
+    testDir = property(lambda self: self.__testDir)
+
+    def config(self):
+        if not os.path.exists(self.__saveDir):
+            print "Making directory %s to save results" % self.__saveDir
+            os.mkdir(self.__saveDir)
+        else:
+            if not os.path.isdir(self.__saveDir):
+                raise RuntimeError("%s is not a directory" % self.__saveDir)
+
+        # Start mantid
+        import mantid
+        from mantid.kernel import config
+
+        # backup the existing user properties so we can step all over it
+        self.__userPropsFile = config.getUserFilename()
+        self.__userPropsFileBackup  = self.__userPropsFile + ".bak"
+        self.__userPropsFileSystest = self.__userPropsFile + ".systest"
+        self.__moveFile(self.__userPropsFile, self.__userPropsFileBackup)
+
+        # Make sure we only save these keys here
+        config.reset()
+
+        # Up the log level so that failures can give useful information
+        config['logging.loggers.root.level'] = self.__loglevel
+        # Set the correct search path
+        data_path = ''
+        for dir in self.__dataDirs:
+            if not os.path.exists(dir):
+                raise RuntimeError('Directory ' + dir + ' was not found.')
+            search_dir = dir.replace('\\','/')
+            if not search_dir.endswith('/'):
+                search_dir += '/'
+                data_path += search_dir + ';'
+        config['datasearch.directories'] = data_path
+
+        # Save path
+        config['defaultsave.directory'] = self.__saveDir
+
+        # Do not show paraview dialog
+        config['paraview.ignore'] = "1"
+
+        # Do not update instrument definitions
+        config['UpdateInstrumentDefinitions.OnStartup'] = "0"
+
+        # Disable usage reports
+        config['usagereports.enabled'] = "0"
+
+        # Case insensitive
+        config['filefinder.casesensitive'] = 'Off'
+        
+        # datasearch
+        if self.__datasearch:
+            config["datasearch.searcharchive"] = 'On'
+
+        # Save this configuration
+        config.saveConfig(self.__userPropsFile)
+
+    def restoreconfig(self):
+        self.__moveFile(self.__userPropsFile, self.__userPropsFileSystest)
+        self.__moveFile(self.__userPropsFileBackup, self.__userPropsFile)
+
+
+#==============================================================================
+def envAsString():
+    """Returns a string describing the environment
+    (platform) of this test."""
+    if os.name == 'nt':
+        system = platform.system().lower()[:3]
+        arch = platform.architecture()[0][:2]
+        env = system + arch
+    elif os.name == 'mac':
+        env = platform.mac_ver()[0]
+    else:
+        env = platform.dist()[0] + "-" + platform.dist()[1]
+    return env
diff --git a/Code/Mantid/Testing/SystemTests/lib/systemtests/xmlreporter.py b/Code/Mantid/Testing/SystemTests/lib/systemtests/xmlreporter.py
new file mode 100644
index 00000000000..a991fe30f7d
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/lib/systemtests/xmlreporter.py
@@ -0,0 +1,86 @@
+import os
+import sys
+from xml.dom.minidom import getDOMImplementation
+import stresstesting
+
+class XmlResultReporter(stresstesting.ResultReporter):
+
+	_time_taken = 0.0
+	_failures = []
+	_skipped = []
+	
+	def __init__(self, showSkipped=True):
+		self._doc = getDOMImplementation().createDocument(None,'testsuite',None)
+		self._show_skipped = showSkipped
+
+	def reportStatus(self):
+		return len(self._failures) == 0
+
+	def getResults(self):
+		# print the command line summary version of the results
+		self._failures.sort()
+		self._skipped.sort()
+		print
+		if self._show_skipped and len(self._skipped) > 0:
+			print "SKIPPED:"
+			for test in self._skipped:
+				print test.name
+		if len(self._failures) > 0:
+			print "FAILED:"
+			for test in self._failures:
+				print test.name
+
+		# return the xml document version
+		docEl = self._doc.documentElement
+		docEl.setAttribute('name','SystemTests')
+		docEl.setAttribute('tests',str(len(docEl.childNodes)))
+		docEl.setAttribute('failures',str(len(self._failures)))
+		docEl.setAttribute('skipped', str(len(self._skipped)))
+		docEl.setAttribute('time',str(self._time_taken))
+		return self._doc.toxml()
+
+	def dispatchResults(self, result):
+		''' This relies on the order and names of the items to give the correct output '''
+		test_name = result.name.split('.')
+		if len(test_name) > 1:
+			class_name = '.'.join(test_name[:-1])
+			name = test_name[-1]
+		else:
+			class_name = result.name
+			name = result.name
+		elem = self._doc.createElement('testcase')
+		elem.setAttribute('classname',"SystemTests." + class_name)
+		elem.setAttribute('name',name)
+		if result.status == 'skipped':
+			self._skipped.append(result)
+			skipEl = self._doc.createElement('skipped')
+			if len(result.output) > 0:
+				if "Missing required file" in result.output:
+					skipEl.setAttribute('message', "MissingRequiredFile")
+				else:
+					skipEl.setAttribute('message', result.output)
+				skipEl.appendChild(self._doc.createTextNode(result.output))
+			elem.appendChild(skipEl)
+		elif result.status != 'success':
+			self._failures.append(result)
+			failEl = self._doc.createElement('failure')
+			failEl.setAttribute('file',result.filename)
+			output = ''
+			if len(result.output) > 0:
+				output += result.output
+			if len(output) > 0:
+				failEl.appendChild(self._doc.createTextNode(output))
+			elem.appendChild(failEl)
+		else:
+			time_taken = 0.0
+			for t in result.resultLogs():
+				if t[0] == 'iteration time_taken':
+					time_taken = float(t[1].split(' ')[1])
+					self._time_taken += time_taken
+				if t[0] == 'memory footprint increase':
+					memEl = self._doc.createElement('memory')
+					memEl.appendChild(self._doc.createTextNode(t[1]))
+					elem.appendChild(memEl)
+			elem.setAttribute('time',str(time_taken))
+			elem.setAttribute('totalTime',str(time_taken))
+		self._doc.documentElement.appendChild(elem)
diff --git a/Code/Mantid/Testing/SystemTests/scripts/InstallerTests.py b/Code/Mantid/Testing/SystemTests/scripts/InstallerTests.py
new file mode 100644
index 00000000000..cae938d474b
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/InstallerTests.py
@@ -0,0 +1,127 @@
+import os
+import sys
+import platform
+import shutil
+import subprocess
+from getopt import getopt
+
+from mantidinstaller import (createScriptLog, log, stop, failure, scriptfailure, 
+                             get_installer, run)
+
+'''
+
+This script copies the Mantid installer for the current system from the build server,
+installs it, runs the system tests and produces an XML report file, SystemTestsReport.xml.
+
+'''
+
+# Parse the command line; any parsing error falls back to showing the usage
+try:
+    opt, argv = getopt(sys.argv[1:],'nohvR:l:')
+except:
+    # NOTE(review): bare except is deliberate here (treat any error as -h),
+    # but catching getopt.GetoptError explicitly would be safer
+    opt = [('-h','')]
+
+if ('-h','') in opt:
+    print "Usage: %s [OPTIONS]" % os.path.basename(sys.argv[0])
+    print
+    print "Valid options are:"
+    print "       -n Run tests without installing Mantid (it must be already installed)"
+    print "       -o Output to the screen instead of log files"
+    print "       -h Display the usage"
+    print "       -R Optionally only run the test matched by the regex"
+    print "       -l Log level"
+    sys.exit(0)
+
+# Default settings, overridden by the flags below
+doInstall = True
+test_regex = None
+out2stdout = False
+log_level = 'notice'
+for option, arg in opt:
+    if option == '-n':
+        doInstall = False
+    if option == '-o':
+        out2stdout = True
+    if option == '-R' and arg != "":
+        test_regex = arg
+    if option == '-l' and arg != "":
+        log_level = arg
+
+# The log file for this script
+parentDir = os.path.abspath('..').replace('\\','/')
+if not os.path.exists(parentDir + '/logs'):
+    os.mkdir(parentDir + '/logs')
+
+createScriptLog(parentDir + '/logs/TestScript.log')
+testRunLogPath = parentDir + '/logs/testsRun.log'
+testRunErrPath = parentDir + '/logs/testsRun.err'
+
+log('Starting system tests')
+installer = get_installer(doInstall)
+
+# Install the found package
+if doInstall:
+    log("Installing package '%s'" % installer.mantidInstaller)
+    try:
+        installer.install()
+        log("Application path " + installer.mantidPlotPath)
+        installer.no_uninstall = False
+    except Exception,err:
+        scriptfailure("Installing failed. "+str(err))
+else:
+    # -n given: Mantid is assumed already installed, so never uninstall it
+    installer.no_uninstall = True
+
+# Ensure MANTIDPATH points at this directory so that
+# the correct properties file is loaded
+mantidPlotDir = os.path.dirname(installer.mantidPlotPath)
+log('MantidPlot directory %s' % mantidPlotDir)
+log('Pointing MANTIDPATH at MantidPlot directory %s' % mantidPlotDir)
+os.environ["MANTIDPATH"] = mantidPlotDir
+
+try:
+    # Keep hold of the version that was run
+    version = run(installer.mantidPlotPath + ' -v')
+    version_tested = open('version_tested.log','w')
+    if version and len(version) > 0:
+        version_tested.write(version)
+    version_tested.close()
+except Exception, err:
+    scriptfailure('Version test failed: '+str(err), installer)
+
+try:
+    # Now get the revision number/git commit ID (remove the leading 'g' that isn't part of it)
+    revision = run(installer.mantidPlotPath + ' -r').lstrip('g')
+    revision_tested = open('revision_tested.log','w')
+    if revision and len(version) > 0:
+        revision_tested.write(revision)
+    revision_tested.close()
+except Exception, err:
+    scriptfailure('Revision test failed: '+str(err), installer)
+
+log("Running system tests. Log files are: logs/testsRun.log and logs/testsRun.err")
+try:
+    # Pick the correct Mantid along with the bundled python on windows
+    run_test_cmd = "%s %s/runSystemTests.py --loglevel=%s --mantidpath=%s" % (installer.python_cmd, os.path.dirname(os.path.realpath(__file__)), log_level, mantidPlotDir)
+    if test_regex is not None:
+        run_test_cmd += " -R " + test_regex
+    if out2stdout:
+        p = subprocess.Popen(run_test_cmd, shell=True) # no PIPE: print on screen for debugging
+        p.wait()
+    else:
+        p = subprocess.Popen(run_test_cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
+        out,err = p.communicate() # waits for p to finish
+        testsRunLog = open(testRunLogPath,'w')
+        if out:
+            testsRunLog.write(out)
+        testsRunLog.close()
+        testsRunErr = open(testRunErrPath,'w')
+        if err:
+            testsRunErr.write(err)
+        testsRunErr.close()
+    # A non-zero return code means at least one test failed
+    if p.returncode != 0:
+        failure(installer)
+except Exception, exc:
+    scriptfailure(str(exc),installer)
+except:
+    # catch-all for non-Exception raises: record a failure and clean up
+    failure(installer)
+
+# Test run completed successfully
+stop(installer)
diff --git a/Code/Mantid/Testing/SystemTests/scripts/mantidinstaller.py b/Code/Mantid/Testing/SystemTests/scripts/mantidinstaller.py
new file mode 100644
index 00000000000..3b566b8f4e1
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/mantidinstaller.py
@@ -0,0 +1,242 @@
+"""Defines classes for handling installation
+"""
+import platform
+import os
+import glob
+import sys
+import subprocess
+
+scriptLog = None
+
+def createScriptLog(path):
+    global scriptLog
+    scriptLog = open(path,'w')
+
+def stop(installer):
+    ''' Save the log, uninstall the package and exit with error code 0 '''
+    try:
+        installer.uninstall()
+    except Exception, exc:
+        # An uninstall problem is logged but does not change the success exit code
+        log("Could not uninstall package %s: %s" % (installer.mantidInstaller, str(exc)))
+    scriptLog.close()
+    sys.exit(0)
+
+def log(txt):
+    ''' Write text to the script log file '''
+    if txt and len(txt) > 0:
+        scriptLog.write(txt)
+        if not txt.endswith('\n'):
+            scriptLog.write('\n')
+        print txt
+
+def failure(installer):
+    ''' Report failure of test(s), try to uninstall package and exit with code 1 '''
+    try:
+        installer.uninstall()
+    except Exception, exc:
+        log("Could not uninstall package %s: %s" % (installer.mantidInstaller, str(exc)))
+        pass
+
+    log('Tests failed')
+    print 'Tests failed'
+    sys.exit(1)
+
+def scriptfailure(txt, installer=None):
+    '''Report failure of this script, try to uninstall package and exit with code 1 '''
+    if txt:
+        log(txt)
+    if installer is not None:
+        try:
+            installer.uninstall()
+        except Exception:
+            log("Could not uninstall package %s " % self.mantidInstaller)
+    scriptLog.close()
+    sys.exit(1)
+
+
+def get_installer(do_install=True):
+    """
+    Creates the correct class for the current platform
+        @param do_install :: True if installation is to be performed
+    """
+    system = platform.system()
+    if system == 'Windows':
+        return NSISInstaller(do_install)
+    elif system == 'Linux':
+        dist = platform.dist()
+        if dist[0] == 'Ubuntu':
+            return DebInstaller(do_install)
+        elif dist[0] == 'redhat' and (dist[1].startswith('5.') or dist[1].startswith('6.')):
+            return RPMInstaller(do_install)
+        else:
+            scriptfailure('Unknown Linux flavour: %s' % str(dist))
+    elif system == 'Darwin':
+        return DMGInstaller(do_install)
+    else:
+        raise scriptfailure("Unsupported platform")
+
+def run(cmd):
+    """Run a command in a subprocess.
+
+    Returns the combined stdout/stderr text; raises if the command exits
+    with a non-zero return code (after logging the error).
+    """
+    try:
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
+        out = p.communicate()[0]
+        if p.returncode != 0:
+            raise Exception('Returned with code '+str(p.returncode)+'\n'+out)
+    except Exception,err:
+        log('Error in subprocess %s:\n' % str(err))
+        raise
+    # Echo the command output into the script log
+    log(out)
+    return out
+    
+
+class MantidInstaller(object):
+    """
+    Base-class for installer objects
+    """
+    mantidInstaller = None
+    mantidPlotPath = None
+    no_uninstall = False
+    python_cmd = "python"
+
+    def __init__(self, do_install, filepattern):
+        """Initialized with a pattern to 
+        find a path to an installer
+        """
+        if not do_install:
+            return
+        # Glob for packages
+        matches = glob.glob(os.path.abspath(filepattern))
+        if len(matches) > 0: 
+            # This will put the release mantid packages at the start and the nightly ones at the end
+            # with increasing version numbers
+            matches.sort() 
+            # Make sure we don't get Vates
+            for match in matches:
+                if 'vates'in match:
+                    matches.remove(match)
+        # Take the last one as it will have the highest version number
+        if len(matches) > 0: 
+            self.mantidInstaller = os.path.join(os.getcwd(), matches[-1])
+            log("Using installer " + self.mantidInstaller)
+        else:
+            raise RuntimeError('Unable to find installer package in "%s"' % os.getcwd())
+
+    def install(self):
+        self.do_install()
+
+    def do_install(self):
+        raise NotImplementedError("Override the do_install method")
+
+    def uninstall(self):
+        if not self.no_uninstall:
+            self.do_uninstall()
+
+    def do_uninstall(self):
+        raise NotImplementedError("Override the do_uninstall method")
+
+class NSISInstaller(MantidInstaller):
+    """Uses an NSIS installer
+    to install Mantid
+    """
+
+    def __init__(self, do_install):
+        MantidInstaller.__init__(self, do_install, 'Mantid-*-win*.exe')
+        self.mantidPlotPath = 'C:/MantidInstall/bin/MantidPlot.exe'
+        # Use the python bundled with the install so tests run against it
+        self.python_cmd = "C:/MantidInstall/bin/python.exe"
+        
+    def do_install(self):
+        """
+            The NSIS installer spawns a new process and returns immediately,
+            so the 'start' command is used with the /wait option to make it
+            stay around until completion.
+            NOTE(review): the original text described a chained "&& exit 1"
+            to surface installer failures, but the command below has no such
+            chain - failures are only caught by run()'s return-code check.
+            Confirm this is sufficient on Windows.
+        """
+        run('start "Installer" /wait ' + self.mantidInstaller + ' /S')
+
+    def do_uninstall(self):
+        "Runs the uninstall exe"
+        uninstall_path = 'C:/MantidInstall/Uninstall.exe'
+        run('start "Uninstaller" /wait ' + uninstall_path + ' /S')
+
+class DebInstaller(MantidInstaller):
+    """Uses a deb package to install mantid
+    """
+
+    def __init__(self, do_install):
+        MantidInstaller.__init__(self, do_install, 'mantid*.deb')
+        package = os.path.basename(self.mantidInstaller)
+        if 'mantidnightly' in package:
+            self.mantidPlotPath = '/opt/mantidnightly/bin/MantidPlot'
+        elif 'mantidunstable' in package:
+            self.mantidPlotPath = '/opt/mantidunstable/bin/MantidPlot'
+        else:
+            self.mantidPlotPath = '/opt/Mantid/bin/MantidPlot'
+        
+    def do_install(self):
+        """Uses gdebi to run the install
+        """
+        run('sudo gdebi -n ' + self.mantidInstaller)
+
+    def do_uninstall(self):
+        """Removes the debian package
+        """
+        package_name = os.path.basename(self.mantidInstaller).split("_")[0]
+        run('sudo dpkg --purge %s' % package_name)
+
+class RPMInstaller(MantidInstaller):
+    """Uses a rpm package to install mantid
+    """
+
+    def __init__(self, do_install):
+        MantidInstaller.__init__(self, do_install, 'mantid*.rpm')
+        package = os.path.basename(self.mantidInstaller)
+        # Nightly/unstable builds live under their own /opt prefix
+        if 'mantidnightly' in package:
+            self.mantidPlotPath = '/opt/mantidnightly/bin/MantidPlot'
+        elif 'mantidunstable' in package:
+            self.mantidPlotPath = '/opt/mantidunstable/bin/MantidPlot'
+        else:
+            self.mantidPlotPath = '/opt/Mantid/bin/MantidPlot'
+        
+    def do_install(self):
+        """Uses yum to run the install. Current user must be in sudoers
+        """
+        try:
+            run('sudo yum -y install ' + self.mantidInstaller)
+        except Exception, exc:
+            # yum reports an error if the same package is already installed;
+            # that is not a failure for our purposes
+            if 'is already installed' in str(exc):
+                log("Current version is up-to-date, continuing.\n")
+                pass
+            else:
+                raise
+
+    def do_uninstall(self):
+        """Removes the rpm package
+        """
+        package_name = os.path.basename(self.mantidInstaller).split("-")[0]
+        run('sudo yum -y erase %s' % package_name)
+
+
+class DMGInstaller(MantidInstaller):
+    """Uses an OS X dmg file to install mantid
+    """
+    def __init__(self, do_install):
+        MantidInstaller.__init__(self, do_install, 'mantid-*.dmg')
+        self.mantidPlotPath = '/Applications/MantidPlot.app/Contents/MacOS/MantidPlot'
+        # Make the bundled libraries visible to spawned processes
+        os.environ['DYLD_LIBRARY_PATH'] = '/Applications/MantidPlot.app/Contents/MacOS'
+        
+    def do_install(self):
+        """Mounts the dmg and copies the application into the right place.
+        """
+        p = subprocess.Popen(['hdiutil','attach',self.mantidInstaller],stdin=subprocess.PIPE,stdout=subprocess.PIPE)
+        p.stdin.write('yes') # This accepts the GPL
+        p.communicate()[0] # This captures (and discards) the GPL text
+        mantidInstallerName = os.path.basename(self.mantidInstaller)
+        mantidInstallerName = mantidInstallerName.replace('.dmg','')
+        run('sudo cp -r /Volumes/'+ mantidInstallerName+'/MantidPlot.app /Applications/' )
+        run('hdiutil detach /Volumes/'+ mantidInstallerName+'/')
+
+    def do_uninstall(self):
+        """Removes the application bundle installed by do_install."""
+        run('sudo rm -fr /Applications/MantidPlot.app/')
diff --git a/Code/Mantid/Testing/SystemTests/scripts/performance/README.txt b/Code/Mantid/Testing/SystemTests/scripts/performance/README.txt
new file mode 100644
index 00000000000..717d3e96cfe
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/performance/README.txt
@@ -0,0 +1 @@
+This is basically a fork of the performance test support code in the main Mantid repository (https://github.com/mantidproject/mantid/tree/master/Test/PerformanceTests) to enable performance monitoring of the system tests in a similar fashion.
diff --git a/Code/Mantid/Testing/SystemTests/scripts/performance/analysis.py b/Code/Mantid/Testing/SystemTests/scripts/performance/analysis.py
new file mode 100644
index 00000000000..c6d3429fae1
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/performance/analysis.py
@@ -0,0 +1,699 @@
+""" Module containing functions for test
+performance analyis, plotting, and saving
+to other formats (CSV, PDF) """
+
+import testresult
+import os
+import sys
+import sqlresults
+from sqlresults import get_results
+import matplotlib
+from pylab import *
+import numpy as np
+import datetime
+import random
+
+# This is the date string format as returned by the database
+DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
+
+#============================================================================================
def get_orderby_clause(last_num):
    """Build an ORDER BY clause restricting a query to the most recent revisions.

    Parameters
    ----------
        last_num :: keep only this many most-recent revisions;
                a value <= 0 means no limit (empty clause).
    """
    if last_num <= 0:
        return ''
    return " ORDER BY revision DESC limit %d" % last_num
+        
+
+#============================================================================================
def get_data(name='', type='', x_field='revision', y_field='runtime', last_num=-1):
    """Get the test runtime/iteration as a function of an X variable.
    
    Parameters
    ----------
        name :: full name of the test
        type :: type of test to filter by
        x_field :: name of the field for the X axis. 
                e.g. 'revision' (default)
                or 'date' : exact date/time of launch
                or 'index' : using the date, but returning an index of build # 
                    instead of the date (better scaling)
        y_field :: name of the field averaged on the Y axis, e.g. 'runtime' (default)
        last_num :: only get the last this-many entries from the table, sorted by revision.
                if < 0, then get everything
        
    Returns
    -------
        x :: list of X values, sorted increasing
        y :: list of runtime/iteration for each x 
        """
    results = get_results(name, type, where_clause='', orderby_clause=get_orderby_clause(last_num))

    # Data dict. Key = X variable; Value = (iterations total, runtime total)
    data = {}
    for res in results:
        # 'index' orders by date; it is converted to 0..N-1 at the end
        if x_field == 'index':
            x = res['date']
        else:
            x = res[x_field]

        # 'in' instead of the deprecated dict.has_key()
        if x in data:
            (iters, runtime) = data[x]
            iters += 1
            runtime += res[y_field]
        else:
            iters = 1
            runtime = res[y_field]
        # Save the # of iterations and the accumulated runtime
        data[x] = (iters, runtime)

    # Sorted list of (x, runtime/iteration).
    # Named 'averaged' to avoid shadowing the builtin sorted().
    averaged = [(x, total / iters) for (x, (iters, total)) in data.items()]
    averaged.sort()

    x = [a for (a, b) in averaged]
    # For index, convert into an integer index
    if x_field == 'index':
        x = range(len(x))
    y = [b for (a, b) in averaged]

    return (x, y)
+    
+
+#============================================================================================
def get_unique_fields(results, field):
    """Return the distinct values of 'field' across a list of TestResult objects.

    The order of the returned list is unspecified (set-based)."""
    return list({res[field] for res in results})
+
+#============================================================================================
def get_results_matching(results, field, value):
    """Filter a list of TestResult objects, keeping those whose
    'field' entry equals 'value' (in their original order)."""
    return [res for res in results if res[field] == value]
+
+
+#============================================================================================
def smart_ticks(index, values):
    """On the current figure, set the ticks at X positions
    given by index, with value given by values (ints).
    But it tries to space them out in a reasonable way.
    """
    if type(values[0]).__name__ == "unicode":
        # Values are date strings from the database: parse them so we can
        # measure the spanned interval and pick an appropriate format.
        dates = []
        for val in values:
            try:
                # BUG FIX: keep the parsed datetime. The result of strptime
                # used to be discarded and the raw string appended, which made
                # the subtraction and strftime calls below fail.
                dates.append(datetime.datetime.strptime(val, DATE_STR_FORMAT))
            except ValueError:
                # Skip entries that do not parse as dates
                pass
        if len(dates) == 0: return
        td = dates[-1] - dates[0]
        if (td < datetime.timedelta(hours=1)):
            values_str = [d.strftime("%M:%S") for d in dates]
        elif (td < datetime.timedelta(days=1)):
            values_str = [d.strftime("%H:%M") for d in dates]
        else:
            values_str = [d.strftime("%m-%d, %H:%M") for d in dates]
    else:
        # convert to list of strings
        values_str = [str(val) for val in values]

    if len(values_str) == 0: return

    # Figure width in pixels -> horizontal room available per tick
    w = gcf().get_figwidth()*gcf().get_dpi()
    spacing = w/len(index)

    tick_index = []
    tick_strings = []

    space_available = 0
    for i in xrange(len(index)):
        s = str(values_str[i])
        s_width = (len(s)+1) * 12.0 # About 12 pixels per letter? And add a space
        space_available += spacing
        if space_available >= s_width:
            # Enough room since the last tick: place one here
            space_available = 0
            tick_index.append(i)
            tick_strings.append(s)

    xticks( tick_index, tick_strings )
+   
+    
+
+#============================================================================================
def plot_success_count(type='system', last_num=-1, x_field='revision'):
    """ Plot the count of successful/failed tests vs revision number 
    
    Parameters
    ----------
        type :: 'system', or 'performance'
        last_num :: only include the last this-many revisions (all if < 0)
        x_field :: results field used for the X axis (default 'revision')
    """
    results = get_results('', type, where_clause='', orderby_clause=get_orderby_clause(last_num))
    revisions = get_unique_fields(results, x_field)
    
    # Go through each revision, counting passes and failures
    success = []
    fail = []
    for revision in revisions:
        these = get_results_matching(results, x_field, revision)
        succeeded = 0
        failed = 0
        for res in these:
            if res["success"]:
                succeeded += 1
            else:
                failed += 1
        # Keep the list of them
        success.append(succeeded)
        fail.append(failed)
    
    figure()
    revisions = np.array(revisions)
    fail = np.array(fail)
    success = np.array(success)
    
    index = np.arange(len(revisions))
#    p1 = bar(index, fail, color='r')
#    p2 = bar(index, success, color='g', bottom=fail)
#    legend( (p1[0], p2[0]), ('Failure', 'Success') )

    # Failures drawn as a red band at the bottom, successes stacked
    # on top of them in green
    p1 = fill_between(index, fail, 0, color='r')
    p2 = fill_between(index, success+fail, fail, color='g')
    #legend( (p1, p2), ('Failure', 'Success') )

    smart_ticks( index, revisions)


    ylabel('Success/Fail')
    xlabel(x_field)
    revsare = "all revs"
    if last_num > 0: revsare = "last %d revs" % last_num
    title("Success/Fail History of %s tests (%s)" % (type, revsare))
+
+
+#============================================================================================
def plot_runtime(*args, **kwargs):
    """ Plot the runtime-per-iteration history of a single test
    (data obtained via get_data()).
    
    Parameters
    ----------
        - See get_data() for the full list
    """
    (x,y) = get_data(*args, **kwargs)

    figure()
    index = np.arange(len(x))
    plot(index,y,'-b.')
    smart_ticks( index, x)
    ylabel('Runtime/iteration (sec)')
    # Robustness fix: fall back to get_data()'s defaults instead of
    # raising KeyError when the caller omits these keyword arguments.
    xlabel(kwargs.get('x_field', 'revision'))

    name = kwargs.get('name', '')
    last_num = kwargs.get('last_num', -1)
    if last_num > 0:
        title("Runtime History of %s (last %d revs)" % (name, last_num))
    else:
        title("Runtime History of %s (all revs)" % name)
+
+
+
+
+#============================================================================================
def plot_memory(*args, **kwargs):
    """ Plot the memory-change ("loss") history of a single test
    (data obtained via get_data(), typically with y_field='memory_change').
    
    Parameters
    ----------
        - See get_data() for the full list
    """
    (x,y) = get_data(*args, **kwargs)

    figure()
    index = np.arange(len(x))
    plot(index,y,'-b.')
    smart_ticks( index, x)
    ylabel("Memory 'loss' (MB)")
    # Robustness fix: fall back to get_data()'s defaults instead of
    # raising KeyError when the caller omits these keyword arguments.
    xlabel(kwargs.get('x_field', 'revision'))

    name = kwargs.get('name', '')
    last_num = kwargs.get('last_num', -1)
    if last_num > 0:
        title("Memory History of %s (last %d revs)" % (name, last_num))
    else:
        title("Memory History of %s (all revs)" % name)
+
+
+
+
+
+
+
+
+
+
+
# The default HTML header, shared by all generated pages; it links the
# report.css stylesheet written by make_css_file().
default_html_header = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><html>
<head><LINK href="report.css" rel="stylesheet" type="text/css"></head>
"""

# The matching footer.
# NOTE(review): the footer closes </body> but the header never opens
# <body> -- browsers tolerate this, but it is not valid XHTML.
default_html_footer =  """</body></html>"""
+
+#============================================================================================
def make_css_file(path):
    """ Make and save the report.css file to be used by all html pages.

    Parameters
    ----------
        path :: directory into which 'report.css' is written
    """
    default_css = """
table
{
border-collapse:collapse;
background-color:#FFAAAA;
}
table, th, td
{
border: 1px solid black;
padding: 2px 6px;
}
.failedrow, .failedrow TD, .failedrow TH
{
background-color:#FFAAAA;
color:black;
}
.alternaterow, .alternaterow TD, .alternaterow TH
{
background-color:#FFFFAA;
color:black;
}
.error
{
color:red;
font-weight: bold;
}
    
    """
    # Typo fix: the table background-color was missing the leading '#'
    # ("FFAAAA" is invalid CSS and was silently ignored); every other rule
    # already uses #RRGGBB notation.
    # Context manager guarantees the file handle is closed.
    with open(os.path.join(path, "report.css"), 'w') as f:
        f.write(default_css)
+
+#============================================================================================
def make_environment_html(res):
    """Build an HTML table describing the environment a test ran in.

    Parameters
    ----------
        res :: a TestResult (dict-like) with 'host', 'environment'
               and 'runner' entries
    """
    rows = [("Host name:", res['host']),
            ("Environment:", res['environment']),
            ("Type of runner:", res['runner'])]
    html = "<table border=1>\n"
    for (label, value) in rows:
        html += "    <tr><th>%s</th> <td>%s</td> </tr>\n" % (label, value)
    html += "    </table>\n    "
    return html
+
+#============================================================================================
def make_detailed_html_file(basedir, name, fig1, fig2, fig3, fig4, last_num):
    """ Create a detailed HTML report for the named test.

    Writes '<name>.htm' into basedir, containing the four figures and a
    table of every stored result for the test (newest rows first).

    Parameters
    ----------
        basedir :: directory the .htm file is written into
        name :: full name of the test
        fig1, fig2 :: runtime figure filenames (recent entries / all entries)
        fig3, fig4 :: memory figure filenames (recent entries / all entries)
        last_num :: how many entries figs 1 and 3 cover (used in alt text only)
    """
    html = default_html_header
    html += """<h1>Detailed report for %s</h1><br>""" % (name)
    html += """<img src="%s" alt="runtime vs revision number (latest %d entries)" />\n""" % (fig1, last_num)
    html += """<img src="%s" alt="runtime vs revision number" />\n""" % (fig2)
    html += """<img src="%s" alt="memory vs revision number (latest %d entries)" />\n""" % (fig3, last_num)
    html += """<img src="%s" alt="memory vs revision number" />\n""" % (fig4)
    html += """<h3>Test Results</h3>"""
    
    fields = ['revision', 'date', 'commitid', 'compare', 'status', 'runtime', 'cpu_fraction', 'memory_change', 'variables']
    
    # Build the header row, prettifying a couple of the field names
    table_row_header = "<tr>"
    for field in fields:
        if field == "runtime": field = "Runtime/Iter."
        if field == "memory_change": field = "Memory 'loss'"
        field = field[0].upper() + field[1:]
        table_row_header += "<th>%s</th>" % field
    table_row_header += "</tr>"
    
    html += """<table border="1">""" + table_row_header
    
    table_html = ''
    results = get_results(name, type='', where_clause='')
    # Sort rows by (revision, variables, date); note this shadows the
    # builtin 'sorted'
    sorted = [(res["revision"], res["variables"], res["date"], res) for res in results]
    sorted.sort(reverse=False)
    count = 0
    last_rev = 0
    commitid = ''
    last_commitid = ''
    row_class = ''
    table_rows = []
    for (rev, variable, date, res) in sorted:
        table_row_html = ''
        if (rev != last_rev):
            # Changed SVN revision. Swap row color
            if row_class == '': 
                row_class = "class=alternaterow"
            else:
                row_class = ''
            last_rev = rev
            
        # NOTE(review): 'commitid' is only assigned when the commitid field is
        # rendered inside the loop below, so this comparison sees the previous
        # row's value -- confirm the intended semantics of the 'compare' link.
        if commitid != last_commitid:
            last_commitid = commitid
        
        if res["success"]:
            table_row_html += "<tr %s>\n" % row_class
        else:
            table_row_html += "<tr class=failedrow>\n"
        
        for field in fields:
            val = ''
            
            if field == 'compare':
                # Comparison to previous commit, if anything can be done
                if (last_commitid != ""):
                    val = """<a href="https://github.com/mantidproject/mantid/compare/%s...%s">diff</a>""" % (last_commitid, commitid)
                
            else:
                # Normal fields 
                val = res[field]
                
                # Trim the fractional seconds
                if field=="date":
                    val = str(val)[0:19]
                    
                # Add a link to the commit on GitHub (abbreviated to 7 chars)
                if field=="commitid":
                    commitid = val
                    partial_commitid = val
                    if (len(partial_commitid) > 7): partial_commitid = partial_commitid[0:7];
                    val = """<a href="https://github.com/mantidproject/mantid/commit/%s">%s</a>""" % (commitid, partial_commitid)
                    
                if field=="runtime":
                    val = "%.3f" % (res["runtime"])
                    
            table_row_html += "<td>%s</td>" % val
        table_row_html += "\n</tr>\n"
        table_rows.append(table_row_html)
        
    # Now print out all the rows in reverse order (newest first)
    table_rows.reverse()
    for row in table_rows:
        html += row
#        # Add the row header every 30 entries
#        count += 1
#        if count % 30 == 0: html += table_row_header
        
    # And one more at the end for good measure
    html += table_row_header
    html += "</table>"
    
    if len(results)> 0:
        html += """<h3>Environment</h3>
        %s""" % make_environment_html(results[0])

    html += default_html_footer

#    last_date = sorted[-1][1]["date"]
#    results = get_results(name, type='', get_log=False, where_clause=" date = '%s'" % last_date)
#    if len(results)>0:
#        html += 
    
    f = open(os.path.join(basedir, "%s.htm" % name), "w")
    html = html.replace("\n", os.linesep) # Fix line endings for windows
    f.write(html)
    f.close()
+
+
+#============================================================================================
def how_long_ago(timestr):
    """Return a short human-friendly string saying how long ago
    'timestr' was, e.g. "3w2d", "5d1h", "2h10m", "4m12s" or "30s".

    Parameters
    ----------
        timestr :: date/time string in DATE_STR_FORMAT (as stored in the DB)
    """
    now = datetime.datetime.now()
    then = datetime.datetime.strptime(timestr, DATE_STR_FORMAT)
    td = (now - then)
    # Split the timedelta into week/day/hour/minute/second components.
    # ('minutes' rather than 'min' to avoid shadowing the builtin.)
    sec = td.seconds
    minutes = int(sec / 60)
    hours = int(minutes / 60)
    days = td.days
    weeks = int(days / 7)
    sec = sec % 60
    minutes = minutes % 60
    hours = hours % 24
    days = days % 7

    # Report the two most significant non-zero units
    if weeks > 0:
        return "%dw%dd" % (weeks, days)
    elif days > 0:
        return "%dd%dh" % (days, hours)
    elif hours > 0:
        return "%dh%dm" % (hours, minutes)
    elif minutes > 0:
        return "%dm%ds" % (minutes, sec)
    else:
        return "%ds" % (sec)
+     
+     
+#============================================================================================
def get_html_summary_table(test_names):
    """Return an HTML table summarizing the most recent result
    of each test in 'test_names'. Tests with no stored result are
    left out of the table."""
    html = """ 
    <table ><tr>
    <th>Test Name</th> 
    <th>Type</th> 
    <th>Status</th> 
    <th>When?</th>
    <th>Total runtime (s)</th>
    <th>Memory 'loss'</th>
    """

    for name in test_names:
        res = sqlresults.get_latest_result(name)
        if res is None:
            # No stored result for this test: skip it
            continue

        # Failed tests get the red 'failedrow' styling
        if not res["success"]:
            html += """<tr class="failedrow">"""
        else:
            html += """<tr>"""
        html += """<td><a href="%s.htm">%s</a></td>""" % (name, name)
        html += """<td>%s</td>""" % res['type']
        html += """<td>%s</td>""" % res['status']

        # Friendly date; a malformed date yields an empty cell
        try:
            date = datetime.datetime.strptime(res['date'], DATE_STR_FORMAT)
            html += """<td>%s</td>""" % date.strftime("%b %d, %H:%M:%S")
        except:
            html += """<td></td>"""

        html += """<td>%s</td>""" % res['runtime']
        html += """<td>%s</td>""" % res['memory_change']
        html += """</tr>"""

    html += """</table>"""
    return html
+    
+    
+#============================================================================================
def generate_html_subproject_report(path, last_num, x_field='revision', starts_with=""):
    """ HTML report for a subproject set of tests.
    
    Parameters
    ----------
        path :: base path to the report folder
        last_num :: in the shorter plots, how many recent entries to show
        x_field :: results field used for the X axis
        starts_with : the prefix of the test name
    
    Returns: (filename saved, HTML for a page with ALL figures in it)
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)
        
        
    # Detect if you can do figures (e.g. no display / matplotlib backend)
    dofigs = True
    try:
        figure()
        rcParams['axes.titlesize'] = 'small'
    except:
        dofigs = False
        
    # Start the HTML
    overview_html = ""

    # ------ Find the test names of interest ----------------    
    # Limit with only those tests that exist in the latest rev
    latest_rev = sqlresults.get_latest_revison()
    temp_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    # Filter by their start
    test_names = []
    for name in temp_names:
        if name.startswith(starts_with):
            test_names.append(name)
           
    test_names.sort()
    
    # -------- Report for each test ------------------------
    for name in test_names:
        print "Plotting", name
        overview_html += """<hr><h2>%s</h2>\n""" % name
        
        # Path to the figures
        fig1 = "%s.runtime.v.revision.png" % name
        fig2 = "%s.runtime.v.revision.ALL.png" % name
        fig3 = "%s.memory.v.revision.png" % name
        fig4 = "%s.memory.v.revision.ALL.png" % name
        
        if dofigs:
            # Only the latest X entries
            plot_runtime(name=name,x_field=x_field,last_num=last_num)
            savefig(os.path.join(basedir, fig1))
            close()
    
            # Plot all svn times
            plot_runtime(name=name,x_field=x_field,last_num=-1)
            savefig(os.path.join(basedir, fig2))
            close()
            
            # Only the latest X entries
            plot_memory(name=name,x_field=x_field,y_field='memory_change',last_num=last_num)
            savefig(os.path.join(basedir, fig3))
            close()

            # Plot all svn times
            plot_memory(name=name,x_field=x_field,y_field='memory_change',last_num=-1)
            savefig(os.path.join(basedir, fig4))
            close()
            
            overview_html +=  """<img src="%s" alt="runtime vs revision number" />""" % (fig1)
            overview_html +=  """<img src="%s" alt="memory vs revision number" />\n""" % (fig3)
        
        # Per-test page with the full results table, plus a link to it
        make_detailed_html_file(basedir, name, fig1, fig2, fig3, fig4, last_num)
        detailed_html = """<br><a href="%s.htm">Detailed test report for %s</a>
        <br><br>
        """ % (name, name)
        overview_html +=  detailed_html
        
    filename = starts_with + ".htm"
    
    return (filename, overview_html)
+    
+    
+
+#============================================================================================
def generate_html_report(path, last_num, x_field='revision'):
    """Make a comprehensive HTML report of runtime history for all tests.

    Writes report.htm (main page), overview_plot.htm (all figures) and one
    page per subproject into the report folder.

    Parameters
    ----------
        path :: base path to the report folder
        last_num :: in the shorter plot, how many SVN revs to show?
        x_field :: the field to use as the x-axis. 'revision' or 'date' make sense
    """
    basedir = os.path.abspath(path)
    if not os.path.exists(basedir):
        os.mkdir(basedir)
    
    # Make the CSS file to be used by all HTML
    make_css_file(path)
    
    # Detect if you can do figures (e.g. no display / matplotlib backend)
    dofigs = True
    try:
        figure()
    except:
        dofigs = False
        
    # --------- Start the HTML --------------
    html = default_html_header
    html += """<h1>Mantid System Tests Auto-Generated Report</h1>"""
    html += """<p><a href="overview_plot.htm">See an overview of performance plots for all tests by clicking here.</a></p> """
    if not dofigs:
        html += """<p class="error">There was an error generating plots. No figures will be present in the report.</p>"""
        
    html += """<h2>Run Environment</h2>
    %s
    """ % ( make_environment_html(sqlresults.get_latest_result()) )

    overview_html = ""
    
    # ------ Find the test names of interest ----------------    
    # Limit with only those tests that exist in the latest rev
    # (sic: 'revison' matches the sqlresults API spelling)
    latest_rev = sqlresults.get_latest_revison()
    test_names = list(sqlresults.get_all_test_names(" revision = %d" % latest_rev))
    test_names.sort()
    
    # ------ Find a list of subproject names --------
    # Subproject = the part of the test name before the first "."
    subprojects = set()
    for name in test_names:
        n = name.find(".")
        if n > 0:
            subprojects.add( name[:n] )
    subprojects = list(subprojects)
    subprojects.sort()
    html += """<h2>Test Subprojects</h2>
    <big>
    <table cellpadding="10">    """
    
    # One report page (and one table link) per subproject
    for subproject in subprojects:
        (filename, this_overview) = generate_html_subproject_report(path, last_num, x_field, subproject)
        overview_html += this_overview
        html += """<tr> <td> <a href="%s">%s</a> </td> </tr>
        """ % (filename, subproject)
    html += """</table></big>"""
    
    # --------- Table with the summary of latest results --------
    html += """<h2>Overall Results Summary</h2>"""
    html += get_html_summary_table(test_names)
        
    # -------- Overall success history graphs ------------
    #if dofigs:
    #    # We report the overall success
    #    fig_path = "OverallSuccess.png"
    #    plot_success_count(type='',last_num=last_num, x_field=x_field)
    #    savefig(os.path.join(basedir, fig_path))
    #    close()
    #
    #    fig_path2 = "OverallSuccess.ALL.png"
    #    plot_success_count(type='',last_num=-1, x_field=x_field)
    #    savefig(os.path.join(basedir, fig_path2))
    #    close()
    #
    #    html += """<h2>Overall Success/Failure</h2>
    #    <img src="%s" />
    #    <img src="%s" />
    #    """ % (fig_path, fig_path2)
        
    html += default_html_footer
        
    f = open(os.path.join(basedir, "report.htm"), "w")
    html = html.replace("\n", os.linesep) # Fix line endings for windows
    f.write(html)
    f.close()
    
    # -------- Overview of plots ------------
    f = open(os.path.join(basedir, "overview_plot.htm"), "w")
    overview_html = overview_html.replace("\n", os.linesep) # Fix line endings for windows
    f.write(overview_html)
    f.close()
    
    print "Report complete!"
+
+
+
+
+#============================================================================================
if __name__ == "__main__":
    # Manual entry point for exercising report generation locally
    # against a fake database.
    sqlresults.set_database_filename("MyFakeData.db")
    # Make up some test data
    # (flip 'if 0' to 'if 1' to regenerate the fake database first)
    if 0:
        if os.path.exists("MyFakeData.db"): os.remove("MyFakeData.db")
        sqlresults.generate_fake_data(300)
    
    
    generate_html_report("../Report", 50)
    
#    plot_runtime(name='MyFakeTest', x_field='revision')
#    plot_runtime(name='MyFakeTest', x_field='date')
#    plot_success_count()
#    show()
    
diff --git a/Code/Mantid/Testing/SystemTests/scripts/performance/make_report.py b/Code/Mantid/Testing/SystemTests/scripts/performance/make_report.py
new file mode 100755
index 00000000000..1934d84e2a1
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/performance/make_report.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+import argparse
+import sys
+import os
+import subprocess
+import sqlite3
+
+#====================================================================================
def getSourceDir():
    """Return the directory containing this script's source code."""
    import os
    import sys
    script = os.path.abspath(sys.argv[0])
    # Follow a symlinked script to its real location
    script = os.path.realpath(script) if os.path.islink(script) else script
    return os.path.dirname(script)
+
+
+
+def join_databases(dbfiles):
+    """Create a single DB joining several ones 
+    Returns: filename created
+    """
+    outfile = os.path.join(os.path.dirname(dbfiles[0]), "JoinedDatabases.db")
+    all_results = []
+    # Get the results of each file
+    for dbfile in dbfiles:
+        print "Reading", dbfile 
+        sqlresults.set_database_filename(dbfile)
+        these_results = sqlresults.get_results("")
+        all_results += these_results
+    # Write them into one
+    sqlresults.set_database_filename(outfile)
+    sqlresults.setup_database()
+    reporter = sqlresults.SQLResultReporter()
+    for res in all_results:
+        reporter.dispatchResults(res)
+    # Done!
+    return outfile
+    
+    
+
+#====================================================================================
+if __name__ == "__main__":
+    # Parse the command line
+    parser = argparse.ArgumentParser(description='Generates a HTML report using the Mantid System Tests results database')
+
+    parser.add_argument('--path', dest='path', 
+                        default="./Report",
+                        help='Path to the ouput HTML. Default "./Report".' )
+
+    parser.add_argument('--x_field', dest='x_field', 
+                        default="revision",
+                        help="Field to use as the x-axis. Default: 'revision'. Other possibilities: 'date'.")
+
+    parser.add_argument('dbfile', metavar='DBFILE', type=str, nargs='+',
+                        default=["./MantidSystemTests.db"], 
+                        help='Required: Path to the SQL database file(s).')
+        
+    
+    args = parser.parse_args()
+    
+    # Import the manager definition
+    import analysis
+    import sqlresults
+    
+    if len(args.dbfile) > 1:
+        # Several files - join them into one big .db
+        dbfile = join_databases(args.dbfile)
+    else:
+        # Only one file - use it
+        dbfile = args.dbfile[0]
+        
+    
+    if not os.path.exists(dbfile):
+        print "Error! Could not find", dbfile
+        sys.exit(1)
+    
+    # This is where we look for the DB file
+    sqlresults.set_database_filename(dbfile)
+    
+    # Make the report
+    analysis.generate_html_report(args.path, 100, args.x_field)
\ No newline at end of file
diff --git a/Code/Mantid/Testing/SystemTests/scripts/performance/reporters.py b/Code/Mantid/Testing/SystemTests/scripts/performance/reporters.py
new file mode 100644
index 00000000000..a2852ec1f4d
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/performance/reporters.py
@@ -0,0 +1,126 @@
+import os
+import sys
+
+#########################################################################
+# A base class to support report results in an appropriate manner
+#########################################################################
class ResultReporter(object):
    '''
    Base class for results reporting. Subclasses deliver results in a
    particular form (text, archived logs, JUnit XML, ...) by overriding
    the dispatchResults method.
    '''

    def __init__(self):
        '''Initialize a class instance, e.g. connect to a database'''
        pass

    def dispatchResults(self, result):
        """
        Report a single result.

        Parameters
            result: a TestResult object """
        raise NotImplementedError('"dispatchResults(self, result)" should be overridden in a derived class')
+
+
+#########################################################################
+# A class to report results as formatted text output
+#########################################################################
class TextResultReporter(ResultReporter):
    '''
    Report the results of a test using standard out
    '''
    
    def dispatchResults(self, result):
        '''
        Print the result's name/value pairs to standard out, one per
        line, between rows of stars. Values are flattened onto one
        line and truncated at 50 characters.
        '''
        nstars = 30
        print '*' * nstars
        for (name, val) in result.data.items():
            str_val = str(val)
            str_val = str_val.replace("\n", " ")
            # Truncate long values so every entry stays on a single line
            if len(str_val) > 50:
                str_val = str_val[:50] + " . . . "
            print '    ' + name.ljust(15) + '->  ', str_val
        print '*' * nstars
+
+
+#########################################################################
+# A class to report results as formatted text output
+#########################################################################
class LogArchivingReporter(ResultReporter):
    '''
    Archive the log output of each result into a file inside a given
    folder, one file per result.
    (Docstring fix: the original copy-pasted "standard out" text did not
    describe what this class does.)
    '''
    def __init__(self, logarchive):
        # Path to a log archiving folder; created if it does not exist
        self.logarchive = os.path.abspath(logarchive)
        if not os.path.exists(self.logarchive):
            os.mkdir(self.logarchive)
    
    def dispatchResults(self, result):
        '''
        Write the result's log contents to its archive file.

        Parameters
            result: a TestResult object providing get_logarchive_filename()
                    and a "log_contents" entry
        '''
        fullpath = os.path.join(self.logarchive, result.get_logarchive_filename())
        # Context manager guarantees the file handle is closed
        with open(fullpath, "w") as f:
            f.write(result["log_contents"])
+
+#########################################################################
+# A class to report results as XML that Hudson can interpret
+#########################################################################
class JUnitXMLReporter(ResultReporter):
    '''
    Report the results of a test to a JUnit style XML format
    that can be read by Hudson/Jenkins
    '''
    
    def __init__(self, path):
        # Directory that receives the per-test .xml files
        self._path = path
    
    def dispatchResults(self, result):
        '''
        Write a JUnit-style .xml file named after the result.

        Parameters
            result: a TestResult object; its "name" entry is expected to
                    look like "SuiteName.TestName"
        '''
        fullpath = os.path.join(self._path, "%s.xml" % result["name"])
        
        # "Suite.Test" -> suite name plus the remainder as the test name
        names  = result["name"].split(".")
        suitename = names[0]
        testname = ".".join(names[1:])
        
        failure = ""
        num_failures = 0
        if not result["success"]:
            failure = """\n        <failure type="failedAssert">%s</failure>
            <system-out ><![CDATA[%s]]></system-out>""" % (result["status"], result["log_contents"])
            num_failures = 1
        
        # Bug fix: the file handle was never closed; the context manager
        # guarantees it is flushed and closed.
        with open(fullpath, 'w') as f:
            f.write("""<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="%s" tests="1" failures="%d" disabled="0" errors="0" time="0.0">
    <testcase name="%s" time="%f" classname="%s">%s
    </testcase>
</testsuite>
""" % (suitename, num_failures, testname, result["runtime"], suitename, failure))
+
+
+
if __name__=="__main__":
    # Smoke test: write one passing and one failing JUnit XML file
    # into the current directory.
    import testresult
    rep = JUnitXMLReporter(".")
    
    res = testresult.TestResult()
    res["name"] = "MyTestTest.Test"
    res["status"] = "success maybe?"
    res["success"] = True
    res["runtime"] = 1.234
    rep.dispatchResults(res)
    
    # A failing result exercises the <failure> element path
    res = testresult.TestResult()
    res["name"] = "MyTestTest.OtherTest"
    res["status"] = "failure"
    res["success"] = False
    res["runtime"] = 3.456
    rep.dispatchResults(res)
diff --git a/Code/Mantid/Testing/SystemTests/scripts/performance/sqlresults.py b/Code/Mantid/Testing/SystemTests/scripts/performance/sqlresults.py
new file mode 100644
index 00000000000..b7eb3e4289c
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/performance/sqlresults.py
@@ -0,0 +1,327 @@
+try:
+    import sqlite3
+    has_sql = True
+except ImportError:
+    has_sql = False
+    print "Error importing sqlite3. SQL will not work"
+    
+import reporters
+import datetime
+import testresult
+import os
+import shutil
+import math
+import random
+
+#====================================================================================
def getSourceDir():
    """Return the directory containing this script, resolving symlinks."""
    import os
    import sys
    script_path = os.path.abspath(sys.argv[0])
    if os.path.islink(script_path):
        script_path = os.path.realpath(script_path)
    return os.path.dirname(script_path)


#=====================================================================
# The table fields/columns, in the exact order they appear in the
# TestRuns table (after the testID primary key).
TABLE_FIELDS = ['date', 'name', 'type', 'host', 'environment', 'runner',
                 'revision', 'commitid', 'runtime', 'cpu_fraction', 
                 'memory_change', 'success',
                 'status', 'logarchive', 'variables']

#=====================================================================
# Module-wide path to the database file; override via
# set_database_filename() before opening any connections.
database_file = os.path.join(getSourceDir(), "MantidSystemTests.db")

#=====================================================================
def get_database_filename():
    """Return the path of the database file currently in use."""
    return database_file

#=====================================================================
def set_database_filename(value):
    """Point the module at a different database file."""
    global database_file
    database_file = value
+
+#=====================================================================
def SQLgetConnection():
    """Open and return a sqlite3 connection to the current database file."""
    db_path = get_database_filename()
    return sqlite3.connect(db_path)
+
+
+#=====================================================================
def get_TestResult_from_row(row):
    """Return a filled TestResult object from a "row"
    obtained by selecting * from the TestRuns table.

    Parameters
    ----------
        row :: sequence of column values; row[0] is the testID primary
            key and the remaining columns follow TABLE_FIELDS order.

    Returns
    -------
        result :: TestResult object, with an extra
            .testID member containing the ID into the table (testID field)
    """
    res = testresult.TestResult()
    res.testID = row[0]
    # Columns follow TABLE_FIELDS order, offset by one because row[0]
    # is the primary key; zip pairs them directly (no index arithmetic).
    for field, value in zip(TABLE_FIELDS, row[1:]):
        res[field] = value
    return res
+
+    
+#=====================================================================
def get_latest_result(name=''):
    """Return a TestResult object corresponding to the
    last result in the TestRuns table.

    Parameters
    ----------
        name :: optional, test name to filter by

    Returns None when the table has no matching rows.
    """
    db = SQLgetConnection()
    c = db.cursor()
    # Bind the name as a query parameter: interpolating it into the SQL
    # string broke on names containing quotes (injection-style bug).
    if name != "":
        c.execute("SELECT * FROM TestRuns WHERE name=? "
                  "ORDER BY testID DESC LIMIT 1;", (name,))
    else:
        c.execute("SELECT * FROM TestRuns ORDER BY testID DESC LIMIT 1;")
    # At most one row thanks to the LIMIT clause
    rows = c.fetchall()
    c.close()

    if len(rows) > 0:
        return get_TestResult_from_row(rows[0])
    else:
        return None
+
+#=====================================================================
def get_results(name, type="", where_clause='', orderby_clause=''):
    """Return a list of testresult.TestResult objects
    generated from looking up in the TestRuns table.

    Parameters:
        name: test name to search for. Empty string = don't limit by name
        type: limit by type; default empty string means = don't limit by type.
        where_clause : an additional SQL "where" clause to further limit the search.
            Do not include the WHERE keyword!
            e.g "date > 2010 AND environment='mac'". 
        orderby_clause : a clause to order and/or limit the results.
            e.g. "ORDER BY revision DESC limit 100" to get only the latest 100 revisions.
    """
    db = SQLgetConnection()
    c = db.cursor()

    query = "SELECT * FROM TestRuns "

    # Build up the WHERE clause.  name/type are bound as parameters so
    # values containing quotes cannot break the SQL; the caller-supplied
    # where_clause is raw SQL by design and is appended verbatim.
    where_clauses = []
    params = []
    if name != "":
        where_clauses.append(" name = ?")
        params.append(name)
    if type != "":
        where_clauses.append(" type = ?")
        params.append(type)
    if where_clause != "":
        where_clauses.append(" (" + where_clause + ")")
    if len(where_clauses) > 0:
        query += "WHERE " + " AND ".join(where_clauses)
    # Now the ordering/limiting clause
    query += " " + orderby_clause

    c.execute(query, params)
    rows = c.fetchall()

    # Turn every row into a TestResult
    out = [get_TestResult_from_row(row) for row in rows]
    c.close()
    return out
+      
+      
+#=====================================================================
def get_all_field_values(field_name, where_clause=""):
    """Return a list of every entry of the given
    field (e.g. 'name' or 'environment').

    Parameters:
        field_name: field/column name to select. NOTE: interpolated into
            the SQL (column names cannot be bound parameters); callers
            pass literal column names only.
        where_clause : an additional SQL "where" clause to further limit the search.
            Do not include the WHERE keyword!
            e.g "date > 2010 AND environment='mac'". 
    """
    db = SQLgetConnection()
    c = db.cursor()

    query = "SELECT (%s) FROM TestRuns " % field_name
    if where_clause != "":
        query += "WHERE " + where_clause

    c.execute(query)
    rows = c.fetchall()

    # Each row is a 1-tuple; unpack into a flat list
    out = [x for (x,) in rows]

    # Close the cursor, as the other query helpers in this module do
    # (the original left it open).
    c.close()
    return out
+      
+#=====================================================================
def get_latest_revison():
    """Return the latest revision number, or 0 when the Revisions table
    is empty.  NOTE(review): the misspelled name ("revison") is kept
    because callers elsewhere use it.
    """
    db = SQLgetConnection()
    c = db.cursor()
    c.execute("SELECT (revision) FROM Revisions ORDER BY revision DESC LIMIT 1;")
    rows = c.fetchall()
    c.close()
    if rows:
        return int(rows[0][0])
    return 0
+    
+#=====================================================================
def add_revision():
    """ Adds an entry with the current date/time to the Revisions table.
    The autoincremented index of that entry is the "revision".
    Returns the current revision.
    """
    db = SQLgetConnection()
    c = db.cursor()
    # Bind the timestamp as a parameter instead of interpolating it into
    # the SQL string (consistent with the other writers in this module).
    c.execute("INSERT INTO Revisions VALUES(NULL, ?);",
              (str(datetime.datetime.now()),))
    db.commit()
    c.close()
    return get_latest_revison()
+
+            
+#=====================================================================
def get_all_test_names(where_clause=""):
    """Return a set containing all the UNIQUE test names in the database.
    ----
    where_clause: Do not include the WHERE keyword! """
    names = get_all_field_values('name', where_clause)
    return set(names)
+
+
+#=====================================================================
+def setup_database():
+    """ Routine to set up the mysql database the first time.
+    WARNING: THIS DELETES ANY TABLES ALREADY THERE 
+    """
+    print "Setting up SQL database at",get_database_filename()
+    if os.path.exists(get_database_filename()):
+        print "Creating a backup at", get_database_filename()+".bak"
+        shutil.copyfile(get_database_filename(), get_database_filename()+".bak")
+        
+    db = SQLgetConnection()
+
+    c = db.cursor()
+    try:
+        c.execute("DROP TABLE TestRuns;")
+        c.execute("DROP TABLE Revisions;")
+    except:
+        print "Error dropping tables. Perhaps one does not exist (this is normal on first run)."
+        
+    c.execute("""CREATE TABLE TestRuns (
+    testID INTEGER PRIMARY KEY,
+    date DATETIME, name VARCHAR(60), type VARCHAR(20), 
+    host VARCHAR(30), environment VARCHAR(50), runner VARCHAR(20), 
+    revision INT, commitid VARCHAR(45), 
+    runtime DOUBLE, cpu_fraction DOUBLE, memory_change INT,
+    success BOOL,
+    status VARCHAR(50), logarchive VARCHAR(80),
+    variables VARCHAR(200)
+    ); """)
+    
+    # Now a table that is just one entry per run (a fake "revision")
+            
+    c.execute("""CREATE TABLE Revisions (
+    revision INTEGER PRIMARY KEY,
+    date DATETIME
+    ); """)
+
+        
+###########################################################################
+# A class to report the results of stress tests to the Mantid Test database
+# (requires sqlite3 module)
+###########################################################################
class SQLResultReporter(reporters.ResultReporter):
    '''
    Send the test results to the Mantid test results database
    (requires the sqlite3 module). The database path is taken from
    this module's get_database_filename().
    '''

    def __init__(self):
        # Nothing to configure per-instance.
        pass

    def dispatchResults(self, result):
        '''
        Insert one row into the TestRuns table for the given TestResult.
        '''
        dbcxn = SQLgetConnection()
        cur = dbcxn.cursor()

        # Create the field for the log archive name
        result["logarchive"] = result.get_logarchive_filename()

        # Collect the values in table-column order, as strings.
        values = []
        for field in TABLE_FIELDS:
            val = result[field]
            if isinstance(val, bool):
                # Booleans must be stored as 0 or 1
                values.append(["0", "1"][val])
            else:
                values.append(str(val))

        # Parameterized INSERT: the original built the statement by string
        # concatenation, which broke on values containing quotes
        # (e.g. status/log text) and was injection-prone.
        placeholders = ",".join(["?"] * len(TABLE_FIELDS))
        cur.execute("INSERT INTO TestRuns VALUES(NULL, %s);" % placeholders,
                    values)

        # Commit and close the connection
        dbcxn.commit()
        cur.close()
        dbcxn.close()
+
+
+#============================================================================================
+def generate_fake_data(num_extra = 0):
+    """ Make up some data for a database """
+    print "Generating fake data..."
+    setup_database()
+    rep = SQLResultReporter()
+    for timer in [9400, 9410,9411, 9412] + range(9420,9440) + [9450, 9466] + range(9450, 9450+num_extra):
+        rev = add_revision()
+        for name in ["Project1.MyFakeTest", "Project1.AnotherFakeTest", "Project2.FakeTest", "Project2.OldTest"]:
+            if (name != "Project2.OldTest"):
+                result = testresult.TestResult()
+                result["name"] = name
+                result["date"] = datetime.datetime.now() + datetime.timedelta(days=timer, minutes=timer)
+                result["log_contents"] = "Revision %d" % rev
+                result["runtime"] = timer/10.0 + random.randrange(-2,2)
+                result["commitid"] = rev #'926bf82e36b4c90c95efc3f1151725696273de5a'
+                result["success"] = (random.randint(0,10) > 0)
+                result["status"] = ["failed","success"][result["success"]]
+                result["revision"] = rev
+                rep.dispatchResults(result)
+    print "... Fake data made."
+
+
+#=====================================================================
+if __name__ == "__main__":
+    set_database_filename("SqlResults.test.db")
+    generate_fake_data()
+    
+    res = get_latest_result()
+    print res
+    
+    
diff --git a/Code/Mantid/Testing/SystemTests/scripts/performance/testresult.py b/Code/Mantid/Testing/SystemTests/scripts/performance/testresult.py
new file mode 100644
index 00000000000..fd772de16a3
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/performance/testresult.py
@@ -0,0 +1,120 @@
+'''
+Data object for a TestResult
+
+Copyright &copy; 2009 STFC Rutherford Appleton Laboratories
+
+This file is part of Mantid.
+
+Mantid is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+Mantid is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+File change history is stored at: <https://github.com/mantidproject/mantid>.
+'''
+
+import sys
+import os
+import reporters
+import re
+import time
+import datetime
+import platform
+import subprocess
+import tempfile
+import sqlresults
+import numpy as np
+
+
+
+
+#########################################################################
+#########################################################################
def envAsString():
    """ Return the environment as a short string, e.g. "win32" on
    Windows or the distribution description on Linux. """
    if os.name == 'nt':
        system = platform.system().lower()[:3]
        arch = platform.architecture()[0][:2]
        env = system + arch
    elif os.name == 'mac':
        env = platform.mac_ver()[0]
    else:
        # platform.dist() was deprecated and removed in Python 3.8;
        # fall back to the generic platform string when it is missing.
        if hasattr(platform, 'dist'):
            env = " ".join(platform.dist())
        else:
            env = platform.platform()
    return env
+    
+    
+#########################################################################
+# A class to store the results of a test 
+#########################################################################
class TestResult(object):
    '''
    Stores the results of each test so that they can be reported later.
    Acts as a dictionary keyed by field name (date, name, type, host,
    environment, runner, revision, commitid, runtime, cpu_fraction,
    memory_change, success, status, log_contents, variables).
    '''

    def __init__(self, 
                 date = None,
                 name="",
                 type="system",
                 host=None,
                 environment=None,
                 runner="",
                 commitid='',
                 revision=0,
                 runtime=0.0,
                 speed_up=0.0,
                 cpu_fraction=0.0,
                 memory_change=0, 
                 iterations=1,
                 success=False,
                 status="",
                 log_contents="",
                 variables=""):
        """ Fill the TestResult object with the contents.

        date, host and environment default to None and are resolved when
        the object is constructed. (The previous defaults were evaluated
        once at import time, so every default-constructed result shared
        the module-load timestamp and environment.)

        NOTE: speed_up and iterations are accepted for backwards
        compatibility but are not stored.
        """
        # Resolve the call-time defaults
        if date is None:
            date = datetime.datetime.now()
        if host is None:
            host = platform.uname()[1]
        if environment is None:
            environment = envAsString()
        self.data = {}
        self.data["date"] = date
        self.data["name"] = name
        self.data["type"] = type
        self.data["host"] = host
        self.data["environment"] = environment
        self.data["runner"] = runner
        self.data["revision"] = revision
        self.data["commitid"] = commitid
        self.data["runtime"] = runtime
        self.data["cpu_fraction"] = cpu_fraction
        self.data["memory_change"] = memory_change
        self.data["success"] = success
        self.data["status"] = status
        self.data["log_contents"] = log_contents
        self.data["variables"] = variables

    def get_logarchive_filename(self):
        "Return a bare filename that will hold the archived log contents"
        # Make the timestamp filesystem-safe
        s = str(self.data["date"])
        s = s.replace(" ", "_")
        s = s.replace(":", "-")
        return "%s.%s.log" % (s, self.data["name"])

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data.__setitem__(key, value)

    def getData(self):
        ''' Get the map storing the results   '''
        return self.data

    def __str__(self):
        return str(self.data)
+
+
+    
diff --git a/Code/Mantid/Testing/SystemTests/scripts/performance/xunit_to_sql.py b/Code/Mantid/Testing/SystemTests/scripts/performance/xunit_to_sql.py
new file mode 100755
index 00000000000..e70c27d91c9
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/performance/xunit_to_sql.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+""" Module to convert XUnit XML to SQL database of test results of the same type used
+by python system tests """
+
+import argparse
+import sys
+import os
+import sqlresults
+from testresult import TestResult, envAsString
+from xml.dom.minidom import parse, parseString
+import re
+import time
+import datetime
+import platform
+import subprocess
+import tempfile
+import sqlresults
+import numpy as np
+import glob
+
# Global SQL result reporter (created in __main__ before conversion starts,
# used by handle_testcase)
sql_reporter = None
# Variables string stored with every converted test result
variables = ""
# Revision number allocated for this conversion run (set in __main__)
revision = 0
# Commit SHA passed on the command line (set in __main__)
commitid = ''
+
def handle_testcase(case, suite_name):
    """ Convert one <testcase> element into a TestResult and save it to
    the database via the module-global sql_reporter.
    (suite_name is currently unused but kept for interface stability.) """
    # Build the full name (Project.Suite.Case)
    name = case.getAttribute("classname") + "." + case.getAttribute("name")
    # Attributes may be absent ("" from getAttribute) or malformed; fall
    # back to 0 rather than aborting the whole conversion.  Only the
    # conversion errors are caught, not arbitrary exceptions.
    try:
        time = float(case.getAttribute("time"))
    except (TypeError, ValueError):
        time = 0.0
    try:
        cpu_fraction = float(case.getAttribute("CPUFraction"))
    except (TypeError, ValueError):
        cpu_fraction = 0.0
    try:
        # The <memory> child element is optional; item(0) is None when absent,
        # which raises AttributeError on .firstChild.
        memory_change = int(case.getElementsByTagName("memory").item(0).firstChild.nodeValue)
    except (AttributeError, TypeError, ValueError):
        memory_change = 0

    tr = TestResult(date = datetime.datetime.now(),
                 name=name,
                 type="performance",
                 host=platform.uname()[1],
                 environment=envAsString(),
                 runner="runSystemTests.py",
                 revision=revision,
                 commitid=commitid,
                 runtime=time,
                 cpu_fraction=cpu_fraction,
                 memory_change=memory_change,
                 success=True,
                 status="",
                 log_contents="",
                 variables=variables) 
    # Now report it to SQL
    sql_reporter.dispatchResults(tr)
+
def handle_suite(suite):
    """ Save every test case found in one <testsuite> element. """
    suite_name = suite.getAttribute("name")
    for case in suite.getElementsByTagName("testcase"):
        handle_testcase(case, suite_name)
+    
+
+def convert_xml(filename):
+    """Convert a single XML file to SQL db"""
+    # Parse the xml
+    print "Reading", filename
+    doc = parse(filename)
+    suites = doc.getElementsByTagName("testsuite")
+    for suite in suites:
+        handle_suite(suite)
+
+
+#====================================================================================
if __name__ == "__main__":
    # Parse the command line
    parser = argparse.ArgumentParser(description='Add the contents of Xunit-style XML test result files to a SQL database.')

    parser.add_argument('--db', dest='db', 
                        default="./MantidPerformanceTests.db",
                        help='Full path to the SQLite database holding the results (default "./MantidPerformanceTests.db"). The database will be created if it does not exist.')
    
    parser.add_argument('--variables', dest='variables', 
                        default="",
                        help='Optional string of comma-separated "VAR1NAME=VALUE,VAR2NAME=VALUE2" giving some parameters used, e.g. while building.')
    
    parser.add_argument('--commit', dest='commitid', 
                        default="",
                        help='Commit ID of the current build (a 40-character SHA string).')
    
    parser.add_argument('xmlpath', metavar='XMLPATH', type=str, nargs='+',
                        default="", 
                        help='Required: Path to the Xunit XML files.')
    
    args = parser.parse_args()
        
    # Setup the SQL database but only if it does not exist
    sqlresults.set_database_filename(args.db)
    if not os.path.exists(args.db):
        sqlresults.setup_database()
    # Set up the module-global reporter used by handle_testcase
    sql_reporter = sqlresults.SQLResultReporter()
    
    variables = args.variables 
    # Add a new revision and get the "revision" number
    revision = sqlresults.add_revision()
    # Save the commitid
    commitid = args.commitid

    # If a directory has been provided, look there for all of the XML files
    if os.path.isdir(args.xmlpath[0]):
        xmldir = args.xmlpath[0]
        if not os.path.isabs(xmldir):
            xmldir = os.path.abspath(xmldir)
        xmlfiles = glob.glob(os.path.join(xmldir, '*.xml'))
    else:
        xmlfiles = args.xmlpath

    # Convert each file.  (Loop variable renamed: the original used
    # "file", shadowing the builtin.)
    for xml_file in xmlfiles:
        convert_xml(xml_file)
diff --git a/Code/Mantid/Testing/SystemTests/scripts/runSystemTests.py b/Code/Mantid/Testing/SystemTests/scripts/runSystemTests.py
new file mode 100755
index 00000000000..88785cc679b
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/scripts/runSystemTests.py
@@ -0,0 +1,118 @@
#!/usr/bin/env python

import os
# set up the command line options
VERSION = "1.1"
DEFAULT_FRAMEWORK_LOC = os.path.dirname(os.path.realpath(__file__)) + "/../StressTestFramework"

# Long description shown by --help
info = []
info.append("This program will configure mantid run all of the system tests located in")
info.append("the 'SystemTests/AnalysisTests' directory and log the results in 'logs/'.")
info.append("This program will create a temporary 'Mantid.user.properties' file which")
info.append("it will rename to 'Mantid.user.properties.systest' upon completion. The")
info.append("current version of the code does not print to stdout while the test is")
info.append("running, so the impatient user may ^C to kill the process. In this case")
info.append("all of the tests that haven't been run will be marked as skipped in the")
info.append("full logs.")

import optparse
parser = optparse.OptionParser("Usage: %prog [options]", None,
                               optparse.Option, VERSION, 'error', ' '.join(info))
parser.add_option("-m", "--mantidpath", dest="mantidpath",
                  help="Location of mantid build")
parser.add_option("", "--email", action="store_true",
                  help="send an email with test status.")
parser.add_option("", "--frameworkLoc",
                  help="location of the stress test framework (default=%s)" % DEFAULT_FRAMEWORK_LOC)
parser.add_option("", "--disablepropmake", action="store_false", dest="makeprop",
                  help="By default this will move your properties file out of the way and create a new one. This option turns off this behavior.")
parser.add_option("-R", "--tests-regex", dest="testsInclude",
                  help="String specifying which tests to run. Simply uses 'string in testname'.")
parser.add_option("-E", "--excluderegex", dest="testsExclude",
                  help="String specifying which tests to not run. Simply uses 'string in testname'.")
loglevelChoices=["error", "warning", "notice", "information", "debug"]
parser.add_option("-l", "--loglevel", dest="loglevel",
                  choices=loglevelChoices,
                  help="Set the log level for test running: [" + ', '.join(loglevelChoices) + "]")
parser.add_option("", "--showskipped", dest="showskipped", action="store_true",
                  help="List the skipped tests.")
parser.add_option("", "--archivesearch", dest="archivesearch", action="store_true",
                  help="Turn on archive search for file finder.")
parser.set_defaults(frameworkLoc=DEFAULT_FRAMEWORK_LOC, mantidpath=None, makeprop=True,
                    loglevel="information")
(options, args) = parser.parse_args()
+
# import the stress testing framework
import sys
import platform
sys.path.append(options.frameworkLoc)
import stresstesting

# Make sure the specified MantidFramework is picked up
# Use specified option if given
mantid_module_path = None
if options.mantidpath is not None:
  mantid_module_path = options.mantidpath
elif os.path.exists("MantidFramework"):
  pass # Current directory is in the path already
elif 'MANTIDPATH' in os.environ:
  mantid_module_path = os.environ['MANTIDPATH']

# Ensure that this is the one that is picked.
# Guard against None: the original called sys.path.insert(0, None) when
# no path was found, which breaks later imports with a TypeError.
if mantid_module_path is not None:
  sys.path.insert(0, mantid_module_path)

# On Windows & OSX we need to ensure the mantid libraries in bin/Contents/MacOS can be found.
# Must be done before first import of mantid. This is the same as what a user would have to do to
# import mantid in a vanilla python session
# Unfortunately Python seems to know the path separator on each platform but
# not the dynamic library path variable name
if platform.system() == 'Windows':
  path_var = "PATH"
elif platform.system() == 'Darwin':
  path_var = "DYLD_LIBRARY_PATH"
else:
  path_var = None
# Set the path (None + os.pathsep would raise a TypeError, so both
# values must be present)
if path_var and mantid_module_path is not None:
  os.environ[path_var] = mantid_module_path + os.pathsep + os.environ.get(path_var, "")
+
# Configure mantid: point the framework at the build and (optionally)
# swap in a clean user properties file
mtdconf = stresstesting.MantidFrameworkConfig(mantid_module_path, loglevel=options.loglevel,
                                              archivesearch=options.archivesearch)
if options.makeprop:
  mtdconf.config()

# run the tests
reporter = stresstesting.XmlResultReporter(showSkipped=options.showskipped)
mgr = stresstesting.TestManager(mtdconf.testDir, output = [reporter],
                                testsInclude=options.testsInclude, testsExclude=options.testsExclude)
try:
  mgr.executeTests()
except KeyboardInterrupt:
  # ^C marks the remaining tests as skipped so the report is still written
  mgr.markSkipped("KeyboardInterrupt")

# report the errors
success = reporter.reportStatus()
xml_report = open(os.path.join(mtdconf.saveDir, "SystemTestsReport.xml"),'w')
xml_report.write(reporter.getResults())
xml_report.close()

# put the configuration back to its original state
if options.makeprop:
  mtdconf.restoreconfig()

# Print a summary: pass percentage excludes skipped tests
print
if mgr.skippedTests == mgr.totalTests:
  print "All tests were skipped"
  success = False # fail if everything was skipped
else:
  percent = 1.-float(mgr.failedTests)/float(mgr.totalTests-mgr.skippedTests)
  percent = int(100. * percent)
  print "%d%s tests passed, %d tests failed out of %d (%d skipped)" % \
      (percent, '%', mgr.failedTests, (mgr.totalTests-mgr.skippedTests), mgr.skippedTests)
print 'All tests passed? ' + str(success)
# Non-zero exit code signals failure to the CI job
if not success:
  sys.exit(1)
diff --git a/Code/Mantid/Testing/SystemTests/tests/analysis/reference/README.md b/Code/Mantid/Testing/SystemTests/tests/analysis/reference/README.md
new file mode 100644
index 00000000000..820b616aa6b
--- /dev/null
+++ b/Code/Mantid/Testing/SystemTests/tests/analysis/reference/README.md
@@ -0,0 +1 @@
+This directory stores the content links to the reference files used by the system tests.
-- 
GitLab