diff --git a/Framework/DataHandling/src/ISISRunLogs.cpp b/Framework/DataHandling/src/ISISRunLogs.cpp
index d3606ffedaedb63b2345c5c34a766280ec817126..bfad70bab747d77da07a3ef5e2c41d2aa5346698 100644
--- a/Framework/DataHandling/src/ISISRunLogs.cpp
+++ b/Framework/DataHandling/src/ISISRunLogs.cpp
@@ -56,12 +56,12 @@ void ISISRunLogs::addStatusLog(API::Run &exptRun) {
  */
 void ISISRunLogs::addPeriodLogs(const int period, API::Run &exptRun) {
   auto periodLog = m_logParser->createPeriodLog(period);
-  auto logFilter = std::unique_ptr<LogFilter>();
-  const TimeSeriesProperty<bool> *maskProp(nullptr);
+  std::unique_ptr<LogFilter> logFilter{nullptr};
+  const TimeSeriesProperty<bool> *maskProp{nullptr};
   try {
     auto runningLog =
         exptRun.getTimeSeriesProperty<bool>(LogParser::statusLogName());
-    logFilter = std::make_unique<LogFilter>(runningLog);
+    logFilter = std::make_unique<LogFilter>(*runningLog);
   } catch (std::exception &) {
     g_log.warning(
         "Cannot find status log. Logs will be not be filtered by run status");
@@ -74,6 +74,8 @@ void ISISRunLogs::addPeriodLogs(const int period, API::Run &exptRun) {
       maskProp = logFilter->filter();
     } else
       maskProp = periodLog;
+  } else if (logFilter) {
+    maskProp = logFilter->filter();
   }
   // Filter logs if we have anything to filter on
   if (maskProp)
@@ -86,7 +88,7 @@ void ISISRunLogs::addPeriodLogs(const int period, API::Run &exptRun) {
   } catch (std::runtime_error &) {
     // Already has one
   }
-}
+}
 
 /**
  * Add the period log to a run.
diff --git a/Framework/Geometry/src/Instrument/XMLInstrumentParameter.cpp b/Framework/Geometry/src/Instrument/XMLInstrumentParameter.cpp
index eede81254bde8c70189274b5cd573c684c135a4f..08d66b607d057196479e3ff712f781136057afad 100644
--- a/Framework/Geometry/src/Instrument/XMLInstrumentParameter.cpp
+++ b/Framework/Geometry/src/Instrument/XMLInstrumentParameter.cpp
@@ -145,7 +145,8 @@ double XMLInstrumentParameter::createParamValue(
       extractedValue =
           Kernel::filterByStatistic(logData, (*statisics_choice).second);
     }
-    // Looking for string: "position n", where n is an integer
+    // Looking for string: "position n", where n is an integer and is a 1-based
+    // index
     else if (m_extractSingleValueAs.find("position") == 0 &&
              m_extractSingleValueAs.size() >= 10) {
       std::stringstream extractPosition(m_extractSingleValueAs);
@@ -153,7 +154,7 @@ double XMLInstrumentParameter::createParamValue(
       int position;
       extractPosition >> dummy >> position;
 
-      extractedValue = logData->nthValue(position);
+      extractedValue = logData->nthValue(position - 1);
     } else {
       throw Kernel::Exception::InstrumentDefinitionError(
           std::string("extract-single-value-as attribute for <parameter>") +
diff --git a/Framework/Geometry/test/XMLInstrumentParameterTest.h b/Framework/Geometry/test/XMLInstrumentParameterTest.h
index b16fbcce39dd4e6d1c96caad6109d06be50f73e5..7e08c4c1377891c6d32f7ecf6094fa976e6b52a4 100644
--- a/Framework/Geometry/test/XMLInstrumentParameterTest.h
+++ b/Framework/Geometry/test/XMLInstrumentParameterTest.h
@@ -166,7 +166,7 @@ public:
     series.addValue("2000-11-30T01:01:03", 2);
     series.addValue("2000-11-30T01:01:04", 3);
 
-    XMLInstrumentParameter_sptr logFile = make_logfile_object("position 1");
+    XMLInstrumentParameter_sptr logFile = make_logfile_object("position 2");
     const double actualFilteredValue = logFile->createParamValue(&series);
     TSM_ASSERT_EQUALS("Filtering by Nth position is not performed correctly",
                       expectedFilteredValue, actualFilteredValue);
diff --git a/Framework/Kernel/inc/MantidKernel/LogParser.h b/Framework/Kernel/inc/MantidKernel/LogParser.h
index 6a76a552b7564bec36f827fd90a47ff681fb8784..9deaab2abbff79982b6d9fd8e4908d980b0e386d 100644
--- a/Framework/Kernel/inc/MantidKernel/LogParser.h
+++ b/Framework/Kernel/inc/MantidKernel/LogParser.h
@@ -41,10 +41,13 @@ within the interval.
 */
 class MANTID_KERNEL_DLL LogParser {
 public:
+  /// Returns the name of the log that contains the current period number
+  static const std::string currentPeriodLogName() { return "current_period"; }
   /// Returns the name of the log created that defines the status during a run
-  static const std::string statusLogName();
+  static const std::string statusLogName() { return "running"; }
   /// Returns the name of the log that contains all of the periods
-  static const std::string periodsLogName();
+  static const std::string periodsLogName() { return "periods"; }
+
   /// Creates a TimeSeriesProperty of either double or string type depending on
   /// the log data
   /// Returns a pointer to the created property
diff --git a/Framework/Kernel/src/LogParser.cpp b/Framework/Kernel/src/LogParser.cpp
index e7014dfab178906cbbf091502d657965eccfc9a9..81558426838710b4e0eb36125b6f45029ed29122 100644
--- a/Framework/Kernel/src/LogParser.cpp
+++ b/Framework/Kernel/src/LogParser.cpp
@@ -16,8 +16,8 @@
 #include <fstream>
 
 // constants for the new style icp event commands
-const char *START_COLLECTION = "START_COLLECTION";
-const char *STOP_COLLECTION = "STOP_COLLECTION";
+constexpr const char *START_COLLECTION = "START_COLLECTION";
+constexpr const char *STOP_COLLECTION = "STOP_COLLECTION";
 
 using std::size_t;
 
@@ -30,12 +30,6 @@ namespace {
 Logger g_log("LogParser");
 } // namespace
 
-/// @returns the name of the log created that defines the status during a run
-const std::string LogParser::statusLogName() { return std::string("running"); }
-
-/// @returns the name of the log that contains all of the periods
-const std::string LogParser::periodsLogName() { return std::string("periods"); }
-
 /**  Reads in log data from a log file and stores them in a TimeSeriesProperty.
 @param logFName :: The name of the log file
 @param name :: The name of the property
@@ -282,7 +276,7 @@ Kernel::TimeSeriesProperty<bool> *LogParser::createPeriodLog(int period) const {
  */
 Kernel::Property *LogParser::createCurrentPeriodLog(const int &period) const {
   Kernel::PropertyWithValue<int> *currentPeriodProperty =
-      new Kernel::PropertyWithValue<int>("current_period", period);
+      new Kernel::PropertyWithValue<int>(currentPeriodLogName(), period);
   return currentPeriodProperty;
 }
 
diff --git a/Framework/Kernel/src/TimeSeriesProperty.cpp b/Framework/Kernel/src/TimeSeriesProperty.cpp
index ad1045336e97683c14f4ce59f6a61447f150c0d5..0f97d8cb83063ed6a5f626aedc69918050523296 100644
--- a/Framework/Kernel/src/TimeSeriesProperty.cpp
+++ b/Framework/Kernel/src/TimeSeriesProperty.cpp
@@ -1982,6 +1982,14 @@ template <typename TYPE> void TimeSeriesProperty<TYPE>::countSize() const {
     }
     size_t nvalues = m_filterQuickRef.empty() ? m_values.size()
                                               : m_filterQuickRef.back().second;
+    // The filter logic can end up with the quick ref having a duplicate of the
+    // last time and value at the end if the last filter time is past the log
+    // time. See "If it is out of upper boundary, still record it.  but make the
+    // log entry to mP.size()+1" in applyFilter
+    // Make the log seem the full size
+    if (nvalues == m_values.size() + 1) {
+      --nvalues;
+    }
     m_size = static_cast<int>(nvalues);
   }
 }
diff --git a/Framework/Muon/test/PlotAsymmetryByLogValueTest.h b/Framework/Muon/test/PlotAsymmetryByLogValueTest.h
index 8288e0936b963c8ce29702f29a9566ea1bf15b1c..b7e4e48248bda17f179657def0c4a2aa15c4c215 100644
--- a/Framework/Muon/test/PlotAsymmetryByLogValueTest.h
+++ b/Framework/Muon/test/PlotAsymmetryByLogValueTest.h
@@ -430,8 +430,8 @@ public:
     // rather than asymmetry (Y values)
     const auto &X = outWs->x(0);
 
-    TS_ASSERT_DELTA(X[0], 178.740476, 0.00001);
-    TS_ASSERT_DELTA(X[1], 178.849998, 0.00001);
+    TS_ASSERT_DELTA(X[0], 178.7, 1e-5);
+    TS_ASSERT_DELTA(X[1], 178.3, 1e-5);
   }
 
   void test_invalidRunNumbers() {
diff --git a/scripts/Muon/GUI/Common/contexts/fitting_context.py b/scripts/Muon/GUI/Common/contexts/fitting_context.py
index 33a0cc36f2a7416d712f72a75c21719ffcd9b9e7..c9589c79f405f002c38785a726a7ff441a463ce6 100644
--- a/scripts/Muon/GUI/Common/contexts/fitting_context.py
+++ b/scripts/Muon/GUI/Common/contexts/fitting_context.py
@@ -9,7 +9,9 @@ from __future__ import (absolute_import, division)
 from collections import OrderedDict
 import re
 
-from mantid.py3compat import iteritems, iterkeys
+from mantid.api import AnalysisDataService
+from mantid.py3compat import iteritems, iterkeys, string_types
+import numpy as np
 
 from Muon.GUI.Common.observer_pattern import Observable
 
@@ -188,8 +190,8 @@ class FitInformation(object):
         :param parameter_workspace: The workspace wrapper
         that contains all of the parameters from the fit
         :param fit_function_name: The name of the function used
-        :param input_workspace: The name of the workspace containing
-        the original data
+        :param input_workspace: The name or list of names
+        of the workspace(s) containing the original data
         :param output_workspace_names: A list containing the names of the output workspaces containing the fits
         :param global_parameters: An optional list of parameters
         that were tied together during the fit
@@ -197,20 +199,78 @@ class FitInformation(object):
         self._fit_parameters = FitParameters(parameter_workspace,
                                              global_parameters)
         self.fit_function_name = fit_function_name
-        self.input_workspace = input_workspace
+        self.input_workspaces = [input_workspace] if isinstance(
+            input_workspace, string_types) else input_workspace
         self.output_workspace_names = output_workspace_names
 
     def __eq__(self, other):
         """Objects are equal if each member is equal to the other"""
-        return self._fit_parameters.parameter_workspace_name == other._fit_parameters.parameter_workspace_name and \
+        return self.parameters == other.parameters and \
             self.fit_function_name == other.fit_function_name and \
-            self.input_workspace == other.input_workspace and \
+            self.input_workspaces == other.input_workspaces and \
             self.output_workspace_names == other.output_workspace_names
 
     @property
     def parameters(self):
         return self._fit_parameters
 
+    def log_names(self, filter_fn=None):
+        """
+        The names of the logs on the workspaces
+        associated with this fit.
+
+        :filter_fn: An optional unary function to filter the names out. It should accept a log
+        and return True if the log should be accepted
+        :return: A list of names
+        """
+        filter_fn = filter_fn if filter_fn is not None else lambda x: True
+
+        all_names = []
+        for ws_name in self.input_workspaces:
+            logs = _run(ws_name).getLogData()
+            all_names.extend([log.name for log in logs if filter_fn(log)])
+
+        return all_names
+
+    def has_log(self, log_name):
+        """
+        :param log_name: A string name
+        :return: True if the log exists on all of the input workspaces False, otherwise
+        """
+        for ws_name in self.input_workspaces:
+            run = _run(ws_name)
+            if not run.hasProperty(log_name):
+                return False
+
+        return True
+
+    def log_value(self, log_name):
+        """
+        Compute and return the log value for the named log.
+        If the log is a string then the value is converted to a float
+        if possible. If the log is a time series then the time-average
+        value is computed. If multiple workspaces are part of the fit
+        then the values computed above are averaged over each workspace.
+        It is assumed that all logs have been checked for existence.
+        :param log_name: The name of an existing log
+        :return: A single double value
+        """
+        ads = AnalysisDataService.Instance()
+
+        def value_from_workspace(wksp_name):
+            run = ads.retrieve(wksp_name).run()
+            prop = run.getProperty(log_name)
+            if hasattr(prop, 'timeAverageValue'):
+                return prop.timeAverageValue()
+            else:
+                return float(prop.value)
+
+        values = [
+            value_from_workspace(wksp_name)
+            for wksp_name in self.input_workspaces
+        ]
+        return np.mean(values)
+
 
 class FittingContext(object):
     """Context specific to fitting.
@@ -245,7 +305,8 @@ class FittingContext(object):
         """
         self.add_fit(
             FitInformation(parameter_workspace, fit_function_name,
-                           input_workspace, output_workspace_names, global_parameters))
+                           input_workspace, output_workspace_names,
+                           global_parameters))
 
     def add_fit(self, fit):
         """
@@ -263,18 +324,8 @@ class FittingContext(object):
         """
         return list(set([fit.fit_function_name for fit in self.fit_list]))
 
-    def find_fits_for_function(self, fit_function_name):
-        """
-        Find the fits in the list whose function name matches
-        :param fit_function_name: The name of the function
-        :return: A list of any matching fits
-        """
-        return [
-            fit for fit in self.fit_list
-            if fit.fit_function_name == fit_function_name
-        ]
-
-    def find_output_workspaces_for_input_workspace_name(self, input_workspace_name):
+    def find_output_workspaces_for_input_workspace_name(
+            self, input_workspace_name):
         """
         Find the fits in the list whose input workspace matches
         :param input_workspace_name: The name of the input_workspace
@@ -282,11 +333,29 @@ class FittingContext(object):
         """
         workspace_list = []
         for fit in self.fit_list:
-            if type(fit.input_workspace) == list:
-                for index, workspace in enumerate(fit.input_workspace):
-                    if workspace == input_workspace_name:
-                        workspace_list.append(fit.output_workspace_names[index])
-            else:
-                if input_workspace_name == fit.input_workspace:
-                    workspace_list.append(fit.output_workspace_names[0])
+            for index, workspace in enumerate(fit.input_workspaces):
+                if workspace == input_workspace_name:
+                    workspace_list.append(fit.output_workspace_names[index])
+
         return workspace_list
+
+    def log_names(self, filter_fn=None):
+        """
+        The names of the logs on the workspaces associated with all of the workspaces.
+
+        :filter_fn: An optional unary function to filter the names out. For more information see
+        FitInformation.log_names
+        :return: A list of names of logs
+        """
+        return [
+            name for fit in self.fit_list for name in fit.log_names(filter_fn)
+        ]
+
+
+# Private functions
+def _run(ws_name):
+    """
+    :param ws_name: A workspace name in the ADS
+    :return: The Run object for the named workspace
+    """
+    return AnalysisDataService.Instance().retrieve(ws_name).run()
diff --git a/scripts/Muon/GUI/Common/results_tab_widget/results_tab_model.py b/scripts/Muon/GUI/Common/results_tab_widget/results_tab_model.py
index 487c2c2d0466ce45e7cdbadb7bdc097d9cb6d839..12c53b0245070dfdd159d7e433cc3d52b29dc6bb 100644
--- a/scripts/Muon/GUI/Common/results_tab_widget/results_tab_model.py
+++ b/scripts/Muon/GUI/Common/results_tab_widget/results_tab_model.py
@@ -7,10 +7,9 @@
 #  This file is part of the mantid workbench.
 from __future__ import (absolute_import, division, unicode_literals)
 
-from mantid.api import AnalysisDataService, WorkspaceFactory, WorkspaceGroup
+from mantid.api import AnalysisDataService, WorkspaceFactory
 from mantid.kernel import FloatTimeSeriesProperty
-from mantid.py3compat import string_types
-import numpy as np
+from mantid.py3compat import Enum
 
 from Muon.GUI.Common.observer_pattern import GenericObserver
 
@@ -22,6 +21,21 @@ ERROR_COL_SUFFIX = 'Error'
 # This is not a particularly robust way of ignoring this as it
 # depends on how Fit chooses to output the name of that value
 RESULTS_TABLE_COLUMNS_NO_ERRS = ['Cost function value']
+WORKSPACE_NAME_COL = 'workspace_name'
+
+
+class TableColumnType(Enum):
+    """Enumeration to match the expected int used for TableWorkspace.addColumn
+    for specifying the column type"""
+
+    NotSet = -1000
+    NoType = 0
+    X = 1
+    Y = 2
+    Z = 3
+    XErr = 4
+    YErr = 5
+    Label = 6
 
 
 class ResultsTabModel(object):
@@ -84,7 +98,7 @@ class ResultsTabModel(object):
         for index, fit in enumerate(self._fit_context.fit_list):
             if fit.fit_function_name != self.selected_fit_function():
                 continue
-            name = _result_workspace_name(fit)
+            name = fit.parameters.parameter_workspace_name
             if name in existing_selection:
                 checked = existing_selection[name][1]
             else:
@@ -103,11 +117,7 @@ class ResultsTabModel(object):
         format matches that of the ListSelectorPresenter class' model.
         """
         selection = {}
-        fits = self._fit_context.fit_list
-        if not fits:
-            return selection
-
-        logs = log_names(fits[0].input_workspace)
+        logs = self._fit_context.log_names(filter_fn=_log_should_be_displayed)
         for index, name in enumerate(logs):
             if name in existing_selection:
                 checked = existing_selection[name][1]
@@ -132,7 +142,8 @@ class ResultsTabModel(object):
         [(workspace, fit_position),...]
         It is assumed this is not empty and ordered as it should be displayed.
         """
-        self._raise_error_on_incompatible_selection(results_selection)
+        self._raise_error_on_incompatible_selection(log_selection,
+                                                    results_selection)
 
         results_table = self._create_empty_results_table(
             log_selection, results_selection)
@@ -141,44 +152,97 @@ class ResultsTabModel(object):
             fit = all_fits[position]
             fit_parameters = fit.parameters
             row_dict = {
-                'workspace_name': fit_parameters.parameter_workspace_name
+                WORKSPACE_NAME_COL: fit_parameters.parameter_workspace_name
             }
-            # logs first
-            if len(log_selection) > 0:
-                workspace = _workspace_for_logs(fit.input_workspace)
-                ws_run = workspace.run()
-                for log_name in log_selection:
-                    try:
-                        log_value = ws_run.getPropertyAsSingleValue(log_name)
-                    except Exception:
-                        log_value = np.nan
-                    row_dict.update({log_name: log_value})
-            # fit parameters
-            for param_name in fit_parameters.names():
-                row_dict.update({param_name: fit_parameters.value(param_name)})
-                if _param_error_should_be_displayed(param_name):
-                    row_dict.update({
-                        _error_column_name(param_name):
-                        fit_parameters.error(param_name)
-                    })
-
-            results_table.addRow(row_dict)
+            row_dict = self._add_logs_to_table(row_dict, fit, log_selection)
+            results_table.addRow(
+                self._add_parameters_to_table(row_dict, fit_parameters))
 
         AnalysisDataService.Instance().addOrReplace(self.results_table_name(),
                                                     results_table)
         return results_table
 
-    def _raise_error_on_incompatible_selection(self, results_selection):
+    def _add_logs_to_table(self, row_dict, fit, log_selection):
+        """
+        Add the log values into the row for the given fit
+        :param row_dict: The dict of current row values
+        :param fit: The fit object being processed
+        :param log_selection: The current selection of logs as a list of names
+        :return: The updated row values dict
+        """
+        if not log_selection:
+            return row_dict
+
+        for log_name in log_selection:
+            row_dict.update({log_name: fit.log_value(log_name)})
+
+        return row_dict
+
+    def _add_parameters_to_table(self, row_dict, fit_parameters):
+        """
+        Add the parameter values into the row for the given fit
+        :param row_dict: The dict of current row values
+        :param fit_parameters: The list of FitParameter objects
+        :return: The updated row dictionary
+        """
+        for param_name in fit_parameters.names():
+            row_dict.update({param_name: fit_parameters.value(param_name)})
+            if _param_error_should_be_displayed(param_name):
+                row_dict.update({
+                    _error_column_name(param_name):
+                    fit_parameters.error(param_name)
+                })
+
+        return row_dict
+
+    def _raise_error_on_incompatible_selection(self, log_selection,
+                                               results_selection):
         """If the selected results cannot be displayed together then raise an error
 
+        :param log_selection: See create_results_output
         :param results_selection: See create_results_output
-        :raises RuntimeError
+        :raises RuntimeError if the selection cannot produce a valid table
+        """
+        self._raise_if_log_selection_invalid(log_selection, results_selection)
+        self._raise_if_result_selection_is_invalid(results_selection)
+
+    def _raise_if_log_selection_invalid(self, log_selection,
+                                        results_selection):
+        """
+        Raise a RuntimeError if the log selection is invalid.
+        :param log_selection: The selected log values
+        :param results_selection: The selected fit results
+        """
+        all_fits = self._fit_context.fit_list
+        missing_msg = []
+        for selection in results_selection:
+            fit = all_fits[selection[1]]
+            missing = []
+            for log_name in log_selection:
+                if not fit.has_log(log_name):
+                    missing.append(log_name)
+            if missing:
+                missing_msg.append("  Fit '{}' is missing the logs {}".format(
+                    fit.parameters.parameter_workspace_name, missing))
+        if missing_msg:
+            raise RuntimeError(
+                "The logs for each selected fit do not match:\n" +
+                "\n".join(missing_msg))
+
+    def _raise_if_result_selection_is_invalid(self, results_selection):
+        """
+        Raise a RuntimeError if the result selection is invalid.
+        :param results_selection: The selected fit results
         """
         all_fits = self._fit_context.fit_list
-        nparams_selected = [len(all_fits[position].parameters) for _, position in results_selection]
+        nparams_selected = [
+            len(all_fits[position].parameters)
+            for _, position in results_selection
+        ]
         if nparams_selected[1:] != nparams_selected[:-1]:
             msg = "The number of parameters for each selected fit does not match:\n"
-            for (_, position), nparams in zip(results_selection, nparams_selected):
+            for (_, position), nparams in zip(results_selection,
+                                              nparams_selected):
                 fit = all_fits[position]
                 msg += "  {}: {}\n".format(
                     fit.parameters.parameter_workspace_name, nparams)
@@ -192,16 +256,17 @@ class ResultsTabModel(object):
         :return: A new TableWorkspace
         """
         table = WorkspaceFactory.Instance().createTable()
-        table.addColumn('str', 'workspace_name')
+        table.addColumn('str', 'workspace_name', TableColumnType.NoType.value)
         for log_name in log_selection:
-            table.addColumn('float', log_name)
+            table.addColumn('float', log_name, TableColumnType.X.value)
         # assume all fit functions are the same in fit_selection and take
         # the parameter names from the first fit.
         parameters = self._find_parameters_for_table(results_selection)
         for name in parameters.names():
-            table.addColumn('float', name)
+            table.addColumn('float', name, TableColumnType.Y.value)
             if _param_error_should_be_displayed(name):
-                table.addColumn('float', _error_column_name(name))
+                table.addColumn('float', _error_column_name(name),
+                                TableColumnType.YErr.value)
         return table
 
     # Private API
@@ -236,37 +301,7 @@ class ResultsTabModel(object):
         return first_fit.parameters
 
 
-# Public helper functions
-def log_names(workspace_name):
-    """
-    Return a list of log names from the given workspace.
-
-    :param workspace: A string name of a workspace in the ADS. If the name points to
-    a group then the logs of the first workspace are returned
-    :return: A list of sample log names
-    :raises KeyError: if the workspace does not exist in the ADS
-    """
-    workspace = _workspace_for_logs(workspace_name)
-    all_logs = workspace.run().getLogData()
-    return [log.name for log in all_logs if _log_should_be_displayed(log)]
-
-
 # Private helper functions
-def _workspace_for_logs(name_or_names):
-    """Return the workspace handle to be used to access the logs.
-    We assume workspace_name is a string or a list of strings
-    :param name_or_names: The name or list of names in the ADS
-    """
-    if not isinstance(name_or_names, string_types):
-        name_or_names = name_or_names[0]
-
-    workspace = AnalysisDataService.retrieve(name_or_names)
-    if isinstance(workspace, WorkspaceGroup):
-        workspace = workspace[0]
-
-    return workspace
-
-
 def _log_should_be_displayed(log):
     """Returns true if the given log should be included in the display"""
     return isinstance(log, FloatTimeSeriesProperty) or \
@@ -285,29 +320,3 @@ def _error_column_name(name):
     :return: A name for the error column
     """
     return name + ERROR_COL_SUFFIX
-
-
-def _result_workspace_name(fit):
-    """
-    Return the result workspace name for a given FitInformation object. The fit.input_workspace
-    can be a list of workspaces or a single value. If a list is found then the
-    first workspace is returned.
-    :param fit: A FitInformation object describing the fit
-    :return: A workspace name to be used in the fit results
-    """
-    name_or_names = fit.input_workspace
-    if isinstance(name_or_names, string_types):
-        return name_or_names
-    else:
-        return _create_multi_domain_fitted_workspace_name(
-            name_or_names, fit.fit_function_name)
-
-
-def _create_multi_domain_fitted_workspace_name(input_workspaces, function):
-    """Construct a name for a result workspace from the input list and function
-
-    :param input_workspaces: The list of input workspaces used for the fit
-    :param function: The fit function name
-    :return: A string result name
-    """
-    return input_workspaces[0] + '+ ...; Fitted; ' + function
diff --git a/scripts/test/Muon/CMakeLists.txt b/scripts/test/Muon/CMakeLists.txt
index 85138a16d606df646b29fd677d2159a34ddd608b..62b3cd42da29ac1d9b55cceeb7ca416ea710d8c9 100644
--- a/scripts/test/Muon/CMakeLists.txt
+++ b/scripts/test/Muon/CMakeLists.txt
@@ -7,6 +7,8 @@ set ( TEST_PY_FILES
    fitting_tab_widget/workspace_selector_dialog_presenter_test.py
    fitting_tab_widget/fitting_tab_presenter_test.py
    fitting_tab_widget/fitting_tab_model_test.py
+   fit_information_test.py
+   fit_parameters_test.py
    fitting_context_test.py
    home_grouping_widget_test.py
    home_instrument_widget_test.py
@@ -72,6 +74,8 @@ set ( TEST_PY_FILES_QT4
    fitting_tab_widget/workspace_selector_dialog_presenter_test.py
    fitting_tab_widget/fitting_tab_presenter_test.py
    fitting_tab_widget/fitting_tab_model_test.py
+   fit_information_test.py
+   fit_parameters_test.py
    fitting_context_test.py
    home_grouping_widget_test.py
    home_instrument_widget_test.py
diff --git a/scripts/test/Muon/fit_information_test.py b/scripts/test/Muon/fit_information_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd8289f9c89f40992a8635dfd97e570c7bf86bdd
--- /dev/null
+++ b/scripts/test/Muon/fit_information_test.py
@@ -0,0 +1,218 @@
+# Mantid Repository : https://github.com/mantidproject/mantid
+#
+# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
+#     NScD Oak Ridge National Laboratory, European Spallation Source
+#     & Institut Laue - Langevin
+# SPDX - License - Identifier: GPL - 3.0 +
+from __future__ import (absolute_import, unicode_literals)
+
+import unittest
+
+from mantid.api import AnalysisDataService, WorkspaceFactory
+from mantid.kernel import FloatTimeSeriesProperty, StringPropertyWithValue
+from mantid.py3compat import mock
+
+from Muon.GUI.Common.contexts.fitting_context import FitInformation
+
+
+def create_test_workspace(ws_name=None,
+                          time_series_logs=None,
+                          string_value_logs=None):
+    """
+    Create a test workspace.
+    :param ws_name: An optional name for the workspace
+    :param time_series_logs: A set of (name, (values,...))
+    :param string_value_logs: A set of (name, value) pairs
+    :return: The new workspace
+    """
+    fake_ws = WorkspaceFactory.create('Workspace2D', 1, 1, 1)
+    run = fake_ws.run()
+    if time_series_logs is not None:
+        for name, values in time_series_logs:
+            tsp = FloatTimeSeriesProperty(name)
+            for item in values:
+                try:
+                    time, value = item[0], item[1]
+                except TypeError:
+                    time, value = "2000-05-01T12:00:00", item
+                tsp.addValue(time, value)
+            run.addProperty(name, tsp, replace=True)
+
+    if string_value_logs is not None:
+        for name, value in string_value_logs:
+            run.addProperty(
+                name, StringPropertyWithValue(name, value), replace=True)
+
+    ws_name = ws_name if ws_name is not None else 'fitting_context_model_test'
+    AnalysisDataService.Instance().addOrReplace(ws_name, fake_ws)
+    return fake_ws
+
+
+class FitInformationTest(unittest.TestCase):
+    def tearDown(self):
+        AnalysisDataService.Instance().clear()
+
+    def test_equality_with_no_globals(self):
+        fit_info = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
+                                  mock.MagicMock(), mock.MagicMock())
+        self.assertEqual(fit_info, fit_info)
+
+    def test_equality_with_globals(self):
+        fit_info = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
+                                  mock.MagicMock(), mock.MagicMock(), ['A'])
+        self.assertEqual(fit_info, fit_info)
+
+    def test_inequality_with_globals(self):
+        fit_info1 = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
+                                   mock.MagicMock(), ['A'])
+        fit_info2 = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
+                                   mock.MagicMock(), ['B'])
+        self.assertNotEqual(fit_info1, fit_info2)
+
+    def test_empty_global_parameters_if_none_specified(self):
+        fit_information_object = FitInformation(mock.MagicMock(),
+                                                mock.MagicMock(),
+                                                mock.MagicMock(),
+                                                mock.MagicMock())
+
+        self.assertEqual([],
+                         fit_information_object.parameters.global_parameters)
+
+    def test_global_parameters_are_captured(self):
+        fit_information_object = FitInformation(mock.MagicMock(),
+                                                mock.MagicMock(),
+                                                mock.MagicMock(),
+                                                mock.MagicMock(), ['A'])
+
+        self.assertEqual(['A'],
+                         fit_information_object.parameters.global_parameters)
+
+    def test_parameters_are_readonly(self):
+        fit_info = FitInformation(mock.MagicMock(), mock.MagicMock(),
+                                  mock.MagicMock(), mock.MagicMock())
+
+        self.assertRaises(AttributeError, setattr, fit_info, "parameters",
+                          mock.MagicMock())
+
+    def test_logs_from_workspace_without_logs_returns_emtpy_list(self):
+        fake_ws = create_test_workspace()
+        fit = FitInformation(mock.MagicMock(), 'func1', fake_ws.name(),
+                             mock.MagicMock())
+
+        allowed_logs = fit.log_names()
+        self.assertEqual(0, len(allowed_logs))
+
+    def test_logs_for_single_workspace_return_all_time_series_logs(self):
+        time_series_logs = (('ts_1', (1., )), ('ts_2', (3., )))
+        single_value_logs = (('sv_1', 'val1'), ('sv_2', 'val2'))
+        fake_ws = create_test_workspace(time_series_logs=time_series_logs)
+        fit = FitInformation(mock.MagicMock(), 'func1', fake_ws.name(),
+                             mock.MagicMock())
+
+        log_names = fit.log_names()
+        for name, _ in time_series_logs:
+            self.assertTrue(
+                name in log_names, msg="{} not found in log list".format(name))
+        for name, _ in single_value_logs:
+            self.assertFalse(
+                name in log_names, msg="{} found in log list".format(name))
+
+    def test_log_names_from_list_of_workspaces_gives_combined_set(self):
+        time_series_logs = (('ts_1', (1., )), ('ts_2', (3., )), ('ts_3', [2.]),
+                            ('ts_4', [3.]))
+
+        fake1 = create_test_workspace(
+            ws_name='fake1', time_series_logs=time_series_logs[:2])
+        fake2 = create_test_workspace(
+            ws_name='fake2', time_series_logs=time_series_logs[2:])
+        fit = FitInformation(mock.MagicMock(), 'func1',
+                             [fake1.name(), fake2.name()], mock.MagicMock())
+
+        log_names = fit.log_names()
+        self.assertEqual(len(time_series_logs), len(log_names))
+        for name, _ in time_series_logs:
+            self.assertTrue(
+                name in log_names, msg="{} not found in log list".format(name))
+
+    def test_log_names_uses_filter_fn(self):
+        time_series_logs = (('ts_1', (1., )), ('ts_2', (3., )), ('ts_3', [2.]),
+                            ('ts_4', [3.]))
+        fake1 = create_test_workspace(
+            ws_name='fake1', time_series_logs=time_series_logs)
+        fit = FitInformation(mock.MagicMock(), 'func1', fake1.name(),
+                             mock.MagicMock())
+
+        log_names = fit.log_names(lambda log: log.name == 'ts_1')
+        self.assertEqual(1, len(log_names))
+        self.assertEqual(time_series_logs[0][0], log_names[0])
+
+    def test_has_log_returns_true_if_all_workspaces_have_the_log(self):
+        time_series_logs = (('ts_1', (1., )), ('ts_2', (3., )))
+        fake1 = create_test_workspace(
+            ws_name='fake1', time_series_logs=time_series_logs)
+        fake2 = create_test_workspace(
+            ws_name='fake2', time_series_logs=time_series_logs)
+        fit = FitInformation(mock.MagicMock(), 'func1',
+                             [fake1.name(), fake2.name()], mock.MagicMock())
+
+        self.assertTrue(fit.has_log('ts_1'))
+
+    def test_has_log_returns_false_if_all_workspaces_do_not_have_log(self):
+        time_series_logs = [('ts_1', (1., ))]
+        fake1 = create_test_workspace(
+            ws_name='fake1', time_series_logs=time_series_logs)
+        fake2 = create_test_workspace(ws_name='fake2')
+        fit = FitInformation(mock.MagicMock(), 'func1',
+                             [fake1.name(), fake2.name()], mock.MagicMock())
+
+        self.assertFalse(
+            fit.has_log('ts_1'),
+            msg='All input workspaces should have the requested log')
+
+    def test_string_log_value_from_fit_with_single_workspace(self):
+        single_value_logs = [('sv_1', '5')]
+        fake1 = create_test_workspace(
+            ws_name='fake1', string_value_logs=single_value_logs)
+        fit = FitInformation(mock.MagicMock(), 'func1', [fake1.name()],
+                             mock.MagicMock())
+
+        self.assertEqual(
+            float(single_value_logs[0][1]),
+            fit.log_value(single_value_logs[0][0]))
+
+    def test_time_series_log_value_from_fit_with_single_workspace_uses_time_average(
+            self):
+        time_series_logs = \
+            [('ts_1', (("2000-05-01T12:00:00", 5.),
+             ("2000-05-01T12:00:10", 20.),
+             ("2000-05-01T12:05:00", 30.)))]
+        fake1 = create_test_workspace('fake1', time_series_logs)
+        fit = FitInformation(mock.MagicMock(), 'func1', [fake1.name()],
+                             mock.MagicMock())
+
+        time_average = (10 * 5 + 290 * 20) / 300.
+        self.assertAlmostEqual(time_average, fit.log_value('ts_1'), places=6)
+
+    def test_time_series_log_value_from_fit_with_multiple_workspaces_uses_average_of_time_average(
+            self):
+        time_series_logs1 = \
+            [('ts_1', (("2000-05-01T12:00:00", 5.),
+             ("2000-05-01T12:00:10", 20.),
+             ("2000-05-01T12:05:00", 30.)))]
+        fake1 = create_test_workspace('fake1', time_series_logs1)
+        time_series_logs2 = \
+            [('ts_1', (("2000-05-01T12:00:30", 10.),
+             ("2000-05-01T12:01:45", 30.),
+             ("2000-05-01T12:05:00", 40.)))]
+        fake2 = create_test_workspace('fake2', time_series_logs2)
+        fit = FitInformation(mock.MagicMock(), 'func1',
+                             [fake1.name(), fake2.name()], mock.MagicMock())
+
+        time_average1 = (10 * 5 + 290 * 20) / 300.
+        time_average2 = (75 * 10 + 195 * 30) / 270.
+        all_average = 0.5 * (time_average1 + time_average2)
+        self.assertAlmostEqual(all_average, fit.log_value('ts_1'), places=6)
+
+
+if __name__ == '__main__':
+    unittest.main(buffer=False, verbosity=2)
diff --git a/scripts/test/Muon/fit_parameters_test.py b/scripts/test/Muon/fit_parameters_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e7d46aef5e9a9637be04cc10644df14c226e070
--- /dev/null
+++ b/scripts/test/Muon/fit_parameters_test.py
@@ -0,0 +1,175 @@
+# Mantid Repository : https://github.com/mantidproject/mantid
+#
+# Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI,
+#     NScD Oak Ridge National Laboratory, European Spallation Source
+#     & Institut Laue - Langevin
+# SPDX - License - Identifier: GPL - 3.0 +
+from __future__ import (absolute_import, unicode_literals)
+
+from collections import OrderedDict
+import unittest
+
+from mantid.py3compat import iteritems, mock
+from Muon.GUI.Common.contexts.fitting_context import FitParameters
+
+
+def create_test_fit_parameters(test_parameters, global_parameters=None):
+    # needs to look like a standard fit table
+    fit_table = [{
+        'Name': name,
+        'Value': value,
+        'Error': error
+    } for name, (value, error) in iteritems(test_parameters)]
+
+    parameter_workspace = mock.MagicMock()
+    parameter_workspace.workspace.__iter__.return_value = fit_table
+    return FitParameters(parameter_workspace, global_parameters)
+
+
+class FitParametersTest(unittest.TestCase):
+    def test_equality_with_no_globals(self):
+        parameter_workspace = mock.MagicMock()
+        fit_params1 = FitParameters(parameter_workspace)
+        fit_params2 = FitParameters(parameter_workspace)
+
+        self.assertEqual(fit_params1, fit_params2)
+
+    def test_inequality_with_no_globals(self):
+        fit_params1 = FitParameters(mock.MagicMock())
+        fit_params2 = FitParameters(mock.MagicMock())
+
+        self.assertNotEqual(fit_params1, fit_params2)
+
+    def test_equality_with_globals(self):
+        parameter_workspace = mock.MagicMock()
+        fit_params1 = FitParameters(parameter_workspace, ['A'])
+        parameter_workspace = parameter_workspace
+        fit_params2 = FitParameters(parameter_workspace, ['A'])
+
+        self.assertEqual(fit_params1, fit_params2)
+
+    def test_inequality_with_globals(self):
+        parameter_workspace = mock.MagicMock()
+        fit_params1 = FitParameters(parameter_workspace, ['A'])
+        fit_params2 = FitParameters(parameter_workspace, ['B'])
+
+        self.assertNotEqual(fit_params1, fit_params2)
+
+    def test_length_returns_all_params_with_no_globals(self):
+        test_parameters = OrderedDict([('Height', (10., 0.4)), ('A0', (1,
+                                                                       0.01)),
+                                       ('Cost function', (0.1, 0.))])
+        fit_params = create_test_fit_parameters(test_parameters)
+
+        self.assertEqual(3, len(fit_params))
+
+    def test_length_returns_unique_params_with_globals(self):
+        test_parameters = OrderedDict([('f0.Height', (10., 0.4)),
+                                       ('f0.A0', (1, 0.01)),
+                                       ('f1.Height', (10., 0.4)),
+                                       ('f1.A0', (2, 0.001)),
+                                       ('Cost function', (0.1, 0.))])
+        fit_params = create_test_fit_parameters(
+            test_parameters, global_parameters=['Height'])
+
+        self.assertEqual(4, len(fit_params))
+
+    def test_names_value_error_returns_all_expected_values_with_no_globals(
+            self):
+        test_parameters = OrderedDict([('f0.Height', (10., 0.4)),
+                                       ('f0.A0', (1, 0.01)),
+                                       ('Cost function', (0.1, 0.))])
+        fit_params = create_test_fit_parameters(test_parameters)
+
+        self.assertEqual(list(test_parameters.keys()), fit_params.names())
+        self.assertEqual(3, len(fit_params))
+        for index, name in enumerate(fit_params.names()):
+            self.assertEqual(
+                test_parameters[name][0],
+                fit_params.value(name),
+                msg="Mismatch in error for parameter" + name)
+            self.assertEqual(
+                test_parameters[name][1],
+                fit_params.error(name),
+                msg="Mismatch in error for parameter" + name)
+
+    def test_names_return_globals_first_with_simultaneous_prefixes_stripped_for_single_fn(
+            self):
+        # Make some parameters that look like a simultaneous fit of 2 data sets
+        test_parameters = OrderedDict([
+            ('f0.Height', (10., 0.4)),
+            ('f0.A0', (1, 0.01)),  # global
+            ('f0.Sigma', (0.01, 0.0001)),  # global
+            ('f1.Height', (11., 0.5)),
+            ('f1.A0', (1, 0.01)),  # global
+            ('f1.Sigma', (0.01, 0.0001)),  # global
+            ('Cost function', (0.1, 0.)),
+        ])
+        global_parameters = ['A0', 'Sigma']
+        fit_params = create_test_fit_parameters(test_parameters,
+                                                global_parameters)
+
+        expected_keys = [
+            'A0', 'Sigma', 'f0.Height', 'f1.Height', 'Cost function'
+        ]
+        self.assertEqual(expected_keys, fit_params.names())
+
+    def test_names_return_globals_first_with_simultaneous_prefixes_stripped_for_composite_fn(
+            self):
+        # Make some parameters that look like a simultaneous fit of 2 data sets where parameters
+        # could be called the same thing in each function. The values are irrelevant for this test
+        test_parameters = OrderedDict([
+            # data set 0
+            ('f0.f0.A0', (10., 0.4)),
+            ('f0.f0.A1', (10., 0.4)),
+            ('f0.f1.A0', (10., 0.4)),
+            ('f0.f1.A1', (10., 0.4)),
+            # data set 1
+            ('f1.f0.A0', (10., 0.4)),
+            ('f1.f0.A1', (10., 0.4)),
+            ('f1.f1.A0', (10., 0.4)),
+            ('f1.f1.A1', (10., 0.4)),
+            ('Cost function', (0.1, 0.)),
+        ])
+        global_parameters = ['f0.A0']
+        fit_params = create_test_fit_parameters(test_parameters,
+                                                global_parameters)
+
+        expected_keys = [
+            'f0.A0', 'f0.f0.A1', 'f0.f1.A0', 'f0.f1.A1', 'f1.f0.A1',
+            'f1.f1.A0', 'f1.f1.A1', 'Cost function'
+        ]
+        self.assertEqual(expected_keys, fit_params.names())
+
+    def test_names_value_error_returns_all_expected_values_with_globals(self):
+        test_parameters = OrderedDict([
+            ('f0.Height', (10., 0.4)),  # global
+            ('f0.A0', (1, 0.01)),
+            ('f1.Height', (10., 0.4)),  # global
+            ('f1.A0', (2, 0.05)),
+            ('Cost function', (0.1, 0.)),
+        ])
+        global_parameters = ['Height']
+        # Make some parameters that look like a simultaneous fit
+        fit_params = create_test_fit_parameters(test_parameters,
+                                                global_parameters)
+
+        expected_keys = ['Height', 'f0.A0', 'f1.A0', 'Cost function']
+        self.assertEqual(expected_keys, fit_params.names())
+        for index, name in enumerate(fit_params.names()):
+            if name == 'Height':
+                orig_name = 'f0.Height'
+            else:
+                orig_name = name
+            self.assertEqual(
+                test_parameters[orig_name][0],
+                fit_params.value(name),
+                msg="Mismatch in error for parameter" + name)
+            self.assertEqual(
+                test_parameters[orig_name][1],
+                fit_params.error(name),
+                msg="Mismatch in error for parameter" + name)
+
+
+if __name__ == '__main__':
+    unittest.main(buffer=False, verbosity=2)
diff --git a/scripts/test/Muon/fitting_context_test.py b/scripts/test/Muon/fitting_context_test.py
index 55ae119ad0b760bbc947c3d7ec35b22537eac333..19327ed92824c2657c1784f556c976cbb6dcc950 100644
--- a/scripts/test/Muon/fitting_context_test.py
+++ b/scripts/test/Muon/fitting_context_test.py
@@ -9,11 +9,66 @@ from __future__ import (absolute_import, unicode_literals)
 from collections import OrderedDict
 import unittest
 
+from mantid.api import AnalysisDataService, WorkspaceFactory, WorkspaceGroup
+from mantid.kernel import FloatTimeSeriesProperty, StringPropertyWithValue
 from mantid.py3compat import iteritems, mock
 
 from Muon.GUI.Common.contexts.fitting_context import FittingContext, FitInformation, FitParameters
 
 
+def create_test_workspace(ws_name=None,
+                          time_series_logs=None,
+                          string_value_logs=None):
+    """
+    Create a test workspace.
+    :param ws_name: An optional name for the workspace
+    :param time_series_logs: A set of (name, (values,...))
+    :param string_value_logs: A set of (name, value) pairs
+    :return: The new workspace
+    """
+    fake_ws = WorkspaceFactory.create('Workspace2D', 1, 1, 1)
+    run = fake_ws.run()
+    if time_series_logs is not None:
+        for name, values in time_series_logs:
+            tsp = FloatTimeSeriesProperty(name)
+            for item in values:
+                try:
+                    time, value = item[0], item[1]
+                except TypeError:
+                    time, value = "2000-05-01T12:00:00", item
+                tsp.addValue(time, value)
+            run.addProperty(name, tsp, replace=True)
+
+    if string_value_logs is not None:
+        for name, value in string_value_logs:
+            run.addProperty(
+                name, StringPropertyWithValue(name, value), replace=True)
+
+    ws_name = ws_name if ws_name is not None else 'fitting_context_model_test'
+    AnalysisDataService.Instance().addOrReplace(ws_name, fake_ws)
+    return fake_ws
+
+
+def create_test_workspacegroup(group_name=None, size=None, items=None):
+    if size is not None and items is not None:
+        raise ValueError("Provide either size or items not both.")
+
+    group_name = group_name if group_name is not None else 'fitting_context_testgroup'
+    group = WorkspaceGroup()
+    if size is not None:
+        for i in range(size):
+            ws_name = '{}_{}'.format(group_name, i)
+            fake_ws = create_test_workspace(ws_name)
+            group.addWorkspace(fake_ws)
+    elif items is not None:
+        for item in items:
+            group.addWorkspace(item)
+
+    ads = AnalysisDataService.Instance()
+    ads.addOrReplace(group_name, group)
+    return group
+
+
 def create_test_fit_parameters(test_parameters, global_parameters=None):
     # needs to look like a standard fit table
     fit_table = [{
@@ -27,158 +82,14 @@ def create_test_fit_parameters(test_parameters, global_parameters=None):
     return FitParameters(parameter_workspace, global_parameters)
 
 
-class FitParametersTest(unittest.TestCase):
-    def test_equality_with_no_globals(self):
-        parameter_workspace = mock.MagicMock()
-        fit_params1 = FitParameters(parameter_workspace)
-        fit_params2 = FitParameters(parameter_workspace)
-
-        self.assertEqual(fit_params1, fit_params2)
-
-    def test_inequality_with_no_globals(self):
-        fit_params1 = FitParameters(mock.MagicMock())
-        fit_params2 = FitParameters(mock.MagicMock())
-
-        self.assertNotEqual(fit_params1, fit_params2)
-
-    def test_equality_with_globals(self):
-        parameter_workspace = mock.MagicMock()
-        fit_params1 = FitParameters(parameter_workspace, ['A'])
-        parameter_workspace = parameter_workspace
-        fit_params2 = FitParameters(parameter_workspace, ['A'])
-
-        self.assertEqual(fit_params1, fit_params2)
-
-    def test_inequality_with_globals(self):
-        parameter_workspace = mock.MagicMock()
-        fit_params1 = FitParameters(parameter_workspace, ['A'])
-        fit_params2 = FitParameters(parameter_workspace, ['B'])
-
-        self.assertNotEqual(fit_params1, fit_params2)
-
-    def test_length_returns_all_params_with_no_globals(self):
-        test_parameters = OrderedDict([('Height', (10., 0.4)), ('A0', (1,
-                                                                       0.01)),
-                                       ('Cost function', (0.1, 0.))])
-        fit_params = create_test_fit_parameters(test_parameters)
-
-        self.assertEqual(3, len(fit_params))
-
-    def test_length_returns_unique_params_with_globals(self):
-        test_parameters = OrderedDict([('f0.Height', (10., 0.4)),
-                                       ('f0.A0', (1, 0.01)),
-                                       ('f1.Height', (10., 0.4)),
-                                       ('f1.A0', (2, 0.001)),
-                                       ('Cost function', (0.1, 0.))])
-        fit_params = create_test_fit_parameters(
-            test_parameters, global_parameters=['Height'])
-
-        self.assertEqual(4, len(fit_params))
-
-    def test_names_value_error_returns_all_expected_values_with_no_globals(
-            self):
-        test_parameters = OrderedDict([('f0.Height', (10., 0.4)),
-                                       ('f0.A0', (1, 0.01)),
-                                       ('Cost function', (0.1, 0.))])
-        fit_params = create_test_fit_parameters(test_parameters)
-
-        self.assertEqual(list(test_parameters.keys()), fit_params.names())
-        self.assertEqual(3, len(fit_params))
-        for index, name in enumerate(fit_params.names()):
-            self.assertEqual(
-                test_parameters[name][0],
-                fit_params.value(name),
-                msg="Mismatch in error for parameter" + name)
-            self.assertEqual(
-                test_parameters[name][1],
-                fit_params.error(name),
-                msg="Mismatch in error for parameter" + name)
-
-    def test_names_return_globals_first_with_simultaneous_prefixes_stripped_for_single_fn(
-            self):
-        # Make some parameters that look like a simultaneous fit of 2 data sets
-        test_parameters = OrderedDict([
-            ('f0.Height', (10., 0.4)),
-            ('f0.A0', (1, 0.01)),  # global
-            ('f0.Sigma', (0.01, 0.0001)),  # global
-            ('f1.Height', (11., 0.5)),
-            ('f1.A0', (1, 0.01)),  # global
-            ('f1.Sigma', (0.01, 0.0001)),  # global
-            ('Cost function', (0.1, 0.)),
-        ])
-        global_parameters = ['A0', 'Sigma']
-        fit_params = create_test_fit_parameters(test_parameters,
-                                                global_parameters)
-
-        expected_keys = [
-            'A0', 'Sigma', 'f0.Height', 'f1.Height', 'Cost function'
-        ]
-        self.assertEqual(expected_keys, fit_params.names())
-
-    def test_names_return_globals_first_with_simultaneous_prefixes_stripped_for_composite_fn(
-            self):
-        # Make some parameters that look like a simultaneous fit of 2 data sets where parameters
-        # could be called the same thing in each function. The values are irrelevant for this test
-        test_parameters = OrderedDict([
-            # data set 0
-            ('f0.f0.A0', (10., 0.4)),
-            ('f0.f0.A1', (10., 0.4)),
-            ('f0.f1.A0', (10., 0.4)),
-            ('f0.f1.A1', (10., 0.4)),
-            # data set 1
-            ('f1.f0.A0', (10., 0.4)),
-            ('f1.f0.A1', (10., 0.4)),
-            ('f1.f1.A0', (10., 0.4)),
-            ('f1.f1.A1', (10., 0.4)),
-            ('Cost function', (0.1, 0.)),
-        ])
-        global_parameters = ['f0.A0']
-        fit_params = create_test_fit_parameters(test_parameters,
-                                                global_parameters)
-
-        expected_keys = [
-            'f0.A0', 'f0.f0.A1', 'f0.f1.A0', 'f0.f1.A1', 'f1.f0.A1',
-            'f1.f1.A0', 'f1.f1.A1', 'Cost function'
-        ]
-        self.assertEqual(expected_keys, fit_params.names())
-
-    def test_names_value_error_returns_all_expected_values_with_globals(self):
-        test_parameters = OrderedDict([
-            ('f0.Height', (10., 0.4)),  # global
-            ('f0.A0', (1, 0.01)),
-            ('f1.Height', (10., 0.4)),  # global
-            ('f1.A0', (2, 0.05)),
-            ('Cost function', (0.1, 0.)),
-        ])
-        global_parameters = ['Height']
-        # Make some parameters that look like a simultaneous fit
-        fit_params = create_test_fit_parameters(test_parameters,
-                                                global_parameters)
-
-        expected_keys = ['Height', 'f0.A0', 'f1.A0', 'Cost function']
-        self.assertEqual(expected_keys, fit_params.names())
-        for index, name in enumerate(fit_params.names()):
-            if name == 'Height':
-                orig_name = 'f0.Height'
-            else:
-                orig_name = name
-            self.assertEqual(
-                test_parameters[orig_name][0],
-                fit_params.value(name),
-                msg="Mismatch in error for parameter" + name)
-            self.assertEqual(
-                test_parameters[orig_name][1],
-                fit_params.error(name),
-                msg="Mismatch in error for parameter" + name)
-
-
 class FittingContextTest(unittest.TestCase):
     def setUp(self):
         self.fitting_context = FittingContext()
 
     def test_context_constructor_accepts_fit_list(self):
         fit_list = [
-            FitInformation(mock.MagicMock(), 'MuonGuassOsc', mock.MagicMock(), mock.MagicMock())
+            FitInformation(mock.MagicMock(), 'MuonGuassOsc', mock.MagicMock(),
+                           mock.MagicMock())
         ]
         context = FittingContext(fit_list)
 
@@ -187,61 +98,30 @@ class FittingContextTest(unittest.TestCase):
     def test_len_gives_length_of_fit_list(self):
         self.assertEqual(0, len(self.fitting_context))
         self.fitting_context.add_fit(
-            FitInformation(mock.MagicMock(), 'MuonGuassOsc', mock.MagicMock(), []))
+            FitInformation(mock.MagicMock(), 'MuonGuassOsc', mock.MagicMock(),
+                           mock.MagicMock()))
         self.assertEqual(1, len(self.fitting_context))
 
-    def test_fitinformation_equality_with_no_globals(self):
-        fit_info = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
-                                  mock.MagicMock(), mock.MagicMock())
-        self.assertEqual(fit_info, fit_info)
-
-    def test_fitinformation_equality_with_globals(self):
-        fit_info = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
-                                  mock.MagicMock(), ['A'])
-        self.assertEqual(fit_info, fit_info)
-
-    def test_fitinformation_inequality_with_globals(self):
-        fit_info1 = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
-                                   mock.MagicMock(), ['A'])
-        fit_info2 = FitInformation(mock.MagicMock(), 'MuonGuassOsc',
-                                   mock.MagicMock(), ['B'])
-        self.assertNotEqual(fit_info1, fit_info2)
-
     def test_items_can_be_added_to_fitting_context(self):
-        fit_information_object = FitInformation(
-            mock.MagicMock(), 'MuonGuassOsc', mock.MagicMock(), mock.MagicMock())
-
-        self.fitting_context.add_fit(fit_information_object)
-
-        self.assertEqual(fit_information_object,
-                         self.fitting_context.fit_list[0])
-
-    def test_empty_global_parameters_if_none_specified(self):
         fit_information_object = FitInformation(mock.MagicMock(),
-                                                mock.MagicMock(),
+                                                'MuonGuassOsc',
                                                 mock.MagicMock(),
                                                 mock.MagicMock())
 
-        self.assertEqual([],
-                         fit_information_object.parameters.global_parameters)
-
-    def test_global_parameters_are_captured(self):
-        fit_information_object = FitInformation(mock.MagicMock(),
-                                                mock.MagicMock(),
-                                                mock.MagicMock(),
-                                                mock.MagicMock(),
-                                                ['A'])
+        self.fitting_context.add_fit(fit_information_object)
 
-        self.assertEqual(['A'],
-                         fit_information_object.parameters.global_parameters)
+        self.assertEqual(fit_information_object,
+                         self.fitting_context.fit_list[0])
 
     def test_fitfunctions_gives_list_of_unique_function_names(self):
         test_fit_function = 'MuonGuassOsc'
         self.fitting_context.add_fit_from_values(mock.MagicMock(),
                                                  test_fit_function,
+                                                 mock.MagicMock(),
                                                  mock.MagicMock(), [])
         self.fitting_context.add_fit_from_values(mock.MagicMock(),
                                                  test_fit_function,
+                                                 mock.MagicMock(),
                                                  mock.MagicMock(), [])
 
         fit_functions = self.fitting_context.fit_function_names()
@@ -249,35 +129,18 @@ class FittingContextTest(unittest.TestCase):
         self.assertEqual(len(fit_functions), 1)
         self.assertEqual(test_fit_function, fit_functions[0])
 
-    def test_can_retrieve_a_list_of_fit_objects_based_on_fit_function_name(
-            self):
-        fit_information_object_0 = FitInformation(mock.MagicMock(),
-                                                  'MuonGuassOsc',
-                                                  mock.MagicMock(), [])
-        fit_information_object_1 = FitInformation(mock.MagicMock(), 'MuonOsc',
-                                                  mock.MagicMock(), [])
-        fit_information_object_2 = FitInformation(mock.MagicMock(),
-                                                  'MuonGuassOsc',
-                                                  mock.MagicMock(), [])
-        self.fitting_context.add_fit(fit_information_object_0)
-        self.fitting_context.add_fit(fit_information_object_1)
-        self.fitting_context.add_fit(fit_information_object_2)
-
-        result = self.fitting_context.find_fits_for_function('MuonGuassOsc')
-
-        self.assertEqual([fit_information_object_0, fit_information_object_2],
-                         result)
-
     def test_can_add_fits_without_first_creating_fit_information_objects(self):
         parameter_workspace = mock.MagicMock()
         input_workspace = mock.MagicMock()
         output_workspace_names = mock.MagicMock()
         fit_function_name = 'MuonGuassOsc'
         fit_information_object = FitInformation(
-            parameter_workspace, fit_function_name, input_workspace, output_workspace_names)
+            parameter_workspace, fit_function_name, input_workspace,
+            output_workspace_names)
 
         self.fitting_context.add_fit_from_values(
-            parameter_workspace, fit_function_name, input_workspace, output_workspace_names)
+            parameter_workspace, fit_function_name, input_workspace,
+            output_workspace_names)
 
         self.assertEqual(fit_information_object,
                          self.fitting_context.fit_list[0])
@@ -311,6 +174,48 @@ class FittingContextTest(unittest.TestCase):
         self.assertRaises(AttributeError, setattr, fit_info, "parameters",
                           fit_params)
 
+    def test_log_names_returns_logs_from_all_fits_by_default(self):
+        time_series_logs = (('ts_1', (1., )), ('ts_2', (3., )), ('ts_3', [2.]),
+                            ('ts_4', [3.]))
+        fake1 = create_test_workspace(
+            ws_name='fake1', time_series_logs=time_series_logs[:2])
+        fake2 = create_test_workspace(
+            ws_name='fake2', time_series_logs=time_series_logs[2:])
+        self.fitting_context.add_fit(
+            FitInformation(mock.MagicMock(), 'func1', fake1.name(),
+                           mock.MagicMock()))
+        self.fitting_context.add_fit(
+            FitInformation(mock.MagicMock(), 'func1', fake2.name(),
+                           mock.MagicMock()))
+
+        log_names = self.fitting_context.log_names()
+        self.assertEqual(len(time_series_logs), len(log_names))
+        for name, _ in time_series_logs:
+            self.assertTrue(
+                name in log_names, msg="{} not found in log list".format(name))
+
+    def test_log_names_respects_filter(self):
+        time_series_logs = (('ts_1', (1., )), ('ts_2', (3., )), ('ts_3', [2.]),
+                            ('ts_4', [3.]))
+        fake1 = create_test_workspace(
+            ws_name='fake1', time_series_logs=time_series_logs[:2])
+        fake2 = create_test_workspace(
+            ws_name='fake2', time_series_logs=time_series_logs[2:])
+        self.fitting_context.add_fit(
+            FitInformation(mock.MagicMock(), 'func1', fake1.name(),
+                           mock.MagicMock()))
+        self.fitting_context.add_fit(
+            FitInformation(mock.MagicMock(), 'func1', fake2.name(),
+                           mock.MagicMock()))
+
+        required_logs = ('ts_2', 'ts_4')
+        log_names = self.fitting_context.log_names(
+            filter_fn=lambda log: log.name in required_logs)
+        self.assertEqual(len(required_logs), len(log_names))
+        for name in required_logs:
+            self.assertTrue(
+                name in log_names, msg="{} not found in log list".format(name))
+
 
 if __name__ == '__main__':
     unittest.main(buffer=False, verbosity=2)
diff --git a/scripts/test/Muon/home_runinfo_presenter_test.py b/scripts/test/Muon/home_runinfo_presenter_test.py
index 79298a747d5801ff3ee9c01845b8d8d0c74b99ad..7ffa2a1d42b6adc3f41bd65911996bb0c3dcd568 100644
--- a/scripts/test/Muon/home_runinfo_presenter_test.py
+++ b/scripts/test/Muon/home_runinfo_presenter_test.py
@@ -49,7 +49,7 @@ class HomeTabRunInfoPresenterTest(GuiTest):
         expected_string_list = ['Instrument:MUSR', 'Run:22725', 'Title:FeTeSeT=1F=100', 'Comment:FCfirstsample',
                                 'Start:2009-03-24T04:18:58', 'End:2009-03-24T04:56:26', 'Counts(MEv):20.076704',
                                 'GoodFrames:88540', 'CountsperGoodFrame:226.753',
-                                'CountsperGoodFrameperdet:3.543', 'AverageTemperature(K):2.53386',
+                                'CountsperGoodFrameperdet:3.543', 'AverageTemperature(K):19.69992',
                                 'SampleTemperature(K):1.0', 'SampleMagneticField(G):100.0']
 
         self.assertEqual(str(self.view.run_info_box.toPlainText()).replace(' ', '').splitlines(), expected_string_list)
diff --git a/scripts/test/Muon/results_tab_widget/results_tab_model_test.py b/scripts/test/Muon/results_tab_widget/results_tab_model_test.py
index f3deba34b11f38cd17ceb7f30a7917ac58fa61a1..7dcdb1032350527588467e8eace3449110ffc8e0 100644
--- a/scripts/test/Muon/results_tab_widget/results_tab_model_test.py
+++ b/scripts/test/Muon/results_tab_widget/results_tab_model_test.py
@@ -9,7 +9,7 @@ from __future__ import (absolute_import, print_function, unicode_literals)
 
 from collections import OrderedDict
 from copy import deepcopy
-import itertools
+import datetime
 import unittest
 
 from mantid.api import AnalysisDataService, ITableWorkspace, WorkspaceFactory, WorkspaceGroup
@@ -17,13 +17,9 @@ from mantid.kernel import FloatTimeSeriesProperty, StringPropertyWithValue
 from mantid.py3compat import iteritems, mock, string_types
 
 from Muon.GUI.Common.results_tab_widget.results_tab_model import (
-    DEFAULT_TABLE_NAME, ALLOWED_NON_TIME_SERIES_LOGS, log_names,
-    ResultsTabModel)
+    DEFAULT_TABLE_NAME, ResultsTabModel, TableColumnType)
 from Muon.GUI.Common.contexts.fitting_context import FittingContext, FitInformation
 
-# constants
-LOG_NAMES_FUNC = 'Muon.GUI.Common.results_tab_widget.results_tab_model.log_names'
-
 
 def create_test_workspace(ws_name=None):
     fake_ws = WorkspaceFactory.create('Workspace2D', 1, 1, 1)
@@ -32,24 +28,10 @@ def create_test_workspace(ws_name=None):
     return fake_ws
 
 
-def create_test_workspacegroup(size, group_name=None):
-    group_name = group_name if group_name is not None else 'results_tab_model_testgroup'
-    ads = AnalysisDataService.Instance()
-    group = WorkspaceGroup()
-    for i in range(size):
-        fake_ws = WorkspaceFactory.create('Workspace2D', 1, 1, 1)
-        ws_name = '{}_{}'.format(group_name, i)
-        ads.addOrReplace(ws_name, fake_ws)
-        group.addWorkspace(fake_ws)
-
-    ads.addOrReplace(group_name, group)
-    return group
-
-
 def create_test_fits(input_workspaces,
                      function_name,
                      parameters,
-                     output_workspace_names,
+                     output_workspace_names=None,
                      global_parameters=None):
     """
     Create a list of fits
@@ -60,6 +42,9 @@ def create_test_fits(input_workspaces,
     :param global_parameters: An optional list of tied parameters
     :return: A list of Fits
     """
+    output_workspace_names = output_workspace_names if output_workspace_names is not None else [
+        'test-output-ws'
+    ]
     # Convert parameters to fit table-like structure
     fit_table = [{
         'Name': name,
@@ -73,8 +58,8 @@ def create_test_fits(input_workspaces,
         parameter_workspace.workspace.__iter__.return_value = fit_table
         parameter_workspace.workspace_name = name + '_Parameters'
         fits.append(
-            FitInformation(parameter_workspace, function_name, name, output_workspace_names,
-                           global_parameters))
+            FitInformation(parameter_workspace, function_name, name,
+                           output_workspace_names, global_parameters))
 
     return fits
 
@@ -82,7 +67,7 @@ def create_test_fits(input_workspaces,
 def create_test_model(input_workspaces,
                       function_name,
                       parameters,
-                      output_workspace_names,
+                      output_workspace_names=None,
                       logs=None,
                       global_parameters=None):
     """
@@ -90,24 +75,15 @@ def create_test_model(input_workspaces,
     :param input_workspaces: See create_test_fits
     :param function_name: See create_test_fits
     :param parameters: See create_test_fits
-    :param logs: A list of log names to create
+     :param logs: A list of (name, (values, ...)) tuples describing logs to create
     :param global_parameters: An optional list of tied parameters
     :return: A list of Fits with workspaces/logs attached
     """
-    fits = create_test_fits(input_workspaces, function_name, parameters, output_workspace_names,
-                            global_parameters)
+    fits = create_test_fits(input_workspaces, function_name, parameters,
+                            output_workspace_names, global_parameters)
     logs = logs if logs is not None else []
     for fit, workspace_name in zip(fits, input_workspaces):
-        test_ws = create_test_workspace(workspace_name)
-        run = test_ws.run()
-        # populate with log data
-        for index, name in enumerate(logs):
-            tsp = FloatTimeSeriesProperty(name)
-            tsp.addValue("2019-05-30T09:00:00", float(index))
-            tsp.addValue("2019-05-30T09:00:05", float(index + 1))
-            run.addProperty(name, tsp, replace=True)
-
-        fit.input_workspace = workspace_name
+        add_logs(workspace_name, logs)
 
     fitting_context = FittingContext()
     for fit in fits:
@@ -115,6 +91,29 @@ def create_test_model(input_workspaces,
     return fitting_context, ResultsTabModel(fitting_context)
 
 
+def add_logs(workspace_name, logs):
+    """
+    Add a list of logs to a workspace
+    :param workspace_name: A workspace to contain the logs
+    :param logs: A list of (name, values) pairs to add as time-series logs
+    :return: The workspace reference
+    """
+    workspace = create_test_workspace(workspace_name)
+
+    run = workspace.run()
+    # populate with log data
+    dt_format = "%Y-%m-%dT%H:%M:%S"
+    for name, values in logs:
+        tsp = FloatTimeSeriesProperty(name)
+        time = datetime.datetime.strptime("2019-05-30T09:00:00", dt_format)
+        for value in values:
+            tsp.addValue(time.strftime(dt_format), float(value))
+            time += datetime.timedelta(seconds=5)
+        run.addProperty(name, tsp, replace=True)
+
+    return workspace
+
+
 class ResultsTabModelTest(unittest.TestCase):
     def setUp(self):
         self.f0_height = (2309.2, 16)
@@ -133,7 +132,9 @@ class ResultsTabModelTest(unittest.TestCase):
                                        ('Cost function value',
                                         self.cost_function)])
 
-        self.logs = ['sample_temp', 'sample_magn_field']
+        self.log_names = ['sample_temp', 'sample_magn_field']
+        self.logs = [(self.log_names[0], (50., 60.)),
+                     (self.log_names[1], (2., 3.))]
 
     def tearDown(self):
         AnalysisDataService.Instance().clear()
@@ -162,51 +163,6 @@ class ResultsTabModelTest(unittest.TestCase):
 
         self.assertEqual(model.selected_fit_function(), new_selection)
 
-    def test_log_names_from_workspace_with_logs(self):
-        fake_ws = create_test_workspace()
-        run = fake_ws.run()
-        # populate with log data
-        time_series_names = ('ts_1', 'ts_2')
-        for name in time_series_names:
-            run.addProperty(name, FloatTimeSeriesProperty(name), replace=True)
-        single_value_log_names = ('sv_1', 'sv_2')
-        for name in itertools.chain(single_value_log_names,
-                                    ALLOWED_NON_TIME_SERIES_LOGS):
-            run.addProperty(name,
-                            StringPropertyWithValue(name, 'test'),
-                            replace=True)
-        # verify
-        allowed_logs = log_names(fake_ws.name())
-        for name in itertools.chain(time_series_names,
-                                    ALLOWED_NON_TIME_SERIES_LOGS):
-            self.assertTrue(
-                name in allowed_logs,
-                msg="{} not found in allowed log list".format(name))
-        for name in single_value_log_names:
-            self.assertFalse(name in allowed_logs,
-                             msg="{} found in allowed log list".format(name))
-
-    def test_log_names_from_workspace_without_logs(self):
-        fake_ws = create_test_workspace()
-        allowed_logs = log_names(fake_ws.name())
-        self.assertEqual(0, len(allowed_logs))
-
-    def test_log_names_from_workspacegroup_uses_first_workspace(self):
-        def add_log(workspace, name):
-            run = workspace.run()
-            run.addProperty(name, FloatTimeSeriesProperty(name), replace=True)
-
-        fake_group = create_test_workspacegroup(size=2)
-        logs = ['log_1', 'log_2']
-        for index, name in enumerate(logs):
-            add_log(fake_group[index], name)
-
-        visible_logs = log_names(fake_group.name())
-        self.assertTrue(logs[0] in visible_logs,
-                        msg="{} not found in log list".format(logs[0]))
-        self.assertFalse(logs[1] in visible_logs,
-                         msg="{} not found in log list".format(logs[1]))
-
     def test_model_returns_fit_functions_from_context(self):
         _, model = create_test_model(('ws1', ), 'func1', self.parameters, [],
                                      self.logs)
@@ -218,18 +174,24 @@ class ResultsTabModelTest(unittest.TestCase):
         self.assertEqual(0, len(model.fit_selection({})))
 
     def test_model_creates_fit_selection_given_no_existing_state(self):
-        _, model = create_test_model(('ws1', 'ws2'), 'func1', self.parameters, [],
-                                     self.logs)
+        _, model = create_test_model(('ws1', 'ws2'), 'func1', self.parameters,
+                                     [], self.logs)
 
-        expected_list_state = {'ws1': [0, True, True], 'ws2': [1, True, True]}
+        expected_list_state = {
+            'ws1_Parameters': [0, True, True],
+            'ws2_Parameters': [1, True, True]
+        }
         self.assertDictEqual(expected_list_state, model.fit_selection({}))
 
     def test_model_creates_fit_selection_given_existing_state(self):
-        _, model = create_test_model(('ws1', 'ws2'), 'func1', self.parameters, [],
-                                     self.logs)
-
-        orig_list_state = {'ws1': [0, False, True]}
-        expected_list_state = {'ws1': [0, False, True], 'ws2': [1, True, True]}
+        _, model = create_test_model(('ws1', 'ws2'), 'func1', self.parameters,
+                                     [], self.logs)
+
+        orig_list_state = {'ws1_Parameters': [0, False, True]}
+        expected_list_state = {
+            'ws1_Parameters': [0, False, True],
+            'ws2_Parameters': [1, True, True]
+        }
         self.assertEqual(expected_list_state,
                          model.fit_selection(orig_list_state))
 
@@ -237,43 +199,26 @@ class ResultsTabModelTest(unittest.TestCase):
         model = ResultsTabModel(FittingContext())
         self.assertEqual(0, len(model.log_selection({})))
 
-    def test_model_returns_log_selection_of_first_workspace(self):
-        _, model = create_test_model(('ws1', 'ws2'), 'func1', self.parameters, [])
-        with mock.patch(LOG_NAMES_FUNC) as mock_log_names:
-            ws1_logs = ('run_number', 'run_start')
-            ws2_logs = ('temp', 'magnetic_field')
-
-            def side_effect(name):
-                return ws1_logs if name == 'ws1' else ws2_logs
-
-            mock_log_names.side_effect = side_effect
-            expected_selection = {
-                'run_number': [0, False, True],
-                'run_start': [1, False, True],
-            }
+    def test_model_combines_existing_log_selection(self):
+        _, model = create_test_model(('ws1', ), 'func1', self.parameters)
+        model._fit_context.log_names = mock.MagicMock()
+        model._fit_context.log_names.return_value = [
+            'run_number', 'run_start', 'magnetic_field'
+        ]
 
-            self.assertEqual(expected_selection, model.log_selection({}))
+        existing_selection = {
+            'run_number': [0, False, True],
+            'run_start': [1, True, True],
+        }
+        expected_selection = deepcopy(existing_selection)
+        expected_selection.update({
+            'run_number': [0, False, True],
+            'run_start': [1, True, True],
+            'magnetic_field': [2, False, True],
+        })
 
-    def test_model_combines_existing_log_selection(self):
-        _, model = create_test_model(('ws1', ), 'func1', self.parameters, [])
-        with mock.patch(LOG_NAMES_FUNC) as mock_log_names:
-            mock_log_names.return_value = [
-                'run_number', 'run_start', 'magnetic_field'
-            ]
-
-            existing_selection = {
-                'run_number': [0, False, True],
-                'run_start': [1, True, True],
-            }
-            expected_selection = deepcopy(existing_selection)
-            expected_selection.update({
-                'run_number': [0, False, True],
-                'run_start': [1, True, True],
-                'magnetic_field': [2, False, True],
-            })
-
-            self.assertDictEqual(expected_selection,
-                                 model.log_selection(existing_selection))
+        self.assertDictEqual(expected_selection,
+                             model.log_selection(existing_selection))
 
     def test_create_results_table_with_no_logs_or_global_parameters(self):
         _, model = create_test_model(('ws1', ), 'func1', self.parameters, [])
@@ -287,6 +232,13 @@ class ResultsTabModelTest(unittest.TestCase):
             'f1.HeightError', 'f1.PeakCentre', 'f1.PeakCentreError',
             'f1.Sigma', 'f1.SigmaError', 'Cost function value'
         ]
+        expected_types = (TableColumnType.NoType, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y)
         expected_content = [
             ('ws1_Parameters', self.f0_height[0], self.f0_height[1],
              self.f0_centre[0], self.f0_centre[1], self.f0_sigma[0],
@@ -294,22 +246,31 @@ class ResultsTabModelTest(unittest.TestCase):
              self.f1_centre[0], self.f1_centre[1], self.f1_sigma[0],
              self.f1_sigma[1], self.cost_function[0])
         ]
-        self._assert_table_matches_expected(expected_cols, expected_content,
-                                            table, model.results_table_name())
+        self._assert_table_matches_expected(zip(expected_cols, expected_types),
+                                            expected_content, table,
+                                            model.results_table_name())
 
     def test_create_results_table_with_logs_selected(self):
         _, model = create_test_model(('ws1', ), 'func1', self.parameters, [],
                                      self.logs)
         selected_results = [('ws1', 0)]
-        table = model.create_results_table(self.logs, selected_results)
+        table = model.create_results_table(self.log_names, selected_results)
 
-        expected_cols = ['workspace_name'] + self.logs + [
+        expected_cols = ['workspace_name'] + self.log_names + [
             'f0.Height', 'f0.HeightError', 'f0.PeakCentre',
             'f0.PeakCentreError', 'f0.Sigma', 'f0.SigmaError', 'f1.Height',
             'f1.HeightError', 'f1.PeakCentre', 'f1.PeakCentreError',
             'f1.Sigma', 'f1.SigmaError', 'Cost function value'
         ]
-        avg_log_values = 0.5, 1.5
+        expected_types = (TableColumnType.NoType, TableColumnType.X,
+                          TableColumnType.X, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y)
+        avg_log_values = 50., 2.0
         expected_content = [
             ('ws1_Parameters', avg_log_values[0], avg_log_values[1],
              self.f0_height[0], self.f0_height[1], self.f0_centre[0],
@@ -318,14 +279,15 @@ class ResultsTabModelTest(unittest.TestCase):
              self.f1_centre[1], self.f1_sigma[0], self.f1_sigma[1],
              self.cost_function[0])
         ]
-        self._assert_table_matches_expected(expected_cols, expected_content,
-                                            table, model.results_table_name())
+        self._assert_table_matches_expected(zip(expected_cols, expected_types),
+                                            expected_content, table,
+                                            model.results_table_name())
 
     def test_create_results_table_with_fit_with_global_parameters(self):
         logs = []
         global_parameters = ['Height']
-        _, model = create_test_model(('simul-1', ), 'func1', self.parameters, [],
-                                     logs, global_parameters)
+        _, model = create_test_model(('simul-1', ), 'func1', self.parameters,
+                                     [], logs, global_parameters)
         selected_results = [('simul-1', 0)]
         table = model.create_results_table(logs, selected_results)
 
@@ -335,19 +297,23 @@ class ResultsTabModelTest(unittest.TestCase):
             'f1.PeakCentreError', 'f1.Sigma', 'f1.SigmaError',
             'Cost function value'
         ]
+        expected_types = (TableColumnType.NoType, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y,
+                          TableColumnType.YErr, TableColumnType.Y)
         expected_content = [
             ('simul-1_Parameters', self.f0_height[0], self.f0_height[1],
              self.f0_centre[0], self.f0_centre[1], self.f0_sigma[0],
              self.f0_sigma[1], self.f1_centre[0], self.f1_centre[1],
              self.f1_sigma[0], self.f1_sigma[1], self.cost_function[0])
         ]
-        self._assert_table_matches_expected(expected_cols, expected_content,
-                                            table, model.results_table_name())
+        self._assert_table_matches_expected(zip(expected_cols, expected_types),
+                                            expected_content, table,
+                                            model.results_table_name())
 
     # ------------------------- failure tests ----------------------------
-    def test_log_names_from_workspace_not_in_ADS_raises_exception(self):
-        self.assertRaises(KeyError, log_names, 'not a workspace in ADS')
-
     def test_create_results_table_raises_error_if_number_params_different(
             self):
         parameters = OrderedDict([('Height', (100, 0.1)),
@@ -368,8 +334,10 @@ class ResultsTabModelTest(unittest.TestCase):
         parameters = OrderedDict([('f0.Height', (100, 0.1)),
                                   ('f1.Height', (90, 0.001)),
                                   ('Cost function value', (1.5, 0))])
-        fits_func1= create_test_fits(('ws1', ), 'func1', parameters, [])
-        fits_globals = create_test_fits(('ws2', ), 'func1', parameters, [],
+        fits_func1 = create_test_fits(('ws1', ), 'func1', parameters, [])
+        fits_globals = create_test_fits(('ws2', ),
+                                        'func1',
+                                        parameters, [],
                                         global_parameters=['Height'])
         model = ResultsTabModel(FittingContext(fits_func1 + fits_globals))
 
@@ -377,15 +345,35 @@ class ResultsTabModelTest(unittest.TestCase):
         self.assertRaises(RuntimeError, model.create_results_table, [],
                           selected_results)
 
+    def test_create_results_table_with_logs_missing_from_some_workspaces_raises(
+            self):
+        parameters = OrderedDict([('f0.Height', (100, 0.1))])
+        logs = [('log1', (1., 2.)), ('log2', (3., 4.)), ('log3', (4., 5.)),
+                ('log4', (5., 6.))]
+        fits_logs1 = create_test_fits(('ws1', ), 'func1', parameters)
+        add_logs(fits_logs1[0].input_workspaces[0], logs[:2])
+
+        fits_logs2 = create_test_fits(('ws2', ), 'func1', parameters)
+        add_logs(fits_logs2[0].input_workspaces[0], logs[2:])
+        model = ResultsTabModel(FittingContext(fits_logs1 + fits_logs2))
+
+        selected_results = [('ws1', 0), ('ws2', 1)]
+        selected_logs = ['log1', 'log3']
+        self.assertRaises(RuntimeError, model.create_results_table,
+                          selected_logs, selected_results)
+
     # ---------------------- Private helper functions -------------------------
 
     def _assert_table_matches_expected(self, expected_cols, expected_content,
                                        table, table_name):
         self.assertTrue(isinstance(table, ITableWorkspace))
         self.assertTrue(table_name in AnalysisDataService.Instance())
-        self.assertEqual(len(expected_cols), table.columnCount())
         self.assertEqual(len(expected_content), table.rowCount())
-        self.assertEqual(expected_cols, table.getColumnNames())
+        self.assertEqual(len(list(expected_cols)), table.columnCount())
+        actual_col_names = table.getColumnNames()
+        for index, (expected_name, expected_type) in enumerate(expected_cols):
+            self.assertEqual(expected_name, actual_col_names[index])
+            self.assertEqual(expected_type.value, table.getPlotType(index))
 
         for row_index, (expected_row,
                         actual_row) in enumerate(zip(expected_content, table)):