#include "MantidQtCustomInterfaces/Reflectometry/GenericDataProcessorPresenter.h"
#include "MantidAPI/AlgorithmManager.h"
#include "MantidAPI/ITableWorkspace.h"
#include "MantidAPI/MatrixWorkspace.h"
#include "MantidAPI/NotebookWriter.h"
#include "MantidAPI/TableRow.h"
#include "MantidAPI/WorkspaceFactory.h"
#include "MantidGeometry/Instrument.h"
#include "MantidKernel/Strings.h"
#include "MantidKernel/TimeSeriesProperty.h"
#include "MantidKernel/Utils.h"
#include "MantidKernel/make_unique.h"
#include "MantidQtCustomInterfaces/ParseKeyValueString.h"
#include "MantidQtCustomInterfaces/ProgressableView.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorAlgorithmView.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorAppendRowCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorClearSelectedCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorCopySelectedCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorCutSelectedCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorDeleteRowCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorExpandCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorExportTableCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorGenerateNotebook.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorGroupRowsCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorImportTableCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorNewTableCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorOpenTableCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorOptionsCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorPasteSelectedCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorPlotGroupCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorPlotRowCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorPrependRowCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorProcessCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorSaveTableAsCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorSaveTableCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorSeparatorCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/DataProcessorWorkspaceCommand.h"
#include "MantidQtCustomInterfaces/Reflectometry/ProgressPresenter.h"
#include "MantidQtCustomInterfaces/Reflectometry/QDataProcessorTableModel.h"
#include "MantidQtCustomInterfaces/Reflectometry/QtDataProcessorOptionsDialog.h"
#include "MantidQtCustomInterfaces/Reflectometry/WorkspaceReceiver.h"
#include "MantidQtMantidWidgets/AlgorithmHintStrategy.h"

#include <boost/regex.hpp>
#include <boost/tokenizer.hpp>
#include <fstream>
#include <sstream>

using namespace Mantid::API;
using namespace Mantid::Geometry;
using namespace Mantid::Kernel;
using namespace MantidQt::MantidWidgets;

namespace {
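// Throws std::runtime_error if the given table workspace does not match the
// nine-column layout produced by createWorkspace() below.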
void validateModel(ITableWorkspace_sptr model) {
  if (!model)
    throw std::runtime_error("Null pointer");

  if (model->columnCount() != 9)
    throw std::runtime_error("Selected table has the incorrect number of "
                             "columns; 9 columns are required for a "
                             "reflectometry table.");

  try {
    model->String(0, 0);
    model->String(0, 1);
    model->String(0, 2);
    model->String(0, 3);
    model->String(0, 4);
    model->String(0, 5);
    model->Double(0, 6);
    model->Int(0, 7);
    model->String(0, 8);
  } catch (const std::runtime_error &) {
    throw std::runtime_error("Selected table does not meet the specifications "
                             "to become a model for this interface.");
  }
}

bool isValidModel(Workspace_sptr model) {
  try {
    validateModel(boost::dynamic_pointer_cast<ITableWorkspace>(model));
  } catch (...) {
    return false;
  }
  return true;
}

ITableWorkspace_sptr createWorkspace() {
  ITableWorkspace_sptr ws = WorkspaceFactory::Instance().createTable();
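  // Add the nine columns expected by validateModel(): six string columns, a
  // double scale factor, an int stitch group id and a string options column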
  auto colRuns = ws->addColumn("str", "Run(s)");
  auto colTheta = ws->addColumn("str", "ThetaIn");
  auto colTrans = ws->addColumn("str", "TransRun(s)");
  auto colQmin = ws->addColumn("str", "Qmin");
  auto colQmax = ws->addColumn("str", "Qmax");
  auto colDqq = ws->addColumn("str", "dq/q");
  auto colScale = ws->addColumn("double", "Scale");
  auto colStitch = ws->addColumn("int", "StitchGroup");
  auto colOptions = ws->addColumn("str", "Options");

  colRuns->setPlotType(0);
  colTheta->setPlotType(0);
  colTrans->setPlotType(0);
  colQmin->setPlotType(0);
  colQmax->setPlotType(0);
  colDqq->setPlotType(0);
  colScale->setPlotType(0);
  colStitch->setPlotType(0);
  colOptions->setPlotType(0);

  return ws;
}

ITableWorkspace_sptr createDefaultWorkspace() {
  // Create a blank workspace with one line and set the scale column to 1
  auto ws = createWorkspace();
  ws->appendRow();
  ws->Double(0, MantidQt::CustomInterfaces::ReflTableSchema::COL_SCALE) = 1.0;
  return ws;
}
}

namespace MantidQt {
namespace CustomInterfaces {

/**
* Constructor
* @param tableView : [input] The view this presenter is going to handle
* @param progressView : [input] The progress view this presenter is going to
* handle
* @param dataProcessorAlgorithm : [input] The data processor algorithm's name as
* a string
* @param blacklist : [input] The set of blacklisted properties
*/
GenericDataProcessorPresenter::GenericDataProcessorPresenter(
    DataProcessorAlgorithmView *tableView, ProgressableView *progressView,
    const std::string &dataProcessorAlgorithm,
    const std::set<std::string> &blacklist)
    : WorkspaceObserver(), m_view(tableView), m_progressView(progressView),
      m_dataProcessorAlg(dataProcessorAlgorithm), m_tableDirty(false) {

  // Initialise options
  initOptions();

  // Populate an initial list of valid tables to open, and subscribe to the ADS
  // to keep it up to date
  Mantid::API::AnalysisDataServiceImpl &ads =
      Mantid::API::AnalysisDataService::Instance();

  auto items = ads.getObjectNames();
  for (auto const &name : items) {
    Workspace_sptr ws = ads.retrieve(name);

    if (isValidModel(ws))
      m_workspaceList.insert(name);
  }
  observeAdd();
  observePostDelete();
  observeRename();
  observeADSClear();
  observeAfterReplace();
  m_view->setTableList(m_workspaceList);

  // Provide autocompletion hints for the options column. We use the
  // algorithm's properties minus those we blacklist. We blacklist any useless
  // properties or ones we're handling that the user shouldn't touch.
  IAlgorithm_sptr alg = AlgorithmManager::Instance().create(m_dataProcessorAlg);
  m_view->setOptionsHintStrategy(new AlgorithmHintStrategy(alg, blacklist));
}

GenericDataProcessorPresenter::~GenericDataProcessorPresenter() {}
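
/**
Finds the first unused group id, i.e. the smallest non-negative integer not
used as a group id by any row outside the ignored set.
@param ignoredRows : row indices to exclude when collecting used group ids
@returns the first unused group id
*/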
int GenericDataProcessorPresenter::getUnusedGroup(
    std::set<int> ignoredRows) const {
  std::set<int> usedGroups;

  // Scan through all the rows, working out which group ids are used
  for (int idx = 0; idx < m_model->rowCount(); ++idx) {
    if (ignoredRows.find(idx) != ignoredRows.end())
      continue;

    // This is an unselected row. Add it to the list of used group ids
    usedGroups.insert(
        m_model->data(m_model->index(idx, ReflTableSchema::COL_GROUP)).toInt());
  }

  int groupId = 0;

  // While the group id is one of the used ones, increment it by 1
  while (usedGroups.find(groupId) != usedGroups.end())
    groupId++;

  return groupId;
}

/**
Process selected rows
*/
void GenericDataProcessorPresenter::process() {
  if (m_model->rowCount() == 0) {
    m_view->giveUserWarning("Cannot process an empty Table", "Warning");
    return;
  }

  std::set<int> rows = m_view->getSelectedRows();
  if (rows.empty()) {
    if (m_options["WarnProcessAll"].toBool()) {
      // Does the user want to abort?
      if (!m_view->askUserYesNo(
              "This will process all rows in the table. Continue?",
              "Process all rows?"))
        return;
    }

    // They want to process all rows, so populate rows with every index in the
    // model
    for (int idx = 0; idx < m_model->rowCount(); ++idx)
      rows.insert(idx);
  }

  // Map group numbers to the set of rows in that group we want to process
  std::map<int, std::set<int>> groups;
  for (auto it = rows.begin(); it != rows.end(); ++it)
    groups[m_model->data(m_model->index(*it, ReflTableSchema::COL_GROUP))
               .toInt()]
        .insert(*it);

  // Check each group and warn if we're only partially processing it
  for (auto gIt = groups.begin(); gIt != groups.end(); ++gIt) {
    const int &groupId = gIt->first;
    const std::set<int> &groupRows = gIt->second;
    // Are we only partially processing a group?
    if (groupRows.size() < numRowsInGroup(gIt->first) &&
        m_options["WarnProcessPartialGroup"].toBool()) {
      std::stringstream err;
      err << "You have only selected " << groupRows.size() << " of the ";
      err << numRowsInGroup(groupId) << " rows in group " << groupId << ".";
      err << " Are you sure you want to continue?";
      if (!m_view->askUserYesNo(err.str(), "Continue Processing?"))
        return;
    }
  }

  if (!rowsValid(rows)) {
    return;
  }

  if (!processGroups(groups, rows)) {
    return;
  }

  // If "Output Notebook" checkbox is checked then create an ipython notebook
  if (m_view->getEnableNotebook()) {
    saveNotebook(groups, rows);
  }
}

/**
Display a dialog to choose save location for notebook, then save the notebook
there
@param groups : groups of rows to stitch
@param rows : rows selected for processing
*/
void GenericDataProcessorPresenter::saveNotebook(
    std::map<int, std::set<int>> groups, std::set<int> rows) {
  std::string filename = m_view->requestNotebookPath();
  auto notebook = Mantid::Kernel::make_unique<DataProcessorGenerateNotebook>(
      m_wsName, m_model, m_view->getProcessInstrument(),
      ReflTableSchema::COL_RUNS, ReflTableSchema::COL_TRANSMISSION,
      ReflTableSchema::COL_OPTIONS, ReflTableSchema::COL_ANGLE,
      ReflTableSchema::COL_QMIN, ReflTableSchema::COL_QMAX,
      ReflTableSchema::COL_DQQ, ReflTableSchema::COL_SCALE,
      ReflTableSchema::COL_GROUP);
  std::string generatedNotebook = notebook->generateNotebook(groups, rows);

  std::ofstream file(filename.c_str(), std::ofstream::trunc);
  file << generatedNotebook;
  file.flush();
  file.close();
}

/**
Stitches the workspaces created by the given rows together.
@param rows : the list of rows
*/
void GenericDataProcessorPresenter::stitchRows(std::set<int> rows) {
  // If we can get away with doing nothing, do.
  if (rows.size() < 2)
    return;

  // Properties for Stitch1DMany
  std::vector<std::string> workspaceNames;
  std::vector<std::string> runs;

  std::vector<double> params;
  std::vector<double> startOverlaps;
  std::vector<double> endOverlaps;

  // Go through each row and prepare the properties
  for (auto rowIt = rows.begin(); rowIt != rows.end(); ++rowIt) {
    const std::string runStr =
        m_model->data(m_model->index(*rowIt, ReflTableSchema::COL_RUNS))
            .toString()
            .toStdString();
    const double qmin =
        m_model->data(m_model->index(*rowIt, ReflTableSchema::COL_QMIN))
            .toDouble();
    const double qmax =
        m_model->data(m_model->index(*rowIt, ReflTableSchema::COL_QMAX))
            .toDouble();

    Workspace_sptr runWS = prepareRunWorkspace(runStr);
    if (runWS) {
      const std::string runNo = getRunNumber(runWS);
      if (AnalysisDataService::Instance().doesExist("IvsQ_" + runNo)) {
        runs.push_back(runNo);
        workspaceNames.emplace_back("IvsQ_" + runNo);
      }
    }

    startOverlaps.push_back(qmin);
    endOverlaps.push_back(qmax);
  }

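  // Use the dQ/Q resolution from the first selected row as the binning step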
  double dqq =
      m_model->data(m_model->index(*(rows.begin()), ReflTableSchema::COL_DQQ))
          .toDouble();

  // params are qmin, -dqq, qmax for the final output
  params.push_back(
      *std::min_element(startOverlaps.begin(), startOverlaps.end()));
  params.push_back(-dqq);
  params.push_back(*std::max_element(endOverlaps.begin(), endOverlaps.end()));

  // startOverlaps and endOverlaps need to be slightly offset from each other
  // See usage examples of Stitch1DMany to see why we discard first qmin and
  // last qmax
  startOverlaps.erase(startOverlaps.begin());
  endOverlaps.pop_back();

  std::string outputWSName = "IvsQ_" + boost::algorithm::join(runs, "_");

  // If the previous stitch result is in the ADS already, we'll need to remove
  // it.
  // If it's a group, we'll get an error for trying to group into a used group
  // name
  if (AnalysisDataService::Instance().doesExist(outputWSName))
    AnalysisDataService::Instance().remove(outputWSName);

  IAlgorithm_sptr algStitch =
      AlgorithmManager::Instance().create("Stitch1DMany");
  algStitch->initialize();
  algStitch->setProperty("InputWorkspaces", workspaceNames);
  algStitch->setProperty("OutputWorkspace", outputWSName);
  algStitch->setProperty("Params", params);
  algStitch->setProperty("StartOverlaps", startOverlaps);
  algStitch->setProperty("EndOverlaps", endOverlaps);

  algStitch->execute();

  if (!algStitch->isExecuted())
    throw std::runtime_error("Failed to run Stitch1DMany on IvsQ workspaces.");
}

/**
Process stitch groups
@param groups : groups of rows to stitch
@param rows : the selected rows to process
@returns true if successful, otherwise false
*/
bool GenericDataProcessorPresenter::processGroups(
    std::map<int, std::set<int>> groups, std::set<int> rows) {
  int progress = 0;
  // Each group and each row within count as a progress step.
  const int maxProgress = (int)(rows.size() + groups.size());
  ProgressPresenter progressReporter(progress, maxProgress, maxProgress,
                                     m_progressView);

  for (auto gIt = groups.begin(); gIt != groups.end(); ++gIt) {
    const std::set<int> groupRows = gIt->second;

    // Reduce each row
    for (auto rIt = groupRows.begin(); rIt != groupRows.end(); ++rIt) {
      try {
        reduceRow(*rIt);
        progressReporter.report();
      } catch (std::exception &ex) {
        const std::string rowNo =
            Mantid::Kernel::Strings::toString<int>(*rIt + 1);
        const std::string message =
            "Error encountered while processing row " + rowNo + ":\n";
        m_view->giveUserCritical(message + ex.what(), "Error");
        progressReporter.clear();
        return false;
      }
    }

    try {
      stitchRows(groupRows);
      progressReporter.report();
    } catch (std::exception &ex) {
      const std::string groupNo =
          Mantid::Kernel::Strings::toString<int>(gIt->first);
      const std::string message =
          "Error encountered while stitching group " + groupNo + ":\n";
      m_view->giveUserCritical(message + ex.what(), "Error");
      progressReporter.clear();
      return false;
    }
  }
  return true;
}

/**
Validate rows.
@param rows : Rows in the model to validate
@returns true if all rows are valid and false otherwise
*/
bool GenericDataProcessorPresenter::rowsValid(std::set<int> rows) {
  for (auto it = rows.begin(); it != rows.end(); ++it) {
    try {
      validateRow(*it);
      autofillRow(*it);
    } catch (std::exception &ex) {
      // Allow two theta to be blank
      if (ex.what() ==
          std::string("Value for two theta could not be found in log."))
        continue;

      const std::string rowNo = Mantid::Kernel::Strings::toString<int>(*it + 1);
      m_view->giveUserCritical(
          "Error found in row " + rowNo + ":\n" + ex.what(), "Error");
      return false;
    }
  }
  return true;
}

/**
Validate a row.
If a row passes validation, it is ready to be autofilled, but
not necessarily ready for processing.
@param rowNo : The row in the model to validate
@throws std::invalid_argument if the row fails validation
*/
void GenericDataProcessorPresenter::validateRow(int rowNo) const {
  if (rowNo >= m_model->rowCount())
    throw std::invalid_argument("Invalid row");

  if (m_model->data(m_model->index(rowNo, ReflTableSchema::COL_RUNS))
          .toString()
          .isEmpty())
    throw std::invalid_argument("Run column may not be empty.");
}

/**
Autofill a row
@param rowNo : The row in the model to autofill
@throws std::runtime_error if the row could not be auto-filled
*/
void GenericDataProcessorPresenter::autofillRow(int rowNo) {
  if (rowNo >= m_model->rowCount())
    throw std::runtime_error("Invalid row");

  const std::string runStr =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_RUNS))
          .toString()
          .toStdString();
  auto runWS = prepareRunWorkspace(runStr);
  auto runMWS = boost::dynamic_pointer_cast<MatrixWorkspace>(runWS);
  auto runWSG = boost::dynamic_pointer_cast<WorkspaceGroup>(runWS);

  // If we've got a workspace group, use the first workspace in it
  if (!runMWS && runWSG)
    runMWS = boost::dynamic_pointer_cast<MatrixWorkspace>(runWSG->getItem(0));

  if (!runMWS)
    throw std::runtime_error("Could not convert " + runWS->name() +
                             " to a MatrixWorkspace.");

  // Fetch two theta from the log if needed
  if (m_model->data(m_model->index(rowNo, ReflTableSchema::COL_ANGLE))
          .toString()
          .isEmpty()) {
    Property *logData = NULL;

    // Fetch the Theta value from the sample log
    try {
      logData = runMWS->mutableRun().getLogData("Theta");
    } catch (std::exception &) {
      throw std::runtime_error(
          "Value for two theta could not be found in log.");
    }

    auto logPWV = dynamic_cast<const PropertyWithValue<double> *>(logData);
    auto logTSP = dynamic_cast<const TimeSeriesProperty<double> *>(logData);

    double thetaVal;
    if (logPWV)
      thetaVal = *logPWV;
    else if (logTSP && logTSP->realSize() > 0)
      thetaVal = logTSP->lastValue();
    else
      throw std::runtime_error(
          "Value for two theta could not be found in log.");

    // Update the model
    if (m_options["RoundAngle"].toBool())
      thetaVal =
          Utils::roundToDP(thetaVal, m_options["RoundAnglePrecision"].toInt());

    m_model->setData(m_model->index(rowNo, ReflTableSchema::COL_ANGLE),
                     thetaVal);
    m_tableDirty = true;
  }

  // If we need to calculate the resolution, do.
  if (m_model->data(m_model->index(rowNo, ReflTableSchema::COL_DQQ))
          .toString()
          .isEmpty()) {
    IAlgorithm_sptr calcResAlg =
        AlgorithmManager::Instance().create("CalculateResolution");
    calcResAlg->setProperty("Workspace", runMWS);
    calcResAlg->setProperty(
        "TwoTheta",
        m_model->data(m_model->index(rowNo, ReflTableSchema::COL_ANGLE))
            .toString()
            .toStdString());
    calcResAlg->execute();

    if (!calcResAlg->isExecuted())
      throw std::runtime_error("CalculateResolution failed. Please manually "
                               "enter a value in the dQ/Q column.");

    // Update the model
    double dqqVal = calcResAlg->getProperty("Resolution");

    if (m_options["RoundDQQ"].toBool())
      dqqVal = Utils::roundToDP(dqqVal, m_options["RoundDQQPrecision"].toInt());

    m_model->setData(m_model->index(rowNo, ReflTableSchema::COL_DQQ), dqqVal);
    m_tableDirty = true;
  }
}

/**
Extracts the run number of a workspace
@param ws : The workspace to fetch the run number from
@returns The run number of the workspace
*/
std::string
GenericDataProcessorPresenter::getRunNumber(const Workspace_sptr &ws) {
  // If we can, use the run number from the workspace's sample log
  MatrixWorkspace_sptr mws = boost::dynamic_pointer_cast<MatrixWorkspace>(ws);
  if (mws) {
    try {
      const Property *runProperty = mws->mutableRun().getLogData("run_number");
      auto runNumber =
          dynamic_cast<const PropertyWithValue<std::string> *>(runProperty);
      if (runNumber)
        return *runNumber;
    } catch (Mantid::Kernel::Exception::NotFoundError &) {
      // We'll just fall back to looking at the workspace's name
    }
  }

  // Okay, let's see what we can get from the workspace's name
  const std::string wsName = ws->name();

  // Matches TOF_13460 -> 13460
  boost::regex outputRegex("(TOF|IvsQ|IvsLam)_([0-9]+)");

  // Matches INTER13460 -> 13460
  boost::regex instrumentRegex("[a-zA-Z]{3,}([0-9]{3,})");

  boost::smatch matches;

  if (boost::regex_match(wsName, matches, outputRegex)) {
    return matches[2].str();
  } else if (boost::regex_match(wsName, matches, instrumentRegex)) {
    return matches[1].str();
  }

  // Resort to using the workspace name
  return wsName;
}

/**
Takes a user specified run, or list of runs, and returns a pointer to the
desired TOF workspace
@param runStr : The run or list of runs (separated by '+')
@throws std::runtime_error if the workspace could not be prepared
@returns a shared pointer to the workspace
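Example: prepareRunWorkspace("13460 + 13461") sums the two runs using the Plus
algorithm and returns the result as a workspace named "TOF_13460_13461".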
*/
Workspace_sptr
GenericDataProcessorPresenter::prepareRunWorkspace(const std::string &runStr) {
  const std::string instrument = m_view->getProcessInstrument();

  std::vector<std::string> runs;
  boost::split(runs, runStr, boost::is_any_of("+"));

  if (runs.empty())
    throw std::runtime_error("No runs given");

  // Remove leading/trailing whitespace from each run
  for (auto runIt = runs.begin(); runIt != runs.end(); ++runIt)
    boost::trim(*runIt);

  // If we're only given one run, just return that
  if (runs.size() == 1)
    return loadRun(runs[0], instrument);

  const std::string outputName = "TOF_" + boost::algorithm::join(runs, "_");

  // Check if we've already prepared it
  if (AnalysisDataService::Instance().doesExist(outputName))
    return AnalysisDataService::Instance().retrieveWS<Workspace>(outputName);

  /* Ideally, this should be executed as a child algorithm to keep the ADS
   * tidy, but that doesn't preserve history nicely, so we'll just take care
   * of tidying up in the event of failure.
   */
  IAlgorithm_sptr algPlus = AlgorithmManager::Instance().create("Plus");
  algPlus->initialize();
  algPlus->setProperty("LHSWorkspace", loadRun(runs[0], instrument)->name());
  algPlus->setProperty("OutputWorkspace", outputName);

  // Drop the first run from the runs list
  runs.erase(runs.begin());

  try {
    // Iterate through all the remaining runs, adding them to the first run
    for (auto runIt = runs.begin(); runIt != runs.end(); ++runIt) {
      algPlus->setProperty("RHSWorkspace", loadRun(*runIt, instrument)->name());
      algPlus->execute();

      // After the first execution we replace the LHS with the previous output
      algPlus->setProperty("LHSWorkspace", outputName);
    }
  } catch (...) {
    // If we're unable to create the full workspace, discard the partial version
    AnalysisDataService::Instance().remove(outputName);

    // We've tidied up, now re-throw.
    throw;
  }

  return AnalysisDataService::Instance().retrieveWS<Workspace>(outputName);
}

/**
Loads a run from disk or fetches it from the AnalysisDataService
@param run : The name of the run
@param instrument : The instrument the run belongs to
@throws std::runtime_error if the run could not be loaded
@returns a shared pointer to the workspace
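Example: loadRun("13460", "INTER") first looks in the ADS for a workspace named
"13460", "TOF_13460" or "INTER13460", and otherwise loads the file "INTER13460"
into a workspace named "TOF_13460".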
*/
Workspace_sptr
GenericDataProcessorPresenter::loadRun(const std::string &run,
                                       const std::string &instrument = "") {
  // First, let's see if the run given is the name of a workspace in the ADS
  if (AnalysisDataService::Instance().doesExist(run))
    return AnalysisDataService::Instance().retrieveWS<Workspace>(run);

  // If the run string is numeric
  if (boost::regex_match(run, boost::regex("\\d+"))) {
    std::string wsName;

    // Look for "TOF_<run_number>" in the ADS
    wsName = "TOF_" + run;
    if (AnalysisDataService::Instance().doesExist(wsName))
      return AnalysisDataService::Instance().retrieveWS<Workspace>(wsName);

    // Look for "<instrument><run_number>" in the ADS
    wsName = instrument + run;
    if (AnalysisDataService::Instance().doesExist(wsName))
      return AnalysisDataService::Instance().retrieveWS<Workspace>(wsName);
  }

  // We'll just have to load it ourselves
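  // (e.g. "INTER13460", which the Load algorithm resolves to a data file)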
  const std::string filename = instrument + run;
  IAlgorithm_sptr algLoadRun = AlgorithmManager::Instance().create("Load");
  algLoadRun->initialize();
  algLoadRun->setProperty("Filename", filename);
  algLoadRun->setProperty("OutputWorkspace", "TOF_" + run);
  algLoadRun->execute();

  if (!algLoadRun->isExecuted())
    throw std::runtime_error("Could not open " + filename);

  return AnalysisDataService::Instance().retrieveWS<Workspace>("TOF_" + run);
}

/**
Calculates the minimum and maximum values for Q
@param ws : The workspace to fetch the instrument values from
@param theta : The value of two theta to use in calculations
@returns a vector containing qmin and qmax, in that order
*/
std::vector<double> GenericDataProcessorPresenter::calcQRange(Workspace_sptr ws,
                                                              double theta) {
  auto mws = boost::dynamic_pointer_cast<MatrixWorkspace>(ws);
  auto wsg = boost::dynamic_pointer_cast<WorkspaceGroup>(ws);

  // If we've got a workspace group, use the first workspace in it
  if (!mws && wsg)
    mws = boost::dynamic_pointer_cast<MatrixWorkspace>(wsg->getItem(0));

  if (!mws)
    throw std::runtime_error("Could not convert " + ws->name() +
                             " to a MatrixWorkspace.");

  double lmin, lmax;
  try {
    const Instrument_const_sptr instrument = mws->getInstrument();
    lmin = instrument->getNumberParameter("LambdaMin")[0];
    lmax = instrument->getNumberParameter("LambdaMax")[0];
  } catch (std::exception &) {
    throw std::runtime_error("LambdaMin/LambdaMax instrument parameters are "
                             "required to calculate qmin/qmax");
  }

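  // Q = 4 * pi * sin(theta) / lambda, with theta supplied in degrees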
  double qmin = 4 * M_PI / lmax * sin(theta * M_PI / 180.0);
  double qmax = 4 * M_PI / lmin * sin(theta * M_PI / 180.0);

  if (m_options["RoundQMin"].toBool())
    qmin = Utils::roundToDP(qmin, m_options["RoundQMinPrecision"].toInt());

  if (m_options["RoundQMax"].toBool())
    qmax = Utils::roundToDP(qmax, m_options["RoundQMaxPrecision"].toInt());

  std::vector<double> ret;
  ret.push_back(qmin);
  ret.push_back(qmax);
  return ret;
}

/**
Create a transmission workspace
@param transString : the numbers of the transmission runs to use
@throws std::runtime_error if the transmission workspace could not be created
@returns a shared pointer to the transmission workspace
*/
Workspace_sptr
GenericDataProcessorPresenter::makeTransWS(const std::string &transString) {
  const size_t maxTransWS = 2;

  std::vector<std::string> transVec;
  std::vector<Workspace_sptr> transWSVec;

  // Take the first two run numbers
  boost::split(transVec, transString, boost::is_any_of(","));
  if (transVec.size() > maxTransWS)
    transVec.resize(maxTransWS);

  if (transVec.size() == 0)
    throw std::runtime_error("Failed to parse the transmission run list.");

  for (auto it = transVec.begin(); it != transVec.end(); ++it)
    transWSVec.push_back(loadRun(*it, m_view->getProcessInstrument()));

  // If the transmission workspace is already in the ADS, re-use it
  std::string lastName = "TRANS_" + boost::algorithm::join(transVec, "_");
  if (AnalysisDataService::Instance().doesExist(lastName))
    return AnalysisDataService::Instance().retrieveWS<Workspace>(lastName);

  // We have the runs, so we can create a TransWS
  IAlgorithm_sptr algCreateTrans =
      AlgorithmManager::Instance().create("CreateTransmissionWorkspaceAuto");
  algCreateTrans->initialize();
  algCreateTrans->setProperty("FirstTransmissionRun", transWSVec[0]->name());
  if (transWSVec.size() > 1)
    algCreateTrans->setProperty("SecondTransmissionRun", transWSVec[1]->name());

  std::string wsName = "TRANS_" + getRunNumber(transWSVec[0]);
  if (transWSVec.size() > 1)
    wsName += "_" + getRunNumber(transWSVec[1]);

  algCreateTrans->setProperty("OutputWorkspace", wsName);

  if (!algCreateTrans->isInitialized())
    throw std::runtime_error(
        "Could not initialize CreateTransmissionWorkspaceAuto");

  algCreateTrans->execute();

  if (!algCreateTrans->isExecuted())
    throw std::runtime_error(
        "CreateTransmissionWorkspaceAuto failed to execute");

  return AnalysisDataService::Instance().retrieveWS<Workspace>(wsName);
}

/**
Reduce a row
@param rowNo : The row in the model to reduce
@throws std::runtime_error if reduction fails
*/
void GenericDataProcessorPresenter::reduceRow(int rowNo) {
  const std::string runStr =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_RUNS))
          .toString()
          .toStdString();
  const std::string transStr =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_TRANSMISSION))
          .toString()
          .toStdString();
  const std::string options =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_OPTIONS))
          .toString()
          .toStdString();

  double theta = 0;

  bool thetaGiven =
      !m_model->data(m_model->index(rowNo, ReflTableSchema::COL_ANGLE))
           .toString()
           .isEmpty();

  if (thetaGiven)
    theta = m_model->data(m_model->index(rowNo, ReflTableSchema::COL_ANGLE))
                .toDouble();

  auto runWS = prepareRunWorkspace(runStr);
  const std::string runNo = getRunNumber(runWS);

  Workspace_sptr transWS;
  if (!transStr.empty())
    transWS = makeTransWS(transStr);

  IAlgorithm_sptr algReflOne =
      AlgorithmManager::Instance().create(m_dataProcessorAlg);
  algReflOne->initialize();
  algReflOne->setProperty("InputWorkspace", runWS->name());
  if (transWS)
    algReflOne->setProperty("FirstTransmissionRun", transWS->name());
  algReflOne->setProperty("OutputWorkspace", "IvsQ_" + runNo);
  algReflOne->setProperty("OutputWorkspaceWaveLength", "IvsLam_" + runNo);

  if (thetaGiven)
    algReflOne->setProperty("ThetaIn", theta);

  // Parse and set any user-specified options
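  // (supplied in the Options column as comma-separated key=value pairs)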
  auto optionsMap = parseKeyValueString(options);
  for (auto kvp = optionsMap.begin(); kvp != optionsMap.end(); ++kvp) {
    try {
      algReflOne->setProperty(kvp->first, kvp->second);
    } catch (Mantid::Kernel::Exception::NotFoundError &) {
      throw std::runtime_error("Invalid property in options column: " +
                               kvp->first);
    }
  }

  algReflOne->execute();

  if (!algReflOne->isExecuted())
    throw std::runtime_error("Failed to run " + m_dataProcessorAlg + ".");

  const double scale =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_SCALE))
          .toDouble();
  if (scale != 1.0) {
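    // Scale multiplies by Factor, so use 1/scale to divide by the given scale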
    IAlgorithm_sptr algScale = AlgorithmManager::Instance().create("Scale");
    algScale->initialize();
    algScale->setProperty("InputWorkspace", "IvsQ_" + runNo);
    algScale->setProperty("OutputWorkspace", "IvsQ_" + runNo);
    algScale->setProperty("Factor", 1.0 / scale);
    algScale->execute();

    if (!algScale->isExecuted())
      throw std::runtime_error("Failed to run Scale algorithm");
  }

  // Reduction has completed. Put Qmin and Qmax into the table if needed, for
  // stitching.
  if (m_model->data(m_model->index(rowNo, ReflTableSchema::COL_QMIN))
          .toString()
          .isEmpty() ||
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_QMAX))
          .toString()
          .isEmpty()) {
    Workspace_sptr ws =
        AnalysisDataService::Instance().retrieveWS<Workspace>("IvsQ_" + runNo);
    std::vector<double> qrange = calcQRange(ws, theta);

    if (m_model->data(m_model->index(rowNo, ReflTableSchema::COL_QMIN))
            .toString()
            .isEmpty())
      m_model->setData(m_model->index(rowNo, ReflTableSchema::COL_QMIN),
                       qrange[0]);

    if (m_model->data(m_model->index(rowNo, ReflTableSchema::COL_QMAX))
            .toString()
            .isEmpty())
      m_model->setData(m_model->index(rowNo, ReflTableSchema::COL_QMAX),
                       qrange[1]);

    m_tableDirty = true;
  }

  // We need to make sure that qmin and qmax are respected, so we rebin to
  // those limits here.
  IAlgorithm_sptr algCrop = AlgorithmManager::Instance().create("Rebin");
  algCrop->initialize();
  algCrop->setProperty("InputWorkspace", "IvsQ_" + runNo);
  algCrop->setProperty("OutputWorkspace", "IvsQ_" + runNo);
  const double qmin =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_QMIN))
          .toDouble();
  const double qmax =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_QMAX))
          .toDouble();
  const double dqq =
      m_model->data(m_model->index(rowNo, ReflTableSchema::COL_DQQ)).toDouble();
  std::vector<double> params;
  params.push_back(qmin);
  params.push_back(-dqq);
  params.push_back(qmax);
  algCrop->setProperty("Params", params);
  algCrop->execute();

  if (!algCrop->isExecuted())
    throw std::runtime_error("Failed to run Rebin algorithm");

  // Also fill in theta if needed
  if (m_model->data(m_model->index(rowNo, ReflTableSchema::COL_ANGLE))
          .toString()
          .isEmpty() &&
      thetaGiven)
    m_model->setData(m_model->index(rowNo, ReflTableSchema::COL_ANGLE), theta);
}

/**
Inserts a new row in the specified location
@param index The index to insert the new row before
*/
void GenericDataProcessorPresenter::insertRow(int index) {
  const int groupId = getUnusedGroup();
  if (!m_model->insertRow(index))
    return;
  // Set the default scale to 1.0
  m_model->setData(m_model->index(index, ReflTableSchema::COL_SCALE), 1.0);
  // Set the group id of the new row
  m_model->setData(m_model->index(index, ReflTableSchema::COL_GROUP), groupId);
}

/**
Insert a row after the last selected row
*/
void GenericDataProcessorPresenter::appendRow() {
  std::set<int> rows = m_view->getSelectedRows();
  if (rows.empty())
    insertRow(m_model->rowCount());
  else
    insertRow(*rows.rbegin() + 1);
  m_tableDirty = true;
}

/**
Insert a row before the first selected row
*/
void GenericDataProcessorPresenter::prependRow() {
  std::set<int> rows = m_view->getSelectedRows();
  if (rows.empty())
    insertRow(0);
  else
    insertRow(*rows.begin());
  m_tableDirty = true;
}

/**
Get the index of the first blank row, or -1 if none exists.
*/
int GenericDataProcessorPresenter::getBlankRow() {
  // Go through every column of every row (except the scale and group columns)
  // and check if it's blank. If there's a blank row, return it.
  const int rowCount = m_model->rowCount();
  for (int i = 0; i < rowCount; ++i) {
    bool isBlank = true;
    for (int j = ReflTableSchema::COL_RUNS; j <= ReflTableSchema::COL_OPTIONS;
         ++j) {
      // Don't bother checking the scale or group column, it'll always have a
      // value.
      if (j == ReflTableSchema::COL_SCALE || j == ReflTableSchema::COL_GROUP)
        continue;

      if (!m_model->data(m_model->index(i, j)).toString().isEmpty()) {