Commit 0f71eded authored by Hahn, Steven

Refs #11815. Wrap Poco::StringTokenizer & start fixing tests.

parent 321b181b
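
The hunks below replace direct use of `Poco::StringTokenizer` with a `Mantid::Kernel::StringTokenizer` wrapper. The wrapper's header is not part of this diff, so the sketch below is only a guess at the minimal interface the call sites rely on: a constructor taking the text, the separator set, and option flags, plus `begin()`/`end()`, `count()` and `operator[]`. The flag names are taken from the call sites; the constructor signature and the trimming/empty-token behaviour are assumptions, not the actual Mantid implementation.

```cpp
// Hypothetical sketch only: the real MantidKernel/StringTokenizer.h is not
// shown in this commit; names and behaviour are inferred from the call sites.
#include <cstddef>
#include <string>
#include <vector>

namespace Mantid {
namespace Kernel {

class StringTokenizer {
public:
  // Flags mirror the Poco::StringTokenizer names used throughout the diff.
  enum Options { TOK_IGNORE_EMPTY = 1, TOK_TRIM = 2 };

  StringTokenizer(const std::string &str, const std::string &separators,
                  unsigned options = 0) {
    std::string token;
    auto flush = [&]() {
      std::string t = token;
      if (options & TOK_TRIM) {
        // Trim leading/trailing spaces and tabs (simplified trimming).
        const auto first = t.find_first_not_of(" \t");
        const auto last = t.find_last_not_of(" \t");
        t = (first == std::string::npos) ? std::string()
                                         : t.substr(first, last - first + 1);
      }
      if (!t.empty() || !(options & TOK_IGNORE_EMPTY))
        m_tokens.push_back(t);
      token.clear();
    };
    for (char c : str) {
      if (separators.find(c) != std::string::npos)
        flush(); // end of token at any separator character
      else
        token += c;
    }
    flush(); // final token (or trailing empty token)
  }

  // The call sites iterate over tokens, index them, and ask for their number.
  using const_iterator = std::vector<std::string>::const_iterator;
  const_iterator begin() const { return m_tokens.begin(); }
  const_iterator end() const { return m_tokens.end(); }
  std::size_t count() const { return m_tokens.size(); }
  const std::string &operator[](std::size_t i) const { return m_tokens[i]; }

private:
  std::vector<std::string> m_tokens;
};

} // namespace Kernel
} // namespace Mantid
```
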
......@@ -26,7 +26,7 @@
#include <Poco/ActiveResult.h>
#include <Poco/NotificationCenter.h>
#include <Poco/RWLock.h>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <Poco/Void.h>
#include <json/json.h>
......@@ -204,9 +204,10 @@ void Algorithm::progress(double p, const std::string &msg, double estimatedTime,
//---------------------------------------------------------------------------------------------
/// Function to return all of the categories that contain this algorithm
const std::vector<std::string> Algorithm::categories() const {
Poco::StringTokenizer tokenizer(category(), categorySeparator(),
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer tokenizer(
category(), categorySeparator(),
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
std::vector<std::string> res(tokenizer.begin(), tokenizer.end());
......@@ -230,10 +231,10 @@ const std::string Algorithm::workspaceMethodName() const { return ""; }
*workspaceMethodName attached
*/
const std::vector<std::string> Algorithm::workspaceMethodOn() const {
Poco::StringTokenizer tokenizer(this->workspaceMethodOnTypes(),
WORKSPACE_TYPES_SEPARATOR,
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer tokenizer(
this->workspaceMethodOnTypes(), WORKSPACE_TYPES_SEPARATOR,
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
std::vector<std::string> res;
res.reserve(tokenizer.count());
for (auto iter = tokenizer.begin(); iter != tokenizer.end(); ++iter) {
......
......@@ -7,7 +7,7 @@
#include "MantidKernel/LibraryManager.h"
#include "MantidKernel/ConfigService.h"
#include "Poco/StringTokenizer.h"
#include "MantidKernel/StringTokenizer.h"
namespace Mantid {
namespace API {
......@@ -391,9 +391,10 @@ void AlgorithmFactoryImpl::fillHiddenCategories(
std::set<std::string> *categorySet) const {
std::string categoryString = Kernel::ConfigService::Instance().getString(
"algorithms.categories.hidden");
Poco::StringTokenizer tokenizer(categoryString, ";",
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer tokenizer(
categoryString, ";",
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
std::copy(tokenizer.begin(), tokenizer.end(),
std::inserter(*categorySet, categorySet->end()));
}
......
......@@ -309,9 +309,10 @@ std::string AlgorithmProxy::toString() const {
/// Function to return all of the categories that contain this algorithm
const std::vector<std::string> AlgorithmProxy::categories() const {
Poco::StringTokenizer tokenizer(category(), categorySeparator(),
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer tokenizer(
category(), categorySeparator(),
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
std::vector<std::string> res(tokenizer.begin(), tokenizer.end());
......
......@@ -2,7 +2,7 @@
#include "MantidAPI/Expression.h"
#include "MantidAPI/IConstraint.h"
#include "MantidKernel/LibraryManager.h"
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
namespace Mantid {
namespace API {
......
......@@ -1134,14 +1134,14 @@ void ExperimentInfo::readParameterMap(const std::string &parameterStr) {
Geometry::ParameterMap &pmap = this->instrumentParameters();
Instrument_const_sptr instr = this->getInstrument()->baseInstrument();
int options = Poco::StringTokenizer::TOK_IGNORE_EMPTY;
options += Poco::StringTokenizer::TOK_TRIM;
Poco::StringTokenizer splitter(parameterStr, "|", options);
int options = Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY;
options += Mantid::Kernel::StringTokenizer::TOK_TRIM;
Mantid::Kernel::StringTokenizer splitter(parameterStr, "|", options);
auto iend = splitter.end();
// std::string prev_name;
for (auto itr = splitter.begin(); itr != iend; ++itr) {
Poco::StringTokenizer tokens(*itr, ";");
Mantid::Kernel::StringTokenizer tokens(*itr, ";");
if (tokens.count() < 4)
continue;
std::string comp_name = tokens[0];
......
......@@ -4,12 +4,12 @@
#include "MantidAPI/Expression.h"
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
namespace Mantid {
namespace API {
typedef Poco::StringTokenizer tokenizer;
typedef Mantid::Kernel::StringTokenizer tokenizer;
const std::string DEFAULT_OPS_STR[] = {";", ",", "=", "== != > < <= >=",
"&& || ^^", "+ -", "* /", "^"};
......
......@@ -14,7 +14,7 @@
#include <Poco/Path.h>
#include <Poco/File.h>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <Poco/Exception.h>
#include <boost/regex.hpp>
#include <boost/lexical_cast.hpp>
......@@ -597,9 +597,9 @@ FileFinderImpl::findRuns(const std::string &hintstr) const {
std::string hint = Kernel::Strings::strip(hintstr);
g_log.debug() << "findRuns hint = " << hint << "\n";
std::vector<std::string> res;
Poco::StringTokenizer hints(hint, ",",
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer hints(
hint, ",", Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
auto h = hints.begin();
for (; h != hints.end(); ++h) {
......@@ -616,9 +616,9 @@ FileFinderImpl::findRuns(const std::string &hintstr) const {
fileSuspected = true;
}
Poco::StringTokenizer range(*h, "-",
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer range(
*h, "-", Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
if ((range.count() > 2) && (!fileSuspected)) {
throw std::invalid_argument("Malformed range of runs: " + *h);
} else if ((range.count() == 2) && (!fileSuspected)) {
......
......@@ -8,7 +8,7 @@
#include "MantidAPI/Workspace.h"
#include "MantidAPI/AnalysisDataService.h"
#include "MantidKernel/LibraryManager.h"
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <sstream>
namespace Mantid {
......
......@@ -24,7 +24,7 @@
#include <boost/lexical_cast.hpp>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <limits>
#include <sstream>
......@@ -253,9 +253,10 @@ void IFunction::setHandler(FunctionHandler *handler) {
/// Function to return all of the categories that contain this function
const std::vector<std::string> IFunction::categories() const {
Poco::StringTokenizer tokenizer(category(), categorySeparator(),
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer tokenizer(
category(), categorySeparator(),
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
return std::vector<std::string>(tokenizer.begin(), tokenizer.end());
}
......@@ -572,8 +573,8 @@ protected:
m_value.erase(m_value.size() - 1);
}
}
Poco::StringTokenizer tokenizer(m_value, ",",
Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer tokenizer(
m_value, ",", Mantid::Kernel::StringTokenizer::TOK_TRIM);
v.resize(tokenizer.count());
for (size_t i = 0; i < v.size(); ++i) {
v[i] = boost::lexical_cast<double>(tokenizer[i]);
......
#include "MantidAPI/ScriptRepositoryFactory.h"
#include "MantidAPI/ScriptRepository.h"
#include "MantidKernel/LibraryManager.h"
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <sstream>
namespace Mantid {
......
......@@ -201,7 +201,8 @@ void WorkspaceHistory::saveNexus(::NeXus::File *file) const {
*/
void getWordsInString(const std::string &words3, std::string &w1,
std::string &w2, std::string &w3) {
Poco::StringTokenizer data(words3, " ", Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer data(
words3, " ", Mantid::Kernel::StringTokenizer::TOK_TRIM);
if (data.count() != 3)
throw std::out_of_range("Algorithm list line " + words3 +
" is not of the correct format\n");
......@@ -224,7 +225,8 @@ void getWordsInString(const std::string &words3, std::string &w1,
*/
void getWordsInString(const std::string &words4, std::string &w1,
std::string &w2, std::string &w3, std::string &w4) {
Poco::StringTokenizer data(words4, " ", Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer data(
words4, " ", Mantid::Kernel::StringTokenizer::TOK_TRIM);
if (data.count() != 4)
throw std::out_of_range("Algorithm list line " + words4 +
" is not of the correct format\n");
......
......@@ -222,7 +222,8 @@ void ReadGroupsFromFile::readXMLGroupingFile(const std::string &filename) {
std::string ids = group->getAttribute("val");
Poco::StringTokenizer data(ids, ",", Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer data(
ids, ",", Mantid::Kernel::StringTokenizer::TOK_TRIM);
if (data.begin() != data.end()) {
for (auto it = data.begin(); it != data.end(); ++it) {
......
......@@ -6,7 +6,7 @@
#include <fstream>
#include <sstream>
#include <algorithm>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string/replace.hpp>
......@@ -518,7 +518,7 @@ PlotPeakByLogValue::makeNames() const {
double start = 0;
double end = 0;
typedef Poco::StringTokenizer tokenizer;
typedef Mantid::Kernel::StringTokenizer tokenizer;
tokenizer names(inputList, ";",
tokenizer::TOK_IGNORE_EMPTY | tokenizer::TOK_TRIM);
for (auto it = names.begin(); it != names.end(); ++it) {
......
......@@ -144,8 +144,10 @@ private:
RangeHelper(){};
/// give an enum from poco a better name here
enum {
IGNORE_SPACES = Poco::StringTokenizer::TOK_TRIM ///< equal to
/// Poco::StringTokenizer::TOK_TRIM but
IGNORE_SPACES =
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY ///< equal to
/// Mantid::Kernel::StringTokenizer::TOK_TRIM but
/// saves some typing
};
};
......@@ -239,8 +241,8 @@ private:
/// spectrum number to the this
EMPTY_LINE = 1001 - INT_MAX, ///< when reading from the input file this
/// value means that we found any empty line
IGNORE_SPACES = Poco::StringTokenizer::TOK_TRIM ///< equal to
/// Poco::StringTokenizer::TOK_TRIM but
IGNORE_SPACES = Mantid::Kernel::StringTokenizer::TOK_TRIM ///< equal to
/// Mantid::Kernel::StringTokenizer::TOK_TRIM but
/// saves some typing
};
......
......@@ -6,7 +6,7 @@
#include <Poco/DateTimeFormatter.h>
#include <Poco/DateTimeFormat.h>
#include <Poco/DateTimeParser.h>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
// jsoncpp
#include <json/json.h>
......@@ -175,9 +175,10 @@ CheckMantidVersion::cleanVersionTag(const std::string &versionTag) const {
std::vector<int>
CheckMantidVersion::splitVersionString(const std::string &versionString) const {
std::vector<int> retVal;
Poco::StringTokenizer tokenizer(versionString, ".",
Poco::StringTokenizer::TOK_TRIM |
Poco::StringTokenizer::TOK_IGNORE_EMPTY);
Mantid::Kernel::StringTokenizer tokenizer(
versionString, ".",
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
auto h = tokenizer.begin();
for (; h != tokenizer.end(); ++h) {
......
......@@ -732,10 +732,13 @@ void GroupDetectors2::processMatrixWorkspace(
*/
int GroupDetectors2::readInt(std::string line) {
// remove comments and white space (TOK_TRIM)
Poco::StringTokenizer dataComment(line, "#", Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer dataComment(
line, "#", Mantid::Kernel::StringTokenizer::TOK_TRIM);
if (dataComment.begin() != dataComment.end()) {
Poco::StringTokenizer data(*(dataComment.begin()), " ",
Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer data(
*(dataComment.begin()), " ",
Mantid::Kernel::StringTokenizer::TOK_TRIM |
Mantid::Kernel::StringTokenizer::TOK_IGNORE_EMPTY);
if (data.count() == 1) {
if (!data[0].empty()) {
try {
......@@ -855,7 +858,7 @@ void GroupDetectors2::readSpectraIndexes(std::string line,
std::vector<int64_t> &unUsedSpec,
std::string seperator) {
// remove comments and white space
Poco::StringTokenizer dataComment(line, seperator, IGNORE_SPACES);
Mantid::Kernel::StringTokenizer dataComment(line, seperator, IGNORE_SPACES);
for (auto itr = dataComment.begin(); itr != dataComment.end(); ++itr) {
std::vector<size_t> specNums;
specNums.reserve(output.capacity());
......@@ -1252,12 +1255,13 @@ void GroupDetectors2::RangeHelper::getList(const std::string &line,
// function
return;
}
Poco::StringTokenizer ranges(line, "-");
Mantid::Kernel::StringTokenizer ranges(line, "-");
try {
size_t loop = 0;
do {
Poco::StringTokenizer beforeHyphen(ranges[loop], " ", IGNORE_SPACES);
Mantid::Kernel::StringTokenizer beforeHyphen(ranges[loop], " ",
IGNORE_SPACES);
auto readPostion = beforeHyphen.begin();
if (readPostion == beforeHyphen.end()) {
throw std::invalid_argument("'-' found at the start of a list, can't "
......@@ -1273,7 +1277,8 @@ void GroupDetectors2::RangeHelper::getList(const std::string &line,
break;
}
Poco::StringTokenizer afterHyphen(ranges[loop + 1], " ", IGNORE_SPACES);
Mantid::Kernel::StringTokenizer afterHyphen(ranges[loop + 1], " ",
IGNORE_SPACES);
readPostion = afterHyphen.begin();
if (readPostion == afterHyphen.end()) {
throw std::invalid_argument("A '-' follows straight after another '-', "
......
......@@ -8,7 +8,7 @@
#include <Poco/Path.h>
#include <Poco/File.h>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <Poco/Exception.h>
#include <sstream>
......
......@@ -12,7 +12,7 @@
#include <fstream>
#include <boost/tokenizer.hpp>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
// String utilities
#include <boost/algorithm/string.hpp>
......
......@@ -12,7 +12,7 @@
#include <fstream>
#include <boost/tokenizer.hpp>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
// String utilities
#include <boost/algorithm/string.hpp>
#include <boost/regex.hpp>
......
......@@ -29,7 +29,7 @@
#include <boost/lexical_cast.hpp>
#include <boost/shared_array.hpp>
#include <Poco/StringTokenizer.h>
#include <MantidKernel/StringTokenizer.h>
#include <nexus/NeXusException.hpp>
......@@ -1762,7 +1762,8 @@ bool UDlesserExecCount(NXClassInfo elem1, NXClassInfo elem2) {
void LoadNexusProcessed::getWordsInString(const std::string &words3,
std::string &w1, std::string &w2,
std::string &w3) {
Poco::StringTokenizer data(words3, " ", Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer data(
words3, " ", Mantid::Kernel::StringTokenizer::TOK_TRIM);
if (data.count() != 3) {
g_log.warning() << "Algorithm list line " + words3 +
" is not of the correct format\n";
......@@ -1788,7 +1789,8 @@ void LoadNexusProcessed::getWordsInString(const std::string &words3,
void LoadNexusProcessed::getWordsInString(const std::string &words4,
std::string &w1, std::string &w2,
std::string &w3, std::string &w4) {
Poco::StringTokenizer data(words4, " ", Poco::StringTokenizer::TOK_TRIM);
Mantid::Kernel::StringTokenizer data(
words4, " ", Mantid::Kernel::StringTokenizer::TOK_TRIM);
if (data.count() != 4) {
g_log.warning() << "Algorithm list line " + words4 +
" is not of the correct format\n";
......
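
The call-site pattern repeated throughout this diff reduces to the following. This is a minimal standalone usage sketch, assuming the wrapper interface outlined near the top of this page (or the real `MantidKernel/StringTokenizer.h`) behaves like Poco's tokenizer; the input string and category names are made up for illustration.

```cpp
// Usage sketch matching the Algorithm::categories() idiom in this diff.
// Assumes the StringTokenizer interface sketched above; in the real code
// base this would be #include <MantidKernel/StringTokenizer.h>.
#include <iostream>
#include <string>
#include <vector>

int main() {
  using Mantid::Kernel::StringTokenizer;

  // Hypothetical category string with an empty entry and stray spaces.
  const std::string categories = "Arithmetic; ; Transforms\\Axes";

  StringTokenizer tokenizer(categories, ";",
                            StringTokenizer::TOK_TRIM |
                                StringTokenizer::TOK_IGNORE_EMPTY);

  // Same idiom as the categories() methods: copy the tokens into a vector.
  std::vector<std::string> res(tokenizer.begin(), tokenizer.end());
  for (const auto &category : res)
    std::cout << category << '\n'; // prints "Arithmetic", "Transforms\Axes"
  return 0;
}
```
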