/*WIKI*
The LoadEventPreNeXus algorithm stores the data from a pre-NeXus neutron event file in an [[EventWorkspace]]. The default histogram consists of a single bin wide enough to hold all events (in all pixels), and the [[units]] are set to time-of-flight. Since the output is an [[EventWorkspace]], it can be rebinned to finer bins with no loss of data.
=== Optional properties ===
Specific pulse ID and mapping files can be specified if needed; these are guessed at automatically from the neutron filename, if not specified.
A specific list of pixel ids can be specified, in which case only events relating to these pixels will appear in the output.
The ChunkNumber and TotalChunks properties can be used to load only a section of the file; e.g. if these are 1 and 10 respectively only the first 10% of the events will be loaded.
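For illustration only, a minimal C++ sketch of running the algorithm through the AlgorithmManager is shown below. The run number and file names are hypothetical, and the snippet assumes the standard Mantid C++ API headers (MantidAPI/AlgorithmManager.h) are available; it loads the first of ten chunks.

 // Create the loader through the algorithm factory/manager (sketch, not a verbatim test).
 Mantid::API::IAlgorithm_sptr loader =
     Mantid::API::AlgorithmManager::Instance().create("LoadEventPreNexus");
 loader->setPropertyValue("EventFilename", "INSTR_1234_neutron_event.dat"); // hypothetical file
 loader->setPropertyValue("OutputWorkspace", "raw_events");
 loader->setPropertyValue("ChunkNumber", "1");   // load only the first ...
 loader->setPropertyValue("TotalChunks", "10");  // ... of ten sections of the file
 loader->execute();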
*WIKI*/
#include "MantidDataHandling/LoadEventPreNexus2.h"
#include <algorithm>
#include <sstream>
#include <stdexcept>
#include <functional>
#include <iostream>
#include <set>
#include <vector>
#include <Poco/File.h>
#include <Poco/Path.h>
#include <boost/timer.hpp>
#include "MantidAPI/FileFinder.h"
#include "MantidAPI/LoadAlgorithmFactory.h"
#include "MantidAPI/WorkspaceFactory.h"
#include "MantidDataObjects/EventWorkspace.h"
#include "MantidDataObjects/EventList.h"
#include "MantidKernel/ArrayProperty.h"
#include "MantidKernel/FileValidator.h"
#include "MantidKernel/DateAndTime.h"
#include "MantidKernel/Glob.h"
#include "MantidAPI/FileProperty.h"
#include "MantidKernel/ConfigService.h"
#include "MantidKernel/BinaryFile.h"
#include "MantidKernel/System.h"
#include "MantidKernel/TimeSeriesProperty.h"
#include "MantidKernel/UnitFactory.h"
#include "MantidKernel/DateAndTime.h"
#include "MantidGeometry/IDetector.h"
#include "MantidKernel/CPUTimer.h"
#include "MantidKernel/VisibleWhenProperty.h"
#include "MantidDataObjects/Workspace2D.h"
#include "MantidAPI/MemoryManager.h"
namespace Mantid
{
namespace DataHandling
{
// Register the algorithm into the AlgorithmFactory
DECLARE_ALGORITHM(LoadEventPreNexus2)
DECLARE_LOADALGORITHM(LoadEventPreNexus2)
using namespace Kernel;
using namespace API;
using namespace Geometry;
using boost::posix_time::ptime;
using boost::posix_time::time_duration;
using DataObjects::EventList;
using DataObjects::EventWorkspace;
using DataObjects::EventWorkspace_sptr;
using DataObjects::TofEvent;
using std::cout;
using std::endl;
using std::ifstream;
using std::runtime_error;
using std::stringstream;
using std::string;
using std::vector;
/*
* constants for locating the parameters to use in execution
*/
static const string EVENT_PARAM("EventFilename");
static const string PULSEID_PARAM("PulseidFilename");
static const string MAP_PARAM("MappingFilename");
static const string PID_PARAM("SpectrumList");
static const string PARALLEL_PARAM("UseParallelProcessing");
static const string BLOCK_SIZE_PARAM("LoadingBlockSize");
static const string OUT_PARAM("OutputWorkspace");
/// Default number of items to read in from any of the files.
static const size_t DEFAULT_BLOCK_SIZE = 1000000; // 1,000,000
/// All pixel ids matching this mask are errors.
static const PixelType ERROR_PID = 0x80000000;
/// The maximum possible tof as native type
static const uint32_t MAX_TOF_UINT32 = std::numeric_limits<uint32_t>::max();
/// Conversion factor from units of 100 nanoseconds to microseconds.
static const double TOF_CONVERSION = .1;
/// Conversion factor between picoCoulombs and microAmp*hours.
static const double CURRENT_CONVERSION = 1.e-6 / 3600.;
//-----------------------------------------------------------------------------
// Helper functions for file names and paths
static string getRunnumber(const string &filename) {
// start by trimming the filename
string runnumber(Poco::Path(filename).getBaseName());
if (runnumber.find("neutron") >= string::npos)
return "0";
std::size_t left = runnumber.find("_");
std::size_t right = runnumber.find("_", left+1);
return runnumber.substr(left+1, right-left-1);
}
static string generatePulseidName(string eventfile)
{
size_t start;
string ending;
// normal ending
ending = "neutron_event.dat";
start = eventfile.find(ending);
if (start != string::npos)
return eventfile.replace(start, ending.size(), "pulseid.dat");
// split up event files - yes this is copy and pasted code
ending = "neutron0_event.dat";
start = eventfile.find(ending);
if (start != string::npos)
return eventfile.replace(start, ending.size(), "pulseid0.dat");
ending = "neutron1_event.dat";
start = eventfile.find(ending);
if (start != string::npos)
return eventfile.replace(start, ending.size(), "pulseid1.dat");
return "";
}
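// Illustrative example (hypothetical file name, not from this codebase): for
// "INSTR_1234_neutron_event.dat" this returns "INSTR_1234_pulseid.dat"; for the split
// files "..._neutron0_event.dat" / "..._neutron1_event.dat" it returns
// "..._pulseid0.dat" / "..._pulseid1.dat"; any other ending yields an empty string.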
static string generateMappingfileName(EventWorkspace_sptr &wksp)
{
// get the name of the mapping file as set in the parameter files
std::vector<string> temp = wksp->getInstrument()->getStringParameter("TS_mapping_file");
if (temp.empty())
return "";
string mapping = temp[0];
// Try to get it from the working directory
Poco::File localmap(mapping);
if (localmap.exists())
return mapping;
// Try to get it from the data directories
string dataversion = Mantid::API::FileFinder::Instance().getFullPath(mapping);
if (!dataversion.empty())
return dataversion;
// get a list of all proposal directories
string instrument = wksp->getInstrument()->getName();
Poco::File base("/SNS/" + instrument + "/");
// try short instrument name
if (!base.exists())
{
instrument = Kernel::ConfigService::Instance().getInstrument(instrument).shortName();
base = Poco::File("/SNS/" + instrument + "/");
if (!base.exists())
return "";
}
vector<string> dirs; // poco won't let me reuse temp
base.list(dirs);
// check all of the proposals for the mapping file in the canonical place
const string CAL("_CAL");
const size_t CAL_LEN = CAL.length(); // cache to make life easier
vector<string> files;
for (size_t i = 0; i < dirs.size(); ++i) {
if ( (dirs[i].length() > CAL_LEN)
&& (dirs[i].compare(dirs[i].length() - CAL.length(), CAL.length(), CAL) == 0) ) {
if (Poco::File(base.path() + "/" + dirs[i] + "/calibrations/" + mapping).exists())
files.push_back(base.path() + "/" + dirs[i] + "/calibrations/" + mapping);
}
}
if (files.empty())
return "";
else if (files.size() == 1)
return files[0];
else // just assume that the last one is the right one; this branch should not normally be reached
return *(files.rbegin());
}
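// Note on the search order above (the paths follow the SNS archive convention assumed by
// this code and are not guaranteed to exist elsewhere): the working directory is tried
// first, then the Mantid data search directories, and finally every proposal directory of
// the form /SNS/<INSTRUMENT>/<proposal>_CAL/calibrations/<mapping file>.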
//-----------------------------------------------------------------------------
/*
* Constructor
*/
LoadEventPreNexus2::LoadEventPreNexus2() : Mantid::API::IDataFileChecker(), eventfile(NULL), max_events(0)
{
}
/*
* Destructor
*/
LoadEventPreNexus2::~LoadEventPreNexus2()
{
delete this->eventfile;
}
/*
* Sets documentation strings for this algorithm
*/
void LoadEventPreNexus2::initDocs()
{
this->setWikiSummary("Loads SNS raw neutron event data format and stores it in a [[workspace]] ([[EventWorkspace]] class). ");
this->setOptionalMessage("Loads SNS raw neutron event data format and stores it in a workspace (EventWorkspace class).");
}
//-----------------------------------------------------------------------------
/*
* Initialize the algorithm
*/
void LoadEventPreNexus2::init()
{
// which files to use
vector<string> eventExts;
eventExts.push_back("_neutron_event.dat");
eventExts.push_back("_neutron0_event.dat");
eventExts.push_back("_neutron1_event.dat");
eventExts.push_back("_neutron2_event.dat");
eventExts.push_back("_neutron3_event.dat");
declareProperty(new FileProperty(EVENT_PARAM, "", FileProperty::Load, eventExts),
"The name of the neutron event file to read, including its full or relative path. In most cases, the file typically ends in neutron_event.dat (N.B. case sensitive if running on Linux).");
vector<string> pulseExts;
pulseExts.push_back("_pulseid.dat");
pulseExts.push_back("_pulseid0.dat");
pulseExts.push_back("_pulseid1.dat");
pulseExts.push_back("_pulseid2.dat");
pulseExts.push_back("_pulseid3.dat");
declareProperty(new FileProperty(PULSEID_PARAM, "", FileProperty::OptionalLoad, pulseExts),
"File containing the accelerator pulse information; the filename will be found automatically if not specified.");
declareProperty(new FileProperty(MAP_PARAM, "", FileProperty::OptionalLoad, ".dat"),
"File containing the pixel mapping (DAS pixels to pixel IDs) file (typically INSTRUMENT_TS_YYYY_MM_DD.dat). The filename will be found automatically if not specified.");
// which pixels to load
declareProperty(new ArrayProperty<int64_t>(PID_PARAM),
"A list of individual spectra (pixel IDs) to read, specified as e.g. 10:20. Only used if set.");
BoundedValidator<int> *mustBePositive = new BoundedValidator<int>();
mustBePositive->setLower(1);
declareProperty("ChunkNumber", EMPTY_INT(), mustBePositive,
"If loading the file by sections ('chunks'), this is the section number of this execution of the algorithm.");
declareProperty("TotalChunks", EMPTY_INT(), mustBePositive->clone(),
"If loading the file by sections ('chunks'), this is the total number of sections.");
// TotalChunks is only meaningful if ChunkNumber is set
// Would be nice to be able to restrict ChunkNumber to be <= TotalChunks at validation
setPropertySettings("TotalChunks", new VisibleWhenProperty(this, "ChunkNumber", IS_NOT_DEFAULT));
std::vector<std::string> propOptions;
propOptions.push_back("Auto");
propOptions.push_back("Serial");
propOptions.push_back("Parallel");
declareProperty("UseParallelProcessing", "Auto",new ListValidator(propOptions),
"Use multiple cores for loading the data?\n"
" Auto: Use serial loading for small data sets, parallel for large data sets.\n"
" Serial: Use a single core.\n"
" Parallel: Use all available cores.");
// the output workspace name
declareProperty(new WorkspaceProperty<IEventWorkspace>(OUT_PARAM,"",Direction::Output),
"The name of the workspace that will be created, filled with the read-in data and stored in the [[Analysis Data Service]].");
return;
}
/*
* Execute the algorithm
* 1. check all the inputs
* 2. create an EventWorkspace object
* 3. process events
* 4. set the output
*/
void LoadEventPreNexus2::exec()
{
// 1. Check!
// a. Check 'chunk' properties are valid, if set
const int chunks = getProperty("TotalChunks");
if ( !isEmpty(chunks) && int(getProperty("ChunkNumber")) > chunks )
{
throw std::out_of_range("ChunkNumber cannot be larger than TotalChunks");
}
prog = new Progress(this,0.0,1.0,100);
// b. what spectra (pixel ID's) to load
this->spectra_list = this->getProperty(PID_PARAM);
// c. the event file is needed in case the pulseid filename is empty
string event_filename = this->getPropertyValue(EVENT_PARAM);
string pulseid_filename = this->getPropertyValue(PULSEID_PARAM);
bool throwError = true;
if (pulseid_filename.empty())
{
pulseid_filename = generatePulseidName(event_filename);
if (!pulseid_filename.empty())
{
if (Poco::File(pulseid_filename).exists())
{
this->g_log.information() << "Found pulseid file " << pulseid_filename << std::endl;
throwError = false;
}
else
{
pulseid_filename = "";
}
}
}
// 2. Read input files
prog->report("Loading Pulse ID file");
this->readPulseidFile(pulseid_filename, throwError);
prog->report("Loading Event File");
this->openEventFile(event_filename);
// 3. Create output Workspace
prog->report("Creating output workspace");
// a. prep the output workspace
localWorkspace = EventWorkspace_sptr(new EventWorkspace());
// b. Make sure to initialize. Dummy sizes are fine as arguments; for an event workspace they do not matter
localWorkspace->initialize(1,1,1);
// c. Set the units
localWorkspace->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
localWorkspace->setYUnit("Counts");
// d. Set title
localWorkspace->setTitle("Dummy Title");
// 4. Properties:
// a. Add the run_start property (Use the first pulse as the run_start time)
if (this->num_pulses > 0)
{
// add the start of the run as an ISO8601 date/time string. The start = the first pulse.
// (this is used in LoadInstrument to find the right instrument file to use).
localWorkspace->mutableRun().addProperty("run_start", pulsetimes[0].toISO8601String(), true );
}
// b. determine the run number and add it to the run object
localWorkspace->mutableRun().addProperty("run_number", getRunnumber(event_filename));
// 5. Get the instrument!
prog->report("Loading Instrument");
this->runLoadInstrument(event_filename, localWorkspace);
// 6. load the mapping file
prog->report("Loading Mapping File");
string mapping_filename = this->getPropertyValue(MAP_PARAM);
if (mapping_filename.empty()) {
mapping_filename = generateMappingfileName(localWorkspace);
if (!mapping_filename.empty())
this->g_log.information() << "Found mapping file \"" << mapping_filename << "\"" << std::endl;
}
this->loadPixelMap(mapping_filename);
// 7. Process the events into pixels
this->procEvents(localWorkspace);
// set the sort order on the event lists
if (this->num_pulses > 0 && this->pulsetimesincreasing)
{
const int64_t numberOfSpectra = localWorkspace->getNumberHistograms();
PARALLEL_FOR_NO_WSP_CHECK()
for (int64_t i = 0; i < numberOfSpectra; i++)
{
PARALLEL_START_INTERUPT_REGION
localWorkspace->getEventListPtr(i)->setSortOrder(DataObjects::PULSETIME_SORT);
PARALLEL_END_INTERUPT_REGION
}
PARALLEL_CHECK_INTERUPT_REGION
}
// 8. Save output
this->setProperty<IEventWorkspace_sptr>(OUT_PARAM, localWorkspace);
// 9. Fast frequency sample environment data
this->processImbedLogs();
// -1. Cleanup
delete prog;
return;
} // exec()
/*
* Process embedded sample-environment logs (marked by otherwise-invalid pixel IDs)
*/
void LoadEventPreNexus2::processImbedLogs(){
std::vector<size_t> numpixels;
std::set<PixelType>::iterator pit;
std::map<PixelType, size_t>::iterator mit;
for (pit=this->wrongdetids.begin(); pit!=this->wrongdetids.end(); ++pit){
// a. pixel ID -> index
PixelType pid = *pit;
mit = this->wrongdetidmap.find(pid);
size_t mindex = mit->second;
if (mindex >= this->wrongdetid_pulsetimes.size())
{
g_log.error() << "Wrong Index " << mindex << " for Pixel " << pid << std::endl;
throw std::invalid_argument("Wrong array index for pixel from map");
}
else
{
g_log.information() << "Processing imbed log marked by Pixel " << pid <<
" with size = " << this->wrongdetid_pulsetimes[mindex].size() << std::endl;
}
std::stringstream ssname;
ssname << "Pixel" << pid;
std::string wsname = ssname.str();
// b. Create output workspace2D
// i. Output information in workspaces
size_t nbins = this->wrongdetid_pulsetimes[mindex].size();
if (this->wrongdetid_tofs[mindex].size() != nbins)
{
g_log.error() << "For index " << mindex << ", pulse time vector and TOF vector have different length" << std::endl;
throw std::runtime_error("Fatal programming error");
}
// ii.Create workspace
DataObjects::Workspace2D_sptr ws2d = boost::dynamic_pointer_cast<DataObjects::Workspace2D>(
API::WorkspaceFactory::Instance().create("Workspace2D", 1, nbins, nbins));
// iii. set data
double y0 = 1.0;
size_t numzerodiffs = 0;
size_t numlessdiffs = 0;
size_t index = 0;
for (size_t k = 0; k < nbins; k ++){
// Keep the first point unconditionally; afterwards only keep points whose absolute time
// strictly increases ([k-1] is only read when k > 0, avoiding unsigned underflow at k = 0).
if (k == 0 || this->wrongdetid_abstimes[mindex][k] > this->wrongdetid_abstimes[mindex][k-1]){
y0 = 1.0;
ws2d->dataX(0)[index] = static_cast<double>(this->wrongdetid_abstimes[mindex][k]);
ws2d->dataY(0)[index] = y0;
if (ws2d->dataX(0)[index] < 1.0E-9){
g_log.error() << "Bad cast @ point " << k << " = " << ws2d->dataX(0)[k] << " / " <<
this->wrongdetid_abstimes[mindex][k] << std::endl;
}
index ++;
} else if (this->wrongdetid_abstimes[mindex][k] == this->wrongdetid_abstimes[mindex][k-1]) {
numzerodiffs ++;
} else {
numlessdiffs ++;
}
} // ENDFOR: k
// c. Set up output Workspace2D
std::stringstream ssws;
ssws << "OutputPixel" << pid << "Workspace";
std::string outputtitle = ssws.str();
g_log.notice() << "Pixel " << pid << ": OutputWorkspace(" << outputtitle << ") <-- " << wsname << std::endl;
this->declareProperty(new WorkspaceProperty<DataObjects::Workspace2D>(outputtitle, wsname, Direction::Output),
"Set the output sample environment data record");
this->setProperty(outputtitle, ws2d);
// d. Add this to log
this->addToWorkspaceLog(wsname, mindex);
// z. Check workspace
g_log.notice() << "For log @ " << wsname << ": " << (nbins-numzerodiffs) << " events are individual" << std::endl;
g_log.notice() << "For log @ " << wsname << ": " << numzerodiffs << " events have same pulse time" << std::endl;
g_log.notice() << "For log @ " << wsname << ": " << numlessdiffs << " events are earlier than previous one" << std::endl;
g_log.notice() << "End of Processing This Log " << std::endl << std::endl;
} //ENDFOR pit
}
/*
* Add an embedded-log event series to the workspace run log
* @param logtitle :: name of the time series log to create
* @param mindex :: index of the series in the wrong-detector-ID lists
*/
void LoadEventPreNexus2::addToWorkspaceLog(std::string logtitle, size_t mindex){
// 1. Set data structure and constants
size_t nbins = this->wrongdetid_pulsetimes[mindex].size();
TimeSeriesProperty<double>* property = new TimeSeriesProperty<double>(logtitle);
// 2. Set data
// double y0 = 1.0;
// int msize = property->size();
for (size_t k = 0; k < nbins; k ++){
// a) Add log
property->addValue(this->wrongdetid_pulsetimes[mindex][k], this->wrongdetid_tofs[mindex][k]);
/* Disabled
if (this->wrongdetid_abstimes[mindex][k] > this->wrongdetid_abstimes[mindex][k-1]){
property->addValue(Kernel::DateAndTime(this->wrongdetid_abstimes[mindex][k]), y0);
}
// b) Figure whether it is a good add or not
if (property->size() <= msize){
g_log.error() << "Cannot add entry " << k << ": Time = " <<
this->wrongdetid_abstimes[mindex][k] << ", ";
if (k > 0){
g_log.error() << " Previous time = " << this->wrongdetid_abstimes[mindex][k-1] << std::endl;
g_log.error() << " This is the first entry to add to log!" << std::endl;
*/
// msize = property->size();
} // ENDFOR k
// c) Update log
this->localWorkspace->mutableRun().addProperty(property, false);
g_log.information() << "Size of Property " << property->name() << " = " << property->size() <<
" vs Original Log Size = " << nbins << std::endl;
return;
}
/*
* Some output for debug purposes --- Disabled
* @param doit :: if false, return immediately without printing anything
* @param mindex :: index of the series in the wrong-detector-ID lists
*/
void LoadEventPreNexus2::debugOutput(bool doit, size_t mindex){
if (!doit){
return;
}
size_t nbins = this->wrongdetid_abstimes[mindex].size();
size_t maxcounts = 3;
size_t counts = 0;
std::set<int64_t> deltas;
// i. do statistic
size_t numzerodeltat = 0;
size_t numfreq = 0;
int64_t sumdeltat = 0;
for (size_t k = 1; k < nbins; k ++){
int64_t deltat = this->wrongdetid_abstimes[mindex][k]-this->wrongdetid_abstimes[mindex][k-1];
deltas.insert(deltat);
if (deltat == 0){
numzerodeltat ++;
if (counts < maxcounts)
{
g_log.error() << "Delta T = 0: T = " << this->wrongdetid_abstimes[mindex][k] << std::endl;
counts ++;
}
} else {
numfreq ++;
sumdeltat += deltat;
}
}
double frequency = 1.0/(static_cast<double>(sumdeltat)/static_cast<double>(numfreq)*1.0E-9);
size_t numpt = this->wrongdetid_abstimes[mindex].size();
g_log.notice() << "Frequency = " << frequency << " Number of pixels with zero Delta T = " << numzerodeltat << std::endl;
int64_t t0 = this->wrongdetid_abstimes[mindex][0];
int64_t tf = this->wrongdetid_abstimes[mindex][numpt-1];
g_log.notice() << "T0 = " << t0 << ", Tf = " << tf << " Delta T = " << tf-t0 << " ns"<< std::endl;
g_log.notice() << "Theoretical number of events = " << static_cast<double>(tf-t0)*frequency*1.0E-9 << std::endl;
g_log.notice() << "Number of various delta T = " << deltas.size() << std::endl;
std::set<int64_t>::iterator dtit;
for (dtit=deltas.begin(); dtit!=deltas.end(); ++dtit){
g_log.notice() << *dtit <<", ";
}
g_log.notice() << std::endl;
return;
}
/**
* Returns the name of the property to be considered as the Filename for Load
* @returns A character string containing the file property's name
*/
const char * LoadEventPreNexus2::filePropertyName() const
{
return EVENT_PARAM.c_str();
}
/**
* Do a quick file type check; only the file extension is examined here
* @param filePath :: path of the file including name.
* @param nread :: number of bytes read (unused here)
* @param header :: the first 100 bytes of the file as a union (unused here)
* @return true if the given file is of type which can be loaded by this algorithm
*/
bool LoadEventPreNexus2::quickFileCheck(const std::string& filePath,size_t,const file_header&)
{
std::string ext = extension(filePath);
return (ext.rfind("dat") != std::string::npos);
}
/**
* Checks the file by attempting to open it as a binary file of DasEvent structs
* @param filePath :: name of the file including its path
* @return an integer confidence value indicating how well this algorithm can load the file
*/
int LoadEventPreNexus2::fileCheck(const std::string& filePath)
{
int confidence(0);
try
{
// If this looks like a binary file where the exact file length is a multiple
// of the DasEvent struct then we're probably okay.
// NOTE: Putting this on the stack gives a segfault on Windows when for some reason
// the BinaryFile destructor is called twice! I'm sure there is something I don't understand there
// but heap allocation seems to work so go for that.
BinaryFile<DasEvent> *event_file = new BinaryFile<DasEvent>(filePath);
confidence = 80;
delete event_file;
}
catch(std::runtime_error &)
{
// This BinaryFile constructor throws if the file does not contain an
// exact multiple of the sizeof(DasEvent) objects.
}
return confidence;
}
//-----------------------------------------------------------------------------
/** Load the instrument geometry File
* @param eventfilename :: Used to pick the instrument.
* @param localWorkspace :: MatrixWorkspace in which to put the instrument geometry
*/
void LoadEventPreNexus2::runLoadInstrument(const std::string &eventfilename, MatrixWorkspace_sptr localWorkspace)
{
// determine the instrument parameter file
string instrument = Poco::Path(eventfilename).getFileName();
size_t pos = instrument.rfind("_"); // get rid of 'event.dat'
pos = instrument.rfind("_", pos-1); // get rid of 'neutron'
pos = instrument.rfind("_", pos-1); // get rid of the run number
instrument = instrument.substr(0, pos);
// do the actual work
IAlgorithm_sptr loadInst= createSubAlgorithm("LoadInstrument");
// Now execute the sub-algorithm. Catch and log any error, but don't stop.
loadInst->setPropertyValue("InstrumentName", instrument);
loadInst->setProperty<MatrixWorkspace_sptr> ("Workspace", localWorkspace);
loadInst->setProperty("RewriteSpectraMap", false);
loadInst->executeAsSubAlg();
// Populate the instrument parameters in this workspace - this works around a bug
localWorkspace->populateInstrumentParameters();
}
//-----------------------------------------------------------------------------
/** Turn a pixel id into a "corrected" pixelid and period.
* @param pixel :: raw DAS pixel id; mapped in place to the corrected pixel id (see the illustrative example after this function)
* @param period :: set to the period number derived from the raw pixel id (0 if no mapping file is in use)
*/
inline void LoadEventPreNexus2::fixPixelId(PixelType &pixel, uint32_t &period) const
{
if (!this->using_mapping_file) { // nothing to do here
period = 0;
return;
}
PixelType unmapped_pid = pixel % this->numpixel;
period = (pixel - unmapped_pid) / this->numpixel;
pixel = this->pixelmap[unmapped_pid];
}
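// Illustrative sketch with assumed numbers (not taken from a real instrument): if
// numpixel = 10000 and a mapping file is loaded, a raw DAS pixel id of 23456 gives
//   unmapped_pid = 23456 % 10000           = 3456
//   period       = (23456 - 3456) / 10000  = 2
//   pixel        = pixelmap[3456]
// Without a mapping file the pixel id is left untouched and period is set to 0.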
//-----------------------------------------------------------------------------
/** Process the event file properly.
* @param workspace :: EventWorkspace to write to.
*/
void LoadEventPreNexus2::procEvents(DataObjects::EventWorkspace_sptr & workspace)
{
this->num_error_events = 0;
this->num_good_events = 0;
this->num_ignored_events = 0;
this->num_bad_events = 0;
this->num_wrongdetid_events = 0;
//Default values in the case of no parallel
size_t loadBlockSize = Mantid::Kernel::DEFAULT_BLOCK_SIZE * 2;
shortest_tof = static_cast<double>(MAX_TOF_UINT32) * TOF_CONVERSION;
longest_tof = 0.;
// Determine the number of file blocks to load (also used for progress reporting).
size_t numBlocks = (max_events + loadBlockSize - 1) / loadBlockSize;
// We want to pad out empty pixels.
detid2det_map detector_map;
workspace->getInstrument()->getDetectors(detector_map);
// -------------- Determine processing mode
std::string procMode = getProperty("UseParallelProcessing");
if (procMode == "Serial")
parallelProcessing = false;
else if (procMode == "Parallel")
parallelProcessing = true;
else
{
// Automatic determination. Serial loading runs at roughly 3 million events per second;
// parallel processing speeds this up by about a factor of 3 (say 10 million per second,
// i.e. roughly 7 million extra events per second). This is weighed against a setup/merging
// cost of about 10 seconds per million detectors. A worked example follows this block.
double setUpTime = double(detector_map.size()) * 10e-6;
parallelProcessing = ((double(max_events) / 7e6) > setUpTime);
g_log.debug() << (parallelProcessing ? "Using" : "Not using") << " parallel processing." << std::endl;
}
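// Worked example with assumed numbers (illustrative only, not measured here): for
// ~30,000 detectors the estimated setup cost is 30000 * 1e-5 s = 0.3 s, so parallel
// loading is selected once max_events / 7e6 > 0.3, i.e. for runs above roughly
// 2.1 million events.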
// determine maximum pixel id
detid2det_map::iterator it;
detid_max = 0; // seems like a safe lower bound
for (it = detector_map.begin(); it != detector_map.end(); it++)
if (it->first > detid_max)
detid_max = it->first;
// Pad all the pixels
prog->report("Padding Pixels");
this->pixel_to_wkspindex.reserve(detid_max+1); //starting at zero up to and including detid_max
// Set to zero
this->pixel_to_wkspindex.assign(detid_max+1, 0);
size_t workspaceIndex = 0;
for (it = detector_map.begin(); it != detector_map.end(); it++)
{
if (!it->second->isMonitor())
{
this->pixel_to_wkspindex[it->first] = workspaceIndex;
EventList & spec = workspace->getOrAddEventList(workspaceIndex);
spec.addDetectorID(it->first);
// Start the spectrum number at 1
spec.setSpectrumNo(specid_t(workspaceIndex+1));
workspaceIndex += 1;
}
}
workspace->doneAddingEventLists();
//For slight speed up
loadOnlySomeSpectra = (this->spectra_list.size() > 0);
//Turn the spectra list into a map, for speed of access
for (std::vector<int64_t>::iterator it = spectra_list.begin(); it != spectra_list.end(); it++)
spectraLoadMap[*it] = true;
CPUTimer tim;
// --------------- Create the partial workspaces ------------------------------------------
// Vector of partial workspaces, for parallel processing.
std::vector<EventWorkspace_sptr> partWorkspaces;
std::vector<DasEvent *> buffers;
/// Pointer to the vector of events
typedef std::vector<TofEvent> * EventVector_pt;
/// Bare array of arrays of pointers to the EventVectors
EventVector_pt ** eventVectors;
/// How many threads will we use?
size_t numThreads = 1;
if (parallelProcessing)
numThreads = size_t(PARALLEL_GET_MAX_THREADS);
partWorkspaces.resize(numThreads);
buffers.resize(numThreads);
eventVectors = new EventVector_pt *[numThreads];
// cppcheck-suppress syntaxError
PRAGMA_OMP( parallel for if (parallelProcessing) )
for (int i=0; i < int(numThreads); i++)
{
// This is the partial workspace we are about to create (if in parallel)
EventWorkspace_sptr partWS;
if (parallelProcessing)
{
prog->report("Creating Partial Workspace");
// Create a partial workspace
partWS = EventWorkspace_sptr(new EventWorkspace());
//Make sure to initialize.
partWS->initialize(1,1,1);
// Copy all the spectra numbers and stuff (no actual events to copy though).
partWS->copyDataFrom(*workspace);
// Push it in the array
partWorkspaces[i] = partWS;
}
else
partWS = workspace;
//Allocate the buffers
buffers[i] = new DasEvent[loadBlockSize];
// For each partial workspace, make an array where index = detector ID and value = pointer to the events vector
eventVectors[i] = new EventVector_pt[detid_max+1];
EventVector_pt * theseEventVectors = eventVectors[i];
for (detid_t j=0; j<detid_max+1; j++)
{
size_t wi = pixel_to_wkspindex[j];
// Save a POINTER to the vector<tofEvent>
theseEventVectors[j] = &partWS->getEventList(wi).getEvents();
}
}
g_log.debug() << tim << " to create " << partWorkspaces.size() << " workspaces for parallel loading." << std::endl;
prog->resetNumSteps( numBlocks, 0.1, 0.8);
// ---------------------------------- LOAD THE DATA --------------------------
PRAGMA_OMP( parallel for schedule(dynamic, 1) if (parallelProcessing) )
for (int blockNum=0; blockNum<int(numBlocks); blockNum++)
{
PARALLEL_START_INTERUPT_REGION
// Find the workspace for this particular thread
EventWorkspace_sptr ws;
size_t threadNum = 0;
if (parallelProcessing)
{
threadNum = PARALLEL_THREAD_NUMBER;
ws = partWorkspaces[threadNum];
}
else
ws = workspace;
// Get the buffer (for this thread)
DasEvent * event_buffer = buffers[threadNum];
// Get the speeding-up array of vector<tofEvent> where index = detid.
EventVector_pt * theseEventVectors = eventVectors[threadNum];
// Where to start in the file?
size_t fileOffset = first_event + (loadBlockSize * blockNum);
// May need to reduce size of last (or only) block
size_t current_event_buffer_size =
( blockNum == int(numBlocks-1) ) ? ( max_events - (numBlocks-1)*loadBlockSize ) : loadBlockSize;
// Load this chunk of event data (critical block)
PARALLEL_CRITICAL( LoadEventPreNexus2_fileAccess )
{
current_event_buffer_size = eventfile->loadBlockAt(event_buffer, fileOffset, current_event_buffer_size);
}
// This processes the events. Can be done in parallel!
procEventsLinear(ws, theseEventVectors, event_buffer, current_event_buffer_size, fileOffset);
// Report progress
prog->report("Load Event PreNeXus");
PARALLEL_END_INTERUPT_REGION
}
PARALLEL_CHECK_INTERUPT_REGION
g_log.debug() << tim << " to load the data." << std::endl;
// ---------------------------------- MERGE WORKSPACES BACK TOGETHER --------------------------
if (parallelProcessing)
{
PARALLEL_START_INTERUPT_REGION
prog->resetNumSteps( workspace->getNumberHistograms(), 0.8, 0.95);
size_t memoryCleared = 0;
MemoryManager::Instance().releaseFreeMemory();
// Merge all workspaces, index by index.
PARALLEL_FOR_NO_WSP_CHECK()
for (int iwi=0; iwi<int(workspace->getNumberHistograms()); iwi++)
{
size_t wi = size_t(iwi);
// The output event list.
EventList & el = workspace->getEventList(wi);
el.clear(false);
// How many events will it have?
size_t numEvents = 0;
for (size_t i=0; i<numThreads; i++)
numEvents += partWorkspaces[i]->getEventList(wi).getNumberEvents();
// This will avoid too much copying.
el.reserve(numEvents);
// Now merge the event lists
for (size_t i=0; i<numThreads; i++)
{
EventList & partEl = partWorkspaces[i]->getEventList(wi);
el += partEl.getEvents();
// Free up memory as you go along.
partEl.clear(false);
}
// With TCMalloc, release memory when you accumulate enough to make sense
PARALLEL_CRITICAL( LoadEventPreNexus2_trackMemory )
{
memoryCleared += numEvents;
if (memoryCleared > 10000000) // ten million events = about 160 MB
{
MemoryManager::Instance().releaseFreeMemory();
memoryCleared = 0;
}
}
prog->report("Merging Workspaces");
}
// Final memory release
MemoryManager::Instance().releaseFreeMemory();
g_log.debug() << tim << " to merge workspaces together." << std::endl;
PARALLEL_END_INTERUPT_REGION
}
PARALLEL_CHECK_INTERUPT_REGION
// Delete the buffers for each thread.
for (size_t i=0; i<numThreads; i++)
{
delete [] buffers[i];
delete [] eventVectors[i];
}
delete [] eventVectors;
//delete [] pulsetimes;
prog->resetNumSteps( 3, 0.94, 1.00);
//finalize loading
prog->report("Deleting Empty Lists");
if(loadOnlySomeSpectra)
workspace->deleteEmptyLists();
prog->report("Setting proton charge");
this->setProtonCharge(workspace);
g_log.debug() << tim << " to set the proton charge log." << std::endl;
//Make sure the MRU is cleared
workspace->clearMRU();
//Now, create a default X-vector for histogramming, with just 2 bins.
Kernel::cow_ptr<MantidVec> axis;
MantidVec& xRef = axis.access();
xRef.resize(2);
xRef[0] = shortest_tof - 1; //Just to make sure the bins hold it all
xRef[1] = longest_tof + 1;
workspace->setAllX(axis);
this->pixel_to_wkspindex.clear();
/* Disabled! Final process on wrong detector id events
for (size_t vi = 0; vi < this->wrongdetid_abstimes.size(); vi ++){
std::sort(this->wrongdetid_abstimes[vi].begin(), this->wrongdetid_abstimes[vi].end());
}
*/
// Final message output
g_log.notice() << "Read " << this->num_good_events << " events + "
<< this->num_error_events << " errors"
<< ". Shortest TOF: " << shortest_tof << " microsec; longest TOF: "
<< longest_tof << " microsec." << std::endl;
g_log.notice() << "Bad Events = " << this->num_bad_events << " Events of Wrong Detector = " << this->num_wrongdetid_events << std::endl;
g_log.notice() << "Number of Wrong Detector IDs = " << this->wrongdetids.size() << std::endl;
std::set<PixelType>::iterator wit;
for (wit=this->wrongdetids.begin(); wit!=this->wrongdetids.end(); ++wit){
g_log.notice() << "Wrong Detector ID : " << *wit << std::endl;
}
std::map<PixelType, size_t>::iterator git;
for (git = this->wrongdetidmap.begin(); git != this->wrongdetidmap.end(); ++git){
PixelType tmpid = git->first;
size_t vindex = git->second;
g_log.notice() << "Pixel " << tmpid << ": Total number of events = " << this->wrongdetid_pulsetimes[vindex].size() << std::endl;
}
return;
} // End of procEvents
//-----------------------------------------------------------------------------
/** Linear-version of the procedure to process the event file properly.
* @param workspace :: EventWorkspace to write to.
* @param arrayOfVectors :: For speed up: this is an array, of size detid_max+1, where the
* index is a pixel ID, and the value is a pointer to the vector<tofEvent> in the given EventList.
* @param event_buffer :: The buffer containing the DAS events
* @param current_event_buffer_size :: The length of the given DAS buffer
* @param fileOffset :: Value for an offset into the binary file
*/
void LoadEventPreNexus2::procEventsLinear(DataObjects::EventWorkspace_sptr & /*workspace*/,
std::vector<TofEvent> ** arrayOfVectors, DasEvent * event_buffer,
size_t current_event_buffer_size, size_t fileOffset)
{
//Starting pulse time
DateAndTime pulsetime;
int64_t pulse_i = 0;
int64_t numPulses = static_cast<int64_t>(num_pulses);
if (event_indices.size() < num_pulses)
{