/*WIKI*
The LoadEventPreNeXus algorithm stores data from the pre-nexus neutron event data file in an [[EventWorkspace]]. The default histogram bin boundaries consist of a single bin able to hold all events (in all pixels), and will have their [[units]] set to time-of-flight. Since it is an [[EventWorkspace]], it can be rebinned to finer bins with no loss of data.
=== Optional properties ===
Specific pulse ID and mapping files can be specified if needed; if they are not, they are guessed automatically from the neutron event filename.
*WIKI*/
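/* A minimal usage sketch of driving this algorithm through the framework API.
 * The algorithm name, file path and workspace name below are assumptions for
 * illustration only, and the exact framework calls may vary between Mantid versions.
 *
 *   using namespace Mantid::API;
 *
 *   IAlgorithm_sptr loader = AlgorithmManager::Instance().create("LoadEventPreNexus");
 *   loader->setPropertyValue("EventFilename", "PG3_1234_neutron_event.dat"); // hypothetical file
 *   loader->setPropertyValue("OutputWorkspace", "PG3_1234");
 *   loader->execute();
 *
 *   // Retrieve the loaded EventWorkspace from the Analysis Data Service afterwards:
 *   Workspace_sptr ws = AnalysisDataService::Instance().retrieve("PG3_1234");
 */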
#include "MantidDataHandling/LoadEventPreNexus2.h"
#include <algorithm>
#include <sstream>
#include <stdexcept>
#include <functional>
#include <iostream>
#include <set>
#include <vector>
#include <Poco/File.h>
#include <Poco/Path.h>
#include <boost/timer.hpp>
#include "MantidAPI/FileFinder.h"
#include "MantidAPI/LoadAlgorithmFactory.h"
#include "MantidAPI/WorkspaceFactory.h"
#include "MantidDataObjects/EventWorkspace.h"
#include "MantidDataObjects/EventList.h"
#include "MantidKernel/ArrayProperty.h"
#include "MantidKernel/FileValidator.h"
#include "MantidKernel/DateAndTime.h"
#include "MantidKernel/Glob.h"
#include "MantidAPI/FileProperty.h"
#include "MantidKernel/ConfigService.h"
#include "MantidKernel/BinaryFile.h"
#include "MantidKernel/System.h"
#include "MantidKernel/TimeSeriesProperty.h"
#include "MantidKernel/UnitFactory.h"
#include "MantidKernel/DateAndTime.h"
#include "MantidGeometry/IDetector.h"
#include "MantidKernel/CPUTimer.h"
#include "MantidKernel/VisibleWhenProperty.h"
#include "MantidDataObjects/Workspace2D.h"
namespace Mantid
{
namespace DataHandling
{
// Register the algorithm into the AlgorithmFactory
DECLARE_ALGORITHM(LoadEventPreNexus2)
DECLARE_LOADALGORITHM(LoadEventPreNexus2)
using namespace Kernel;
using namespace API;
using namespace Geometry;
using boost::posix_time::ptime;
using boost::posix_time::time_duration;
using DataObjects::EventList;
using DataObjects::EventWorkspace;
using DataObjects::EventWorkspace_sptr;
using DataObjects::TofEvent;
using std::cout;
using std::endl;
using std::ifstream;
using std::runtime_error;
using std::stringstream;
using std::string;
using std::vector;
/*
* constants for locating the parameters to use in execution
*/
static const string EVENT_PARAM("EventFilename");
static const string PULSEID_PARAM("PulseidFilename");
static const string MAP_PARAM("MappingFilename");
static const string PID_PARAM("SpectrumList");
static const string PARALLEL_PARAM("UseParallelProcessing");
static const string BLOCK_SIZE_PARAM("LoadingBlockSize");
static const string OUT_PARAM("OutputWorkspace");
static const string PULSE_EXT("pulseid.dat");
static const string EVENT_EXT("event.dat");
/// Default number of items to read in from any of the files.
static const size_t DEFAULT_BLOCK_SIZE = 1000000; // one million
/// All pixel ids matching this mask are errors.
static const PixelType ERROR_PID = 0x80000000;
/// The maximum possible tof as native type
static const uint32_t MAX_TOF_UINT32 = std::numeric_limits<uint32_t>::max();
/// Conversion factor from 100-nanosecond units to microseconds.
static const double TOF_CONVERSION = .1;
/// Conversion factor between picoCoulombs and microAmp*hours
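/// (1 pC = 1.0e-12 C and 1 microAmp*hour = 3.6e-3 C, so the factor is 1.0e-12 / 3.6e-3 = 1.e-6 / 3600.)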
static const double CURRENT_CONVERSION = 1.e-6 / 3600.;
//-----------------------------------------------------------------------------
// Static helper functions
static string getRunnumber(const string &filename) {
// start by trimming the filename
string runnumber(Poco::Path(filename).getBaseName());
if (runnumber.find("neutron") == string::npos)
return "0";
std::size_t left = runnumber.find("_");
std::size_t right = runnumber.find("_", left+1);
return runnumber.substr(left+1, right-left-1);
}
static string generatePulseidName(string eventfile)
{
size_t start;
string ending;
// normal ending
ending = "neutron_event.dat";
start = eventfile.find(ending);
if (start != string::npos)
return eventfile.replace(start, ending.size(), "pulseid.dat");
// split up event files - yes this is copy and pasted code
ending = "neutron0_event.dat";
start = eventfile.find(ending);
if (start != string::npos)
return eventfile.replace(start, ending.size(), "pulseid0.dat");
ending = "neutron1_event.dat";
start = eventfile.find(ending);
if (start != string::npos)
return eventfile.replace(start, ending.size(), "pulseid1.dat");
return "";
}
static string generateMappingfileName(EventWorkspace_sptr &wksp)
{
// get the name of the mapping file as set in the parameter files
std::vector<string> temp = wksp->getInstrument()->getStringParameter("TS_mapping_file");
if (temp.empty())
return "";
string mapping = temp[0];
// Try to get it from the working directory
Poco::File localmap(mapping);
if (localmap.exists())
return mapping;
// Try to get it from the data directories
string dataversion = Mantid::API::FileFinder::Instance().getFullPath(mapping);
if (!dataversion.empty())
return dataversion;
// get a list of all proposal directories
string instrument = wksp->getInstrument()->getName();
Poco::File base("/SNS/" + instrument + "/");
// try short instrument name
if (!base.exists())
{
instrument = Kernel::ConfigService::Instance().getInstrument(instrument).shortName();
base = Poco::File("/SNS/" + instrument + "/");
if (!base.exists())
return "";
}
vector<string> dirs; // poco won't let me reuse temp
base.list(dirs);
// check all of the proposals for the mapping file in the canonical place
const string CAL("_CAL");
const size_t CAL_LEN = CAL.length(); // cache to make life easier
vector<string> files;
for (size_t i = 0; i < dirs.size(); ++i) {
if ( (dirs[i].length() > CAL_LEN)
&& (dirs[i].compare(dirs[i].length() - CAL.length(), CAL.length(), CAL) == 0) ) {
if (Poco::File(base.path() + "/" + dirs[i] + "/calibrations/" + mapping).exists())
files.push_back(base.path() + "/" + dirs[i] + "/calibrations/" + mapping);
}
}
if (files.empty())
return "";
else if (files.size() == 1)
return files[0];
else // just assume that the last one is the right one, this should never be fired
return *(files.rbegin());
}
//-----------------------------------------------------------------------------
/*
* Constructor
*/
LoadEventPreNexus2::LoadEventPreNexus2() : Mantid::API::IDataFileChecker(), eventfile(NULL), max_events(0)
{
}
/*
* Destructor
*/
LoadEventPreNexus2::~LoadEventPreNexus2()
{
delete this->eventfile;
}
/*
* Sets documentation strings for this algorithm
*/
void LoadEventPreNexus2::initDocs()
{
this->setWikiSummary("Loads SNS raw neutron event data format and stores it in a [[workspace]] ([[EventWorkspace]] class). ");
this->setOptionalMessage("Loads SNS raw neutron event data format and stores it in a workspace (EventWorkspace class).");
}
//-----------------------------------------------------------------------------
/*
* Initialize the algorithm
*/
void LoadEventPreNexus2::init()
{
// which files to use
declareProperty(new FileProperty(EVENT_PARAM, "", FileProperty::Load, EVENT_EXT),
"The name of the neutron event file to read, including its full or relative path. The file typically ends in neutron_event.dat (N.B. case sensitive if running on Linux).");
declareProperty(new FileProperty(PULSEID_PARAM, "", FileProperty::OptionalLoad, PULSE_EXT),
"File containing the accelerator pulse information; the filename will be found automatically if not specified.");
declareProperty(new FileProperty(MAP_PARAM, "", FileProperty::OptionalLoad, ".dat"),
"File containing the pixel mapping (DAS pixels to pixel IDs) file (typically INSTRUMENT_TS_YYYY_MM_DD.dat). The filename will be found automatically if not specified.");
// which pixels to load
declareProperty(new ArrayProperty<int64_t>(PID_PARAM),
"A list of individual spectra (pixel IDs) to read, specified as e.g. 10:20. Only used if set.");
BoundedValidator<int> *mustBePositive = new BoundedValidator<int>();
mustBePositive->setLower(1);
declareProperty("ChunkNumber", EMPTY_INT(), mustBePositive,
"If loading the file by sections ('chunks'), this is the section number of this execution of the algorithm.");
declareProperty("TotalChunks", EMPTY_INT(), mustBePositive->clone(),
"If loading the file by sections ('chunks'), this is the total number of sections.");
// TotalChunks is only meaningful if ChunkNumber is set
// Would be nice to be able to restrict ChunkNumber to be <= TotalChunks at validation
setPropertySettings("TotalChunks", new VisibleWhenProperty(this, "ChunkNumber", IS_NOT_DEFAULT));
std::vector<std::string> propOptions;
propOptions.push_back("Auto");
propOptions.push_back("Serial");
propOptions.push_back("Parallel");
declareProperty("UseParallelProcessing", "Auto",new ListValidator(propOptions),
"Use multiple cores for loading the data?\n"
" Auto: Use serial loading for small data sets, parallel for large data sets.\n"
" Serial: Use a single core.\n"
" Parallel: Use all available cores.");
// the output workspace name
declareProperty(new WorkspaceProperty<IEventWorkspace>(OUT_PARAM,"",Direction::Output),
"The name of the workspace that will be created, filled with the read-in data and stored in the [[Analysis Data Service]].");
return;
}
/*
* Execute the algorithm
* 1. check all the inputs
* 2. create an EventWorkspace object
* 3. process events
* 4. set the output
*/
void LoadEventPreNexus2::exec()
{
// 1. Check!
// a. Check 'chunk' properties are valid, if set
const int chunks = getProperty("TotalChunks");
if ( !isEmpty(chunks) && int(getProperty("ChunkNumber")) > chunks )
{
throw std::out_of_range("ChunkNumber cannot be larger than TotalChunks");
}
prog = new Progress(this,0.0,1.0,100);
// b. what spectra (pixel ID's) to load
this->spectra_list = this->getProperty(PID_PARAM);
// c. the event filename is needed in case the pulseid filename is empty
string event_filename = this->getPropertyValue(EVENT_PARAM);
string pulseid_filename = this->getPropertyValue(PULSEID_PARAM);
bool throwError = true;
if (pulseid_filename.empty())
{
pulseid_filename = generatePulseidName(event_filename);
if (!pulseid_filename.empty())
{
if (Poco::File(pulseid_filename).exists())
{
this->g_log.information() << "Found pulseid file " << pulseid_filename << std::endl;
throwError = false;
}
else
{
pulseid_filename = "";
}
}
}
// 2. Read input files
prog->report("Loading Pulse ID file");
this->readPulseidFile(pulseid_filename, throwError);
prog->report("Loading Event File");
this->openEventFile(event_filename);
// 3. Create output Workspace
prog->report("Creating output workspace");
// a. prep the output workspace
EventWorkspace_sptr localWorkspace = EventWorkspace_sptr(new EventWorkspace());
// b. Make sure to initialize. Dummy arguments are fine; for an event workspace the sizes do not matter.
localWorkspace->initialize(1,1,1);
// c. Set the units
localWorkspace->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
localWorkspace->setYUnit("Counts");
// d. Set title
localWorkspace->setTitle("Dummy Title");
// 4. Properties:
// a. Add the run_start property (Use the first pulse as the run_start time)
if (this->num_pulses > 0)
{
// add the start of the run as an ISO8601 date/time string. The start is the first pulse.
// (this is used in LoadInstrument to find the right instrument file to use).
localWorkspace->mutableRun().addProperty("run_start", pulsetimes[0].to_ISO8601_string(), true );
}
// b. determine the run number and add it to the run object
localWorkspace->mutableRun().addProperty("run_number", getRunnumber(event_filename));
// 5. Get the instrument!
prog->report("Loading Instrument");
this->runLoadInstrument(event_filename, localWorkspace);
// 6. load the mapping file
prog->report("Loading Mapping File");
string mapping_filename = this->getPropertyValue(MAP_PARAM);
if (mapping_filename.empty()) {
mapping_filename = generateMappingfileName(localWorkspace);
if (!mapping_filename.empty())
this->g_log.information() << "Found mapping file \"" << mapping_filename << "\"" << std::endl;
}
this->loadPixelMap(mapping_filename);
// 7. Process the events into pixels
this->procEvents(localWorkspace);
// 8. Save output
this->setProperty<IEventWorkspace_sptr>(OUT_PARAM, localWorkspace);
// 9. Fast frequency sample environment data
std::vector<size_t> numpixels;
std::set<PixelType>::iterator pit;
std::map<PixelType, size_t>::iterator mit;
for (pit=this->wrongdetids.begin(); pit!=this->wrongdetids.end(); ++pit){
// a. pixel ID -> index
PixelType pid = *pit;
mit = this->wrongdetidmap.find(pid);
size_t mindex = mit->second;
if (mindex >= this->wrongdetid_abstimes.size()){
g_log.error() << "Wrong Index " << mindex << " for Pixel " << pid << std::endl;
throw std::invalid_argument("Wrong array index for pixel from map");
}
// b. Create output workspace2D
// i. Output information in workspaces
size_t nbins = this->wrongdetid_abstimes[mindex].size();
// ii.Create workspace
DataObjects::Workspace2D_sptr ws2d = boost::dynamic_pointer_cast<DataObjects::Workspace2D>(
API::WorkspaceFactory::Instance().create("Workspace2D", 1, nbins, nbins));
// iii. set data
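//      (Y alternates between 0 and 1, so each recorded absolute time appears as an edge when plotted.)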
double y0 = 0;
for (size_t k = 0; k < nbins; k ++){
ws2d->dataX(0)[k] = static_cast<double>(this->wrongdetid_abstimes[mindex][k]);
ws2d->dataY(0)[k] = y0;
y0 = 1.0-y0;
}
// c. Set up output Workspace2D
std::stringstream ssws;
ssws << "OutputPixel" << pid << "Workspace";
std::string outputtitle = ssws.str();
std::stringstream ssname;
ssname << "Pixel" << pid;
std::string wsname = ssname.str();
g_log.notice() << "Pixel " << pid << ": OutputWorkspace(" << outputtitle << ") <-- " << wsname << std::endl;
this->declareProperty(new WorkspaceProperty<DataObjects::Workspace2D>(outputtitle, wsname, Direction::Output),
"Set the output sample environment data record");
this->setProperty(outputtitle, ws2d);
// z. Check workspace
// TODO This will be removed later
g_log.error() << "Test Only! Delete this section later!" << std::endl;
size_t maxcounts = 3;
size_t counts = 0;
std::set<int64_t> deltas;
// i. compute statistics
size_t numzerodeltat = 0;
size_t numfreq = 0;
int64_t sumdeltat = 0;
for (size_t k = 1; k < nbins; k ++){
int64_t deltat = this->wrongdetid_abstimes[mindex][k]-this->wrongdetid_abstimes[mindex][k-1];
deltas.insert(deltat);
if (deltat == 0){
numzerodeltat ++;
if (counts < maxcounts)
{
g_log.error() << "Delta T = 0: T = " << this->wrongdetid_abstimes[mindex][k] << std::endl;
counts ++;
}
} else {
numfreq ++;
sumdeltat += deltat;
}
}
double frequency = 1.0/(static_cast<double>(sumdeltat)/static_cast<double>(numfreq)*1.0E-9);
size_t numpt = this->wrongdetid_abstimes[mindex].size();
g_log.notice() << "Frequency = " << frequency << " Number of pixels with zero Delta T = " << numzerodeltat << std::endl;
int64_t t0 = this->wrongdetid_abstimes[mindex][0];
int64_t tf = this->wrongdetid_abstimes[mindex][numpt-1];
g_log.notice() << "T0 = " << t0 << ", Tf = " << tf << " Delta T = " << tf-t0 << " ns"<< std::endl;
g_log.notice() << "Theoretical number of events = " << static_cast<double>(tf-t0)*frequency*1.0E-9 << std::endl;
g_log.notice() << "Number of various delta T = " << deltas.size() << std::endl;
std::set<int64_t>::iterator dtit;
for (dtit=deltas.begin(); dtit!=deltas.end(); ++dtit){
g_log.notice() << *dtit <<", ";
}
g_log.notice() << std::endl;
} //ENDFOR pit
// -1. Cleanup
delete prog;
return;
} // exec()
/**
* Returns the name of the property to be considered as the Filename for Load
* @returns A character string containing the file property's name
*/
const char * LoadEventPreNexus2::filePropertyName() const
{
return EVENT_PARAM.c_str();
}
/**
* Do a quick file type check by looking at the first 100 bytes of the file
* @param filePath :: path of the file including name.
* @param nread :: no.of bytes read
* @param header :: The first 100 bytes of the file as a union
* @return true if the given file is of type which can be loaded by this algorithm
*/
bool LoadEventPreNexus2::quickFileCheck(const std::string& filePath,size_t,const file_header&)
{
std::string ext = extension(filePath);
return (ext.rfind("dat") != std::string::npos);
}
/**
* Checks the file by opening it and reading a few lines
* @param filePath :: name of the file including its path
* @return an integer confidence value indicating how well this algorithm can load the file
*/
int LoadEventPreNexus2::fileCheck(const std::string& filePath)
{
int confidence(0);
try
{
// If this looks like a binary file where the exact file length is a multiple
// of the DasEvent struct then we're probably okay.
// NOTE: Putting this on the stack gives a segfault on Windows when for some reason
// the BinaryFile destructor is called twice! I'm sure there is something I don't understand there
// but heap allocation seems to work so go for that.
BinaryFile<DasEvent> *event_file = new BinaryFile<DasEvent>(filePath);
confidence = 80;
delete event_file;
}
catch(std::runtime_error &)
{
// This BinaryFile constructor throws if the file does not contain an
// exact multiple of the sizeof(DasEvent) objects.
}
return confidence;
}
//-----------------------------------------------------------------------------
/** Load the instrument geometry File
* @param eventfilename :: Used to pick the instrument.
* @param localWorkspace :: MatrixWorkspace in which to put the instrument geometry
*/
void LoadEventPreNexus2::runLoadInstrument(const std::string &eventfilename, MatrixWorkspace_sptr localWorkspace)
{
// determine the instrument parameter file
string instrument = Poco::Path(eventfilename).getFileName();
size_t pos = instrument.rfind("_"); // get rid of 'event.dat'
pos = instrument.rfind("_", pos-1); // get rid of 'neutron'
pos = instrument.rfind("_", pos-1); // get rid of the run number
instrument = instrument.substr(0, pos);
// do the actual work
IAlgorithm_sptr loadInst= createSubAlgorithm("LoadInstrument");
// Now execute the sub-algorithm. Catch and log any error, but don't stop.
loadInst->setPropertyValue("InstrumentName", instrument);
loadInst->setProperty<MatrixWorkspace_sptr> ("Workspace", localWorkspace);
loadInst->setProperty("RewriteSpectraMap", false);
loadInst->executeAsSubAlg();
// Populate the instrument parameters in this workspace - this works around a bug
localWorkspace->populateInstrumentParameters();
}
//-----------------------------------------------------------------------------
/** Turn a pixel id into a "corrected" pixelid and period.
*
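* @param pixel :: DAS pixel ID; replaced in place by the mapped ("corrected") pixel ID
* @param period :: set to the period number derived from the raw pixel ID (0 if no mapping file is used)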
*/
inline void LoadEventPreNexus2::fixPixelId(PixelType &pixel, uint32_t &period) const
{
if (!this->using_mapping_file) { // nothing to do here
period = 0;
return;
}
PixelType unmapped_pid = pixel % this->numpixel;
period = (pixel - unmapped_pid) / this->numpixel;
pixel = this->pixelmap[unmapped_pid];
}
//-----------------------------------------------------------------------------
/** Process the event file properly.
* @param workspace :: EventWorkspace to write to.
*/
void LoadEventPreNexus2::procEvents(DataObjects::EventWorkspace_sptr & workspace)
{
this->num_error_events = 0;
this->num_good_events = 0;
this->num_ignored_events = 0;
this->num_bad_events = 0;
this->num_wrongdetid_events = 0;
//Default values for the non-parallel case
size_t loadBlockSize = Mantid::Kernel::DEFAULT_BLOCK_SIZE * 2;
shortest_tof = static_cast<double>(MAX_TOF_UINT32) * TOF_CONVERSION;
longest_tof = 0.;
//Initialize progress reporting.
size_t numBlocks = (max_events + loadBlockSize - 1) / loadBlockSize;
// We want to pad out empty pixels.
detid2det_map detector_map;
workspace->getInstrument()->getDetectors(detector_map);
// -------------- Determine processing mode
std::string procMode = getProperty("UseParallelProcessing");
if (procMode == "Serial")
parallelProcessing = false;
else if (procMode == "Parallel")
parallelProcessing = true;
else
{
// Automatic determination. Serial loading runs at roughly 3 million events per second;
// parallel processing speeds that up by about x3 (roughly 10 million events per second, i.e. ~7 million more events per second),
// compared to a setup/merging overhead of about 10 seconds per million detectors.
double setUpTime = double(detector_map.size()) * 10e-6;
parallelProcessing = ((double(max_events) / 7e6) > setUpTime);
g_log.debug() << (parallelProcessing ? "Using" : "Not using") << " parallel processing." << std::endl;
}
// determine maximum pixel id
detid2det_map::iterator it;
detid_max = 0; // seems like a safe lower bound
for (it = detector_map.begin(); it != detector_map.end(); it++)
if (it->first > detid_max)
detid_max = it->first;
// Pad all the pixels
prog->report("Padding Pixels");
this->pixel_to_wkspindex.reserve(detid_max+1); //starting at zero up to and including detid_max
// Set to zero
this->pixel_to_wkspindex.assign(detid_max+1, 0);
size_t workspaceIndex = 0;
for (it = detector_map.begin(); it != detector_map.end(); it++)
{
if (!it->second->isMonitor())
{
this->pixel_to_wkspindex[it->first] = workspaceIndex;
EventList & spec = workspace->getOrAddEventList(workspaceIndex);
spec.addDetectorID(it->first);
// Start the spectrum number at 1
spec.setSpectrumNo(specid_t(workspaceIndex+1));
workspaceIndex += 1;
}
}
workspace->doneAddingEventLists();
//For slight speed up
loadOnlySomeSpectra = (this->spectra_list.size() > 0);
//Turn the spectra list into a map, for speed of access
for (std::vector<int64_t>::iterator it = spectra_list.begin(); it != spectra_list.end(); it++)
spectraLoadMap[*it] = true;
CPUTimer tim;
// --------------- Create the partial workspaces ------------------------------------------
// Vector of partial workspaces, for parallel processing.
std::vector<EventWorkspace_sptr> partWorkspaces;
std::vector<DasEvent *> buffers;
/// Pointer to the vector of events
typedef std::vector<TofEvent> * EventVector_pt;
/// Bare array of arrays of pointers to the EventVectors
EventVector_pt ** eventVectors;
/// How many threads will we use?
size_t numThreads = 1;
if (parallelProcessing)
numThreads = size_t(PARALLEL_GET_MAX_THREADS);
partWorkspaces.resize(numThreads);
buffers.resize(numThreads);
eventVectors = new EventVector_pt *[numThreads];
PRAGMA_OMP( parallel for if (parallelProcessing) )
for (int i=0; i < int(numThreads); i++)
{
// This is the partial workspace we are about to create (if in parallel)
EventWorkspace_sptr partWS;
if (parallelProcessing)
{
prog->report("Creating Partial Workspace");
// Create a partial workspace
partWS = EventWorkspace_sptr(new EventWorkspace());
//Make sure to initialize.
partWS->initialize(1,1,1);
// Copy all the spectra numbers and stuff (no actual events to copy though).
partWS->copyDataFrom(*workspace);
// Push it in the array
partWorkspaces[i] = partWS;
}
else
partWS = workspace;
//Allocate the buffers
buffers[i] = new DasEvent[loadBlockSize];
// For each partial workspace, make an array where index = detector ID and value = pointer to the events vector
eventVectors[i] = new EventVector_pt[detid_max+1];
EventVector_pt * theseEventVectors = eventVectors[i];
for (detid_t j=0; j<detid_max+1; j++)
{
size_t wi = pixel_to_wkspindex[j];
// Save a POINTER to the vector<tofEvent>
theseEventVectors[j] = &partWS->getEventList(wi).getEvents();
}
}
g_log.debug() << tim << " to create " << partWorkspaces.size() << " workspaces for parallel loading." << std::endl;
prog->resetNumSteps( numBlocks, 0.1, 0.8);
// ---------------------------------- LOAD THE DATA --------------------------
PRAGMA_OMP( parallel for schedule(dynamic, 1) if (parallelProcessing) )
for (int blockNum=0; blockNum<int(numBlocks); blockNum++)
{
PARALLEL_START_INTERUPT_REGION
// Find the workspace for this particular thread
EventWorkspace_sptr ws;
size_t threadNum = 0;
if (parallelProcessing)
{
threadNum = PARALLEL_THREAD_NUMBER;
ws = partWorkspaces[threadNum];
}
else
ws = workspace;
// Get the buffer (for this thread)
DasEvent * event_buffer = buffers[threadNum];
// Get the speeding-up array of vector<tofEvent> where index = detid.
EventVector_pt * theseEventVectors = eventVectors[threadNum];
// Where to start in the file?
size_t fileOffset = first_event + (loadBlockSize * blockNum);
// May need to reduce size of last (or only) block
size_t current_event_buffer_size =
( blockNum == int(numBlocks-1) ) ? ( max_events - (numBlocks-1)*loadBlockSize ) : loadBlockSize;
// Load this chunk of event data (critical block)
PARALLEL_CRITICAL( LoadEventPreNexus2_fileAccess )
{
current_event_buffer_size = eventfile->loadBlockAt(event_buffer, fileOffset, current_event_buffer_size);
}
// This processes the events. Can be done in parallel!
procEventsLinear(ws, theseEventVectors, event_buffer, current_event_buffer_size, fileOffset);
// Report progress
prog->report("Load Event PreNeXus");
PARALLEL_END_INTERUPT_REGION
}
PARALLEL_CHECK_INTERUPT_REGION
g_log.debug() << tim << " to load the data." << std::endl;
// ---------------------------------- MERGE WORKSPACES BACK TOGETHER --------------------------
if (parallelProcessing)
{
PARALLEL_START_INTERUPT_REGION
prog->resetNumSteps( workspace->getNumberHistograms(), 0.8, 0.95);
size_t memoryCleared = 0;
MemoryManager::Instance().releaseFreeMemory();
// Merge all workspaces, index by index.
PARALLEL_FOR_NO_WSP_CHECK()
for (int iwi=0; iwi<int(workspace->getNumberHistograms()); iwi++)
{
size_t wi = size_t(iwi);
// The output event list.
EventList & el = workspace->getEventList(wi);
el.clear(false);
// How many events will it have?
size_t numEvents = 0;
for (size_t i=0; i<numThreads; i++)
numEvents += partWorkspaces[i]->getEventList(wi).getNumberEvents();
// This will avoid too much copying.
el.reserve(numEvents);
// Now merge the event lists
for (size_t i=0; i<numThreads; i++)
{
EventList & partEl = partWorkspaces[i]->getEventList(wi);
el += partEl.getEvents();
// Free up memory as you go along.
partEl.clear(false);
}
// With TCMalloc, release memory when you accumulate enough to make sense
PARALLEL_CRITICAL( LoadEventPreNexus2_trackMemory )
{
memoryCleared += numEvents;
if (memoryCleared > 10000000) // ten million events = about 160 MB
{
MemoryManager::Instance().releaseFreeMemory();
memoryCleared = 0;
}
}
prog->report("Merging Workspaces");
}
// Final memory release
MemoryManager::Instance().releaseFreeMemory();
g_log.debug() << tim << " to merge workspaces together." << std::endl;
PARALLEL_END_INTERUPT_REGION
}
PARALLEL_CHECK_INTERUPT_REGION
// Delete the buffers for each thread.
for (size_t i=0; i<numThreads; i++)
{
delete [] buffers[i];
delete [] eventVectors[i];
}
delete [] eventVectors;
//delete [] pulsetimes;
prog->resetNumSteps( 3, 0.94, 1.00);
//finalize loading
prog->report("Deleting Empty Lists");
if(loadOnlySomeSpectra)
workspace->deleteEmptyLists();
prog->report("Setting proton charge");
this->setProtonCharge(workspace);
g_log.debug() << tim << " to set the proton charge log." << std::endl;
//Make sure the MRU is cleared
workspace->clearMRU();
//Now, create a default X-vector for histogramming, with just 2 bins.
Kernel::cow_ptr<MantidVec> axis;
MantidVec& xRef = axis.access();
xRef.resize(2);
xRef[0] = shortest_tof - 1; //Just to make sure the bins hold it all
xRef[1] = longest_tof + 1;
workspace->setAllX(axis);
this->pixel_to_wkspindex.clear();
// Final process on wrong detector id events
for (size_t vi = 0; vi < this->wrongdetid_abstimes.size(); vi ++){
std::sort(this->wrongdetid_abstimes[vi].begin(), this->wrongdetid_abstimes[vi].end());
}
// Final message output
g_log.notice() << "Read " << this->num_good_events << " events + "
<< this->num_error_events << " errors"
<< ". Shortest TOF: " << shortest_tof << " microsec; longest TOF: "
<< longest_tof << " microsec." << std::endl;
g_log.notice() << "Bad Events = " << this->num_bad_events << " Events of Wrong Detector = " << this->num_wrongdetid_events << std::endl;
g_log.notice() << "Number of Wrong Detector IDs = " << this->wrongdetids.size() << std::endl;
std::set<PixelType>::iterator wit;
for (wit=this->wrongdetids.begin(); wit!=this->wrongdetids.end(); ++wit){
g_log.notice() << "Wrong Detector ID : " << *wit << std::endl;
}
std::map<PixelType, size_t>::iterator git;
for (git = this->wrongdetidmap.begin(); git != this->wrongdetidmap.end(); ++git){
PixelType tmpid = git->first;
size_t vindex = git->second;
g_log.notice() << "Pixel " << tmpid << ": Total number of events = " << this->wrongdetid_abstimes[vindex].size() << std::endl;
}
return;
} // End of procEvents
//-----------------------------------------------------------------------------
/** Linear-version of the procedure to process the event file properly.
* @param workspace :: EventWorkspace to write to.
* @param arrayOfVectors :: For speed up: this is an array, of size detid_max+1, where the
* index is a pixel ID, and the value is a pointer to the vector<tofEvent> in the given EventList.
* @param event_buffer :: The buffer containing the DAS events
* @param current_event_buffer_size :: The length of the given DAS buffer
* @param fileOffset :: Value for an offset into the binary file
*/
void LoadEventPreNexus2::procEventsLinear(DataObjects::EventWorkspace_sptr & /*workspace*/,
std::vector<TofEvent> ** arrayOfVectors, DasEvent * event_buffer,
size_t current_event_buffer_size, size_t fileOffset)
{
//Starting pulse time
DateAndTime pulsetime;
int64_t pulse_i = 0;
int64_t numPulses = static_cast<int64_t>(num_pulses);
if (event_indices.size() < num_pulses)
{
g_log.warning() << "Event_indices vector is smaller than the pulsetimes array.\n";
numPulses = static_cast<int64_t>(event_indices.size());
}
size_t local_num_error_events = 0;
size_t local_num_bad_events = 0;
size_t local_num_wrongdetid_events = 0;
size_t local_num_ignored_events = 0;
size_t local_num_good_events = 0;
double local_shortest_tof = static_cast<double>(MAX_TOF_UINT32) * TOF_CONVERSION;
double local_longest_tof = 0.;
std::map<PixelType, size_t> local_pidindexmap;
std::vector<std::vector<int64_t> > local_abstimes;
std::set<PixelType> local_wrongdetids;
// process the individual events
for (size_t i = 0; i < current_event_buffer_size; i++)
{
DasEvent & temp = *(event_buffer + i);
PixelType pid = temp.pid;
bool iswrongdetid = false;
if ((pid & ERROR_PID) == ERROR_PID) // marked as bad
{
local_num_error_events++;
local_num_bad_events ++;
continue;
}
//Convert the pixel ID from DAS pixel to our pixel ID
if (this->using_mapping_file)
{
PixelType unmapped_pid = pid % this->numpixel;
pid = this->pixelmap[unmapped_pid];
}
// Wrong pixel IDs
if (pid > static_cast<PixelType>(detid_max))
{
iswrongdetid = true;
local_num_error_events++;
local_num_wrongdetid_events++;
local_wrongdetids.insert(pid);
}
//Now check whether this pid should be loaded.
if (loadOnlySomeSpectra && !iswrongdetid)
{
std::map<int64_t, bool>::iterator it;
it = spectraLoadMap.find(pid);
if (it == spectraLoadMap.end())
{
//Pixel ID was not found, so the event is being ignored.
local_num_ignored_events++;
continue;
}
}
// work with the good guys
//Find the pulse time for this event index
if (pulse_i < numPulses-1)
{
//This is the total offset into the file
size_t total_i = i + fileOffset;
//Go through event_index until you find where the index increases to encompass the current index. Your pulse = the one before.
while (!((total_i >= event_indices[pulse_i]) && (total_i < event_indices[pulse_i+1])) )
{
pulse_i++;
if (pulse_i >= (numPulses-1))
break;
}
//if (pulsetimes[pulse_i] != pulsetime) std::cout << pulse_i << " at " << pulsetimes[pulse_i] << "\n";
//Save the pulse time at this index for creating those events
pulsetime = pulsetimes[pulse_i];
} // Find pulse time
double tof = static_cast<double>(temp.tof) * TOF_CONVERSION;
if (!iswrongdetid){
// a) Regular events
TofEvent event(tof, pulsetime);
//Find the overall max/min tof
if (tof < local_shortest_tof)
local_shortest_tof = tof;
if (tof > local_longest_tof)
local_longest_tof = tof;
//The addEventQuickly method does not clear the cache, making things slightly faster.
//workspace->getEventList(this->pixel_to_wkspindex[pid]).addEventQuickly(event);
// This is equivalent to workspace->getEventList(this->pixel_to_wkspindex[pid]).addEventQuickly(event);
// But should be faster as a bunch of these calls were cached.
arrayOfVectors[pid]->push_back(event);
// TODO work with period
local_num_good_events++;
} else {
// b) Special events/Wrong detector id
// i. get/add index of the entry in map
std::map<PixelType, size_t>::iterator it;
it = local_pidindexmap.find(pid);
size_t theindex = 0;
if (it == local_pidindexmap.end()){
// Initialize it!
size_t newindex = local_abstimes.size();
local_pidindexmap[pid] = newindex;
std::vector<int64_t> tempvectime;
local_abstimes.push_back(tempvectime);
theindex = newindex;
} else {
// existing
theindex = it->second;
}
// ii. calculate and add absolute time
int64_t abstime = (pulsetime.total_nanoseconds()+int64_t(tof*1000));
local_abstimes[theindex].push_back(abstime);
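// Debug trap for one specific hard-coded absolute time (diagnostic output only)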
if (abstime == 666977093983629966)
g_log.error() << "Check Special Event: Pulse ID = " << pulse_i << " Tof = " << temp.tof << std::endl;
}
} // ENDFOR each event
PARALLEL_CRITICAL( LoadEventPreNexus2_global_statistics )
{
this->num_good_events += local_num_good_events;
this->num_ignored_events += local_num_ignored_events;
this->num_error_events += local_num_error_events;
this->num_bad_events += local_num_bad_events;
this->num_wrongdetid_events += local_num_wrongdetid_events;