//----------------------------------------------------------------------
// Includes
//----------------------------------------------------------------------
#include "MantidDataHandling/LoadEventNexus.h"
#include "MantidGeometry/IInstrument.h"
#include "MantidGeometry/Instrument/CompAssembly.h"
#include "MantidKernel/ConfigService.h"
#include "MantidKernel/DateAndTime.h"
#include "MantidKernel/ThreadPool.h"
#include "MantidKernel/FunctionTask.h"
#include "MantidAPI/FileProperty.h"
#include "MantidKernel/UnitFactory.h"
#include "MantidKernel/Timer.h"
#include "MantidAPI/MemoryManager.h"
#include "MantidAPI/LoadAlgorithmFactory.h" // For the DECLARE_LOADALGORITHM macro
#include "MantidAPI/SpectraAxis.h"
#include <fstream>
#include <sstream>
#include <set>
#include <boost/algorithm/string/replace.hpp>
#include <Poco/File.h>
#include <Poco/Path.h>
using std::endl;
using std::map;
using std::string;
using std::vector;
using namespace ::NeXus;
using namespace Mantid::Geometry;
using namespace Mantid::DataObjects;
using namespace Mantid::Kernel;
namespace Mantid
{
namespace DataHandling
{
DECLARE_ALGORITHM(LoadEventNexus)
DECLARE_LOADALGORITHM(LoadEventNexus)
/// Sets documentation strings for this algorithm
void LoadEventNexus::initDocs()
{
this->setWikiSummary("Loads Event NeXus files (produced by the SNS) and stores the data in an [[EventWorkspace]]. Optionally, you can filter out events falling outside a range of times-of-flight and/or a time interval.");
this->setOptionalMessage("Loads Event NeXus files (produced by the SNS) and stores the data in an EventWorkspace. Optionally, you can filter out events falling outside a range of times-of-flight and/or a time interval.");
}
using namespace Kernel;
using namespace API;
using Geometry::Instrument;
//===============================================================================================
//===============================================================================================
/** This task processes the event data (pixel IDs and times-of-flight) read from one bank
 * and fills the target EventWorkspace; it is queued by the corresponding disk IO task. */
class ProcessBankData : public Task
{
public:
/**
*
* @param alg :: LoadEventNexus
* @param entry_name :: name of the bank
* @param pixelID_to_wi_map :: map pixel ID to Workspace Index
* @param prog :: Progress reporter
* @param scheduler :: ThreadScheduler running this task
* @param event_id :: array with event IDs
* @param event_time_of_flight :: array with event TOFS
* @param numEvents :: how many events in the arrays
* @param startAt :: index of the first event from event_index
* @param event_index_ptr :: ptr to a vector of event index (length of # of pulses)
* @return
*/
ProcessBankData(LoadEventNexus * alg, std::string entry_name, detid2index_map * pixelID_to_wi_map,
Progress * prog, ThreadScheduler * scheduler,
uint32_t * event_id, float * event_time_of_flight,
size_t numEvents, size_t startAt, std::vector<uint64_t> * event_index_ptr)
: Task(),
alg(alg), entry_name(entry_name), pixelID_to_wi_map(pixelID_to_wi_map), prog(prog), scheduler(scheduler),
event_id(event_id), event_time_of_flight(event_time_of_flight), numEvents(numEvents), startAt(startAt),
event_index_ptr(event_index_ptr), event_index(*event_index_ptr)
{
// Cost is approximately proportional to the number of events to process.
m_cost = static_cast<double>(numEvents);
}
//----------------------------------------------------
// Run the data processing
void run()
{
//Local tof limits
double my_shortest_tof, my_longest_tof;
my_shortest_tof = static_cast<double>(std::numeric_limits<uint32_t>::max()) * 0.1;
my_longest_tof = 0.;
prog->report(entry_name + ": precount");
// ---- Pre-counting events per pixel ID ----
if (alg->precount)
{
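// Counting events per pixel first lets each event list reserve() its final size once,
// which avoids repeated reallocations (and memory fragmentation) while events are appended.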
std::map<uint32_t, size_t> counts; // key = pixel ID, value = count
for (size_t i=0; i < numEvents; i++)
{
uint32_t thisId = event_id[i];
std::map<uint32_t, size_t>::iterator map_found = counts.find(thisId);
if (map_found != counts.end())
{
map_found->second++;
}
else
{
counts[thisId] = 1; // First entry
}
if (alg->getCancel()) break; // User cancellation
}
// Now we pre-allocate (reserve) the vectors of events in each pixel counted
std::map<uint32_t, size_t>::iterator pixID;
for (pixID = counts.begin(); pixID != counts.end(); pixID++)
{
//Find the workspace index corresponding to that pixel ID
size_t wi = static_cast<size_t>((*pixelID_to_wi_map)[ pixID->first ]);
// Allocate it
alg->WS->getEventList(wi).reserve( pixID->second );
if (alg->getCancel()) break; // User cancellation
}
}
// Check for cancelled algorithm
if (alg->getCancel())
{ delete [] event_id; delete [] event_time_of_flight; return; }
//Default pulse time (if none are found)
Mantid::Kernel::DateAndTime pulsetime;
// Index into the pulse array
int pulse_i = 0;
// And there are this many pulses
int numPulses = static_cast<int>(alg->pulseTimes.size());
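// event_index[p] holds the index of the first event belonging to pulse p,
// so it should have one entry per pulse time loaded from the proton_charge log.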
if (numPulses > static_cast<int>(event_index.size()))
{
alg->getLogger().warning() << "Entry " << entry_name << "'s event_index vector is smaller than the proton_charge DAS log. This is inconsistent, so we cannot find pulse times for this entry.\n";
//This'll make the code skip looking for any pulse times.
pulse_i = numPulses + 1;
}
prog->report(entry_name + ": filling events");
// The workspace
EventWorkspace_sptr WS = alg->WS;
// Will we need to compress?
bool compress = (alg->compressTolerance >= 0);
// Which workspace indices were touched?
std::set<size_t> usedWI;
//Go through all events in the list
for (std::size_t i = 0; i < numEvents; i++)
{
//------ Find the pulse time for this event index ---------
if (pulse_i < numPulses-1)
{
bool breakOut = false;
//Go through event_index until you find where the index increases to encompass the current index. Your pulse = the one before.
while ( !((i+startAt >= event_index[pulse_i]) && (i+startAt < event_index[pulse_i+1])))
{
pulse_i++;
// Check once every new pulse if you need to cancel (checking on every event might slow things down more)
if (alg->getCancel()) breakOut = true;
if (pulse_i >= (numPulses-1))
break;
}
//Save the pulse time at this index for creating those events
pulsetime = alg->pulseTimes[pulse_i];
// Flag to break out of the event loop without using goto ;)
if (breakOut)
break;
}
//Create the tofevent
double tof = static_cast<double>( event_time_of_flight[i] );
if ((tof >= alg->filter_tof_min) && (tof <= alg->filter_tof_max))
{
//The event TOF passes the filter.
TofEvent event(tof, pulsetime);
//Find the workspace index corresponding to that pixel ID
size_t wi = static_cast<size_t>((*pixelID_to_wi_map)[event_id[i]]);
// Add it to the list at that workspace index
WS->getEventList(wi).addEventQuickly( event );
//Local tof limits
if (tof < my_shortest_tof) { my_shortest_tof = tof;}
if (tof > my_longest_tof) { my_longest_tof = tof;}
// Track all the touched wi
if (compress)
{
if (usedWI.find(wi) == usedWI.end())
usedWI.insert(wi);
}
}
} //(for each event)
//------------ Compress Events ------------------
if (compress)
{
// Do it on all the workspace indices we touched
std::set<size_t>::iterator it;
for (it=usedWI.begin(); it!=usedWI.end(); it++)
{
EventList * el = WS->getEventListPtr(*it);
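// Compress in place: the event list is passed as its own output.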
el->compressEvents(alg->compressTolerance, el);
}
}
//Join back up the tof limits to the global ones
PARALLEL_CRITICAL(tof_limits)
{
//This is not thread safe, so only one thread at a time runs this.
if (my_shortest_tof < alg->shortest_tof) { alg->shortest_tof = my_shortest_tof;}
if (my_longest_tof > alg->longest_tof ) { alg->longest_tof = my_longest_tof;}
}
// Free Memory
delete [] event_id;
delete [] event_time_of_flight;
delete event_index_ptr;
// For Linux with tcmalloc, make sure memory goes back;
// but don't call if more than 15% of memory is still available, since that slows down the loading.
MemoryManager::Instance().releaseFreeMemoryIfAbove(0.85);
}
private:
/// Algorithm being run
LoadEventNexus * alg;
/// NXS path to the bank being processed
std::string entry_name;
/// Map from pixel ID to workspace index
detid2index_map * pixelID_to_wi_map;
/// Progress reporter
Progress * prog;
/// ThreadScheduler running this task
ThreadScheduler * scheduler;
/// Array of event pixel IDs
uint32_t * event_id;
/// Array of event times-of-flight
float * event_time_of_flight;
/// How many events are in the arrays
size_t numEvents;
/// Index of the first event from event_index
size_t startAt;
/// ptr to a vector of event index vs time (length of # of pulses)
std::vector<uint64_t> * event_index_ptr;
/// Reference to the event index vector held by event_index_ptr
std::vector<uint64_t> & event_index;
};
//===============================================================================================
//===============================================================================================
/** This task does the disk IO from loading the NXS file,
* and so will be on a disk IO mutex */
class LoadBankFromDiskTask : public Task
{
public:
//---------------------------------------------------------------------------------------------------
/** Constructor
*
* @param entry_name :: The pathname of the bank to load
* @param pixelID_to_wi_map :: a map where key = pixelID and value = the workspace index to use.
* @param prog :: an optional Progress object
* @param ioMutex :: a mutex shared for all Disk I-O tasks
* @param scheduler :: the ThreadScheduler that runs this task.
*/
LoadBankFromDiskTask(LoadEventNexus * alg, std::string entry_name, detid2index_map * pixelID_to_wi_map,
Progress * prog, Mutex * ioMutex, ThreadScheduler * scheduler)
: Task(),
alg(alg), entry_name(entry_name), pixelID_to_wi_map(pixelID_to_wi_map), prog(prog), scheduler(scheduler)
{
setMutex(ioMutex);
}
//---------------------------------------------------------------------------------------------------
void run()
{
//The vectors we will be filling
std::vector<uint64_t> * event_index_ptr = new std::vector<uint64_t>();
std::vector<uint64_t> & event_index = *event_index_ptr;
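// Ownership of event_index_ptr (and of the event_id / event_time_of_flight arrays
// allocated below) passes to the ProcessBankData task, which deletes them when done.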
// These give the limits in each file as to which events we actually load (when filtering by time).
std::vector<int> load_start(1); //TODO: Should this be size_t?
std::vector<int> load_size(1);
// Data arrays
uint32_t * event_id = NULL;
float * event_time_of_flight = NULL;
bool loadError = false ;
prog->report(entry_name + ": load from disk");
// Open the file
::NeXus::File file(alg->m_filename);
try
{
file.openGroup("entry", "NXentry");
//Open the bankN_event group
file.openGroup(entry_name, "NXevent_data");
// Get the event_index (a list of size of # of pulses giving the index in the event list for that pulse)
file.openData("event_index");
//Must be uint64
if (file.getInfo().type == ::NeXus::UINT64)
file.getData(event_index);
else
{
alg->getLogger().warning() << "Entry " << entry_name << "'s event_index field is not UINT64! It will be skipped.\n";
loadError = true;
}
file.closeData();
// Look for the sign that the bank is empty
if (event_index.size()==1)
{
if (event_index[0] == 0)
{
//One entry, only zero. This means NO events in this bank.
loadError = true;
alg->getLogger().debug() << "Bank " << entry_name << " is empty.\n";
}
}
if (event_index.size() != alg->pulseTimes.size())
{
alg->getLogger().debug() << "Bank " << entry_name << " has a mismatch between the number of event_index entries and the number of pulse times.\n";
}
if (!loadError)
{
bool old_nexus_file_names = false;
// Get the list of pixel ID's
try
{
file.openData("event_id");
}
catch (::NeXus::Exception& )
{
//Older files (before Nov 5, 2010) used this field.
file.openData("event_pixel_id");
old_nexus_file_names = true;
}
// By default, use all available indices
uint64_t start_event = 0;
::NeXus::Info id_info = file.getInfo();
uint64_t stop_event = static_cast<uint64_t>(id_info.dims[0]);
//TODO: Handle the time filtering by changing the start/end offsets.
for (size_t i=0; i < alg->pulseTimes.size(); i++)
{
if (alg->pulseTimes[i] >= alg->filter_time_start)
{
start_event = event_index[i];
break; // stop looking
}
}
for (size_t i=0; i < alg->pulseTimes.size(); i++)
{
if (alg->pulseTimes[i] > alg->filter_time_stop)
{
stop_event = event_index[i];
break;
}
}
// Make sure it is within range
if (stop_event > static_cast<uint64_t>(id_info.dims[0]))
stop_event = id_info.dims[0];
alg->getLogger().debug() << entry_name << ": start_event " << start_event << " stop_event "<< stop_event << std::endl;
// These are the arguments to getSlab()
load_start[0] = static_cast<int>(start_event);
load_size[0] = static_cast<int>(stop_event - start_event);
if ((load_size[0] > 0) && (load_start[0]>=0) )
{
// Now we allocate the required arrays
event_id = new uint32_t[load_size[0]];
event_time_of_flight = new float[load_size[0]];
// Check that the required space is there in the file.
if (id_info.dims[0] < load_size[0]+load_start[0])
{
alg->getLogger().warning() << "Entry " << entry_name << "'s event_id field is too small (" << id_info.dims[0]
<< ") to load the desired data size (" << load_size[0]+load_start[0] << ").\n";
loadError = true;
}
if (alg->getCancel()) loadError = true; //To allow cancelling the algorithm
if (!loadError)
{
//Must be uint32
if (id_info.type == ::NeXus::UINT32)
file.getSlab(event_id, load_start, load_size);
else
{
alg->getLogger().warning() << "Entry " << entry_name << "'s event_id field is not UINT32! It will be skipped.\n";
loadError = true;
}
file.closeData();
}
if (alg->getCancel()) loadError = true; //To allow cancelling the algorithm
if (!loadError)
{
// Get the list of event_time_of_flight's
if (!old_nexus_file_names)
file.openData("event_time_offset");
else
file.openData("event_time_of_flight");
// Check that the required space is there in the file.
::NeXus::Info tof_info = file.getInfo();
if (tof_info.dims[0] < load_size[0]+load_start[0])
{
alg->getLogger().warning() << "Entry " << entry_name << "'s event_time_offset field is too small to load the desired data.\n";
loadError = true;
}
//Check that the type is what it is supposed to be
if (tof_info.type == ::NeXus::FLOAT32)
file.getSlab(event_time_of_flight, load_start, load_size);
else
{
alg->getLogger().warning() << "Entry " << entry_name << "'s event_time_offset field is not FLOAT32! It will be skipped.\n";
loadError = true;
}
if (!loadError)
{
std::string units;
file.getAttr("units", units);
if (units != "microsecond")
{
alg->getLogger().warning() << "Entry " << entry_name << "'s event_time_offset field's units are not microsecond. It will be skipped.\n";
loadError = true;
}
file.closeData();
} //no error
} //no error
} // Size is at least 1
else
{
// Found a size that was 0 or less; stop processing
loadError=true;
}
} //no error
} // try block
catch (std::exception & e)
{
alg->getLogger().error() << "Error while loading bank " << entry_name << ":" << std::endl;
alg->getLogger().error() << e.what() << std::endl;
loadError = true;
}
catch (...)
{
alg->getLogger().error() << "Unspecified error while loading bank " << entry_name << std::endl;
loadError = true;
}
//Close up the file even if errors occurred.
file.closeGroup();
file.close();
//Abort if anything failed
if (loadError)
{
prog->reportIncrement(2, entry_name + ": skipping");
delete [] event_id;
delete [] event_time_of_flight;
delete event_index_ptr;
return;
}
// No error? Launch a new task to process that data.
size_t numEvents = load_size[0];
size_t startAt = load_start[0];
ProcessBankData * newTask = new ProcessBankData(alg, entry_name,pixelID_to_wi_map,prog,scheduler,
event_id,event_time_of_flight, numEvents, startAt, event_index_ptr);
scheduler->push(newTask);
}
private:
/// Algorithm being run
LoadEventNexus * alg;
/// NXS path of the bank to load
std::string entry_name;
/// Map from pixel ID to workspace index
detid2index_map * pixelID_to_wi_map;
/// Progress reporter
Progress * prog;
/// ThreadScheduler that runs this task
ThreadScheduler * scheduler;
};
//===============================================================================================
//===============================================================================================
/// Empty default constructor
LoadEventNexus::LoadEventNexus() : IDataFileChecker()
{}
/**
* Do a quick file type check by looking at the first 100 bytes of the file
* @param filePath :: path of the file including name.
* @param nread :: number of bytes read
* @param header :: The first 100 bytes of the file as a union
* @return true if the given file is of type which can be loaded by this algorithm
*/
bool LoadEventNexus::quickFileCheck(const std::string& filePath,size_t nread, const file_header& header)
{
std::string ext = this->extension(filePath);
// If the extension is nxs then give it a go
if( ext.compare("nxs") == 0 ) return true;
// If not then let's see if it is a HDF file by checking for the magic cookie
if ( nread >= sizeof(int32_t) && (ntohl(header.four_bytes) == g_hdf_cookie) ) return true;
return false;
}
/**
* Checks the file by opening it and reading a few entries
* @param filePath :: name of the file including its path
* @return an integer confidence value indicating how well this algorithm can load the file
*/
int LoadEventNexus::fileCheck(const std::string& filePath)
{
int confidence(0);
try
{
// FIXME: We need a better test
::NeXus::File file = ::NeXus::File(filePath);
// Open the base group called 'entry'
file.openGroup("entry", "NXentry");
// If all this succeeded then we'll assume this is an SNS Event NeXus file
confidence = 80;
}
catch(::NeXus::Exception&)
{
}
return confidence;
}
/// Initialisation method.
void LoadEventNexus::init()
{
std::vector<std::string> exts;
exts.push_back("_event.nxs");
exts.push_back(".nxs");
this->declareProperty(new FileProperty("Filename", "", FileProperty::Load, exts),
"The name (including its full or relative path) of the Nexus file to\n"
"attempt to load. The file extension must either be .nxs or .NXS" );
this->declareProperty(
new WorkspaceProperty<IEventWorkspace>("OutputWorkspace", "", Direction::Output),
"The name of the output EventWorkspace in which to load the EventNexus file." );
declareProperty(
new PropertyWithValue<double>("FilterByTof_Min", EMPTY_DBL(), Direction::Input),
"Optional: To exclude events that do not fall within a range of times-of-flight.\n"\
"This is the minimum accepted value in microseconds." );
declareProperty(
new PropertyWithValue<double>("FilterByTof_Max", EMPTY_DBL(), Direction::Input),
"Optional: To exclude events that do not fall within a range of times-of-flight.\n"\
"This is the maximum accepted value in microseconds." );
declareProperty(
new PropertyWithValue<double>("FilterByTime_Start", EMPTY_DBL(), Direction::Input),
"Optional: To only include events after the provided start time, in seconds (relative to the start of the run).");
declareProperty(
new PropertyWithValue<double>("FilterByTime_Stop", EMPTY_DBL(), Direction::Input),
"Optional: To only include events before the provided stop time, in seconds (relative to the start of the run).");
declareProperty(
new PropertyWithValue<string>("BankName", "", Direction::Input),
"Optional: To only include events from one bank. Any bank whose name does not match the given string will have no events.");
declareProperty(
new PropertyWithValue<bool>("SingleBankPixelsOnly", true, Direction::Input),
"Optional: Only applies if you specified a single bank to load with BankName.\n"
"Only pixels in the specified bank will be created if true; all of the instrument's pixels will be created otherwise.");
declareProperty(
new PropertyWithValue<bool>("LoadMonitors", false, Direction::Input),
"Load the monitors from the file (optional, default False).");
declareProperty(
new PropertyWithValue<bool>("Precount", false, Direction::Input),
"Pre-count the number of events in each pixel before allocating memory (optional, default False). \n"
"This can significantly reduce memory use and memory fragmentation; it may also speed up loading.");
declareProperty(
new PropertyWithValue<double>("CompressTolerance", -1.0, Direction::Input),
"Run CompressEvents while loading (optional; leave blank or negative to skip compression). \n"
"This specifies the tolerance to use (in microseconds) when compressing.");
}
//------------------------------------------------------------------------------------------------
/** Executes the algorithm. Reading in the file and creating and populating
* the output workspace
*/
void LoadEventNexus::exec()
{
// Retrieve the filename from the properties
m_filename = getPropertyValue("Filename");
precount = getProperty("Precount");
compressTolerance = getProperty("CompressTolerance");
loadlogs = true;
//Get the limits to the filter
filter_tof_min = getProperty("FilterByTof_Min");
filter_tof_max = getProperty("FilterByTof_Max");
if ( (filter_tof_min == EMPTY_DBL()) && (filter_tof_max == EMPTY_DBL()))
{
//Nothing specified. Include everything
filter_tof_min = -1e20;
filter_tof_max = +1e20;
}
else if ( (filter_tof_min != EMPTY_DBL()) && (filter_tof_max != EMPTY_DBL()))
{
//Both specified. Keep these values
}
else
throw std::invalid_argument("You must specify both the min and max of time of flight to filter, or neither!");
// Check to see if the monitors need to be loaded later
bool load_monitors = this->getProperty("LoadMonitors");
// Create the output workspace
WS = EventWorkspace_sptr(new EventWorkspace());
//Make sure to initialize.
// We can use dummy numbers for the arguments; for an event workspace it doesn't matter
WS->initialize(1,1,1);
// Set the units
WS->getAxis(0)->unit() = UnitFactory::Instance().create("TOF");
WS->setYUnit("Counts");
// Create a default "Universal" goniometer in the Run object
WS->mutableRun().getGoniometer().makeUniversalGoniometer();
//Initialize progress reporting.
int reports = 3;
if (load_monitors)
reports++;
Progress prog(this,0.0,0.3, reports);
// The run_start will be loaded from the pulse times.
DateAndTime run_start(0,0);
if (loadlogs)
{
// --------------------- Load DAS Logs -----------------
prog.doReport("Loading DAS logs");
//The pulse times will be empty if not specified in the DAS logs.
pulseTimes.clear();
IAlgorithm_sptr loadLogs = createSubAlgorithm("LoadNexusLogs");
// Now execute the sub-algorithm. Catch and log any error, but don't stop.
try
{
g_log.information() << "Loading logs from NeXus file..." << endl;
loadLogs->setPropertyValue("Filename", m_filename);
loadLogs->setProperty<MatrixWorkspace_sptr> ("Workspace", WS);
loadLogs->execute();
//If successful, we can try to load the pulse times
Kernel::TimeSeriesProperty<double> * log = dynamic_cast<Kernel::TimeSeriesProperty<double> *>( WS->mutableRun().getProperty("proton_charge") );
std::vector<Kernel::DateAndTime> temp = log->timesAsVector();
for (size_t i =0; i < temp.size(); i++)
pulseTimes.push_back( temp[i] );
// Use the first pulse as the run_start time.
if (temp.size() > 0)
{
run_start = WS->getFirstPulseTime();
// add the start of the run as an ISO8601 date/time string. The start = first non-zero time.
// (this is used in LoadInstrumentHelper to find the right instrument file to use).
WS->mutableRun().addProperty("run_start", run_start.to_ISO8601_string(), true );
}
else
g_log.warning() << "Empty proton_charge sample log. You will not be able to filter by time.\n";
}
catch (...)
{
g_log.error() << "Error while loading Logs from SNS Nexus. Some sample logs may be missing." << std::endl;
}
}
else
{
g_log.information() << "Skipping the loading of sample logs!" << endl;
}
prog.report("Loading instrument");
//Load the instrument
runLoadInstrument(m_filename, WS);
if (!this->instrument_loaded_correctly)
throw std::runtime_error("Instrument was not initialized correctly! Loading cannot continue.");
if (load_monitors)
{
prog.report("Loading monitors");
this->runLoadMonitors();
}
// top level file information
::NeXus::File file(m_filename);
//Start with the base entry
file.openGroup("entry", "NXentry");
//Now we want to go through all the bankN_event entries
vector<string> bankNames;
map<string, string> entries = file.getEntries();
map<string,string>::const_iterator it = entries.begin();
for (; it != entries.end(); it++)
{
std::string entry_name(it->first);
std::string entry_class(it->second);
if ((entry_class == "NXevent_data"))
{
bankNames.push_back( entry_name );
}
}
//Close up the file
file.closeGroup();
file.close();
// --------- Loading only one bank ? ----------------------------------
std::string onebank = getProperty("BankName");
bool doOneBank = (onebank != "");
bool SingleBankPixelsOnly = getProperty("SingleBankPixelsOnly");
if (doOneBank)
{
bool foundIt = false;
for (std::vector<string>::iterator it=bankNames.begin(); it!= bankNames.end(); it++)
{
if (*it == ( onebank + "_events") )
{
foundIt = true;
break;
}
}
if (!foundIt)
{
throw std::invalid_argument("No entry named '" + onebank + "_events'" + " was found in the .NXS file.\n");
}
bankNames.clear();
bankNames.push_back( onebank + "_events" );
if( !SingleBankPixelsOnly ) onebank = ""; // Marker to load all pixels
}
else
{
onebank = "";
}
// Delete the output workspace name if it existed
std::string outName = getPropertyValue("OutputWorkspace");
if (AnalysisDataService::Instance().doesExist(outName))
AnalysisDataService::Instance().remove( outName );
prog.report("Initializing all pixels");
//----------------- Pad Empty Pixels -------------------------------
bool indexBySpectrum(false);
// Create the required spectra mapping so that the workspace knows what to pad to
createSpectraMapping(m_filename, WS, onebank);
WS->padSpectra();
indexBySpectrum=true;
// --------------------------- Time filtering ------------------------------------
double filter_time_start_sec, filter_time_stop_sec;
filter_time_start_sec = getProperty("FilterByTime_Start");
filter_time_stop_sec = getProperty("FilterByTime_Stop");
//Default to ALL pulse times
bool is_time_filtered = false;
filter_time_start = Kernel::DateAndTime::minimum();
filter_time_stop = Kernel::DateAndTime::maximum();
if (pulseTimes.size() > 0)
{
//If not specified, use the limits of doubles. Otherwise, convert from seconds to absolute PulseTime
if (filter_time_start_sec != EMPTY_DBL())
{
filter_time_start = run_start + filter_time_start_sec;
is_time_filtered = true;
}
if (filter_time_stop_sec != EMPTY_DBL())
{
filter_time_stop = run_start + filter_time_stop_sec;
is_time_filtered = true;
}
//Silly values?
if (filter_time_stop < filter_time_start)
throw std::invalid_argument("Your filter for time's Stop value is smaller than the Start value.");
}
//Count the limits to time of flight
shortest_tof = static_cast<double>(std::numeric_limits<uint32_t>::max()) * 0.1;
longest_tof = 0.;
Progress * prog2 = new Progress(this,0.3,1.0, bankNames.size()*3);
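// Each bank reports progress three times: load from disk, precount, and filling events.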
//This map will be used to find the workspace index
//@todo: Move to always index by spectrum so that we don't have to do this
detid2index_map * pixelID_to_wi_map(NULL);
if( indexBySpectrum )
{
pixelID_to_wi_map = WS->getSpectrumToWorkspaceIndexMap();
}
else
{
pixelID_to_wi_map = WS->getDetectorIDToWorkspaceIndexMap(false);
}
// Make the thread pool
ThreadScheduler * scheduler = new ThreadSchedulerLargestCost();
ThreadPool pool(scheduler);
Mutex * diskIOMutex = new Mutex();
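// Each bank gets a LoadBankFromDiskTask; these share diskIOMutex so only one task reads
// the file at a time, and each one queues a ProcessBankData task on the same scheduler
// to do the in-memory processing in parallel.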
for (int i=0; i < static_cast<int>(bankNames.size()); i++)
{
// We make tasks for loading
pool.schedule( new LoadBankFromDiskTask(this,bankNames[i],pixelID_to_wi_map, prog2, diskIOMutex, scheduler) );
}
// Start and end all threads
pool.joinAll();
delete diskIOMutex;
delete prog2;
//Don't need the map anymore.
delete pixelID_to_wi_map;
if (is_time_filtered)
{
//Now filter out the run, using the DateAndTime type.
WS->mutableRun().filterByTime(filter_time_start, filter_time_stop);
}
//Info reporting
g_log.information() << "Read " << WS->getNumberEvents() << " events"
<< ". Shortest TOF: " << shortest_tof << " microsec; longest TOF: "
<< longest_tof << " microsec." << std::endl;
//Now, create a default X-vector for histogramming, with just 2 bins.
Kernel::cow_ptr<MantidVec> axis;
MantidVec& xRef = axis.access();
xRef.resize(2);
xRef[0] = shortest_tof - 1; //Just to make sure the bins hold it all
xRef[1] = longest_tof + 1;
//Set the binning axis using this.
WS->setAllX(axis);
// set more properties on the workspace
this->loadEntryMetadata("entry");
//Save output
this->setProperty<IEventWorkspace_sptr>("OutputWorkspace", WS);
// Clear any large vectors to free up memory.
this->pulseTimes.clear();
// Some memory feels like it sticks around (on Linux). Free it.
MemoryManager::Instance().releaseFreeMemory();
return;
}
/** Load the run number and other meta data from the given bank */
void LoadEventNexus::loadEntryMetadata(const std::string &entry_name) {
// Open the file
::NeXus::File file(m_filename);
file.openGroup(entry_name, "NXentry");
// get the title
file.openData("title");
if (file.getInfo().type == ::NeXus::CHAR) {
string title = file.getStrData();
if (!title.empty())
WS->setTitle(title);
}
file.closeData();
// TODO get the run number
file.openData("run_number");
string run("");
if (file.getInfo().type == ::NeXus::CHAR) {
run = file.getStrData();
}
if (!run.empty()) {
WS->mutableRun().addProperty("run_number", run);
}
file.closeData();
// close the file
file.close();
}
//-----------------------------------------------------------------------------
/** Load the instrument geometry File
* @param nexusfilename :: Used to pick the instrument.
* @param localWorkspace :: MatrixWorkspace in which to put the instrument geometry
*/
void LoadEventNexus::runLoadInstrument(const std::string &nexusfilename, MatrixWorkspace_sptr localWorkspace)
{
this->instrument_loaded_correctly = false;
string instrument;
// Get the instrument name
::NeXus::File nxfile(nexusfilename);
//Start with the base entry
nxfile.openGroup("entry", "NXentry");
// Open the instrument
nxfile.openGroup("instrument", "NXinstrument");
nxfile.openData("name");
instrument = nxfile.getStrData();
g_log.debug() << "Instrument name read from NeXus file is " << instrument << std::endl;
if (instrument.compare("POWGEN3") == 0) // hack for powgen b/c of bad long name
instrument = "POWGEN";
// Now let's close the file as we don't need it anymore to load the instrument.
nxfile.close();
// do the actual work
IAlgorithm_sptr loadInst= createSubAlgorithm("LoadInstrument");
// Now execute the sub-algorithm. Catch and log any error, but don't stop.
bool executionSuccessful(true);
try
{
loadInst->setPropertyValue("InstrumentName", instrument);
loadInst->setProperty<MatrixWorkspace_sptr> ("Workspace", localWorkspace);
loadInst->setProperty("RewriteSpectraMap", false);
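// RewriteSpectraMap is left false so LoadInstrument does not overwrite the spectra-detector
// mapping; exec() builds its own mapping via createSpectraMapping() instead.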
loadInst->execute();
// Populate the instrument parameters in this workspace - this works around a bug
localWorkspace->populateInstrumentParameters();
} catch (std::invalid_argument& e)
{
g_log.information() << "Invalid argument to LoadInstrument sub-algorithm : " << e.what() << std::endl;
executionSuccessful = false;