9 #include <QtCore/QDateTime>
15 #include <tr1/functional>
23 #include "cass_version.h"
29 using tr1::placeholders::_1;
30 using tr1::placeholders::_2;
55 uint32_t timet(static_cast<uint32_t>((eventid & 0xFFFFFFFF00000000) >> 32));
56 uint32_t eventFiducial =
static_cast<uint32_t
>((eventid & 0x00000000FFFFFFFF) >> 8);
57 std::stringstream groupname;
61 groupname << time.
toString(Qt::ISODate).toStdString() <<
"_"<<eventFiducial;
63 groupname <<
"UnknownTime_"<<
eventid;
66 return H5Gcreate1(calibcycle, groupname.str().c_str(),0);
92 _fh.writeString(
string(
"Written with cass version '" + VERSION +
"'"),
102 _baseGroupname = name;
120 return _fh.currentFileSize();
129 const uint32_t &options(entry.
options);
131 const string &name(entry.
name);
135 const string dataName(_baseGroupname +
"/" + gname +
"/" + name);
144 _fh.writeScalar(
data.front(),dataName);
150 _fh.writeArray(
data.storage(), xaxis.nBins, dataName);
151 _fh.writeScalarAttribute(xaxis.low,
"xLow", dataName);
152 _fh.writeScalarAttribute(xaxis.up,
"xUp", dataName);
159 _fh.writeMatrix(
data.storage(),
data.shape(), dataName, options);
160 _fh.writeScalarAttribute(xaxis.low,
"xLow", dataName);
161 _fh.writeScalarAttribute(xaxis.up,
"xUp", dataName);
162 _fh.writeScalarAttribute(yaxis.low,
"yLow", dataName);
163 _fh.writeScalarAttribute(yaxis.up,
"yUp", dataName);
167 throw runtime_error(
"WriteEntry::operator(): data dimension '" +
204 _writeAttributes = std::tr1::bind(&AppendEntry::writeAttib,
this,_1,_2);
213 const uint32_t &options(entry.
options);
215 const string &name(entry.
name);
219 const string dataName(_baseGroupname +
"/" + gname +
"/" + name);
224 vector<size_t> shape(
data.np_shape());
226 _fh.appendData(
data.storage(),theShape,dataName,options);
227 _writeAttributes(
data,dataName);
241 _writeAttributes = std::tr1::bind(&AppendEntry::writeNothing,
this,_1,_2);
243 vector<CASSEvent::id_t> evtid(1,_id);
244 _fh.appendData(evtid,shape,
"eventIds");
267 _fh.writeScalarAttribute(xaxis.low,
"xLow", dsetName);
268 _fh.writeScalarAttribute(xaxis.up,
"xUp", dsetName);
275 _fh.writeScalarAttribute(xaxis.low,
"xLow", dsetName);
276 _fh.writeScalarAttribute(xaxis.up,
"xUp", dsetName);
277 _fh.writeScalarAttribute(yaxis.low,
"yLow", dsetName);
278 _fh.writeScalarAttribute(yaxis.up,
"yUp", dsetName);
282 throw runtime_error(
"AppendEntry::writeAttrib(): data dimension '" +
329 int compresslevel(s.
value(
"CompressLevel",2).toBool());
330 htri_t compavailable (H5Zfilter_avail(H5Z_FILTER_DEFLATE));
331 unsigned int filter_info;
332 H5Zget_filter_info(H5Z_FILTER_DEFLATE, &filter_info);
333 if (!compavailable ||
334 !(filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) ||
335 !(filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED))
336 throw logic_error(
"pp1002::loadSettings(): HDF5 library doesn't allow compression. Please use a hdf5 library that allows compression.");
338 bool allDepsAreThere(
true);
340 for (
int i = 0; i <
size; ++i)
343 string procname(s.
value(
"Name",
"Unknown").toString().toStdString());
344 if (procname ==
"Unknown")
347 allDepsAreThere = proc && allDepsAreThere;
348 string groupname(s.
value(
"GroupName",
"/").toString().toStdString());
355 for (
int i = 0; i <
size; ++i)
358 string procname(s.
value(
"Name",
"Unknown").toString().toStdString());
359 if (procname ==
"Unknown")
362 allDepsAreThere = proc && allDepsAreThere;
363 string groupname(s.
value(
"GroupName",
"/").toString().toStdString());
370 if (!(ret && allDepsAreThere))
391 bool multipleevents(s.
value(
"WriteMultipleEventsInOneFile",
false).toBool());
392 bool singleDataset(s.
value(
"WriteToSingleDatasets",
false).toBool());
419 string output(
"Processor '" +
name() +
"' will write histogram ");
420 for (list<entry_t>::const_iterator it(
_procList.begin());
422 output += (
"'" + it->proc->name() +
"' to Group '" + it->groupname +
423 "' with dataname '" + it->name +
"',");
425 "' as basename. 2D File will" + (compresslevel ?
"" :
" NOT") +
427 " be distributed. Events will"+ (multipleevents ?
" NOT" :
"") +
428 " be written to single files. In which case the data of the" +
429 " individual processors will" + (singleDataset ?
"" :
" NOT") +
430 " be put into a single dataset. Maximum file size is '" +
438 throw logic_error(
"pp1002::result: '"+
name()+
"' should never be called");
521 list<entry_t>::const_iterator it(
_procList.begin());
522 list<entry_t>::const_iterator last(
_procList.end());
549 list<entry_t>::const_iterator it(
_procList.begin());
550 list<entry_t>::const_iterator last(
_procList.end());
575 list<entry_t>::const_iterator it(
_procList.begin());
576 list<entry_t>::const_iterator last(
_procList.end());
578 (*_entryWriter)(*it++);
size_t dim() const
the dimension of the result
void writeAttib(const Processor::result_t &data, const string &dsetName)
write the attributes to the dataset
std::string name
name of the value in the file
Event to store all LCLS Data.
virtual void loadSettings(size_t)
load the settings of this pp
toString(const QString &format)
std::list< entry_t > _procSummaryList
container for all pps that should be written when the program quits
WriteEntry(const string &filename, const CASSEvent::id_t id=0)
constructor
const name_t name() const
retrieve the name of this processor
string _baseGroupname
the base group name
std::tr1::function< void(const Processor::result_t &, const string &)> _writeAttributes
function to write the results axis attributes just once to the dataset
virtual void operator()(const pp1002::entry_t &entry)
write an entry to the h5 file using the functions defined above
check if the FEL is off by checking the eventid for the bykick flag
pp1002(const name_t &)
constructor
std::tr1::shared_ptr< hdf5::WriteEntry > entryWriter_t
define pointer to the entry writer
AppendEntry(const string &filename, const CASSEvent::id_t id=0)
constructor
size_t _maxFileSize
the maximum file size of the single file
void writeEventToSingleFile(const CASSEvent &evt)
function to write the events to a single file
virtual void processEvent(const CASSEvent &)
process the event
uint64_t id_t
define the id type
bool _hide
flag to tell whether this pp should be hidden in the dropdown list
size_t currentFileSize() const
retrieve the current file size
an axis of a more than 0 dimensional container
static std::string increaseDirCounter(const std::string &fname)
increase the alpha counter of the subdirectory name
void writeEventToMultipleEventsFile(const CASSEvent &evt)
function to write the events to a file that contains multiple events
things written only at end of run H5Dump ProcessorSummary size
std::tr1::function< void(const CASSEvent &)> _writeEvent
write event to file
virtual void setEventID(const CASSEvent::id_t id)
set the event id
::hdf5::Handler _fh
the file handle of the h5 file
std::tr1::function< void(void)> _writeSummary
write summary to file
static void add(Level level, const std::string &line)
add a string to the log
fromStdString(const std::string &str)
void setBaseGroup(const string &name)
set the base group name
beginReadArray(const QString &prefix)
base class for processors.
void writeSummaryToSingleFile()
function to write the summary to a single file
shared_pointer setupDependency(const std::string &depVarName, const name_t &name="")
set up the dependency.
std::vector< hsize_t > shape_t
define the shape type
const axis_t & axis() const
read access to the axis
int _maxFilePerSubDir
the number of files in each subdir
std::string groupname
group where the data will be written to
struct bundleing info for writing an entry to file
void appendEventToMultipleEventsFile(const CASSEvent &evt)
function to write the events to a file that contains multiple events
void writeSummaryToMultipleEventsFile()
write the summary to a file that contains multiple events
std::list< entry_t > _procList
container with all pps that contain the histograms to dump to hdf5
file contains declaration of classes and functions that help other processors to do their job...
static std::string intializeFile(const std::string &fname)
initialize the filename
uint32_t options
options for writing
std::string toString(const Type &t)
convert any type to a string
auxiliary data[Processor]
value(const QString &key, const QVariant &defaultValue=QVariant()
entryWriter_t _entryWriter
the entry writer
virtual void operator()(const pp1002::entry_t &entry)
write an entry to the h5 file using the functions defined above
void setupGeneral()
general setup of the processor
static std::string removeAlphaSubdir(const std::string &fname)
remove the alpha counter subdir from the filename
void writeNothing(const Processor::result_t &, const string &)
write nothing
file contains specialized class that do the settings for cass
write an entity to an h5 file
shared_pointer proc
processor holding the data to be written
hid_t createGroupNameFromEventId(uint64_t eventid, hid_t calibcycle)
create group name for an event from its ID
declaration of pp1001 (hdf5_converter)
virtual const result_t & result(const CASSEvent::id_t eventid=0)
overwrite the retrieval of an histogram
shared_pointer _condition
pointer to the processor that will contain the condition
static std::string increaseFileCounter(const std::string &fname)
increase the alpha counter in the file name
QMutex _lock
a lock to make the process reentrant
bool setupCondition(bool defaultConditionType=true)
setup the condition.
std::string name_t
define the name type
contains a logger for cass
virtual void aboutToQuit()
dump all pp histograms to summary group just before quitting
std::tr1::shared_ptr< Processor > shared_pointer
a shared pointer of this
int _filecounter
counter to count how many files have been written
CASSEvent::id_t _id
the eventid to look for
easier api for hdf5 file writing
append an entity to a dataset in an h5 file
static std::string intializeDir(const std::string &fname)
initialize the directory
beginGroup(const QString &prefix)
virtual const result_t & result(const CASSEvent::id_t eventid=0)
retrieve a result for a given id.
virtual void setEventID(CASSEvent::id_t id)
add the event id to the event id dataset
std::string _basefilename
the filename that the data will be written to