20 #include <boost/functional/hash.hpp>
41 return _processTree.begin();
45 return _processTree.end();
49 return _processTree.find(particle);
55 _particles.push_back(p);
61 bool can_add = parent < _particles.size();
62 for (
auto elem : daughters) {
63 can_add &= elem < _particles.size();
66 _processTree[parent] = daughters;
68 MSG_ERROR(
"One or more particle index is invalid: vertex NOT added.");
74 auto it = _processTree.find(parent);
75 if(it != _processTree.end()) {
78 for(
auto elem: daughters) {
79 removeVertex(elem,
true);
84 MSG_ERROR(
"You can't collapse after removing the tree head!");
87 auto it2 = _processTree.find(getParentId(parent));
88 if(it2 != _processTree.end()) {
89 it2->second.insert(it2->second.end(), daughters.begin(), daughters.end());
92 _processTree.erase(parent);
103 if(parent >= _particles.size()) {
106 auto res = getDaughtersIds(parent);
107 for (
auto& elem : res) {
108 if(elem != particle) {
109 temps.push_back(elem);
117 return getOrDefault(_parentDict, daughter, numeric_limits<size_t>::max());
119 for (
auto& elem : _processTree) {
120 if (::find(elem.second.begin(), elem.second.end(), daughter) != elem.second.end()) {
124 return numeric_limits<size_t>::max();
128 if (
id < _particles.size()) {
129 return _particles[id];
131 throw Error(
"Index out of range. You're gonna need a bigger boat, or a smaller shark.");
138 return (parentId > 0) ? a.pdgId() : -a.pdgId();
140 auto sorter = bind(
particlesByPdg, getter, placeholders::_1, placeholders::_2);
141 sort(parts.begin(), parts.end(), sorter);
147 auto res = getDaughtersIds(parent);
148 temps.reserve(res.size());
149 for (
auto& elem : res) {
150 temps.push_back(_particles[elem]);
153 return sortByParent(parent, temps);
160 return _particles[tmp];
165 if(parent >= _particles.size()) {
169 auto res = getDaughtersIds(parent);
170 temps.reserve(res.size()-1);
171 for (
auto& elem : res) {
172 if(elem != particle) {
173 temps.push_back(_particles[elem]);
177 return sortByParent(parent, temps);
183 return _processTree.find(particle) != _processTree.end();
186 pair<Particle, ParticleList> Process::getParticlesByVertex(
PdgId parent, vector<PdgId> daughters)
const {
188 auto it = _idTree.find(vertexId);
189 if (it != _idTree.end()){
190 return {_particles[it->second],getDaughters(it->second,
true)};
193 it = _idTree.find(vertexId);
194 if (it != _idTree.end()){
195 return {_particles[it->second],getDaughters(it->second,
true)};
200 pair<Particle, ParticleList> Process::getParticlesByVertex(
string vertex)
const {
201 PID& pdg = PID::instance();
203 for (
auto& elem : temp) {
204 auto it = _idTree.find(elem);
205 if (it != _idTree.end()){
206 return {_particles[it->second],getDaughters(it->second,
true)};
212 HashId Process::getVertexId(
string vertex)
const {
213 PID& pdg = PID::instance();
216 find_if(temp.begin(), temp.end(), [&](
HashId val) ->
bool {
return _fullId.find(val) != _fullId.end(); });
217 if (it == temp.end()) {
231 size_t Process::numParticles(
bool withoutPhotons)
const {
232 return _particles.size() - (withoutPhotons ? _erasedGammas.size() : 0ul);
237 return findFirstVertex();
239 return _firstVertexIdx;
244 for (
auto& elem : _processTree) {
245 for (
auto& elem2 : elem.second) {
250 if (used.find(i) == used.end()) {
254 return numeric_limits<size_t>::max();
257 void Process::cacheParticleDependencies() {
259 _parentDict.reserve(_particles.size()-1);
260 for(
auto& elem: _processTree) {
261 for(
auto& elem2: elem.second) {
262 _parentDict.insert({elem2, elem.first});
267 if(used.find(i) == used.end()) {
275 bool Process::initialize() {
276 if(_particles.size() == 0) {
282 cacheParticleDependencies();
290 void Process::pruneSoftPhotons() {
291 PID& pdg = PID::instance();
292 auto firstvertex = findFirstVertex();
293 for (
auto& elem : _processTree) {
294 auto& daughters = elem.second;
296 while (daughters.size() > 2 && find_if(daughters.begin(), daughters.end(), [&](
ParticleIndex& idx)->
bool {
return (_particles[idx].pdgId() == PID::PHOTON);}) != daughters.end()){
298 map<double, pair<ParticleIndex, Particle>> gammas;
299 for(
auto idx : daughters){
300 if(_particles[idx].pdgId() == PID::PHOTON) {
301 gammas.insert({_particles[idx].p().E(), make_pair(idx, _particles[idx])});
304 pair<ParticleIndex, Particle> gamma = gammas.begin()->second;
306 map<double, pair<ParticleIndex, Particle>, std::greater<double>> chps;
307 for(
auto idx : daughters){
309 const auto& mom1 = _particles[idx].p();
310 const auto& mom2 = gamma.second.p();
311 chps.insert({
costheta(mom1.pVec(),mom2.pVec()), make_pair(idx, _particles[idx])});
314 pair<ParticleIndex, Particle> nchp = chps.begin()->second;
316 set<ParticleIndex> chTree{nchp.first};
318 siblings.erase(
remove(siblings.begin(), siblings.end(), gamma.first), siblings.end());
319 set<ParticleIndex> YTree(siblings.begin(), siblings.end());
322 for (
auto elemY : YTree){
323 pY += _particles[elemY].p();
329 while ( (ix != firstvertex) && (::find(daughters.begin(), daughters.end(), ix) == daughters.end()) &&
330 (erasedgammas.find(ix) == erasedgammas.end()) ){
331 ix = getParentId(ix);
333 if (ix == nchp.first) {
336 if (::find(siblings.begin(), siblings.end(), ix) != siblings.end() && ix != gamma.first) {
342 double pYCh = 0.5*pP.
mass()*sqrt((1. - pow((pCh.
mass() + pY.mass()),2.)/pP.
mass2())*(1. - pow((pCh.
mass() - pY.mass()),2.)/pP.
mass2()));
343 double EY = (pP.
mass2() - pCh.
mass2() + pY.mass2())/(2.*pP.
mass());
344 double ECh = (pP.
mass2() + pCh.
mass2() - pY.mass2())/(2.*pP.
mass());
346 auto boostP = pY + pCh;
347 pY.boostToRestFrameOf(boostP);
348 pCh.boostToRestFrameOf(boostP);
350 double betagammaCh = (pYCh*pCh.E() - ECh*pCh.p())/pCh.mass2();
351 double betagammaY = (pYCh*pY.E() - EY*pY.p())/pY.mass2();
352 double betaCh = (betagammaCh > 0.0) ? 1./sqrt(1. + 1./pow(betagammaCh,2.)) : -1./sqrt(1. + 1./pow(betagammaCh,2.));
353 double betaY = (betagammaY > 0.0) ? 1./sqrt(1. + 1./pow(betagammaY,2.)) : -1./sqrt(1. + 1./pow(betagammaY,2.));
354 FourMomentum boostCh{1., betaCh*pCh.
px()/pCh.p(),betaCh*pCh.py()/pCh.p(),betaCh*pCh.pz()/pCh.p()};
355 FourMomentum boostY{1., betaY*pY.px()/pY.p(),betaY*pY.py()/pY.p(),betaY*pY.pz()/pY.p()};
356 for (
auto elemCh : chTree) {
357 _particles[elemCh].p().boostToRestFrameOf(boostP).boostFromRestFrameOf(boostCh).boostFromRestFrameOf(pP);
359 for (
auto elemY : YTree) {
360 _particles[elemY].p().boostToRestFrameOf(boostP).boostFromRestFrameOf(boostY).boostFromRestFrameOf(pP);
362 daughters.erase(
remove(daughters.begin(), daughters.end(), gamma.first), daughters.end());
363 erasedgammas.insert(gamma.first);
365 _erasedGammas.insert(erasedgammas.begin(),erasedgammas.end());
371 void Process::calcSignatures() {
374 for(
auto& elem: _processTree) {
375 vector<PdgId> daughterPdgs;
376 for(
auto elem2: elem.second) {
377 daughterPdgs.push_back(_particles[elem2].pdgId());
379 if (::find(daughterPdgs.begin(), daughterPdgs.end(), _particles[elem.first].pdgId()) != daughterPdgs.end()) {
383 _fullId.insert(decayId);
384 _idTree.insert({decayId, elem.first});
393 const set<HashId>& Process::fullId()
const {
397 void Process::disable() {
403 vector<uint64_t> idfull;
404 for (
auto elem : _fullId) {
405 idfull.push_back(elem);
407 auto serialfull = msgwriter->CreateVector(idfull);
408 Serial::FBProcIDsBuilder serialprocess{*msgwriter};
409 serialprocess.add_id(_hashId);
410 serialprocess.add_idfull(serialfull);
411 *msg = serialprocess.Finish();
415 if (msgreader !=
nullptr) {
416 _hashId = msgreader->id();
417 auto idfull = msgreader->idfull();
419 for (
unsigned int i = 0; i < idfull->size(); ++i) {
420 _fullId.insert(idfull->Get(i));
425 void Process::defineSettings() {
429 Log& Process::getLog()
const {
430 return Log::getLog(
"Hammer.Process");
PDG codes to UID functions.
auto const & getOrThrow(const std::map< KeyType, ValueType > &data, KeyType key, std::exception error)
TensorData read(const Serial::FBTensor *msgreader)
const ParticleIndices & daughtersId(Process::const_iterator it)
std::vector< PdgId > combineDaughters(const std::vector< PdgId > &daughters, const std::vector< PdgId > &subDaughters)
combine list of codes of daughters and granddaughters (for processes which parameterise two subsequent...
double p() const
returns the absolute value of the 3-momentum
std::vector< HashId > expandToValidVertexUIDs(const std::string &name, const bool &hadOnly=false) const
bool particlesByPdg(const std::function< PdgId(const Particle &)> &pdgGetter, const Particle &a, const Particle &b)
checks whether two particles are ordered according to the PDG code ordering used in computing hashes ...
Message logging routines.
double costheta(const std::array< double, 3 > &a, const std::array< double, 3 > &b)
double mass2() const
returns the squared invariant mass
double mass() const
returns the invariant mass; if the invariant mass squared is negative, returns
std::vector< PdgId > flipSigns(const std::vector< PdgId > &list)
return the PDG codes of the conjugate particles (itself if self-conjugate) for all the PDG codes in a...
ValueType getOrDefault(const std::map< KeyType, ValueType > &data, KeyType key, ValueType fallback)
auto begin(reversion_wrapper< T > w)
std::vector< Particle > ParticleList
int getThreeCharge(PdgId id) const
return 3q, where q is the particle electric charge, from a PDG code
HashId combineProcessIDs(const std::set< HashId > &allIds)
PdgId flipSign(const PdgId &id)
return the PDG code of the conjugate particle (itself if self-conjugate)
Hammer class for dealing with particle data.
ParticleIndex parentId(Process::const_iterator it)
std::vector< ParticleIndex > ParticleIndices
Hammer particle data class.
HashId processID(PdgId parent, const std::vector< PdgId > &allDaughters)
compute a unique ID for a given process based on the PDG codes of the parent particle and the ordered...
std::set< ParticleIndex > UniqueParticleIndices
TreeMap::const_iterator const_iterator
Out-of-range error class.
auto end(reversion_wrapper< T > w)
Serialization related typedefs and includes.
double px() const
returns the momentum x component