Hammer  1.0.0
Helicity Amplitude Module for Matrix Element Reweighting
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
OuterContainer.cc
Go to the documentation of this file.
1 ///
2 /// @file OuterContainer.cc
3 /// @brief (Sum of) Outer product tensor data container
4 ///
5 
6 //**** This file is a part of the HAMMER library
7 //**** Copyright (C) 2016 - 2020 The HAMMER Collaboration
8 //**** HAMMER is licensed under version 3 of the GPL; see COPYING for details
9 //**** Please note the MCnet academic guidelines; see GUIDELINES for details
10 
11 // -*- C++ -*-
12 #include <numeric>
13 #include <functional>
14 #include <tuple>
15 #include <iterator>
16 
17 
23 #include "Hammer/Exceptions.hh"
24 #include "Hammer/Tools/Utils.hh"
25 #include "Hammer/Math/Utils.hh"
27 #include "Hammer/Tools/Logging.hh"
28 
29 using namespace std;
30 
31 namespace Hammer {
32 
33  namespace MultiDimensional {
34 
        /// @brief Default constructor: empty container that owns (does not share) its data.
        OuterContainer::OuterContainer() : _sharedData{false} { }
36 
        // NOTE(review): copy-constructor body (signature outside this view).
        // Owned data is deep-copied via clone(); shared data keeps aliasing
        // the same underlying tensors.
        if (!other._sharedData) {
            // Deep copy: clone() duplicates the underlying tensors; steal the
            // clone's internals rather than copying them again.
            auto tmp = other.clone();
            OuterContainer* newouter = static_cast<OuterContainer*>(tmp.get());
            _data.swap(newouter->_data);
            _indexing = newouter->_indexing;
            _accessors.swap(newouter->_accessors);
            _sharedData = false;
        }
        else {
            // Shared data: shallow copy, both containers alias the same tensors.
            _data = other._data;
            _indexing = other._indexing;
            _accessors = other._accessors;
            _sharedData = true;
        }
        }
53 
        // NOTE(review): copy-assignment body (signature outside this view);
        // mirrors the copy constructor: deep copy for owned data, aliasing for
        // shared data.
        if(!other._sharedData) {
            auto tmp = other.clone();
            OuterContainer* newouter = static_cast<OuterContainer*>(tmp.get());
            _data.swap(newouter->_data);
            _accessors.swap(newouter->_accessors);
            _indexing = newouter->_indexing;
            _sharedData = false;
        }
        else {
            _data = other._data;
            _indexing = other._indexing;
            _accessors = other._accessors;
            _sharedData = true;
        }
        return *this;
        }
71 
        // NOTE(review): two-factor outer-product constructor body (signature
        // outside this view); takes ownership of both tensors and builds the
        // block indexing from their dims/labels.
        : _indexing{{left->dims(), right->dims()}, {left->labels(), right->labels()}}, _sharedData{false} {
            size_t pos = left->rank();  // boundary between left's and right's index sub-ranges
            vector<pair<SharedTensorData, bool>> vec{{SharedTensorData{left.release()}, false}, {SharedTensorData{right.release()}, false}};
            ASSERT(vec[0].first.get() != nullptr);
            ASSERT(vec[1].first.get() != nullptr);
            _data.push_back(vec);
            // Each accessor projects the full index list onto one factor's range.
            _accessors = {[pos](const IndexList& list, IContainer* elem) -> ElementType { return elem->element(list.begin(), list.begin()+pos); },
                          [pos](const IndexList& list, IContainer* elem) -> ElementType { return elem->element(list.begin()+pos, list.end()); }};
        }
82 
83  OuterContainer::OuterContainer(TensorData toBeSquared, bool conjugate)
84  : _indexing{{toBeSquared->dims(), toBeSquared->dims()},
85  {toBeSquared->labels(),
86  conjugate ? flipListOfLabels(toBeSquared->labels()) : toBeSquared->labels()}},
87  _sharedData{false} {
88  size_t pos = toBeSquared->rank();
89  auto elem = SharedTensorData{toBeSquared.release()};
90  ASSERT(elem.get() != nullptr);
91  vector<pair<SharedTensorData, bool>> vec{{elem, false}, {elem, conjugate}};
92  _data.push_back(vec);
93  _accessors = {[pos](const IndexList& list, IContainer* item) -> ElementType { return item->element(list.begin(), list.begin()+pos); },
94  [pos](const IndexList& list, IContainer* item) -> ElementType { return item->element(list.begin()+pos, list.end()); }};
95  }
96 
97  OuterContainer::OuterContainer(vector<TensorData>&& group) : _sharedData{false} {
98  vector<pair<SharedTensorData, bool>> vec;
99  std::vector<IndexList> dimensions;
100  std::vector<LabelsList> labels;
101  dimensions.reserve(group.size());
102  labels.reserve(group.size());
103  vec.reserve(group.size());
104  size_t start = 0ul;
105  size_t finish = 0ul;
106  for(auto& elem: group) {
107  finish += elem->rank();
108  _accessors.push_back([start,finish](const IndexList& list, IContainer* item) -> ElementType { return item->element(list.begin()+start, list.begin()+finish); });
109  start = finish;
110  }
111  for(auto& elem: group) {
112  dimensions.push_back(elem->dims());
113  labels.push_back(elem->labels());
114  vec.push_back({SharedTensorData{elem.release()}, false});
115  }
116  _data.emplace_back(vec);
117  _indexing = BlockIndexing{dimensions, labels};
118  }
119 
120  OuterContainer::OuterContainer(SharedTensorData toBeSquared, bool conjugate)
121  : _indexing{{toBeSquared->dims(), toBeSquared->dims()},
122  {toBeSquared->labels(),
123  conjugate ? flipListOfLabels(toBeSquared->labels()) : toBeSquared->labels()}},
124  _sharedData{true} {
125  vector<pair<SharedTensorData, bool>> vec{{toBeSquared, false}, {toBeSquared, conjugate}};
126  _data.push_back(vec);
127  size_t pos = toBeSquared->rank();
128  _accessors = {[pos](const IndexList& list, IContainer* item) -> ElementType { return item->element(list.begin(), list.begin()+pos); },
129  [pos](const IndexList& list, IContainer* item) -> ElementType { return item->element(list.begin()+pos, list.end()); }};
130  }
131 
        // NOTE(review): constructor body taking a pre-built list of
        // (shared tensor, conjugation flag) pairs (signature outside this
        // view); data stays shared with the caller.
        : _sharedData{true} {
            std::vector<IndexList> dimensions;
            std::vector<LabelsList> labels;
            dimensions.reserve(data.size());
            labels.reserve(data.size());
            for (size_t i = 0; i < data.size(); ++i) {
                dimensions.push_back(data[i].first->dims());
                // Conjugated factors contribute flipped index labels.
                if (data[i].second) {
                    labels.push_back(flipListOfLabels(data[i].first->labels()));
                } else {
                    labels.push_back(data[i].first->labels());
                }
            }
            size_t start = 0ul;
            size_t finish = 0ul;
            for(auto& elem: data) {
                // Accessor for this factor's contiguous index sub-range.
                finish += elem.first->rank();
                _accessors.push_back([start,finish](const IndexList& list, IContainer* item) -> ElementType { return item->element(list.begin()+start, list.begin()+finish); });
                start = finish;
            }
            _data.push_back(move(data));
            _indexing = BlockIndexing{dimensions, labels};
        }
156 
157  // OuterContainer::OuterContainer(vector<SharedTensorData>&& group, vector<bool>&& isConjugate)
158  // : _sharedData{true} {
159  // ASSERT((isConjugate.size() == 0 || group.size() == isConjugate.size()));
160  // vector<pair<SharedTensorData, bool>> vec;
161  // vec.reserve(group.size());
162  // if(isConjugate.size() > 0) {
163  // for (size_t i = 0; i < group.size(); ++i) {
164  // vec.push_back({group[i], isConjugate[i]});
165  // }
166  // }
167  // else {
168  // for (size_t i = 0; i < group.size(); ++i) {
169  // vec.push_back({group[i], false});
170  // }
171  // }
172  // OuterContainer{move(vec)};
173  // }
174 
        /// @brief Deserialize from a FlatBuffers FBTensorList message.
        /// Rebuilds the shared tensor pool, the per-term factor lists, the
        /// accessors and the block indexing.
        /// NOTE(review): when @p input is null, _sharedData is left to its
        /// in-class default (if any) — confirm the header initializes it.
        OuterContainer::OuterContainer(const Serial::FBTensorList* input) {
            if (input != nullptr) {
                _sharedData = false;
                auto serialvals = input->vals();
                auto serialentries = input->entries();
                if (serialvals != nullptr && serialentries != nullptr) {
                    // First pass: materialize each distinct stored tensor once,
                    // choosing sparse vs dense storage by filling fraction.
                    vector<SharedTensorData> allEntries;
                    allEntries.reserve(serialvals->size());
                    for (unsigned int i = 0; i < serialvals->size(); ++i) {
                        size_t fills = serialvals->Get(i)->vals()->size();
                        size_t totals = 1ul;
                        for (unsigned int j = 0; j < serialvals->Get(i)->dims()->size(); ++j) {
                            totals *= serialvals->Get(i)->dims()->Get(j);
                        }
                        // IMPORTANT: this assumes tensor was optimized before writing!!
                        if (Ops::shouldBeSparse(fills, totals)) {
                            allEntries.emplace_back(SharedTensorData{new SparseContainer{serialvals->Get(i)}});
                        } else {
                            allEntries.emplace_back(SharedTensorData{new VectorContainer{serialvals->Get(i)}});
                        }
                    }
                    // Second pass: rebuild each addend term as references into the pool.
                    _data.reserve(serialentries->size());
                    for(unsigned int i = 0; i < serialentries->size(); ++i) {
                        auto serialitem = serialentries->Get(i);
                        vector<pair<SharedTensorData, bool>> vec;
                        vec.reserve(serialitem->idx()->size());
                        for (unsigned int j = 0; j < serialitem->idx()->size(); ++j) {
                            vec.push_back({allEntries[serialitem->idx()->Get(j)], serialitem->conj()->Get(j)});
                        }
                        _data.push_back(vec);
                    }
                }
                // Rebuild accessors and indexing from the first term (all terms
                // are expected to share the same factor structure).
                _accessors.clear();
                if(_data.size() > 0) {
                    auto entry = _data.begin();
                    vector<LabelsList> tmplabels;
                    vector<IndexList> tmpdims;
                    tmplabels.reserve(entry->size());
                    tmpdims.reserve(entry->size());
                    size_t start = 0ul;
                    size_t finish = 0ul;
                    for(auto it = entry->begin(); it != entry->end(); ++it) {
                        // conjugated factors contribute flipped labels
                        tmplabels.push_back(((it->second) ? flipListOfLabels((it->first)->labels()) : (it->first)->labels()));
                        tmpdims.push_back((it->first)->dims());
                        finish += it->first->rank();
                        _accessors.push_back([start,finish](const IndexList& list, IContainer* item) -> ElementType { return item->element(list.begin()+start, list.begin()+finish); });
                        start = finish;
                    }
                    _indexing = BlockIndexing{tmpdims, tmplabels};
                }
            }
        }
227 
228 
        // NOTE(review): begin()/end() accessor bodies (signatures outside this
        // view) — presumably the const/non-const iterator pairs over the
        // addend terms in _data; confirm against the class declaration.
        return _data.begin();
        }

        return _data.begin();
        }

        return _data.end();
        }

        return _data.end();
        }
244 
245 
        // NOTE(review): value(const IndexList&) body (signature outside this
        // view). Legacy implementation kept commented out below.
        // auto split = _indexing.splitIndices(indices);
        // for(auto& elem: _data) {
        // ElementType item = 1.;
        // auto its = split.begin();
        // auto it = elem.begin();
        // for (; it != elem.end(); ++it, ++its) {
        // auto tmp = it->first->element(*its);
        // item *= it->second ? conj(tmp) : tmp;
        // }
        // result += item;
        // }
        ElementType result = 0;
        // Sum over addend terms; each term is the product of its factors
        // evaluated on the relevant sub-range of the full index list.
        for(auto& elem: _data) {
            ElementType item = 1.;
            ASSERT(elem.size() == _accessors.size());
            for (size_t i = 0; i < elem.size(); ++i) {
                auto tmp = _accessors[i](indices, elem[i].first.get());
                item *= elem[i].second ? conj(tmp) : tmp;
            }
            result += item;
        }
        return result;
        }
271 
272  OuterContainer::ElementType OuterContainer::value(IndexList::const_iterator first, IndexList::const_iterator last) const {
273  ASSERT(_indexing.checkValidIndices(first, last));
274  // ElementType result = 0;
275  // auto split = _indexing.splitIndices(first, last);
276  // for (auto& elem : _data) {
277  // ElementType item = 1.;
278  // auto its = split.begin();
279  // for (auto it = elem.begin(); it != elem.end(); ++it) {
280  // auto b = *its;
281  // ++its;
282  // auto e = *its;
283  // IndexList idx{b, e};
284  // auto tmp = it->first->element(idx);
285  // item *= it->second ? conj(tmp) : tmp;
286  // }
287  // result += item;
288  // }
289  // return result;
290  IndexList indices{first, last};
291  return value(indices);
292  }
293 
        /// @brief Evaluate using pre-split per-factor index lists
        /// (one IndexList per factor of a term).
        OuterContainer::ElementType OuterContainer::value(const vector<IndexList>& indices) const {
            // NOTE(review): one source line is not visible in this listing —
            // likely a validity assertion on `indices`; confirm upstream.
            ElementType result = 0;
            for (auto& elem : _data) {
                auto it = elem.begin();
                auto iti =indices.begin();
                ElementType item = 1.;
                for (; it != elem.end(); ++it, ++iti) {
                    auto tmp = it->first->element(*iti);
                    item *= it->second ? conj(tmp) : tmp;
                }
                result += item;
            }
            return result;
        }
309 
        /// @brief Number of addend terms in the sum of outer products.
        size_t OuterContainer::numAddends() const {
            return _data.size();
        }
313 
        /// @brief Total tensor rank, delegated to the block indexing.
        size_t OuterContainer::rank() const {
            return _indexing.rank();
        }
317 
        // NOTE(review): dims() body (signature outside this view) —
        // delegates to the block indexing.
        return _indexing.dims();
        }
321 
        // NOTE(review): labels() body (signature outside this view) —
        // delegates to the block indexing.
        return _indexing.labels();
        }
325 
        /// @brief Total number of elements (product of all dimensions).
        size_t OuterContainer::numValues() const {
            return _indexing.numValues();
        }
329 
        // NOTE(review): labelToIndex() body (signature outside this view) —
        // delegates the label-to-position lookup to the block indexing.
        return _indexing.labelIndex(label);
        }
333 
        /// @brief Positions of indices (in this and @p other) that can be
        /// contracted together, restricted to the allowed @p indices labels.
        IndexPairList OuterContainer::getSameLabelPairs(const IContainer& other,
                                                        const UniqueLabelsList& indices) const {
            return _indexing.getSameLabelPairs(other.labels(), indices);
        }
338 
341  }
342 
        /// @brief True if @p other has the same label/dimension shape.
        bool OuterContainer::isSameShape(const IContainer& other) const {
            return _indexing.isSameLabelShape(other.labels(), other.dims());
        }
346 
        /// @brief In-place block addition at a coordinate is never supported
        /// for (sums of) outer products.
        bool OuterContainer::canAddAt(const IContainer&, IndexLabel, IndexType) const {
            return false;
        }
350 
        // NOTE(review): non-const element() body (signature outside this view)
        // — outer products are evaluated lazily, so no mutable reference to a
        // single element exists.
        throw Error("Cannot access element of OuterContainer by reference");
        }
354 
        // NOTE(review): const element() body (signature outside this view) —
        // evaluates the element lazily via value().
        return value(coords);
        }
358 
        /// @brief Mutable access is unsupported: the product is lazy, there is
        /// no stored element to reference.
        OuterContainer::reference OuterContainer::element(IndexList::const_iterator, IndexList::const_iterator) {
            throw Error("Cannot access element of OuterContainer by reference");
        }
362 
        /// @brief Const element access: lazily evaluated via value().
        OuterContainer::ElementType OuterContainer::element(IndexList::const_iterator start, IndexList::const_iterator end) const {
            return value(start, end);
        }
366 
        /// @brief True when the underlying tensors are shared with a caller
        /// (and must not be mutated in place).
        bool OuterContainer::isDataShared() const {
            return _sharedData;
        }
370 
371  size_t OuterContainer::dataSize() const {
372  map<IContainer*, size_t> occupancies;
373  for (auto& elem : _data) {
374  for (auto& elem2 : elem) {
375  auto it = occupancies.find(elem2.first.get());
376  if (it == occupancies.end()) {
377  occupancies.insert({elem2.first.get(), elem2.first->dataSize()});
378  }
379  }
380  }
381  return accumulate(occupancies.begin(), occupancies.end(), 0ul,
382  [](size_t var, const pair<IContainer*, size_t>& elem) -> size_t { return var + elem.second; });
383  }
384 
        /// @brief Byte size of a single (complex double) element.
        size_t OuterContainer::entrySize() const {
            return sizeof(complex<double>);
        }
388 
        /// @brief Heuristic deciding whether this lazy product should be
        /// expanded into an explicit tensor: true when the projected footprint
        /// of the evaluated result (worst-case filling factor times per-entry
        /// cost) is smaller than the current footprint of the factor tensors.
        bool OuterContainer::shouldBeEvaluated() const {
            if(_sharedData) return false;  // never expand data owned elsewhere
            map<IContainer*, size_t> occupancies;  // distinct tensor -> byte size
            double maxFillingFactor = 0.;
            for(auto& elem: _data) {
                double tmpFillingFactor = 1.;
                for (auto& elem2 : elem) {
                    auto it = occupancies.find(elem2.first.get());
                    if (it == occupancies.end()) {
                        it = occupancies.insert({elem2.first.get(), elem2.first->dataSize()}).first;
                    }
                    // fraction of this factor's dense size actually stored
                    tmpFillingFactor *= (static_cast<double>(it->second)) / (static_cast<double>(elem2.first->numValues() * elem2.first->entrySize()));
                }
                maxFillingFactor = max(maxFillingFactor, tmpFillingFactor);
            }
            // current total footprint of the distinct factor tensors
            size_t result = accumulate(occupancies.begin(), occupancies.end(), 0ul,
                                       [](size_t var, const pair<IContainer*, size_t>& elem) -> size_t { return var + elem.second; });
            return (maxFillingFactor * static_cast<double>(numValues() * (sizeof(complex<double>) + sizeof(PositionType))) < static_cast<double>(result));
        }
408 
        /// @brief Structural equality: same sharing mode, same label shape,
        /// same number of terms, and pairwise-equal factors (including the
        /// per-factor conjugation flags).
        bool OuterContainer::compare(const IContainer& other) const {
            const OuterContainer* tmp = dynamic_cast<const OuterContainer*>(&other);
            if(tmp == nullptr) return false;
            if(_sharedData != tmp->_sharedData) return false;
            bool dimcheck = _indexing.isSameLabelShape(tmp->_indexing);
            if(!dimcheck) return false;
            if (_data.size() != tmp->_data.size())
                return false;
            if(_data.size() == 0) return true;
            // NOTE(review): only the first terms' lengths are compared here;
            // the lambda below assumes every corresponding pair of terms has
            // equal length — holds if addTerm() enforced a uniform factor
            // count, but confirm.
            if(_data.begin()->size() != tmp->_data.begin()->size()) return false;
            return equal(_data.begin(), _data.end(), tmp->_data.begin(),
                         [](const vector<pair<SharedTensorData, bool>>& a,
                            const vector<pair<SharedTensorData, bool>>& b) -> bool {
                             bool result = true;
                             auto ita = a.begin();
                             auto itb = b.begin();
                             for (; ita != a.end(); ++ita, ++itb) {
                                 result &= (ita->second == itb->second) && ita->first->compare(*(itb->first));
                             }
                             return result;
                         });
        }
431 
        // always do a deep copy even if sharedData is true to avoid changes in contents downstream of dot if dot is semi-trivial.
        // clone should not be triggered when sharedData is true for moving/copying around the outercontainer
        // (i.e. make sure to use standard move/copy semantics)
        OuterContainer* t = new OuterContainer{};
        t->_indexing = _indexing;
        // First pass: deep-copy each distinct underlying tensor exactly once,
        // preserving the aliasing structure of the original.
        map<IContainer*, SharedTensorData> others;
        for (auto& elem : _data) {
            for(auto& elem2: elem) {
                if(others.find(elem2.first.get()) == others.end()) {
                    others.insert({elem2.first.get(), SharedTensorData{elem2.first->clone().release()}});
                }
            }
        }
        // Second pass: rebuild every term pointing at the copies.
        // NOTE(review): `others` is captured by value, copying the whole map
        // once per term — capture by reference would avoid that; confirm and
        // change separately.
        for(auto& elem: _data) {
            vector<pair<SharedTensorData, bool>> tmp;
            transform(elem.begin(), elem.end(), back_inserter(tmp),
                      [others](const pair<SharedTensorData, bool>& in) -> pair<SharedTensorData, bool> { return {others.find(in.first.get())->second, in.second}; });
            t->_data.push_back(tmp);
        }
        t->_accessors = _accessors;
        return TensorData{static_cast<IContainer*>(t)};
        }
455 
456 
457 
        /// @brief Drop all addend terms. Indexing and accessors are left as-is.
        void OuterContainer::clear() {
            _data.clear();
        }
461 
462  void OuterContainer::swapElement(IContainer* oldContainer, TensorData newContainer) {
463  SharedTensorData newshared{move(newContainer)};
464  for(auto& elem: _data) {
465  for(auto& elem2: elem) {
466  if (elem2.first.get() == oldContainer) {
467  elem2.first = newshared;
468  }
469  }
470  }
471  }
472 
        /// @brief Collect the distinct underlying tensor pointers of term
        /// @p position. With @p decoupleConjugates set, a tensor that appears
        /// both conjugated and unconjugated within the term is split into two
        /// physical copies, pending conjugations are applied in place, and all
        /// per-factor conjugation flags are reset to false.
        set<IContainer*> OuterContainer::getUniquePtrs(size_t position, bool decoupleConjugates) {
            map<IContainer*, bool> status;        // first-seen conjugation flag per tensor
            set<SharedTensorData> toBeConjugate;  // tensors to conjugate in place at the end
            map<IContainer*, SharedTensorData> others;  // original ptr -> decoupled clone
            set<IContainer*> result{};
            ASSERT(position < _data.size());
            auto& elem = _data[position];
            if (!decoupleConjugates) {
                for (auto& elem2 : elem) {
                    result.insert(elem2.first.get());
                }
            }
            else {
                for (auto it = elem.begin(); it != elem.end(); ++it) {
                    auto its = status.find(it->first.get());
                    if (its != status.end()) {
                        // seen before: clone only if the conjugation flag differs
                        if (its->second != it->second) {
                            auto ito = others.find(it->first.get());
                            if (ito == others.end()) {
                                SharedTensorData newentry{it->first->clone()};
                                if (it->second) {
                                    toBeConjugate.insert(newentry);
                                }
                                others.insert({it->first.get(), newentry});
                                it->first = newentry;
                                result.insert(newentry.get());
                            } else {
                                // reuse the clone made for an earlier factor
                                it->first = ito->second;
                            }
                        }
                    } else {
                        status.insert({it->first.get(), it->second});
                        result.insert(it->first.get());
                        if (it->second) {
                            toBeConjugate.insert(it->first);
                        }
                    }
                }
                // apply pending conjugations, then clear all flags
                for (auto& conjElem : toBeConjugate) {
                    conjElem->conjugate();
                }
                for (auto it = elem.begin(); it != elem.end(); ++it) {
                    it->second = false;
                }
            }
            return result;
        }
520 
        /// @brief Collect the distinct underlying tensor pointers over ALL
        /// terms. Same decoupling semantics as the positional overload, but
        /// the bookkeeping maps persist across terms, so conjugate aliases are
        /// resolved consistently for the whole container.
        set<IContainer*> OuterContainer::getUniquePtrs(bool decoupleConjugates) {
            map<IContainer*, bool> status;        // first-seen conjugation flag per tensor
            set<SharedTensorData> toBeConjugate;  // tensors to conjugate in place
            map<IContainer*, SharedTensorData> others;  // original ptr -> decoupled clone
            set<IContainer*> result{};
            for (auto& elem : _data) {
                if(!decoupleConjugates) {
                    for (auto& elem2 : elem) {
                        result.insert(elem2.first.get());
                    }
                }
                else {
                    for(auto it = elem.begin(); it != elem.end(); ++it) {
                        auto its = status.find(it->first.get());
                        if (its != status.end()) {
                            // seen before: clone only if the conjugation flag differs
                            if(its->second != it->second) {
                                auto ito = others.find(it->first.get());
                                if(ito == others.end()) {
                                    SharedTensorData newentry{it->first->clone()};
                                    if(it->second) {
                                        toBeConjugate.insert(newentry);
                                    }
                                    others.insert({it->first.get(), newentry});
                                    it->first = newentry;
                                    result.insert(newentry.get());
                                }
                                else {
                                    // reuse the clone made earlier
                                    it->first = ito->second;
                                }
                            }
                        }
                        else {
                            status.insert({it->first.get(), it->second});
                            result.insert(it->first.get());
                            if(it->second) {
                                toBeConjugate.insert(it->first);
                            }
                        }
                    }
                    // apply pending conjugations, then clear this term's flags
                    for(auto& conjElem: toBeConjugate) {
                        conjElem->conjugate();
                    }
                    for(auto it = elem.begin(); it != elem.end(); ++it) {
                        it->second = false;
                    }
                }
            }
            return result;
        }
570 
        // NOTE(review): operator*=(double) body (signature outside this view).
        // The scalar is distributed as an n-th root over the n factors of each
        // term, so the overall product is scaled by `value` exactly once.
        if(_sharedData) {
            // cannot scale tensors owned elsewhere: detach via a deep clone first
            MSG_WARNING("Scalar multiplication with shared data: forcing cloning");
            auto tmp = this->clone();
            OuterContainer* newouter = static_cast<OuterContainer*>(tmp.get());
            _data.swap(newouter->_data);
            _sharedData = false;
        }
        if(!_data.empty()) {
            set<IContainer*> actualPtrs = getUniquePtrs();
            size_t n = _data.begin()->size();
            // NOTE(review): real pow of a negative value with even n yields
            // NaN; presumably only non-negative weights are passed — confirm.
            ElementType tmpvalue = pow(value, 1./static_cast<double>(n));
            for (auto elem : actualPtrs) {
                elem->operator*=(tmpvalue);
            }
        }
        return static_cast<IContainer&>(*this);
        }
589 
        // NOTE(review): complex operator*= body (signature outside this view).
        // Distributes the n-th root of the scalar over the n factors of each
        // term. A genuinely complex scalar does not commute with conjugation,
        // so conjugate-aliased factors are decoupled first via
        // getUniquePtrs(!isZero(value.imag())).
        if (_sharedData) {
            // cannot scale tensors owned elsewhere: detach via a deep clone first
            MSG_WARNING("Scalar multiplication with shared data: forcing cloning");
            auto tmp = this->clone();
            OuterContainer* newouter = static_cast<OuterContainer*>(tmp.get());
            _data.swap(newouter->_data);
            _sharedData = false;
        }
        if (!_data.empty()) {
            std::set<IContainer*> actualPtrs = getUniquePtrs(!isZero(value.imag()));
            size_t n = _data.begin()->size();
            ElementType tmpvalue = pow(value, 1. / static_cast<double>(n));
            for (auto elem : actualPtrs) {
                elem->operator*=(tmpvalue);
            }
        }
        return static_cast<IContainer&>(*this);
        }
608 
        // NOTE(review): conjugate() body (signature outside this view); one
        // further source line is also not visible here — confirm upstream
        // whether the indexing labels are flipped as well.
        for(auto& elem: _data) {
            for(auto& elem2: elem) {
                // toggle the per-factor conjugation flag
                elem2.second = !elem2.second;
            }
        }
        return static_cast<IContainer&>(*this);
        }
618 
        /// @brief Serialize to a FlatBuffers FBTensorList: each distinct
        /// underlying tensor is written once into a pool, and every term
        /// stores indices into that pool plus per-factor conjugation flags.
        OuterContainer::SerialType OuterContainer::write(flatbuffers::FlatBufferBuilder* msgwriter) const {
            vector<flatbuffers::Offset<Serial::FBSingleTensor>> tensors;
            vector<flatbuffers::Offset<Serial::FBTensorListEntry>> entries;
            entries.reserve(_data.size());
            map<IContainer*, uint16_t> stored;  // tensor -> slot in the pool
            uint16_t nextId = 0;
            for (auto& elem : _data) {
                vector<uint16_t> values;
                vector<bool> conjStatus;
                values.reserve(elem.size());
                conjStatus.reserve(elem.size());
                for(auto it = elem.begin(); it != elem.end(); ++it) {
                    IContainer* ptr = it->first.get();
                    auto itstore = stored.find(ptr);
                    if(itstore == stored.end()) {
                        // first occurrence: assign the next pool slot
                        itstore = stored.insert(make_pair(ptr, nextId)).first;
                        ++nextId;
                    }
                    values.push_back(itstore->second);
                    conjStatus.push_back(it->second);
                }
                auto serialvals = msgwriter->CreateVector(values);
                auto serialconj = msgwriter->CreateVector(conjStatus);
                Serial::FBTensorListEntryBuilder serialentry{*msgwriter};
                serialentry.add_idx(serialvals);
                serialentry.add_conj(serialconj);
                auto outEntry = serialentry.Finish();
                entries.push_back(outEntry);
            }
            tensors.resize(stored.size());
            for(auto& elem: stored) {
                auto tmp = elem.first->write(msgwriter);
                // NOTE(review): one source line is not visible in this listing —
                // presumably the construction of `offt` (the FBSingleTensor
                // offset wrapping `tmp`); confirm against upstream.
                tensors[elem.second] = offt;
            }
            auto serialvals = msgwriter->CreateVector(tensors);
            auto serialentries = msgwriter->CreateVector(entries);
            Serial::FBTensorListBuilder serialtensor{*msgwriter};
            serialtensor.add_vals(serialvals);
            serialtensor.add_entries(serialentries);
            auto out = serialtensor.Finish();
            return make_pair(out.Union(), Serial::FBTensorTypes::FBTensorList);
        }
662 
        /// @brief Logging facility for this class.
        Log& OuterContainer::getLog() const {
            return Log::getLog("Hammer.OuterContainer");
        }
666 
        /// @brief Read-only access to the block indexing.
        const BlockIndexing& OuterContainer::getIndexing() const {
            return _indexing;
        }
670 
        /// @brief Replace the term data with @p values.
        /// NOTE(review): the parameter is taken by value, so the caller's
        /// container is untouched; the previous _data is discarded with the
        /// temporary — effectively a "set", not a two-way swap. Confirm intent.
        void OuterContainer::swap(DataType values) {
            values.swap(_data);
        }
674 
675  void OuterContainer::swapIndexing(BlockIndexing values) {
676  _indexing = values;
677  }
678 
679  void OuterContainer::reserve(size_t numTerms) {
680  size_t newCap = numTerms;
681  if(numTerms < _data.capacity()) {
682  newCap = max(numTerms * 10ul, 100ul);
683  }
684  }
685 
686  void OuterContainer::addTerm(vector<pair<SharedTensorData, bool>> tensorsAndConjFlags) {
687  if(tensorsAndConjFlags.size() == _indexing.numSubIndexing() || _indexing.numSubIndexing() == 0) {
688  _data.push_back(tensorsAndConjFlags);
689  }
690  else {
691  MSG_ERROR("Inconsistent term: " + to_string(tensorsAndConjFlags.size()) + " vs " + to_string(_indexing.numSubIndexing()) + " with " + to_string(_data.size()) + " terms. Not added.");
692  }
693  }
694 
695  bool OuterContainer::isOuterSquare() const {
696  return _data.size() == 1 && _data[0].size() == 2 && _data[0][0].first.get() == _data[0][1].first.get() && _data[0][0].second != _data[0][1].second;
697  }
698 
699  TensorData makeOuterSquare(const TensorData& base) {
700  auto tmp = base->clone();
701  auto result = new OuterContainer{move(tmp)};
702  return TensorData{static_cast<IContainer*>(result)};
703  }
704 
        // NOTE(review): overload body (signature outside this view); takes
        // ownership of `base` directly instead of cloning it first.
        auto result = new OuterContainer{move(base)};
        return TensorData{static_cast<IContainer*>(result)};
        }
709 
710  TensorData combineTensors(const std::vector<TensorData>& base) {
711  if (base.size() == 0) {
712  return TensorData{new ScalarContainer()};
713  }
714  std::vector<TensorData> tmp;
715  tmp.reserve(base.size());
716  for(auto& elem: base) {
717  tmp.emplace_back(TensorData{elem->clone()});
718  }
719  return combineTensors(move(tmp));
720  }
721 
722  TensorData combineTensors(std::vector<TensorData>&& base) {
723  if(base.size() == 0) {
724  return TensorData{new ScalarContainer()};
725  }
726  auto result = new OuterContainer{move(base)};
727  return TensorData{static_cast<IContainer*>(result)};
728  }
729 
730  TensorData combineSharedTensors(std::vector<std::pair<SharedTensorData, bool>>&& data) {
731  if (data.size() == 0) {
732  return TensorData{new ScalarContainer()};
733  }
734  auto result = new OuterContainer{move(data)};
735  return TensorData{static_cast<IContainer*>(result)};
736  }
737 
738 
739 
740  } // namespace MultiDimensional
741 
742 } // namespace Hammer
IndexType labelIndex(IndexLabel label) const
void swapElement(IContainer *oldContainer, TensorData newContainer)
#define MSG_WARNING(x)
Definition: Logging.hh:366
reference element(const IndexList &coords={}) override
bool canAddAt(const IContainer &subContainer, IndexLabel coord, IndexType position) const override
void swapIndexing(BlockIndexing values)
std::pair< flatbuffers::Offset< void >, Serial::FBTensorTypes > SerialType
Definition: IContainer.hh:68
bool checkValidIndices(const IndexList &indices) const
check that the indices are within range for each component
IndexType labelToIndex(IndexLabel label) const override
Tensor storage re-optimization algorithm.
IndexPairList getSameLabelPairs(const LabelsList &otherLabels, const UniqueLabelsList &indices, bool sortedBySecond=true) const
returns the position of the indices in the two tensor (this and another) that can be contracted toget...
std::vector< IndexPair > IndexPairList
OuterContainer & operator=(const OuterContainer &other)
size_t PositionType
#define ASSERT(x)
Definition: Exceptions.hh:95
IndexList dims() const
get the dimensions of all the indices at once
Non-sparse tensor data container.
UniqueLabelsList spinIndices() const
returns only the labels corresponding to spin indices
Log & getLog() const
logging facility
bool isSameLabelShape(const LabelsList &otherLabels, const IndexList &otherIndices) const
OuterElemIterator::EntryType EntryType
bool compare(const IContainer &other) const override
Message logging routines.
std::complex< double > ElementType
Definition: IContainer.hh:34
uint16_t IndexType
SerialType write(flatbuffers::FlatBufferBuilder *msgwriter) const override
bool isSameShape(const IContainer &other) const override
std::unique_ptr< IContainer > TensorData
std::shared_ptr< IContainer > SharedTensorData
TensorData clone() const override
TensorData combineTensors(const std::vector< TensorData > &base)
static Log & getLog(const std::string &name)
Get a logger with the given name.
Definition: Logging.cc:139
(Sum of) Outer product tensor data container
Hammer exception definitions.
DataType::const_iterator const_iterator
Sparse tensor data container.
std::vector< IndexType > IndexList
Order-0 tensor data container.
TensorData makeOuterSquare(const TensorData &base)
IndexPairList getOppositeLabelPairs(const UniqueLabelsList &indices) const
returns the position of the indices that can be traced together, given a set of allowed index labels ...
IndexList dims() const override
IndexLabel
label identifiers of tensor indices they are used to determine which indices can be contracted togeth...
Definition: IndexLabels.hh:27
bool isZero(const std::complex< double > val)
Definition: Math/Utils.hh:25
void addTerm(std::vector< std::pair< SharedTensorData, bool >> tensorsAndConjFlags)
const BlockIndexing & getIndexing() const
size_t rank() const
rank of the tensor
TensorData combineSharedTensors(std::vector< std::pair< SharedTensorData, bool >> &&data)
std::vector< IndexLabel > LabelsList
PositionType numValues() const
the number of elements (product of all the dimensions)
std::set< IndexLabel > UniqueLabelsList
LabelsList labels() const
get the labels of all the indices at once
#define MSG_ERROR(x)
Definition: Logging.hh:367
bool shouldBeSparse(size_t fill, size_t total)
Definition: Optimize.hh:43
IndexPairList getSameLabelPairs(const IContainer &other, const UniqueLabelsList &indices) const override
IContainer & operator*=(double value) override
LabelsList labels() const override
auto end(reversion_wrapper< T > w)
Definition: Tools/Utils.hh:84
Serialization related typedefs and includes.
ElementType value(const IndexList &indices) const
LabelsList flipListOfLabels(LabelsList labels)
IndexPairList getSpinLabelPairs() const override
std::vector< std::function< ElementType(const IndexList &, IContainer *)> > _accessors
std::set< IContainer * > getUniquePtrs(bool decoupleConjugates=false)