33 namespace MultiDimensional {
35 OuterContainer::OuterContainer() : _sharedData{
false} { }
38 if (!other._sharedData) {
39 auto tmp = other.
clone();
48 _indexing = other._indexing;
49 _accessors = other._accessors;
56 auto tmp = other.
clone();
73 : _indexing{{left->dims(), right->dims()}, {left->labels(), right->labels()}}, _sharedData{
false} {
74 size_t pos = left->rank();
76 ASSERT(vec[0].first.get() !=
nullptr);
77 ASSERT(vec[1].first.get() !=
nullptr);
79 _accessors = {[pos](
const IndexList& list, IContainer* elem) -> ElementType {
return elem->element(list.begin(), list.begin()+pos); },
80 [pos](
const IndexList& list, IContainer* elem) -> ElementType {
return elem->element(list.begin()+pos, list.end()); }};
84 : _indexing{{toBeSquared->dims(), toBeSquared->dims()},
85 {toBeSquared->labels(),
86 conjugate ?
flipListOfLabels(toBeSquared->labels()) : toBeSquared->labels()}},
88 size_t pos = toBeSquared->rank();
90 ASSERT(elem.get() !=
nullptr);
91 vector<pair<SharedTensorData, bool>> vec{{elem,
false}, {elem, conjugate}};
93 _accessors = {[pos](
const IndexList& list, IContainer* item) -> ElementType {
return item->element(list.begin(), list.begin()+pos); },
94 [pos](
const IndexList& list, IContainer* item) -> ElementType {
return item->element(list.begin()+pos, list.end()); }};
98 vector<pair<SharedTensorData, bool>> vec;
99 std::vector<IndexList> dimensions;
100 std::vector<LabelsList> labels;
101 dimensions.reserve(group.size());
102 labels.reserve(group.size());
103 vec.reserve(group.size());
106 for(
auto& elem: group) {
107 finish += elem->rank();
108 _accessors.push_back([start,finish](
const IndexList& list, IContainer* item) -> ElementType {
return item->element(list.begin()+start, list.begin()+finish); });
111 for(
auto& elem: group) {
112 dimensions.push_back(elem->dims());
113 labels.push_back(elem->labels());
116 _data.emplace_back(vec);
117 _indexing = BlockIndexing{dimensions, labels};
121 : _indexing{{toBeSquared->dims(), toBeSquared->dims()},
122 {toBeSquared->labels(),
123 conjugate ?
flipListOfLabels(toBeSquared->labels()) : toBeSquared->labels()}},
125 vector<pair<SharedTensorData, bool>> vec{{toBeSquared,
false}, {toBeSquared, conjugate}};
126 _data.push_back(vec);
127 size_t pos = toBeSquared->rank();
128 _accessors = {[pos](
const IndexList& list, IContainer* item) -> ElementType {
return item->element(list.begin(), list.begin()+pos); },
129 [pos](
const IndexList& list, IContainer* item) -> ElementType {
return item->element(list.begin()+pos, list.end()); }};
133 : _sharedData{
true} {
134 std::vector<IndexList> dimensions;
135 std::vector<LabelsList> labels;
136 dimensions.reserve(data.size());
137 labels.reserve(data.size());
138 for (
size_t i = 0; i < data.size(); ++i) {
139 dimensions.push_back(data[i].first->dims());
140 if (data[i].second) {
143 labels.push_back(data[i].first->labels());
148 for(
auto& elem: data) {
149 finish += elem.first->rank();
150 _accessors.push_back([start,finish](
const IndexList& list, IContainer* item) -> ElementType {
return item->element(list.begin()+start, list.begin()+finish); });
153 _data.push_back(move(data));
154 _indexing = BlockIndexing{dimensions, labels};
176 if (input !=
nullptr) {
178 auto serialvals = input->vals();
179 auto serialentries = input->entries();
180 if (serialvals !=
nullptr && serialentries !=
nullptr) {
181 vector<SharedTensorData> allEntries;
182 allEntries.reserve(serialvals->size());
183 for (
unsigned int i = 0; i < serialvals->size(); ++i) {
184 size_t fills = serialvals->Get(i)->vals()->size();
186 for (
unsigned int j = 0; j < serialvals->Get(i)->dims()->size(); ++j) {
187 totals *= serialvals->Get(i)->dims()->Get(j);
191 allEntries.emplace_back(
SharedTensorData{
new SparseContainer{serialvals->Get(i)}});
193 allEntries.emplace_back(
SharedTensorData{
new VectorContainer{serialvals->Get(i)}});
196 _data.reserve(serialentries->size());
197 for(
unsigned int i = 0; i < serialentries->size(); ++i) {
198 auto serialitem = serialentries->Get(i);
199 vector<pair<SharedTensorData, bool>> vec;
200 vec.reserve(serialitem->idx()->size());
201 for (
unsigned int j = 0; j < serialitem->idx()->size(); ++j) {
202 vec.push_back({allEntries[serialitem->idx()->Get(j)], serialitem->conj()->Get(j)});
204 _data.push_back(vec);
208 if(
_data.size() > 0) {
209 auto entry =
_data.begin();
210 vector<LabelsList> tmplabels;
211 vector<IndexList> tmpdims;
212 tmplabels.reserve(entry->size());
213 tmpdims.reserve(entry->size());
216 for(
auto it = entry->begin(); it != entry->end(); ++it) {
217 tmplabels.push_back(((it->second) ?
flipListOfLabels((it->first)->labels()) : (it->first)->labels()));
218 tmpdims.push_back((it->first)->dims());
219 finish += it->first->rank();
223 _indexing = BlockIndexing{tmpdims, tmplabels};
230 return _data.begin();
234 return _data.begin();
260 for(
auto& elem:
_data) {
263 for (
size_t i = 0; i < elem.size(); ++i) {
264 auto tmp =
_accessors[i](indices, elem[i].first.get());
265 item *= elem[i].second ? conj(tmp) : tmp;
291 return value(indices);
297 for (
auto& elem : _data) {
298 auto it = elem.begin();
299 auto iti =indices.begin();
301 for (; it != elem.end(); ++it, ++iti) {
302 auto tmp = it->first->element(*iti);
303 item *= it->second ? conj(tmp) : tmp;
352 throw Error(
"Cannot access element of OuterContainer by reference");
356 return value(coords);
360 throw Error(
"Cannot access element of OuterContainer by reference");
364 return value(start, end);
372 map<IContainer*, size_t> occupancies;
373 for (
auto& elem : _data) {
374 for (
auto& elem2 : elem) {
375 auto it = occupancies.find(elem2.first.get());
376 if (it == occupancies.end()) {
377 occupancies.insert({elem2.first.get(), elem2.first->dataSize()});
381 return accumulate(occupancies.begin(), occupancies.end(), 0ul,
382 [](
size_t var,
const pair<IContainer*, size_t>& elem) ->
size_t {
return var + elem.second; });
386 return sizeof(complex<double>);
391 map<IContainer*, size_t> occupancies;
392 double maxFillingFactor = 0.;
393 for(
auto& elem: _data) {
394 double tmpFillingFactor = 1.;
395 for (
auto& elem2 : elem) {
396 auto it = occupancies.find(elem2.first.get());
397 if (it == occupancies.end()) {
398 it = occupancies.insert({elem2.first.get(), elem2.first->dataSize()}).first;
400 tmpFillingFactor *= (
static_cast<double>(it->second)) / (
static_cast<double>(elem2.first->numValues() * elem2.first->entrySize()));
402 maxFillingFactor = max(maxFillingFactor, tmpFillingFactor);
404 size_t result = accumulate(occupancies.begin(), occupancies.end(), 0ul,
405 [](
size_t var,
const pair<IContainer*, size_t>& elem) ->
size_t {
return var + elem.second; });
406 return (maxFillingFactor * static_cast<double>(
numValues() * (
sizeof(complex<double>) +
sizeof(
PositionType))) <
static_cast<double>(result));
411 if(tmp ==
nullptr)
return false;
414 if(!dimcheck)
return false;
415 if (_data.size() != tmp->_data.size())
417 if(_data.size() == 0)
return true;
418 if(_data.begin()->size() != tmp->_data.begin()->size())
return false;
419 return equal(_data.begin(), _data.end(), tmp->_data.begin(),
420 [](
const vector<pair<SharedTensorData, bool>>& a,
421 const vector<pair<SharedTensorData, bool>>& b) ->
bool {
423 auto ita = a.begin();
424 auto itb = b.begin();
425 for (; ita != a.end(); ++ita, ++itb) {
426 result &= (ita->second == itb->second) && ita->first->compare(*(itb->first));
438 map<IContainer*, SharedTensorData> others;
439 for (
auto& elem : _data) {
440 for(
auto& elem2: elem) {
441 if(others.find(elem2.first.get()) == others.end()) {
442 others.insert({elem2.first.get(),
SharedTensorData{elem2.first->clone().release()}});
446 for(
auto& elem: _data) {
447 vector<pair<SharedTensorData, bool>> tmp;
448 transform(elem.begin(), elem.end(), back_inserter(tmp),
449 [others](
const pair<SharedTensorData, bool>& in) -> pair<SharedTensorData, bool> {
return {others.find(in.first.get())->second, in.second}; });
450 t->_data.push_back(tmp);
464 for(
auto& elem: _data) {
465 for(
auto& elem2: elem) {
466 if (elem2.first.get() == oldContainer) {
467 elem2.first = newshared;
474 map<IContainer*, bool> status;
475 set<SharedTensorData> toBeConjugate;
476 map<IContainer*, SharedTensorData> others;
477 set<IContainer*> result{};
478 ASSERT(position < _data.size());
479 auto& elem = _data[position];
480 if (!decoupleConjugates) {
481 for (
auto& elem2 : elem) {
482 result.insert(elem2.first.get());
486 for (
auto it = elem.begin(); it != elem.end(); ++it) {
487 auto its = status.find(it->first.get());
488 if (its != status.end()) {
489 if (its->second != it->second) {
490 auto ito = others.find(it->first.get());
491 if (ito == others.end()) {
494 toBeConjugate.insert(newentry);
496 others.insert({it->first.get(), newentry});
497 it->first = newentry;
498 result.insert(newentry.get());
500 it->first = ito->second;
504 status.insert({it->first.get(), it->second});
505 result.insert(it->first.get());
507 toBeConjugate.insert(it->first);
511 for (
auto& conjElem : toBeConjugate) {
512 conjElem->conjugate();
514 for (
auto it = elem.begin(); it != elem.end(); ++it) {
522 map<IContainer*, bool> status;
523 set<SharedTensorData> toBeConjugate;
524 map<IContainer*, SharedTensorData> others;
525 set<IContainer*> result{};
526 for (
auto& elem : _data) {
527 if(!decoupleConjugates) {
528 for (
auto& elem2 : elem) {
529 result.insert(elem2.first.get());
533 for(
auto it = elem.begin(); it != elem.end(); ++it) {
534 auto its = status.find(it->first.get());
535 if (its != status.end()) {
536 if(its->second != it->second) {
537 auto ito = others.find(it->first.get());
538 if(ito == others.end()) {
541 toBeConjugate.insert(newentry);
543 others.insert({it->first.get(), newentry});
544 it->first = newentry;
545 result.insert(newentry.get());
548 it->first = ito->second;
553 status.insert({it->first.get(), it->second});
554 result.insert(it->first.get());
556 toBeConjugate.insert(it->first);
560 for(
auto& conjElem: toBeConjugate) {
561 conjElem->conjugate();
563 for(
auto it = elem.begin(); it != elem.end(); ++it) {
573 MSG_WARNING(
"Scalar multiplication with shared data: forcing cloning");
574 auto tmp = this->
clone();
576 _data.swap(newouter->_data);
581 size_t n = _data.begin()->size();
582 ElementType tmpvalue = pow(value, 1./static_cast<double>(n));
583 for (
auto elem : actualPtrs) {
584 elem->operator*=(tmpvalue);
592 MSG_WARNING(
"Scalar multiplication with shared data: forcing cloning");
593 auto tmp = this->
clone();
595 _data.swap(newouter->_data);
598 if (!_data.empty()) {
600 size_t n = _data.begin()->size();
601 ElementType tmpvalue = pow(value, 1. / static_cast<double>(n));
602 for (
auto elem : actualPtrs) {
603 elem->operator*=(tmpvalue);
610 for(
auto& elem: _data) {
611 for(
auto& elem2: elem) {
612 elem2.second = !elem2.second;
620 vector<flatbuffers::Offset<Serial::FBSingleTensor>> tensors;
621 vector<flatbuffers::Offset<Serial::FBTensorListEntry>> entries;
622 entries.reserve(_data.size());
623 map<IContainer*, uint16_t> stored;
625 for (
auto& elem : _data) {
626 vector<uint16_t> values;
627 vector<bool> conjStatus;
628 values.reserve(elem.size());
629 conjStatus.reserve(elem.size());
630 for(
auto it = elem.begin(); it != elem.end(); ++it) {
632 auto itstore = stored.find(ptr);
633 if(itstore == stored.end()) {
634 itstore = stored.insert(make_pair(ptr, nextId)).first;
637 values.push_back(itstore->second);
638 conjStatus.push_back(it->second);
640 auto serialvals = msgwriter->CreateVector(values);
641 auto serialconj = msgwriter->CreateVector(conjStatus);
642 Serial::FBTensorListEntryBuilder serialentry{*msgwriter};
643 serialentry.add_idx(serialvals);
644 serialentry.add_conj(serialconj);
645 auto outEntry = serialentry.Finish();
646 entries.push_back(outEntry);
648 tensors.resize(stored.size());
649 for(
auto& elem: stored) {
650 auto tmp = elem.first->write(msgwriter);
652 tensors[elem.second] = offt;
654 auto serialvals = msgwriter->CreateVector(tensors);
655 auto serialentries = msgwriter->CreateVector(entries);
656 Serial::FBTensorListBuilder serialtensor{*msgwriter};
657 serialtensor.add_vals(serialvals);
658 serialtensor.add_entries(serialentries);
659 auto out = serialtensor.Finish();
660 return make_pair(out.Union(), Serial::FBTensorTypes::FBTensorList);
680 size_t newCap = numTerms;
681 if(numTerms < _data.capacity()) {
682 newCap = max(numTerms * 10ul, 100ul);
688 _data.push_back(tensorsAndConjFlags);
691 MSG_ERROR(
"Inconsistent term: " + to_string(tensorsAndConjFlags.size()) +
" vs " + to_string(
_indexing.
numSubIndexing()) +
" with " + to_string(_data.size()) +
" terms. Not added.");
696 return _data.size() == 1 && _data[0].size() == 2 && _data[0][0].first.get() == _data[0][1].first.get() && _data[0][0].second != _data[0][1].second;
700 auto tmp = base->clone();
711 if (base.size() == 0) {
714 std::vector<TensorData> tmp;
715 tmp.reserve(base.size());
716 for(
auto& elem: base) {
723 if(base.size() == 0) {
731 if (data.size() == 0) {
IndexType labelIndex(IndexLabel label) const
size_t numAddends() const
void swapElement(IContainer *oldContainer, TensorData newContainer)
reference element(const IndexList &coords={}) override
bool canAddAt(const IContainer &subContainer, IndexLabel coord, IndexType position) const override
void swapIndexing(BlockIndexing values)
bool shouldBeEvaluated() const
std::pair< flatbuffers::Offset< void >, Serial::FBTensorTypes > SerialType
bool checkValidIndices(const IndexList &indices) const
check that the indices are within range for each component
IndexType labelToIndex(IndexLabel label) const override
Tensor storage re-optimization algorithm.
IndexPairList getSameLabelPairs(const LabelsList &otherLabels, const UniqueLabelsList &indices, bool sortedBySecond=true) const
returns the positions of the indices in the two tensors (this and another) that can be contracted together
std::vector< IndexPair > IndexPairList
OuterContainer & operator=(const OuterContainer &other)
size_t entrySize() const override
IndexList dims() const
get the dimensions of all the indices at once
Non-sparse tensor data container.
UniqueLabelsList spinIndices() const
returns only the labels corresponding to spin indices
Log & getLog() const
logging facility
bool isSameLabelShape(const LabelsList &otherLabels, const IndexList &otherIndices) const
OuterElemIterator::EntryType EntryType
bool compare(const IContainer &other) const override
Message logging routines.
std::complex< double > ElementType
size_t rank() const override
DataType::iterator iterator
SerialType write(flatbuffers::FlatBufferBuilder *msgwriter) const override
bool isSameShape(const IContainer &other) const override
std::unique_ptr< IContainer > TensorData
std::shared_ptr< IContainer > SharedTensorData
void reserve(size_t numTerms)
TensorData clone() const override
TensorData combineTensors(const std::vector< TensorData > &base)
size_t numSubIndexing() const
static Log & getLog(const std::string &name)
Get a logger with the given name.
std::vector< EntryType > DataType
(Sum of) Outer product tensor data container
Hammer exception definitions.
size_t dataSize() const override
DataType::const_iterator const_iterator
Sparse tensor data container.
std::vector< IndexType > IndexList
Order-0 tensor data container.
TensorData makeOuterSquare(const TensorData &base)
IndexPairList getOppositeLabelPairs(const UniqueLabelsList &indices) const
returns the positions of the indices that can be traced together, given a set of allowed index labels
bool isOuterSquare() const
void swap(DataType values)
IndexList dims() const override
IndexLabel
label identifiers of tensor indices; they are used to determine which indices can be contracted together
bool isZero(const std::complex< double > val)
void addTerm(std::vector< std::pair< SharedTensorData, bool >> tensorsAndConjFlags)
const BlockIndexing & getIndexing() const
size_t rank() const
rank of the tensor
TensorData combineSharedTensors(std::vector< std::pair< SharedTensorData, bool >> &&data)
std::vector< IndexLabel > LabelsList
PositionType numValues() const
the number of elements (product of all the dimensions)
std::set< IndexLabel > UniqueLabelsList
LabelsList labels() const
get the labels of all the indices at once
bool shouldBeSparse(size_t fill, size_t total)
IndexPairList getSameLabelPairs(const IContainer &other, const UniqueLabelsList &indices) const override
IContainer & operator*=(double value) override
size_t numValues() const override
bool isDataShared() const
LabelsList labels() const override
IContainer & conjugate() override
auto end(reversion_wrapper< T > w)
Serialization related typedefs and includes.
ElementType value(const IndexList &indices) const
LabelsList flipListOfLabels(LabelsList labels)
IndexPairList getSpinLabelPairs() const override
std::vector< std::function< ElementType(const IndexList &, IContainer *)> > _accessors
std::set< IContainer * > getUniquePtrs(bool decoupleConjugates=false)