#include <type_traits>
// ...
#include <boost/functional/hash.hpp>

namespace MultiDimensional {

using VTensor = VectorContainer;
using STensor = SparseContainer;
using Base = IContainer;
Dot::Dot(const IndexPairList& indices, pair<bool, bool> shouldHC)
    : _indices{indices}, _hc{shouldHC} {
    for (auto& elem : _indices) {
        _idxLeft.insert(elem.first);
        _idxRight.insert(elem.second);
    }
}
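// Usage sketch (illustration only; 'lhs' and 'rhs' are assumed to be
// TensorData obtained elsewhere): contract index 1 of the left tensor with
// index 0 of the right tensor, conjugating neither operand. calc2 and the
// "dot" tag are the same ones used by the operator() overloads below.
//
//   IndexPairList pairs{{1, 0}};
//   Ops::Dot dotter{pairs, {false, false}};
//   TensorData contracted = calc2(std::move(lhs), *rhs, dotter, "dot");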
if (newdimlabs.first.size() == 0) {
    // Scalar result: every index is contracted away.
    // ...
    for (size_t i = 0; i < a.numValues(); ++i) {
        // ...
        auto pospairs = a.getIndexing().splitPosition(i, stridesA);
        // ...
        newscal->element({}) += firstTerm * secondTerm;
    }
    return newscal.release();
}
// ...
auto stridesB = b.getIndexing().getOuterStrides2nd(_indices);
// ...
for (size_t i = 0; i < a.numValues(); ++i) {
    // ...
    auto pospairs = a.getIndexing().splitPosition(i, stridesA);
    for (size_t j = 0; j < reduced; ++j) {
        auto posB = b.getIndexing().build2ndPosition(j, pospairs.second, stridesB);
        // ...
        (*result)[pospairs.first * reduced + j] += firstTerm * secondTerm;
    }
}
// ...
return static_cast<Base*>(result);
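// Illustration only (not part of the original source): the dense branch above
// addresses the result as (*result)[outer * reduced + j], where "outer" spans
// the kept indices of a and "reduced" spans the kept indices of b. For two
// rank-2 tensors contracted over one index pair this collapses to a row-major
// matrix product; names in dot_doc_dense are hypothetical.
#include <cstddef>
#include <vector>

namespace dot_doc_dense {
std::vector<double> dot2(const std::vector<double>& a,  // m x k, row-major
                         const std::vector<double>& b,  // k x n, row-major
                         std::size_t m, std::size_t k, std::size_t n) {
    std::vector<double> result(m * n, 0.);
    for (std::size_t i = 0; i < m * k; ++i) {      // iterate over a's stored values
        std::size_t outer = i / k, inner = i % k;  // splitPosition analogue
        for (std::size_t j = 0; j < n; ++j) {      // reduced == n
            result[outer * n + j] += a[i] * b[inner * n + j];
        }
    }
    return result;
}
}  // namespace dot_doc_dense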
IndexList inners((a.dims().size() + b.dims().size() - newdimlabs.first.size()) / 2);
vector<bool> innerAdds(inners.size(), false);
if (newdimlabs.first.size() == 0) {
    // ...
    for (auto& elemL : a) {
        a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo), get<1>(leftinfo),
                                      inners, innerAdds);
        // ...
        for (auto& elemR : b) {
            auto tmpRight = b.getIndexing().splitPosition(elemR.first, get<0>(rightinfo),
                                                          get<1>(rightinfo), inners, innerAdds, true);
            if (tmpRight == numeric_limits<size_t>::max()) continue;
            // ...
            newscal->element({}) += firstTerm * secondTerm;
        }
    }
    return newscal.release();
}
// ...
for (auto& elemL : a) {
    auto tmpLeft = a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo), get<1>(leftinfo),
                                                 inners, innerAdds);
    // ...
    for (auto& elemR : b) {
        auto tmpRight = b.getIndexing().splitPosition(elemR.first, get<0>(rightinfo),
                                                      get<1>(rightinfo), inners, innerAdds, true);
        if (tmpRight == numeric_limits<size_t>::max()) continue;
        // ...
        (*result)[tmpLeft * get<2>(rightinfo) + tmpRight] += firstTerm * secondTerm;
    }
}
// ...
return static_cast<Base*>(result);
IndexList inners((a.dims().size() + b.dims().size() - newdimlabs.first.size()) / 2);
vector<bool> innerAdds(inners.size(), false);
if (newdimlabs.first.size() == 0) {
    // ...
    for (auto& elemL : a) {
        a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo),
                                      get<1>(leftinfo), inners, innerAdds);
        // ...
        newscal->element({}) += firstTerm * secondTerm;
    }
    // ...
    for (auto& elemL : a) {
        a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo),
                                      get<1>(leftinfo), inners, innerAdds);
        // ...
        newscal->element({}) += firstTerm * secondTerm;
    }
    return newscal.release();
}
// ...
if (b.rank() == _indices.size()) {
    // b is fully contracted: the result keeps only a's free indices.
    // ...
    for (auto& elemL : a) {
        auto tmpLeft = a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo), get<1>(leftinfo),
                                                     inners, innerAdds);
        // ...
        (*result)[tmpLeft] += firstTerm * secondTerm;
    }
    // ...
    for (auto& elemL : a) {
        auto tmpLeft = a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo), get<1>(leftinfo),
                                                     inners, innerAdds);
        // ...
        (*result)[tmpLeft] += firstTerm * secondTerm;
    }
} else {
    // ...
    for (auto& elemL : a) {
        auto tmpLeft = a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo),
                                                     get<1>(leftinfo), inners, innerAdds);
        // ...
        for (; *itB != *itBe; itB->next()) {
            PositionType alPos = fakeB.posToAlignedPos(itB->position());
            auto tmpRight = fakeB.splitPosition(alPos, get<0>(rightinfo), get<1>(rightinfo),
                                                inners, innerAdds, true);
            if (tmpRight == numeric_limits<size_t>::max()) continue;
            // ...
            (*result)[tmpLeft * get<2>(rightinfo) + tmpRight] += firstTerm * secondTerm;
        }
    }
}
// ...
return static_cast<Base*>(result);
return {false, false};
for (auto& elemA : a) {
    for (auto& elemB : b) {
        // ...
        vector<pair<size_t, size_t>> multiplicities(leftinfo.size());
        vector<bool> used(leftinfo.size(), false);
        for (size_t i = 0; i < leftinfo.size(); ++i) {
            if (used[i]) continue;
            // ...
            for (size_t j = i + 1; j < leftinfo.size(); ++j) {
                auto tmp = isSameDot(elemA, elemB, chunks[i + 1], chunks[j + 1]);
                // ...
            }
            // ...
            multiplicities[i] = {count, countHc};
        }
        // ...
        for (size_t i = 0; i < leftinfo.size(); ++i) {
            if (multiplicities[i].first + multiplicities[i].second == 0) continue;
            // ...
            leftTensors.reserve(get<0>(chunks[i + 1]).size());
            transform(get<0>(chunks[i + 1]).begin(), get<0>(chunks[i + 1]).end(),
                      back_inserter(leftTensors),
                      [&](IndexType idx) -> const pair<SharedTensorData, bool>& { return elemA[idx]; });
            // ...
            rightTensors.reserve(get<1>(chunks[i + 1]).size());
            transform(get<1>(chunks[i + 1]).begin(), get<1>(chunks[i + 1]).end(),
                      back_inserter(rightTensors),
                      [&](IndexType idx) -> const pair<SharedTensorData, bool>& { return elemB[idx]; });
            IndexList inners(get<2>(chunks[i + 1]).size());
            vector<bool> innerAdds(inners.size(), false);
            auto newdimlabs = getNewIndexLabels(a.getIndexing(), b.getIndexing(), chunks[i + 1]);
            // ...
            size_t totalRankB = accumulate(rightTensors.begin(), rightTensors.end(), 0ul,
                [](PositionType tot, const pair<SharedTensorData, bool>& elem) -> PositionType {
                    return tot + elem.first->rank();
                });
if (totalRankB == inners.size()) {
    IndexList::iterator itP1, itP2;
    if (newdimlabs.first.size() == 0) {
        // ...
        for (; itA != itAEnd; ++itA) {
            itP1 = inners.begin();
            a.getIndexing().splitPosition(itA, chunks[i + 1], get<0>(leftinfo[i]),
                                          get<1>(leftinfo[i]), inners, innerAdds);
            // ...
            for (auto& entry : rightTensors) {
                itP2 = itP1 + static_cast<ptrdiff_t>(entry.first->rank());
                auto secondTerm = entry.second
                                      ? conj(entry.first->element(itP1, itP2))
                                      : entry.first->element(itP1, itP2);
                firstTerm *= secondTerm;
                // ...
            }
            newscal += firstTerm;
        }
        currentWeight *=
            pow(newscal, multiplicities[i].first) * pow(conj(newscal), multiplicities[i].second);
    } else {
        // ...
        for (; itA != itAEnd; ++itA) {
            itP1 = inners.begin();
            auto tmpLeft = a.getIndexing().splitPosition(itA, chunks[i + 1], get<0>(leftinfo[i]),
                                                         get<1>(leftinfo[i]), inners, innerAdds);
            // ...
            for (auto& entry : rightTensors) {
                itP2 = itP1 + static_cast<ptrdiff_t>(entry.first->rank());
                auto secondTerm = entry.second
                                      ? conj(entry.first->element(itP1, itP2))
                                      : entry.first->element(itP1, itP2);
                firstTerm *= secondTerm;
                // ...
            }
            (*result)[tmpLeft] += firstTerm;
        }
        // ...
        currentTerm.insert(currentTerm.end(), multiplicities[i].first, {tmpShared, false});
        currentTerm.insert(currentTerm.end(), multiplicities[i].second, {tmpShared, true});
    }
} else {
    if (newdimlabs.first.size() == 0) {
        // ...
        for (; itA != itAEnd; ++itA) {
            a.getIndexing().splitPosition(itA, chunks[i + 1], get<0>(leftinfo[i]),
                                          get<1>(leftinfo[i]), inners, innerAdds);
            // ...
            for (; itB != itBEnd; ++itB) {
                auto tmpRight = b.getIndexing().splitPosition(
                    itB, chunks[i + 1], get<0>(rightinfo[i]), get<1>(rightinfo[i]), inners,
                    innerAdds, true);
                if (tmpRight == numeric_limits<size_t>::max()) continue;
                // ...
                newscal += firstTerm * secondTerm;
            }
        }
        currentWeight *= pow(newscal, multiplicities[i].first) *
                         pow(conj(newscal), multiplicities[i].second);
    } else {
        // ...
        for (; itA != itAEnd; ++itA) {
            auto tmpLeft = a.getIndexing().splitPosition(itA, chunks[i + 1], get<0>(leftinfo[i]),
                                                         get<1>(leftinfo[i]), inners, innerAdds);
            // ...
            for (; itB != itBEnd; ++itB) {
                auto tmpRight = b.getIndexing().splitPosition(
                    itB, chunks[i + 1], get<0>(rightinfo[i]), get<1>(rightinfo[i]), inners,
                    innerAdds, true);
                if (tmpRight == numeric_limits<size_t>::max()) continue;
                // ...
                (*result)[tmpLeft * get<2>(rightinfo[i]) + tmpRight] +=
                    firstTerm * secondTerm;
            }
        }
        // ...
        currentTerm.insert(currentTerm.end(), multiplicities[i].first, {tmpShared, false});
        currentTerm.insert(currentTerm.end(), multiplicities[i].second, {tmpShared, true});
    }
}
for (auto elem : get<0>(chunks[0])) {
    currentTerm.insert(currentTerm.end(),
                       {elemA[elem].first, !elemA[elem].second != !_hc.first});
}
for (auto elem : get<1>(chunks[0])) {
    currentTerm.insert(currentTerm.end(),
                       {elemB[elem].first, !elemB[elem].second != !_hc.second});
}
if (currentTerm.size() == 0) {
    if (fullResult.get() == nullptr) {
        fullResult = makeScalar(currentWeight.real());
    } else {
        fullResult->element({}) += currentWeight.real();
    }
} else if (currentTerm.size() == 1) {
    TensorData out = currentTerm[0].first->clone();
    if (currentTerm[0].second) {
        // ...
    }
    if (!isZero(currentWeight - 1.)) {
        out->operator*=(currentWeight);
    }
    if (fullResult.get() == nullptr) {
        fullResult = move(out);
    } else {
        fullResult = calc2(move(fullResult), *out, summer, "sum_outerdot");
    }
} else {
    if (!isZero(currentWeight - 1.)) {
        auto temp = currentTerm.back();
        currentTerm.pop_back();
        auto candNew = temp.first->clone();
        candNew->operator*=(temp.second ? conj(currentWeight) : currentWeight);
        // ...
    }
    if (oResult != nullptr) {
        // ...
    } else {
        oResult = static_cast<OTensor*>(fullResult.get());
    }
}
// ...
return static_cast<Base*>(fullResult.release());
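// Illustration only (an assumed simplification of the branch above): how a
// complex weight is folded into one factor of a product term, as done with
// candNew. A factor flagged "conjugated" absorbs conj(weight), so evaluating
// the term afterwards reproduces weight * (original term). Names in
// dot_doc_weight are hypothetical.
#include <complex>
#include <utility>
#include <vector>

namespace dot_doc_weight {
using Factor = std::pair<std::complex<double>, bool>;  // (value, conjugate?)

// Product of all factors, conjugating the flagged ones.
std::complex<double> evaluate(const std::vector<Factor>& term) {
    std::complex<double> prod{1., 0.};
    for (const auto& f : term) prod *= f.second ? std::conj(f.first) : f.first;
    return prod;
}

// Multiply the term by `weight` without adding a new factor: scale the last
// factor, pre-conjugating the weight if that factor is evaluated conjugated.
void absorbWeight(std::vector<Factor>& term, std::complex<double> weight) {
    auto& last = term.back();
    last.first *= last.second ? std::conj(weight) : weight;
}
}  // namespace dot_doc_weight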
ASSERT(get<1>(chunks[0]).size() == 0);
ASSERT(get<2>(chunks[0]).size() == 0);
ASSERT(get<1>(chunks[1]).size() == 0);
// ...
ASSERT(leftinfo.size() == 1);
for (auto& elemA : a) {
    // ...
    leftTensors.reserve(get<0>(chunks[1]).size());
    transform(get<0>(chunks[1]).begin(), get<0>(chunks[1]).end(),
              back_inserter(leftTensors),
              [&](IndexType idx) -> const pair<SharedTensorData, bool>& { return elemA[idx]; });
    IndexList inners(get<2>(chunks[1]).size());
    vector<bool> innerAdds(inners.size(), false);
    // ...
    if (newdimlabs.first.size() == 0) {
        // ...
        for (; itA != itAEnd; ++itA) {
            a.getIndexing().splitPosition(itA, chunks[1], get<0>(leftinfo[0]),
                                          get<1>(leftinfo[0]), inners, innerAdds);
            // ...
            for (auto& elemR : b) {
                auto tmpRight = b.getIndexing().splitPosition(
                    elemR.first, get<0>(rightinfo), get<1>(rightinfo), inners, innerAdds, true);
                if (tmpRight == numeric_limits<size_t>::max()) continue;
                // ...
                newscal += firstTerm * secondTerm;
            }
        }
        currentWeight *= newscal;
    } else {
        // ...
        for (; itA != itAEnd; ++itA) {
            auto tmpLeft = a.getIndexing().splitPosition(
                itA, chunks[1], get<0>(leftinfo[0]), get<1>(leftinfo[0]), inners, innerAdds);
            // ...
            for (auto& elemR : b) {
                auto tmpRight = b.getIndexing().splitPosition(
                    elemR.first, get<0>(rightinfo), get<1>(rightinfo), inners, innerAdds, true);
                if (tmpRight == numeric_limits<size_t>::max()) continue;
                // ...
                (*result)[tmpLeft * get<2>(rightinfo) + tmpRight] += firstTerm * secondTerm;
            }
        }
        // ...
        currentTerm.push_back({tmpShared, false});
    }
    for (auto elem : get<0>(chunks[0])) {
        currentTerm.insert(currentTerm.end(),
                           {elemA[elem].first, !elemA[elem].second != !_hc.first});
    }
    if (currentTerm.size() == 0) {
        if (fullResult.get() == nullptr) {
            fullResult = makeScalar(currentWeight.real());
        } else {
            fullResult->element({}) += currentWeight.real();
        }
    } else if (currentTerm.size() == 1) {
        TensorData out = currentTerm[0].first->clone();
        if (currentTerm[0].second) {
            // ...
        }
        if (!isZero(currentWeight - 1.)) {
            out->operator*=(currentWeight);
        }
        if (fullResult.get() == nullptr) {
            fullResult = move(out);
        } else {
            fullResult = calc2(move(fullResult), *out, summer, "sum_outerdot");
        }
    } else {
        if (!isZero(currentWeight - 1.)) {
            auto temp = currentTerm.back();
            currentTerm.pop_back();
            auto candNew = temp.first->clone();
            candNew->operator*=(temp.second ? conj(currentWeight) : currentWeight);
            // ...
        }
        if (oResult != nullptr) {
            // ...
        } else {
            oResult = static_cast<OTensor*>(fullResult.get());
        }
    }
}
return static_cast<Base*>(fullResult.release());
map<IndexType, IndexPairList, greater<IndexType>> edges;
for (auto& elem : _indices) {
    // ...
    edges[keyvalA.first].push_back({keyvalA.second, elem.second});
}
// ...
for (auto& elem : edges) {
    for (auto& elem2 : elem.second) {
        elem2.second = static_cast<IndexType>(elem2.second + tmpshift);
    }
}
// ...
if (newdimlabs.first.size() == 0) {
    // ...
    for (auto& elem : a) {
        // ...
        for (auto& elem2 : edges) {
            // ...
            Ops::Dot dotter{elem2.second, {elem[elem2.first].second, false}};
            // ...
            current_entry = calc2(elem[elem2.first].first, *current_entry, dotter, "dot_outerdot");
            // ...
            current_entry = calc2(elem[elem2.first].first, b, dotter, "dot_outerdot");
        }
        // ...
        if (result->rank() == 0) {
            result->element({}) += current_entry->element({});
        } else {
            result = calc2(move(result), *current_entry, summer, "sum_outerdot");
        }
    }
    return result.release();
}
newdata.reserve(a.numAddends());
vector<pair<SharedTensorData, bool>> current_entries(a.getIndexing().numSubIndexing() -
                                                     edges.size() + 1);
for (auto& elem : a) {  // one pass per addend of the outer sum
    size_t dotIdx = current_entries.size();
    size_t curInIdx = elem.size() - 1;
    size_t curOutIdx = current_entries.size() - 1;
    for (auto& elem2 : edges) {
        if (elem2.first != curInIdx) {
            // Copy through the sub-tensors not touched by any contraction.
            for (; curInIdx != elem2.first; --curInIdx, --curOutIdx) {
                current_entries[curOutIdx] = elem[curInIdx];
            }
        }
        // ...
        Ops::Dot dotter{elem2.second, {elem[curInIdx].second, false}};
        if (dotIdx < current_entries.size()) {
            current_entries[dotIdx].first =
                calc2(elem[curInIdx].first, *(current_entries[dotIdx].first), dotter, "dot_outerdot");
        } else {
            dotIdx = curOutIdx--;
            current_entries[dotIdx].first = calc2(elem[curInIdx].first, b, dotter, "dot_outerdot");
            current_entries[dotIdx].second = false;
        }
    }
    // ...
    newdata.push_back(current_entries);
}
// ...
vector<IndexList> dimlist;
vector<LabelsList> lablist;
for (auto elem : newdata[0]) {
    dimlist.push_back(elem.first->dims());
    lablist.push_back(elem.first->labels());
}
// ...
a.swapIndexing(BlockIndexing{dimlist, lablist});
return static_cast<Base*>(&a);
ASSERT(rightinfo.size() == 1);
for (auto& elemB : b) {
    // ...
    rightTensors.reserve(get<1>(chunks[1]).size());
    transform(get<1>(chunks[1]).begin(), get<1>(chunks[1]).end(),
              back_inserter(rightTensors),
              [&](IndexType idx) -> const pair<SharedTensorData, bool>& { return elemB[idx]; });
    IndexList inners(get<2>(chunks[1]).size());
    vector<bool> innerAdds(inners.size(), false);
    auto newdimlabs = getNewIndexLabels(a.getIndexing(), b.getIndexing(), chunks[1]);
    size_t totalRankB = accumulate(rightTensors.begin(), rightTensors.end(), 0ul,
        [](PositionType tot, const pair<SharedTensorData, bool>& elem) -> PositionType {
            return tot + elem.first->rank();
        });
    if (totalRankB == inners.size()) {
        IndexList::iterator itP1, itP2;
        if (newdimlabs.first.size() == 0) {
            // ...
            for (auto& elemL : a) {
                itP1 = inners.begin();
                a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo), get<1>(leftinfo),
                                              inners, innerAdds);
                // ...
                for (auto& entry : rightTensors) {
                    itP2 = itP1 + static_cast<ptrdiff_t>(entry.first->rank());
                    auto secondTerm = entry.second
                                          ? conj(entry.first->element(itP1, itP2))
                                          : entry.first->element(itP1, itP2);
                    firstTerm *= secondTerm;
                    // ...
                }
                newscal += firstTerm;
            }
            currentWeight *= newscal;
        } else {
            // ...
            for (auto& elemL : a) {
                itP1 = inners.begin();
                auto tmpLeft = a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo),
                                                             get<1>(leftinfo), inners, innerAdds);
                // ...
                for (auto& entry : rightTensors) {
                    itP2 = itP1 + static_cast<ptrdiff_t>(entry.first->rank());
                    auto secondTerm = entry.second
                                          ? conj(entry.first->element(itP1, itP2))
                                          : entry.first->element(itP1, itP2);
                    firstTerm *= secondTerm;
                    // ...
                }
                (*result)[tmpLeft] += firstTerm;
            }
            // ...
            currentTerm.push_back({tmpShared, false});
        }
    } else {
        if (newdimlabs.first.size() == 0) {
            // ...
            for (auto& elemL : a) {
                a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo), get<1>(leftinfo),
                                              inners, innerAdds);
                // ...
                for (; itB != itBEnd; ++itB) {
                    auto tmpRight = b.getIndexing().splitPosition(
                        itB, chunks[1], get<0>(rightinfo[0]), get<1>(rightinfo[0]), inners,
                        innerAdds, true);
                    if (tmpRight == numeric_limits<size_t>::max()) continue;
                    // ...
                    newscal += firstTerm * secondTerm;
                }
            }
            currentWeight *= newscal;
        } else {
            // ...
            for (auto& elemL : a) {
                auto tmpLeft = a.getIndexing().splitPosition(elemL.first, get<0>(leftinfo),
                                                             get<1>(leftinfo), inners, innerAdds);
                // ...
                for (; itB != itBEnd; ++itB) {
                    auto tmpRight = b.getIndexing().splitPosition(
                        itB, chunks[1], get<0>(rightinfo[0]), get<1>(rightinfo[0]), inners,
                        innerAdds, true);
                    if (tmpRight == numeric_limits<size_t>::max()) continue;
                    // ...
                    (*result)[tmpLeft * get<2>(rightinfo[0]) + tmpRight] += firstTerm * secondTerm;
                }
            }
            // ...
            currentTerm.push_back({tmpShared, false});
        }
    }
    for (auto elem : get<1>(chunks[0])) {
        currentTerm.insert(currentTerm.end(),
                           {elemB[elem].first, !elemB[elem].second != !_hc.second});
    }
    if (currentTerm.size() == 0) {
        if (fullResult.get() == nullptr) {
            fullResult = makeScalar(currentWeight.real());
        } else {
            fullResult->element({}) += currentWeight.real();
        }
    } else if (currentTerm.size() == 1) {
        TensorData out = currentTerm[0].first->clone();
        if (currentTerm[0].second) {
            // ...
        }
        if (!isZero(currentWeight - 1.)) {
            out->operator*=(currentWeight);
        }
        if (fullResult.get() == nullptr) {
            fullResult = move(out);
        } else {
            fullResult = calc2(move(fullResult), *out, summer, "sum_outerdot");
        }
    } else {
        if (!isZero(currentWeight - 1.)) {
            auto temp = currentTerm.back();
            currentTerm.pop_back();
            auto candNew = temp.first->clone();
            candNew->operator*=(temp.second ? conj(currentWeight) : currentWeight);
            // ...
        }
        if (oResult != nullptr) {
            // ...
        } else {
            oResult = static_cast<OTensor*>(fullResult.get());
        }
    }
}
return static_cast<Base*>(fullResult.release());
auto newdimlabs = getNewIndexLabels(a, b);
if (newdimlabs.first.size() == 0) {
    // Fully contracted: accumulate a single scalar.
    // ...
    for (auto elem : bf) {
        // ...
        for (auto idx : _indices) {
            fixed[idx.second] = elem[idx.first];
        }
        // ...
    }
    newscalar->element({}) = res;
    return newscalar.release();
}
// ...
Base* results = tmp.release();
// ...
for (auto elem : bf) {
    // ...
    for (auto idx : _indices) {
        fixed[idx.second] = elem[idx.first];
    }
    // ...
    for (auto elem2 : bf2) {
        // ...
        auto idxRes = combineIndex(elem, elem2);
        results->element(idxRes) += aVal * bVal;
    }
}
// ...
throw Error("Invalid data types for tensor Dot");
// ...
result.erase(result.begin() + elem);
// ...
size_t base = result.size();
result.insert(result.end(), b.begin(), b.end());
// ...
result.erase(result.begin() + static_cast<ptrdiff_t>(base + elem));
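// Illustration only (a reading of combineIndex reconstructed from the
// fragments above, with hypothetical plain-index types): drop the contracted
// slots from each operand's multi-index, then concatenate what is left.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

namespace dot_doc_combine {
using IdxList = std::vector<std::size_t>;
using IdxPairList = std::vector<std::pair<std::size_t, std::size_t>>;

IdxList combineIndexSketch(IdxList a, const IdxList& b, IdxPairList contractions) {
    // Erase a's contracted slots from highest to lowest so that earlier
    // erasures do not shift the positions of later ones.
    std::sort(contractions.begin(), contractions.end(),
              [](const auto& x, const auto& y) { return x.first > y.first; });
    for (const auto& c : contractions)
        a.erase(a.begin() + static_cast<std::ptrdiff_t>(c.first));
    std::size_t base = a.size();
    a.insert(a.end(), b.begin(), b.end());
    // Same for b's contracted slots, now offset by the surviving left block.
    std::sort(contractions.begin(), contractions.end(),
              [](const auto& x, const auto& y) { return x.second > y.second; });
    for (const auto& c : contractions)
        a.erase(a.begin() + static_cast<std::ptrdiff_t>(base + c.second));
    return a;
}
}  // namespace dot_doc_combine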
pair<IndexList, LabelsList> Dot::getNewIndexLabels(const Base& first, const Base& second) const {
    // ...
    resultD.erase(resultD.begin() + elem);
    resultL.erase(resultL.begin() + elem);
    // ...
    size_t base = resultD.size();
    // ...
    resultD.insert(resultD.end(), dim2.begin(), dim2.end());
    resultL.insert(resultL.end(), lab2.begin(), lab2.end());
    // ...
    resultD.erase(resultD.begin() + static_cast<ptrdiff_t>(base + elem));
    resultL.erase(resultL.begin() + static_cast<ptrdiff_t>(base + elem));
    // ...
    return make_pair(resultD, resultL);
}
return calc2(move(origin), other, dotter, "dot");
// ...
                          const std::string& type) const {
template <size_t N, typename U, typename... Types>
typename enable_if<is_convertible<vector<U>, typename tuple_element<N, tuple<Types...>>::type>::value,
                   bool>::type
matchPartitions(const tuple<Types...>& data, U value) {
    return find(get<N>(data).begin(), get<N>(data).end(), value) != get<N>(data).end();
}
template <size_t N, typename U, typename... Types>
typename enable_if<is_convertible<vector<U>, typename tuple_element<N, tuple<Types...>>::type>::value,
                   void>::type
addPartitionEntry(tuple<Types...>& data, U value) {
    if (find(get<N>(data).begin(), get<N>(data).end(), value) == get<N>(data).end())
        get<N>(data).push_back(value);
}
template <size_t N, typename... Types>
typename enable_if<(N < sizeof...(Types)), void>::type
appendPartitionEntries(const tuple<Types...>& from, tuple<Types...>& to) {
    get<N>(to).insert(get<N>(to).end(), get<N>(from).begin(), get<N>(from).end());
}
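// Usage sketch for the three helpers above (illustration only): they all
// address one slot of a DotGroupType, i.e. a
// tuple<IndexList, IndexList, IndexPairList>, selected by N.
//
//   DotGroupType g{};
//   addPartitionEntry<0>(g, lloc);             // record a left sub-tensor, once
//   addPartitionEntry<1>(g, rloc);             // record a right sub-tensor, once
//   addPartitionEntry<2>(g, IndexPair{l, r});  // record the contraction itself
//   bool seen = matchPartitions<0>(g, lloc);   // is lloc already in slot 0?
//   appendPartitionEntries<2>(g, other);       // splice g's contractions into other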
// ...
vector<bool> validPartitions;
// ...
iota(lfree.begin(), lfree.end(), 0);
iota(rfree.begin(), rfree.end(), 0);
// ...
auto frontelem = make_tuple<IndexList, IndexList, IndexPairList>({}, {}, {});
partitions.push_back(frontelem);
validPartitions.push_back(true);
for (auto& elem : _indices) {
    // ...
    lfree.erase(remove(lfree.begin(), lfree.end(), lloc), lfree.end());
    rfree.erase(remove(rfree.begin(), rfree.end(), rloc), rfree.end());
    vector<size_t> finds{};
    auto match = [&](size_t pos, IndexType valL, IndexType valR) -> bool {
        const auto& data = partitions[pos];
        return matchPartitions<0>(data, valL) || matchPartitions<1>(data, valR);
    };
    for (size_t i = 0; i < partitions.size(); ++i) {
        if (validPartitions[i] && match(i, lloc, rloc)) {
            finds.push_back(i);
        }
    }
    auto merge = [&](IndexType lidx, IndexType ridx, const IndexPair& contraction) -> void {
        auto& data = partitions[finds[0]];
        addPartitionEntry<0>(data, lidx);
        addPartitionEntry<1>(data, ridx);
        addPartitionEntry<2>(data, contraction);
    };
    auto merge_range = [&](size_t other) -> void {
        auto& from = partitions[finds[other]];
        auto& to = partitions[finds[0]];
        appendPartitionEntries<0>(from, to);
        appendPartitionEntries<1>(from, to);
        appendPartitionEntries<2>(from, to);
    };
    if (finds.size() > 0) {
        merge(lloc, rloc, elem);
        for (size_t idx = 1; idx < finds.size(); ++idx) {
            if (validPartitions[finds[idx]]) {
                merge_range(idx);
                validPartitions[finds[idx]] = false;
            }
        }
    } else {
        auto newelem = make_tuple<IndexList, IndexList, IndexPairList>({lloc}, {rloc}, {elem});
        partitions.push_back(newelem);
        validPartitions.push_back(true);
    }
}
// ...
for (size_t i = partitions.size(); 0 < i--;) {
    if (!validPartitions[i]) partitions.erase(partitions.begin() + static_cast<ptrdiff_t>(i));
}
// ...
for (auto& elem : partitions) {
    std::sort(get<2>(elem).begin(), get<2>(elem).end(), /* ... */);
}
// ...
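// Illustration only (assumed behaviour, mirroring the merge logic above):
// partitionContractions groups the requested contractions into independent
// chunks, where two contractions share a chunk when they touch the same left
// or right sub-tensor. A stand-alone sketch of that grouping, with
// hypothetical names in dot_doc_chunks:
#include <cstddef>
#include <utility>
#include <vector>

namespace dot_doc_chunks {
using Pair = std::pair<unsigned, unsigned>;  // (left sub-tensor, right sub-tensor)

std::vector<std::vector<Pair>> groupContractions(const std::vector<Pair>& indices) {
    std::vector<std::vector<Pair>> groups;
    for (const auto& c : indices) {
        std::vector<std::size_t> hits;  // groups sharing an endpoint with c
        for (std::size_t g = 0; g < groups.size(); ++g) {
            for (const auto& p : groups[g]) {
                if (p.first == c.first || p.second == c.second) {
                    hits.push_back(g);
                    break;  // one hit per group is enough
                }
            }
        }
        if (hits.empty()) {
            groups.push_back({c});  // start a new independent chunk
        } else {
            groups[hits[0]].push_back(c);
            // c connects several chunks: merge them into the first, back to front
            for (std::size_t k = hits.size(); k-- > 1;) {
                auto& from = groups[hits[k]];
                groups[hits[0]].insert(groups[hits[0]].end(), from.begin(), from.end());
                groups.erase(groups.begin() + static_cast<std::ptrdiff_t>(hits[k]));
            }
        }
    }
    return groups;
}
}  // namespace dot_doc_chunks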
map<IndexType, IndexType> leftPosMaps;
map<IndexType, IndexType> rightPosMaps;
// ...
for (auto elem : get<0>(chunk)) {
    leftPosMaps.insert({elem, offset});
    // ...
}
for (auto elem : get<1>(chunk)) {
    rightPosMaps.insert({elem, offset});
    // ...
    labels.insert(labels.end(), rhs.getSubIndexing(elem).labels().begin(),
                  rhs.getSubIndexing(elem).labels().end());
    // ...
}
for (auto& elem : get<2>(chunk)) {
    // ...
    deletes.insert(leftPosMaps[left.first] + left.second);
    deletes.insert(rightPosMaps[right.first] + right.second);
}
// Erase from the back so the remaining positions stay valid.
for (auto elem : reverse_range(deletes)) {
    dims.erase(dims.begin() + static_cast<ptrdiff_t>(elem));
    labels.erase(labels.begin() + static_cast<ptrdiff_t>(elem));
}
return {dims, labels};
iota(rfree.begin(), rfree.end(), 0);
get<2>(partitions[1]) = _indices;
for (auto& elem : _indices) {
    // ...
    rfree.erase(remove(rfree.begin(), rfree.end(), rloc), rfree.end());
    get<1>(partitions[1]).push_back(rloc);
}
// ...
for (auto& elem : partitions) {
    std::sort(get<2>(elem).begin(), get<2>(elem).end(), /* ... */);
}
iota(lfree.begin(), lfree.end(), 0);
get<2>(partitions[1]) = _indices;
for (auto& elem : _indices) {
    // ...
    lfree.erase(remove(lfree.begin(), lfree.end(), lloc), lfree.end());
    get<0>(partitions[1]).push_back(lloc);
}
// ...
for (auto& elem : partitions) {
    std::sort(get<2>(elem).begin(), get<2>(elem).end(), /* ... */);
}
map<IndexType, IndexType> rightPosMaps;
// ...
IndexList ldims = lhs.dims();
dims.insert(dims.end(), ldims.begin(), ldims.end());
LabelsList llabs = lhs.labels();
labels.insert(labels.end(), llabs.begin(), llabs.end());
offset = static_cast<IndexType>(offset + lhs.rank());
for (auto elem : get<1>(chunk)) {
    rightPosMaps.insert({elem, offset});
    // ...
    labels.insert(labels.end(), rhs.getSubIndexing(elem).labels().begin(),
                  rhs.getSubIndexing(elem).labels().end());
    // ...
}
set<size_t> deletes;
for (auto& elem : get<2>(chunk)) {
    // ...
    deletes.insert(elem.first);
    deletes.insert(rightPosMaps[right.first] + right.second);
}
// Erase from the back so the remaining positions stay valid.
for (auto elem : reverse_range(deletes)) {
    dims.erase(dims.begin() + static_cast<ptrdiff_t>(elem));
    labels.erase(labels.begin() + static_cast<ptrdiff_t>(elem));
}
return {dims, labels};
map<IndexType, IndexType> leftPosMaps;
// ...
for (auto elem : get<0>(chunk)) {
    leftPosMaps.insert({elem, offset});
    // ...
    labels.insert(labels.end(), lhs.getSubIndexing(elem).labels().begin(),
                  lhs.getSubIndexing(elem).labels().end());
    // ...
}
IndexList rdims = rhs.dims();
dims.insert(dims.end(), rdims.begin(), rdims.end());
LabelsList rlabs = rhs.labels();
labels.insert(labels.end(), rlabs.begin(), rlabs.end());
set<size_t> deletes;
for (auto& elem : get<2>(chunk)) {
    // ...
    deletes.insert(leftPosMaps[left.first] + left.second);
    deletes.insert(offset + elem.second);
}
// Erase from the back so the remaining positions stay valid.
for (auto elem : reverse_range(deletes)) {
    dims.erase(dims.begin() + static_cast<ptrdiff_t>(elem));
    labels.erase(labels.begin() + static_cast<ptrdiff_t>(elem));
}
return {dims, labels};
}

}  // namespace MultiDimensional