30 namespace MultiDimensional {
32 using VTensor = VectorContainer;
33 using STensor = SparseContainer;
35 using Base = IContainer;
41 for (
auto& elem : _indices) {
42 _idxSet.insert(elem.first);
43 _idxSet.insert(elem.second);
51 vector<bool> innerAdds(inners.size(),
false);
52 if (newdimlabs.first.size() > 0) {
55 for (
auto& elem : a) {
56 auto pospairs = a.
getIndexing().splitPosition(elem.first, get<0>(strides), get<1>(strides), inners, innerAdds);
57 if (pospairs == numeric_limits<size_t>::max())
59 (*result)[pospairs] += elem.second;
61 return static_cast<Base*
>(result);
65 for (
auto& elem : a) {
67 a.getIndexing().splitPosition(elem.first, get<0>(strides), get<1>(strides), inners, innerAdds);
68 if (pospairs == numeric_limits<size_t>::max())
72 newscalar->element({}) = res;
73 return newscalar.release();
80 if (newdimlabs.first.size() > 0) {
83 for (
size_t i = 0; i < a.
numValues(); ++i) {
87 auto pospairs = a.
getIndexing().splitPosition(i, strides);
88 if (pospairs.second == 0) {
89 (*result)[pospairs.first] += a[i];
92 return static_cast<Base*
>(result);
97 for (
size_t i = 0; i < a.
numValues(); ++i) {
101 auto pospairs = a.
getIndexing().splitPosition(i, strides);
102 if (pospairs.second == 0) {
107 return newscalar.release();
114 for(
auto& elem: _indices) {
122 contraction.reserve(_indices.size());
124 for(
auto& elem: _indices) {
125 if(elem.first < elem.second) {
126 contraction.push_back({elem.first, elem.second - divider});
129 contraction.push_back({elem.second, elem.first - divider});
135 if (newdimlabs.first.size() == 0) {
142 for (
auto& elem : a) {
143 Ops::Dot dotter{contraction, {elem[0].second, elem[1].second}};
144 current_entry =
calc2(elem[0].first, *elem[1].first, dotter,
"dot_outertrace");
145 if(result->rank() == 0) {
146 result->element({}) += current_entry->element({});
149 result =
calc2(move(result), *current_entry, summer,
"sum_outertrace");
152 return result.release();
156 throw Error(
"Unimplemented tr O for general case");
185 throw Error(
"Invalid data types for tensor Trace");
188 std::pair<IndexList, LabelsList> Trace::getNewIndexLabels(
const IContainer& original)
const {
192 resultD.erase(resultD.begin() + elem);
193 resultL.erase(resultL.begin() + elem);
195 return make_pair(resultD, resultL);
201 result.erase(result.begin() + elem);
TensorData makeEmptySparse(const IndexList &dimensions, const LabelsList &labels)
const LabeledIndexing< SequentialIndexing > & getIndexing() const
const LabeledIndexing< AlignedIndexing > & getIndexing() const
virtual LabelsList labels() const =0
std::vector< IndexPair > IndexPairList
reversion_wrapper< T > reverse_range(T &&iterable)
Non-sparse tensor data container.
IndexList dims() const override
std::complex< double > ElementType
IContainer * operator()(VectorContainer &first)
TensorPtr calc2(TensorPtr origin, const IContainer &other, Ops op, std::string opName)
std::unique_ptr< IContainer > TensorData
std::shared_ptr< IContainer > SharedTensorData
Tensor operations helper functions.
size_t numSubIndexing() const
(Sum of) Outer product tensor data container.
Hammer exception definitions.
size_t numValues() const override
Sparse tensor data container.
std::vector< IndexType > IndexList
Order-0 tensor data container.
virtual IndexList dims() const =0
bool isZero(const std::complex< double > val)
const BlockIndexing & getIndexing() const
reference element(const IndexList &coords={}) override
const LabeledIndexing< AlignedIndexing > & getSubIndexing(IndexType position) const
TensorData makeEmptyScalar()
std::vector< IndexLabel > LabelsList
TensorData makeEmptyVector(const IndexList &dimensions, const LabelsList &labels)
Tensor dot product algorithm.
IndexPair getElementIndex(IndexType position) const
std::pair< IndexList, LabelsList > getNewIndexLabels(const IContainer &original) const