SpECTRE Documentation Coverage Report
Current view: top level - DataStructures/Tensor/Expressions - Evaluate.hpp Hit Total Coverage
Commit: 3ffcbc8ecf43797401b60bcca17d6040ee06f013 Lines: 6 6 100.0 %
Date: 2026-03-03 02:01:44
Legend: Lines: hit not hit

          Line data    Source code
       1           1 : // Distributed under the MIT License.
       2             : // See LICENSE.txt for details.
       3             : 
       4             : /// \file
       5             : /// Defines functions for evaluating `TensorExpression`s
       6             : 
       7             : #pragma once
       8             : 
       9             : #include <array>
      10             : #include <complex>
      11             : #include <cstddef>
      12             : #include <type_traits>
      13             : 
      14             : #include "DataStructures/ComplexDataVector.hpp"
      15             : #include "DataStructures/DataVector.hpp"
      16             : #include "DataStructures/Tensor/Expressions/DataTypeSupport.hpp"
      17             : #include "DataStructures/Tensor/Expressions/IndexPropertyCheck.hpp"
      18             : #include "DataStructures/Tensor/Expressions/LhsTensorSymmAndIndices.hpp"
      19             : #include "DataStructures/Tensor/Expressions/SpatialSpacetimeIndex.hpp"
      20             : #include "DataStructures/Tensor/Expressions/TensorExpression.hpp"
      21             : #include "DataStructures/Tensor/Expressions/TensorIndex.hpp"
      22             : #include "DataStructures/Tensor/Expressions/TensorIndexTransformation.hpp"
      23             : #include "DataStructures/Tensor/Expressions/TimeIndex.hpp"
      24             : #include "DataStructures/Tensor/Structure.hpp"
      25             : #include "DataStructures/Tensor/Tensor.hpp"
      26             : #include "Utilities/Algorithm.hpp"
      27             : #include "Utilities/ContainerHelpers.hpp"
      28             : #include "Utilities/ErrorHandling/Assert.hpp"
      29             : #include "Utilities/Gsl.hpp"
      30             : #include "Utilities/Requires.hpp"
      31             : #include "Utilities/TMPL.hpp"
      32             : 
      33             : namespace tenex {
      34             : namespace detail {
      35             : template <size_t NumIndices>
      36             : constexpr bool contains_indices_to_contract(
      37             :     const std::array<size_t, NumIndices>& tensorindices) {
      38             :   if constexpr (NumIndices < 2) {
      39             :     return false;
      40             :   } else {
      41             :     for (size_t i = 0; i < NumIndices - 1; i++) {
      42             :       for (size_t j = i + 1; j < NumIndices; j++) {
      43             :         const size_t current_tensorindex = gsl::at(tensorindices, i);
      44             :         // Concrete time indices are not contracted
      45             :         if ((not is_time_index_value(current_tensorindex)) and
      46             :             current_tensorindex == get_tensorindex_value_with_opposite_valence(
      47             :                                        gsl::at(tensorindices, j))) {
      48             :           return true;
      49             :         }
      50             :       }
      51             :     }
      52             :     return false;
      53             :   }
      54             : }
      55             : 
      56             : /// \brief Given the list of the positions of the LHS tensor's spacetime indices
      57             : /// where a generic spatial index is used and the list of positions where a
      58             : /// concrete time index is used, determine whether or not the component at the
      59             : /// given LHS multi-index should be computed
      60             : ///
      61             : /// \details
      62             : /// Not all of the LHS tensor's components may need to be computed. Cases when
/// the component at a LHS multi-index should not be evaluated:
      64             : /// - If a generic spatial index is used for a spacetime index on the LHS,
      65             : /// the components for which that index's concrete index is the time index
      66             : /// should not be computed
      67             : /// - If a concrete time index is used for a spacetime index on the LHS, the
      68             : /// components for which that index's concrete index is a spatial index should
      69             : /// not be computed
      70             : ///
      71             : /// \param lhs_multi_index the multi-index of the LHS tensor to check
      72             : /// \param lhs_spatial_spacetime_index_positions the positions of the LHS
      73             : /// tensor's spacetime indices where a generic spatial index is used
      74             : /// \param lhs_time_index_positions the positions of the LHS tensor's spacetime
      75             : /// indices where a concrete time index is used
      76             : /// \return Whether or not `lhs_multi_index` is a multi-index of a component of
      77             : /// the LHS tensor that should be computed
      78             : template <size_t NumLhsIndices, size_t NumLhsSpatialSpacetimeIndices,
      79             :           size_t NumLhsConcreteTimeIndices>
      80             : constexpr bool is_evaluated_lhs_multi_index(
      81             :     const std::array<size_t, NumLhsIndices>& lhs_multi_index,
      82             :     const std::array<size_t, NumLhsSpatialSpacetimeIndices>&
      83             :         lhs_spatial_spacetime_index_positions,
      84             :     const std::array<size_t, NumLhsConcreteTimeIndices>&
      85             :         lhs_time_index_positions) {
      86             :   for (size_t i = 0; i < lhs_spatial_spacetime_index_positions.size(); i++) {
      87             :     if (gsl::at(lhs_multi_index,
      88             :                 gsl::at(lhs_spatial_spacetime_index_positions, i)) == 0) {
      89             :       return false;
      90             :     }
      91             :   }
      92             :   for (size_t i = 0; i < lhs_time_index_positions.size(); i++) {
      93             :     if (gsl::at(lhs_multi_index, gsl::at(lhs_time_index_positions, i)) != 0) {
      94             :       return false;
      95             :     }
      96             :   }
      97             :   return true;
      98             : }
      99             : 
     100             : template <typename SymmList>
     101             : struct CheckNoLhsAntiSymmetries;
     102             : 
     103             : template <template <typename...> class SymmList, typename... Symm>
     104             : struct CheckNoLhsAntiSymmetries<SymmList<Symm...>> {
     105             :   static constexpr bool value = (... and (Symm::value > 0));
     106             : };
     107             : 
     108             : /// \brief Given a tensor and its list of tensor indices, return the
     109             : /// canonicalized order of the tensor indices according to the tensor's symmetry
     110             : ///
     111             : /// \details
     112             : /// The canonical ordering of a `Tensor`'s `TensorIndex`s
/// (e.g. `ti::a`, `ti::b`, `ti::c`) is relevant to sets of indices that are
     114             : /// symmetric. Within each set of symmetric indices, the `TensorIndex` used for
     115             : /// each index can be freely reordered. Given a set of symmetric indices, this
     116             : /// function defines the canonical order of the `TensorIndex`s assigned to them
     117             : /// to be such that the lowest index positions take any generic spatial tensor
     118             : /// indices (e.g. `ti::i`, `ti::j`), the next lowest index positions take any
     119             : /// generic spacetime indices (e.g. `ti::a`, `ti::b`), and the highest index
     120             : /// positions take any concrete time indices (e.g. `ti::t`, `ti::T`). We can
     121             : /// imagine the canonical ordering of symmetric indices to look generally like:
     122             : /// `[spatial indices ... | spacetime indices... | time indices...]`.
     123             : ///
     124             : /// Within the subsets of spatial, spacetime, and time indices, the
     125             : /// `TensorIndex`s in each will be ordered such that lowercase indices come
     126             : /// before uppercase, where both are ordered alphabetically. Another way of
     127             : /// saying this is that if we had a rank N `Tensor` that was fully symmetric,
     128             : /// its canonical ordering would take the following form:
     129             : ///
     130             : /// ```
     131             : /// [ti::i, ti::j, ti::k, ..., ti::I, ti::J, ti::K, ...,  // spatial
     132             : ///  ti::a, ti::b, ti::c, ..., ti::A, ti::B, ti::C, ...,  // spacetime
     133             : ///  ti::t, ti::t, ti::t, ..., ti::T, ti::T, ti::T, ...]  // time
     134             : /// ```
     135             : ///
     136             : /// Here are some examples:
     137             : ///
     138             : /// ```
     139             : /// symmetry: <1, 1, 1>
     140             : /// set of `TensorIndex`s: {ti::t, ti::i, ti::a}
     141             : /// canonical ordering: [ti::i, ti::a, ti::t]
     142             : ///
     143             : /// symmetry: <1, 1, 1>
     144             : /// set of `TensorIndex`s: {ti::A, ti::a, ti::b}
     145             : /// canonical ordering: [ti::a, ti::b, ti::A]
     146             : /// ```
     147             : ///
     148             : /// When a `Tensor` is not fully symmetric, the `TensorIndex` labels for any
     149             : /// indices that do not have symmetry with any other will simply keep their
     150             : /// label because it cannot be swapped:
     151             : ///
     152             : /// ```
     153             : /// symmetry: <1, 2, 1>
     154             : /// set of `TensorIndex`s: {ti::a, ti::b, ti::i}
     155             : /// canonical ordering: [ti::i, ti::b, ti::a]
     156             : ///
     157             : /// symmetry: <2, 1, 1>
     158             : /// set of `TensorIndex`s: {ti::t, ti::k, ti::j}
     159             : /// canonical ordering: [ti::t, ti::j, ti::k]
     160             : /// ```
     161             : ///
     162             : /// If there is more than one set of symmetric indices, each of the subsets are
     163             : /// individually reordered:
     164             : ///
     165             : /// ```
     166             : /// symmetry: <1, 2, 2, 1>
     167             : /// set of `TensorIndex`s: {ti::a, ti::b, ti::t, ti::i}
     168             : /// canonical ordering: [ti::i, ti::b, ti::t, ti::a]
     169             : /// ```
     170             : ///
     171             : /// The motivation for this specific canonical reordering is to quickly assess
     172             : /// which components to assign to and which ones to skip when generic spatial
     173             : /// and/or concrete time indices are used for symmetric spacetime indices in the
     174             : /// resulting left hand side tensor when using `TensorExpression`s.
     175             : ///
     176             : /// Let's take the spacetime metric \f$g_{ab}\f$ as our motivating example. This
     177             : /// tensor has symmetric spacetime indices, and let's say we only want to assign
     178             : /// to \f$g_{ti}\f$. We want to loop over all 10 independent components of
     179             : /// \f$g_{ab}\f$ and skip components outside of \f$g_{ti}\f$, e.g. \f$g_{xy}\f$
     180             : /// (or \f$g_{12}\f$) and \f$g_{tt}\f$ (or \f$g_{00}\f$). To do so, when we see
     181             : /// a multi-index like `{2, 1}` in our loop, we align `{2, 1}` with `{t, i}` and
     182             : /// ask if the `2` is a valid index for `t` and if the `1` is a valid index for
     183             : /// `i`. `1` is valid for `i`, but `2` is not valid for `t`, so we correctly
     184             : /// skip over `{2, 1}` and don't assign to this component.
     185             : ///
     186             : /// However, this simple logic can lead to false positives or negatives when the
     187             : /// indices are symmetric. What if the multi-index we're asking about is
     188             : /// `{0, 1}` (\f$g_{01}\f$ or \f$g_{tx}\f$)? This logic would correctly
     189             : /// determine that this is one of the components we want to assign to. But what
     190             : /// if the multi-index was `{1, 0}`? The logic would incorrectly say to skip
     191             : /// over and not assign to this multi-index because `1` is not a valid index for
     192             : /// `t` and `0` is not valid for `i`. However, because \f$g_{ab}\f$ is
     193             : /// symmetric, both `{0, 1}` and `{1, 0}` should give the same result, but
     194             : /// `{1, 0}` gives us a false negative. Moreover and more generally, assigning
     195             : /// to \f$g_{ti}\f$ and \f$g_{it}\f$ should yield the same behavior (assign to
     196             : /// the same set of components).
     197             : ///
     198             : /// One way to address this would be to check the multi-indices for all
     199             : /// permutations of symmetric index values, e.g. is `{0, 1}` *or* `{1, 0}`
     200             : /// valid? And if so, then evaluate it. However, this adds work at runtime and
     201             : /// the number of permutations to check increases as we increase the number of
     202             : /// symmetric indices.
     203             : ///
     204             : /// The canonical reordering done by *this* function solves this problem by
     205             : /// reordering the `TensorIndex`s to align nicely with the canonical multi-index
     206             : /// ordering implemented by
     207             : /// `::Tensor_detail::Structure::get_canonical_tensor_index`.
     208             : /// `get_canonical_tensor_index` takes a storage index (which corresponds to an
     209             : /// independent tensor component) and returns a canonical multi-index. This
     210             : /// canonical multi-index is such that index values for symmetric indices will
     211             : /// be ordered to increase from right to left. For example, for a rank 2
     212             : /// symmetric tensor, `{1, 0}` is the canonical multi-index corresponding to the
     213             : /// dependent multi-indices `{0, 1}` and `{1, 0}`. Therefore, `{1, 0}` would be
     214             : /// the multi-index returned by `get_canonical_tensor_index` that corresponds to
     215             : /// the single independent component. In other words, when we loop over the
     216             : /// independent canonical multi-indices, we are looping over the multi-index
     217             : /// permutations that are in the lower triangle of the N-dimensional matrix
     218             : /// containing all multi-index permutations. The canonical reordering of LHS
     219             : /// `TensorIndex`s for symmetric indices that is done by *this* function is
     220             : /// implemented to match this: by making time indices the rightmost, then
     221             : /// spacetime the next rightmost, and then spatial indices leftmost, we
     222             : /// guarantee that looping over the lower triangle permutations given by
     223             : /// `get_canonical_tensor_index` will not produce false positives or negatives
     224             : /// using the earlier simple logic to check for valid multi-indices.
     225             : ///
     226             : /// We can use the spacetime metric as an example to demonstrate this. The
     227             : /// lower triangle multi-indices that are looped over are ordered with index
     228             : /// values increasing right to left, e.g. `{0, 0}`, `{1, 0}`, `{2, 0}`,
/// `{2, 1}`, etc. If a user wants to assign to \f$g_{ti}\f$, then after this
     230             : /// function internally reorders the LHS indices to \f$g_{it}\f$, when we loop
     231             : /// and encounter `{1, 0}`, we correctly get that we should evaluate this
     232             : /// component without having to check its other permutation. Likewise, if a user
     233             : /// wants to assign to \f$g_{it}\f$, no reordering is done and we get the same
     234             : /// correct behavior.
     235             : ///
     236             : /// This function works in general because:
     237             : /// - any `0`s in the symmetric indices will "first be dealt" to any time
     238             : ///   `TensorIndex`s in the rightmost index positions and then any spacetime
     239             : ///   `TensorIndex`s, where `0` is correctly valid for both, but
     240             : /// - if there are more `0`s than time + spacetime `TensorIndex`s, they will be
     241             : ///   "dealt" to spatial indices, which is always correctly invalid, and
     242             : /// - if there are more time `TensorIndex`s than `0`s, values `> 0` will be
     243             : ///   "dealt" to time indices, which is also always correctly invalid.
     244             : ///
     245             : /// In this way, we don't ever have to check other permutations of sets of
     246             : /// symmetric index values.
     247             : ///
     248             : /// \tparam LhsTensorIndices the `TensorIndex`s of the `Tensor`, e.g. `ti::a`,
     249             : /// `ti::b`, `ti::c`
     250             : /// \param canonical_symmetry the canonicalized symmetry values of the tensor
     251             : /// (see `Symmetry` for definition of the canonical ordering of symmetry values)
     252             : /// \return reordered values of `LhsTensorIndices::value...`
template <typename... LhsTensorIndices, size_t NumIndices>
constexpr std::array<size_t, NumIndices> get_reordered_tensorindex_values(
    const std::array<std::int32_t, NumIndices>& canonical_symmetry) {
  constexpr std::array<size_t, NumIndices> lhs_tensorindex_values = {
      {LhsTensorIndices::value...}};
  if constexpr (NumIndices < 2) {
    // With fewer than two indices there is nothing to reorder
    return lhs_tensorindex_values;
  } else {
    // Comparator implementing the canonical order described above: returns
    // true when `tensorindex_value1` should be placed *before* (i.e. at a
    // lower index position than) `tensorindex_value2`. Time indices sort
    // last (ti::t before ti::T), then generic spacetime indices, then
    // generic spatial indices first; within the spatial and spacetime
    // subsets, larger values (lowercase before uppercase, alphabetical)
    // sort toward higher positions.
    const auto compare = [](const size_t tensorindex_value1,
                            const size_t tensorindex_value2) {
      // clang-tidy thinks these two branches are the same but they aren't:
      //   if (tensorindex_value2 == ti::T.value)
      //   if (tensorindex_value2 == ti::t.value)
      // NOLINTNEXTLINE (clang-tidy: bugprone-branch-clone)
      if (tensorindex_value2 == ti::T.value) {
        return false;
      } else if (is_time_index_value(tensorindex_value1)) {
        return true;
      } else if (tensorindex_value2 == ti::t.value) {
        return false;
      }

      return (is_generic_spacetime_index_value(tensorindex_value1) and
              is_generic_spatial_index_value(tensorindex_value2)) or
             (tensorindex_value1 > tensorindex_value2 and
              is_generic_spacetime_index_value(tensorindex_value1) ==
                  is_generic_spacetime_index_value(tensorindex_value2));
    };

    std::array<size_t, NumIndices> reordered_lhs_tensorindex_values =
        lhs_tensorindex_values;

    // For each symmetry value present, selection-sort the TensorIndex values
    // at the positions sharing that symmetry value, using `compare` above.
    // Positions with different symmetry values are left untouched, so each
    // set of symmetric indices is reordered independently.
    std::int32_t max_symm_value = *alg::max_element(canonical_symmetry);
    std::int32_t symm_value_to_find = 1;
    while (symm_value_to_find <= max_symm_value) {
      // Walk right-to-left; position i is the current "slot" being filled
      // with the maximum (per `compare`) of the remaining candidates
      size_t i = NumIndices - 1;
      while (true) {
        // skip forward until we get to the position with the value we want
        while (i > 0 and canonical_symmetry[i] != symm_value_to_find) {
          i--;
        }
        if (i == 0) {
          // Position 0 needs no pass of its own: it is either not part of
          // this symmetry set or is the last remaining candidate
          break;
        }

        // Running maximum among candidate positions <= i for this set
        size_t max_tensorindex_value = reordered_lhs_tensorindex_values[i];
        size_t max_index = i;

        size_t j = i - 1;
        // note: because we need to hit 0 and size_t wraps around to max size_t
        while (j < NumIndices) {
          const std::int32_t compare_symm_value = canonical_symmetry[j];
          const size_t compare_tensorindex_value =
              reordered_lhs_tensorindex_values[j];
          if (compare_symm_value == symm_value_to_find and
              compare(compare_tensorindex_value, max_tensorindex_value)) {
            max_tensorindex_value = compare_tensorindex_value;
            max_index = j;
          }
          j--;
        }
        // Swap the winning value into slot i, then move to the next slot
        reordered_lhs_tensorindex_values[max_index] =
            reordered_lhs_tensorindex_values[i];
        reordered_lhs_tensorindex_values[i] = max_tensorindex_value;
        i--;
      }
      symm_value_to_find++;
    }

    return reordered_lhs_tensorindex_values;
  }
}
     325             : 
     326             : /*!
     327             :  * \ingroup TensorExpressionsGroup
     328             :  * \brief Evaluate subtrees of the RHS expression or the RHS expression as a
     329             :  * whole and assign the result to the LHS tensor
     330             :  *
     331             :  * \details This is for internal use only and should never be directly called.
     332             :  * See `tenex::evaluate` and use it, instead.
     333             :  *
     334             :  * `EvaluateSubtrees` controls whether we wish to evaluate RHS subtrees or the
 * entire RHS expression as one expression. See `TensorExpression` documentation
     336             :  * on equation splitting for more details on what this means.
     337             :  *
     338             :  * If `EvaluateSubtrees == false`, then it's safe if the LHS tensor is used in
     339             :  * the RHS expression, so long as the generic index orders are the same. This
     340             :  * means that the callee of this function needs to first verify this is true
     341             :  * before calling this function. Under these conditions, this is a safe
     342             :  * operation because the implementation modifies each LHS component once and
     343             :  * does not revisit and access any LHS components after they've been updated.
     344             :  * For example, say we do `tenex::evaluate<ti_a, ti_b>(make_not_null(&L),
     345             :  * 5.0 * L(ti_a, ti_b));`. This function will first compute the RHS for some
     346             :  * concrete LHS, e.g. \f$L_{00}\f$. To compute this, it accesses \f$L_{00}\f$
     347             :  * in the RHS tree, multiplies it by `5.0`, then updates \f$L_{00}\f$ to be the
     348             :  * result of this multiplication. Next, it might compute \f$L_{01}\f$, where
     349             :  * only \f$L_{01}\f$ is accessed, and which hasn't yet been modified. Then the
     350             :  * next component is computed and updated, and so forth. These steps are
     351             :  * performed once for each unique LHS index. Therefore, it is important to note
     352             :  * that this kind of operation being safe to perform is
     353             :  * implementation-dependent. Specifically, the safety of the operation depends
     354             :  * on the order of LHS component access and assignment.
     355             :  *
     356             :  * \note `LhsTensorIndices` must be passed by reference because non-type
     357             :  * template parameters cannot be class types until C++20.
     358             :  *
     359             :  * @tparam EvaluateSubtrees whether or not to evaluate subtrees of RHS
     360             :  * expression
     361             :  * @tparam LhsTensorIndices the `TensorIndex`s of the `Tensor` on the LHS of the
     362             :  * tensor expression, e.g. `ti::a`, `ti::b`, `ti::c`
     363             :  * @param lhs_tensor pointer to the resultant LHS `Tensor` to fill
     364             :  * @param rhs_tensorexpression the RHS TensorExpression to be evaluated
     365             :  */
     366             : template <bool EvaluateSubtrees, typename... LhsTensorIndices,
     367             :           typename LhsDataType, typename LhsSymmetry, typename LhsIndexList,
     368             :           typename Derived, typename RhsDataType, typename RhsSymmetry,
     369             :           typename RhsIndexList, typename... RhsTensorIndices,
     370             :           size_t... LhsInts>
     371             : void evaluate_impl(
     372             :     const gsl::not_null<Tensor<LhsDataType, LhsSymmetry, LhsIndexList>*>
     373             :         lhs_tensor,
     374             :     const TensorExpression<Derived, RhsDataType, RhsSymmetry, RhsIndexList,
     375             :                            tmpl::list<RhsTensorIndices...>>&
     376             :         rhs_tensorexpression,
     377             :     const std::index_sequence<LhsInts...>& /*lhs_ints*/) {
     378             :   constexpr size_t num_lhs_indices = sizeof...(LhsTensorIndices);
     379             :   constexpr size_t num_rhs_indices = sizeof...(RhsTensorIndices);
     380             : 
     381             :   using lhs_tensorindex_list = tmpl::list<LhsTensorIndices...>;
     382             :   using rhs_tensorindex_list = tmpl::list<RhsTensorIndices...>;
     383             : 
     384             :   using lhs_tensor_type = typename std::decay_t<decltype(*lhs_tensor)>;
     385             : 
     386             :   static_assert(is_supported_tensor_datatype_v<LhsDataType> and
     387             :                     is_supported_tensor_datatype_v<RhsDataType>,
     388             :                 "TensorExpressions currently only support Tensors whose data "
     389             :                 "type is double, std::complex<double>, DataVector, or "
     390             :                 "ComplexDataVector. It is possible to add support for other "
     391             :                 "data types that are supported by Tensor.");
     392             :   static_assert(
     393             :       is_assignable_v<LhsDataType, RhsDataType>,
     394             :       "Assignment of the LHS Tensor's data type to the RHS TensorExpression's "
     395             :       "data type is not supported. This happens from doing something like e.g. "
     396             :       "trying to assign a Tensor<double> to a Tensor<DataVector> or a "
     397             :       "Tensor<DataVector> to a Tensor<ComplexDataVector>.");
     398             :   // `Symmetry` currently prevents this because antisymmetries are not currently
     399             :   // supported for `Tensor`s. This check is repeated here because if
     400             :   // antisymmetries are later supported for `Tensor`, using antisymmetries in
     401             :   // `TensorExpression`s will not automatically work. The implementations of the
     402             :   // derived `TensorExpression` types assume no antisymmetries (assume positive
     403             :   // `Symmetry` values), so support for antisymmetries in `TensorExpression`s
     404             :   // will still need to be implemented.
     405             :   static_assert(CheckNoLhsAntiSymmetries<LhsSymmetry>::value,
     406             :                 "Anti-symmetric Tensors are not currently supported by "
     407             :                 "TensorExpressions.");
     408             :   static_assert(
     409             :       tmpl::equal_members<
     410             :           typename remove_time_indices<lhs_tensorindex_list>::type,
     411             :           typename remove_time_indices<rhs_tensorindex_list>::type>::value,
     412             :       "The generic indices on the LHS of a tensor equation (that is, the "
     413             :       "template parameters specified in evaluate<...>) must match the generic "
     414             :       "indices of the RHS TensorExpression. This error occurs as a result of a "
     415             :       "call like evaluate<ti::a, ti::b>(R(ti::A, ti::b) * S(ti::a, ti::c)), "
     416             :       "where the generic indices of the evaluated RHS expression are ti::b and "
     417             :       "ti::c, but the generic indices provided for the LHS are ti::a and "
     418             :       "ti::b.");
     419             :   static_assert(
     420             :       tensorindex_list_is_valid<lhs_tensorindex_list>::value,
     421             :       "Cannot assign a tensor expression to a LHS tensor with a repeated "
     422             :       "generic index, e.g. evaluate<ti::a, ti::a>. (Note that the concrete "
     423             :       "time indices (ti::T and ti::t) can be repeated.)");
     424             :   static_assert(
     425             :       not contains_indices_to_contract<num_lhs_indices>(
     426             :           {{LhsTensorIndices::value...}}),
     427             :       "Cannot assign a tensor expression to a LHS tensor with generic "
     428             :       "indices that would be contracted, e.g. evaluate<ti::A, ti::a>.");
     429             :   // `IndexPropertyCheck` does also check that valence (Up/Lo) of indices that
     430             :   // correspond in the RHS and LHS tensors are equal, but the assertion message
     431             :   // below does not mention this because a mismatch in valence should have been
     432             :   // caught due to the combination of (i) the Tensor::operator() assertion
     433             :   // checking that generic indices' valences match the tensor's indices'
     434             :   // valences and (ii) the above assertion that RHS and LHS generic indices
     435             :   // match
     436             :   static_assert(
     437             :       IndexPropertyCheck<LhsIndexList, RhsIndexList, lhs_tensorindex_list,
     438             :                          rhs_tensorindex_list>::value,
     439             :       "At least one index of the tensor evaluated from the RHS expression "
     440             :       "cannot be evaluated to its corresponding index in the LHS tensor. This "
     441             :       "is due to a difference in number of spatial dimensions or Frame type "
     442             :       "between the index on the RHS and LHS. "
     443             :       "e.g. evaluate<ti::a, ti::b>(L, R(ti::b, ti::a));, where R's first "
     444             :       "index has 2 spatial dimensions but L's second index has 3 spatial "
     445             :       "dimensions. Check RHS and LHS indices that use the same generic index.");
     446             :   static_assert(Derived::height_relative_to_closest_tensor_leaf_in_subtree <
     447             :                     std::numeric_limits<size_t>::max(),
     448             :                 "Either no Tensors were found in the RHS TensorExpression or "
     449             :                 "the depth of the tree exceeded the maximum size_t value (very "
     450             :                 "unlikely). If there is indeed a Tensor in the RHS expression "
     451             :                 "and assuming the tree's height is not actually the maximum "
     452             :                 "size_t value, then there is a flaw in the logic for computing "
     453             :                 "the derived TensorExpression types' member, "
     454             :                 "height_relative_to_closest_tensor_leaf_in_subtree.");
     455             : 
     456             :   if constexpr (EvaluateSubtrees) {
     457             :     // Make sure the LHS tensor doesn't also appear in the RHS tensor expression
     458             :     (~rhs_tensorexpression).assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
     459             :     // If the LHS data type is a vector, size the LHS tensor components if their
     460             :     // size does not match the size from a `Tensor` in the RHS expression
     461             :     if constexpr (is_derived_of_vector_impl_v<LhsDataType>) {
     462             :       const size_t rhs_component_size =
     463             :           (~rhs_tensorexpression).get_rhs_tensor_component_size();
     464             :       if (rhs_component_size != (*lhs_tensor)[0].size()) {
     465             :         for (auto& lhs_component : *lhs_tensor) {
     466             :           lhs_component = LhsDataType(rhs_component_size);
     467             :         }
     468             :       }
     469             :     }
     470             :   }
     471             : 
     472             :   constexpr std::array<std::int32_t, num_lhs_indices> lhs_symmetry = {
     473             :       {tmpl::at_c<LhsSymmetry, LhsInts>::value...}};
     474             :   constexpr std::array<size_t, num_lhs_indices> reordered_tensorindex_values =
     475             :       get_reordered_tensorindex_values<LhsTensorIndices...>(lhs_symmetry);
     476             :   using reordered_lhs_tensorindex_list =
     477             :       tmpl::list<TensorIndex<reordered_tensorindex_values[LhsInts]>...>;
     478             : 
     479             :   constexpr std::array<size_t, num_rhs_indices> index_transformation =
     480             :       compute_tensorindex_transformation<num_lhs_indices, num_rhs_indices>(
     481             :           reordered_tensorindex_values, {{RhsTensorIndices::value...}});
     482             : 
     483             :   // positions of indices in LHS tensor where generic spatial indices are used
     484             :   // for spacetime indices
     485             :   constexpr auto lhs_spatial_spacetime_index_positions =
     486             :       get_spatial_spacetime_index_positions<LhsIndexList,
     487             :                                             reordered_lhs_tensorindex_list>();
     488             :   // positions of indices in RHS tensor where generic spatial indices are used
     489             :   // for spacetime indices
     490             :   constexpr auto rhs_spatial_spacetime_index_positions =
     491             :       get_spatial_spacetime_index_positions<RhsIndexList,
     492             :                                             rhs_tensorindex_list>();
     493             : 
     494             :   // positions of indices in LHS tensor where concrete time indices are used
     495             :   constexpr auto lhs_time_index_positions =
     496             :       get_time_index_positions<reordered_lhs_tensorindex_list>();
     497             : 
     498             :   using rhs_expression_type =
     499             :       typename std::decay_t<decltype(~rhs_tensorexpression)>;
     500             : 
     501             :   for (size_t i = 0; i < lhs_tensor_type::size(); i++) {
     502             :     auto lhs_multi_index =
     503             :         lhs_tensor_type::structure::get_canonical_tensor_index(i);
     504             :     if (is_evaluated_lhs_multi_index(lhs_multi_index,
     505             :                                      lhs_spatial_spacetime_index_positions,
     506             :                                      lhs_time_index_positions)) {
     507             :       for (size_t j = 0; j < lhs_spatial_spacetime_index_positions.size();
     508             :            j++) {
     509             :         gsl::at(lhs_multi_index,
     510             :                 gsl::at(lhs_spatial_spacetime_index_positions, j)) -= 1;
     511             :       }
     512             :       auto rhs_multi_index =
     513             :           transform_multi_index(lhs_multi_index, index_transformation);
     514             :       for (size_t j = 0; j < rhs_spatial_spacetime_index_positions.size();
     515             :            j++) {
     516             :         gsl::at(rhs_multi_index,
     517             :                 gsl::at(rhs_spatial_spacetime_index_positions, j)) += 1;
     518             :       }
     519             : 
     520             :       // The expression will either be evaluated as one whole expression
     521             :       // or it will be split up into subtrees that are evaluated one at a time.
     522             :       // See the section on splitting in the documentation for the
     523             :       // `TensorExpression` class to understand the logic and terminology used
     524             :       // in this control flow below.
     525             :       if constexpr (EvaluateSubtrees) {
     526             :         // the expression is split up, so evaluate subtrees at splits
     527             :         (~rhs_tensorexpression)
     528             :             .evaluate_primary_subtree((*lhs_tensor)[i], rhs_multi_index);
     529             :         if constexpr (not rhs_expression_type::is_primary_start) {
     530             :           // the root expression type is not the starting point of a leg, so it
     531             :           // has not yet been evaluated, so now we evaluate this last leg of the
     532             :           // expression at the root of the tree
     533             :           (*lhs_tensor)[i] =
     534             :               (~rhs_tensorexpression)
     535             :                   .get_primary((*lhs_tensor)[i], rhs_multi_index);
     536             :         }
     537             :       } else {
     538             :         // the expression is not split up, so evaluate full expression
     539             :         (*lhs_tensor)[i] = (~rhs_tensorexpression).get(rhs_multi_index);
     540             :       }
     541             :     }
     542             :   }
     543             : }
     544             : 
     545             : /*!
     546             :  * \ingroup TensorExpressionsGroup
     547             :  * \brief Assign a value to components of the LHS tensor
     548             :  *
     549             :  * \details This is for internal use only and should never be directly called.
     550             :  * See `tenex::evaluate` and use it, instead.
     551             :  *
     552             :  * \note `LhsTensorIndices` must be passed by reference because non-type
     553             :  * template parameters cannot be class types until C++20.
     554             :  *
     555             :  * @tparam LhsTensorIndices the `TensorIndex`s of the `Tensor` on the LHS of the
     556             :  * tensor expression, e.g. `ti::a`, `ti::b`, `ti::c`
     557             :  * @param lhs_tensor pointer to the resultant LHS `Tensor` to fill
     558             :  * @param rhs_value the RHS value to assigned
     559             :  */
     560             : template <typename... LhsTensorIndices, typename X, typename LhsSymmetry,
     561             :           typename LhsIndexList, typename NumberType, size_t... LhsInts>
     562             : void evaluate_impl(
     563             :     const gsl::not_null<Tensor<X, LhsSymmetry, LhsIndexList>*> lhs_tensor,
     564             :     const NumberType& rhs_value,
     565             :     const std::index_sequence<LhsInts...>& /*lhs_ints*/) {
     566             :   using lhs_tensor_type = typename std::decay_t<decltype(*lhs_tensor)>;
     567             :   constexpr size_t num_lhs_indices = sizeof...(LhsTensorIndices);
     568             :   using lhs_tensorindex_list = tmpl::list<LhsTensorIndices...>;
     569             : 
     570             :   static_assert(is_supported_tensor_datatype_v<X> and
     571             :                 "TensorExpressions currently only support Tensors whose data "
     572             :                 "type is double, std::complex<double>, DataVector, or "
     573             :                 "ComplexDataVector. It is possible to add support for other "
     574             :                 "data types that are supported by Tensor.");
     575             :   static_assert(
     576             :       is_assignable_v<X, NumberType>,
     577             :       "Assignment of the LHS Tensor's data type to the RHS number's data type "
     578             :       "is not supported within TensorExpressions. This happens from doing "
     579             :       "something like e.g. trying to assign a double to a DataVector or a "
     580             :       "DataVector to a ComplexDataVector.");
     581             :   // `Symmetry` currently prevents this because antisymmetries are not currently
     582             :   // supported for `Tensor`s. This check is repeated here because if
     583             :   // antisymmetries are later supported for `Tensor`, using antisymmetries in
     584             :   // `TensorExpression`s will not automatically work. The implementations of the
     585             :   // derived `TensorExpression` types assume no antisymmetries (assume positive
     586             :   // `Symmetry` values), so support for antisymmetries in `TensorExpression`s
     587             :   // will still need to be implemented.
     588             :   static_assert(CheckNoLhsAntiSymmetries<LhsSymmetry>::value,
     589             :                 "Anti-symmetric Tensors are not currently supported by "
     590             :                 "TensorExpressions.");
     591             :   static_assert(
     592             :       tensorindex_list_is_valid<lhs_tensorindex_list>::value,
     593             :       "Cannot assign a tensor expression to a LHS tensor with a repeated "
     594             :       "generic index, e.g. evaluate<ti::a, ti::a>. (Note that the concrete "
     595             :       "time indices (ti::T and ti::t) can be repeated.)");
     596             :   static_assert(
     597             :       not contains_indices_to_contract<num_lhs_indices>(
     598             :           {{LhsTensorIndices::value...}}),
     599             :       "Cannot assign a tensor expression to a LHS tensor with generic "
     600             :       "indices that would be contracted, e.g. evaluate<ti::A, ti::a>.");
     601             : 
     602             :   if constexpr (is_derived_of_vector_impl_v<X>) {
     603             :     ASSERT(get_size((*lhs_tensor)[0]) > 0,
     604             :            "Tensors with vector components must be sized before calling "
     605             :            "\ntenex::evaluate<...>("
     606             :            "\n\tgsl::not_null<Tensor<VectorType, ...>*>, number).");
     607             :   }
     608             : 
     609             :   constexpr std::array<std::int32_t, num_lhs_indices> lhs_symmetry = {
     610             :       {tmpl::at_c<LhsSymmetry, LhsInts>::value...}};
     611             :   constexpr std::array<size_t, num_lhs_indices> reordered_tensorindex_values =
     612             :       get_reordered_tensorindex_values<LhsTensorIndices...>(lhs_symmetry);
     613             :   (void)reordered_tensorindex_values;  // silence false unused variable warning
     614             :   using reordered_lhs_tensorindex_list =
     615             :       tmpl::list<TensorIndex<reordered_tensorindex_values[LhsInts]>...>;
     616             : 
     617             :   // positions of indices in LHS tensor where generic spatial indices are used
     618             :   // for spacetime indices
     619             :   constexpr auto lhs_spatial_spacetime_index_positions =
     620             :       get_spatial_spacetime_index_positions<LhsIndexList,
     621             :                                             reordered_lhs_tensorindex_list>();
     622             : 
     623             :   // positions of indices in LHS tensor where concrete time indices are used
     624             :   constexpr auto lhs_time_index_positions =
     625             :       get_time_index_positions<reordered_lhs_tensorindex_list>();
     626             : 
     627             :   for (size_t i = 0; i < lhs_tensor_type::size(); i++) {
     628             :     auto lhs_multi_index =
     629             :         lhs_tensor_type::structure::get_canonical_tensor_index(i);
     630             :     if (is_evaluated_lhs_multi_index(lhs_multi_index,
     631             :                                      lhs_spatial_spacetime_index_positions,
     632             :                                      lhs_time_index_positions)) {
     633             :       (*lhs_tensor)[i] = rhs_value;
     634             :     }
     635             :   }
     636             : }
     637             : }  // namespace detail
     638             : 
     639             : /*!
     640             :  * \ingroup TensorExpressionsGroup
     641             :  * \brief Assign the result of a RHS tensor expression to a tensor with the LHS
     642             :  * index order set in the template parameters
     643             :  *
     644             :  * \details Uses the right hand side (RHS) TensorExpression's index ordering
     645             :  * (`RhsTE::args_list`) and the desired left hand side (LHS) tensor's index
     646             :  * ordering (`LhsTensorIndices`) to fill the provided LHS Tensor with that LHS
     647             :  * index ordering. This can carry out the evaluation of a RHS tensor expression
     648             :  * to a LHS tensor with the same index ordering, such as \f$L_{ab} = R_{ab}\f$,
     649             :  * or different ordering, such as \f$L_{ba} = R_{ab}\f$.
     650             :  *
     651             :  * The symmetry of the provided LHS Tensor need not match the symmetry
     652             :  * determined from evaluating the RHS TensorExpression according to its order of
     653             :  * operations. This allows one to specify LHS symmetries (via `lhs_tensor`) that
     654             :  * may not be preserved by the RHS expression's order of operations, which
     655             :  * depends on how the expression is written and implemented.
     656             :  *
     657             :  * The LHS `Tensor` cannot be part of the RHS expression, e.g.
     658             :  * `evaluate(make_not_null(&L), L() + R());`, because the LHS `Tensor` will
     659             :  * generally not be computed correctly when the RHS `TensorExpression` is split
     660             :  * up and the LHS tensor components are computed by accumulating the result of
     661             :  * subtrees (see the section on splitting in the documentation for the
     662             :  * `TensorExpression` class). If you need to use the LHS `Tensor` on the RHS,
     663             :  * use `tenex::update` instead.
     664             :  *
     665             :  * ### Example usage
     666             :  * Given `Tensor`s `R`, `S`, `T`, `G`, and `H`, we can compute the LHS tensor
     667             :  * \f$L\f$ in the equation \f$L_{a} = R_{ab} S^{b} + G_{a} - H_{ba}{}^{b} T\f$
     668             :  * by doing:
     669             :  *
     670             :  * \snippet Test_MixedOperations.cpp use_evaluate_with_result_as_arg
     671             :  *
     672             :  * \note `LhsTensorIndices` must be passed by reference because non-type
     673             :  * template parameters cannot be class types until C++20.
     674             :  *
     675             :  * @tparam LhsTensorIndices the `TensorIndex`s of the `Tensor` on the LHS of the
     676             :  * tensor expression, e.g. `ti::a`, `ti::b`, `ti::c`
     677             :  * @param lhs_tensor pointer to the resultant LHS `Tensor` to fill
     678             :  * @param rhs_tensorexpression the RHS TensorExpression to be evaluated
     679             :  */
     680             : template <auto&... LhsTensorIndices, typename LhsDataType, typename LhsSymmetry,
     681             :           typename LhsIndexList, typename Derived, typename RhsDataType,
     682             :           typename RhsSymmetry, typename RhsIndexList,
     683             :           typename... RhsTensorIndices>
     684           1 : void evaluate(
     685             :     const gsl::not_null<Tensor<LhsDataType, LhsSymmetry, LhsIndexList>*>
     686             :         lhs_tensor,
     687             :     const TensorExpression<Derived, RhsDataType, RhsSymmetry, RhsIndexList,
     688             :                            tmpl::list<RhsTensorIndices...>>&
     689             :         rhs_tensorexpression) {
     690             :   using rhs_expression_type =
     691             :       typename std::decay_t<decltype(~rhs_tensorexpression)>;
     692             :   constexpr bool evaluate_subtrees =
     693             :       rhs_expression_type::primary_subtree_contains_primary_start;
     694             :   detail::evaluate_impl<evaluate_subtrees,
     695             :                         std::decay_t<decltype(LhsTensorIndices)>...>(
     696             :       lhs_tensor, rhs_tensorexpression,
     697             :       std::make_index_sequence<sizeof...(LhsTensorIndices)>{});
     698             : }
     699             : 
     700             : /// @{
     701             : /*!
     702             :  * \ingroup TensorExpressionsGroup
     703             :  * \brief Assign a number to components of a tensor with the LHS index order
     704             :  * set in the template parameters
     705             :  *
     706             :  * \details
     707             :  * Example usage:
     708             :  * \snippet Test_MixedOperations.cpp assign_double_to_index_subsets
     709             :  *
     710             :  * \note The components of the LHS `Tensor` passed in must already be sized
     711             :  * because there is no way to infer component size from the RHS
     712             :  *
     713             :  * \note `LhsTensorIndices` must be passed by reference because non-type
     714             :  * template parameters cannot be class types until C++20.
     715             :  *
     716             :  * @tparam LhsTensorIndices the `TensorIndex`s of the `Tensor` on the LHS of the
     717             :  * tensor expression, e.g. `ti::a`, `ti::b`, `ti::c`
     718             :  * @param lhs_tensor pointer to the resultant LHS `Tensor` to fill
     719             :  * @param rhs_value the RHS value to assign
     720             :  */
     721             : template <auto&... LhsTensorIndices, typename X, typename LhsSymmetry,
     722             :           typename LhsIndexList, typename N,
     723             :           Requires<std::is_arithmetic_v<N>> = nullptr>
     724           1 : void evaluate(
     725             :     const gsl::not_null<Tensor<X, LhsSymmetry, LhsIndexList>*> lhs_tensor,
     726             :     const N rhs_value) {
     727             :   detail::evaluate_impl<std::decay_t<decltype(LhsTensorIndices)>...>(
     728             :       lhs_tensor, rhs_value,
     729             :       std::make_index_sequence<sizeof...(LhsTensorIndices)>{});
     730             : }
     731             : template <auto&... LhsTensorIndices, typename X, typename LhsSymmetry,
     732             :           typename LhsIndexList, typename N>
     733           1 : void evaluate(
     734             :     const gsl::not_null<Tensor<X, LhsSymmetry, LhsIndexList>*> lhs_tensor,
     735             :     const std::complex<N>& rhs_value) {
     736             :   detail::evaluate_impl<std::decay_t<decltype(LhsTensorIndices)>...>(
     737             :       lhs_tensor, rhs_value,
     738             :       std::make_index_sequence<sizeof...(LhsTensorIndices)>{});
     739             : }
     740             : /// @}
     741             : 
     742             : /*!
     743             :  * \ingroup TensorExpressionsGroup
     744             :  * \brief Assign the result of a RHS tensor expression to a tensor with the LHS
     745             :  * index order set in the template parameters
     746             :  *
     747             :  * \details Uses the right hand side (RHS) TensorExpression's index ordering
     748             :  * (`RhsTE::args_list`) and the desired left hand side (LHS) tensor's index
     749             :  * ordering (`LhsTensorIndices`) to construct a LHS Tensor with that LHS index
     750             :  * ordering. This can carry out the evaluation of a RHS tensor expression to a
     751             :  * LHS tensor with the same index ordering, such as \f$L_{ab} = R_{ab}\f$, or
     752             :  * different ordering, such as \f$L_{ba} = R_{ab}\f$.
     753             :  *
     754             :  * The symmetry of the returned LHS Tensor depends on the order of operations in
     755             :  * the RHS TensorExpression, i.e. how the expression is written. If you would
     756             :  * like to specify the symmetry of the LHS Tensor instead of it being determined
     757             :  * by the order of operations in the RHS expression, please use the other
     758             :  * `tenex::evaluate` overload that takes an empty LHS Tensor as its first
     759             :  * argument.
     760             :  *
     761             :  * ### Example usage
     762             :  * Given `Tensor`s `R`, `S`, `T`, `G`, and `H`, we can compute the LHS tensor
     763             :  * \f$L\f$ in the equation \f$L_{a} = R_{ab} S^{b} + G_{a} - H_{ba}{}^{b} T\f$
     764             :  * by doing:
     765             :  *
     766             :  * \snippet Test_MixedOperations.cpp use_evaluate_to_return_result
     767             :  *
     768             :  * \parblock
     769             :  * \note If a generic spatial index is used for a spacetime index in the RHS
     770             :  * tensor, its corresponding index in the LHS tensor type will be a spatial
     771             :  * index with the same valence, frame, and number of spatial dimensions. If a
     772             :  * concrete time index is used for a spacetime index in the RHS tensor, the
     773             :  * index will not appear in the LHS tensor (i.e. there will NOT be a
     774             :  * corresponding LHS index where only the time index of that index has been
     775             :  * computed and its spatial indices are empty).
     776             :  * \endparblock
     777             :  *
     778             :  * \parblock
     779             :  * \note `LhsTensorIndices` must be passed by reference because non-type
     780             :  * template parameters cannot be class types until C++20.
     781             :  * \endparblock
     782             :  *
     783             :  * @tparam LhsTensorIndices the TensorIndexs of the Tensor on the LHS of the
     784             :  * tensor expression, e.g. `ti::a`, `ti::b`, `ti::c`
     785             :  * @param rhs_tensorexpression the RHS TensorExpression to be evaluated
     786             :  * @return the resultant LHS Tensor with index order specified by
     787             :  * LhsTensorIndices
     788             :  */
     789             : template <auto&... LhsTensorIndices, typename RhsTE,
     790             :           Requires<std::is_base_of_v<Expression, RhsTE>> = nullptr>
     791           1 : auto evaluate(const RhsTE& rhs_tensorexpression) {
     792             :   using lhs_tensorindex_list =
     793             :       tmpl::list<std::decay_t<decltype(LhsTensorIndices)>...>;
     794             :   using rhs_tensorindex_list = typename RhsTE::args_list;
     795             :   using rhs_symmetry = typename RhsTE::symmetry;
     796             :   using rhs_tensorindextype_list = typename RhsTE::index_list;
     797             : 
     798             :   // Stores (potentially reordered) symmetry and indices needed for constructing
     799             :   // the LHS tensor, with index order specified by LhsTensorIndices
     800             :   using lhs_tensor_symm_and_indices =
     801             :       LhsTensorSymmAndIndices<rhs_tensorindex_list, lhs_tensorindex_list,
     802             :                               rhs_symmetry, rhs_tensorindextype_list>;
     803             : 
     804             :   Tensor<typename RhsTE::type, typename lhs_tensor_symm_and_indices::symmetry,
     805             :          typename lhs_tensor_symm_and_indices::tensorindextype_list>
     806             :       lhs_tensor{};
     807             : 
     808             :   evaluate<LhsTensorIndices...>(make_not_null(&lhs_tensor),
     809             :                                 rhs_tensorexpression);
     810             :   return lhs_tensor;
     811             : }
     812             : 
     813             : /*!
     814             :  * \ingroup TensorExpressionsGroup
     815             :  * \brief If the LHS tensor is used in the RHS expression, this should be used
     816             :  * to assign a LHS tensor to the result of a RHS tensor expression that contains
     817             :  * it
     818             :  *
     819             :  * \details See documentation for `tenex::evaluate` for basic functionality.
     820             :  *
     821             :  * `tenex::update` differs from `tenex::evaluate` in that `tenex::update` should
     822             :  * be used when some LHS `Tensor` has been partially computed, and now we would
     823             :  * like to update it with a RHS expression that contains it. In other words,
     824             :  * this should be used when we would like to emulate assignment operations like
     825             :  * `LHS +=`, `LHS -=`, `LHS *=`, etc.
     826             :  *
     827             :  * One important difference to note with `tenex::update` is that it cannot split
     828             :  * up the RHS expression and evaluate subtrees, while `tenex::evaluate` can (see
     829             :  * `TensorExpression` documentation). From benchmarking, it was found that the
     830             :  * runtime of `DataVector` expressions scales poorly as we increase the number
     831             :  * of operations. For this reason, when the data type held by the tensors in the
     832             :  * expression is `DataVector`, it's best to avoid passing RHS expressions with a
     833             :  * large number of operations (e.g. an inner product that sums over many terms).
     834             :  *
     835             :  * ### Example usage
     836             :  * In implementing a large equation with many operations, we can manually break
     837             :  * up the equation and evaluate different subexpressions at a time by making one
     838             :  * initial call to `tenex::evaluate` followed by any number of calls to
     839             :  * `tenex::update` that use the LHS tensor in the RHS expression and will
     840             :  * compute the rest of the equation:
     841             :  *
     842             :  * \snippet Test_MixedOperations.cpp use_update
     843             :  *
     844             :  * \note `LhsTensorIndices` must be passed by reference because non-type
     845             :  * template parameters cannot be class types until C++20.
     846             :  *
     847             :  * @tparam LhsTensorIndices the TensorIndexs of the Tensor on the LHS of the
     848             :  * tensor expression, e.g. `ti_a`, `ti_b`, `ti_c`
     849             :  * @param lhs_tensor pointer to the resultant LHS Tensor to fill
     850             :  * @param rhs_tensorexpression the RHS TensorExpression to be evaluated
     851             :  */
     852             : template <auto&... LhsTensorIndices, typename LhsDataType, typename RhsDataType,
     853             :           typename LhsSymmetry, typename LhsIndexList, typename Derived,
     854             :           typename RhsSymmetry, typename RhsIndexList,
     855             :           typename... RhsTensorIndices>
     856           1 : void update(
     857             :     const gsl::not_null<Tensor<LhsDataType, LhsSymmetry, LhsIndexList>*>
     858             :         lhs_tensor,
     859             :     const TensorExpression<Derived, RhsDataType, RhsSymmetry, RhsIndexList,
     860             :                            tmpl::list<RhsTensorIndices...>>&
     861             :         rhs_tensorexpression) {
     862             :   using lhs_tensorindex_list =
     863             :       tmpl::list<std::decay_t<decltype(LhsTensorIndices)>...>;
     864             :   // Assert that each instance of the LHS tensor in the RHS tensor expression
     865             :   // uses the same generic index order that the LHS uses
     866             :   (~rhs_tensorexpression)
     867             :       .template assert_lhs_tensorindices_same_in_rhs<lhs_tensorindex_list>(
     868             :           lhs_tensor);
     869             : 
     870             :   detail::evaluate_impl<false, std::decay_t<decltype(LhsTensorIndices)>...>(
     871             :       lhs_tensor, rhs_tensorexpression,
     872             :       std::make_index_sequence<sizeof...(LhsTensorIndices)>{});
     873             : }
     874             : }  // namespace tenex

Generated by: LCOV version 1.14