Line data Source code
1 1 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : /// \file
5 : /// Defines ET for adding and subtracting tensors
6 :
7 : #pragma once
8 :
#include <array>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <limits>
#include <type_traits>
#include <utility>

#include "DataStructures/DataVector.hpp"
#include "DataStructures/Tensor/Expressions/DataTypeSupport.hpp"
#include "DataStructures/Tensor/Expressions/IndexPropertyCheck.hpp"
#include "DataStructures/Tensor/Expressions/NumberAsExpression.hpp"
#include "DataStructures/Tensor/Expressions/SpatialSpacetimeIndex.hpp"
#include "DataStructures/Tensor/Expressions/TensorExpression.hpp"
#include "DataStructures/Tensor/Expressions/TensorIndex.hpp"
#include "DataStructures/Tensor/Expressions/TensorIndexTransformation.hpp"
#include "DataStructures/Tensor/Expressions/TimeIndex.hpp"
#include "Utilities/Algorithm.hpp"
#include "Utilities/ForceInline.hpp"
#include "Utilities/MakeArray.hpp"
#include "Utilities/Requires.hpp"
#include "Utilities/TMPL.hpp"
31 :
32 : /// \cond
33 : namespace tenex {
34 : template <typename T1, typename T2, typename ArgsList1, typename ArgsList2,
35 : int Sign>
36 : struct AddSub;
37 : } // namespace tenex
38 : template <typename Derived, typename DataType, typename Symm,
39 : typename IndexList, typename Args, typename ReducedArgs>
40 : struct TensorExpression;
41 : /// \endcond
42 :
43 1 : namespace tenex {
44 : namespace detail {
45 : /// \brief Computes the rearranged symmetry of one operand according to the
46 : /// generic index order of the other operand
47 : ///
48 : /// \details
49 : /// Here is an example of what the algorithm does:
50 : ///
51 : /// Given \f$R_{abc} + S_{cab}\f$, reorder \f$S\f$' symmetry according to
52 : /// \f$R\f$'s index order
53 : /// `tensorindex_transformation`:
54 : /// \code
55 : /// {1, 2, 0} // positions of R's indices {a, b, c} in S' indices {c, a, b}
56 : /// \endcode
57 : /// `input_symm`:
58 : /// \code
59 : /// {2, 2, 1} // S' symmetry, where c and a are symmetric indices
60 : /// \endcode
61 : /// returned equivalent `output_symm`:
62 : /// \code
63 : /// {2, 1, 2} // S' symmetry (input_symm) rearranged to R's index order (abc)
64 : /// \endcode
65 : ///
66 : /// One special case scenario to note is when concrete time indices are
67 : /// involved in the transformation. Consider transforming the symmetry for
68 : /// some tensor \f$S_{ab}\f$ to the index order of another tensor
69 : /// \f$R_{btat}\f$. This would be necessary in an expression such as
70 : /// \f$R_{btat} + S_{ab}\f$. The transformation of the symmetry for \f$S\f$
71 : /// according to the index order of \f$R\f$ cannot simply be the list of
72 : /// positions of \f$R\f$'s indices in \f$S\f$' indices, as \f$S\f$ does not
73 : /// contain all of \f$R\f$'s indices, because it has no time indices. To handle
74 : /// cases like this, a placeholder value for the position of any time index
75 : /// must be substituted for an actual position, since one may not exist. In this
76 : /// example, the proper input transformation (`tensorindex_transformation`)
77 : /// would need to be `{1, PLACEHOLDER_VALUE, 0, PLACEHOLDER_VALUE}`, where
78 : /// `PLACEHOLDER_VALUE` is defined by
79 : /// `TensorIndexTransformation_detail::time_index_position_placeholder`. `1` and
80 : /// `0` are the positions of \f$b\f$ and \f$a\f$ in \f$S\f$, and the placeholder
81 : /// is used for the positions of time indices. In computing the output
82 : /// transformed symmetry, the function will insert a `0` at each position where
83 : /// this placeholder is found in the transformation. For example, if
84 : /// `input_symm` is `{2, 1}`, the returned output multi-index will be
85 : /// `{1, 0, 2, 0}`. Note that the symmetry returned by this function is not
86 : /// necessarily in the canonical form defined by ::Symmetry. This example with
87 : /// time indices is an example of this, as `0` is not a permitted ::Symmetry
88 : /// value, and the canonical form would have increasing symmetry values from
89 : /// right to left. In addition, even though the time indices in the rearranged
90 : /// symmetry will have the same symmetry value (`0`), this bears no impact on
91 : /// `get_addsub_symm`'s computation of the symmetry of the tensor resulting
92 : /// from the addition or subtraction.
93 : ///
94 : /// \tparam NumIndicesIn the number of indices in the operand whose symmetry is
95 : /// being transformed
96 : /// \tparam NumIndicesOut the number of indices in the other operand whose index
97 : /// order is the order the input operand symmetry is being transformed to
98 : /// \param input_symm the input operand symmetry to transform
99 : /// \param tensorindex_transformation the positions of the other operand's
100 : /// generic indices in the generic indices of the operand whose symmetry is
101 : /// being transformed (see details)
102 : /// \return the input operand symmetry rearranged according to the generic index
103 : /// order of the other operand
104 : template <size_t NumIndicesIn, size_t NumIndicesOut>
105 : SPECTRE_ALWAYS_INLINE constexpr std::array<std::int32_t, NumIndicesOut>
106 : transform_addsub_symm(
107 : const std::array<std::int32_t, NumIndicesIn>& input_symm,
108 : const std::array<size_t, NumIndicesOut>& tensorindex_transformation) {
109 : std::array<std::int32_t, NumIndicesOut> output_symm =
110 : make_array<NumIndicesOut, std::int32_t>(0);
111 : for (size_t i = 0; i < NumIndicesOut; i++) {
112 : gsl::at(output_symm, i) =
113 : (gsl::at(tensorindex_transformation, i) ==
114 : TensorIndexTransformation_detail::time_index_position_placeholder)
115 : ? 0
116 : : gsl::at(input_symm, gsl::at(tensorindex_transformation, i));
117 : }
118 : return output_symm;
119 : }
120 :
121 : /// @{
122 : /// \ingroup TensorExpressionsGroup
123 : /// \brief Returns the canonical symmetry of the tensor resulting from
124 : /// adding or subtracting two tensors, according to their symmetries
125 : ///
126 : /// \details The canonical symmetry returned follows the convention defined by
127 : /// ::Symmetry: symmetry values are in ascending order from right to left. If
128 : /// the convention implemented by ::Symmetry changes, this function will also
129 : /// need to be updated to match the new convention. The ::Symmetry metafunction
130 : /// could instead be used on the result of this function, but that would
131 : /// introduce avoidable and unnecessary extra computations, so it is not used.
132 : ///
133 : /// This function treats the two input symmetries as aligned (i.e. each position
134 : /// of `symm1` and `symm2` corresponds to a shared generic index at that
135 : /// position). The resultant symmetry is determined as follows: indices that are
136 : /// symmetric in both input symmetries are also symmetric in the resultant
137 : /// tensor.
138 : ///
139 : /// \param symm1 the symmetry of the first tensor being added or subtracted
140 : /// \param symm2 the symmetry of the second tensor being added or subtracted
141 : /// \return the canonical symmetry of the tensor resulting from adding or
142 : /// subtracting two tensors
template <size_t NumIndices, Requires<(NumIndices >= 2)> = nullptr>
constexpr std::array<std::int32_t, NumIndices> get_addsub_symm(
    const std::array<std::int32_t, NumIndices>& symm1,
    const std::array<std::int32_t, NumIndices>& symm2) {
  // max_int is used as a sentinel meaning "the resultant symmetry value at
  // this position has not been assigned yet"
  constexpr std::int32_t max_int = std::numeric_limits<std::int32_t>::max();
  std::array<std::int32_t, NumIndices> addsub_symm =
      make_array<NumIndices>(max_int);
  // Walk positions from rightmost to leftmost so symmetry values are assigned
  // in ascending order from right to left, per the ::Symmetry convention.
  // NOTE: right_index and left_index are unsigned, so decrementing past 0
  // wraps around to a huge value; the `< NumIndices` conditions are what
  // terminate both loops after position 0 has been processed.
  size_t right_index = NumIndices - 1;
  std::int32_t symm_value_to_set = 1;

  while (right_index < NumIndices) {
    std::int32_t symm1_value_to_find = symm1[right_index];
    std::int32_t symm2_value_to_find = symm2[right_index];
    // if we haven't yet set right_index for the resultant symmetry
    if (addsub_symm[right_index] == max_int) {
      addsub_symm[right_index] = symm_value_to_set;
      for (size_t left_index = right_index - 1; left_index < NumIndices;
           left_index--) {
        // if left_index of the resultant symmetry is not yet set and we've
        // found a common symmetry between symm1 and symm2 at this index
        if (addsub_symm[left_index] == max_int and
            symm1[left_index] == symm1_value_to_find and
            symm2[left_index] == symm2_value_to_find) {
          // indices symmetric in both operands are symmetric in the result
          addsub_symm[left_index] = symm_value_to_set;
        }
      }
      symm_value_to_set++;
    }
    right_index--;
  }

  return addsub_symm;
}
176 :
177 : template <size_t NumIndices, Requires<(NumIndices == 1)> = nullptr>
178 : constexpr std::array<std::int32_t, NumIndices> get_addsub_symm(
179 : const std::array<std::int32_t, NumIndices>& /*symm1*/,
180 : const std::array<std::int32_t, NumIndices>& /*symm2*/) {
181 : // return {{1}} instead of symm1 in case symm1 is not in the canonical form
182 : return {{1}};
183 : }
184 :
template <size_t NumIndices, Requires<(NumIndices == 0)> = nullptr>
constexpr std::array<std::int32_t, NumIndices> get_addsub_symm(
    const std::array<std::int32_t, NumIndices>& symm1,
    const std::array<std::int32_t, NumIndices>& /*symm2*/) {
  // Scalar (rank-0) case: the symmetry is an empty array, so symm1 can be
  // returned as-is
  return symm1;
}
191 : /// @}
192 :
193 : /// \ingroup TensorExpressionsGroup
194 : /// \brief Helper struct for computing the canonical symmetry of the tensor
195 : /// resulting from adding or subtracting two tensors, according to their
196 : /// symmetries and generic index orders
197 : ///
198 : /// \details The resultant symmetry (`type`) values correspond to the index
199 : /// order of the first tensor operand being added or subtracted:
200 : /// `TensorIndexList1`.
201 : ///
202 : /// \tparam SymmList1 the ::Symmetry of the first operand
203 : /// \tparam SymmList2 the ::Symmetry of the second operand
204 : /// \tparam TensorIndexList1 the generic indices of the first operand
205 : /// \tparam TensorIndexList2 the generic indices of the second operand
/// \tparam NumIndices1 the number of indices in the first operand; defaulted,
/// not intended to be explicitly specified
/// \tparam NumIndices2 the number of indices in the second operand; defaulted,
/// not intended to be explicitly specified
/// \tparam IndexSequence1 an index sequence over the first operand's indices;
/// defaulted, not intended to be explicitly specified
template <typename SymmList1, typename SymmList2, typename TensorIndexList1,
          typename TensorIndexList2,
          size_t NumIndices1 = tmpl::size<SymmList1>::value,
          size_t NumIndices2 = tmpl::size<SymmList2>::value,
          typename IndexSequence1 = std::make_index_sequence<NumIndices1>>
struct AddSubSymmetry;
212 :
template <
    template <typename...> class SymmList1, typename... Symm1,
    template <typename...> class SymmList2, typename... Symm2,
    template <typename...> class TensorIndexList1, typename... TensorIndices1,
    template <typename...> class TensorIndexList2, typename... TensorIndices2,
    size_t NumIndices1, size_t NumIndices2, size_t... Ints1>
struct AddSubSymmetry<SymmList1<Symm1...>, SymmList2<Symm2...>,
                      TensorIndexList1<TensorIndices1...>,
                      TensorIndexList2<TensorIndices2...>, NumIndices1,
                      NumIndices2, std::index_sequence<Ints1...>> {
  // the `TensorIndex` values of the two operands' generic indices
  static constexpr std::array<size_t, NumIndices1> tensorindex_values1 = {
      {TensorIndices1::value...}};
  static constexpr std::array<size_t, NumIndices2> tensorindex_values2 = {
      {TensorIndices2::value...}};
  // positions of tensorindex_values1 in tensorindex_values2
  static constexpr std::array<size_t, NumIndices1> op2_to_op1_map =
      ::tenex::compute_tensorindex_transformation(tensorindex_values2,
                                                  tensorindex_values1);

  // the ::Symmetry values of the two operands
  static constexpr std::array<std::int32_t, NumIndices1> symm1 = {
      {Symm1::value...}};
  static constexpr std::array<std::int32_t, NumIndices2> symm2 = {
      {Symm2::value...}};
  // 2nd argument is symm2 rearranged according to `TensorIndexList1` order
  // so that the two symmetry arguments to `get_addsub_symm` are aligned
  // w.r.t. their generic index orders
  static constexpr std::array<std::int32_t, NumIndices1> addsub_symm =
      get_addsub_symm(symm1, transform_addsub_symm(symm2, op2_to_op1_map));

  // the resultant canonical symmetry, exposed as a compile-time list whose
  // values follow the first operand's generic index order
  using type = tmpl::integral_list<std::int32_t, addsub_symm[Ints1]...>;
};
244 :
245 : /// \ingroup TensorExpressionsGroup
246 : /// \brief Helper struct for defining the symmetry, index list, and
247 : /// generic index list of the tensor resulting from adding or
248 : /// subtracting two tensor expressions
249 : ///
250 : /// \tparam T1 the first tensor expression operand
251 : /// \tparam T2 the second tensor expression operand
template <typename T1, typename T2>
struct AddSubType {
  static_assert(std::is_base_of_v<Expression, T1> and
                    std::is_base_of_v<Expression, T2>,
                "Parameters to AddSubType must be TensorExpressions");
  // the result's data type, derived from the two operands' data types
  using type =
      typename get_binop_datatype<typename T1::type, typename T2::type>::type;
  // the result's canonical symmetry, expressed w.r.t. the first operand's
  // generic index order
  using symmetry =
      typename AddSubSymmetry<typename T1::symmetry, typename T2::symmetry,
                              typename T1::args_list,
                              typename T2::args_list>::type;
  // the first operand's index list and generic index list are propagated as
  // the result's index list and generic index list
  using index_list = typename T1::index_list;
  using tensorindex_list = typename T1::args_list;
};
266 : } // namespace detail
267 :
268 : /// \ingroup TensorExpressionsGroup
269 : /// \brief Defines the tensor expression representing the addition or
270 : /// subtraction of two tensor expressions
271 : ///
272 : /// \details
273 : /// For details on aliases and members defined in this class, as well as general
274 : /// `TensorExpression` terminology used in its members' documentation, see
275 : /// documentation for `TensorExpression`.
276 : ///
277 : /// \tparam T1 the left operand expression
278 : /// \tparam T2 the right operand expression
279 : /// \tparam ArgsList1 generic `TensorIndex`s of the left operand
280 : /// \tparam ArgsList2 generic `TensorIndex`s of the right operand
281 : /// \tparam Sign the sign of the operation selected, 1 for addition or -1 for
282 : /// subtraction
283 : template <typename T1, typename T2, typename ArgsList1, typename ArgsList2,
284 : int Sign>
285 1 : struct AddSub;
286 :
287 : template <typename T1, typename T2, template <typename...> class ArgsList1,
288 : template <typename...> class ArgsList2, typename... Args1,
289 : typename... Args2, int Sign>
290 0 : struct AddSub<T1, T2, ArgsList1<Args1...>, ArgsList2<Args2...>, Sign>
291 : : public TensorExpression<
292 : AddSub<T1, T2, ArgsList1<Args1...>, ArgsList2<Args2...>, Sign>,
293 : typename detail::AddSubType<T1, T2>::type,
294 : typename detail::AddSubType<T1, T2>::symmetry,
295 : typename detail::AddSubType<T1, T2>::index_list,
296 : typename detail::AddSubType<T1, T2>::tensorindex_list> {
  // Compile-time checks that the two operand expressions may legally be added
  // or subtracted: compatible data types, no complex<double> + DataVector
  // combination, compatible index properties, and a valid Sign
  static_assert(
      detail::tensorexpression_binop_datatypes_are_supported_v<T1, T2>,
      "Cannot add or subtract the given TensorExpression types with the given "
      "data types. This can occur from e.g. trying to add a Tensor with data "
      "type double and a Tensor with data type DataVector.");
  static_assert(
      not((std::is_same_v<T1, NumberAsExpression<std::complex<double>>> and
           std::is_same_v<typename T2::type, DataVector>) or
          (std::is_same_v<T2, NumberAsExpression<std::complex<double>>> and
           std::is_same_v<typename T1::type, DataVector>)),
      "Cannot perform addition and subtraction between a std::complex<double> "
      "and a TensorExpression whose data type is DataVector because Blaze does "
      "not support addition and subtraction between std::complex<double> and "
      "DataVector.");
  static_assert(
      detail::IndexPropertyCheck<typename T1::index_list,
                                 typename T2::index_list, ArgsList1<Args1...>,
                                 ArgsList2<Args2...>>::value,
      "You are attempting to add indices of different types, e.g. T^a_b + "
      "S^b_a, which doesn't make sense. The indices may also be in different "
      "frames, different types (spatial vs. spacetime) or of different "
      "dimension.");
  static_assert(Sign == 1 or Sign == -1,
                "Invalid Sign provided for addition or subtraction of Tensor "
                "elements. Sign must be 1 (addition) or -1 (subtraction).");
322 :
  // === Index properties ===
  /// The type of the data being stored in the result of the expression
  using type = typename detail::AddSubType<T1, T2>::type;
  /// The ::Symmetry of the result of the expression
  using symmetry = typename detail::AddSubType<T1, T2>::symmetry;
  /// The list of \ref SpacetimeIndex "TensorIndexType"s of the result of the
  /// expression
  using index_list = typename detail::AddSubType<T1, T2>::index_list;
  /// The list of generic `TensorIndex`s of the result of the
  /// expression
  using args_list = typename T1::args_list;
  /// The number of tensor indices in the result of the expression. This also
  /// doubles as the left operand's number of indices.
  static constexpr auto num_tensor_indices = tmpl::size<index_list>::value;
  /// The number of tensor indices in the right operand expression
  static constexpr auto num_tensor_indices_op2 = sizeof...(Args2);
  /// Mapping from the left operand's index order to the right operand's index
  /// order
  static constexpr std::array<size_t, num_tensor_indices_op2>
      operand_index_transformation =
          compute_tensorindex_transformation<num_tensor_indices,
                                             num_tensor_indices_op2>(
              {{Args1::value...}}, {{Args2::value...}});
  /// Positions of indices in first operand where generic spatial indices are
  /// used for spacetime indices
  static constexpr auto op1_spatial_spacetime_index_positions =
      detail::get_spatial_spacetime_index_positions<typename T1::index_list,
                                                    ArgsList1<Args1...>>();
  /// Positions of indices in second operand where generic spatial indices are
  /// used for spacetime indices
  static constexpr auto op2_spatial_spacetime_index_positions =
      detail::get_spatial_spacetime_index_positions<typename T2::index_list,
                                                    ArgsList2<Args2...>>();

  /// Whether or not the two operands have the same `TensorIndex`s in the same
  /// order (including concrete time indices)
  static constexpr bool ops_have_generic_indices_at_same_positions =
      generic_indices_at_same_positions<tmpl::list<Args1...>,
                                        tmpl::list<Args2...>>::value;

  // === Expression subtree properties ===
  /// The number of arithmetic tensor operations done in the subtree for the
  /// left operand
  static constexpr size_t num_ops_left_child = T1::num_ops_subtree;
  /// The number of arithmetic tensor operations done in the subtree for the
  /// right operand
  static constexpr size_t num_ops_right_child = T2::num_ops_subtree;
  // This helps ensure we are minimizing breadth in the overall tree when we
  // have addition (subtraction is not commutative)
  static_assert(Sign == -1 or num_ops_left_child >= num_ops_right_child,
                "The left operand of an AddSub expression performing addition "
                "should be a subtree with equal or more tensor operations than "
                "the right operand's subtree.");
  /// The total number of arithmetic tensor operations done in this expression's
  /// whole subtree
  static constexpr size_t num_ops_subtree =
      num_ops_left_child + num_ops_right_child + 1;
  /// The height of this expression's node in the expression tree relative to
  /// the closest `TensorAsExpression` leaf in its subtree
  // NOTE(review): size_t max() appears to act as the sentinel meaning "no
  // Tensor leaf in this subtree"; the inner check avoids incrementing the
  // sentinel past max() — confirm against TensorExpression's documentation
  static constexpr size_t height_relative_to_closest_tensor_leaf_in_subtree =
      T2::height_relative_to_closest_tensor_leaf_in_subtree <=
              T1::height_relative_to_closest_tensor_leaf_in_subtree
          ? (T2::height_relative_to_closest_tensor_leaf_in_subtree !=
                     std::numeric_limits<size_t>::max()
                 ? T2::height_relative_to_closest_tensor_leaf_in_subtree + 1
                 : T2::height_relative_to_closest_tensor_leaf_in_subtree)
          : T1::height_relative_to_closest_tensor_leaf_in_subtree + 1;

  // === Properties for splitting up subexpressions along the primary path ===
  // These definitions only have meaning if this expression actually ends up
  // being along the primary path that is taken when evaluating the whole tree.
  // See documentation for `TensorExpression` for more details.
  /// If on the primary path, whether or not the expression is an ending point
  /// of a leg
  static constexpr bool is_primary_end = T1::is_primary_start;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done in the subtree of the child along the
  /// primary path, given that we will have already computed the whole subtree
  /// at the next lowest leg's starting point.
  static constexpr size_t num_ops_to_evaluate_primary_left_child =
      is_primary_end ? 0 : T1::num_ops_to_evaluate_primary_subtree;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done in the right operand's subtree. No
  /// splitting is currently done, so this is just `num_ops_right_child`.
  static constexpr size_t num_ops_to_evaluate_primary_right_child =
      num_ops_right_child;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done for this expression's subtree, given that
  /// we will have already computed the subtree at the next lowest leg's
  /// starting point
  static constexpr size_t num_ops_to_evaluate_primary_subtree =
      num_ops_to_evaluate_primary_left_child +
      num_ops_to_evaluate_primary_right_child + 1;
  /// If on the primary path, whether or not the expression is a starting point
  /// of a leg
  static constexpr bool is_primary_start =
      num_ops_to_evaluate_primary_subtree >=
      detail::max_num_ops_in_sub_expression<type>;
  /// When evaluating along a primary path, whether each operand's subtrees
  /// should be evaluated separately. Since `DataVector` expression runtime
  /// scales poorly with increased number of operations, evaluating the two
  /// expression subtrees separately like this is beneficial when at least one
  /// of the subtrees contains a large number of operations.
  static constexpr bool evaluate_children_separately =
      is_primary_start and (num_ops_to_evaluate_primary_left_child >=
                                detail::max_num_ops_in_sub_expression<type> or
                            num_ops_to_evaluate_primary_right_child >=
                                detail::max_num_ops_in_sub_expression<type>);
  /// If on the primary path, whether or not the expression's child along the
  /// primary path is a subtree that contains a starting point of a leg along
  /// the primary path
  static constexpr bool primary_child_subtree_contains_primary_start =
      T1::primary_subtree_contains_primary_start;
  /// If on the primary path, whether or not this subtree contains a starting
  /// point of a leg along the primary path
  static constexpr bool primary_subtree_contains_primary_start =
      is_primary_start or primary_child_subtree_contains_primary_start;
440 :
  /// \brief Construct the expression from its two operand expressions, taking
  /// ownership of both
  AddSub(T1 t1, T2 t2) : t1_(std::move(t1)), t2_(std::move(t2)) {}
  ~AddSub() override = default;
443 :
444 : /// \brief Assert that the LHS tensor of the equation does not also appear in
445 : /// this expression's subtree
  template <typename LhsTensor>
  SPECTRE_ALWAYS_INLINE void assert_lhs_tensor_not_in_rhs_expression(
      const gsl::not_null<LhsTensor*> lhs_tensor) const {
    // recurse only into operands that are not number expressions, since a
    // number leaf holds no `Tensor` to check against the LHS
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
      t1_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
    }
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
      t2_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
    }
  }
456 :
457 : /// \brief Assert that each instance of the LHS tensor in the RHS tensor
458 : /// expression uses the same generic index order that the LHS uses
459 : ///
460 : /// \tparam LhsTensorIndices the list of generic `TensorIndex`s of the LHS
461 : /// result `Tensor` being computed
462 : /// \param lhs_tensor the LHS result `Tensor` being computed
  template <typename LhsTensorIndices, typename LhsTensor>
  SPECTRE_ALWAYS_INLINE void assert_lhs_tensorindices_same_in_rhs(
      const gsl::not_null<LhsTensor*> lhs_tensor) const {
    // recurse only into operands that are not number expressions, since a
    // number leaf holds no `Tensor` whose index order could be checked
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
      t1_.template assert_lhs_tensorindices_same_in_rhs<LhsTensorIndices>(
          lhs_tensor);
    }
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
      t2_.template assert_lhs_tensorindices_same_in_rhs<LhsTensorIndices>(
          lhs_tensor);
    }
  }
475 :
476 : /// \brief Get the size of a component from a `Tensor` in this expression's
477 : /// subtree of the RHS `TensorExpression`
478 : ///
479 : /// \return the size of a component from a `Tensor` in this expression's
480 : /// subtree of the RHS `TensorExpression`
481 1 : SPECTRE_ALWAYS_INLINE size_t get_rhs_tensor_component_size() const {
482 : if constexpr (T1::height_relative_to_closest_tensor_leaf_in_subtree <=
483 : T2::height_relative_to_closest_tensor_leaf_in_subtree) {
484 : return t1_.get_rhs_tensor_component_size();
485 : } else {
486 : return t2_.get_rhs_tensor_component_size();
487 : }
488 : }
489 :
490 : /// \brief Return the second operand's multi-index given the first operand's
491 : /// multi-index
492 : ///
493 : /// \param op1_multi_index the multi-index of the left operand
494 : /// \return the second operand's multi-index
  SPECTRE_ALWAYS_INLINE std::array<size_t, num_tensor_indices_op2>
  get_op2_multi_index(
      const std::array<size_t, num_tensor_indices>& op1_multi_index) const {
    // Four cases, depending on whether the operands share a generic index
    // order and whether any generic spatial index is used for a spacetime
    // index (see the `get` member's doxygen for a worked example)
    if constexpr (ops_have_generic_indices_at_same_positions) {
      if constexpr (op1_spatial_spacetime_index_positions.size() != 0 or
                    op2_spatial_spacetime_index_positions.size() != 0) {
        // Operands have the same generic index order, but at least one of them
        // has at least one spacetime index where a spatial index has been used,
        // so we need to compute the 2nd operand's (possibly) shifted
        // multi-index values
        constexpr std::array<std::int32_t, num_tensor_indices>
            spatial_spacetime_index_transformation =
                detail::spatial_spacetime_index_transformation_from_positions<
                    num_tensor_indices>(op1_spatial_spacetime_index_positions,
                                        op2_spatial_spacetime_index_positions);
        std::array<size_t, num_tensor_indices> op2_multi_index =
            op1_multi_index;
        // apply the per-position value shift computed above to each component
        // of the multi-index
        for (size_t i = 0; i < num_tensor_indices; i++) {
          gsl::at(op2_multi_index, i) = static_cast<size_t>(
              static_cast<std::int32_t>(gsl::at(op2_multi_index, i)) +
              gsl::at(spatial_spacetime_index_transformation, i));
        }
        return op2_multi_index;
      } else {
        // Operands have the same generic index order and neither of them has
        // a spacetime index where a spatial index has been used, so
        // both operands have the same multi-index
        return op1_multi_index;
      }
    } else {
      if constexpr (op1_spatial_spacetime_index_positions.size() != 0 or
                    op2_spatial_spacetime_index_positions.size() != 0) {
        // Operands don't have the same generic index order and at least one of
        // them has at least one spacetime index where a spatial index has been
        // used, so we need to compute the 2nd operand's (possibly) shifted
        // multi-index values and reorder them with respect to the 2nd operand's
        // generic index order

        // The list of positions where generic spatial indices were used for
        // spacetime indices in the second operand, but rearranged in terms of
        // the first operand's generic index order.
        constexpr std::array<size_t,
                             op2_spatial_spacetime_index_positions.size()>
            transformed_op2_spatial_spacetime_index_positions = []() {
              std::array<size_t, op2_spatial_spacetime_index_positions.size()>
                  transformed_positions{};
              for (size_t i = 0;
                   i < op2_spatial_spacetime_index_positions.size(); i++) {
                gsl::at(transformed_positions, i) =
                    gsl::at(operand_index_transformation,
                            gsl::at(op2_spatial_spacetime_index_positions, i));
              }
              return transformed_positions;
            }();

        // According to the transformed positions above, compute the value shift
        // needed to convert from multi-indices of the first operand to
        // multi-indices of the 2nd operand (with the generic index order of the
        // first)
        constexpr std::array<std::int32_t, num_tensor_indices>
            spatial_spacetime_index_transformation =
                detail::spatial_spacetime_index_transformation_from_positions<
                    num_tensor_indices>(
                    op1_spatial_spacetime_index_positions,
                    transformed_op2_spatial_spacetime_index_positions);
        std::array<size_t, num_tensor_indices> op2_multi_index =
            op1_multi_index;
        // shift the values first, then reorder to op2's generic index order
        for (size_t i = 0; i < num_tensor_indices; i++) {
          gsl::at(op2_multi_index, i) = static_cast<size_t>(
              static_cast<std::int32_t>(gsl::at(op2_multi_index, i)) +
              gsl::at(spatial_spacetime_index_transformation, i));
        }
        return transform_multi_index(op2_multi_index,
                                     operand_index_transformation);
      } else {
        // Operands don't have the same generic index order, but neither of them
        // has a spacetime index where a spatial index has been used, so we just
        // need to reorder the 2nd operand's multi_index according to its
        // generic index order
        return transform_multi_index(op1_multi_index,
                                     operand_index_transformation);
      }
    }
  }
579 :
580 : /// \brief Helper function for computing the sum of or difference between
581 : /// components at given multi-indices from both operands of the expression
582 : ///
583 : /// \details Both multi-index arguments must be ordered according to their
584 : /// operand's respective generic index ordering
585 : ///
586 : /// \param op1_multi_index the multi-index of the component of the first
587 : /// operand
588 : /// \param op2_multi_index the multi-index of the component of the second
589 : /// operand
590 : /// \return the sum of or difference between the two components' values
591 1 : SPECTRE_ALWAYS_INLINE decltype(auto) add_or_subtract(
592 : const std::array<size_t, num_tensor_indices>& op1_multi_index,
593 : const std::array<size_t, num_tensor_indices_op2>& op2_multi_index) const {
594 : if constexpr (Sign == 1) {
595 : return t1_.get(op1_multi_index) + t2_.get(op2_multi_index);
596 : } else {
597 : return t1_.get(op1_multi_index) - t2_.get(op2_multi_index);
598 : }
599 : }
600 :
601 : /// \brief Return the value of the component at the given multi-index of the
602 : /// tensor resulting from addition or subtraction
603 : ///
604 : /// \details One important detail to note about the type of the `AddSub`
605 : /// expression is that its two operands may have (i) different generic index
606 : /// orders, and/or (ii) different indices in their `index_list`s if where one
607 : /// operand uses a generic spatial index for a spacetime index, the other
608 : /// tensor may use that generic spatial index for a spatial index of the same
609 : /// dimension, valence, and frame. Therefore, there are four possible cases
610 : /// for an `AddSub` expression that are considered in the implementation:
611 : /// - same generic index order, spatial spacetime indices in expression
612 : /// - same generic index order, spatial spacetime indices not in expression
613 : /// - different generic index order, spatial spacetime indices in expression
614 : /// - different generic index order, spatial spacetime indices not in
615 : /// expression
616 : ///
617 : /// This means that for expressions where the generic index orders differ, a
618 : /// multi-index for a component of one operand is a (possible) rearrangement
619 : /// of the equivalent multi-index for a component in the other operand. This
620 : /// also means that for expressions where (at least once) a generic spatial
621 : /// index is used for a spacetime index, then, after accounting
622 : /// for potential reordering due to different generic index orders, a
623 : /// multi-index's values for a component of one operand are (possibly) shifted
624 : /// by one, compared to the multi-index's values for a component in the other
625 : /// operand.
626 : ///
627 : /// For example, given \f$R_{ij} + S_{ji}\f$, let \f$R\f$'s first index be
628 : /// a spacetime index, but \f$R\f$'s second index and both of \f$S\f$' indices
629 : /// be spatial indices. If \f$i = 2\f$ and \f$j = 0\f$, then when we compute
630 : /// \f$R_{20} + S_{02}\f$, the multi-index for \f$R_{20}\f$ is
631 : /// `{2 + 1, 0} = {3, 0}` (first value shifted because it is a spacetime
/// index) and the multi-index for \f$S_{02}\f$ is `{0, 2}`. Because the first
/// operand of an `AddSub` expression propagates its generic index order and
634 : /// index list ( \ref SpacetimeIndex "TensorIndexType"s) as the `AddSub`'s own
635 : /// generic index order and index list, the `result_multi_index` is equivalent
636 : /// to the multi-index for the first operand. Thus, we need only compute the
637 : /// second operand's multi-index as a transformation of the first: reorder and
638 : /// shift the values of the first operand to compute the equivalent
639 : /// multi-index for the second operand.
640 : ///
641 : /// \param result_multi_index the multi-index of the component of the result
642 : /// tensor to retrieve
643 : /// \return the value of the component at `result_multi_index` in the result
644 : /// tensor
645 1 : SPECTRE_ALWAYS_INLINE decltype(auto) get(
646 : const std::array<size_t, num_tensor_indices>& result_multi_index) const {
647 : return add_or_subtract(result_multi_index,
648 : get_op2_multi_index(result_multi_index));
649 : }
650 :
651 : /// \brief Helper for evaluating the LHS Tensor's result component at this
652 : /// subtree by evaluating the two operand's subtrees separately and adding or
653 : /// subtracting them
654 : ///
655 : /// \details
656 : /// The left and right operands' subtrees are evaluated successively with
657 : /// two separate assignments to the LHS result component. Since `DataVector`
658 : /// expression runtime scales poorly with increased number of operations,
659 : /// evaluating the two expression subtrees separately like this is beneficial
660 : /// when at least one of the subtrees contains a large number of operations.
661 : /// Instead of evaluating a larger expression with their combined total number
662 : /// of operations, we evaluate two smaller ones.
663 : ///
664 : /// This function also differs from `add_or_subtract` in that it takes into
665 : /// account whether we have already computed part of the result component at a
666 : /// lower subtree. In recursively computing this sum/difference, the current
667 : /// result component will be substituted in for the most recent (highest)
668 : /// subtree below it that has already been evaluated.
669 : ///
670 : /// \param result_component the LHS tensor component to evaluate
671 : /// \param op1_multi_index the multi-index of the component of the first
672 : /// operand of the sum or difference to evaluate
673 : /// \param op2_multi_index the multi-index of the component of the second
674 : /// operand of the sum or difference to evaluate
675 : template <typename ResultType>
676 1 : SPECTRE_ALWAYS_INLINE void add_or_subtract_primary_children(
677 : ResultType& result_component,
678 : const std::array<size_t, num_tensor_indices>& op1_multi_index,
679 : const std::array<size_t, num_tensor_indices_op2>& op2_multi_index) const {
680 : if constexpr (Sign == 1) {
681 : // We're performing addition
682 : if constexpr (is_primary_end) {
683 : (void)op1_multi_index;
684 : // We've already computed the whole child subtree on the primary path,
685 : // so just add the result of the other child's subtree to the current
686 : // result
687 : result_component += t2_.get(op2_multi_index);
688 : } else {
689 : // We haven't yet evaluated the whole subtree of the primary child, so
690 : // first assign the result component to be the result of computing the
691 : // primary child's subtree
692 : result_component = t1_.get_primary(result_component, op1_multi_index);
693 : // Now that the primary child's subtree has been computed, add the
694 : // result of evaluating the other child's subtree to the current result
695 : result_component += t2_.get(op2_multi_index);
696 : }
697 : } else {
698 : // We're performing subtraction
699 : if constexpr (is_primary_end) {
700 : (void)op1_multi_index;
701 : // We've already computed the whole child subtree on the primary path,
702 : // so just subtract the result of the other child's subtree from the
703 : // current result
704 : result_component -= t2_.get(op2_multi_index);
705 : } else {
706 : // We haven't yet evaluated the whole subtree of the primary child, so
707 : // first assign the result component to be the result of computing the
708 : // primary child's subtree
709 : result_component = t1_.get_primary(result_component, op1_multi_index);
710 : // Now that the primary child's subtree has been computed, subtract the
711 : // result of evaluating the other child's subtree from the current
712 : // result
713 : result_component -= t2_.get(op2_multi_index);
714 : }
715 : }
716 : }
717 :
718 : /// \brief Evaluate the LHS Tensor's result component at this subtree by
719 : /// evaluating the two operand's subtrees separately and adding or subtracting
720 : /// them
721 : ///
722 : /// \details
723 : /// See `add_or_subtract_primary_children` for more details
724 : ///
725 : /// \param result_component the LHS tensor component to evaluate
726 : /// \param result_multi_index the multi-index of the component of the result
727 : /// tensor to evaluate
728 : template <typename ResultType>
729 1 : SPECTRE_ALWAYS_INLINE void evaluate_primary_children(
730 : ResultType& result_component,
731 : const std::array<size_t, num_tensor_indices>& result_multi_index) const {
732 : add_or_subtract_primary_children(result_component, result_multi_index,
733 : get_op2_multi_index(result_multi_index));
734 : }
735 :
736 : /// \brief Helper function for returning the sum of or difference between
737 : /// components at given multi-indices from both operands of the expression
738 : ///
739 : /// \details
740 : /// This function differs from `add_or_subtract` in that it takes into account
741 : /// whether we have already computed part of the result component at a lower
742 : /// subtree. In recursively computing this sum/difference, the current result
743 : /// component will be substituted in for the most recent (highest) subtree
744 : /// below it that has already been evaluated.
745 : ///
746 : /// \param result_component the LHS tensor component to evaluate
747 : /// \param op1_multi_index the multi-index of the component of the first
748 : /// operand
749 : /// \param op2_multi_index the multi-index of the component of the second
750 : /// operand
751 : /// \return the sum of or difference between the two components' values
752 : template <typename ResultType>
753 1 : SPECTRE_ALWAYS_INLINE decltype(auto) add_or_subtract_primary(
754 : const ResultType& result_component,
755 : const std::array<size_t, num_tensor_indices>& op1_multi_index,
756 : const std::array<size_t, num_tensor_indices_op2>& op2_multi_index) const {
757 : if constexpr (Sign == 1) {
758 : // We're performing addition
759 : if constexpr (is_primary_end) {
760 : (void)op1_multi_index;
761 : // We've already computed the whole child subtree on the primary path,
762 : // so just add the result of the other child's subtree to the current
763 : // result
764 : return result_component + t2_.get(op2_multi_index);
765 : } else {
766 : // We haven't yet evaluated the whole subtree for this expression, so
767 : // return the sum of the results of the two operands' subtrees
768 : return t1_.get_primary(result_component, op1_multi_index) +
769 : t2_.get(op2_multi_index);
770 : }
771 : } else {
772 : // We're performing subtraction
773 : if constexpr (is_primary_end) {
774 : (void)op1_multi_index;
775 : // We've already computed the whole child subtree on the primary path,
776 : // so just subtract the result of the other child's subtree from the
777 : // current result
778 : return result_component - t2_.get(op2_multi_index);
779 : } else {
780 : // We haven't yet evaluated the whole subtree for this expression, so
781 : // return the difference between the results of the two operands'
782 : // subtrees
783 : return t1_.get_primary(result_component, op1_multi_index) -
784 : t2_.get(op2_multi_index);
785 : }
786 : }
787 : }
788 :
789 : /// \brief Return the value of the component at the given multi-index of the
790 : /// tensor resulting from addition or subtraction
791 : ///
792 : /// \details
793 : /// This function differs from `get` in that it takes into account whether we
794 : /// have already computed part of the result component at a lower subtree.
795 : /// In recursively computing this sum/difference, the current result component
796 : /// will be substituted in for the most recent (highest) subtree below it that
797 : /// has already been evaluated.
798 : ///
799 : /// \param result_component the LHS tensor component to evaluate
800 : /// \param result_multi_index the multi-index of the component of the result
801 : /// tensor to retrieve
802 : /// \return the value of the component at `result_multi_index` in the result
803 : /// tensor
804 : template <typename ResultType>
805 1 : SPECTRE_ALWAYS_INLINE decltype(auto) get_primary(
806 : const ResultType& result_component,
807 : const std::array<size_t, num_tensor_indices>& result_multi_index) const {
808 : return add_or_subtract_primary(result_component, result_multi_index,
809 : get_op2_multi_index(result_multi_index));
810 : }
811 :
812 : /// \brief Successively evaluate the LHS Tensor's result component at each
813 : /// leg in this expression's subtree
814 : ///
815 : /// \details
816 : /// This function takes into account whether we have already computed part of
817 : /// the result component at a lower subtree. In recursively computing this
818 : /// sum/difference, the current result component will be substituted in for
819 : /// the most recent (highest) subtree below it that has already been
820 : /// evaluated.
821 : ///
822 : /// \param result_component the LHS tensor component to evaluate
823 : /// \param result_multi_index the multi-index of the component of the result
824 : /// tensor to evaluate
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE void evaluate_primary_subtree(
      ResultType& result_component,
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    // NOTE: the recursion into the primary child must happen before this
    // node's own evaluation so lower legs are folded into `result_component`
    // first
    if constexpr (primary_child_subtree_contains_primary_start) {
      // The primary child's subtree contains at least one leg, so recurse down
      // and evaluate that first
      t1_.evaluate_primary_subtree(result_component, result_multi_index);
    }

    if constexpr (is_primary_start) {
      // We want to evaluate the subtree for this expression
      if constexpr (evaluate_children_separately) {
        // Evaluate operand's subtrees separately; this avoids one large
        // `DataVector` expression (see `add_or_subtract_primary_children`)
        evaluate_primary_children(result_component, result_multi_index);
      } else {
        // Evaluate whole subtree as one expression
        result_component = get_primary(result_component, result_multi_index);
      }
    }
  }
846 :
 private:
  /// Left operand expression; per the class docs, the result takes its
  /// generic index order and index list from this operand
  T1 t1_;
  /// Right operand expression
  T2 t2_;
};
853 : } // namespace tenex
854 :
/*!
 * \ingroup TensorExpressionsGroup
 * \brief Returns the tensor expression representing the sum of two tensor
 * expressions
 */
858 : template <typename T1, typename T2, typename X1, typename X2, typename Symm1,
859 : typename Symm2, typename IndexList1, typename IndexList2,
860 : typename Args1, typename Args2>
861 0 : SPECTRE_ALWAYS_INLINE auto operator+(
862 : const TensorExpression<T1, X1, Symm1, IndexList1, Args1>& t1,
863 : const TensorExpression<T2, X2, Symm2, IndexList2, Args2>& t2) {
864 : using op1_generic_indices =
865 : typename tenex::detail::remove_time_indices<Args1>::type;
866 : using op2_generic_indices =
867 : typename tenex::detail::remove_time_indices<Args2>::type;
868 : static_assert(tmpl::size<op1_generic_indices>::value ==
869 : tmpl::size<op2_generic_indices>::value,
870 : "Tensor addition is only possible when the same number of "
871 : "generic indices are used with both operands.");
872 : static_assert(
873 : tmpl::equal_members<op1_generic_indices, op2_generic_indices>::value,
874 : "The generic indices when adding two tensors must be equal. This error "
875 : "occurs from expressions like R(ti::a, ti::b) + S(ti::c, ti::a)");
876 : if constexpr (T1::num_ops_subtree >= T2::num_ops_subtree) {
877 : return tenex::AddSub<T1, T2, Args1, Args2, 1>(~t1, ~t2);
878 : } else {
879 : return tenex::AddSub<T2, T1, Args2, Args1, 1>(~t2, ~t1);
880 : }
881 : }
882 :
883 : /// @{
884 : /// \ingroup TensorExpressionsGroup
885 : /// \brief Returns the tensor expression representing the sum of a tensor
886 : /// expression and a number
887 : ///
888 : /// \details
889 : /// The tensor expression operand must represent an expression that, when
890 : /// evaluated, would be a rank 0 tensor. For example, if `R` and `S` are
891 : /// Tensors, here is a non-exhaustive list of some of the acceptable forms that
892 : /// the tensor expression operand could take:
893 : /// - `R()`
894 : /// - `R(ti::A, ti::a)`
895 : /// - `(R(ti::A, ti::B) * S(ti::a, ti::b))`
896 : /// - `R(ti::t, ti::t)`
897 : ///
898 : /// \param t the tensor expression operand of the sum
899 : /// \param number the numeric operand of the sum
900 : /// \return the tensor expression representing the sum of the tensor expression
901 : /// and the number
902 : template <typename T, typename X, typename Symm, typename IndexList,
903 : typename... Args, typename N,
904 : Requires<std::is_arithmetic_v<N>> = nullptr>
905 1 : SPECTRE_ALWAYS_INLINE auto operator+(
906 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
907 : const N number) {
908 : static_assert(
909 : (... and tt::is_time_index<Args>::value),
910 : "Can only add a number to a tensor expression that evaluates to a rank 0"
911 : "tensor.");
912 : return t + tenex::NumberAsExpression(number);
913 : }
914 : template <typename T, typename X, typename Symm, typename IndexList,
915 : typename... Args, typename N,
916 : Requires<std::is_arithmetic_v<N>> = nullptr>
917 1 : SPECTRE_ALWAYS_INLINE auto operator+(
918 : const N number,
919 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
920 : static_assert(
921 : (... and tt::is_time_index<Args>::value),
922 : "Can only add a number to a tensor expression that evaluates to a rank 0"
923 : "tensor.");
924 : return t + tenex::NumberAsExpression(number);
925 : }
926 : template <typename T, typename X, typename Symm, typename IndexList,
927 : typename... Args, typename N>
928 1 : SPECTRE_ALWAYS_INLINE auto operator+(
929 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
930 : const std::complex<N>& number) {
931 : static_assert(
932 : (... and tt::is_time_index<Args>::value),
933 : "Can only add a number to a tensor expression that evaluates to a rank 0"
934 : "tensor.");
935 : return t + tenex::NumberAsExpression(number);
936 : }
937 : template <typename T, typename X, typename Symm, typename IndexList,
938 : typename... Args, typename N>
939 1 : SPECTRE_ALWAYS_INLINE auto operator+(
940 : const std::complex<N>& number,
941 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
942 : static_assert(
943 : (... and tt::is_time_index<Args>::value),
944 : "Can only add a number to a tensor expression that evaluates to a rank 0"
945 : "tensor.");
946 : return t + tenex::NumberAsExpression(number);
947 : }
948 : /// @}
949 :
/*!
 * \ingroup TensorExpressionsGroup
 * \brief Returns the tensor expression representing the difference of two
 * tensor expressions
 */
953 : template <typename T1, typename T2, typename X1, typename X2, typename Symm1,
954 : typename Symm2, typename IndexList1, typename IndexList2,
955 : typename Args1, typename Args2>
956 0 : SPECTRE_ALWAYS_INLINE auto operator-(
957 : const TensorExpression<T1, X1, Symm1, IndexList1, Args1>& t1,
958 : const TensorExpression<T2, X2, Symm2, IndexList2, Args2>& t2) {
959 : using op1_generic_indices =
960 : typename tenex::detail::remove_time_indices<Args1>::type;
961 : using op2_generic_indices =
962 : typename tenex::detail::remove_time_indices<Args2>::type;
963 : static_assert(tmpl::size<op1_generic_indices>::value ==
964 : tmpl::size<op2_generic_indices>::value,
965 : "Tensor subtraction is only possible when the same number of "
966 : "generic indices are used with both operands.");
967 : static_assert(
968 : tmpl::equal_members<op1_generic_indices, op2_generic_indices>::value,
969 : "The generic indices when subtracting two tensors must be equal. This "
970 : "error occurs from expressions like R(ti::a, ti::b) - S(ti::c, ti::a)");
971 : return tenex::AddSub<T1, T2, Args1, Args2, -1>(~t1, ~t2);
972 : }
973 :
974 : /// @{
975 : /// \ingroup TensorExpressionsGroup
976 : /// \brief Returns the tensor expression representing the difference of a tensor
977 : /// expression and a number
978 : ///
979 : /// \details
980 : /// The tensor expression operand must represent an expression that, when
981 : /// evaluated, would be a rank 0 tensor. For example, if `R` and `S` are
982 : /// Tensors, here is a non-exhaustive list of some of the acceptable forms that
983 : /// the tensor expression operand could take:
984 : /// - `R()`
985 : /// - `R(ti::A, ti::a)`
986 : /// - `(R(ti::A, ti::B) * S(ti::a, ti::b))`
987 : /// - `R(ti::t, ti::t)`
988 : ///
989 : /// \param t the tensor expression operand of the difference
990 : /// \param number the numeric operand of the difference
991 : /// \return the tensor expression representing the difference of the tensor
992 : /// expression and the number
993 : template <typename T, typename X, typename Symm, typename IndexList,
994 : typename... Args, typename N,
995 : Requires<std::is_arithmetic_v<N>> = nullptr>
996 1 : SPECTRE_ALWAYS_INLINE auto operator-(
997 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
998 : const N number) {
999 : static_assert(
1000 : (... and tt::is_time_index<Args>::value),
1001 : "Can only subtract a number from a tensor expression that evaluates to a "
1002 : "rank 0 tensor.");
1003 : return t + tenex::NumberAsExpression<N>(-number);
1004 : }
1005 : template <typename T, typename X, typename Symm, typename IndexList,
1006 : typename... Args, typename N,
1007 : Requires<std::is_arithmetic_v<N>> = nullptr>
1008 1 : SPECTRE_ALWAYS_INLINE auto operator-(
1009 : const N number,
1010 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
1011 : static_assert(
1012 : (... and tt::is_time_index<Args>::value),
1013 : "Can only subtract a number from a tensor expression that evaluates to a "
1014 : "rank 0 tensor.");
1015 : return tenex::NumberAsExpression<N>(number) - t;
1016 : }
1017 : template <typename T, typename X, typename Symm, typename IndexList,
1018 : typename... Args, typename N>
1019 1 : SPECTRE_ALWAYS_INLINE auto operator-(
1020 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
1021 : const std::complex<N>& number) {
1022 : static_assert(
1023 : (... and tt::is_time_index<Args>::value),
1024 : "Can only subtract a number from a tensor expression that evaluates to a "
1025 : "rank 0 tensor.");
1026 : return t + tenex::NumberAsExpression(-number);
1027 : }
1028 : template <typename T, typename X, typename Symm, typename IndexList,
1029 : typename... Args, typename N>
1030 1 : SPECTRE_ALWAYS_INLINE auto operator-(
1031 : const std::complex<N>& number,
1032 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
1033 : static_assert(
1034 : (... and tt::is_time_index<Args>::value),
1035 : "Can only subtract a number from a tensor expression that evaluates to a "
1036 : "rank 0 tensor.");
1037 : return tenex::NumberAsExpression<N>(number) - t;
1038 : }
1039 : /// @}
|