Line data Source code
1 1 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : /// \file
5 : /// Defines ET for adding and subtracting tensors
6 :
7 : #pragma once
8 :
9 : #include <array>
10 : #include <complex>
11 : #include <cstddef>
12 : #include <iterator>
13 : #include <limits>
14 : #include <type_traits>
15 : #include <utility>
16 :
17 : #include "DataStructures/DataVector.hpp"
18 : #include "DataStructures/Tensor/Expressions/DataTypeSupport.hpp"
19 : #include "DataStructures/Tensor/Expressions/IndexPropertyCheck.hpp"
20 : #include "DataStructures/Tensor/Expressions/NumberAsExpression.hpp"
21 : #include "DataStructures/Tensor/Expressions/SpatialSpacetimeIndex.hpp"
22 : #include "DataStructures/Tensor/Expressions/TensorExpression.hpp"
23 : #include "DataStructures/Tensor/Expressions/TensorIndex.hpp"
24 : #include "DataStructures/Tensor/Expressions/TensorIndexTransformation.hpp"
25 : #include "DataStructures/Tensor/Expressions/TimeIndex.hpp"
26 : #include "Utilities/Algorithm.hpp"
27 : #include "Utilities/ForceInline.hpp"
28 : #include "Utilities/MakeArray.hpp"
29 : #include "Utilities/Requires.hpp"
30 : #include "Utilities/TMPL.hpp"
31 :
32 : /// \cond
33 : namespace tenex {
34 : template <typename T1, typename T2, typename ArgsList1, typename ArgsList2,
35 : int Sign>
36 : struct AddSub;
37 : } // namespace tenex
38 : template <typename Derived, typename DataType, typename Symm,
39 : typename IndexList, typename Args, typename ReducedArgs>
40 : struct TensorExpression;
41 : /// \endcond
42 :
43 1 : namespace tenex {
44 : namespace detail {
45 : /// \brief Computes the rearranged symmetry of one operand according to the
46 : /// generic index order of the other operand
47 : ///
48 : /// \details
49 : /// Here is an example of what the algorithm does:
50 : ///
51 : /// Given \f$R_{abc} + S_{cab}\f$, reorder \f$S\f$' symmetry according to
52 : /// \f$R\f$'s index order
53 : /// `tensorindex_transformation`:
54 : /// \code
55 : /// {1, 2, 0} // positions of R's indices {a, b, c} in S' indices {c, a, b}
56 : /// \endcode
57 : /// `input_symm`:
58 : /// \code
59 : /// {2, 2, 1} // S' symmetry, where c and a are symmetric indices
60 : /// \endcode
61 : /// returned equivalent `output_symm`:
62 : /// \code
63 : /// {2, 1, 2} // S' symmetry (input_symm) rearranged to R's index order (abc)
64 : /// \endcode
65 : ///
66 : /// One special case scenario to note is when concrete time indices are
67 : /// involved in the transformation. Consider transforming the symmetry for
68 : /// some tensor \f$S_{ab}\f$ to the index order of another tensor
69 : /// \f$R_{btat}\f$. This would be necessary in an expression such as
70 : /// \f$R_{btat} + S_{ab}\f$. The transformation of the symmetry for \f$S\f$
71 : /// according to the index order of \f$R\f$ cannot simply be the list of
72 : /// positions of \f$R\f$'s indices in \f$S\f$' indices, as \f$S\f$ does not
73 : /// contain all of \f$R\f$'s indices, because it has no time indices. To handle
74 : /// cases like this, a placeholder value for the position of any time index
75 : /// must be substituted for an actual position, since one may not exist. In this
76 : /// example, the proper input transformation (`tensorindex_transformation`)
77 : /// would need to be `{1, PLACEHOLDER_VALUE, 0, PLACEHOLDER_VALUE}`, where
78 : /// `PLACEHOLDER_VALUE` is defined by
79 : /// `TensorIndexTransformation_detail::time_index_position_placeholder`. `1` and
80 : /// `0` are the positions of \f$b\f$ and \f$a\f$ in \f$S\f$, and the placeholder
81 : /// is used for the positions of time indices. In computing the output
82 : /// transformed symmetry, the function will insert a `0` at each position where
83 : /// this placeholder is found in the transformation. For example, if
84 : /// `input_symm` is `{2, 1}`, the returned output multi-index will be
85 : /// `{1, 0, 2, 0}`. Note that the symmetry returned by this function is not
86 : /// necessarily in the canonical form defined by ::Symmetry. This example with
87 : /// time indices is an example of this, as `0` is not a permitted ::Symmetry
88 : /// value, and the canonical form would have increasing symmetry values from
89 : /// right to left. In addition, even though the time indices in the rearranged
90 : /// symmetry will have the same symmetry value (`0`), this bears no impact on
91 : /// `get_addsub_symm`'s computation of the symmetry of the tensor resulting
92 : /// from the addition or subtraction.
93 : ///
94 : /// \tparam NumIndicesIn the number of indices in the operand whose symmetry is
95 : /// being transformed
96 : /// \tparam NumIndicesOut the number of indices in the other operand whose index
97 : /// order is the order the input operand symmetry is being transformed to
98 : /// \param input_symm the input operand symmetry to transform
99 : /// \param tensorindex_transformation the positions of the other operand's
100 : /// generic indices in the generic indices of the operand whose symmetry is
101 : /// being transformed (see details)
102 : /// \return the input operand symmetry rearranged according to the generic index
103 : /// order of the other operand
104 : template <size_t NumIndicesIn, size_t NumIndicesOut>
105 : SPECTRE_ALWAYS_INLINE constexpr std::array<std::int32_t, NumIndicesOut>
106 : transform_addsub_symm(
107 : const std::array<std::int32_t, NumIndicesIn>& input_symm,
108 : const std::array<size_t, NumIndicesOut>& tensorindex_transformation) {
109 : std::array<std::int32_t, NumIndicesOut> output_symm =
110 : make_array<NumIndicesOut, std::int32_t>(0);
111 : for (size_t i = 0; i < NumIndicesOut; i++) {
112 : gsl::at(output_symm, i) =
113 : (gsl::at(tensorindex_transformation, i) ==
114 : TensorIndexTransformation_detail::time_index_position_placeholder)
115 : ? 0
116 : : gsl::at(input_symm, gsl::at(tensorindex_transformation, i));
117 : }
118 : return output_symm;
119 : }
120 :
121 : /// @{
122 : /// \ingroup TensorExpressionsGroup
123 : /// \brief Returns the canonical symmetry of the tensor resulting from
124 : /// adding or subtracting two tensors, according to their symmetries
125 : ///
126 : /// \details The canonical symmetry returned follows the convention defined by
127 : /// ::Symmetry: symmetry values are in ascending order from right to left. If
128 : /// the convention implemented by ::Symmetry changes, this function will also
129 : /// need to be updated to match the new convention. The ::Symmetry metafunction
130 : /// could instead be used on the result of this function, but that would
131 : /// introduce avoidable and unnecessary extra computations, so it is not used.
132 : ///
133 : /// This function treats the two input symmetries as aligned (i.e. each position
134 : /// of `symm1` and `symm2` corresponds to a shared generic index at that
135 : /// position). The resultant symmetry is determined as follows: indices that are
136 : /// symmetric in both input symmetries are also symmetric in the resultant
137 : /// tensor.
138 : ///
139 : /// \param symm1 the symmetry of the first tensor being added or subtracted
140 : /// \param symm2 the symmetry of the second tensor being added or subtracted
141 : /// \return the canonical symmetry of the tensor resulting from adding or
142 : /// subtracting two tensors
template <size_t NumIndices, Requires<(NumIndices >= 2)> = nullptr>
constexpr std::array<std::int32_t, NumIndices> get_addsub_symm(
    const std::array<std::int32_t, NumIndices>& symm1,
    const std::array<std::int32_t, NumIndices>& symm2) {
  // Sentinel meaning "resultant symmetry value not yet assigned at this
  // position"
  constexpr std::int32_t max_int = std::numeric_limits<std::int32_t>::max();
  std::array<std::int32_t, NumIndices> addsub_symm =
      make_array<NumIndices>(max_int);
  // Walk positions from rightmost to leftmost, assigning ascending symmetry
  // values so the result follows the ::Symmetry canonical form (values
  // ascend from right to left)
  size_t right_index = NumIndices - 1;
  std::int32_t symm_value_to_set = 1;

  // NOTE: right_index is an unsigned size_t, so decrementing it past 0 wraps
  // around to a value >= NumIndices; the `< NumIndices` condition therefore
  // doubles as the loop's `>= 0` termination check
  while (right_index < NumIndices) {
    std::int32_t symm1_value_to_find = symm1[right_index];
    std::int32_t symm2_value_to_find = symm2[right_index];
    // if we haven't yet set right_index for the resultant symmetry
    if (addsub_symm[right_index] == max_int) {
      addsub_symm[right_index] = symm_value_to_set;
      // Scan leftward for positions symmetric with right_index in BOTH
      // operands; left_index also relies on unsigned wraparound to terminate
      // once it would go below 0
      for (size_t left_index = right_index - 1; left_index < NumIndices;
           left_index--) {
        // if left_index of the resultant symmetry is not yet set and we've
        // found a common symmetry between symm1 and symm2 at this index
        if (addsub_symm[left_index] == max_int and
            symm1[left_index] == symm1_value_to_find and
            symm2[left_index] == symm2_value_to_find) {
          // indices symmetric in both operands stay symmetric in the result
          addsub_symm[left_index] = symm_value_to_set;
        }
      }
      symm_value_to_set++;
    }
    right_index--;
  }

  return addsub_symm;
}
176 :
177 : template <size_t NumIndices, Requires<(NumIndices == 1)> = nullptr>
178 : constexpr std::array<std::int32_t, NumIndices> get_addsub_symm(
179 : const std::array<std::int32_t, NumIndices>& /*symm1*/,
180 : const std::array<std::int32_t, NumIndices>& /*symm2*/) {
181 : // return {{1}} instead of symm1 in case symm1 is not in the canonical form
182 : return {{1}};
183 : }
184 :
185 : template <size_t NumIndices, Requires<(NumIndices == 0)> = nullptr>
186 : constexpr std::array<std::int32_t, NumIndices> get_addsub_symm(
187 : const std::array<std::int32_t, NumIndices>& symm1,
188 : const std::array<std::int32_t, NumIndices>& /*symm2*/) {
189 : return symm1;
190 : }
191 : /// @}
192 :
193 : /// \ingroup TensorExpressionsGroup
194 : /// \brief Helper struct for computing the canonical symmetry of the tensor
195 : /// resulting from adding or subtracting two tensors, according to their
196 : /// symmetries and generic index orders
197 : ///
198 : /// \details The resultant symmetry (`type`) values correspond to the index
199 : /// order of the first tensor operand being added or subtracted:
200 : /// `TensorIndexList1`.
201 : ///
202 : /// \tparam SymmList1 the ::Symmetry of the first operand
203 : /// \tparam SymmList2 the ::Symmetry of the second operand
204 : /// \tparam TensorIndexList1 the generic indices of the first operand
205 : /// \tparam TensorIndexList2 the generic indices of the second operand
// Primary template; only the partial specialization below is ever
// instantiated. The defaulted NumIndices1/NumIndices2/IndexSequence1
// parameters exist so the specialization can pattern-match on the sizes and
// an index sequence over the first operand's indices.
template <typename SymmList1, typename SymmList2, typename TensorIndexList1,
          typename TensorIndexList2,
          size_t NumIndices1 = tmpl::size<SymmList1>::value,
          size_t NumIndices2 = tmpl::size<SymmList2>::value,
          typename IndexSequence1 = std::make_index_sequence<NumIndices1>>
struct AddSubSymmetry;

template <
    template <typename...> class SymmList1, typename... Symm1,
    template <typename...> class SymmList2, typename... Symm2,
    template <typename...> class TensorIndexList1, typename... TensorIndices1,
    template <typename...> class TensorIndexList2, typename... TensorIndices2,
    size_t NumIndices1, size_t NumIndices2, size_t... Ints1>
struct AddSubSymmetry<SymmList1<Symm1...>, SymmList2<Symm2...>,
                      TensorIndexList1<TensorIndices1...>,
                      TensorIndexList2<TensorIndices2...>, NumIndices1,
                      NumIndices2, std::index_sequence<Ints1...>> {
  // The generic index (TensorIndex) values of each operand, as value arrays
  static constexpr std::array<size_t, NumIndices1> tensorindex_values1 = {
      {TensorIndices1::value...}};
  static constexpr std::array<size_t, NumIndices2> tensorindex_values2 = {
      {TensorIndices2::value...}};
  // positions of tensorindex_values1 in tensorindex_values2
  static constexpr std::array<size_t, NumIndices1> op2_to_op1_map =
      ::tenex::compute_tensorindex_transformation(tensorindex_values2,
                                                  tensorindex_values1);

  // The ::Symmetry values of each operand, as value arrays
  static constexpr std::array<std::int32_t, NumIndices1> symm1 = {
      {Symm1::value...}};
  static constexpr std::array<std::int32_t, NumIndices2> symm2 = {
      {Symm2::value...}};
  // 2nd argument is symm2 rearranged according to `TensorIndexList1` order
  // so that the two symmetry arguments to `get_addsub_symm` are aligned
  // w.r.t. their generic index orders
  static constexpr std::array<std::int32_t, NumIndices1> addsub_symm =
      get_addsub_symm(symm1, transform_addsub_symm(symm2, op2_to_op1_map));

  // The resultant canonical symmetry, re-expressed as a compile-time
  // integral list by expanding the index sequence over addsub_symm
  using type = tmpl::integral_list<std::int32_t, addsub_symm[Ints1]...>;
};
244 :
245 : /// \ingroup TensorExpressionsGroup
246 : /// \brief Helper struct for defining the symmetry, index list, and
247 : /// generic index list of the tensor resulting from adding or
248 : /// subtracting two tensor expressions
249 : ///
250 : /// \tparam T1 the first tensor expression operand
251 : /// \tparam T2 the second tensor expression operand
template <typename T1, typename T2>
struct AddSubType {
  static_assert(std::is_base_of_v<Expression, T1> and
                    std::is_base_of_v<Expression, T2>,
                "Parameters to AddSubType must be TensorExpressions");
  // The data type of the result, derived from the two operands' data types
  // (e.g. double + DataVector yields DataVector)
  using type =
      typename get_binop_datatype<typename T1::type, typename T2::type>::type;
  // The canonical ::Symmetry of the result, computed from both operands'
  // symmetries and generic index orders (expressed in T1's index order)
  using symmetry =
      typename AddSubSymmetry<typename T1::symmetry, typename T2::symmetry,
                              typename T1::args_list,
                              typename T2::args_list>::type;
  // The result adopts the first operand's index list and generic index order
  using index_list = typename T1::index_list;
  using tensorindex_list = typename T1::args_list;
};
266 : } // namespace detail
267 :
268 : /// \ingroup TensorExpressionsGroup
269 : /// \brief Defines the tensor expression representing the addition or
270 : /// subtraction of two tensor expressions
271 : ///
272 : /// \details
273 : /// For details on aliases and members defined in this class, as well as general
274 : /// `TensorExpression` terminology used in its members' documentation, see
275 : /// documentation for `TensorExpression`.
276 : ///
277 : /// \tparam T1 the left operand expression
278 : /// \tparam T2 the right operand expression
279 : /// \tparam ArgsList1 generic `TensorIndex`s of the left operand
280 : /// \tparam ArgsList2 generic `TensorIndex`s of the right operand
281 : /// \tparam Sign the sign of the operation selected, 1 for addition or -1 for
282 : /// subtraction
283 : template <typename T1, typename T2, typename ArgsList1, typename ArgsList2,
284 : int Sign>
285 1 : struct AddSub;
286 :
287 : template <typename T1, typename T2, template <typename...> class ArgsList1,
288 : template <typename...> class ArgsList2, typename... Args1,
289 : typename... Args2, int Sign>
290 0 : struct AddSub<T1, T2, ArgsList1<Args1...>, ArgsList2<Args2...>, Sign>
291 : : public TensorExpression<
292 : AddSub<T1, T2, ArgsList1<Args1...>, ArgsList2<Args2...>, Sign>,
293 : typename detail::AddSubType<T1, T2>::type,
294 : typename detail::AddSubType<T1, T2>::symmetry,
295 : typename detail::AddSubType<T1, T2>::index_list,
296 : typename detail::AddSubType<T1, T2>::tensorindex_list> {
297 : static_assert(
298 : detail::tensorexpression_binop_datatypes_are_supported_v<T1, T2>,
299 : "Cannot add or subtract the given TensorExpression types with the given "
300 : "data types. This can occur from e.g. trying to add a Tensor with data "
301 : "type double and a Tensor with data type DataVector.");
302 : static_assert(
303 : not((std::is_same_v<T1, NumberAsExpression<std::complex<double>>> and
304 : std::is_same_v<typename T2::type, DataVector>) or
305 : (std::is_same_v<T2, NumberAsExpression<std::complex<double>>> and
306 : std::is_same_v<typename T1::type, DataVector>)),
307 : "Cannot perform addition and subtraction between a std::complex<double> "
308 : "and a TensorExpression whose data type is DataVector because Blaze does "
309 : "not support addition and subtraction between std::complex<double> and "
310 : "DataVector.");
311 : static_assert(
312 : detail::IndexPropertyCheck<typename T1::index_list,
313 : typename T2::index_list, ArgsList1<Args1...>,
314 : ArgsList2<Args2...>>::value,
315 : "You are attempting to add indices of different types, e.g. T^a_b + "
316 : "S^b_a, which doesn't make sense. The indices may also be in different "
317 : "frames, different types (spatial vs. spacetime) or of different "
318 : "dimension.");
319 : static_assert(Sign == 1 or Sign == -1,
320 : "Invalid Sign provided for addition or subtraction of Tensor "
321 : "elements. Sign must be 1 (addition) or -1 (subtraction).");
322 :
323 : // === Index properties ===
324 : /// The type of the data being stored in the result of the expression
325 1 : using type = typename detail::AddSubType<T1, T2>::type;
326 : /// The ::Symmetry of the result of the expression
327 1 : using symmetry = typename detail::AddSubType<T1, T2>::symmetry;
328 : /// The list of \ref SpacetimeIndex "TensorIndexType"s of the result of the
329 : /// expression
330 1 : using index_list = typename detail::AddSubType<T1, T2>::index_list;
331 : /// The list of generic `TensorIndex`s of the result of the
332 : /// expression
333 1 : using args_list = typename T1::args_list;
334 : /// The number of tensor indices in the result of the expression. This also
335 : /// doubles as the left operand's number of indices.
336 1 : static constexpr auto num_tensor_indices = tmpl::size<index_list>::value;
337 : /// The number of tensor indices in the right operand expression
338 1 : static constexpr auto num_tensor_indices_op2 = sizeof...(Args2);
339 : /// Mapping from the left operand's index order to the right operand's index
340 : /// order
341 : static constexpr std::array<size_t, num_tensor_indices_op2>
342 1 : operand_index_transformation =
343 : compute_tensorindex_transformation<num_tensor_indices,
344 : num_tensor_indices_op2>(
345 : {{Args1::value...}}, {{Args2::value...}});
346 : /// Positions of indices in first operand where generic spatial indices are
347 : /// used for spacetime indices
348 1 : static constexpr auto op1_spatial_spacetime_index_positions =
349 : detail::get_spatial_spacetime_index_positions<typename T1::index_list,
350 : ArgsList1<Args1...>>();
351 : /// Positions of indices in second operand where generic spatial indices are
352 : /// used for spacetime indices
353 1 : static constexpr auto op2_spatial_spacetime_index_positions =
354 : detail::get_spatial_spacetime_index_positions<typename T2::index_list,
355 : ArgsList2<Args2...>>();
356 :
357 : /// Whether or not the two operands have the same `TensorIndex`s in the same
358 : /// order (including concrete time indices)
359 1 : static constexpr bool ops_have_generic_indices_at_same_positions =
360 : generic_indices_at_same_positions<tmpl::list<Args1...>,
361 : tmpl::list<Args2...>>::value;
362 :
363 : // === Expression subtree properties ===
364 : /// The number of arithmetic tensor operations done in the subtree for the
365 : /// left operand
366 1 : static constexpr size_t num_ops_left_child = T1::num_ops_subtree;
367 : /// The number of arithmetic tensor operations done in the subtree for the
368 : /// right operand
369 1 : static constexpr size_t num_ops_right_child = T2::num_ops_subtree;
370 : // This helps ensure we are minimizing breadth in the overall tree when we
371 : // have addition (subtraction is not commutative)
372 : static_assert(Sign == -1 or num_ops_left_child >= num_ops_right_child,
373 : "The left operand of an AddSub expression performing addition "
374 : "should be a subtree with equal or more tensor operations than "
375 : "the right operand's subtree.");
376 : /// The total number of arithmetic tensor operations done in this expression's
377 : /// whole subtree
378 1 : static constexpr size_t num_ops_subtree =
379 : num_ops_left_child + num_ops_right_child + 1;
380 : /// The height of this expression's node in the expression tree relative to
381 : /// the closest `TensorAsExpression` leaf in its subtree
382 1 : static constexpr size_t height_relative_to_closest_tensor_leaf_in_subtree =
383 : T2::height_relative_to_closest_tensor_leaf_in_subtree <=
384 : T1::height_relative_to_closest_tensor_leaf_in_subtree
385 : ? (T2::height_relative_to_closest_tensor_leaf_in_subtree !=
386 : std::numeric_limits<size_t>::max()
387 : ? T2::height_relative_to_closest_tensor_leaf_in_subtree + 1
388 : : T2::height_relative_to_closest_tensor_leaf_in_subtree)
389 : : T1::height_relative_to_closest_tensor_leaf_in_subtree + 1;
390 :
391 : // === Properties for splitting up subexpressions along the primary path ===
392 : // These definitions only have meaning if this expression actually ends up
393 : // being along the primary path that is taken when evaluating the whole tree.
394 : // See documentation for `TensorExpression` for more details.
395 : /// If on the primary path, whether or not the expression is an ending point
396 : /// of a leg
397 1 : static constexpr bool is_primary_end = T1::is_primary_start;
398 : /// If on the primary path, this is the remaining number of arithmetic tensor
399 : /// operations that need to be done in the subtree of the child along the
400 : /// primary path, given that we will have already computed the whole subtree
401 : /// at the next lowest leg's starting point.
402 1 : static constexpr size_t num_ops_to_evaluate_primary_left_child =
403 : is_primary_end ? 0 : T1::num_ops_to_evaluate_primary_subtree;
404 : /// If on the primary path, this is the remaining number of arithmetic tensor
405 : /// operations that need to be done in the right operand's subtree. No
406 : /// splitting is currently done, so this is just `num_ops_right_child`.
407 1 : static constexpr size_t num_ops_to_evaluate_primary_right_child =
408 : num_ops_right_child;
409 : /// If on the primary path, this is the remaining number of arithmetic tensor
410 : /// operations that need to be done for this expression's subtree, given that
411 : /// we will have already computed the subtree at the next lowest leg's
412 : /// starting point
413 1 : static constexpr size_t num_ops_to_evaluate_primary_subtree =
414 : num_ops_to_evaluate_primary_left_child +
415 : num_ops_to_evaluate_primary_right_child + 1;
416 : /// If on the primary path, whether or not the expression is a starting point
417 : /// of a leg
418 1 : static constexpr bool is_primary_start =
419 : num_ops_to_evaluate_primary_subtree >=
420 : detail::max_num_ops_in_sub_expression<type>;
421 : /// When evaluating along a primary path, whether each operand's subtrees
422 : /// should be evaluated separately. Since `DataVector` expression runtime
423 : /// scales poorly with increased number of operations, evaluating the two
424 : /// expression subtrees separately like this is beneficial when at least one
425 : /// of the subtrees contains a large number of operations.
426 1 : static constexpr bool evaluate_children_separately =
427 : is_primary_start and (num_ops_to_evaluate_primary_left_child >=
428 : detail::max_num_ops_in_sub_expression<type> or
429 : num_ops_to_evaluate_primary_right_child >=
430 : detail::max_num_ops_in_sub_expression<type>);
431 : /// If on the primary path, whether or not the expression's child along the
432 : /// primary path is a subtree that contains a starting point of a leg along
433 : /// the primary path
434 1 : static constexpr bool primary_child_subtree_contains_primary_start =
435 : T1::primary_subtree_contains_primary_start;
436 : /// If on the primary path, whether or not this subtree contains a starting
437 : /// point of a leg along the primary path
438 1 : static constexpr bool primary_subtree_contains_primary_start =
439 : is_primary_start or primary_child_subtree_contains_primary_start;
440 :
441 0 : AddSub(T1 t1, T2 t2) : t1_(std::move(t1)), t2_(std::move(t2)) {}
442 0 : ~AddSub() override = default;
443 :
444 : /// \brief Assert that the LHS tensor of the equation does not also appear in
445 : /// this expression's subtree
446 : template <typename LhsTensor>
447 1 : SPECTRE_ALWAYS_INLINE void assert_lhs_tensor_not_in_rhs_expression(
448 : const gsl::not_null<LhsTensor*> lhs_tensor) const {
449 : if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
450 : t1_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
451 : }
452 : if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
453 : t2_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
454 : }
455 : }
456 :
457 : /// \brief Assert that each instance of the LHS tensor in the RHS tensor
458 : /// expression uses the same generic index order that the LHS uses
459 : ///
460 : /// \tparam LhsTensorIndices the list of generic `TensorIndex`s of the LHS
461 : /// result `Tensor` being computed
462 : /// \param lhs_tensor the LHS result `Tensor` being computed
463 : template <typename LhsTensorIndices, typename LhsTensor>
464 1 : SPECTRE_ALWAYS_INLINE void assert_lhs_tensorindices_same_in_rhs(
465 : const gsl::not_null<LhsTensor*> lhs_tensor) const {
466 : if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
467 : t1_.template assert_lhs_tensorindices_same_in_rhs<LhsTensorIndices>(
468 : lhs_tensor);
469 : }
470 : if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
471 : t2_.template assert_lhs_tensorindices_same_in_rhs<LhsTensorIndices>(
472 : lhs_tensor);
473 : }
474 : }
475 :
476 : /// \brief Get the size of a component from a `Tensor` in this expression's
477 : /// subtree of the RHS `TensorExpression`
478 : ///
479 : /// \return the size of a component from a `Tensor` in this expression's
480 : /// subtree of the RHS `TensorExpression`
481 1 : SPECTRE_ALWAYS_INLINE size_t get_rhs_tensor_component_size() const {
482 : if constexpr (T1::height_relative_to_closest_tensor_leaf_in_subtree <=
483 : T2::height_relative_to_closest_tensor_leaf_in_subtree) {
484 : return t1_.get_rhs_tensor_component_size();
485 : } else {
486 : return t2_.get_rhs_tensor_component_size();
487 : }
488 : }
489 :
490 : /// \brief Return the second operand's multi-index given the first operand's
491 : /// multi-index
492 : ///
493 : /// \param op1_multi_index the multi-index of the left operand
494 : /// \return the second operand's multi-index
495 : SPECTRE_ALWAYS_INLINE std::array<size_t, num_tensor_indices_op2>
496 1 : get_op2_multi_index(
497 : const std::array<size_t, num_tensor_indices>& op1_multi_index) const {
498 : if constexpr (ops_have_generic_indices_at_same_positions) {
499 : if constexpr (op1_spatial_spacetime_index_positions.size() != 0 or
500 : op2_spatial_spacetime_index_positions.size() != 0) {
501 : // Operands have the same generic index order, but at least one of them
502 : // has at least one spacetime index where a spatial index has been used,
503 : // so we need to compute the 2nd operand's (possibly) shifted
504 : // multi-index values
505 : constexpr std::array<std::int32_t, num_tensor_indices>
506 : spatial_spacetime_index_transformation =
507 : detail::spatial_spacetime_index_transformation_from_positions<
508 : num_tensor_indices>(op1_spatial_spacetime_index_positions,
509 : op2_spatial_spacetime_index_positions);
510 : std::array<size_t, num_tensor_indices> op2_multi_index =
511 : op1_multi_index;
512 : for (size_t i = 0; i < num_tensor_indices; i++) {
513 : gsl::at(op2_multi_index, i) = static_cast<size_t>(
514 : static_cast<std::int32_t>(gsl::at(op2_multi_index, i)) +
515 : gsl::at(spatial_spacetime_index_transformation, i));
516 : }
517 : return op2_multi_index;
518 : } else {
519 : // Operands have the same generic index order and neither of them has
520 : // a spacetime index where a spatial index has been used, so
521 : // both operands have the same multi-index
522 : return op1_multi_index;
523 : }
524 : } else {
525 : if constexpr (op1_spatial_spacetime_index_positions.size() != 0 or
526 : op2_spatial_spacetime_index_positions.size() != 0) {
527 : // Operands don't have the same generic index order and at least one of
528 : // them has at least one spacetime index where a spatial index has been
529 : // used, so we need to compute the 2nd operand's (possibly) shifted
530 : // multi-index values and reorder them with respect to the 2nd operand's
531 : // generic index order
532 :
533 : // The list of positions where generic spatial indices were used for
534 : // spacetime indices in the second operand, but rearranged in terms of
535 : // the first operand's generic index order.
536 : constexpr std::array<size_t,
537 : op2_spatial_spacetime_index_positions.size()>
538 : transformed_op2_spatial_spacetime_index_positions = []() {
539 : std::array<size_t, op2_spatial_spacetime_index_positions.size()>
540 : transformed_positions{};
541 : for (size_t i = 0;
542 : i < op2_spatial_spacetime_index_positions.size(); i++) {
543 : gsl::at(transformed_positions, i) =
544 : gsl::at(operand_index_transformation,
545 : gsl::at(op2_spatial_spacetime_index_positions, i));
546 : }
547 : return transformed_positions;
548 : }();
549 :
550 : // According to the transformed positions above, compute the value shift
551 : // needed to convert from multi-indices of the first operand to
552 : // multi-indices of the 2nd operand (with the generic index order of the
553 : // first)
554 : constexpr std::array<std::int32_t, num_tensor_indices>
555 : spatial_spacetime_index_transformation =
556 : detail::spatial_spacetime_index_transformation_from_positions<
557 : num_tensor_indices>(
558 : op1_spatial_spacetime_index_positions,
559 : transformed_op2_spatial_spacetime_index_positions);
560 : std::array<size_t, num_tensor_indices> op2_multi_index =
561 : op1_multi_index;
562 : for (size_t i = 0; i < num_tensor_indices; i++) {
563 : gsl::at(op2_multi_index, i) = static_cast<size_t>(
564 : static_cast<std::int32_t>(gsl::at(op2_multi_index, i)) +
565 : gsl::at(spatial_spacetime_index_transformation, i));
566 : }
567 : return transform_multi_index(op2_multi_index,
568 : operand_index_transformation);
569 : } else {
570 : // Operands don't have the same generic index order, but neither of them
571 : // has a spacetime index where a spatial index has been used, so we just
572 : // need to reorder the 2nd operand's multi_index according to its
573 : // generic index order
574 : return transform_multi_index(op1_multi_index,
575 : operand_index_transformation);
576 : }
577 : }
578 : }
579 :
580 : /// \brief Helper function for computing the sum of or difference between
581 : /// components at given multi-indices from both operands of the expression
582 : ///
583 : /// \details Both multi-index arguments must be ordered according to their
584 : /// operand's respective generic index ordering
585 : ///
586 : /// \param op1_multi_index the multi-index of the component of the first
587 : /// operand
588 : /// \param op2_multi_index the multi-index of the component of the second
589 : /// operand
590 : /// \return the sum of or difference between the two components' values
591 1 : SPECTRE_ALWAYS_INLINE decltype(auto) add_or_subtract(
592 : const std::array<size_t, num_tensor_indices>& op1_multi_index,
593 : const std::array<size_t, num_tensor_indices_op2>& op2_multi_index) const {
594 : if constexpr (Sign == 1) {
595 : return t1_.get(op1_multi_index) + t2_.get(op2_multi_index);
596 : } else {
597 : return t1_.get(op1_multi_index) - t2_.get(op2_multi_index);
598 : }
599 : }
600 :
601 : /// \brief Return the value of the component at the given multi-index of the
602 : /// tensor resulting from addition or subtraction
603 : ///
604 : /// \details One important detail to note about the type of the `AddSub`
605 : /// expression is that its two operands may have (i) different generic index
606 : /// orders, and/or (ii) different indices in their `index_list`s if where one
607 : /// operand uses a generic spatial index for a spacetime index, the other
608 : /// tensor may use that generic spatial index for a spatial index of the same
609 : /// dimension, valence, and frame. Therefore, there are four possible cases
610 : /// for an `AddSub` expression that are considered in the implementation:
611 : /// - same generic index order, spatial spacetime indices in expression
612 : /// - same generic index order, spatial spacetime indices not in expression
613 : /// - different generic index order, spatial spacetime indices in expression
614 : /// - different generic index order, spatial spacetime indices not in
615 : /// expression
616 : ///
617 : /// This means that for expressions where the generic index orders differ, a
618 : /// multi-index for a component of one operand is a (possible) rearrangement
619 : /// of the equivalent multi-index for a component in the other operand. This
620 : /// also means that for expressions where (at least once) a generic spatial
621 : /// index is used for a spacetime index, then, after accounting
622 : /// for potential reordering due to different generic index orders, a
623 : /// multi-index's values for a component of one operand are (possibly) shifted
624 : /// by one, compared to the multi-index's values for a component in the other
625 : /// operand.
626 : ///
627 : /// For example, given \f$R_{ij} + S_{ji}\f$, let \f$R\f$'s first index be
628 : /// a spacetime index, but \f$R\f$'s second index and both of \f$S\f$' indices
629 : /// be spatial indices. If \f$i = 2\f$ and \f$j = 0\f$, then when we compute
630 : /// \f$R_{20} + S_{02}\f$, the multi-index for \f$R_{20}\f$ is
631 : /// `{2 + 1, 0} = {3, 0}` (first value shifted because it is a spacetime
632 : /// index) and the multi-index for \f$S_{02}\f$ is `[0, 2]`. Because the first
633 : /// operand of an `AddSub` expresion propagates its generic index order and
634 : /// index list ( \ref SpacetimeIndex "TensorIndexType"s) as the `AddSub`'s own
635 : /// generic index order and index list, the `result_multi_index` is equivalent
636 : /// to the multi-index for the first operand. Thus, we need only compute the
637 : /// second operand's multi-index as a transformation of the first: reorder and
638 : /// shift the values of the first operand to compute the equivalent
639 : /// multi-index for the second operand.
640 : ///
641 : /// \param result_multi_index the multi-index of the component of the result
642 : /// tensor to retrieve
643 : /// \return the value of the component at `result_multi_index` in the result
644 : /// tensor
645 1 : SPECTRE_ALWAYS_INLINE decltype(auto) get(
646 : const std::array<size_t, num_tensor_indices>& result_multi_index) const {
647 : return add_or_subtract(result_multi_index,
648 : get_op2_multi_index(result_multi_index));
649 : }
650 :
651 : /// \brief Helper for evaluating the LHS Tensor's result component at this
652 : /// subtree by evaluating the two operand's subtrees separately and adding or
653 : /// subtracting them
654 : ///
655 : /// \details
656 : /// The left and right operands' subtrees are evaluated successively with
657 : /// two separate assignments to the LHS result component. Since `DataVector`
658 : /// expression runtime scales poorly with increased number of operations,
659 : /// evaluating the two expression subtrees separately like this is beneficial
660 : /// when at least one of the subtrees contains a large number of operations.
661 : /// Instead of evaluating a larger expression with their combined total number
662 : /// of operations, we evaluate two smaller ones.
663 : ///
664 : /// This function also differs from `add_or_subtract` in that it takes into
665 : /// account whether we have already computed part of the result component at a
666 : /// lower subtree. In recursively computing this sum/difference, the current
667 : /// result component will be substituted in for the most recent (highest)
668 : /// subtree below it that has already been evaluated.
669 : ///
670 : /// \param result_component the LHS tensor component to evaluate
671 : /// \param op1_multi_index the multi-index of the component of the first
672 : /// operand of the sum or difference to evaluate
673 : /// \param op2_multi_index the multi-index of the component of the second
674 : /// operand of the sum or difference to evaluate
675 : template <typename ResultType>
676 1 : SPECTRE_ALWAYS_INLINE void add_or_subtract_primary_children(
677 : ResultType& result_component,
678 : const std::array<size_t, num_tensor_indices>& op1_multi_index,
679 : const std::array<size_t, num_tensor_indices_op2>& op2_multi_index) const {
680 : if constexpr (Sign == 1) {
681 : // We're performing addition
682 : if constexpr (is_primary_end) {
683 : (void)op1_multi_index;
684 : // We've already computed the whole child subtree on the primary path,
685 : // so just add the result of the other child's subtree to the current
686 : // result
687 : result_component += t2_.get(op2_multi_index);
688 : } else {
689 : // We haven't yet evaluated the whole subtree of the primary child, so
690 : // first assign the result component to be the result of computing the
691 : // primary child's subtree
692 : result_component =
693 : t1_.template get_primary(result_component, op1_multi_index);
694 : // Now that the primary child's subtree has been computed, add the
695 : // result of evaluating the other child's subtree to the current result
696 : result_component += t2_.get(op2_multi_index);
697 : }
698 : } else {
699 : // We're performing subtraction
700 : if constexpr (is_primary_end) {
701 : (void)op1_multi_index;
702 : // We've already computed the whole child subtree on the primary path,
703 : // so just subtract the result of the other child's subtree from the
704 : // current result
705 : result_component -= t2_.get(op2_multi_index);
706 : } else {
707 : // We haven't yet evaluated the whole subtree of the primary child, so
708 : // first assign the result component to be the result of computing the
709 : // primary child's subtree
710 : result_component =
711 : t1_.template get_primary(result_component, op1_multi_index);
712 : // Now that the primary child's subtree has been computed, subtract the
713 : // result of evaluating the other child's subtree from the current
714 : // result
715 : result_component -= t2_.get(op2_multi_index);
716 : }
717 : }
718 : }
719 :
720 : /// \brief Evaluate the LHS Tensor's result component at this subtree by
721 : /// evaluating the two operand's subtrees separately and adding or subtracting
722 : /// them
723 : ///
724 : /// \details
725 : /// See `add_or_subtract_primary_children` for more details
726 : ///
727 : /// \param result_component the LHS tensor component to evaluate
728 : /// \param result_multi_index the multi-index of the component of the result
729 : /// tensor to evaluate
730 : template <typename ResultType>
731 1 : SPECTRE_ALWAYS_INLINE void evaluate_primary_children(
732 : ResultType& result_component,
733 : const std::array<size_t, num_tensor_indices>& result_multi_index) const {
734 : add_or_subtract_primary_children(result_component, result_multi_index,
735 : get_op2_multi_index(result_multi_index));
736 : }
737 :
738 : /// \brief Helper function for returning the sum of or difference between
739 : /// components at given multi-indices from both operands of the expression
740 : ///
741 : /// \details
742 : /// This function differs from `add_or_subtract` in that it takes into account
743 : /// whether we have already computed part of the result component at a lower
744 : /// subtree. In recursively computing this sum/difference, the current result
745 : /// component will be substituted in for the most recent (highest) subtree
746 : /// below it that has already been evaluated.
747 : ///
748 : /// \param result_component the LHS tensor component to evaluate
749 : /// \param op1_multi_index the multi-index of the component of the first
750 : /// operand
751 : /// \param op2_multi_index the multi-index of the component of the second
752 : /// operand
753 : /// \return the sum of or difference between the two components' values
754 : template <typename ResultType>
755 1 : SPECTRE_ALWAYS_INLINE decltype(auto) add_or_subtract_primary(
756 : const ResultType& result_component,
757 : const std::array<size_t, num_tensor_indices>& op1_multi_index,
758 : const std::array<size_t, num_tensor_indices_op2>& op2_multi_index) const {
759 : if constexpr (Sign == 1) {
760 : // We're performing addition
761 : if constexpr (is_primary_end) {
762 : (void)op1_multi_index;
763 : // We've already computed the whole child subtree on the primary path,
764 : // so just add the result of the other child's subtree to the current
765 : // result
766 : return result_component + t2_.get(op2_multi_index);
767 : } else {
768 : // We haven't yet evaluated the whole subtree for this expression, so
769 : // return the sum of the results of the two operands' subtrees
770 : return t1_.template get_primary(result_component, op1_multi_index) +
771 : t2_.get(op2_multi_index);
772 : }
773 : } else {
774 : // We're performing subtraction
775 : if constexpr (is_primary_end) {
776 : (void)op1_multi_index;
777 : // We've already computed the whole child subtree on the primary path,
778 : // so just subtract the result of the other child's subtree from the
779 : // current result
780 : return result_component - t2_.get(op2_multi_index);
781 : } else {
782 : // We haven't yet evaluated the whole subtree for this expression, so
783 : // return the difference between the results of the two operands'
784 : // subtrees
785 : return t1_.template get_primary(result_component, op1_multi_index) -
786 : t2_.get(op2_multi_index);
787 : }
788 : }
789 : }
790 :
791 : /// \brief Return the value of the component at the given multi-index of the
792 : /// tensor resulting from addition or subtraction
793 : ///
794 : /// \details
795 : /// This function differs from `get` in that it takes into account whether we
796 : /// have already computed part of the result component at a lower subtree.
797 : /// In recursively computing this sum/difference, the current result component
798 : /// will be substituted in for the most recent (highest) subtree below it that
799 : /// has already been evaluated.
800 : ///
801 : /// \param result_component the LHS tensor component to evaluate
802 : /// \param result_multi_index the multi-index of the component of the result
803 : /// tensor to retrieve
804 : /// \return the value of the component at `result_multi_index` in the result
805 : /// tensor
806 : template <typename ResultType>
807 1 : SPECTRE_ALWAYS_INLINE decltype(auto) get_primary(
808 : const ResultType& result_component,
809 : const std::array<size_t, num_tensor_indices>& result_multi_index) const {
810 : return add_or_subtract_primary(result_component, result_multi_index,
811 : get_op2_multi_index(result_multi_index));
812 : }
813 :
814 : /// \brief Successively evaluate the LHS Tensor's result component at each
815 : /// leg in this expression's subtree
816 : ///
817 : /// \details
818 : /// This function takes into account whether we have already computed part of
819 : /// the result component at a lower subtree. In recursively computing this
820 : /// sum/difference, the current result component will be substituted in for
821 : /// the most recent (highest) subtree below it that has already been
822 : /// evaluated.
823 : ///
824 : /// \param result_component the LHS tensor component to evaluate
825 : /// \param result_multi_index the multi-index of the component of the result
826 : /// tensor to evaluate
827 : template <typename ResultType>
828 1 : SPECTRE_ALWAYS_INLINE void evaluate_primary_subtree(
829 : ResultType& result_component,
830 : const std::array<size_t, num_tensor_indices>& result_multi_index) const {
831 : if constexpr (primary_child_subtree_contains_primary_start) {
832 : // The primary child's subtree contains at least one leg, so recurse down
833 : // and evaluate that first
834 : t1_.template evaluate_primary_subtree(result_component,
835 : result_multi_index);
836 : }
837 :
838 : if constexpr (is_primary_start) {
839 : // We want to evaluate the subtree for this expression
840 : if constexpr (evaluate_children_separately) {
841 : // Evaluate operand's subtrees separately
842 : evaluate_primary_children(result_component, result_multi_index);
843 : } else {
844 : // Evaluate whole subtree as one expression
845 : result_component = get_primary(result_component, result_multi_index);
846 : }
847 : }
848 : }
849 :
 private:
  /// Left operand expression; the primary path of the expression tree descends
  /// through this operand (see `get_primary` and `evaluate_primary_subtree`)
  T1 t1_;
  /// Right operand expression; always evaluated via `get`
  T2 t2_;
};
856 : } // namespace tenex
857 :
858 : /*!
859 : * \ingroup TensorExpressionsGroup
860 : */
861 : template <typename T1, typename T2, typename X1, typename X2, typename Symm1,
862 : typename Symm2, typename IndexList1, typename IndexList2,
863 : typename Args1, typename Args2>
864 0 : SPECTRE_ALWAYS_INLINE auto operator+(
865 : const TensorExpression<T1, X1, Symm1, IndexList1, Args1>& t1,
866 : const TensorExpression<T2, X2, Symm2, IndexList2, Args2>& t2) {
867 : using op1_generic_indices =
868 : typename tenex::detail::remove_time_indices<Args1>::type;
869 : using op2_generic_indices =
870 : typename tenex::detail::remove_time_indices<Args2>::type;
871 : static_assert(tmpl::size<op1_generic_indices>::value ==
872 : tmpl::size<op2_generic_indices>::value,
873 : "Tensor addition is only possible when the same number of "
874 : "generic indices are used with both operands.");
875 : static_assert(
876 : tmpl::equal_members<op1_generic_indices, op2_generic_indices>::value,
877 : "The generic indices when adding two tensors must be equal. This error "
878 : "occurs from expressions like R(ti::a, ti::b) + S(ti::c, ti::a)");
879 : if constexpr (T1::num_ops_subtree >= T2::num_ops_subtree) {
880 : return tenex::AddSub<T1, T2, Args1, Args2, 1>(~t1, ~t2);
881 : } else {
882 : return tenex::AddSub<T2, T1, Args2, Args1, 1>(~t2, ~t1);
883 : }
884 : }
885 :
886 : /// @{
887 : /// \ingroup TensorExpressionsGroup
888 : /// \brief Returns the tensor expression representing the sum of a tensor
889 : /// expression and a number
890 : ///
891 : /// \details
892 : /// The tensor expression operand must represent an expression that, when
893 : /// evaluated, would be a rank 0 tensor. For example, if `R` and `S` are
894 : /// Tensors, here is a non-exhaustive list of some of the acceptable forms that
895 : /// the tensor expression operand could take:
896 : /// - `R()`
897 : /// - `R(ti::A, ti::a)`
898 : /// - `(R(ti::A, ti::B) * S(ti::a, ti::b))`
899 : /// - `R(ti::t, ti::t)`
900 : ///
901 : /// \param t the tensor expression operand of the sum
902 : /// \param number the numeric operand of the sum
903 : /// \return the tensor expression representing the sum of the tensor expression
904 : /// and the number
905 : template <typename T, typename X, typename Symm, typename IndexList,
906 : typename... Args, typename N,
907 : Requires<std::is_arithmetic_v<N>> = nullptr>
908 1 : SPECTRE_ALWAYS_INLINE auto operator+(
909 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
910 : const N number) {
911 : static_assert(
912 : (... and tt::is_time_index<Args>::value),
913 : "Can only add a number to a tensor expression that evaluates to a rank 0"
914 : "tensor.");
915 : return t + tenex::NumberAsExpression(number);
916 : }
917 : template <typename T, typename X, typename Symm, typename IndexList,
918 : typename... Args, typename N,
919 : Requires<std::is_arithmetic_v<N>> = nullptr>
920 1 : SPECTRE_ALWAYS_INLINE auto operator+(
921 : const N number,
922 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
923 : static_assert(
924 : (... and tt::is_time_index<Args>::value),
925 : "Can only add a number to a tensor expression that evaluates to a rank 0"
926 : "tensor.");
927 : return t + tenex::NumberAsExpression(number);
928 : }
929 : template <typename T, typename X, typename Symm, typename IndexList,
930 : typename... Args, typename N>
931 1 : SPECTRE_ALWAYS_INLINE auto operator+(
932 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
933 : const std::complex<N>& number) {
934 : static_assert(
935 : (... and tt::is_time_index<Args>::value),
936 : "Can only add a number to a tensor expression that evaluates to a rank 0"
937 : "tensor.");
938 : return t + tenex::NumberAsExpression(number);
939 : }
940 : template <typename T, typename X, typename Symm, typename IndexList,
941 : typename... Args, typename N>
942 1 : SPECTRE_ALWAYS_INLINE auto operator+(
943 : const std::complex<N>& number,
944 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
945 : static_assert(
946 : (... and tt::is_time_index<Args>::value),
947 : "Can only add a number to a tensor expression that evaluates to a rank 0"
948 : "tensor.");
949 : return t + tenex::NumberAsExpression(number);
950 : }
951 : /// @}
952 :
953 : /*!
954 : * \ingroup TensorExpressionsGroup
955 : */
956 : template <typename T1, typename T2, typename X1, typename X2, typename Symm1,
957 : typename Symm2, typename IndexList1, typename IndexList2,
958 : typename Args1, typename Args2>
959 0 : SPECTRE_ALWAYS_INLINE auto operator-(
960 : const TensorExpression<T1, X1, Symm1, IndexList1, Args1>& t1,
961 : const TensorExpression<T2, X2, Symm2, IndexList2, Args2>& t2) {
962 : using op1_generic_indices =
963 : typename tenex::detail::remove_time_indices<Args1>::type;
964 : using op2_generic_indices =
965 : typename tenex::detail::remove_time_indices<Args2>::type;
966 : static_assert(tmpl::size<op1_generic_indices>::value ==
967 : tmpl::size<op2_generic_indices>::value,
968 : "Tensor subtraction is only possible when the same number of "
969 : "generic indices are used with both operands.");
970 : static_assert(
971 : tmpl::equal_members<op1_generic_indices, op2_generic_indices>::value,
972 : "The generic indices when subtracting two tensors must be equal. This "
973 : "error occurs from expressions like R(ti::a, ti::b) - S(ti::c, ti::a)");
974 : return tenex::AddSub<T1, T2, Args1, Args2, -1>(~t1, ~t2);
975 : }
976 :
977 : /// @{
978 : /// \ingroup TensorExpressionsGroup
979 : /// \brief Returns the tensor expression representing the difference of a tensor
980 : /// expression and a number
981 : ///
982 : /// \details
983 : /// The tensor expression operand must represent an expression that, when
984 : /// evaluated, would be a rank 0 tensor. For example, if `R` and `S` are
985 : /// Tensors, here is a non-exhaustive list of some of the acceptable forms that
986 : /// the tensor expression operand could take:
987 : /// - `R()`
988 : /// - `R(ti::A, ti::a)`
989 : /// - `(R(ti::A, ti::B) * S(ti::a, ti::b))`
990 : /// - `R(ti::t, ti::t)`
991 : ///
992 : /// \param t the tensor expression operand of the difference
993 : /// \param number the numeric operand of the difference
994 : /// \return the tensor expression representing the difference of the tensor
995 : /// expression and the number
996 : template <typename T, typename X, typename Symm, typename IndexList,
997 : typename... Args, typename N,
998 : Requires<std::is_arithmetic_v<N>> = nullptr>
999 1 : SPECTRE_ALWAYS_INLINE auto operator-(
1000 : const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
1001 : const N number) {
1002 : static_assert(
1003 : (... and tt::is_time_index<Args>::value),
1004 : "Can only subtract a number from a tensor expression that evaluates to a "
1005 : "rank 0 tensor.");
1006 : return t + tenex::NumberAsExpression(-number);
1007 : }
template <typename T, typename X, typename Symm, typename IndexList,
          typename... Args, typename N,
          Requires<std::is_arithmetic_v<N>> = nullptr>
SPECTRE_ALWAYS_INLINE auto operator-(
    const N number,
    const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
  // Every generic index of the expression must be a concrete time index, i.e.
  // the expression must evaluate to a rank 0 tensor
  static_assert(
      (... and tt::is_time_index<Args>::value),
      "Can only subtract a number from a tensor expression that evaluates to a "
      "rank 0 tensor.");
  // Subtraction is not commutative: wrap the number as the left operand of a
  // tensor-expression subtraction
  return tenex::NumberAsExpression(number) - t;
}
template <typename T, typename X, typename Symm, typename IndexList,
          typename... Args, typename N>
SPECTRE_ALWAYS_INLINE auto operator-(
    const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t,
    const std::complex<N>& number) {
  // Every generic index of the expression must be a concrete time index, i.e.
  // the expression must evaluate to a rank 0 tensor
  static_assert(
      (... and tt::is_time_index<Args>::value),
      "Can only subtract a number from a tensor expression that evaluates to a "
      "rank 0 tensor.");
  // t - number is implemented as t + (-number), reusing the addition overload;
  // negation of std::complex is well-defined for both components
  return t + tenex::NumberAsExpression(-number);
}
template <typename T, typename X, typename Symm, typename IndexList,
          typename... Args, typename N>
SPECTRE_ALWAYS_INLINE auto operator-(
    const std::complex<N>& number,
    const TensorExpression<T, X, Symm, IndexList, tmpl::list<Args...>>& t) {
  // Every generic index of the expression must be a concrete time index, i.e.
  // the expression must evaluate to a rank 0 tensor
  static_assert(
      (... and tt::is_time_index<Args>::value),
      "Can only subtract a number from a tensor expression that evaluates to a "
      "rank 0 tensor.");
  // Subtraction is not commutative: wrap the number as the left operand of a
  // tensor-expression subtraction
  return tenex::NumberAsExpression(number) - t;
}
1042 : /// @}
|