Line data Source code
1 1 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : /// \file
5 : /// Defines ET for tensor inner and outer products
6 :
7 : #pragma once
8 :
9 : #include <array>
10 : #include <complex>
11 : #include <cstddef>
12 : #include <limits>
13 : #include <type_traits>
14 : #include <utility>
15 :
16 : #include "DataStructures/Tensor/Expressions/Contract.hpp"
17 : #include "DataStructures/Tensor/Expressions/DataTypeSupport.hpp"
18 : #include "DataStructures/Tensor/Expressions/NumberAsExpression.hpp"
19 : #include "DataStructures/Tensor/Expressions/TensorExpression.hpp"
20 : #include "DataStructures/Tensor/Symmetry.hpp"
21 : #include "Utilities/ForceInline.hpp"
22 : #include "Utilities/Gsl.hpp"
23 : #include "Utilities/Requires.hpp"
24 : #include "Utilities/TMPL.hpp"
25 :
namespace tenex {
namespace detail {
/// \brief Computes the data type, ::Symmetry, index list, and generic index
/// list of the outer product of two tensor expressions
///
/// \tparam T1 the left operand expression of the outer product
/// \tparam T2 the right operand expression of the outer product
/// \tparam SymmList1 the ::Symmetry of the left operand
/// \tparam SymmList2 the ::Symmetry of the right operand
template <typename T1, typename T2, typename SymmList1 = typename T1::symmetry,
          typename SymmList2 = typename T2::symmetry>
struct OuterProductType;

template <typename T1, typename T2, template <typename...> class SymmList1,
          typename... Symm1, template <typename...> class SymmList2,
          typename... Symm2>
struct OuterProductType<T1, T2, SymmList1<Symm1...>, SymmList2<Symm2...>> {
  /// The data type resulting from multiplying the operands' data types
  using type =
      typename get_binop_datatype<typename T1::type, typename T2::type>::type;
  /// The ::Symmetry of the product: each of the first operand's symmetry
  /// values is offset by the number of the second operand's indices so that
  /// the two operands' symmetry groupings remain disjoint when concatenated
  using symmetry =
      Symmetry<(Symm1::value + sizeof...(Symm2))..., Symm2::value...>;
  /// The concatenation of the operands' \ref SpacetimeIndex
  /// "TensorIndexType" lists
  using index_list =
      tmpl::append<typename T1::index_list, typename T2::index_list>;
  /// The concatenation of the operands' generic `TensorIndex` lists
  using tensorindex_list =
      tmpl::append<typename T1::args_list, typename T2::args_list>;
};
}  // namespace detail
46 :
/// \ingroup TensorExpressionsGroup
/// \brief Defines the tensor expression representing the outer product of two
/// tensor expressions
///
/// \details
/// For details on aliases and members defined in this class, as well as general
/// `TensorExpression` terminology used in its members' documentation, see
/// documentation for `TensorExpression`.
///
/// \tparam T1 the left operand expression of the outer product expression
/// \tparam T2 the right operand expression of the outer product expression
template <typename T1, typename T2,
          typename IndexList1 = typename T1::index_list,
          typename IndexList2 = typename T2::index_list,
          typename ArgsList1 = typename T1::args_list,
          typename ArgsList2 = typename T2::args_list>
struct OuterProduct;

template <typename T1, typename T2, template <typename...> class IndexList1,
          typename... Indices1, template <typename...> class IndexList2,
          typename... Indices2, template <typename...> class ArgsList1,
          typename... Args1, template <typename...> class ArgsList2,
          typename... Args2>
struct OuterProduct<T1, T2, IndexList1<Indices1...>, IndexList2<Indices2...>,
                    ArgsList1<Args1...>, ArgsList2<Args2...>>
    : public TensorExpression<
          OuterProduct<T1, T2>, typename detail::OuterProductType<T1, T2>::type,
          typename detail::OuterProductType<T1, T2>::symmetry,
          typename detail::OuterProductType<T1, T2>::index_list,
          typename detail::OuterProductType<T1, T2>::tensorindex_list> {
  static_assert(
      detail::tensorexpression_binop_datatypes_are_supported_v<T1, T2>,
      "Cannot multiply the given TensorExpressions with the given data types. "
      "This can occur from e.g. trying to multiply a Tensor with data type "
      "double and a Tensor with data type DataVector.");
  // === Index properties ===
  /// The type of the data being stored in the result of the expression
  using type = typename detail::OuterProductType<T1, T2>::type;
  /// The ::Symmetry of the result of the expression
  using symmetry = typename detail::OuterProductType<T1, T2>::symmetry;
  /// The list of \ref SpacetimeIndex "TensorIndexType"s of the result of the
  /// expression
  using index_list = typename detail::OuterProductType<T1, T2>::index_list;
  /// The list of generic `TensorIndex`s of the result of the
  /// expression
  using args_list = typename detail::OuterProductType<T1, T2>::tensorindex_list;
  /// The number of tensor indices in the result of the expression
  static constexpr auto num_tensor_indices = tmpl::size<index_list>::value;
  /// The number of tensor indices in the left operand expression
  static constexpr auto op1_num_tensor_indices =
      tmpl::size<typename T1::index_list>::value;
  /// The number of tensor indices in the right operand expression
  static constexpr auto op2_num_tensor_indices =
      num_tensor_indices - op1_num_tensor_indices;

  // === Expression subtree properties ===
  /// The number of arithmetic tensor operations done in the subtree for the
  /// left operand
  static constexpr size_t num_ops_left_child = T1::num_ops_subtree;
  /// The number of arithmetic tensor operations done in the subtree for the
  /// right operand
  static constexpr size_t num_ops_right_child = T2::num_ops_subtree;
  // This helps ensure we are minimizing breadth in the overall tree
  static_assert(num_ops_left_child >= num_ops_right_child,
                "The left operand should be a subtree with equal or more "
                "tensor operations than the right operand's subtree.");
  /// The total number of arithmetic tensor operations done in this expression's
  /// whole subtree
  static constexpr size_t num_ops_subtree =
      num_ops_left_child + num_ops_right_child + 1;
  /// The height of this expression's node in the expression tree relative to
  /// the closest `TensorAsExpression` leaf in its subtree
  // A `max()` sentinel marks a subtree with no tensor leaf, so it must not be
  // incremented (it would wrap around to 0)
  static constexpr size_t height_relative_to_closest_tensor_leaf_in_subtree =
      T2::height_relative_to_closest_tensor_leaf_in_subtree <=
              T1::height_relative_to_closest_tensor_leaf_in_subtree
          ? (T2::height_relative_to_closest_tensor_leaf_in_subtree !=
                     std::numeric_limits<size_t>::max()
                 ? T2::height_relative_to_closest_tensor_leaf_in_subtree + 1
                 : T2::height_relative_to_closest_tensor_leaf_in_subtree)
          : T1::height_relative_to_closest_tensor_leaf_in_subtree + 1;

  // === Properties for splitting up subexpressions along the primary path ===
  // These definitions only have meaning if this expression actually ends up
  // being along the primary path that is taken when evaluating the whole tree.
  // See documentation for `TensorExpression` for more details.
  /// If on the primary path, whether or not the expression is an ending point
  /// of a leg
  static constexpr bool is_primary_end = T1::is_primary_start;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done in the subtree of the child along the
  /// primary path, given that we will have already computed the whole subtree
  /// at the next lowest leg's starting point.
  static constexpr size_t num_ops_to_evaluate_primary_left_child =
      is_primary_end ? 0 : T1::num_ops_to_evaluate_primary_subtree;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done in the right operand's subtree. No
  /// splitting is currently done, so this is just `num_ops_right_child`.
  static constexpr size_t num_ops_to_evaluate_primary_right_child =
      num_ops_right_child;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done for this expression's subtree, given that
  /// we will have already computed the subtree at the next lowest leg's
  /// starting point
  static constexpr size_t num_ops_to_evaluate_primary_subtree =
      num_ops_to_evaluate_primary_left_child +
      num_ops_to_evaluate_primary_right_child + 1;
  /// If on the primary path, whether or not the expression is a starting point
  /// of a leg
  static constexpr bool is_primary_start =
      num_ops_to_evaluate_primary_subtree >=
      detail::max_num_ops_in_sub_expression<type>;
  /// When evaluating along a primary path, whether each operand's subtrees
  /// should be evaluated separately. Since `DataVector` expression runtime
  /// scales poorly with increased number of operations, evaluating the two
  /// expression subtrees separately like this is beneficial when at least one
  /// of the subtrees contains a large number of operations.
  static constexpr bool evaluate_children_separately =
      is_primary_start and (num_ops_to_evaluate_primary_left_child >=
                                detail::max_num_ops_in_sub_expression<type> or
                            num_ops_to_evaluate_primary_right_child >=
                                detail::max_num_ops_in_sub_expression<type>);
  /// If on the primary path, whether or not the expression's child along the
  /// primary path is a subtree that contains a starting point of a leg along
  /// the primary path
  static constexpr bool primary_child_subtree_contains_primary_start =
      T1::primary_subtree_contains_primary_start;
  /// If on the primary path, whether or not this subtree contains a starting
  /// point of a leg along the primary path
  static constexpr bool primary_subtree_contains_primary_start =
      is_primary_start or primary_child_subtree_contains_primary_start;

  /// \brief Construct the outer product expression from its two operands
  ///
  /// \param t1 the left operand expression of the product
  /// \param t2 the right operand expression of the product
  OuterProduct(T1 t1, T2 t2) : t1_(std::move(t1)), t2_(std::move(t2)) {}
  ~OuterProduct() override = default;

  /// \brief Assert that the LHS tensor of the equation does not also appear in
  /// this expression's subtree
  template <typename LhsTensor>
  SPECTRE_ALWAYS_INLINE void assert_lhs_tensor_not_in_rhs_expression(
      const gsl::not_null<LhsTensor*> lhs_tensor) const {
    // Number leaves hold no `Tensor`, so there is nothing to check for them
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
      t1_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
    }
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
      t2_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
    }
  }

  /// \brief Assert that each instance of the LHS tensor in the RHS tensor
  /// expression uses the same generic index order that the LHS uses
  ///
  /// \tparam LhsTensorIndices the list of generic `TensorIndex`s of the LHS
  /// result `Tensor` being computed
  /// \param lhs_tensor the LHS result `Tensor` being computed
  template <typename LhsTensorIndices, typename LhsTensor>
  SPECTRE_ALWAYS_INLINE void assert_lhs_tensorindices_same_in_rhs(
      const gsl::not_null<LhsTensor*> lhs_tensor) const {
    // Number leaves hold no `Tensor`, so there is nothing to check for them
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
      t1_.template assert_lhs_tensorindices_same_in_rhs<LhsTensorIndices>(
          lhs_tensor);
    }
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
      t2_.template assert_lhs_tensorindices_same_in_rhs<LhsTensorIndices>(
          lhs_tensor);
    }
  }

  /// \brief Get the size of a component from a `Tensor` in this expression's
  /// subtree of the RHS `TensorExpression`
  ///
  /// \return the size of a component from a `Tensor` in this expression's
  /// subtree of the RHS `TensorExpression`
  SPECTRE_ALWAYS_INLINE size_t get_rhs_tensor_component_size() const {
    // Recurse into whichever operand's subtree has a tensor leaf closer to
    // this node
    if constexpr (T1::height_relative_to_closest_tensor_leaf_in_subtree <=
                  T2::height_relative_to_closest_tensor_leaf_in_subtree) {
      return t1_.get_rhs_tensor_component_size();
    } else {
      return t2_.get_rhs_tensor_component_size();
    }
  }

  /// \brief Return the first operand's multi-index given the outer product's
  /// multi-index
  ///
  /// \param result_multi_index the multi-index of the component of the outer
  /// product tensor
  /// \return the first operand's multi-index
  constexpr SPECTRE_ALWAYS_INLINE std::array<size_t, op1_num_tensor_indices>
  get_op1_multi_index(
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    // The first operand's indices are the leading indices of the product's
    // concatenated index list
    std::array<size_t, op1_num_tensor_indices> op1_multi_index{};
    for (size_t i = 0; i < op1_num_tensor_indices; i++) {
      gsl::at(op1_multi_index, i) = gsl::at(result_multi_index, i);
    }
    return op1_multi_index;
  }

  /// \brief Return the second operand's multi-index given the outer product's
  /// multi-index
  ///
  /// \param result_multi_index the multi-index of the component of the outer
  /// product tensor
  /// \return the second operand's multi-index
  constexpr SPECTRE_ALWAYS_INLINE std::array<size_t, op2_num_tensor_indices>
  get_op2_multi_index(
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    // The second operand's indices are the trailing indices of the product's
    // concatenated index list
    std::array<size_t, op2_num_tensor_indices> op2_multi_index{};
    for (size_t i = 0; i < op2_num_tensor_indices; i++) {
      gsl::at(op2_multi_index, i) =
          gsl::at(result_multi_index, op1_num_tensor_indices + i);
    }
    return op2_multi_index;
  }

  /// \brief Return the value of the component of the outer product tensor at a
  /// given multi-index
  ///
  /// \details
  /// This function takes the multi-index of some component of the resultant
  /// outer product to compute. The function first computes the multi-indices of
  /// the pair of components in the two operand expressions, then multiplies the
  /// values at these multi-indices to obtain the value of the resultant outer
  /// product component. For example, say we are evaluating
  /// \f$L_{abc} = R_{b} * S_{ca}\f$. The generic index order of the outer
  /// product expression itself is the concatenation of the operands' orders,
  /// i.e. \f$(b, c, a)\f$, so let `result_multi_index == {1, 2, 0}`, which
  /// refers to the component \f$L_{012}\f$, the component we wish to compute.
  /// This function will compute the multi-indices of the operands that
  /// correspond to \f$R_{1}\f$ and \f$S_{20}\f$, retrieve their values, and
  /// return their product.
  ///
  /// \param result_multi_index the multi-index of the component of the outer
  /// product tensor to retrieve
  /// \return the value of the component at `result_multi_index` in the outer
  /// product tensor
  SPECTRE_ALWAYS_INLINE decltype(auto) get(
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    return t1_.get(get_op1_multi_index(result_multi_index)) *
           t2_.get(get_op2_multi_index(result_multi_index));
  }

  /// \brief Return the product of the components at the given multi-indices of
  /// the left and right operands
  ///
  /// \details
  /// This function differs from `get` in that it takes into account whether we
  /// have already computed part of the result component at a lower subtree.
  /// In recursively computing this product, the current result component will
  /// be substituted in for the most recent (highest) subtree below it that has
  /// already been evaluated.
  ///
  /// \param result_component the LHS tensor component to evaluate
  /// \param op1_multi_index the multi-index of the component of the first
  /// operand of the product to retrieve
  /// \param op2_multi_index the multi-index of the component of the second
  /// operand of the product to retrieve
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE decltype(auto) get_primary(
      const ResultType& result_component,
      const std::array<size_t, op1_num_tensor_indices>& op1_multi_index,
      const std::array<size_t, op2_num_tensor_indices>& op2_multi_index) const {
    if constexpr (is_primary_end) {
      (void)op1_multi_index;
      // We've already computed the whole child subtree on the primary path, so
      // just return the product of the current result component and the result
      // of the other child's subtree
      return result_component * t2_.get(op2_multi_index);
    } else {
      // We haven't yet evaluated the whole subtree for this expression, so
      // return the product of the results of the two operands' subtrees
      return t1_.template get_primary(result_component, op1_multi_index) *
             t2_.get(op2_multi_index);
    }
  }

  /// \brief Return the value of the component of the outer product tensor at a
  /// given multi-index
  ///
  /// \details
  /// This function differs from `get` in that it takes into account whether we
  /// have already computed part of the result component at a lower subtree.
  /// In recursively computing this product, the current result component will
  /// be substituted in for the most recent (highest) subtree below it that has
  /// already been evaluated.
  ///
  /// \param result_component the LHS tensor component to evaluate
  /// \param result_multi_index the multi-index of the component of the outer
  /// product tensor to retrieve
  /// \return the value of the component at `result_multi_index` in the outer
  /// product tensor
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE decltype(auto) get_primary(
      const ResultType& result_component,
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    return get_primary(result_component,
                       get_op1_multi_index(result_multi_index),
                       get_op2_multi_index(result_multi_index));
  }

  /// \brief Evaluate the LHS Tensor's result component at this subtree by
  /// evaluating the two operand's subtrees separately and multiplying
  ///
  /// \details
  /// This function takes into account whether we have already computed part of
  /// the result component at a lower subtree. In recursively computing this
  /// product, the current result component will be substituted in for the most
  /// recent (highest) subtree below it that has already been evaluated.
  ///
  /// The left and right operands' subtrees are evaluated successively with
  /// two separate assignments to the LHS result component. Since `DataVector`
  /// expression runtime scales poorly with increased number of operations,
  /// evaluating the two expression subtrees separately like this is beneficial
  /// when at least one of the subtrees contains a large number of operations.
  /// Instead of evaluating a larger expression with their combined total number
  /// of operations, we evaluate two smaller ones.
  ///
  /// \param result_component the LHS tensor component to evaluate
  /// \param op1_multi_index the multi-index of the component of the first
  /// operand of the product to evaluate
  /// \param op2_multi_index the multi-index of the component of the second
  /// operand of the product to evaluate
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE void evaluate_primary_children(
      ResultType& result_component,
      const std::array<size_t, op1_num_tensor_indices>& op1_multi_index,
      const std::array<size_t, op2_num_tensor_indices>& op2_multi_index) const {
    if constexpr (is_primary_end) {
      (void)op1_multi_index;
      // We've already computed the whole child subtree on the primary path, so
      // just multiply the current result by the result of the other child's
      // subtree
      result_component *= t2_.get(op2_multi_index);
    } else {
      // We haven't yet evaluated the whole subtree of the primary child, so
      // first assign the result component to be the result of computing the
      // primary child's subtree
      result_component =
          t1_.template get_primary(result_component, op1_multi_index);
      // Now that the primary child's subtree has been computed, multiply the
      // current result by the result of evaluating the other child's subtree
      result_component *= t2_.get(op2_multi_index);
    }
  }

  /// \brief Successively evaluate the LHS Tensor's result component at each
  /// leg in this expression's subtree
  ///
  /// \details
  /// This function takes into account whether we have already computed part of
  /// the result component at a lower subtree. In recursively computing this
  /// product, the current result component will be substituted in for the most
  /// recent (highest) subtree below it that has already been evaluated.
  ///
  /// \param result_component the LHS tensor component to evaluate
  /// \param result_multi_index the multi-index of the component of the outer
  /// product tensor to evaluate
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE void evaluate_primary_subtree(
      ResultType& result_component,
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    const std::array<size_t, op1_num_tensor_indices> op1_multi_index =
        get_op1_multi_index(result_multi_index);
    if constexpr (primary_child_subtree_contains_primary_start) {
      // The primary child's subtree contains at least one leg, so recurse down
      // and evaluate that first
      t1_.template evaluate_primary_subtree(result_component, op1_multi_index);
    }

    if constexpr (is_primary_start) {
      // We want to evaluate the subtree for this expression
      if constexpr (evaluate_children_separately) {
        // Evaluate operand's subtrees separately
        evaluate_primary_children(result_component, op1_multi_index,
                                  get_op2_multi_index(result_multi_index));
      } else {
        // Evaluate whole subtree as one expression
        result_component = get_primary(result_component, op1_multi_index,
                                       get_op2_multi_index(result_multi_index));
      }
    }
  }

 private:
  /// Left operand expression
  T1 t1_;
  /// Right operand expression
  T2 t2_;
};
432 : } // namespace tenex
433 :
434 : /// \ingroup TensorExpressionsGroup
435 : /// \brief Returns the tensor expression representing the product of two tensor
436 : /// expressions
437 : ///
438 : /// \details
439 : /// If the two operands have N pairs of indices that need to be contracted, the
440 : /// returned expression will be an `OuterProduct` expression nested inside N
441 : /// `TensorContract` expressions. This represents computing the inner product
442 : /// of the outer product of the two operands. If the operands do not have any
443 : /// indices to be contracted, the returned expression will be an `OuterProduct`.
444 : ///
445 : /// The two arguments are expressions that contain the two operands of the
446 : /// product, where the types of the operands are `T1` and `T2`.
447 : ///
448 : /// \tparam T1 the derived TensorExpression type of the first operand of the
449 : /// product
450 : /// \tparam T2 the derived TensorExpression type of the second operand of the
451 : /// product
452 : /// \tparam ArgsList1 the TensorIndexs of the first operand
453 : /// \tparam ArgsList2 the TensorIndexs of the second operand
454 : /// \param t1 first operand expression of the product
455 : /// \param t2 the second operand expression of the product
456 : /// \return the tensor expression representing the product of two tensor
457 : /// expressions
458 : template <typename T1, typename T2, typename ArgsList1, typename ArgsList2>
459 1 : SPECTRE_ALWAYS_INLINE auto operator*(
460 : const TensorExpression<T1, typename T1::type, typename T1::symmetry,
461 : typename T1::index_list, ArgsList1>& t1,
462 : const TensorExpression<T2, typename T2::type, typename T2::symmetry,
463 : typename T2::index_list, ArgsList2>& t2) {
464 : if constexpr (T1::num_ops_subtree >= T2::num_ops_subtree) {
465 : return tenex::contract(tenex::OuterProduct<T1, T2>(~t1, ~t2));
466 : } else {
467 : return tenex::contract(tenex::OuterProduct<T2, T1>(~t2, ~t1));
468 : }
469 : }
470 :
471 : /// @{
472 : /// \ingroup TensorExpressionsGroup
473 : /// \brief Returns the tensor expression representing the product of a tensor
474 : /// expression and a number
475 : ///
476 : /// \param t the tensor expression operand of the product
477 : /// \param number the numeric operand of the product
478 : /// \return the tensor expression representing the product of the tensor
479 : /// expression and the number
480 : template <typename T, typename N, Requires<std::is_arithmetic_v<N>> = nullptr>
481 1 : SPECTRE_ALWAYS_INLINE auto operator*(
482 : const TensorExpression<T, typename T::type, typename T::symmetry,
483 : typename T::index_list, typename T::args_list>& t,
484 : const N number) {
485 : return t * tenex::NumberAsExpression(number);
486 : }
487 : template <typename T, typename N, Requires<std::is_arithmetic_v<N>> = nullptr>
488 1 : SPECTRE_ALWAYS_INLINE auto operator*(
489 : const N number,
490 : const TensorExpression<T, typename T::type, typename T::symmetry,
491 : typename T::index_list, typename T::args_list>& t) {
492 : return t * tenex::NumberAsExpression(number);
493 : }
494 : template <typename T, typename N>
495 1 : SPECTRE_ALWAYS_INLINE auto operator*(
496 : const TensorExpression<T, typename T::type, typename T::symmetry,
497 : typename T::index_list, typename T::args_list>& t,
498 : const std::complex<N>& number) {
499 : return t * tenex::NumberAsExpression(number);
500 : }
501 : template <typename T, typename N>
502 1 : SPECTRE_ALWAYS_INLINE auto operator*(
503 : const std::complex<N>& number,
504 : const TensorExpression<T, typename T::type, typename T::symmetry,
505 : typename T::index_list, typename T::args_list>& t) {
506 : return t * tenex::NumberAsExpression(number);
507 : }
508 : /// @}
|