Line data Source code
1 1 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : /// \file
5 : /// Defines ET for tensor division by scalars
6 :
7 : #pragma once
8 :
9 : #include <array>
10 : #include <complex>
11 : #include <cstddef>
12 : #include <limits>
13 : #include <type_traits>
14 : #include <utility>
15 :
16 : #include "DataStructures/Tensor/Expressions/DataTypeSupport.hpp"
17 : #include "DataStructures/Tensor/Expressions/NumberAsExpression.hpp"
18 : #include "DataStructures/Tensor/Expressions/TensorExpression.hpp"
19 : #include "DataStructures/Tensor/Expressions/TimeIndex.hpp"
20 : #include "Utilities/ForceInline.hpp"
21 : #include "Utilities/Gsl.hpp"
22 : #include "Utilities/MakeArray.hpp"
23 : #include "Utilities/Requires.hpp"
24 : #include "Utilities/TMPL.hpp"
25 :
26 : namespace tenex {
27 : /// \ingroup TensorExpressionsGroup
28 : /// \brief Defines the tensor expression representing the quotient of one tensor
29 : /// expression divided by another tensor expression that evaluates to a rank 0
30 : /// tensor
31 : ///
32 : /// \details
33 : /// For details on aliases and members defined in this class, as well as general
34 : /// `TensorExpression` terminology used in its members' documentation, see
35 : /// documentation for `TensorExpression`.
36 : ///
37 : /// \tparam T1 the numerator operand expression of the division expression
38 : /// \tparam T2 the denominator operand expression of the division expression
39 : /// \tparam Args2 the generic indices of the denominator expression
40 : template <typename T1, typename T2, typename... Args2>
template <typename T1, typename T2, typename... Args2>
struct Divide
    : public TensorExpression<Divide<T1, T2, Args2...>,
                              typename detail::get_binop_datatype<
                                  typename T1::type, typename T2::type>::type,
                              typename T1::symmetry, typename T1::index_list,
                              typename T1::args_list> {
  static_assert(
      detail::tensorexpression_binop_datatypes_are_supported_v<T1, T2>,
      "Cannot divide the given TensorExpressions with the given data types. "
      "This can occur from e.g. trying to divide a Tensor with data type "
      "double and a Tensor with data type DataVector.");
  static_assert(
      not((std::is_same_v<T1, NumberAsExpression<std::complex<double>>> and
           std::is_same_v<typename T2::type, DataVector>) or
          (std::is_same_v<T2, NumberAsExpression<std::complex<double>>> and
           std::is_same_v<typename T1::type, DataVector>)),
      "Cannot perform division between a std::complex<double> and a "
      "TensorExpression whose data type is DataVector because Blaze does not "
      "support division between std::complex<double> and DataVector.");
  // A generic index list containing only time indices is what makes the
  // denominator rank 0 (all of its indices are concrete, not summed)
  static_assert((... and tt::is_time_index<Args2>::value),
                "Can only divide a tensor expression by a number or a tensor "
                "expression that evaluates to "
                "a rank 0 tensor.");

  // === Index properties ===
  /// The type of the data being stored in the result of the expression
  using type = typename detail::get_binop_datatype<typename T1::type,
                                                   typename T2::type>::type;
  /// The ::Symmetry of the result of the expression
  using symmetry = typename T1::symmetry;
  /// The list of \ref SpacetimeIndex "TensorIndexType"s of the result of the
  /// expression
  using index_list = typename T1::index_list;
  /// The list of generic `TensorIndex`s of the result of the expression
  using args_list = typename T1::args_list;
  /// The number of tensor indices in the result of the expression
  static constexpr auto num_tensor_indices =
      tmpl::size<typename T1::index_list>::value;
  /// The number of tensor indices in the right (denominator) operand
  /// expression
  static constexpr auto op2_num_tensor_indices =
      tmpl::size<typename T2::index_list>::value;
  /// The multi-index for the denominator. All components are 0 because the
  /// denominator's generic indices may only be time indices (see
  /// `static_assert` above), which refer to concrete index 0.
  static constexpr auto op2_multi_index =
      make_array<op2_num_tensor_indices, size_t>(0);

  // === Expression subtree properties ===
  /// The number of arithmetic tensor operations done in the subtree for the
  /// left operand
  static constexpr size_t num_ops_left_child = T1::num_ops_subtree;
  /// The number of arithmetic tensor operations done in the subtree for the
  /// right operand
  static constexpr size_t num_ops_right_child = T2::num_ops_subtree;
  /// The total number of arithmetic tensor operations done in this expression's
  /// whole subtree (+1 for the division itself)
  static constexpr size_t num_ops_subtree =
      num_ops_left_child + num_ops_right_child + 1;
  /// The height of this expression's node in the expression tree relative to
  /// the closest `TensorAsExpression` leaf in its subtree. `max()` is the
  /// sentinel for "no tensor leaf in the subtree," so it must not be
  /// incremented (it would wrap around to 0).
  static constexpr size_t height_relative_to_closest_tensor_leaf_in_subtree =
      T2::height_relative_to_closest_tensor_leaf_in_subtree <=
              T1::height_relative_to_closest_tensor_leaf_in_subtree
          ? (T2::height_relative_to_closest_tensor_leaf_in_subtree !=
                     std::numeric_limits<size_t>::max()
                 ? T2::height_relative_to_closest_tensor_leaf_in_subtree + 1
                 : T2::height_relative_to_closest_tensor_leaf_in_subtree)
          : T1::height_relative_to_closest_tensor_leaf_in_subtree + 1;

  // === Properties for splitting up subexpressions along the primary path ===
  // These definitions only have meaning if this expression actually ends up
  // being along the primary path that is taken when evaluating the whole tree.
  // See documentation for `TensorExpression` for more details.
  /// If on the primary path, whether or not the expression is an ending point
  /// of a leg
  static constexpr bool is_primary_end = T1::is_primary_start;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done in the subtree of the child along the
  /// primary path, given that we will have already computed the whole subtree
  /// at the next lowest leg's starting point.
  static constexpr size_t num_ops_to_evaluate_primary_left_child =
      is_primary_end ? 0 : T1::num_ops_to_evaluate_primary_subtree;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done in the right operand's subtree. No
  /// splitting is currently done, so this is just `num_ops_right_child`.
  static constexpr size_t num_ops_to_evaluate_primary_right_child =
      num_ops_right_child;
  /// If on the primary path, this is the remaining number of arithmetic tensor
  /// operations that need to be done for this expression's subtree, given that
  /// we will have already computed the subtree at the next lowest leg's
  /// starting point
  static constexpr size_t num_ops_to_evaluate_primary_subtree =
      num_ops_to_evaluate_primary_left_child +
      num_ops_to_evaluate_primary_right_child + 1;
  /// If on the primary path, whether or not the expression is a starting point
  /// of a leg
  static constexpr bool is_primary_start =
      num_ops_to_evaluate_primary_subtree >=
      detail::max_num_ops_in_sub_expression<type>;
  /// When evaluating along a primary path, whether each operand's subtrees
  /// should be evaluated separately. Since `DataVector` expression runtime
  /// scales poorly with increased number of operations, evaluating the two
  /// expression subtrees separately like this is beneficial when at least one
  /// of the subtrees contains a large number of operations.
  static constexpr bool evaluate_children_separately =
      is_primary_start and (num_ops_to_evaluate_primary_left_child >=
                                detail::max_num_ops_in_sub_expression<type> or
                            num_ops_to_evaluate_primary_right_child >=
                                detail::max_num_ops_in_sub_expression<type>);
  /// If on the primary path, whether or not the expression's child along the
  /// primary path is a subtree that contains a starting point of a leg along
  /// the primary path
  static constexpr bool primary_child_subtree_contains_primary_start =
      T1::primary_subtree_contains_primary_start;
  /// If on the primary path, whether or not this subtree contains a starting
  /// point of a leg along the primary path
  static constexpr bool primary_subtree_contains_primary_start =
      is_primary_start or primary_child_subtree_contains_primary_start;

  Divide(T1 t1, T2 t2) : t1_(std::move(t1)), t2_(std::move(t2)) {}
  ~Divide() override = default;

  /// \brief Assert that the LHS tensor of the equation does not also appear in
  /// this expression's subtree
  ///
  /// \details
  /// Number leaves (`NumberAsExpression`) hold no tensor, so recursion skips
  /// them.
  template <typename LhsTensor>
  SPECTRE_ALWAYS_INLINE void assert_lhs_tensor_not_in_rhs_expression(
      const gsl::not_null<LhsTensor*> lhs_tensor) const {
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
      t1_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
    }
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
      t2_.assert_lhs_tensor_not_in_rhs_expression(lhs_tensor);
    }
  }

  /// \brief Assert that each instance of the LHS tensor in the RHS tensor
  /// expression uses the same generic index order that the LHS uses
  ///
  /// \tparam LhsTensorIndices the list of generic `TensorIndex`s of the LHS
  /// result `Tensor` being computed
  /// \param lhs_tensor the LHS result `Tensor` being computed
  template <typename LhsTensorIndices, typename LhsTensor>
  SPECTRE_ALWAYS_INLINE void assert_lhs_tensorindices_same_in_rhs(
      const gsl::not_null<LhsTensor*> lhs_tensor) const {
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T1>) {
      t1_.assert_lhs_tensorindices_same_in_rhs(lhs_tensor);
    }
    if constexpr (not std::is_base_of_v<MarkAsNumberAsExpression, T2>) {
      t2_.assert_lhs_tensorindices_same_in_rhs(lhs_tensor);
    }
  }

  /// \brief Get the size of a component from a `Tensor` in this expression's
  /// subtree of the RHS `TensorExpression`
  ///
  /// \details
  /// Recurses into whichever operand's subtree has the closer tensor leaf so
  /// the lookup terminates as quickly as possible.
  ///
  /// \return the size of a component from a `Tensor` in this expression's
  /// subtree of the RHS `TensorExpression`
  SPECTRE_ALWAYS_INLINE size_t get_rhs_tensor_component_size() const {
    if constexpr (T1::height_relative_to_closest_tensor_leaf_in_subtree <=
                  T2::height_relative_to_closest_tensor_leaf_in_subtree) {
      return t1_.get_rhs_tensor_component_size();
    } else {
      return t2_.get_rhs_tensor_component_size();
    }
  }

  /// \brief Return the value of the component of the quotient tensor at a given
  /// multi-index
  ///
  /// \param result_multi_index the multi-index of the component of the quotient
  /// tensor to retrieve
  /// \return the value of the component in the quotient tensor at
  /// `result_multi_index`
  SPECTRE_ALWAYS_INLINE decltype(auto) get(
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    return t1_.get(result_multi_index) / t2_.get(op2_multi_index);
  }

  /// \brief Return the value of the component of the quotient tensor at a given
  /// multi-index
  ///
  /// \details
  /// This function differs from `get` in that it takes into account whether we
  /// have already computed part of the result component at a lower subtree.
  /// In recursively computing this quotient, the current result component will
  /// be substituted in for the most recent (highest) subtree below it that has
  /// already been evaluated.
  ///
  /// \param result_component the LHS tensor component to evaluate
  /// \param result_multi_index the multi-index of the component of the quotient
  /// tensor to retrieve
  /// \return the value of the component in the quotient tensor at
  /// `result_multi_index`
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE decltype(auto) get_primary(
      const ResultType& result_component,
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    if constexpr (is_primary_end) {
      (void)result_multi_index;
      // We've already computed the whole child subtree on the primary path, so
      // just return the quotient of the current result component and the result
      // of the other child's subtree
      return result_component / t2_.get(op2_multi_index);
    } else {
      // We haven't yet evaluated the whole subtree for this expression, so
      // return the quotient of the results of the two operands' subtrees
      return t1_.template get_primary(result_component, result_multi_index) /
             t2_.get(op2_multi_index);
    }
  }

  /// \brief Evaluate the LHS Tensor's result component at this subtree by
  /// evaluating the two operand's subtrees separately and dividing
  ///
  /// \details
  /// This function takes into account whether we have already computed part of
  /// the result component at a lower subtree. In recursively computing this
  /// quotient, the current result component will be substituted in for the most
  /// recent (highest) subtree below it that has already been evaluated.
  ///
  /// The left and right operands' subtrees are evaluated successively with
  /// two separate assignments to the LHS result component. Since `DataVector`
  /// expression runtime scales poorly with increased number of operations,
  /// evaluating the two expression subtrees separately like this is beneficial
  /// when at least one of the subtrees contains a large number of operations.
  /// Instead of evaluating a larger expression with their combined total number
  /// of operations, we evaluate two smaller ones.
  ///
  /// \param result_component the LHS tensor component to evaluate
  /// \param result_multi_index the multi-index of the component of the result
  /// tensor to evaluate
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE void evaluate_primary_children(
      ResultType& result_component,
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    if constexpr (is_primary_end) {
      (void)result_multi_index;
      // We've already computed the whole child subtree on the primary path, so
      // just divide the current result by the result of the other child's
      // subtree
      result_component /= t2_.get(op2_multi_index);
    } else {
      // We haven't yet evaluated the whole subtree of the primary child, so
      // first assign the result component to be the result of computing the
      // primary child's subtree
      result_component =
          t1_.template get_primary(result_component, result_multi_index);
      // Now that the primary child's subtree has been computed, divide the
      // current result by the result of evaluating the other child's subtree
      result_component /= t2_.get(op2_multi_index);
    }
  }

  /// \brief Successively evaluate the LHS Tensor's result component at each
  /// leg in this expression's subtree
  ///
  /// \details
  /// This function takes into account whether we have already computed part of
  /// the result component at a lower subtree. In recursively computing this
  /// quotient, the current result component will be substituted in for the most
  /// recent (highest) subtree below it that has already been evaluated.
  ///
  /// \param result_component the LHS tensor component to evaluate
  /// \param result_multi_index the multi-index of the component of the result
  /// tensor to evaluate
  template <typename ResultType>
  SPECTRE_ALWAYS_INLINE void evaluate_primary_subtree(
      ResultType& result_component,
      const std::array<size_t, num_tensor_indices>& result_multi_index) const {
    if constexpr (primary_child_subtree_contains_primary_start) {
      // The primary child's subtree contains at least one leg, so recurse down
      // and evaluate that first
      t1_.template evaluate_primary_subtree(result_component,
                                            result_multi_index);
    }
    if constexpr (is_primary_start) {
      // We want to evaluate the subtree for this expression
      if constexpr (evaluate_children_separately) {
        // Evaluate operand's subtrees separately
        evaluate_primary_children(result_component, result_multi_index);
      } else {
        // Evaluate whole subtree as one expression
        result_component = get_primary(result_component, result_multi_index);
      }
    }
  }

 private:
  /// Left operand (numerator)
  T1 t1_;
  /// Right operand (denominator)
  T2 t2_;
};
332 : } // namespace tenex
333 :
334 : /// \ingroup TensorExpressionsGroup
335 : /// \brief Returns the tensor expression representing the quotient of one tensor
336 : /// expression over another tensor expression that evaluates to a rank 0 tensor
337 : ///
338 : /// \details
339 : /// `t2` must be an expression that, when evaluated, would be a rank 0 tensor.
340 : /// For example, if `R` and `S` are Tensors, here is a non-exhaustive list of
341 : /// some of the acceptable forms that `t2` could take:
342 : /// - `R()`
343 : /// - `R(ti::A, ti::a)`
344 : /// - `(R(ti::A, ti::B) * S(ti::a, ti::b))`
345 : /// - `R(ti::t, ti::t) + 1.0`
346 : ///
347 : /// \param t1 the tensor expression numerator
348 : /// \param t2 the rank 0 tensor expression denominator
349 : template <typename T1, typename T2, typename... Args2>
350 1 : SPECTRE_ALWAYS_INLINE auto operator/(
351 : const TensorExpression<T1, typename T1::type, typename T1::symmetry,
352 : typename T1::index_list, typename T1::args_list>& t1,
353 : const TensorExpression<T2, typename T2::type, typename T2::symmetry,
354 : typename T2::index_list, tmpl::list<Args2...>>& t2) {
355 : return tenex::Divide<T1, T2, Args2...>(~t1, ~t2);
356 : }
357 :
358 : /// @{
359 : /// \ingroup TensorExpressionsGroup
360 : /// \brief Returns the tensor expression representing the quotient of a tensor
361 : /// expression over a number
362 : ///
363 : /// \note The implementation instead uses the operation, `t * (1.0 / number)`
364 : ///
365 : /// \param t the tensor expression operand of the quotient
366 : /// \param number the numeric operand of the quotient
367 : /// \return the tensor expression representing the quotient of the tensor
368 : /// expression over the number
369 : template <typename T, typename N, Requires<std::is_arithmetic_v<N>> = nullptr>
370 1 : SPECTRE_ALWAYS_INLINE auto operator/(
371 : const TensorExpression<T, typename T::type, typename T::symmetry,
372 : typename T::index_list, typename T::args_list>& t,
373 : const N number) {
374 : return t * tenex::NumberAsExpression(1.0 / number);
375 : }
376 : template <typename T, typename N>
377 1 : SPECTRE_ALWAYS_INLINE auto operator/(
378 : const TensorExpression<T, typename T::type, typename T::symmetry,
379 : typename T::index_list, typename T::args_list>& t,
380 : const std::complex<N>& number) {
381 : return t * tenex::NumberAsExpression(1.0 / number);
382 : }
383 : /// @}
384 :
385 : /// @{
386 : /// \ingroup TensorExpressionsGroup
387 : /// \brief Returns the tensor expression representing the quotient of a number
388 : /// over a tensor expression that evaluates to a rank 0 tensor
389 : ///
390 : /// \param number the numeric numerator of the quotient
391 : /// \param t the tensor expression denominator of the quotient
392 : /// \return the tensor expression representing the quotient of the number over
393 : /// the tensor expression
394 : template <typename T, typename N, Requires<std::is_arithmetic_v<N>> = nullptr>
395 1 : SPECTRE_ALWAYS_INLINE auto operator/(
396 : const N number,
397 : const TensorExpression<T, typename T::type, typename T::symmetry,
398 : typename T::index_list, typename T::args_list>& t) {
399 : return tenex::NumberAsExpression(number) / t;
400 : }
401 : template <typename T, typename N>
402 1 : SPECTRE_ALWAYS_INLINE auto operator/(
403 : const std::complex<N>& number,
404 : const TensorExpression<T, typename T::type, typename T::symmetry,
405 : typename T::index_list, typename T::args_list>& t) {
406 : return tenex::NumberAsExpression(number) / t;
407 : }
408 : /// @}
|