// Distributed under the MIT License.
// See LICENSE.txt for details.

/// \file
/// Defines class TensorStructure<Symmetry, Indices...>

#pragma once

#include <array>
#include <cstdint>
#include <limits>
#include <sstream>
#include <string>
#include <tuple>

#include "DataStructures/Tensor/IndexType.hpp"
#include "DataStructures/Tensor/Metafunctions.hpp"
#include "DataStructures/Tensor/Symmetry.hpp"
#include "Utilities/Algorithm.hpp"
#include "Utilities/Array.hpp"
#include "Utilities/ConstantExpressions.hpp"
#include "Utilities/ErrorHandling/Assert.hpp"
#include "Utilities/ErrorHandling/Error.hpp"
#include "Utilities/ForceInline.hpp"
#include "Utilities/Gsl.hpp"
#include "Utilities/MakeArray.hpp"
#include "Utilities/Numeric.hpp"
#include "Utilities/Requires.hpp"
#include "Utilities/TMPL.hpp"

namespace Tensor_detail {
template <size_t Size>
constexpr size_t number_of_independent_components(
    const std::array<int, Size>& symm, const std::array<size_t, Size>& dims) {
  if constexpr (Size == 0) {
    (void)symm;
    (void)dims;

    return 1;
  } else if constexpr (Size == 1) {
    (void)symm;

    return dims[0];
  } else {
    size_t max_element = 0;
    for (size_t i = 0; i < Size; ++i) {
      // clang-tidy: internals of assert(), don't need gsl::at in constexpr
      // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay,cppcoreguidelines-pro-bounds-constant-array-index)
      assert(symm[i] > 0);
      // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-constant-array-index)
      max_element =
          std::max(static_cast<size_t>(ce_abs(symm[i])), max_element);
    }
    assert(max_element > 0);  // NOLINT
    size_t total_independent_components = 1;
    for (size_t symm_index = 1; symm_index <= max_element; ++symm_index) {
      size_t number_of_indices_with_symm = 0;
      size_t dim_of_index = 0;
      for (size_t i = 0; i < Size; ++i) {
        if (static_cast<size_t>(symm[i]) == symm_index) {  // NOLINT
          ++number_of_indices_with_symm;
          dim_of_index = dims[i];  // NOLINT
        }
      }
      assert(dim_of_index > 0);                 // NOLINT
      assert(number_of_indices_with_symm > 0);  // NOLINT
      if (dim_of_index - 1 > number_of_indices_with_symm) {
        total_independent_components *=
            falling_factorial(dim_of_index + number_of_indices_with_symm - 1,
                              number_of_indices_with_symm) /
            factorial(number_of_indices_with_symm);
      } else {
        total_independent_components *=
            falling_factorial(dim_of_index + number_of_indices_with_symm - 1,
                              dim_of_index - 1) /
            factorial(dim_of_index - 1);
      }
    }
    return total_independent_components;
  }
}
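
// Illustrative sketch (editor's note, not part of the library): each
// symmetry group of n indices of dimension dim contributes
// binomial(dim + n - 1, n) independent components. For a symmetric rank-2
// tensor in 3 dimensions, symm = {1, 1} and dims = {3, 3}, so
//   number_of_independent_components(std::array<int, 2>{{1, 1}},
//                                    std::array<size_t, 2>{{3, 3}}) == 6
// i.e. the 6 independent entries of a symmetric 3x3 matrix; with no symmetry
// (symm = {2, 1}) the count would instead be 3 * 3 = 9.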

template <size_t Size>
constexpr size_t number_of_components(const std::array<size_t, Size>& dims) {
  size_t number = 1;
  for (size_t i = 0; i < Size; ++i) {
    // clang-tidy: use gsl::at
    number *= dims[i];  // NOLINT
  }
  return number;
}

template <typename T, typename S, size_t Size>
constexpr void increment_tensor_index(cpp20::array<T, Size>& tensor_index,
                                      const cpp20::array<S, Size>& dims) {
  for (size_t i = 0; i < Size; ++i) {
    if (++tensor_index[i] < static_cast<T>(dims[i])) {
      return;
    }
    tensor_index[i] = 0;
  }
}
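
// Illustrative sketch (editor's note): increment_tensor_index advances a
// multi-index like an odometer with the *first* index varying fastest. For
// dims = {3, 2} the sequence of tensor_index values is
//   {0,0} -> {1,0} -> {2,0} -> {0,1} -> {1,1} -> {2,1} -> {0,0} -> ...
// which matches the collapsed-index ordering used below.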

// index_to_swap_with takes the last two arguments as opposed to just one of
// them so that when the max constexpr steps is reached on clang it is reached
// in this function rather than in array.
template <size_t Rank>
constexpr size_t index_to_swap_with(
    const cpp20::array<size_t, Rank>& tensor_index,
    const cpp20::array<int, Rank>& sym, size_t index_to_swap_with,
    const size_t current_index) {
  // If you encounter infinite loop compilation errors here you are
  // constructing very large Tensors. If you are sure Tensor is
  // the correct data structure you can extend the compiler limit
  // by passing the flag -fconstexpr-steps=<SOME LARGER VALUE>
  while (index_to_swap_with < Rank) {  // See the comment above for the fix
    if (tensor_index[current_index] > tensor_index[index_to_swap_with] and
        sym[current_index] == sym[index_to_swap_with]) {
      return index_to_swap_with;
    }
    // Decrementing below zero wraps the unsigned index past Rank, which
    // terminates the loop.
    index_to_swap_with--;
  }
  return current_index;
}

// \brief Reorders a tensor multi-index to a canonical form based on its
// symmetries
//
// \details Reorders the values of the symmetric indices of a multi-index such
// that each symmetric subset has values descending from left to right in the
// multi-index. For example, if `tensor_index` is `[1, 2, 3, 4]` and `symm` is
// `[2, 2, 1, 1]`, the returned canonical multi-index is `[2, 1, 4, 3]`.
//
// \param tensor_index the multi-index to canonicalize
// \param symm the canonical symmetry of the tensor
// (see `Symmetry` for the form of canonical symmetries)
// \return the reordered, canonical form of `tensor_index`
template <size_t Rank>
constexpr cpp20::array<size_t, Rank> canonicalize_tensor_index(
    cpp20::array<size_t, Rank> tensor_index,
    const cpp20::array<int, Rank>& symm) {
  for (size_t i = 1; i < Rank; ++i) {
    for (size_t j = i; j < Rank; --j) {
      const size_t temp = tensor_index[j];
      const size_t swap = index_to_swap_with(tensor_index, symm, j, j);
      tensor_index[j] = tensor_index[swap];
      tensor_index[swap] = temp;
    }
  }
  return tensor_index;
}
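
// Illustrative sketch (editor's note): with the symmetry `[2, 2, 1, 1]` the
// first two and last two slots are each symmetric, so
//   canonicalize_tensor_index(cpp20::array<size_t, 4>{{1, 2, 3, 4}},
//                             cpp20::array<int, 4>{{2, 2, 1, 1}})
// returns {{2, 1, 4, 3}}: each symmetric pair is reordered to descend from
// left to right, while values are never moved between different symmetry
// groups.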

// \brief Reorders a tensor multi-index to a canonical form based on its
// symmetries
//
// \details See the other overload for details.
//
// \tparam Symm the symmetry of the tensor
// \param tensor_index the multi-index to canonicalize
// \return the reordered, canonical form of `tensor_index`
template <typename Symm, size_t Rank>
constexpr cpp20::array<size_t, Rank> canonicalize_tensor_index(
    cpp20::array<size_t, Rank> tensor_index) {
  static_assert(tmpl::size<Symm>::value == Rank,
                "Symm and tensor_index have different ranks");

  if constexpr (Rank < 2) {
    return tensor_index;
  } else {
    constexpr auto symm = make_cpp20_array_from_list<Symm>();
    constexpr auto max_symm_value = *alg::max_element(symm);
    static_assert(*alg::min_element(symm) > 0,
                  "canonicalize_tensor_index assumes symmetry values are > 0");
    static_assert(
        max_symm_value <= Rank,
        "canonicalize_tensor_index assumes symmetry values are <= Rank");

    // For a canonical Symmetry the values 1..N label the N symmetry groups,
    // so max_symm_value == Rank means every index is in its own group and the
    // multi-index is already canonical.
    if constexpr (max_symm_value == Rank) {
      return tensor_index;
    } else {
      return canonicalize_tensor_index(tensor_index, symm);
    }
  }
}

template <size_t Rank>
constexpr size_t compute_collapsed_index(
    const cpp20::array<size_t, Rank>& tensor_index,
    const cpp20::array<size_t, Rank> dims) {
  size_t collapsed_index = 0;
  for (size_t i = Rank - 1; i < Rank; --i) {
    collapsed_index = tensor_index[i] + dims[i] * collapsed_index;
  }
  return collapsed_index;
}
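
// Illustrative sketch (editor's note): the collapsed index is computed with
// the first index varying fastest. For a rank-2 multi-index (i, j) with
// dims = {3, 3},
//   compute_collapsed_index(cpp20::array<size_t, 2>{{i, j}},
//                           cpp20::array<size_t, 2>{{3, 3}}) == i + 3 * j
// so (2, 1) maps to collapsed index 5.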

/// \brief Computes a mapping from a collapsed_index to its storage_index
///
/// \details
/// Because each collapsed_index corresponds to a unique tensor_index, this map
/// also effectively relates each unique tensor_index to its storage_index.
/// While each index of the returned map corresponds to a unique tensor_index,
/// the element stored at each index is a storage_index that may or may not be
/// unique. If symmetries are present, this map will not be 1-1, as
/// collapsed_indices that correspond to tensor_indices with the same canonical
/// form will map to the same storage_index. Provided that any tensor_index is
/// first converted to its corresponding collapsed_index, this map can be used
/// to retrieve the storage_index of that tensor_index, canonicalized or not.
///
/// \tparam Symm the Symmetry of the tensor
/// \tparam NumberOfComponents the total number of components in the tensor
/// \param index_dimensions the dimensions of the tensor's indices
/// \return a mapping from a collapsed_index to its storage_index
template <typename Symm, size_t NumberOfComponents>
constexpr auto compute_collapsed_to_storage(
    const cpp20::array<size_t, tmpl::size<Symm>::value>& index_dimensions) {
  if constexpr (tmpl::size<Symm>::value != 0) {
    constexpr size_t rank = tmpl::size<Symm>::value;
    constexpr auto symm = make_cpp20_array_from_list<Symm>();
    constexpr std::int32_t max_symm_value = *alg::max_element(symm);
    static_assert(
        *alg::min_element(symm) > 0,
        "compute_collapsed_to_storage assumes symmetry values are > 0");
    static_assert(
        max_symm_value <= rank,
        "compute_collapsed_to_storage assumes symmetry values are <= rank");

    if constexpr (max_symm_value == rank) {
      // No symmetries: every component is stored, so the map is the identity.
      const size_t first_storage_index{0};
      cpp20::array<size_t, NumberOfComponents> collapsed_to_storage{};
      return alg::iota(collapsed_to_storage, first_storage_index);
    } else {
      cpp20::array<size_t, NumberOfComponents> collapsed_to_storage{};
      auto tensor_index = convert_to_cpp20_array(
          make_array<tmpl::size<Symm>::value>(size_t{0}));
      size_t count{0};
      for (auto& current_storage_index : collapsed_to_storage) {
        // Compute the canonical tensor_index, which, for symmetric indices,
        // is in decreasing numerical order, e.g. (3,2) rather than (2,3).
        const auto canonical_tensor_index =
            canonicalize_tensor_index<Symm>(tensor_index);
        // If the tensor_index was already in canonical form, then it is a new
        // unique entry and we assign it the next storage_index, incrementing
        // the count. Otherwise, the storage_index has already been
        // determined, so we look it up in the part of collapsed_to_storage
        // that has already been filled.
        if (tensor_index == canonical_tensor_index) {
          current_storage_index = count;
          ++count;
        } else {
          current_storage_index = collapsed_to_storage[compute_collapsed_index(
              canonical_tensor_index, index_dimensions)];
        }
        // Move to the next tensor_index.
        increment_tensor_index(tensor_index, index_dimensions);
      }
      return collapsed_to_storage;
    }
  } else {
    (void)index_dimensions;

    return cpp20::array<size_t, 1>{{0}};
  }
}
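
// Illustrative sketch (editor's note): for a symmetric rank-2 tensor in 3
// dimensions (Symm = Symmetry<1, 1>, index_dimensions = {3, 3}) the nine
// collapsed indices enumerate (i, j) with i varying fastest, and the
// resulting map is
//   collapsed_to_storage = {0, 1, 2, 1, 3, 4, 2, 4, 5}
// e.g. (0,1) and (1,0) share the canonical form (1,0) and thus storage_index
// 1, leaving 6 distinct storage indices for the 6 independent components.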

/// \brief Computes a 1-1 mapping from a storage_index to its canonical
/// tensor_index
///
/// \details
/// When symmetries are present, not all unique tensor_indices can be retrieved
/// from this map, as some tensor_indices share the same canonical form. If no
/// symmetries are present, each tensor_index is its own canonical form, not
/// shared by any other tensor_index, so the map is equivalently a 1-1 mapping
/// from a storage_index to a tensor_index and every unique tensor_index of the
/// tensor can be retrieved from it.
///
/// \tparam Symm the Symmetry of the tensor
/// \tparam NumIndComps the number of independent components in the tensor,
/// i.e. components equivalent due to symmetry counted only once
/// \tparam NumComps the total number of components in the tensor
/// \param collapsed_to_storage a mapping from a collapsed_index to its
/// storage_index, which is only 1-1 if there are no symmetries
/// \param index_dimensions the dimensions of the tensor's indices
/// \return a 1-1 mapping from a storage_index to its canonical tensor_index
template <typename Symm, size_t NumIndComps, size_t NumComps>
constexpr auto compute_storage_to_tensor(
    const cpp20::array<size_t, NumComps>& collapsed_to_storage,
    const cpp20::array<size_t, tmpl::size<Symm>::value>& index_dimensions) {
  if constexpr (tmpl::size<Symm>::value != 0) {
    constexpr size_t rank = tmpl::size<Symm>::value;
    cpp20::array<cpp20::array<size_t, rank>, NumIndComps> storage_to_tensor{};
    cpp20::array<size_t, rank> tensor_index =
        convert_to_cpp20_array(make_array<rank>(size_t{0}));
    for (const auto& current_storage_index : collapsed_to_storage) {
      storage_to_tensor[current_storage_index] =
          canonicalize_tensor_index<Symm>(tensor_index);
      increment_tensor_index(tensor_index, index_dimensions);
    }
    return storage_to_tensor;
  } else {
    (void)collapsed_to_storage;
    (void)index_dimensions;

    return cpp20::array<cpp20::array<size_t, 1>, 1>{
        {cpp20::array<size_t, 1>{{0}}}};
  }
}
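
// Illustrative sketch (editor's note): continuing the symmetric rank-2,
// 3-dimensional example above, the inverse map sends each storage_index to
// its canonical (descending) multi-index:
//   storage_to_tensor = {{0,0}, {1,0}, {2,0}, {1,1}, {2,1}, {2,2}}
// The non-canonical multi-index (0,1) does not appear; it is recovered via
// its canonical form (1,0) at storage_index 1.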

template <size_t NumIndComps, typename T, size_t NumComps>
constexpr cpp20::array<size_t, NumIndComps> compute_multiplicity(
    const cpp20::array<T, NumComps>& collapsed_to_storage) {
  cpp20::array<size_t, NumIndComps> multiplicity =
      convert_to_cpp20_array(make_array<NumIndComps>(size_t{0}));
  for (const auto& current_storage_index : collapsed_to_storage) {
    ++multiplicity[current_storage_index];
  }
  return multiplicity;
}
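
// Illustrative sketch (editor's note): the multiplicity counts how many
// tensor components map to each stored component. For the symmetric rank-2,
// 3-dimensional example above,
//   compute_multiplicity<6>(collapsed_to_storage) == {1, 2, 2, 1, 2, 1}
// since each off-diagonal stored component represents two tensor components
// while each diagonal component represents one; the entries sum to 9.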

template <size_t NumIndices>
struct ComponentNameImpl {
  template <typename Structure, typename T>
  static std::string apply(
      const std::array<T, NumIndices>& tensor_index,
      const std::array<std::string, NumIndices>& axis_labels) {
    const size_t storage_index = Structure::get_storage_index(tensor_index);
    std::array<std::string, Structure::rank()> labels = axis_labels;
    constexpr auto index_dim = Structure::dims();
    for (size_t i = 0; i < Structure::rank(); ++i) {
      if (gsl::at(labels, i).length() == 0) {
        if (gsl::at(Structure::index_types(), i) == IndexType::Spacetime) {
          switch (gsl::at(index_dim, i)) {
            case 2:
              gsl::at(labels, i) = "tx";
              break;
            case 3:
              gsl::at(labels, i) = "txy";
              break;
            case 4:
              gsl::at(labels, i) = "txyz";
              break;
            default:
              ERROR("Tensor dim["
                    << i
                    << "] must be 2, 3, or 4 for default axis_labels. "
                       "Either pass a string or extend the function.");
          }
        } else {
          switch (gsl::at(index_dim, i)) {
            case 1:
              gsl::at(labels, i) = "x";
              break;
            case 2:
              gsl::at(labels, i) = "xy";
              break;
            case 3:
              gsl::at(labels, i) = "xyz";
              break;
            default:
              ERROR("Tensor dim["
                    << i
                    << "] must be 1, 2, or 3 for default axis_labels. "
                       "Either pass a string or extend the function.");
          }
        }
      } else {
        if (gsl::at(axis_labels, i).length() != gsl::at(index_dim, i)) {
          ERROR("Dimension mismatch: Tensor has dim = "
                << gsl::at(index_dim, i) << ", but you specified "
                << gsl::at(axis_labels, i).length() << " different labels in "
                << gsl::at(axis_labels, i));
        }
      }
    }
    // Create a string labeling the canonical tensor_index of the component
    std::stringstream ss;
    const auto canonical_tensor_index =
        Structure::get_canonical_tensor_index(storage_index);
    for (size_t r = 0; r < Structure::rank(); ++r) {
      ss << gsl::at(labels, r)[gsl::at(canonical_tensor_index, r)];
    }
    return ss.str();
  }
};

template <>
struct ComponentNameImpl<0> {
  template <typename Structure, typename T>
  static std::string apply(const std::array<T, 0>& /*tensor_index*/,
                           const std::array<std::string, 0>& /*axis_labels*/) {
    return "Scalar";
  }
};
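
// Illustrative sketch (editor's note): for a symmetric rank-2 spatial tensor
// in 3 dimensions with the default labels "xyz", the component with
// tensor_index {1, 2} is first mapped to its canonical form (2, 1), so
//   ComponentNameImpl<2>::apply<SomeStructure>({{1, 2}}, {{"", ""}})
// would return "zy" (the same name as the equivalent component {2, 1}), while
// a rank-0 tensor simply returns "Scalar". Here `SomeStructure` stands for a
// Structure with Symmetry<1, 1> and two 3-dimensional spatial indices.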

/// \ingroup TensorGroup
/// A lookup table between each tensor_index and storage_index
///
/// 1. tensor_index: (a, b, c,...). There are Dim^rank tensor_indices
/// 2. collapsed_index: a + Dim * (b + Dim * (c + ...)), there are Dim^rank
///                     unique collapsed indices and there is a 1-1 map between
///                     a tensor_index and a collapsed_index.
/// 3. storage_index: index into the storage vector of the Tensor. This depends
///                   on symmetries of the tensor, rank, and dimensionality. If
///                   the Tensor has symmetries, tensor_indices that are
///                   equivalent due to symmetry will have the same
///                   storage_index and canonical form. This means that the
///                   mapping between tensor_indices and storage_indices is 1-1
///                   only if no symmetries are present, but there is a 1-1
///                   mapping between canonical tensor_indices and
///                   storage_indices, regardless of symmetry.
/// \tparam Symm the symmetry of the Tensor
/// \tparam Indices list of TensorIndexTypes giving the dimensionality and
/// frame of each index
template <typename Symm, typename... Indices>
struct Structure {
  static_assert(
      TensorMetafunctions::check_index_symmetry_v<Symm, Indices...>,
      "Cannot construct a Tensor with a symmetric pair of indices that are "
      "not the same.");
  static_assert(tmpl::size<Symm>::value == sizeof...(Indices),
                "The number of indices in Symmetry does not match the number "
                "of indices given to the Structure.");
  static_assert(
      tmpl2::flat_all_v<tt::is_tensor_index_type<Indices>::value...>,
      "All Indices passed to Structure must be of type TensorIndexType.");

  using index_list = tmpl::list<Indices...>;
  using symmetry = Symm;

  SPECTRE_ALWAYS_INLINE static constexpr size_t rank() {
    return sizeof...(Indices);
  }

  SPECTRE_ALWAYS_INLINE static constexpr size_t size() {
    constexpr auto number_of_independent_components =
        ::Tensor_detail::number_of_independent_components(
            make_array_from_list<
                tmpl::conditional_t<sizeof...(Indices) != 0, Symm, int>>(),
            make_array_from_list<tmpl::conditional_t<sizeof...(Indices) != 0,
                                                     index_list, size_t>>());
    return number_of_independent_components;
  }

  SPECTRE_ALWAYS_INLINE static constexpr size_t number_of_components() {
    constexpr auto number_of_components = ::Tensor_detail::number_of_components(
        make_array_from_list<tmpl::conditional_t<sizeof...(Indices) != 0,
                                                 index_list, size_t>>());
    return number_of_components;
  }

  /// A mapping between each collapsed_index and its storage_index. See
  /// compute_collapsed_to_storage for details.
  static constexpr auto collapsed_to_storage_ =
      compute_collapsed_to_storage<Symm, number_of_components()>(
          make_cpp20_array_from_list<tmpl::conditional_t<
              sizeof...(Indices) == 0, size_t, index_list>>());
  /// A 1-1 mapping between each storage_index and its canonical tensor_index.
  /// See compute_storage_to_tensor for details.
  static constexpr auto storage_to_tensor_ =
      compute_storage_to_tensor<Symm, size()>(
          collapsed_to_storage_,
          make_cpp20_array_from_list<tmpl::conditional_t<
              sizeof...(Indices) == 0, size_t, index_list>>());
  static constexpr auto multiplicity_ =
      compute_multiplicity<size()>(collapsed_to_storage_);

  /// Retrieves the dimensionality of the I'th index
  template <int I>
  SPECTRE_ALWAYS_INLINE static constexpr size_t dim() {
    static_assert(sizeof...(Indices),
                  "A scalar does not have any indices from which you can "
                  "retrieve the dimensionality.");
    return tmpl::at<index_list, tmpl::int32_t<I>>::value;
  }

  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t, sizeof...(Indices)>
  dims() {
    constexpr auto dims = make_array_from_list<
        tmpl::conditional_t<sizeof...(Indices) != 0, index_list, size_t>>();
    return dims;
  }

  SPECTRE_ALWAYS_INLINE static constexpr std::array<int, sizeof...(Indices)>
  symmetries() {
    return make_array_from_list<
        tmpl::conditional_t<0 != sizeof...(Indices), Symm, int>>();
  }

  SPECTRE_ALWAYS_INLINE static constexpr std::array<IndexType,
                                                    sizeof...(Indices)>
  index_types() {
    return std::array<IndexType, sizeof...(Indices)>{{Indices::index_type...}};
  }

  /// Return array of the valence of each index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<UpLo, sizeof...(Indices)>
  index_valences() {
    return std::array<UpLo, sizeof...(Indices)>{{Indices::ul...}};
  }

  /// Return array of the frame of each index
  SPECTRE_ALWAYS_INLINE static constexpr auto index_frames() {
    return std::tuple<typename Indices::Frame...>{};
  }

  /// \brief Get the canonical tensor_index array of a storage_index
  ///
  /// \details
  /// For a symmetric tensor \f$T_{(ab)}\f$ with an associated symmetry list
  /// `Symmetry<1, 1>`, this will return, e.g. `{{3, 2}}` rather than `{{2, 3}}`
  /// for that particular index. Note that the canonical ordering is
  /// implementation-defined.
  ///
  /// As `storage_to_tensor_` is a computed 1-1 mapping between a storage_index
  /// and canonical tensor_index, we simply retrieve the canonical tensor_index
  /// from this map.
  ///
  /// \param storage_index the storage_index of which to get the canonical
  /// tensor_index
  /// \return the canonical tensor_index array of a storage_index
  template <size_t Rank = sizeof...(Indices)>
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t, Rank>
  get_canonical_tensor_index(const size_t storage_index) {
    if constexpr (Rank != 0) {
      constexpr auto storage_to_tensor = storage_to_tensor_;
      return gsl::at(storage_to_tensor, storage_index);
    } else {
      (void)storage_index;

      return std::array<size_t, 0>{};
    }
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this
  /// is a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \param args comma separated list of the tensor_index of which to get the
  /// storage_index
  /// \return the storage_index of a tensor_index
  template <typename... N>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index(
      const N... args) {
    static_assert(sizeof...(Indices) == sizeof...(N),
                  "the number of arguments must be equal to the rank");
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(
        collapsed_to_storage,
        compute_collapsed_index(
            cpp20::array<size_t, sizeof...(N)>{{static_cast<size_t>(args)...}},
            make_cpp20_array_from_list<tmpl::conditional_t<
                0 != sizeof...(Indices), index_list, size_t>>()));
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this
  /// is a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \param tensor_index the tensor_index of which to get the storage_index
  /// \return the storage_index of a tensor_index
  template <typename I>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index(
      const std::array<I, sizeof...(Indices)>& tensor_index) {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(collapsed_to_storage,
                   compute_collapsed_index(
                       convert_to_cpp20_array(tensor_index),
                       make_cpp20_array_from_list<tmpl::conditional_t<
                           0 != sizeof...(Indices), index_list, size_t>>()));
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this
  /// is a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \tparam N the comma separated list of the tensor_index of which to get
  /// the storage_index
  /// \return the storage_index of a tensor_index
  template <int... N, Requires<(sizeof...(N) > 0)> = nullptr>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index() {
    static_assert(sizeof...(Indices) == sizeof...(N),
                  "the number of arguments must be equal to the rank");
    constexpr std::size_t storage_index =
        collapsed_to_storage_[compute_collapsed_index(
            cpp20::array<size_t, sizeof...(N)>{{N...}},
            make_cpp20_array_from_list<index_list>())];
    return storage_index;
  }

  /// Get the multiplicity of the storage_index
  /// \param storage_index the storage_index of which to get the multiplicity
  SPECTRE_ALWAYS_INLINE static constexpr size_t multiplicity(
      const size_t storage_index) {
    constexpr auto multiplicity = multiplicity_;
    return gsl::at(multiplicity, storage_index);
  }

  /// Get the array mapping each collapsed_index to its storage_index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t,
                                                    number_of_components()>
  collapsed_to_storage() {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return collapsed_to_storage;
  }

  /// Get the storage_index for the specified collapsed index
  SPECTRE_ALWAYS_INLINE static constexpr int collapsed_to_storage(
      const size_t i) {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(collapsed_to_storage, i);
  }

  /// Get the array of canonical tensor_indices corresponding to the
  /// storage_indices.
  SPECTRE_ALWAYS_INLINE static constexpr const cpp20::array<
      cpp20::array<size_t, sizeof...(Indices) == 0 ? 1 : sizeof...(Indices)>,
      size()>
  storage_to_tensor_index() {
    constexpr auto storage_to_tensor = storage_to_tensor_;
    return storage_to_tensor;
  }

  template <typename T>
  SPECTRE_ALWAYS_INLINE static std::string component_name(
      const std::array<T, rank()>& tensor_index,
      const std::array<std::string, rank()>& axis_labels) {
    return ComponentNameImpl<sizeof...(Indices)>::template apply<Structure>(
        tensor_index, axis_labels);
  }
};
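
// Illustrative sketch (editor's note): a typical instantiation for a
// symmetric rank-2 tensor in 3 spatial dimensions. The exact index-type
// spelling (here SpatialIndex<3, UpLo::Lo, Frame::Inertial>, from
// IndexType.hpp) is assumed for illustration only.
//
//   using symmetric_structure =
//       Structure<Symmetry<1, 1>,
//                 SpatialIndex<3, UpLo::Lo, Frame::Inertial>,
//                 SpatialIndex<3, UpLo::Lo, Frame::Inertial>>;
//   static_assert(symmetric_structure::rank() == 2);
//   static_assert(symmetric_structure::number_of_components() == 9);
//   static_assert(symmetric_structure::size() == 6);
//   // (1, 2) and (2, 1) refer to the same stored component:
//   static_assert(symmetric_structure::get_storage_index(1, 2) ==
//                 symmetric_structure::get_storage_index(2, 1));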
}  // namespace Tensor_detail