Line data Source code
1 1 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : /// \file
5 : /// Defines class TensorStructure<Symmetry, Indices...>
6 :
7 : #pragma once
8 :
#include <algorithm>
#include <array>
#include <cstddef>
#include <limits>
#include <sstream>
#include <string>

#include "DataStructures/Tensor/IndexType.hpp"
#include "DataStructures/Tensor/Metafunctions.hpp"
#include "DataStructures/Tensor/Symmetry.hpp"
#include "Utilities/Array.hpp"  // IWYU pragma: export
#include "Utilities/ConstantExpressions.hpp"
#include "Utilities/ErrorHandling/Assert.hpp"
#include "Utilities/ForceInline.hpp"
#include "Utilities/Gsl.hpp"
#include "Utilities/MakeArray.hpp"
#include "Utilities/Requires.hpp"
#include "Utilities/TMPL.hpp"
23 :
24 : namespace Tensor_detail {
/// Counts the independent components of a tensor with symmetry `symm` and
/// index dimensions `dims`.  Indices sharing the same (positive) symmetry
/// label form a fully symmetric group; a group of n indices of dimension d
/// contributes binomial(d + n - 1, n) independent components, and groups
/// multiply together.
template <size_t Size>
constexpr size_t number_of_independent_components(
    const std::array<int, Size>& symm, const std::array<size_t, Size>& dims) {
  if constexpr (Size == 0) {
    (void)symm;
    (void)dims;

    // A scalar has exactly one component.
    return 1;
  } else if constexpr (Size == 1) {
    (void)symm;

    // A rank-1 tensor admits no symmetry: one component per dimension.
    return dims[0];
  } else {
    // Symmetry labels are 1-based; find the largest label so every symmetry
    // group can be visited exactly once below.
    size_t max_element = 0;
    for (size_t i = 0; i < Size; ++i) {
      // clang-tidy: internals of assert(), don't need gsl::at in constexpr
      // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay,cppcoreguidelines-pro-bounds-constant-array-index)
      assert(symm[i] > 0);
      // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-constant-array-index)
      max_element = std::max(static_cast<size_t>(ce_abs(symm[i])), max_element);
    }
    assert(max_element > 0);  // NOLINT
    size_t total_independent_components = 1;
    for (size_t symm_index = 1; symm_index <= max_element; ++symm_index) {
      // Count how many indices carry this symmetry label and record their
      // (shared) dimension.
      size_t number_of_indices_with_symm = 0;
      size_t dim_of_index = 0;
      for (size_t i = 0; i < Size; ++i) {
        if (static_cast<size_t>(symm[i]) == symm_index) {  // NOLINT
          ++number_of_indices_with_symm;
          dim_of_index = dims[i];  // NOLINT
        }
      }
      assert(dim_of_index > 0);                 // NOLINT
      assert(number_of_indices_with_symm > 0);  // NOLINT
      // Multiply in binomial(d + n - 1, n).  Both branches compute the same
      // value; the smaller of n and d-1 is used as the falling-factorial
      // depth to keep intermediate values (and constexpr steps) small.
      if (dim_of_index - 1 > number_of_indices_with_symm) {
        total_independent_components *=
            falling_factorial(dim_of_index + number_of_indices_with_symm - 1,
                              number_of_indices_with_symm) /
            factorial(number_of_indices_with_symm);
      } else {
        total_independent_components *=
            falling_factorial(dim_of_index + number_of_indices_with_symm - 1,
                              dim_of_index - 1) /
            factorial(dim_of_index - 1);
      }
    }
    return total_independent_components;
  }
}
74 :
/// Returns the total number of components of a tensor whose indices have
/// the given dimensions, i.e. the product of all entries of `dims`
/// (1 for a rank-0 tensor).
template <size_t Size>
constexpr size_t number_of_components(const std::array<size_t, Size>& dims) {
  size_t product = 1;
  for (const size_t dim : dims) {
    product *= dim;
  }
  return product;
}
84 :
85 : template <typename T, typename S, size_t Size>
86 : constexpr void increment_tensor_index(cpp20::array<T, Size>& tensor_index,
87 : const cpp20::array<S, Size>& dims) {
88 : for (size_t i = 0; i < Size; ++i) {
89 : if (++tensor_index[i] < static_cast<T>(dims[i])) {
90 : return;
91 : }
92 : tensor_index[i] = 0;
93 : }
94 : }
95 :
// index_to_swap_with takes the last two arguments as opposed to just one of
// them so that when the max constexpr steps is reached on clang it is reached
// in this function rather than in array.
//
// Scans forward from slot `index_to_swap_with` for the first slot that shares
// the symmetry label of `current_index` and holds a strictly larger value;
// returns that slot, or `current_index` itself ("no swap") once the scan runs
// off the end.  Note that the third parameter deliberately shadows the
// function name.
template <size_t Rank>
constexpr size_t index_to_swap_with(
    const cpp20::array<size_t, Rank>& tensor_index,
    const cpp20::array<int, Rank>& sym, size_t index_to_swap_with,
    const size_t current_index) {
  // If you encounter infinite loop compilation errors here you are
  // constructing very large Tensor's. If you are sure Tensor is
  // the correct data structure you can extend the compiler limit
  // by passing the flag -fconstexpr-steps=<SOME LARGER VALUE>
  while (true) {  // See source code comment on line above this one for fix
    if (Rank == index_to_swap_with) {
      // Scanned past the last slot: nothing to swap with.
      return current_index;
    } else if (tensor_index[current_index] <
                   tensor_index[index_to_swap_with] and
               sym[current_index] == sym[index_to_swap_with]) {
      return index_to_swap_with;
    }
    index_to_swap_with++;
  }
}
119 :
120 : template <size_t Size, size_t SymmSize>
121 : constexpr cpp20::array<size_t, Size> canonicalize_tensor_index(
122 : cpp20::array<size_t, Size> tensor_index,
123 : const cpp20::array<int, SymmSize>& symm) {
124 : for (size_t i = 0; i < Size; ++i) {
125 : const size_t temp = tensor_index[i];
126 : const size_t swap = index_to_swap_with(tensor_index, symm, i, i);
127 : tensor_index[i] = tensor_index[swap];
128 : tensor_index[swap] = temp;
129 : }
130 : return tensor_index;
131 : }
132 :
133 : template <size_t Rank>
134 : constexpr size_t compute_collapsed_index(
135 : const cpp20::array<size_t, Rank>& tensor_index,
136 : const cpp20::array<size_t, Rank> dims) {
137 : size_t collapsed_index = 0;
138 : for (size_t i = Rank - 1; i < Rank; --i) {
139 : collapsed_index = tensor_index[i] + dims[i] * collapsed_index;
140 : }
141 : return collapsed_index;
142 : }
143 :
/// \brief Computes a mapping from a collapsed_index to its storage_index
///
/// \details
/// Because each collapsed_index corresponds to a unique tensor_index, this map
/// also effectively relates each unique tensor_index to its storage_index.
/// While each index of the returned map corresponds to a unique tensor_index,
/// the element stored at each index is a storage_index that may or may not be
/// unique. If symmetries are present, this map will not be 1-1, as
/// collapsed_indices that correspond to tensor_indices with the same canonical
/// form will map to the same storage_index. Provided that any tensor_index is
/// first converted to its corresponding collapsed_index, this map can be used
/// to retrieve the storage_index of that tensor_index, canonicalized or not.
///
/// \tparam Symm the Symmetry of the tensor
/// \tparam NumberOfComponents the total number of components in the tensor
/// \param index_dimensions the dimensions of the tensor's indices
/// \return a mapping from a collapsed_index to its storage_index
template <typename Symm, size_t NumberOfComponents>
constexpr auto compute_collapsed_to_storage(
    const cpp20::array<size_t, tmpl::size<Symm>::value>& index_dimensions) {
  if constexpr (tmpl::size<Symm>::value != 0) {
    cpp20::array<size_t, NumberOfComponents> collapsed_to_storage{};
    // Walk all tensor_indices in collapsed_index order, starting from
    // (0, 0, ...).
    auto tensor_index =
        convert_to_cpp20_array(make_array<tmpl::size<Symm>::value>(size_t{0}));
    size_t count{0};
    for (auto& current_storage_index : collapsed_to_storage) {
      // Compute canonical tensor_index, which, for symmetric get_tensor_index
      // is in decreasing numerical order, e.g. (3,2) rather than (2,3).
      const auto canonical_tensor_index = canonicalize_tensor_index(
          tensor_index, make_cpp20_array_from_list<Symm>());
      // If the tensor_index was already in the canonical form, then it must be
      // a new unique entry and we add it to collapsed_to_storage_ as a new
      // integer, thus increasing the size_. Else, the StorageIndex has already
      // been determined so we look it up in the existing collapsed_to_storage
      // table. (The canonical form occurs at a smaller, already-visited
      // collapsed_index, so the looked-up entry has been filled in.)
      if (tensor_index == canonical_tensor_index) {
        current_storage_index = count;
        ++count;
      } else {
        current_storage_index = collapsed_to_storage[compute_collapsed_index(
            canonical_tensor_index, index_dimensions)];
      }
      // Move to the next tensor_index.
      increment_tensor_index(tensor_index, index_dimensions);
    }
    return collapsed_to_storage;
  } else {
    (void)index_dimensions;

    // Rank 0: a scalar's single component has storage_index 0.
    return cpp20::array<size_t, 1>{{0}};
  }
}
196 :
/// \brief Computes a 1-1 mapping from a storage_index to its canonical
/// tensor_index
///
/// \details
/// When symmetries are present, not all unique tensor_indices can be retrieved
/// from this map, as some tensor_indices will share the same canonical form.
/// Otherwise, if no symmetries are present, each unique tensor_index is already
/// in the canonical form, and one that is not shared by another tensor_index,
/// so this would equivalently mean a 1-1 mapping from a storage_index to a
/// tensor_index. This means that when no symmetries are present, all unique
/// tensor_indices of a tensor can be retrieved from this map.
///
/// \tparam Symm the Symmetry of the tensor
/// \tparam NumIndComps the number of independent components in the tensor, i.e.
/// components equivalent due to symmetry counted only once
/// \tparam NumComps the total number of components in the tensor
/// \param collapsed_to_storage a mapping from a collapsed_index to its
/// storage_index, which is only 1-1 if there are no symmetries
/// \param index_dimensions the dimensions of the tensor's indices
/// \return a 1-1 mapping from a storage_index to its canonical tensor_index
template <typename Symm, size_t NumIndComps, size_t NumComps>
constexpr auto compute_storage_to_tensor(
    const cpp20::array<size_t, NumComps>& collapsed_to_storage,
    const cpp20::array<size_t, tmpl::size<Symm>::value>& index_dimensions) {
  if constexpr (tmpl::size<Symm>::value > 0) {
    constexpr size_t rank = tmpl::size<Symm>::value;
    cpp20::array<cpp20::array<size_t, rank>, NumIndComps> storage_to_tensor{};
    cpp20::array<size_t, rank> tensor_index =
        convert_to_cpp20_array(make_array<rank>(size_t{0}));
    // Walk all tensor_indices in collapsed order; entries sharing a
    // storage_index are written repeatedly, but always with the same
    // canonical tensor_index, so the final table is well defined.
    for (const auto& current_storage_index : collapsed_to_storage) {
      storage_to_tensor[current_storage_index] = canonicalize_tensor_index(
          tensor_index, make_cpp20_array_from_list<Symm>());
      increment_tensor_index(tensor_index, index_dimensions);
    }
    return storage_to_tensor;
  } else {
    (void)collapsed_to_storage;
    (void)index_dimensions;

    // Rank 0: single storage slot mapped to the trivial index {0}.
    return cpp20::array<cpp20::array<size_t, 1>, 1>{
        {cpp20::array<size_t, 1>{{0}}}};
  }
}
240 :
241 : template <size_t NumIndComps, typename T, size_t NumComps>
242 : constexpr cpp20::array<size_t, NumIndComps> compute_multiplicity(
243 : const cpp20::array<T, NumComps>& collapsed_to_storage) {
244 : cpp20::array<size_t, NumIndComps> multiplicity =
245 : convert_to_cpp20_array(make_array<NumIndComps>(size_t{0}));
246 : for (const auto& current_storage_index : collapsed_to_storage) {
247 : ++multiplicity[current_storage_index];
248 : }
249 : return multiplicity;
250 : }
251 :
252 : template <size_t NumIndices>
253 : struct ComponentNameImpl {
254 : template <typename Structure, typename T>
255 : static std::string apply(
256 : const std::array<T, NumIndices>& tensor_index,
257 : const std::array<std::string, NumIndices>& axis_labels) {
258 : const size_t storage_index = Structure::get_storage_index(tensor_index);
259 : std::array<std::string, Structure::rank()> labels = axis_labels;
260 : constexpr auto index_dim = Structure::dims();
261 : for (size_t i = 0; i < Structure::rank(); ++i) {
262 : if (gsl::at(labels, i).length() == 0) {
263 : if (gsl::at(Structure::index_types(), i) == IndexType::Spacetime) {
264 : switch (gsl::at(index_dim, i)) {
265 : case 2:
266 : gsl::at(labels, i) = "tx";
267 : break;
268 : case 3:
269 : gsl::at(labels, i) = "txy";
270 : break;
271 : case 4:
272 : gsl::at(labels, i) = "txyz";
273 : break;
274 : default:
275 : ERROR("Tensor dim["
276 : << i
277 : << "] must be 1,2,3, or 4 for default axis_labels. "
278 : "Either pass a string or extend the function.");
279 : }
280 : } else {
281 : switch (gsl::at(index_dim, i)) {
282 : case 1:
283 : gsl::at(labels, i) = "x";
284 : break;
285 : case 2:
286 : gsl::at(labels, i) = "xy";
287 : break;
288 : case 3:
289 : gsl::at(labels, i) = "xyz";
290 : break;
291 : default:
292 : ERROR("Tensor dim["
293 : << i
294 : << "] must be 1,2, or 3 for default axis_labels. "
295 : "Either pass a string or extend the function.");
296 : }
297 : }
298 : } else {
299 : if (gsl::at(axis_labels, i).length() != gsl::at(index_dim, i)) {
300 : ERROR("Dimension mismatch: Tensor has dim = "
301 : << gsl::at(index_dim, i) << ", but you specified "
302 : << gsl::at(axis_labels, i).length() << " different labels in "
303 : << gsl::at(axis_labels, i));
304 : }
305 : }
306 : }
307 : // Create string labeling get_tensor_index
308 : std::stringstream ss;
309 : const auto canonical_tensor_index =
310 : Structure::get_canonical_tensor_index(storage_index);
311 : for (size_t r = 0; r < Structure::rank(); ++r) {
312 : ss << gsl::at(labels, r)[gsl::at(canonical_tensor_index, r)];
313 : }
314 : return ss.str();
315 : }
316 : };
317 :
318 : template <>
319 : struct ComponentNameImpl<0> {
320 : template <typename Structure, typename T>
321 : static std::string apply(const std::array<T, 0>& /*tensor_index*/,
322 : const std::array<std::string, 0>& /*axis_labels*/) {
323 : return "Scalar";
324 : }
325 : };
326 :
/// \ingroup TensorGroup
/// A lookup table between each tensor_index and storage_index
///
/// 1. tensor_index: (a, b, c,...). There are Dim^rank tensor_index's
/// 2. collapsed_index: a + Dim * (b + Dim * (c + ...)), there are Dim^rank
///                     unique collapsed indices and there is a 1-1 map between
///                     a tensor_index and a collapsed_index.
/// 3. storage_index: index into the storage vector of the Tensor. This depends
///                   on symmetries of the tensor, rank, and dimensionality. If
///                   the Tensor has symmetries, tensor_indices that are
///                   equivalent due to symmetry will have the same
///                   storage_index and canonical form. This means that the
///                   mapping between tensor_indices and storage_indices is 1-1
///                   only if no symmetries are present, but there is a 1-1
///                   mapping between canonical tensor_indices and
///                   storage_indices, regardless of symmetry.
/// \tparam Symm the symmetry of the Tensor
/// \tparam Indices list of tensor_index's giving the dimensionality and frame
/// of the index
template <typename Symm, typename... Indices>
struct Structure {
  static_assert(
      TensorMetafunctions::check_index_symmetry_v<Symm, Indices...>,
      "Cannot construct a Tensor with a symmetric pair that are not the same.");
  static_assert(tmpl::size<Symm>::value == sizeof...(Indices),
                "The number of indices in Symmetry do not match the number of "
                "indices given to the Structure.");
  static_assert(
      tmpl2::flat_all_v<tt::is_tensor_index_type<Indices>::value...>,
      "All Indices passed to Structure must be of type TensorIndexType.");

  using index_list = tmpl::list<Indices...>;

  /// The number of indices of the Tensor.
  SPECTRE_ALWAYS_INLINE static constexpr size_t rank() {
    return sizeof...(Indices);
  }

  /// The number of independent components, i.e. components not related to
  /// each other by symmetry.  The `int`/`size_t` fallbacks in the
  /// conditional_t's keep the make_array_from_list instantiations
  /// well-formed for rank 0.
  SPECTRE_ALWAYS_INLINE static constexpr size_t size() {
    constexpr auto number_of_independent_components =
        ::Tensor_detail::number_of_independent_components(
            make_array_from_list<
                tmpl::conditional_t<sizeof...(Indices) != 0, Symm, int>>(),
            make_array_from_list<tmpl::conditional_t<sizeof...(Indices) != 0,
                                                     index_list, size_t>>());
    return number_of_independent_components;
  }

  /// The total number of components (the product of the index dimensions),
  /// counting symmetry-equivalent components separately.
  SPECTRE_ALWAYS_INLINE static constexpr size_t number_of_components() {
    constexpr auto number_of_components = ::Tensor_detail::number_of_components(
        make_array_from_list<tmpl::conditional_t<sizeof...(Indices) != 0,
                                                 index_list, size_t>>());
    return number_of_components;
  }

  /// A mapping between each collapsed_index and its storage_index. See
  /// \ref compute_collapsed_to_storage for details.
  static constexpr auto collapsed_to_storage_ =
      compute_collapsed_to_storage<Symm, number_of_components()>(
          make_cpp20_array_from_list<tmpl::conditional_t<
              sizeof...(Indices) == 0, size_t, index_list>>());
  /// A 1-1 mapping between each storage_index and its canonical tensor_index.
  /// See \ref compute_storage_to_tensor for details.
  static constexpr auto storage_to_tensor_ = compute_storage_to_tensor<Symm,
                                                                       size()>(
      collapsed_to_storage_,
      make_cpp20_array_from_list<
          tmpl::conditional_t<sizeof...(Indices) == 0, size_t, index_list>>());
  /// For each storage_index, the number of components identified with it by
  /// symmetry. See \ref compute_multiplicity for details.
  static constexpr auto multiplicity_ =
      compute_multiplicity<size()>(collapsed_to_storage_);

  // Retrieves the dimensionality of the I'th index
  template <int I>
  SPECTRE_ALWAYS_INLINE static constexpr size_t dim() {
    static_assert(sizeof...(Indices),
                  "A scalar does not have any indices from which you can "
                  "retrieve the dimensionality.");
    return tmpl::at<index_list, tmpl::int32_t<I>>::value;
  }

  /// The dimensionality of each index, in order.
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t, sizeof...(Indices)>
  dims() {
    constexpr auto dims = make_array_from_list<
        tmpl::conditional_t<sizeof...(Indices) != 0, index_list, size_t>>();
    return dims;
  }

  /// The symmetry labels of each index, in order.
  SPECTRE_ALWAYS_INLINE static constexpr std::array<int, sizeof...(Indices)>
  symmetries() {
    return make_array_from_list<
        tmpl::conditional_t<0 != sizeof...(Indices), Symm, int>>();
  }

  /// The IndexType (spatial or spacetime) of each index, in order.
  SPECTRE_ALWAYS_INLINE static constexpr std::array<IndexType,
                                                    sizeof...(Indices)>
  index_types() {
    return std::array<IndexType, sizeof...(Indices)>{{Indices::index_type...}};
  }

  /// Return array of the valence of each index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<UpLo, sizeof...(Indices)>
  index_valences() {
    return std::array<UpLo, sizeof...(Indices)>{{Indices::ul...}};
  }

  /// Return array of the frame of each index
  SPECTRE_ALWAYS_INLINE static constexpr auto index_frames() {
    return std::tuple<typename Indices::Frame...>{};
  }

  /// \brief Get the canonical tensor_index array of a storage_index
  ///
  /// \details
  /// For a symmetric tensor \f$T_{(ab)}\f$ with an associated symmetry list
  /// `Symmetry<1, 1>`, this will return, e.g. `{{3, 2}}` rather than `{{2, 3}}`
  /// for that particular index. Note that the canonical ordering is
  /// implementation-defined.
  ///
  /// As `storage_to_tensor_` is a computed 1-1 mapping between a storage_index
  /// and canonical tensor_index, we simply retrieve the canonical tensor_index
  /// from this map.
  ///
  /// \param storage_index the storage_index of which to get the canonical
  /// tensor_index
  /// \return the canonical tensor_index array of a storage_index
  template <size_t Rank = sizeof...(Indices)>
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t, Rank>
  get_canonical_tensor_index(const size_t storage_index) {
    if constexpr (Rank != 0) {
      // Copy into a local so gsl::at operates on a constexpr object.
      constexpr auto storage_to_tensor = storage_to_tensor_;
      return gsl::at(storage_to_tensor, storage_index);
    } else {
      (void)storage_index;

      return std::array<size_t, 0>{};
    }
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this is
  /// a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \param args comma separated list of the tensor_index of which to get the
  /// storage_index
  /// \return the storage_index of a tensor_index
  template <typename... N>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index(
      const N... args) {
    static_assert(sizeof...(Indices) == sizeof...(N),
                  "the number arguments must be equal to rank_");
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    // Silence a spurious -Wmaybe-uninitialized from gcc 10/11 here.
#if defined(__GNUC__) and not defined(__clang__) and __GNUC__ >= 10 and __GNUC__ < 12
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
    return gsl::at(
        collapsed_to_storage,
        compute_collapsed_index(
            cpp20::array<size_t, sizeof...(N)>{{static_cast<size_t>(args)...}},
            make_cpp20_array_from_list<tmpl::conditional_t<
                0 != sizeof...(Indices), index_list, size_t>>()));
#if defined(__GNUC__) and not defined(__clang__) and __GNUC__ >= 10 and __GNUC__ < 12
#pragma GCC diagnostic pop
#endif
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this is
  /// a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \param tensor_index the tensor_index of which to get the storage_index
  /// \return the storage_index of a tensor_index
  template <typename I>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index(
      const std::array<I, sizeof...(Indices)>& tensor_index) {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(collapsed_to_storage,
                   compute_collapsed_index(
                       convert_to_cpp20_array(tensor_index),
                       make_cpp20_array_from_list<tmpl::conditional_t<
                           0 != sizeof...(Indices), index_list, size_t>>()));
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this is
  /// a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \tparam N the comma separated list of the tensor_index of which to get the
  /// storage_index
  /// \return the storage_index of a tensor_index
  template <int... N, Requires<(sizeof...(N) > 0)> = nullptr>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index() {
    static_assert(sizeof...(Indices) == sizeof...(N),
                  "the number arguments must be equal to rank_");
    constexpr std::size_t storage_index =
        collapsed_to_storage_[compute_collapsed_index(
            cpp20::array<size_t, sizeof...(N)>{{N...}},
            make_cpp20_array_from_list<index_list>())];
    return storage_index;
  }

  /// Get the multiplicity of the storage_index
  /// \param storage_index the storage_index of which to get the multiplicity
  SPECTRE_ALWAYS_INLINE static constexpr size_t multiplicity(
      const size_t storage_index) {
    constexpr auto multiplicity = multiplicity_;
    return gsl::at(multiplicity, storage_index);
  }

  /// Get the array of collapsed index to storage_index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t,
                                                    number_of_components()>
  collapsed_to_storage() {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return collapsed_to_storage;
  }

  /// Get the storage_index for the specified collapsed index
  ///
  /// NOTE(review): returns `int` although the table holds `size_t`, so the
  /// value is narrowed — confirm whether callers rely on the `int` return.
  SPECTRE_ALWAYS_INLINE static constexpr int collapsed_to_storage(
      const size_t i) {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(collapsed_to_storage, i);
  }

  /// Get the array of tensor_index's corresponding to the storage_index's.
  SPECTRE_ALWAYS_INLINE static constexpr const cpp20::array<
      cpp20::array<size_t, sizeof...(Indices) == 0 ? 1 : sizeof...(Indices)>,
      size()>
  storage_to_tensor_index() {
    constexpr auto storage_to_tensor = storage_to_tensor_;
    return storage_to_tensor;
  }

  /// Build a human-readable name for the component at `tensor_index`; see
  /// ComponentNameImpl for the labeling rules.
  template <typename T>
  SPECTRE_ALWAYS_INLINE static std::string component_name(
      const std::array<T, rank()>& tensor_index,
      const std::array<std::string, rank()>& axis_labels) {
    return ComponentNameImpl<sizeof...(Indices)>::template apply<Structure>(
        tensor_index, axis_labels);
  }
};
576 : } // namespace Tensor_detail
|