Line data Source code
1 1 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : /// \file
5 : /// Defines class TensorStructure<Symmetry, Indices...>
6 :
7 : #pragma once
8 :
9 : #include <array>
10 : #include <limits>
11 :
12 : #include "DataStructures/Tensor/IndexType.hpp"
13 : #include "DataStructures/Tensor/Metafunctions.hpp"
14 : #include "DataStructures/Tensor/Symmetry.hpp"
15 : #include "Utilities/Array.hpp"
16 : #include "Utilities/ConstantExpressions.hpp"
17 : #include "Utilities/ErrorHandling/Assert.hpp"
18 : #include "Utilities/ForceInline.hpp"
19 : #include "Utilities/Gsl.hpp"
20 : #include "Utilities/MakeArray.hpp"
21 : #include "Utilities/Requires.hpp"
22 : #include "Utilities/TMPL.hpp"
23 :
24 : namespace Tensor_detail {
/// \brief Count the independent components of a tensor with symmetry `symm`
/// and per-index dimensions `dims`.
///
/// \details
/// Indices sharing the same positive symmetry value form one symmetric group.
/// A group of \f$m\f$ indices, each of dimension \f$d\f$, contributes
/// \f$\binom{d + m - 1}{m}\f$ independent components (the multiset
/// coefficient, "stars and bars"); the total is the product over all groups.
///
/// \param symm the symmetry value of each index (must all be positive here;
/// indices with equal values are symmetric with each other)
/// \param dims the dimension of each index
/// \return the number of independent components
template <size_t Size>
constexpr size_t number_of_independent_components(
    const std::array<int, Size>& symm, const std::array<size_t, Size>& dims) {
  if constexpr (Size == 0) {
    (void)symm;
    (void)dims;

    // A scalar (rank 0) has exactly one component.
    return 1;
  } else if constexpr (Size == 1) {
    (void)symm;

    // Rank 1: no symmetry possible, one component per dimension.
    return dims[0];
  } else {
    // Find the largest symmetry label so we can iterate over all groups.
    size_t max_element = 0;
    for (size_t i = 0; i < Size; ++i) {
      // clang-tidy: internals of assert(), don't need gsl::at in constexpr
      // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay,cppcoreguidelines-pro-bounds-constant-array-index)
      assert(symm[i] > 0);
      // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-constant-array-index)
      max_element = std::max(static_cast<size_t>(ce_abs(symm[i])), max_element);
    }
    assert(max_element > 0);  // NOLINT
    size_t total_independent_components = 1;
    // One pass per symmetry group, labeled 1..max_element.
    for (size_t symm_index = 1; symm_index <= max_element; ++symm_index) {
      size_t number_of_indices_with_symm = 0;
      size_t dim_of_index = 0;
      for (size_t i = 0; i < Size; ++i) {
        if (static_cast<size_t>(symm[i]) == symm_index) {  // NOLINT
          ++number_of_indices_with_symm;
          // NOTE(review): takes the dimension of the last matching index —
          // assumes all indices in one symmetry group share a dimension,
          // presumably enforced by check_index_symmetry elsewhere; confirm.
          dim_of_index = dims[i];  // NOLINT
        }
      }
      assert(dim_of_index > 0);  // NOLINT
      assert(number_of_indices_with_symm > 0);  // NOLINT
      // Both branches compute binom(d + m - 1, m) == binom(d + m - 1, d - 1);
      // the smaller of m and d - 1 is used so the falling factorial and the
      // factorial divisor stay as small as possible (less overflow risk and
      // fewer constexpr steps).
      if (dim_of_index - 1 > number_of_indices_with_symm) {
        total_independent_components *=
            falling_factorial(dim_of_index + number_of_indices_with_symm - 1,
                              number_of_indices_with_symm) /
            factorial(number_of_indices_with_symm);
      } else {
        total_independent_components *=
            falling_factorial(dim_of_index + number_of_indices_with_symm - 1,
                              dim_of_index - 1) /
            factorial(dim_of_index - 1);
      }
    }
    return total_independent_components;
  }
}
74 :
/// \brief Total number of components of a tensor whose indices have the given
/// dimensions, i.e. the product of all entries of `dims`.
///
/// \param dims the dimension of each index
/// \return the product of the dimensions (1 for a rank-0 tensor)
template <size_t Size>
constexpr size_t number_of_components(const std::array<size_t, Size>& dims) {
  size_t product = 1;
  for (const size_t dim : dims) {
    product *= dim;
  }
  return product;
}
84 :
85 : template <typename T, typename S, size_t Size>
86 : constexpr void increment_tensor_index(cpp20::array<T, Size>& tensor_index,
87 : const cpp20::array<S, Size>& dims) {
88 : for (size_t i = 0; i < Size; ++i) {
89 : if (++tensor_index[i] < static_cast<T>(dims[i])) {
90 : return;
91 : }
92 : tensor_index[i] = 0;
93 : }
94 : }
95 :
96 : // index_to_swap_with takes the last two arguments as opposed to just one of
97 : // them so that when the max constexpr steps is reached on clang it is reached
98 : // in this function rather than in array.
99 : template <size_t Rank>
100 : constexpr size_t index_to_swap_with(
101 : const cpp20::array<size_t, Rank>& tensor_index,
102 : const cpp20::array<int, Rank>& sym, size_t index_to_swap_with,
103 : const size_t current_index) {
104 : // If you encounter infinite loop compilation errors here you are
105 : // constructing very large Tensor's. If you are sure Tensor is
106 : // the correct data structure you can extend the compiler limit
107 : // by passing the flag -fconstexpr-steps=<SOME LARGER VALUE>
108 : while (true) { // See source code comment on line above this one for fix
109 : if (Rank == index_to_swap_with) {
110 : return current_index;
111 : } else if (tensor_index[current_index] <
112 : tensor_index[index_to_swap_with] and
113 : sym[current_index] == sym[index_to_swap_with]) {
114 : return index_to_swap_with;
115 : }
116 : index_to_swap_with++;
117 : }
118 : }
119 :
120 : template <size_t Size, size_t SymmSize>
121 : constexpr cpp20::array<size_t, Size> canonicalize_tensor_index(
122 : cpp20::array<size_t, Size> tensor_index,
123 : const cpp20::array<int, SymmSize>& symm) {
124 : for (size_t i = 0; i < Size; ++i) {
125 : const size_t temp = tensor_index[i];
126 : const size_t swap = index_to_swap_with(tensor_index, symm, i, i);
127 : tensor_index[i] = tensor_index[swap];
128 : tensor_index[swap] = temp;
129 : }
130 : return tensor_index;
131 : }
132 :
133 : template <size_t Rank>
134 : constexpr size_t compute_collapsed_index(
135 : const cpp20::array<size_t, Rank>& tensor_index,
136 : const cpp20::array<size_t, Rank> dims) {
137 : size_t collapsed_index = 0;
138 : for (size_t i = Rank - 1; i < Rank; --i) {
139 : collapsed_index = tensor_index[i] + dims[i] * collapsed_index;
140 : }
141 : return collapsed_index;
142 : }
143 :
144 : /// \brief Computes a mapping from a collapsed_index to its storage_index
145 : ///
146 : /// \details
147 : /// Because each collapsed_index corresponds to a unique tensor_index, this map
148 : /// also effectively relates each unique tensor_index to its storage_index.
149 : /// While each index of the returned map corresponds to a unique tensor_index,
150 : /// the element stored at each index is a storage_index that may or may not be
151 : /// unique. If symmetries are present, this map will not be 1-1, as
152 : /// collapsed_indices that correspond to tensor_indices with the same canonical
153 : /// form will map to the same storage_index. Provided that any tensor_index is
154 : /// first converted to its corresponding collapsed_index, this map can be used
155 : /// to retrieve the storage_index of that tensor_index, canonicalized or not.
156 : ///
157 : /// \tparam Symm the Symmetry of the tensor
158 : /// \tparam NumberOfComponents the total number of components in the tensor
159 : /// \param index_dimensions the dimensions of the tensor's indices
160 : /// \return a mapping from a collapsed_index to its storage_index
template <typename Symm, size_t NumberOfComponents>
constexpr auto compute_collapsed_to_storage(
    const cpp20::array<size_t, tmpl::size<Symm>::value>& index_dimensions) {
  if constexpr (tmpl::size<Symm>::value != 0) {
    // Value-initialized to zero; every entry is overwritten in the loop below.
    cpp20::array<size_t, NumberOfComponents> collapsed_to_storage{};
    // Start at tensor_index (0, 0, ..., 0) and walk all index tuples in
    // collapsed-index order (first index varies fastest).
    auto tensor_index =
        convert_to_cpp20_array(make_array<tmpl::size<Symm>::value>(size_t{0}));
    // Number of distinct storage indices handed out so far.
    size_t count{0};
    for (auto& current_storage_index : collapsed_to_storage) {
      // Compute canonical tensor_index, which, for symmetric get_tensor_index
      // is in decreasing numerical order, e.g. (3,2) rather than (2,3).
      const auto canonical_tensor_index = canonicalize_tensor_index(
          tensor_index, make_cpp20_array_from_list<Symm>());
      // If the tensor_index was already in the canonical form, then it must be
      // a new unique entry and we add it to collapsed_to_storage_ as a new
      // integer, thus increasing the size_. Else, the StorageIndex has already
      // been determined so we look it up in the existing collapsed_to_storage
      // table. (This lookup relies on the canonical form having been visited
      // earlier in this loop's iteration order, so its entry is already
      // filled in.)
      if (tensor_index == canonical_tensor_index) {
        current_storage_index = count;
        ++count;
      } else {
        current_storage_index = collapsed_to_storage[compute_collapsed_index(
            canonical_tensor_index, index_dimensions)];
      }
      // Move to the next tensor_index.
      increment_tensor_index(tensor_index, index_dimensions);
    }
    return collapsed_to_storage;
  } else {
    (void)index_dimensions;

    // Rank 0 (scalar): a single component mapping to storage index 0.
    return cpp20::array<size_t, 1>{{0}};
  }
}
196 :
197 : /// \brief Computes a 1-1 mapping from a storage_index to its canonical
198 : /// tensor_index
199 : ///
200 : /// \details
201 : /// When symmetries are present, not all unique tensor_indices can be retrieved
202 : /// from this map, as some tensor_indices will share the same canonical form.
203 : /// Otherwise, if no symmetries are present, each unique tensor_index is already
204 : /// in the canonical form, and one that is not shared by another tensor_index,
205 : /// so this would equivalently mean a 1-1 mapping from a storage_index to a
206 : /// tensor_index. This means that when no symmetries are present, all unique
207 : /// tensor_indices of a tensor can be retrieved from this map.
208 : ///
209 : /// \tparam Symm the Symmetry of the tensor
210 : /// \tparam NumIndComps the number of independent components in the tensor, i.e.
211 : /// components equivalent due to symmetry counted only once
212 : /// \tparam NumComps the total number of components in the tensor
213 : /// \param collapsed_to_storage a mapping from a collapsed_index to its
214 : /// storage_index, which is only 1-1 if there are no symmetries
215 : /// \param index_dimensions the dimensions of the tensor's indices
216 : /// \return a 1-1 mapping from a storage_index to its canonical tensor_index
template <typename Symm, size_t NumIndComps, size_t NumComps>
constexpr auto compute_storage_to_tensor(
    const cpp20::array<size_t, NumComps>& collapsed_to_storage,
    const cpp20::array<size_t, tmpl::size<Symm>::value>& index_dimensions) {
  if constexpr (tmpl::size<Symm>::value > 0) {
    constexpr size_t rank = tmpl::size<Symm>::value;
    cpp20::array<cpp20::array<size_t, rank>, NumIndComps> storage_to_tensor{};
    // Walk every tensor_index in the same (collapsed-index) order used to
    // build collapsed_to_storage, starting from (0, 0, ..., 0).
    cpp20::array<size_t, rank> tensor_index =
        convert_to_cpp20_array(make_array<rank>(size_t{0}));
    for (const auto& current_storage_index : collapsed_to_storage) {
      // Symmetry-equivalent tuples share a storage index, so this slot may be
      // written several times — each time with the same canonical form, so
      // the final table is well-defined.
      storage_to_tensor[current_storage_index] = canonicalize_tensor_index(
          tensor_index, make_cpp20_array_from_list<Symm>());
      increment_tensor_index(tensor_index, index_dimensions);
    }
    return storage_to_tensor;
  } else {
    (void)collapsed_to_storage;
    (void)index_dimensions;

    // Rank 0 (scalar): single storage index mapping to the trivial index {0}.
    return cpp20::array<cpp20::array<size_t, 1>, 1>{
        {cpp20::array<size_t, 1>{{0}}}};
  }
}
240 :
241 : template <size_t NumIndComps, typename T, size_t NumComps>
242 : constexpr cpp20::array<size_t, NumIndComps> compute_multiplicity(
243 : const cpp20::array<T, NumComps>& collapsed_to_storage) {
244 : cpp20::array<size_t, NumIndComps> multiplicity =
245 : convert_to_cpp20_array(make_array<NumIndComps>(size_t{0}));
246 : for (const auto& current_storage_index : collapsed_to_storage) {
247 : ++multiplicity[current_storage_index];
248 : }
249 : return multiplicity;
250 : }
251 :
252 : template <size_t NumIndices>
253 : struct ComponentNameImpl {
254 : template <typename Structure, typename T>
255 : static std::string apply(
256 : const std::array<T, NumIndices>& tensor_index,
257 : const std::array<std::string, NumIndices>& axis_labels) {
258 : const size_t storage_index = Structure::get_storage_index(tensor_index);
259 : std::array<std::string, Structure::rank()> labels = axis_labels;
260 : constexpr auto index_dim = Structure::dims();
261 : for (size_t i = 0; i < Structure::rank(); ++i) {
262 : if (gsl::at(labels, i).length() == 0) {
263 : if (gsl::at(Structure::index_types(), i) == IndexType::Spacetime) {
264 : switch (gsl::at(index_dim, i)) {
265 : case 2:
266 : gsl::at(labels, i) = "tx";
267 : break;
268 : case 3:
269 : gsl::at(labels, i) = "txy";
270 : break;
271 : case 4:
272 : gsl::at(labels, i) = "txyz";
273 : break;
274 : default:
275 : ERROR("Tensor dim["
276 : << i
277 : << "] must be 1,2,3, or 4 for default axis_labels. "
278 : "Either pass a string or extend the function.");
279 : }
280 : } else {
281 : switch (gsl::at(index_dim, i)) {
282 : case 1:
283 : gsl::at(labels, i) = "x";
284 : break;
285 : case 2:
286 : gsl::at(labels, i) = "xy";
287 : break;
288 : case 3:
289 : gsl::at(labels, i) = "xyz";
290 : break;
291 : default:
292 : ERROR("Tensor dim["
293 : << i
294 : << "] must be 1,2, or 3 for default axis_labels. "
295 : "Either pass a string or extend the function.");
296 : }
297 : }
298 : } else {
299 : if (gsl::at(axis_labels, i).length() != gsl::at(index_dim, i)) {
300 : ERROR("Dimension mismatch: Tensor has dim = "
301 : << gsl::at(index_dim, i) << ", but you specified "
302 : << gsl::at(axis_labels, i).length() << " different labels in "
303 : << gsl::at(axis_labels, i));
304 : }
305 : }
306 : }
307 : // Create string labeling get_tensor_index
308 : std::stringstream ss;
309 : const auto canonical_tensor_index =
310 : Structure::get_canonical_tensor_index(storage_index);
311 : for (size_t r = 0; r < Structure::rank(); ++r) {
312 : ss << gsl::at(labels, r)[gsl::at(canonical_tensor_index, r)];
313 : }
314 : return ss.str();
315 : }
316 : };
317 :
318 : template <>
319 : struct ComponentNameImpl<0> {
320 : template <typename Structure, typename T>
321 : static std::string apply(const std::array<T, 0>& /*tensor_index*/,
322 : const std::array<std::string, 0>& /*axis_labels*/) {
323 : return "Scalar";
324 : }
325 : };
326 :
327 : /// \ingroup TensorGroup
328 : /// A lookup table between each tensor_index and storage_index
329 : ///
330 : /// 1. tensor_index: (a, b, c,...). There are Dim^rank tensor_index's
331 : /// 2. collapsed_index: a + Dim * (b + Dim * (c + ...)), there are Dim^rank
332 : /// unique collapsed indices and there is a 1-1 map between
333 : /// a tensor_index and a collapsed_index.
334 : /// 3. storage_index: index into the storage vector of the Tensor. This depends
335 : /// on symmetries of the tensor, rank, and dimensionality. If
336 : /// the Tensor has symmetries, tensor_indices that are
337 : /// equivalent due to symmetry will have the same
338 : /// storage_index and canonical form. This means that the
339 : /// mapping between tensor_indices and storage_indices is 1-1
340 : /// only if no symmetries are present, but there is a 1-1
341 : /// mapping between canonical tensor_indices and
342 : /// storage_indices, regardless of symmetry.
343 : /// \tparam Symm the symmetry of the Tensor
344 : /// \tparam Indices list of tensor_index's giving the dimensionality and frame
345 : /// of the index
template <typename Symm, typename... Indices>
struct Structure {
  static_assert(
      TensorMetafunctions::check_index_symmetry_v<Symm, Indices...>,
      "Cannot construct a Tensor with a symmetric pair that are not the same.");
  static_assert(tmpl::size<Symm>::value == sizeof...(Indices),
                "The number of indices in Symmetry do not match the number of "
                "indices given to the Structure.");
  static_assert(
      tmpl2::flat_all_v<tt::is_tensor_index_type<Indices>::value...>,
      "All Indices passed to Structure must be of type TensorIndexType.");

  using index_list = tmpl::list<Indices...>;
  using symmetry = Symm;

  /// The rank of the tensor, i.e. its number of indices
  SPECTRE_ALWAYS_INLINE static constexpr size_t rank() {
    return sizeof...(Indices);
  }

  /// The number of independent components, i.e. components equivalent under
  /// the symmetries counted only once. (The `tmpl::conditional_t` dummy types
  /// keep the rank-0 instantiation well-formed.)
  SPECTRE_ALWAYS_INLINE static constexpr size_t size() {
    constexpr auto number_of_independent_components =
        ::Tensor_detail::number_of_independent_components(
            make_array_from_list<
                tmpl::conditional_t<sizeof...(Indices) != 0, Symm, int>>(),
            make_array_from_list<tmpl::conditional_t<sizeof...(Indices) != 0,
                                                     index_list, size_t>>());
    return number_of_independent_components;
  }

  /// The total number of components, i.e. the product of all index dimensions
  SPECTRE_ALWAYS_INLINE static constexpr size_t number_of_components() {
    constexpr auto number_of_components = ::Tensor_detail::number_of_components(
        make_array_from_list<tmpl::conditional_t<sizeof...(Indices) != 0,
                                                 index_list, size_t>>());
    return number_of_components;
  }

  /// A mapping between each collapsed_index and its storage_index. See
  /// compute_collapsed_to_storage for details.
  static constexpr auto collapsed_to_storage_ =
      compute_collapsed_to_storage<Symm, number_of_components()>(
          make_cpp20_array_from_list<tmpl::conditional_t<
              sizeof...(Indices) == 0, size_t, index_list>>());
  /// A 1-1 mapping between each storage_index and its canonical tensor_index.
  /// See compute_storage_to_tensor for details.
  static constexpr auto storage_to_tensor_ = compute_storage_to_tensor<Symm,
                                                                       size()>(
      collapsed_to_storage_,
      make_cpp20_array_from_list<
          tmpl::conditional_t<sizeof...(Indices) == 0, size_t, index_list>>());
  /// For each storage_index, the number of collapsed_indices mapping to it.
  /// See compute_multiplicity for details.
  static constexpr auto multiplicity_ =
      compute_multiplicity<size()>(collapsed_to_storage_);

  // Retrieves the dimensionality of the I'th index
  template <int I>
  SPECTRE_ALWAYS_INLINE static constexpr size_t dim() {
    static_assert(sizeof...(Indices),
                  "A scalar does not have any indices from which you can "
                  "retrieve the dimensionality.");
    return tmpl::at<index_list, tmpl::int32_t<I>>::value;
  }

  /// The dimensionality of each index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t, sizeof...(Indices)>
  dims() {
    constexpr auto dims = make_array_from_list<
        tmpl::conditional_t<sizeof...(Indices) != 0, index_list, size_t>>();
    return dims;
  }

  /// The symmetry value of each index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<int, sizeof...(Indices)>
  symmetries() {
    return make_array_from_list<
        tmpl::conditional_t<0 != sizeof...(Indices), Symm, int>>();
  }

  /// The IndexType (spatial or spacetime) of each index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<IndexType,
                                                    sizeof...(Indices)>
  index_types() {
    return std::array<IndexType, sizeof...(Indices)>{{Indices::index_type...}};
  }

  /// Return array of the valence of each index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<UpLo, sizeof...(Indices)>
  index_valences() {
    return std::array<UpLo, sizeof...(Indices)>{{Indices::ul...}};
  }

  /// Return array of the frame of each index
  SPECTRE_ALWAYS_INLINE static constexpr auto index_frames() {
    return std::tuple<typename Indices::Frame...>{};
  }

  /// \brief Get the canonical tensor_index array of a storage_index
  ///
  /// \details
  /// For a symmetric tensor \f$T_{(ab)}\f$ with an associated symmetry list
  /// `Symmetry<1, 1>`, this will return, e.g. `{{3, 2}}` rather than `{{2, 3}}`
  /// for that particular index. Note that the canonical ordering is
  /// implementation-defined.
  ///
  /// As `storage_to_tensor_` is a computed 1-1 mapping between a storage_index
  /// and canonical tensor_index, we simply retrieve the canonical tensor_index
  /// from this map.
  ///
  /// \param storage_index the storage_index of which to get the canonical
  /// tensor_index
  /// \return the canonical tensor_index array of a storage_index
  template <size_t Rank = sizeof...(Indices)>
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t, Rank>
  get_canonical_tensor_index(const size_t storage_index) {
    if constexpr (Rank != 0) {
      // NOTE(review): the local constexpr copy of the class-static table is
      // used throughout this class — presumably to guarantee compile-time
      // evaluation / avoid odr-use of the static member; confirm before
      // simplifying.
      constexpr auto storage_to_tensor = storage_to_tensor_;
      return gsl::at(storage_to_tensor, storage_index);
    } else {
      (void)storage_index;

      // A scalar has an empty tensor_index.
      return std::array<size_t, 0>{};
    }
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this is
  /// a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \param args comma separated list of the tensor_index of which to get the
  /// storage_index
  /// \return the storage_index of a tensor_index
  template <typename... N>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index(
      const N... args) {
    static_assert(sizeof...(Indices) == sizeof...(N),
                  "the number arguments must be equal to rank_");
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(
        collapsed_to_storage,
        compute_collapsed_index(
            cpp20::array<size_t, sizeof...(N)>{{static_cast<size_t>(args)...}},
            make_cpp20_array_from_list<tmpl::conditional_t<
                0 != sizeof...(Indices), index_list, size_t>>()));
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this is
  /// a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_.
  ///
  /// \param tensor_index the tensor_index of which to get the storage_index
  /// \return the storage_index of a tensor_index
  template <typename I>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index(
      const std::array<I, sizeof...(Indices)>& tensor_index) {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(collapsed_to_storage,
                   compute_collapsed_index(
                       convert_to_cpp20_array(tensor_index),
                       make_cpp20_array_from_list<tmpl::conditional_t<
                           0 != sizeof...(Indices), index_list, size_t>>()));
  }

  /// \brief Get the storage_index of a tensor_index
  ///
  /// \details
  /// This first computes the collapsed_index of the given tensor_index (this is
  /// a 1-1 mapping), then retrieves the storage_index from
  /// collapsed_to_storage_. The whole lookup happens at compile time.
  ///
  /// \tparam N the comma separated list of the tensor_index of which to get the
  /// storage_index
  /// \return the storage_index of a tensor_index
  template <int... N, Requires<(sizeof...(N) > 0)> = nullptr>
  SPECTRE_ALWAYS_INLINE static constexpr std::size_t get_storage_index() {
    static_assert(sizeof...(Indices) == sizeof...(N),
                  "the number arguments must be equal to rank_");
    constexpr std::size_t storage_index =
        collapsed_to_storage_[compute_collapsed_index(
            cpp20::array<size_t, sizeof...(N)>{{N...}},
            make_cpp20_array_from_list<index_list>())];
    return storage_index;
  }

  /// Get the multiplicity of the storage_index
  /// \param storage_index the storage_index of which to get the multiplicity
  SPECTRE_ALWAYS_INLINE static constexpr size_t multiplicity(
      const size_t storage_index) {
    constexpr auto multiplicity = multiplicity_;
    return gsl::at(multiplicity, storage_index);
  }

  /// Get the array of collapsed index to storage_index
  SPECTRE_ALWAYS_INLINE static constexpr std::array<size_t,
                                                    number_of_components()>
  collapsed_to_storage() {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return collapsed_to_storage;
  }

  /// Get the storage_index for the specified collapsed index
  // NOTE(review): returns int while the table holds size_t — looks like an
  // oversight; changing it to size_t could affect callers, so verify before
  // touching.
  SPECTRE_ALWAYS_INLINE static constexpr int collapsed_to_storage(
      const size_t i) {
    constexpr auto collapsed_to_storage = collapsed_to_storage_;
    return gsl::at(collapsed_to_storage, i);
  }

  /// Get the array of tensor_index's corresponding to the storage_index's.
  SPECTRE_ALWAYS_INLINE static constexpr const cpp20::array<
      cpp20::array<size_t, sizeof...(Indices) == 0 ? 1 : sizeof...(Indices)>,
      size()>
  storage_to_tensor_index() {
    constexpr auto storage_to_tensor = storage_to_tensor_;
    return storage_to_tensor;
  }

  /// Build the human-readable name of the component at `tensor_index`; see
  /// ComponentNameImpl::apply for the label conventions.
  template <typename T>
  SPECTRE_ALWAYS_INLINE static std::string component_name(
      const std::array<T, rank()>& tensor_index,
      const std::array<std::string, rank()>& axis_labels) {
    return ComponentNameImpl<sizeof...(Indices)>::template apply<Structure>(
        tensor_index, axis_labels);
  }
};
570 : } // namespace Tensor_detail
|