Line data Source code
1 0 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : #pragma once
5 :
6 : #include "NumericalAlgorithms/SphericalHarmonics/TensorYlm.hpp"
7 :
8 : #include <cstddef>
9 :
10 : #include "Utilities/Gsl.hpp"
11 :
12 : namespace ylm::TensorYlm {
13 :
14 : /*!
15 : * \brief Fills a sparse matrix that does a TensorYlm Cartesian
16 : * to Spherical operation.
17 : *
18 : * If the CoefficientNormalization parameter is set to Standard, then
19 : * assumes that the input, $T^{\tilde A}_{\ell' m'}$, is stored in a
20 : * Tensor<DataVector>. Multiplying the resulting
21 : * sparse matrix by the Tensor<DataVector> is equivalent to
22 : * evaluating the right-hand side of Eq. $(\ref{eq:C2S})$.
23 : * If the CoefficientNormalization parameter is set to Spherepack, then
24 : * assumes the matrix will multiply a Tensor<DataVector> containing
25 : * ${\breve T}^{\tilde A}_{\ell' m'}$ and the transform operation is
26 : * equivalent to Eq. $(\ref{eq:C2SSpherepack})$.
27 : *
28 : * We assume that the independent components of the Tensor<DataVector>
29 : * are stored contiguously in memory, in order of the `storage_index` of
30 : * the Tensor. However, we do allow for a stride. This means that we
31 : * can point to memory starting with the first element of
32 : * $T^{\tilde A}_{\ell' m'}$ and multiply that by the sparse matrix we compute
33 : * here, and get the result.
34 : *
35 : * The memory layout here is different from that in SpEC. In SpEC, each
36 : * tensor component is stored in separately-allocated memory, so the
37 : * SpEC equivalent of the fill_cart_to_sphere function fills $N^2$ sparse
38 : * matrices, where $N$ is the number of independent components of the
39 : * Tensor. The advantage of the SpEC method is that each sparse matrix
40 : * is smaller, so sorting elements into the correct order while
41 : * constructing each sparse matrix is faster (sorting is > linear in
42 : * the number of matrix elements). The disadvantage of the SpEC
43 : * method is that evaluating the coefficients for a single Tensor involves
44 : * $N^2$ matrix-vector multiplications, whereas here evaluating the
45 : * coefficients involves only one matrix-vector multiplication, which should
46 : * have more efficient memory access. It is not clear which method is
47 : * faster overall without more profiling.
48 : *
49 : * ## Explicit formulas
50 : *
51 : * The following formulas come from Klinger and Scheel, in prep.
52 : *
53 : * For rank-1 tensors, the expression for
54 : * $C_{\ell' m'\tilde{D}}^{\ell'' m'' B}$ is
55 : * \begin{align}
56 : * C_{\ell' m'\tilde{D}}^{\ell'' m'' B} &=
57 : * \sqrt{\frac{(2\ell'+1)(2\ell''+1)}{2}}
58 : * (-1)^{m''} (-1)^{\delta(\tilde{D},\mathbf{e}_y)} (-1)^{\delta(s_B,-1)}
59 : * \left(\begin{array}{ccc}
60 : * \ell' & \ell'' & 1 \cr
61 : * s_B & 0 & -s_B
62 : * \end{array}\right)
63 : * \nonumber \\
64 : * &\times \sum_j k_j(\tilde{D})
65 : * \left(\begin{array}{ccc}
66 : * \ell' & \ell'' & 1 \cr
67 : * -m' & m'' & -m_j(\tilde{D})
68 : * \end{array}\right),
69 : * \end{align}
70 : * where the 6-element "matrices"
71 : * in parentheses are Wigner 3-J symbols.
72 : *
73 : * For second-rank tensors, the expression for
74 : * $C_{\ell' m'\tilde{D}}^{\ell'' m'' B}$ is
75 : * \begin{align}
76 : * C_{\ell' m'\tilde{D}}^{\ell'' m'' B}
77 : * &=
78 : * \frac{1}{2}(-1)^{\delta(s_{B_1},-1)}(-1)^{\delta(s_{B_2},-1)}(-1)^{m'}
79 : * (-1)^{\delta(\tilde{D}_1,\mathbf{e}_y)}
80 : * (-1)^{\delta(\tilde{D}_2,\mathbf{e}_y)}
81 : * \sqrt{(2\ell''+1)(2\ell'+1)}
82 : * \nonumber \\
83 : * &\times
84 : * \sum_{u,v,\tilde{\ell},\tilde{m},\tilde{s}}
85 : * (2 \tilde{\ell}+1) (-1)^{\tilde{s}} S(\tilde{\ell},\tilde{D})
86 : * k_u(\tilde{D}_1) k_v(\tilde{D}_2)
87 : * \left(\begin{array}{ccc}
88 : * \ell' & \ell'' & \tilde{\ell} \cr
89 : * -m' & m'' & \tilde{m}
90 : * \end{array}\right)
91 : * \nonumber \\
92 : * &\qquad\times
93 : * \left(\begin{array}{ccc}
94 : * \ell' & \ell'' & \tilde{\ell} \cr
95 : * s_{B_1}+s_{B_2} & 0 & -\tilde{s}
96 : * \end{array}\right)
97 : * \left(\begin{array}{ccc}
98 : * 1 & 1 & \tilde{\ell} \cr
99 : * -m_u(\tilde{D}_1)&-m_v(\tilde{D}_2)&-\tilde{m}
100 : * \end{array}\right)
101 : * \left(\begin{array}{ccc}
102 : * 1 & 1 & \tilde{\ell} \cr
103 : * -s_{B_1}&-s_{B_2}&\tilde{s}
104 : * \end{array}\right),
105 : * \end{align}
106 : * where the factor $S(\tilde{\ell},\tilde{D})$ is a symmetry factor.
107 : * For a 2nd-rank tensor with no symmetries, $S(\tilde{\ell},\tilde{D})$ is
108 : * unity, but for a tensor symmetric in $(\tilde{D}_1,\tilde{D}_2)$
109 : * the matrix elements $C_{\ell' m'\tilde{D}}^{\ell'' m'' B}$ are
110 : * multiplied only by tensor components with $\tilde{D}_1\geq\tilde{D}_2$
111 : * and the symmetry is accounted for by setting
112 : * \begin{align}
113 : * S(\tilde{\ell},\tilde{D}) &=
114 : * (1+(-1)^{\tilde{\ell}})\frac{2-\delta(\tilde{D}_1,\tilde{D}_2)}{2}.
115 : * \end{align}
116 : *
117 : * For third-rank tensors, the expression for
118 : * $C_{\ell' m'\tilde{D}}^{\ell'' m'' B}$ is
119 : * \begin{align}
120 : * C_{\ell' m'\tilde{D}}^{\ell'' m'' B}
121 : * &=
122 : * (-1)^{m'}
123 : * (-1)^{\delta(s_{B_1},-1)}
124 : * (-1)^{\delta(s_{B_2},-1)}
125 : * (-1)^{\delta(s_{B_3},-1)}
126 : * (-1)^{\delta(\tilde{D}_1,\mathbf{e}_y)}
127 : * (-1)^{\delta(\tilde{D}_2,\mathbf{e}_y)}
128 : * (-1)^{\delta(\tilde{D}_3,\mathbf{e}_y)}
129 : * \nonumber \\
130 : * &\times
131 : * \sqrt{\frac{(2\ell''+1)(2\ell'+1)}{8}}
132 : * \sum_{u,v,w,\tilde{\ell},\tilde{m},\tilde{s},\hat{\ell},\hat{m},\hat{s}}
133 : * (2 \tilde{\ell}+1) (2 \hat{\ell}+1) (-1)^{\tilde{s}+\hat{s}+\tilde{m}}
134 : * k_u(\tilde{D}_2) k_v(\tilde{D}_3) k_w(\tilde{D}_1)
135 : * \nonumber \\
136 : * &\qquad\times
137 : * S(\tilde{\ell},\tilde{D})
138 : * \left(\begin{array}{ccc}
139 : * \ell' & \ell'' & \hat{\ell} \cr
140 : * -m' & m'' & \hat{m}
141 : * \end{array}\right)
142 : * \left(\begin{array}{ccc}
143 : * \ell' & \ell'' & \hat{\ell} \cr
144 : * s_{B_1}+s_{B_2}+s_{B_3} & 0 & -\hat{s}
145 : * \end{array}\right)
146 : * \nonumber \\
147 : * &\qquad\times
148 : * \left(\begin{array}{ccc}
149 : * 1 & 1 & \tilde{\ell} \cr
150 : * -m_u(\tilde{D}_2)&-m_v(\tilde{D}_3)&-\tilde{m}
151 : * \end{array}\right)
152 : * \left(\begin{array}{ccc}
153 : * 1 & 1 & \tilde{\ell} \cr
154 : * -s_{B_2}&-s_{B_3}&\tilde{s}
155 : * \end{array}\right)
156 : * \nonumber \\
157 : * &\qquad\times
158 : * \left(\begin{array}{ccc}
159 : * 1 & \tilde{\ell} & \hat{\ell} \cr
160 : * -m_w(\tilde{D}_1)&\tilde{m}&-\hat{m}
161 : * \end{array}\right)
162 : * \left(\begin{array}{ccc}
163 : * 1 & \tilde{\ell} & \hat{\ell} \cr
164 : * -s_{B_1}&-\tilde{s}&\hat{s}
165 : * \end{array}\right).
166 : * \end{align}
167 : * As in the rank-2 case,
168 : * $S(\tilde{\ell},\tilde{D})$ is a symmetry factor that is unity
169 : * for a tensor with no symmetries and is
170 : * \begin{align}
171 : * S(\tilde{\ell},\tilde{D}) &=
172 : * (1+(-1)^{\tilde{\ell}})\frac{2-\delta(\tilde{D}_2,\tilde{D}_3)}{2}
173 : * \end{align}
174 : * for a tensor symmetric on its last two indices. We don't consider
175 : * other symmetries because we don't find them in the cases we care
176 : * about.
177 : *
178 : * \tparam TensorStructure A Tensor_detail::Structure
179 : * \tparam SparseMatrixType A sparse matrix fillable by SparseMatrixFiller
180 : *
181 : * \param matrix The sparse matrix to fill
182 : * \param ell_max The maximum ylm ell value
183 : * \param coefficient_normalization Describes the normalization of coefficients.
184 : *
185 : */
186 : template <typename TensorStructure, typename SparseMatrixType>
187 1 : void fill_cart_to_sphere(gsl::not_null<SparseMatrixType*> matrix,
188 : size_t ell_max,
189 : CoefficientNormalization coefficient_normalization);  // declaration only — definition presumably out of line (explicit instantiation?); TODO confirm
190 :
191 : } // namespace ylm::TensorYlm
|