// Distributed under the MIT License.
// See LICENSE.txt for details.

#pragma once

#include <array>
#include <cstddef>
#include <limits>
#include <optional>
#include <pup.h>
#include <string>
#include <vector>

#include "DataStructures/DataBox/ObservationBox.hpp"
#include "DataStructures/DataBox/ValidateSelection.hpp"
#include "DataStructures/DataVector.hpp"
#include "DataStructures/Tags/TempTensor.hpp"
#include "DataStructures/TempBuffer.hpp"
#include "DataStructures/Tensor/Tensor.hpp"
#include "Domain/Amr/Flag.hpp"
#include "NumericalAlgorithms/Spectral/Mesh.hpp"
#include "Options/Context.hpp"
#include "Options/ParseError.hpp"
#include "Options/String.hpp"
#include "ParallelAlgorithms/Amr/Criteria/Criterion.hpp"
#include "ParallelAlgorithms/Events/Tags.hpp"
#include "Utilities/Algorithm.hpp"
#include "Utilities/Gsl.hpp"
#include "Utilities/MakeArray.hpp"
#include "Utilities/TMPL.hpp"

/// \cond
template <size_t>
class ElementId;
/// \endcond

namespace amr::Criteria {

namespace Constraints_detail {

/*!
 * \brief Computes the (squared) normalization factor $N_\hat{k}^2$ (see
 * `logical_constraints` below)
 */
template <size_t Dim>
void normalization_factor_square(
    gsl::not_null<tnsr::i<DataVector, Dim, Frame::ElementLogical>*> result,
    const Jacobian<DataVector, Dim, Frame::ElementLogical, Frame::Inertial>&
        jacobian);

/*!
 * \brief Computes a measure of the constraints in each logical direction of
 * the grid
 *
 * \requires `constraints_tensor` to be a tensor of rank 2 or higher. The first
 * index must be a lower spatial index that originates from a derivative.
 *
 * We follow \cite Szilagyi2014fna, Eqs. (62)-(64), and compute
 *
 * \begin{align}
 * E_\hat{k} &= \frac{1}{N_\hat{k}} \sqrt{\sum_{a\ldots} \left(\sum_{i}
 * \frac{\partial x^i}{\partial x^\hat{k}} C_{ia\ldots}\right)^2} \\
 * N_\hat{k} &= \sqrt{\sum_{i} \left(\frac{\partial x^i}{\partial x^\hat{k}}
 * \right)^2}
 * \end{align}
 *
 * This transforms the first lower spatial index of the tensor to the
 * element-logical frame, then takes an L2 norm over all remaining indices.
 * The (squared) normalization factor $N_\hat{k}^2$ is computed by
 * `normalization_factor_square` and passed in as an argument.
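 *
 * \par Example
 * The following is a minimal, standalone sketch of the formula above for a
 * single logical direction $\hat{k}$ and a rank-2 constraint $C_{ia}$ in
 * three dimensions. The names `jacobian` and `constraint` and the plain
 * `std::array` layout are purely illustrative and are not the API used by
 * this file:
 *
 * \code{.cpp}
 * // Standalone sketch (needs <array> and <cmath>): E_khat at a single grid
 * // point. `jacobian[i][khat]` holds dx^i/dxhat^khat and `constraint[i][a]`
 * // holds C_{ia}.
 * double logical_constraint_measure(
 *     const std::array<std::array<double, 3>, 3>& jacobian,
 *     const std::array<std::array<double, 4>, 3>& constraint,
 *     const size_t khat) {
 *   // N_khat^2 = sum_i (dx^i/dxhat^khat)^2
 *   double norm_square = 0.;
 *   for (size_t i = 0; i < 3; ++i) {
 *     norm_square += jacobian[i][khat] * jacobian[i][khat];
 *   }
 *   // Transform the first (derivative) index to the logical frame, then
 *   // take the L2 norm over the remaining index `a`.
 *   double result_square = 0.;
 *   for (size_t a = 0; a < 4; ++a) {
 *     double contracted = 0.;
 *     for (size_t i = 0; i < 3; ++i) {
 *       contracted += jacobian[i][khat] * constraint[i][a];
 *     }
 *     result_square += contracted * contracted;
 *   }
 *   return std::sqrt(result_square / norm_square);
 * }
 * \endcode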
 */
template <size_t Dim, typename TensorType>
void logical_constraints(
    gsl::not_null<tnsr::i<DataVector, Dim, Frame::ElementLogical>*> result,
    gsl::not_null<DataVector*> buffer, const TensorType& constraints_tensor,
    const Jacobian<DataVector, Dim, Frame::ElementLogical, Frame::Inertial>&
        jacobian,
    const tnsr::i<DataVector, Dim, Frame::ElementLogical>&
        normalization_factor_square);

/*!
 * \brief Apply the AMR criterion to one of the constraints
 *
 * The `result` is the current decision in each dimension based on the previous
 * constraints. This function will update the flags if necessary. It takes
 * the "max" of the current and new flags, where the "highest" flag is
 * `Flag::IncreaseResolution`, followed by `Flag::DoNothing`, and then
 * `Flag::DecreaseResolution`.
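 *
 * \par Example
 * A minimal sketch of the decision and "max" logic described above, assuming
 * the largest value of `logical_constraints` in one direction has already
 * been reduced to a single `double` (the helper names here are hypothetical,
 * not part of this file):
 *
 * \code{.cpp}
 * // Decide the flag for one logical direction of one constraint.
 * Flag decide(const double max_constraint, const double abs_target,
 *             const double coarsening_factor) {
 *   if (max_constraint > abs_target) {
 *     return Flag::IncreaseResolution;
 *   }
 *   if (max_constraint < abs_target * coarsening_factor) {
 *     return Flag::DecreaseResolution;
 *   }
 *   return Flag::DoNothing;
 * }
 *
 * // "Max" of the current and new flag: IncreaseResolution wins over
 * // DoNothing, which wins over DecreaseResolution. An Undefined current
 * // flag is simply replaced.
 * Flag combine(const Flag current, const Flag candidate) {
 *   if (current == Flag::Undefined) {
 *     return candidate;
 *   }
 *   if (current == Flag::IncreaseResolution or
 *       candidate == Flag::IncreaseResolution) {
 *     return Flag::IncreaseResolution;
 *   }
 *   if (current == Flag::DoNothing or candidate == Flag::DoNothing) {
 *     return Flag::DoNothing;
 *   }
 *   return Flag::DecreaseResolution;
 * }
 * \endcode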
 */
template <size_t Dim>
void max_over_components(
    gsl::not_null<std::array<Flag, Dim>*> result,
    const tnsr::i<DataVector, Dim, Frame::ElementLogical>& logical_constraints,
    double abs_target, double coarsening_factor);

}  // namespace Constraints_detail

/*!
 * \brief Refine the grid towards the target constraint violation
 *
 * - If any constraint is above the target value, the element will be
 *   p-refined.
 * - If all constraints are below the target times the "coarsening factor",
 *   the element will be p-coarsened.
 *
 * This criterion is based on Sec. 6.1.4 in \cite Szilagyi2014fna .
 *
 * If the coarsening factor turns out to be hard to choose, we can try to
 * eliminate it by projecting the variables to a lower polynomial order before
 * computing the constraints.
 *
 * \tparam Dim Spatial dimension of the grid
 * \tparam TensorTags List of tags of the constraints to be monitored. These
 * must be tensors of rank 2 or higher. The first index must be a lower spatial
 * index that originates from a derivative.
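 *
 * \par Example
 * A minimal sketch of constructing this criterion directly in C++, assuming a
 * hypothetical constraint tag `MyConstraintTag` whose `db::tag_name` is
 * "MyConstraint". In practice the criterion is usually created from the
 * input-file options listed below:
 *
 * \code{.cpp}
 * using constraint_tags = tmpl::list<MyConstraintTag>;
 * // p-refine if the constraint exceeds 1e-6, p-coarsen once it drops below
 * // 1e-6 * 0.1 = 1e-7.
 * const amr::Criteria::Constraints<3, constraint_tags> criterion{
 *     {"MyConstraint"}, 1.e-6, 0.1};
 * \endcode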
 */
template <size_t Dim, typename TensorTags>
class Constraints : public Criterion {
 public:
  struct ConstraintsToMonitor {
    using type = std::vector<std::string>;
    static constexpr Options::String help = {"The constraints to monitor."};
    static size_t lower_bound_on_size() { return 1; }
  };
  struct AbsoluteTarget {
    using type = double;
    static constexpr Options::String help = {
        "The absolute target constraint violation. If any constraint is above "
        "this value, the element will be p-refined."};
    static double lower_bound() { return 0.; }
  };
  struct CoarseningFactor {
    using type = double;
    static constexpr Options::String help = {
        "If all constraints are below the 'AbsoluteTarget' times this factor, "
        "the element will be p-coarsened. "
        "A reasonable value is 0.1."};
    static double lower_bound() { return 0.; }
    static double upper_bound() { return 1.; }
  };

  using options =
      tmpl::list<ConstraintsToMonitor, AbsoluteTarget, CoarseningFactor>;

  static constexpr Options::String help = {
      "Refine the grid towards the target constraint violation"};

  Constraints() = default;

  Constraints(std::vector<std::string> vars_to_monitor, double abs_target,
              double coarsening_factor, const Options::Context& context = {});

  /// \cond
  explicit Constraints(CkMigrateMessage* msg);
  using PUP::able::register_constructor;
  WRAPPED_PUPable_decl_template(Constraints);  // NOLINT
  /// \endcond

  using compute_tags_for_observation_box = tmpl::append<
      TensorTags, tmpl::list<domain::Tags::JacobianCompute<
                                 Dim, Frame::ElementLogical, Frame::Inertial>,
                             Events::Tags::ObserverJacobianCompute<
                                 Dim, Frame::ElementLogical, Frame::Inertial>>>;

  std::string observation_name() override { return "Constraints"; }

  using argument_tags = tmpl::list<::Tags::ObservationBox>;

  template <typename ComputeTagsList, typename DataBoxType,
            typename Metavariables>
  std::array<Flag, Dim> operator()(
      const ObservationBox<ComputeTagsList, DataBoxType>& box,
      Parallel::GlobalCache<Metavariables>& cache,
      const ElementId<Dim>& element_id) const;

  void pup(PUP::er& p) override;

 private:
  std::vector<std::string> vars_to_monitor_{};
  double abs_target_ = std::numeric_limits<double>::signaling_NaN();
  double coarsening_factor_ = std::numeric_limits<double>::signaling_NaN();
};

// Out-of-line definitions
/// \cond

template <size_t Dim, typename TensorTags>
Constraints<Dim, TensorTags>::Constraints(
    std::vector<std::string> vars_to_monitor, const double abs_target,
    const double coarsening_factor, const Options::Context& context)
    : vars_to_monitor_(std::move(vars_to_monitor)),
      abs_target_(abs_target),
      coarsening_factor_(coarsening_factor) {
  db::validate_selection<TensorTags>(vars_to_monitor_, context);
}

template <size_t Dim, typename TensorTags>
Constraints<Dim, TensorTags>::Constraints(CkMigrateMessage* msg)
    : Criterion(msg) {}

template <size_t Dim, typename TensorTags>
template <typename ComputeTagsList, typename DataBoxType,
          typename Metavariables>
std::array<Flag, Dim> Constraints<Dim, TensorTags>::operator()(
    const ObservationBox<ComputeTagsList, DataBoxType>& box,
    Parallel::GlobalCache<Metavariables>& /*cache*/,
    const ElementId<Dim>& /*element_id*/) const {
  auto result = make_array<Dim>(Flag::Undefined);
  const auto& jacobian =
      get<Events::Tags::ObserverJacobian<Dim, Frame::ElementLogical,
                                         Frame::Inertial>>(box);
  // Set up memory buffers
  const size_t num_points = jacobian.begin()->size();
  TempBuffer<tmpl::list<::Tags::Tempi<0, Dim, Frame::ElementLogical>,
                        ::Tags::Tempi<1, Dim, Frame::ElementLogical>,
                        ::Tags::TempScalar<2>>>
      buffer{num_points};
  auto& normalization_factor_square =
      get<::Tags::Tempi<0, Dim, Frame::ElementLogical>>(buffer);
  auto& logical_constraints =
      get<::Tags::Tempi<1, Dim, Frame::ElementLogical>>(buffer);
  auto& scalar_buffer = get(get<::Tags::TempScalar<2>>(buffer));
  Constraints_detail::normalization_factor_square(
      make_not_null(&normalization_factor_square), jacobian);
  // Check all constraints in turn
  tmpl::for_each<TensorTags>([&result, &box, &jacobian,
                              &normalization_factor_square,
                              &logical_constraints, &scalar_buffer,
                              this](const auto tag_v) {
    // Stop if we have already decided to refine every dimension
    if (result == make_array<Dim>(Flag::IncreaseResolution)) {
      return;
    }
    using tag = tmpl::type_from<std::decay_t<decltype(tag_v)>>;
    const std::string tag_name = db::tag_name<tag>();
    // Skip if this tensor is not being monitored
    if (not alg::found(vars_to_monitor_, tag_name)) {
      return;
    }
    Constraints_detail::logical_constraints(
        make_not_null(&logical_constraints), make_not_null(&scalar_buffer),
        get<tag>(box), jacobian, normalization_factor_square);
    Constraints_detail::max_over_components(make_not_null(&result),
                                            logical_constraints, abs_target_,
                                            coarsening_factor_);
  });
  return result;
}

template <size_t Dim, typename TensorTags>
void Constraints<Dim, TensorTags>::pup(PUP::er& p) {
  p | vars_to_monitor_;
  p | abs_target_;
  p | coarsening_factor_;
}

template <size_t Dim, typename TensorTags>
PUP::able::PUP_ID Constraints<Dim, TensorTags>::my_PUP_ID = 0;  // NOLINT
/// \endcond

}  // namespace amr::Criteria