// Distributed under the MIT License.
// See LICENSE.txt for details.

#pragma once

#include <array>
#include <cstddef>
#include <limits>
#include <optional>
#include <pup.h>
#include <string>
#include <vector>

#include "DataStructures/DataBox/ObservationBox.hpp"
#include "DataStructures/DataBox/TagName.hpp"
#include "DataStructures/DataBox/ValidateSelection.hpp"
#include "DataStructures/DataVector.hpp"
#include "DataStructures/Tags/TempTensor.hpp"
#include "DataStructures/TempBuffer.hpp"
#include "DataStructures/Tensor/Tensor.hpp"
#include "Domain/Amr/Flag.hpp"
#include "NumericalAlgorithms/Spectral/Mesh.hpp"
#include "Options/Context.hpp"
#include "Options/ParseError.hpp"
#include "Options/String.hpp"
#include "ParallelAlgorithms/Amr/Criteria/Criterion.hpp"
#include "ParallelAlgorithms/Amr/Criteria/Type.hpp"
#include "ParallelAlgorithms/Events/Tags.hpp"
#include "Utilities/Algorithm.hpp"
#include "Utilities/Gsl.hpp"
#include "Utilities/MakeArray.hpp"
#include "Utilities/TMPL.hpp"

/// \cond
template <size_t>
class ElementId;
/// \endcond

namespace amr::Criteria {

namespace Constraints_detail {

/*!
 * \brief Computes the (squared) normalization factor $N_\hat{k}^2$ (see
 * `logical_constraints` below)
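 *
 * Explicitly, this is the square of $N_\hat{k}$ as defined in
 * `logical_constraints`:
 *
 * \begin{equation}
 * N_\hat{k}^2 = \sum_{i} \left(\frac{\partial x^i}{\partial x^\hat{k}}
 * \right)^2.
 * \end{equation}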
 */
template <size_t Dim>
void normalization_factor_square(
    gsl::not_null<tnsr::i<DataVector, Dim, Frame::ElementLogical>*> result,
    const Jacobian<DataVector, Dim, Frame::ElementLogical, Frame::Inertial>&
        jacobian);

/*!
 * \brief Computes a measure of the constraints in each logical direction of
 * the grid
 *
 * \requires `constraints_tensor` to be a tensor of rank 2 or higher. The first
 * index must be a lower spatial index that originates from a derivative.
 *
 * We follow \cite Szilagyi2014fna, Eqs. (62)-(64), to compute:
 *
 * \begin{align}
 * E_\hat{k} &= \frac{1}{N_\hat{k}} \sqrt{\sum_{a\ldots} \left(\sum_{i}
 * \frac{\partial x^i}{\partial x^\hat{k}} C_{ia\ldots}\right)^2} \\
 * N_\hat{k} &= \sqrt{\sum_{i} \left(\frac{\partial x^i}{\partial x^\hat{k}}
 * \right)^2}
 * \end{align}
 *
 * This transforms the first lower spatial index of the tensor to the
 * element-logical frame, then takes an L2 norm over all remaining indices.
 * The (squared) normalization factor $N_\hat{k}^2$ is computed by
 * `normalization_factor_square` and passed in as an argument.
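 *
 * The `result` has one component per logical direction $\hat{k}$, holding
 * $E_\hat{k}$ at each grid point. For example, for a rank-3 constraint
 * $C_{iab}$, the sum over the remaining indices $a\ldots$ runs over all
 * components $ab$.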
 */
template <size_t Dim, typename TensorType>
void logical_constraints(
    gsl::not_null<tnsr::i<DataVector, Dim, Frame::ElementLogical>*> result,
    gsl::not_null<DataVector*> buffer, const TensorType& constraints_tensor,
    const Jacobian<DataVector, Dim, Frame::ElementLogical, Frame::Inertial>&
        jacobian,
    const tnsr::i<DataVector, Dim, Frame::ElementLogical>&
        normalization_factor_square);

/*!
 * \brief Apply the AMR criterion to one of the constraints
 *
 * The `result` is the current decision in each dimension based on the
 * previously checked constraints. This function updates the flags if
 * necessary: it takes the "max" of the current and new flags, where the
 * "highest" flag is `Flag::IncreaseResolution`, followed by `Flag::DoNothing`,
 * and then `Flag::DecreaseResolution`.
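 *
 * A minimal sketch of the intended update for a single logical direction
 * (here direction 0), assuming the decision is based on the maximum of
 * `logical_constraints` over the grid points; this is illustrative only, not
 * the definitive implementation:
 *
 * \code
 * const double max_constraint = max(get<0>(logical_constraints));
 * Flag new_flag = Flag::DoNothing;
 * if (max_constraint > abs_target) {
 *   new_flag = Flag::IncreaseResolution;
 * } else if (max_constraint < abs_target * coarsening_factor) {
 *   new_flag = Flag::DecreaseResolution;
 * }
 * // Keep the "higher" of the current and the new flag, in the ordering
 * // IncreaseResolution > DoNothing > DecreaseResolution
 * if ((*result)[0] == Flag::Undefined or
 *     new_flag == Flag::IncreaseResolution or
 *     ((*result)[0] == Flag::DecreaseResolution and
 *      new_flag == Flag::DoNothing)) {
 *   (*result)[0] = new_flag;
 * }
 * \endcode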
 */
template <size_t Dim>
void max_over_components(
    gsl::not_null<std::array<Flag, Dim>*> result,
    const tnsr::i<DataVector, Dim, Frame::ElementLogical>& logical_constraints,
    double abs_target, double coarsening_factor);

}  // namespace Constraints_detail

/*!
 * \brief Refine the grid towards the target constraint violation
 *
 * - If any constraint is above the target value, the element will be p-refined.
 * - If all constraints are below the target times the "coarsening factor", the
 *   element will be p-coarsened.
 *
 * This criterion is based on Sec. 6.1.4 in \cite Szilagyi2014fna .
 *
 * If the coarsening factor turns out to be hard to choose, it could be
 * eliminated, e.g., by projecting the variables to a lower polynomial order
 * before computing the constraints.
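 *
 * For example, with an `AbsoluteTarget` of $10^{-6}$ and a `CoarseningFactor`
 * of 0.1, an element is p-refined as soon as any monitored constraint exceeds
 * $10^{-6}$, and it is p-coarsened only once all monitored constraints are
 * below $10^{-7}$.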
 *
 * \tparam Dim Spatial dimension of the grid
 * \tparam TensorTags List of tags of the constraints to be monitored. These
 * must be tensors of rank 2 or higher. The first index must be a lower spatial
 * index that originates from a derivative.
 */
template <size_t Dim, typename TensorTags>
class Constraints : public Criterion {
 public:
  struct ConstraintsToMonitor {
    using type = std::vector<std::string>;
    static constexpr Options::String help = {"The constraints to monitor."};
    static size_t lower_bound_on_size() { return 1; }
  };
  struct AbsoluteTarget {
    using type = double;
    static constexpr Options::String help = {
        "The absolute target constraint violation. If any constraint is above "
        "this value, the element will be p-refined."};
    static double lower_bound() { return 0.; }
  };
  struct CoarseningFactor {
    using type = double;
    static constexpr Options::String help = {
        "If all constraints are below the 'AbsoluteTarget' times this factor, "
        "the element will be p-coarsened. "
        "A reasonable value is 0.1."};
    static double lower_bound() { return 0.; }
    static double upper_bound() { return 1.; }
  };

  using options =
      tmpl::list<ConstraintsToMonitor, AbsoluteTarget, CoarseningFactor>;

  static constexpr Options::String help = {
      "Refine the grid towards the target constraint violation"};

  Constraints() = default;

  Constraints(std::vector<std::string> vars_to_monitor, double abs_target,
              double coarsening_factor, const Options::Context& context = {});

  /// \cond
  explicit Constraints(CkMigrateMessage* msg);
  using PUP::able::register_constructor;
  WRAPPED_PUPable_decl_template(Constraints);  // NOLINT
  /// \endcond

  using compute_tags_for_observation_box = tmpl::append<
      TensorTags, tmpl::list<domain::Tags::JacobianCompute<
                                 Dim, Frame::ElementLogical, Frame::Inertial>,
                             Events::Tags::ObserverJacobianCompute<
                                 Dim, Frame::ElementLogical, Frame::Inertial>>>;

  Type type() override { return Type::p; }

  std::string observation_name() override { return "Constraints"; }

  using argument_tags = tmpl::list<::Tags::ObservationBox>;

  template <typename ComputeTagsList, typename DataBoxType,
            typename Metavariables>
  std::array<Flag, Dim> operator()(
      const ObservationBox<ComputeTagsList, DataBoxType>& box,
      Parallel::GlobalCache<Metavariables>& cache,
      const ElementId<Dim>& element_id) const;

  void pup(PUP::er& p) override;

 private:
  std::vector<std::string> vars_to_monitor_{};
  double abs_target_ = std::numeric_limits<double>::signaling_NaN();
  double coarsening_factor_ = std::numeric_limits<double>::signaling_NaN();
};

// Out-of-line definitions
/// \cond

template <size_t Dim, typename TensorTags>
Constraints<Dim, TensorTags>::Constraints(
    std::vector<std::string> vars_to_monitor, const double abs_target,
    const double coarsening_factor, const Options::Context& context)
    : vars_to_monitor_(std::move(vars_to_monitor)),
      abs_target_(abs_target),
      coarsening_factor_(coarsening_factor) {
  db::validate_selection<TensorTags>(vars_to_monitor_, context);
}

template <size_t Dim, typename TensorTags>
Constraints<Dim, TensorTags>::Constraints(CkMigrateMessage* msg)
    : Criterion(msg) {}

template <size_t Dim, typename TensorTags>
template <typename ComputeTagsList, typename DataBoxType,
          typename Metavariables>
std::array<Flag, Dim> Constraints<Dim, TensorTags>::operator()(
    const ObservationBox<ComputeTagsList, DataBoxType>& box,
    Parallel::GlobalCache<Metavariables>& /*cache*/,
    const ElementId<Dim>& /*element_id*/) const {
  auto result = make_array<Dim>(Flag::Undefined);
  const auto& jacobian =
      get<Events::Tags::ObserverJacobian<Dim, Frame::ElementLogical,
                                         Frame::Inertial>>(box);
  // Set up memory buffers: the (squared) normalization factor, the logical
  // constraint measure, and a scalar scratch buffer, all sized to the number
  // of grid points
  const size_t num_points = jacobian.begin()->size();
  TempBuffer<tmpl::list<::Tags::Tempi<0, Dim, Frame::ElementLogical>,
                        ::Tags::Tempi<1, Dim, Frame::ElementLogical>,
                        ::Tags::TempScalar<2>>>
      buffer{num_points};
  auto& normalization_factor_square =
      get<::Tags::Tempi<0, Dim, Frame::ElementLogical>>(buffer);
  auto& logical_constraints =
      get<::Tags::Tempi<1, Dim, Frame::ElementLogical>>(buffer);
  auto& scalar_buffer = get(get<::Tags::TempScalar<2>>(buffer));
  Constraints_detail::normalization_factor_square(
      make_not_null(&normalization_factor_square), jacobian);
  // Check all constraints in turn
  tmpl::for_each<TensorTags>([&result, &box, &jacobian,
                              &normalization_factor_square,
                              &logical_constraints, &scalar_buffer,
                              this](const auto tag_v) {
    // Stop if we have already decided to refine every dimension
    if (result == make_array<Dim>(Flag::IncreaseResolution)) {
      return;
    }
    using tag = tmpl::type_from<std::decay_t<decltype(tag_v)>>;
    const std::string tag_name = db::tag_name<tag>();
    // Skip if this tensor is not being monitored
    if (not alg::found(vars_to_monitor_, tag_name)) {
      return;
    }
    Constraints_detail::logical_constraints(
        make_not_null(&logical_constraints), make_not_null(&scalar_buffer),
        get<tag>(box), jacobian, normalization_factor_square);
    Constraints_detail::max_over_components(make_not_null(&result),
                                            logical_constraints, abs_target_,
                                            coarsening_factor_);
  });
  return result;
}


template <size_t Dim, typename TensorTags>
void Constraints<Dim, TensorTags>::pup(PUP::er& p) {
  p | vars_to_monitor_;
  p | abs_target_;
  p | coarsening_factor_;
}

template <size_t Dim, typename TensorTags>
PUP::able::PUP_ID Constraints<Dim, TensorTags>::my_PUP_ID = 0;  // NOLINT
/// \endcond

}  // namespace amr::Criteria