// ObserveNorms.hpp
// Distributed under the MIT License.
// See LICENSE.txt for details.

4 #pragma once
5 
#include <cstddef>
#include <limits>
#include <pup.h>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>

#include "DataStructures/DataBox/DataBox.hpp"
#include "DataStructures/DataBox/TagName.hpp"
#include "DataStructures/DataVector.hpp"
#include "DataStructures/Tensor/Tensor.hpp"
#include "ErrorHandling/Assert.hpp"
#include "ErrorHandling/Error.hpp"
#include "IO/Observer/Helpers.hpp"
#include "IO/Observer/ObservationId.hpp"
#include "IO/Observer/ObserverComponent.hpp"
#include "IO/Observer/ReductionActions.hpp"
#include "IO/Observer/TypeOfObservation.hpp"
#include "Options/Options.hpp"
#include "Parallel/ArrayIndex.hpp"
#include "Parallel/CharmPupable.hpp"
#include "Parallel/GlobalCache.hpp"
#include "Parallel/Invoke.hpp"
#include "Parallel/Reduction.hpp"
#include "ParallelAlgorithms/EventsAndTriggers/Event.hpp"
#include "Time/Tags.hpp"
#include "Utilities/Functional.hpp"
#include "Utilities/Numeric.hpp"
#include "Utilities/TMPL.hpp"
36 
37 namespace Events {
38 /// \cond
39 template <typename ObservableTensorTagsList>
40 class ObserveNorms;
41 /// \endcond
42 
43 /*!
44  * \brief Compute norms of tensors in the DataBox and write them to disk.
45  *
46  * The L2 norm is computed as the RMS, so
47  *
48  * \f{align*}{
49  * L_2(u)=\sqrt{\frac{1}{N}\sum_{i=0}^{N} u_i^2}
50  * \f}
51  *
52  * where \f$N\f$ is the number of grid points.
53  *
54  * The norm can be taken for each individual component, or summed over
55  * components. For the max/min it is then the max/min over all components, while
56  * for the L2 norm we have (for a 3d vector, 2d and 1d are similar)
57  *
58  * \f{align*}{
59  * L_2(v^k)=\sqrt{\frac{1}{N}\sum_{i=0}^{N} \left[(v^x_i)^2 + (v^y_i)^2
60  * + (v^z_i)^2\right]}
61  * \f}
62  *
63  * Here is an example of an input file:
64  *
65  * \snippet Test_ObserveNorms.cpp input_file_examples
66  */
67 template <typename... ObservableTensorTags>
68 class ObserveNorms<tmpl::list<ObservableTensorTags...>> : public Event {
69  private:
70  struct ObserveTensor {
71  static constexpr Options::String help = {
72  "The tensor to reduce, and how to reduce it."};
73 
74  struct Name {
75  using type = std::string;
76  static constexpr Options::String help = {
77  "The name of the tensor to observe."};
78  };
79  struct NormType {
80  using type = std::string;
81  static constexpr Options::String help = {
82  "The type of norm to use. Must be one of Max, Min, or L2Norm."};
83  };
84  struct Components {
85  using type = std::string;
86  static constexpr Options::String help = {
87  "How to handle tensor components. Must be Individual or Sum."};
88  };
89 
90  using options = tmpl::list<Name, NormType, Components>;
91 
92  ObserveTensor() = default;
93 
94  ObserveTensor(std::string in_tensor, std::string in_norm_type,
95  std::string in_components,
96  const Options::Context& context = {});
97 
98  std::string tensor{};
99  std::string norm_type{};
100  std::string components{};
101  };
102 
103  template <typename Functional>
104  struct ElementWiseBinary {
105  std::vector<double> operator()(
106  const std::vector<double>& t0,
107  const std::vector<double>& t1) const noexcept {
108  ASSERT(t0.size() == t1.size(),
109  "Size must be the same but got t0: " << t0.size()
110  << " and t1: " << t1.size());
111  std::vector<double> result(t0.size());
112  for (size_t i = 0; i < t0.size(); ++i) {
113  result[i] = Functional{}(t0[i], t1[i]);
114  }
115  return result;
116  }
117 
118  std::vector<double> operator()(const std::vector<double>& t0,
119  const double t1) const noexcept {
120  // This overload is needed for the L2 norm
121  std::vector<double> result(t0.size());
122  for (size_t i = 0; i < t0.size(); ++i) {
123  result[i] = Functional{}(t0[i], t1);
124  }
125  return result;
126  }
127  };
128 
129  using ReductionData = tmpl::wrap<
130  tmpl::list<Parallel::ReductionDatum<double, funcl::AssertEqual<>>,
133  ElementWiseBinary<funcl::Max<>>>,
135  ElementWiseBinary<funcl::Min<>>>,
137  std::vector<double>, ElementWiseBinary<funcl::Plus<>>,
138  ElementWiseBinary<funcl::Sqrt<funcl::Divides<>>>,
140  Parallel::ReductionData>;
141 
142  public:
143  /// The name of the subfile inside the HDF5 file
144  struct SubfileName {
145  using type = std::string;
146  static constexpr Options::String help = {
147  "The name of the subfile inside the HDF5 file without an extension and "
148  "without a preceding '/'."};
149  };
150  /// The tensor to observe and how to do the reduction
151  struct TensorsToObserve {
153  static constexpr Options::String help = {
154  "List specifying each tensor to observe and how it is reduced."};
155  };
156 
157  /// \cond
158  explicit ObserveNorms(CkMigrateMessage* msg) noexcept;
159  using PUP::able::register_constructor;
160  WRAPPED_PUPable_decl_template(ObserveNorms); // NOLINT
161  /// \endcond
162 
163  using options = tmpl::list<SubfileName, TensorsToObserve>;
164 
165  static constexpr Options::String help =
166  "Observe norms of tensors in the DataBox.\n"
167  "\n"
168  "Writes reduction quantities:\n"
169  " * Time\n"
170  " * NumberOfPoints = total number of points in the domain\n"
171  " * Max values\n"
172  " * Min values\n"
173  " * L2-norm values\n";
174 
175  ObserveNorms() = default;
176 
177  ObserveNorms(const std::string& subfile_name,
178  const std::vector<ObserveTensor>& observe_tensors) noexcept;
179 
180  using observed_reduction_data_tags =
181  observers::make_reduction_data_tags<tmpl::list<ReductionData>>;
182 
183  using argument_tags = tmpl::list<::Tags::Time, ::Tags::DataBox>;
184 
185  template <typename DbTagsList, typename Metavariables, typename ArrayIndex,
186  typename ParallelComponent>
187  void operator()(const double& time, const db::DataBox<DbTagsList>& box,
189  const ArrayIndex& array_index,
190  const ParallelComponent* const /*meta*/) const noexcept;
191 
192  using observation_registration_tags = tmpl::list<>;
193 
195  get_observation_type_and_key_for_registration() const noexcept {
197  observers::ObservationKey(subfile_path_ + ".dat")};
198  }
199 
200  using is_ready_argument_tags = tmpl::list<>;
201 
202  template <typename Metavariables, typename ArrayIndex, typename Component>
203  bool is_ready(Parallel::GlobalCache<Metavariables>& /*cache*/,
204  const ArrayIndex& /*array_index*/,
205  const Component* const /*meta*/) const noexcept {
206  return true;
207  }
208 
209  bool needs_evolved_variables() const noexcept override { return true; }
210 
211  // NOLINTNEXTLINE(google-runtime-references)
212  void pup(PUP::er& p) override;
213 
214  private:
215  std::string subfile_path_;
216  std::vector<std::string> tensor_names_{};
217  std::vector<std::string> tensor_norm_types_{};
218  std::vector<std::string> tensor_components_{};
219 };
220 
221 /// \cond
222 template <typename... ObservableTensorTags>
223 ObserveNorms<tmpl::list<ObservableTensorTags...>>::ObserveNorms(
224  CkMigrateMessage* msg) noexcept
225  : Event(msg) {}
226 
227 template <typename... ObservableTensorTags>
228 ObserveNorms<tmpl::list<ObservableTensorTags...>>::ObserveNorms(
229  const std::string& subfile_name,
230  const std::vector<ObserveTensor>& observe_tensors) noexcept
231  : subfile_path_("/" + subfile_name) {
232  tensor_names_.reserve(observe_tensors.size());
233  tensor_norm_types_.reserve(observe_tensors.size());
234  tensor_components_.reserve(observe_tensors.size());
235  for (const auto& observe_tensor : observe_tensors) {
236  tensor_names_.push_back(observe_tensor.tensor);
237  tensor_norm_types_.push_back(observe_tensor.norm_type);
238  tensor_components_.push_back(observe_tensor.components);
239  }
240 }
241 
242 template <typename... ObservableTensorTags>
243 ObserveNorms<tmpl::list<ObservableTensorTags...>>::ObserveTensor::ObserveTensor(
244  std::string in_tensor, std::string in_norm_type, std::string in_components,
245  const Options::Context& context)
246  : tensor(std::move(in_tensor)),
247  norm_type(std::move(in_norm_type)),
248  components(std::move(in_components)) {
249  if (((tensor != db::tag_name<ObservableTensorTags>()) and ...)) {
250  PARSE_ERROR(
251  context, "Tensor '"
252  << tensor << "' is not known. Known tensors are: "
253  << ((db::tag_name<ObservableTensorTags>() + ",") + ...));
254  }
255  if (norm_type != "Max" and norm_type != "Min" and norm_type != "L2Norm") {
256  PARSE_ERROR(context, "NormType must be one of Max, Min, or L2Norm, not "
257  << norm_type);
258  }
259  if (components != "Individual" and components != "Sum") {
260  PARSE_ERROR(context,
261  "Components must be Individual or Sum, not " << components);
262  }
263 }
264 
265 template <typename... ObservableTensorTags>
266 template <typename DbTagsList, typename Metavariables, typename ArrayIndex,
267  typename ParallelComponent>
268 void ObserveNorms<tmpl::list<ObservableTensorTags...>>::operator()(
269  const double& time, const db::DataBox<DbTagsList>& box,
270  Parallel::GlobalCache<Metavariables>& cache, const ArrayIndex& array_index,
271  const ParallelComponent* const /*meta*/) const noexcept {
272  using tensor_tags = tmpl::list<ObservableTensorTags...>;
273  static_assert((db::tag_is_retrievable_v<ObservableTensorTags,
274  db::DataBox<DbTagsList>> and
275  ...),
276  "Not all ObservableTensorTags were found in the DataBox.");
277 
280  norm_values_and_names{};
281  size_t number_of_points = 0;
282 
283  // Loop over ObservableTensorTags and see if it was requested to be observed.
284  // This approach allows us to delay evaluating any compute tags until they're
285  // actually needed for observing.
286  tmpl::for_each<tensor_tags>([this, &box, &norm_values_and_names,
287  &number_of_points](auto tag_v) noexcept {
288  using tag = tmpl::type_from<decltype(tag_v)>;
289  const std::string tensor_name = db::tag_name<tag>();
290  for (size_t i = 0; i < tensor_names_.size(); ++i) {
291  if (tensor_name == tensor_names_[i]) {
292  auto& [values, names] = norm_values_and_names[tensor_norm_types_[i]];
293  const auto [component_names, components] =
294  db::get<tag>(box).get_vector_of_data();
295  if (number_of_points != 0 and
296  components[0].size() != number_of_points) {
297  ERROR("The number of grid points previously was "
298  << number_of_points << " but changed to "
299  << components[0].size()
300  << ". This means you're computing norms of tensors over "
301  "different grids, which will give the wrong answer for "
302  "norms that use the grid points. The tensor is: "
303  << tensor_name);
304  }
305  number_of_points = components[0].size();
306 
307  if (tensor_components_[i] == "Individual") {
308  for (size_t storage_index = 0; storage_index < component_names.size();
309  ++storage_index) {
310  if (tensor_norm_types_[i] == "Max") {
311  values.push_back(max(components[storage_index]));
312  } else if (tensor_norm_types_[i] == "Min") {
313  values.push_back(min(components[storage_index]));
314  } else if (tensor_norm_types_[i] == "L2Norm") {
315  values.push_back(
316  alg::accumulate(square(components[storage_index]), 0.0));
317  }
318  names.push_back(
319  tensor_norm_types_[i] + "(" +
320  (component_names.size() == 1
321  ? tensor_name
322  : (tensor_name + "_" + component_names[storage_index])) +
323  ")");
324  }
325  } else if (tensor_components_[i] == "Sum") {
326  double value = 0.0;
327  if (tensor_norm_types_[i] == "Max") {
329  } else if (tensor_norm_types_[i] == "Min") {
331  }
332  for (size_t storage_index = 0; storage_index < component_names.size();
333  ++storage_index) {
334  if (tensor_norm_types_[i] == "Max") {
335  value = std::max(value, max(components[storage_index]));
336  } else if (tensor_norm_types_[i] == "Min") {
337  value = std::min(value, min(components[storage_index]));
338  } else if (tensor_norm_types_[i] == "L2Norm") {
339  value += alg::accumulate(square(components[storage_index]), 0.0);
340  }
341  }
342 
343  names.push_back(tensor_norm_types_[i] + "(" + tensor_name + ")");
344  values.push_back(value);
345  }
346  }
347  }
348  });
349 
350  // Concatenate the legend info together.
351  std::vector<std::string> legend{"Time", "NumberOfPoints"};
352  legend.insert(legend.end(), norm_values_and_names["Max"].second.begin(),
353  norm_values_and_names["Max"].second.end());
354  legend.insert(legend.end(), norm_values_and_names["Min"].second.begin(),
355  norm_values_and_names["Min"].second.end());
356  legend.insert(legend.end(), norm_values_and_names["L2Norm"].second.begin(),
357  norm_values_and_names["L2Norm"].second.end());
358 
359  // Send data to reduction observer
360  auto& local_observer =
361  *Parallel::get_parallel_component<observers::Observer<Metavariables>>(
362  cache)
363  .ckLocalBranch();
364 
365  Parallel::simple_action<observers::Actions::ContributeReductionData>(
366  local_observer, observers::ObservationId(time, subfile_path_ + ".dat"),
369  Parallel::ArrayIndex<ArrayIndex>(array_index)},
370  subfile_path_, std::move(legend),
371  ReductionData{time, number_of_points,
372  std::move(norm_values_and_names["Max"].first),
373  std::move(norm_values_and_names["Min"].first),
374  std::move(norm_values_and_names["L2Norm"].first)});
375 }
376 
377 template <typename... ObservableTensorTags>
378 void ObserveNorms<tmpl::list<ObservableTensorTags...>>::pup(PUP::er& p) {
379  Event::pup(p);
380  p | subfile_path_;
381  p | tensor_names_;
382  p | tensor_norm_types_;
383  p | tensor_components_;
384 }
385 
386 template <typename... ObservableTensorTags>
387 PUP::able::PUP_ID ObserveNorms<tmpl::list<ObservableTensorTags...>>::my_PUP_ID =
388  0; // NOLINT
389 /// \endcond
390 } // namespace Events
observers::ObservationId
A unique identifier for an observation representing the type of observation and the instance (e....
Definition: ObservationId.hpp:71
std::string
CharmPupable.hpp
Events::ObserveNorms< tmpl::list< ObservableTensorTags... > >::needs_evolved_variables
bool needs_evolved_variables() const noexcept override
Whether the event uses anything depending on the evolved_variables. If this returns false,...
Definition: ObserveNorms.hpp:209
utility
Parallel::GlobalCache
Definition: ElementReceiveInterpPoints.hpp:15
Parallel::ReductionDatum
The data to be reduced, and invokables to be called whenever two reduction messages are combined and ...
Definition: Reduction.hpp:65
PARSE_ERROR
#define PARSE_ERROR(context, m)
Definition: Options.hpp:71
MakeWithValueImpls::number_of_points
size_t number_of_points(const T &input) noexcept
The number of points represented by an object.
Definition: MakeWithValue.hpp:40
std::pair
GlobalCache.hpp
std::index_sequence
Options.hpp
db::tag_is_retrievable_v
constexpr bool tag_is_retrievable_v
Definition: DataBox.hpp:61
vector
Error.hpp
std::size
T size(T... args)
square
constexpr decltype(auto) square(const T &x)
Compute the square of x
Definition: ConstantExpressions.hpp:55
observers::ObservationKey
Used as a key in maps to keep track of how many elements have registered.
Definition: ObservationId.hpp:28
Options::Context
Definition: Options.hpp:41
ERROR
#define ERROR(m)
prints an error message to the standard error stream and aborts the program.
Definition: Error.hpp:37
std::add_pointer_t
WRAPPED_PUPable_decl_template
#define WRAPPED_PUPable_decl_template(className)
Mark derived classes as serializable.
Definition: CharmPupable.hpp:22
Event
Definition: Event.hpp:19
DataBox.hpp
cstddef
Assert.hpp
std::min
T min(T... args)
ASSERT
#define ASSERT(a, m)
Assert that an expression should be true.
Definition: Assert.hpp:49
ActionTesting::cache
Parallel::GlobalCache< Metavariables > & cache(MockRuntimeSystem< Metavariables > &runner, const ArrayIndex &array_index) noexcept
Returns the GlobalCache of Component with index array_index.
Definition: MockRuntimeSystemFreeFunctions.hpp:382
limits
Options::String
const char *const String
The string used in option structs.
Definition: Options.hpp:32
Tensor.hpp
observers::TypeOfObservation::Reduction
@ Reduction
The sender will only perform reduction observations.
observers::ArrayComponentId
An ID type that identifies both the parallel component and the index in the parallel component.
Definition: ArrayComponentId.hpp:27
std::time
T time(T... args)
Parallel::ArrayIndex
The array index used for indexing Chare Arrays, mostly an implementation detail.
Definition: ArrayIndex.hpp:28
std::max
T max(T... args)
unordered_map
TMPL.hpp
alg::accumulate
decltype(auto) accumulate(const Container &c, T init)
Convenience wrapper around std::accumulate, returns std::accumulate(begin(c), end(c),...
Definition: Numeric.hpp:54
string