Algorithm.hpp
1 // Distributed under the MIT License.
2 // See LICENSE.txt for details.
3 
4 #pragma once
5 
6 #include <boost/variant/variant.hpp>
7 #include <charm++.h>
8 #include <converse.h>
9 #include <cstddef>
10 #include <exception>
11 #include <initializer_list>
12 #include <ostream>
13 #include <pup.h>
14 #include <tuple>
15 #include <unordered_set>
16 #include <utility>
17 
18 #include "DataStructures/DataBox/DataBox.hpp" // IWYU pragma: keep
19 #include "DataStructures/DataBox/PrefixHelpers.hpp"
20 #include "Parallel/AlgorithmMetafunctions.hpp"
21 #include "Parallel/Algorithms/AlgorithmArrayDeclarations.hpp"
22 #include "Parallel/Algorithms/AlgorithmGroupDeclarations.hpp"
23 #include "Parallel/Algorithms/AlgorithmNodegroupDeclarations.hpp"
24 #include "Parallel/Algorithms/AlgorithmSingletonDeclarations.hpp"
25 #include "Parallel/CharmRegistration.hpp"
26 #include "Parallel/GlobalCache.hpp"
27 #include "Parallel/Info.hpp"
28 #include "Parallel/NodeLock.hpp"
29 #include "Parallel/ParallelComponentHelpers.hpp"
30 #include "Parallel/PhaseDependentActionList.hpp"
31 #include "Parallel/PupStlCpp11.hpp"
32 #include "Parallel/SimpleActionVisitation.hpp"
33 #include "Parallel/TypeTraits.hpp"
34 #include "Utilities/BoostHelpers.hpp"
35 #include "Utilities/ErrorHandling/Assert.hpp"
36 #include "Utilities/ErrorHandling/Error.hpp"
37 #include "Utilities/ForceInline.hpp"
38 #include "Utilities/Gsl.hpp"
39 #include "Utilities/MakeString.hpp"
40 #include "Utilities/NoSuchType.hpp"
41 #include "Utilities/Overloader.hpp"
42 #include "Utilities/PrettyType.hpp"
43 #include "Utilities/Requires.hpp"
44 #include "Utilities/System/ParallelInfo.hpp"
45 #include "Utilities/TMPL.hpp"
46 #include "Utilities/TaggedTuple.hpp"
47 #include "Utilities/TypeTraits.hpp"
48 
49 // IWYU pragma: no_include <array> // for tuple_size
50 
51 // IWYU pragma: no_include "Parallel/Algorithm.hpp" // Include... ourself?
52 
53 namespace Parallel {
54 /// \cond
55 template <typename ParallelComponent, typename PhaseDepActionList>
56 class AlgorithmImpl;
57 /// \endcond
58 
59 namespace Algorithm_detail {
60 template <typename Metavariables, typename Component, typename = std::void_t<>>
61 struct has_registration_list : std::false_type {};
62 
63 template <typename Metavariables, typename Component>
64 struct has_registration_list<
65  Metavariables, Component,
66  std::void_t<
67  typename Metavariables::template registration_list<Component>::type>>
68  : std::true_type {};
69 
70 template <typename Metavariables, typename Component>
71 constexpr bool has_registration_list_v =
72  has_registration_list<Metavariables, Component>::value;
73 } // namespace Algorithm_detail
74 
75 /*!
76  * \ingroup ParallelGroup
77  * \brief A distributed object (Charm++ Chare) that executes a series of Actions
78  * and is capable of sending and receiving data. Acts as an interface to
79  * Charm++.
80  *
81  * ### Different Types of Algorithms
82  * Charm++ chares can be one of four types, which is specified by the type alias
83  * `chare_type` inside the `ParallelComponent`. The four available types of
84  * Algorithms are:
85  * 1. A Parallel::Algorithms::Singleton where there is only one
86  * in the entire execution of the program.
87  * 2. A Parallel::Algorithms::Array which holds zero or more
88  * elements each of which is a distributed object on some core. An array can
89  * grow and shrink in size dynamically if need be and can also be bound to
90  * another array. That is, the bound array has the same number of elements as
91  * the array it is bound to, and elements with the same ID are on the same core.
92  * 3. A Parallel::Algorithms::Group, which is an array but there is
93  * one element per core and they are not able to be moved around between cores.
94  * These are typically useful for gathering data from array elements on their
95  * core, and then processing or reducing it.
96  * 4. A Parallel::Algorithms::Nodegroup, which is similar to a
97  * group except that there is one element per node. For Charm++ SMP (shared
98  * memory parallelism) builds a node corresponds to the usual definition of a
99  * node on a supercomputer. However, for non-SMP builds nodes and cores are
100  * equivalent. An important difference between groups and nodegroups is that
101  * entry methods (remote calls to functions) are not threadsafe on nodegroups.
102  * It is up to the person writing the Actions that will be executed on the
103  * Nodegroup Algorithm to ensure they are threadsafe.
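 *
 * For example, a parallel component (a sketch; the names `MyArrayComponent`,
 * `Metavariables`, `MyActionList`, and the `Execute` phase are hypothetical)
 * selects which of these it is through its `chare_type` alias:
 *
 * \code
 * struct MyArrayComponent {
 *   using metavariables = Metavariables;
 *   using chare_type = Parallel::Algorithms::Array;
 *   using phase_dependent_action_list = tmpl::list<Parallel::PhaseActions<
 *       typename Metavariables::Phase, Metavariables::Phase::Execute,
 *       MyActionList>>;
 * };
 * \endcode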
104  *
105  * ### What is an Algorithm?
106  * An Algorithm is a distributed object, a Charm++ chare, that repeatedly
107  * executes a series of Actions. An Action is a struct that has a `static` apply
108  * function with signature:
109  *
110  * \code
111  * template <typename... DbTags, typename... InboxTags, typename Metavariables,
112  * typename ArrayIndex, typename ActionList>
113  * static auto apply(db::DataBox<tmpl::list<DbTags...>>& box,
114  * tuples::TaggedTuple<InboxTags...>& inboxes,
115  * const GlobalCache<Metavariables>& cache,
116  * const ArrayIndex& array_index,
117  * const TemporalId& temporal_id, const ActionList meta);
118  * \endcode
119  *
120  * Note that any of the arguments can be const or non-const references except
121  * `array_index`, which must be a `const&`.
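 *
 * A minimal sketch of such an Action (the struct name `PauseIfDone`, the tag
 * `Tags::Done`, and the `TemporalId` type are hypothetical) that terminates
 * the algorithm when a flag in the \ref DataBoxGroup "DataBox" is set could
 * look like:
 *
 * \code
 * struct PauseIfDone {
 *   template <typename... DbTags, typename... InboxTags, typename Metavariables,
 *             typename ArrayIndex, typename ActionList, typename TemporalId>
 *   static auto apply(db::DataBox<tmpl::list<DbTags...>>& box,
 *                     tuples::TaggedTuple<InboxTags...>& inboxes,
 *                     const GlobalCache<Metavariables>& cache,
 *                     const ArrayIndex& array_index,
 *                     const TemporalId& temporal_id, const ActionList meta) {
 *     const bool done = db::get<Tags::Done>(box);
 *     return std::make_tuple(std::move(box), done);
 *   }
 * };
 * \endcode
 *
 * Returning the (possibly updated) DataBox, optionally followed by a
 * termination flag, matches the return types handled by the algorithm (see
 * `invoke_iterable_action` below).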
122  *
123  * ### Explicit instantiations of entry methods
124  * The code in src/Parallel/CharmMain.tpp registers all entry methods, and if
125  * one is not properly registered then a static_assert explains how to have it
126  * be registered. If there is a bug in the implementation and an entry method
127  * isn't being registered and isn't caught by a static_assert, then Charm++
128  * will give an error of the following form:
129  *
130  * \verbatim
131  * registration happened after init Entry point: simple_action(), addr:
132  * 0x555a3d0e2090
133  * ------------- Processor 0 Exiting: Called CmiAbort ------------
134  * Reason: Did you forget to instantiate a templated entry method in a .ci file?
135  * \endverbatim
136  *
137  * If you encounter this issue please file a bug report supplying everything
138  * necessary to reproduce the issue.
139  */
140 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
141 class AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>
142  : public ParallelComponent::chare_type::template cbase<
143  ParallelComponent,
144  typename get_array_index<typename ParallelComponent::chare_type>::
145  template f<ParallelComponent>> {
146  static_assert(
147  sizeof...(PhaseDepActionListsPack) > 0,
148  "Must have at least one phase dependent action list "
149  "(PhaseActions) in a parallel component. See the first template "
150  "parameter of 'AlgorithmImpl' in the error message to see which "
151  "component doesn't have any phase dependent action lists.");
152 
153  public:
154  /// List of Actions in the order that generates the DataBox types
155  using all_actions_list = tmpl::flatten<
156  tmpl::list<typename PhaseDepActionListsPack::action_list...>>;
157  /// The metavariables class passed to the Algorithm
158  using metavariables = typename ParallelComponent::metavariables;
159  /// List of all the Tags that can be received into the Inbox
160  using inbox_tags_list = Parallel::get_inbox_tags<all_actions_list>;
161  /// The type of the object used to identify the element of the array, group
162  /// or nodegroup spatially. The default should be an `int`.
163  using array_index = typename get_array_index<
164  typename ParallelComponent::chare_type>::template f<ParallelComponent>;
165 
166  using parallel_component = ParallelComponent;
167  /// The type of the Chare
168  using chare_type = typename parallel_component::chare_type;
169  /// The Charm++ proxy object type
170  using cproxy_type =
171  typename chare_type::template cproxy<parallel_component, array_index>;
172  /// The Charm++ base object type
173  using cbase_type =
174  typename chare_type::template cbase<parallel_component, array_index>;
175  /// The type of the phases
176  using PhaseType =
177  typename tmpl::front<tmpl::list<PhaseDepActionListsPack...>>::phase_type;
178 
179  using phase_dependent_action_lists = tmpl::list<PhaseDepActionListsPack...>;
180 
181  /// \cond
182  // Needed for serialization
183  AlgorithmImpl() noexcept;
184  /// \endcond
185 
186  /// Constructor used by Main to initialize the algorithm
187  template <class... InitializationTags>
188  AlgorithmImpl(
189  const Parallel::CProxy_GlobalCache<metavariables>&
190  global_cache_proxy,
191  tuples::TaggedTuple<InitializationTags...> initialization_items) noexcept;
192 
193  /// Charm++ migration constructor, used after a chare is migrated
194  explicit AlgorithmImpl(CkMigrateMessage* /*msg*/) noexcept;
195 
196  void pup(PUP::er& p) noexcept override { // NOLINT
197 #ifdef SPECTRE_CHARM_PROJECTIONS
198  p | non_action_time_start_;
199 #endif
200  if (performing_action_) {
201  ERROR("cannot serialize while performing action!");
202  }
203  p | performing_action_;
204  p | phase_;
205  p | algorithm_step_;
206  if constexpr (Parallel::is_node_group_proxy<cproxy_type>::value) {
207  p | node_lock_;
208  }
209  p | terminate_;
210  p | halt_algorithm_until_next_phase_;
211  p | box_;
212  p | inboxes_;
213  p | array_index_;
214  p | global_cache_;
215  // note that `perform_registration_or_deregistration` passes the `box_` by
216  // const reference. If mutable access is required to the box, this function
217  // call needs to be carefully considered with respect to the `p | box_` call
218  // in both packing and unpacking scenarios.
219  perform_registration_or_deregistration(p, box_);
220  }
221  /// \cond
222  ~AlgorithmImpl() override;
223 
224  AlgorithmImpl(const AlgorithmImpl& /*unused*/) = delete;
225  AlgorithmImpl& operator=(const AlgorithmImpl& /*unused*/) = delete;
226  AlgorithmImpl(AlgorithmImpl&& /*unused*/) = delete;
227  AlgorithmImpl& operator=(AlgorithmImpl&& /*unused*/) = delete;
228  /// \endcond
229 
230  /*!
231  * \brief Calls the `apply` function `Action` after a reduction has been
232  * completed.
233  *
234  * The `apply` function must take `arg` as its last argument.
235  */
236  template <typename Action, typename Arg>
237  void reduction_action(Arg arg) noexcept;
238 
239  /// \brief Explicitly call the action `Action`. If the returned DataBox type
240  /// is not one of the types of the algorithm then a compilation error occurs.
241  template <typename Action, typename... Args>
242  void simple_action(std::tuple<Args...> args) noexcept;
243 
244  template <typename Action>
245  void simple_action() noexcept;
246 
247  /// \brief Call the `Action` synchronously, returning a result without any
248  /// parallelization. The action is called immediately and control flow returns
249  /// to the caller immediately upon completion.
250  ///
251  /// \note `Action` must have a type alias `return_type` specifying its return
252  /// type. This constraint is to simplify the variant visitation logic for the
253  /// \ref DataBoxGroup "DataBox".
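  ///
  /// A sketch of invoking such an action through the free function
  /// `Parallel::local_synchronous_action` (the action `FetchValue` and the
  /// nodegroup component `MyNodegroup` are hypothetical):
  ///
  /// \code
  /// const auto value = Parallel::local_synchronous_action<FetchValue>(
  ///     Parallel::get_parallel_component<MyNodegroup>(cache));
  /// \endcode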
254  template <typename Action, typename... Args>
255  typename Action::return_type local_synchronous_action(
256  Args&&... args) noexcept {
257  static_assert(Parallel::is_node_group_proxy<cproxy_type>::value,
258  "Cannot call a (blocking) local synchronous action on a "
259  "chare that is not a NodeGroup");
260  return Algorithm_detail::local_synchronous_action_visitor<
261  Action, ParallelComponent>(box_, make_not_null(&node_lock_),
262  std::forward<Args>(args)...);
263  }
264 
265  // @{
266  /// Call an Action on a local nodegroup requiring the Action to handle thread
267  /// safety.
268  ///
269  /// The `Parallel::NodeLock` of the nodegroup is passed to the Action instead
270  /// of the `action_list` as a `const gsl::not_null<Parallel::NodeLock*>&`. The
271  /// node lock can be locked with the `Parallel::NodeLock::lock()` function,
272  /// and unlocked with `Parallel::NodeLock::unlock()`. `Parallel::NodeLock::try_lock()`
273  /// is also provided in case something useful can be done if the lock couldn't
274  /// be acquired.
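  ///
  /// A sketch of a conforming threaded action (the struct name `AppendValue`
  /// and the tag `Tags::Values` are hypothetical):
  ///
  /// \code
  /// struct AppendValue {
  ///   template <typename ParallelComponent, typename DbTagsList,
  ///             typename Metavariables, typename ArrayIndex>
  ///   static void apply(db::DataBox<DbTagsList>& box,
  ///                     Parallel::GlobalCache<Metavariables>& cache,
  ///                     const ArrayIndex& array_index,
  ///                     const gsl::not_null<Parallel::NodeLock*>& node_lock,
  ///                     const double value) noexcept {
  ///     node_lock->lock();
  ///     db::mutate<Tags::Values>(
  ///         make_not_null(&box),
  ///         [value](const gsl::not_null<std::vector<double>*> values) noexcept {
  ///           values->push_back(value);
  ///         });
  ///     node_lock->unlock();
  ///   }
  /// };
  /// \endcode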
275  template <
276  typename Action, typename... Args,
277  Requires<(sizeof...(Args), std::is_same_v<Parallel::Algorithms::Nodegroup,
278  chare_type>)> = nullptr>
279  void threaded_action(std::tuple<Args...> args) noexcept {
280  (void)Parallel::charmxx::RegisterThreadedAction<ParallelComponent, Action,
281  Args...>::registrar;
282  forward_tuple_to_threaded_action<Action>(
283  std::move(args), std::make_index_sequence<sizeof...(Args)>{});
284  }
285 
286  template <typename Action>
287  void threaded_action() noexcept {
288  // NOLINTNEXTLINE(modernize-redundant-void-arg)
289  (void)Parallel::charmxx::RegisterThreadedAction<ParallelComponent,
290  Action>::registrar;
291  Algorithm_detail::simple_action_visitor<Action, ParallelComponent>(
292  box_, *global_cache_,
293  static_cast<const array_index&>(array_index_),
294  make_not_null(&node_lock_));
295  }
296  // @}
297 
298  /// \brief Receive data and store it in the Inbox, and try to continue
299  /// executing the algorithm
300  ///
301  /// When an algorithm has terminated it can be restarted by passing
302  /// `enable_if_disabled = true`. This allows long-term disabling and
303  /// re-enabling of algorithms.
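  ///
  /// A sketch of sending data to this element from elsewhere using the
  /// `Parallel::receive_data` free function (the inbox tag `Tags::MyInbox`,
  /// the component `MyComponent`, and the variables are hypothetical):
  ///
  /// \code
  /// Parallel::receive_data<Tags::MyInbox>(
  ///     Parallel::get_parallel_component<MyComponent>(cache)[element_id],
  ///     temporal_id, data);
  /// \endcode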
304  template <typename ReceiveTag, typename ReceiveDataType>
305  void receive_data(typename ReceiveTag::temporal_id instance,
306  ReceiveDataType&& t,
307  bool enable_if_disabled = false) noexcept;
308 
309  // @{
310  /// Start evaluating the algorithm until the `is_ready` function of an Action
311  /// returns false, until an Action sets `terminate` to `true`, or until an
312  /// Action returns a `Parallel::AlgorithmExecution` flag of
313  /// `AlgorithmExecution::Pause` or `AlgorithmExecution::Halt`.
314  ///
315  /// In the case where no phase is passed the current phase is assumed.
316  constexpr void perform_algorithm() noexcept;
317 
318  constexpr void perform_algorithm(const bool restart_if_terminated) noexcept {
319  if (restart_if_terminated) {
320  set_terminate(false);
321  }
322  perform_algorithm();
323  }
324  // @}
325 
326  void start_phase(const PhaseType next_phase) noexcept {
327  // terminate should be true since we exited a phase previously.
328  if (not get_terminate() and not halt_algorithm_until_next_phase_) {
329  ERROR(
330  "An algorithm must always be set to terminate at the beginning of a "
331  "phase. Since this is not the case the previous phase did not end "
332  "correctly. The integer corresponding to the previous phase is: "
333  << static_cast<int>(phase_)
334  << " and the next phase is: " << static_cast<int>(next_phase)
335  << ", the termination flag is: " << get_terminate()
336  << ", and the halt flag is: " << halt_algorithm_until_next_phase_);
337  }
338  // set terminate to true if there are no actions in this PDAL
339  set_terminate(number_of_actions_in_phase(next_phase) == 0);
340  phase_ = next_phase;
341  algorithm_step_ = 0;
342  halt_algorithm_until_next_phase_ = false;
343  perform_algorithm();
344  }
345 
346  /// Tell the Algorithm it should no longer execute the algorithm. This does
347  /// not mean that the execution of the program is terminated, but only that
348  /// the algorithm has terminated. An algorithm can be restarted by passing
349  /// `true` as the third argument (`enable_if_disabled`) to the `receive_data`
350  /// method or by calling perform_algorithm(true).
351  constexpr void set_terminate(const bool t) noexcept { terminate_ = t; }
352 
353  /// Check if an algorithm should continue being evaluated
354  constexpr bool get_terminate() const noexcept { return terminate_; }
355 
356  // @{
357  /// Wrappers for charm++ informational functions.
358 
359  /// Number of processing elements
360  inline int number_of_procs() const noexcept {
361  return sys::number_of_procs();
362  }
363 
364  /// %Index of my processing element.
365  inline int my_proc() const noexcept { return sys::my_proc(); }
366 
367  /// Number of nodes.
368  inline int number_of_nodes() const noexcept {
369  return sys::number_of_nodes();
370  }
371 
372  /// %Index of my node.
373  inline int my_node() const noexcept { return sys::my_node(); }
374 
375  /// Number of processing elements on the given node.
376  inline int procs_on_node(const int node_index) const noexcept {
377  return sys::procs_on_node(node_index);
378  }
379 
380  /// The local index of my processing element on my node.
381  /// This is in the interval 0, ..., procs_on_node(my_node()) - 1.
382  inline int my_local_rank() const noexcept {
383  return sys::my_local_rank();
384  }
385 
386  /// %Index of first processing element on the given node.
387  inline int first_proc_on_node(const int node_index) const noexcept {
388  return sys::first_proc_on_node(node_index);
389  }
390 
391  /// %Index of the node for the given processing element.
392  inline int node_of(const int proc_index) const noexcept {
393  return sys::node_of(proc_index);
394  }
395 
396  /// The local index for the given processing element on its node.
397  inline int local_rank_of(const int proc_index) const noexcept {
398  return sys::local_rank_of(proc_index);
399  }
400  // @}
401 
402  private:
403  template <typename ThisVariant, typename... Variants, typename... Args>
404  void perform_registration_or_deregistration_impl(
405  PUP::er& p, const boost::variant<Variants...>& box,
406  const gsl::not_null<int*> iter,
407  const gsl::not_null<bool*> already_visited) noexcept {
408  // void cast to avoid compiler warnings about the unused variable in the
409  // false branch of the constexpr
410  (void)already_visited;
411  if (box.which() == *iter and not *already_visited) {
412  // The deregistration and registration below does not actually insert
413  // anything into the PUP::er stream, so nothing is done on a sizing pup.
414  if constexpr (Algorithm_detail::has_registration_list_v<
415  metavariables, ParallelComponent>) {
416  using registration_list =
417  typename metavariables::template registration_list<
418  ParallelComponent>::type;
419  if (p.isPacking()) {
420  tmpl::for_each<registration_list>(
421  [this, &box](auto registration_v) noexcept {
422  using registration = typename decltype(registration_v)::type;
423  registration::template perform_deregistration<
424  ParallelComponent>(boost::get<ThisVariant>(box),
425  *global_cache_, array_index_);
426  });
427  }
428  if (p.isUnpacking()) {
429  tmpl::for_each<registration_list>(
430  [this, &box](auto registration_v) noexcept {
431  using registration = typename decltype(registration_v)::type;
432  registration::template perform_registration<ParallelComponent>(
433  boost::get<ThisVariant>(box), *global_cache_, array_index_);
434  });
435  }
436  *already_visited = true;
437  }
438  }
439  ++(*iter);
440  }
441 
442  template <typename... Variants, typename... Args>
443  void perform_registration_or_deregistration(
444  PUP::er& p, const boost::variant<Variants...>& box) noexcept {
445  int iter = 0;
446  bool already_visited = false;
448  perform_registration_or_deregistration_impl<Variants>(
449  p, box, &iter, &already_visited));
450  }
451 
452  static constexpr bool is_singleton =
453  std::is_same_v<chare_type, Parallel::Algorithms::Singleton>;
454 
455  template <class Dummy = int,
456  Requires<(sizeof(Dummy), is_singleton)> = nullptr>
457  constexpr void set_array_index() noexcept {}
458  template <class Dummy = int,
459  Requires<(sizeof(Dummy), not is_singleton)> = nullptr>
460  void set_array_index() noexcept {
461  // down cast to the algorithm_type, so that the `thisIndex` method can be
462  // called, which is defined in the CBase class
463  array_index_ = static_cast<typename chare_type::template algorithm_type<
464  ParallelComponent, array_index>&>(*this)
465  .thisIndex;
466  }
467 
468  template <typename PhaseDepActions, size_t... Is>
469  constexpr bool iterate_over_actions(
470  std::index_sequence<Is...> /*meta*/) noexcept;
471 
472  template <typename Action, typename... Args, size_t... Is>
473  void forward_tuple_to_action(std::tuple<Args...>&& args,
474  std::index_sequence<Is...> /*meta*/) noexcept {
475  Algorithm_detail::simple_action_visitor<Action, ParallelComponent>(
476  box_, *global_cache_,
477  static_cast<const array_index&>(array_index_),
478  std::forward<Args>(std::get<Is>(args))...);
479  }
480 
481  template <typename Action, typename... Args, size_t... Is>
482  void forward_tuple_to_threaded_action(
483  std::tuple<Args...>&& args,
484  std::index_sequence<Is...> /*meta*/) noexcept {
485  const gsl::not_null<Parallel::NodeLock*> node_lock{&node_lock_};
486  Algorithm_detail::simple_action_visitor<Action, ParallelComponent>(
487  box_, *global_cache_,
488  static_cast<const array_index&>(array_index_), node_lock,
489  std::forward<Args>(std::get<Is>(args))...);
490  }
491 
492  size_t number_of_actions_in_phase(const PhaseType phase) const noexcept {
493  size_t number_of_actions = 0;
494  const auto helper = [&number_of_actions, phase](auto pdal_v) {
495  if (pdal_v.phase == phase) {
496  number_of_actions = pdal_v.number_of_actions;
497  }
498  };
499  EXPAND_PACK_LEFT_TO_RIGHT(helper(PhaseDepActionListsPack{}));
500  return number_of_actions;
501  }
502 
503  // Invoke the static `apply` method of `ThisAction`. The if constexprs are for
504  // handling the cases where the `apply` method returns a tuple of one, two,
505  // or three elements, in order:
506  // 1. A DataBox
507  // 2. Either:
508  // 2a. A bool determining whether or not to terminate (and potentially move
509  // to the next phase), or
510  // 2b. An `AlgorithmExecution` object describing whether to continue,
511  // pause, or halt. `AlgorithmExecution::Continue` does not alter the
512  // termination flag, `AlgorithmExecution::Pause` is equivalent to
513  // passing `true`, and `AlgorithmExecution::Halt` causes the algorithm
514  // to stop without the possibility of restart until after the next
515  // phase change.
516  // 3. An unsigned integer corresponding to which action in the current phase's
517  // algorithm to execute next.
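  // For example (with hypothetical actions), returning
  //   std::make_tuple(std::move(box))                            continues,
  //   std::make_tuple(std::move(box), true)                      terminates, and
  //   std::make_tuple(std::move(box), AlgorithmExecution::Pause, next_step)
  // pauses the algorithm and, when it is restarted, resumes at the action with
  // index `next_step` in the current phase.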
518  template <typename ThisAction, typename ActionList, typename DbTags>
519  void invoke_iterable_action(db::DataBox<DbTags>& my_box) noexcept {
520  auto action_return = ThisAction::apply(
521  my_box, inboxes_, *global_cache_, std::as_const(array_index_),
522  ActionList{}, std::add_pointer_t<ParallelComponent>{});
523 
524  static_assert(
525  Algorithm_detail::check_iterable_action_return_type<
526  ParallelComponent, ThisAction,
527  std::decay_t<decltype(action_return)>>::value,
528  "An iterable action has an invalid return type.\n"
529  "See the template parameters of "
530  "Algorithm_detail::check_iterable_action_return_type for details: the "
531  "first is the parallel component in question, the second is the "
532  "iterable action, and the third is the return type at fault.\n"
533  "The return type must be a tuple of length one, two, or three "
534  "with:\n"
535  " first type is an updated DataBox;\n"
536  " second type is either a bool (indicating termination) or a "
537  "`Parallel::AlgorithmExecution` object;\n"
538  " third type is a size_t indicating the next action in the current"
539  " phase.");
540 
541  constexpr size_t tuple_size =
542  std::tuple_size<decltype(action_return)>::value;
543  if constexpr (tuple_size >= 1_st) {
544  box_ = std::move(get<0>(action_return));
545  }
546  if constexpr (tuple_size >= 2_st) {
547  if constexpr (std::is_same_v<decltype(get<1>(action_return)), bool&>) {
548  terminate_ = get<1>(action_return);
549  } else {
550  switch(get<1>(action_return)) {
551  case AlgorithmExecution::Halt:
552  halt_algorithm_until_next_phase_ = true;
553  terminate_ = true;
554  break;
555  case AlgorithmExecution::Pause:
556  terminate_ = true;
557  break;
558  default:
559  break;
560  }
561  }
562  }
563  if constexpr (tuple_size >= 3_st) {
564  algorithm_step_ = get<2>(action_return);
565  }
566  }
567 
568  // Member variables
569 
570 #ifdef SPECTRE_CHARM_PROJECTIONS
571  double non_action_time_start_;
572 #endif
573 
574  Parallel::GlobalCache<metavariables>* global_cache_{nullptr};
575  bool performing_action_ = false;
576  PhaseType phase_{};
577  std::size_t algorithm_step_ = 0;
578  tmpl::conditional_t<Parallel::is_node_group_proxy<cproxy_type>::value,
579  Parallel::NodeLock, NoSuchType>
580  node_lock_;
581 
582  bool terminate_{true};
583  bool halt_algorithm_until_next_phase_{false};
584 
585  using all_cache_tags = get_const_global_cache_tags<metavariables>;
586  using initial_databox = db::compute_databox_type<tmpl::flatten<tmpl::list<
587  Tags::GlobalCacheImpl<metavariables>,
588  typename ParallelComponent::initialization_tags,
589  db::wrap_tags_in<Tags::FromGlobalCache, all_cache_tags>>>>;
590  // The types held by the boost::variant, box_
591  using databox_phase_types = typename Algorithm_detail::build_databox_types<
592  tmpl::list<>, phase_dependent_action_lists, initial_databox,
593  inbox_tags_list, metavariables, array_index, ParallelComponent>::type;
594 
595  template <typename T>
596  struct get_databox_types {
597  using type = typename T::databox_types;
598  };
599 
600  using databox_types = tmpl::flatten<
601  tmpl::transform<databox_phase_types, get_databox_types<tmpl::_1>>>;
602  // Create a boost::variant that can hold any of the DataBox's
603  using variant_boxes = tmpl::remove_duplicates<
604  tmpl::push_front<databox_types, db::DataBox<tmpl::list<>>>>;
605  make_boost_variant_over<variant_boxes> box_;
606  tuples::tagged_tuple_from_typelist<inbox_tags_list> inboxes_{};
607  array_index array_index_;
608 };
609 
610 ////////////////////////////////////////////////////////////////
611 // Definitions
612 ////////////////////////////////////////////////////////////////
613 
614 /// \cond
615 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
616 AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
617  AlgorithmImpl() noexcept {
618  set_array_index();
619 }
620 
621 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
622 template <class... InitializationTags>
623 AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
624  AlgorithmImpl(const Parallel::CProxy_GlobalCache<metavariables>&
625  global_cache_proxy,
626  tuples::TaggedTuple<InitializationTags...>
627  initialization_items) noexcept
628  : AlgorithmImpl() {
629  (void)initialization_items; // avoid potential compiler warnings if unused
630  // When we are using the LoadBalancing phase, we want the Main component to
631  // handle the synchronization, so the components do not participate in the
632  // charm++ `AtSync` barrier.
633  // The array parallel components are migratable so they get balanced
634  // appropriately when load balancing is triggered by the LoadBalancing phase
635  // in Main
636  if constexpr (std::is_same_v<typename ParallelComponent::chare_type,
637  Parallel::Algorithms::Array> and
638  Algorithm_detail::has_LoadBalancing_v<
639  typename metavariables::Phase>) {
640  this->usesAtSync = false;
641  this->setMigratable(true);
642  }
643  global_cache_ = global_cache_proxy.ckLocalBranch();
644  box_ = db::create<
645  db::AddSimpleTags<tmpl::flatten<
646  tmpl::list<Tags::GlobalCacheImpl<metavariables>,
647  typename ParallelComponent::initialization_tags>>>,
648  db::AddComputeTags<
649  db::wrap_tags_in<Tags::FromGlobalCache, all_cache_tags>>>(
650  global_cache_,
651  std::move(get<InitializationTags>(initialization_items))...);
652 }
653 
654 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
655 AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
656  AlgorithmImpl(CkMigrateMessage* msg) noexcept
657  : cbase_type(msg) {}
658 
659 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
660 AlgorithmImpl<ParallelComponent,
661  tmpl::list<PhaseDepActionListsPack...>>::~AlgorithmImpl() {
662  // We place the registrar in the destructor since every AlgorithmImpl will
663  // have a destructor, but we have different constructors so it's not clear
664  // which will be instantiated.
665  (void)Parallel::charmxx::RegisterParallelComponent<
666  ParallelComponent>::registrar;
667 }
668 
669 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
670 template <typename Action, typename Arg>
671 void AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
672  reduction_action(Arg arg) noexcept {
673  (void)Parallel::charmxx::RegisterReductionAction<
674  ParallelComponent, Action, std::decay_t<Arg>>::registrar;
675  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
676  node_lock_.lock();
677  }
678  if (performing_action_) {
679  ERROR(
680  "Already performing an Action and cannot execute additional Actions "
681  "from inside of an Action. This is only possible if the "
682  "reduction_action function is not invoked via a proxy, which makes "
683  "no sense for a reduction.");
684  }
685  performing_action_ = true;
686  arg.finalize();
687  forward_tuple_to_action<Action>(std::move(arg.data()),
688  std::make_index_sequence<Arg::pack_size()>{});
689  performing_action_ = false;
690  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
691  node_lock_.unlock();
692  }
693  perform_algorithm();
694 }
695 
696 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
697 template <typename Action, typename... Args>
698 void AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
699  simple_action(std::tuple<Args...> args) noexcept {
700  (void)Parallel::charmxx::RegisterSimpleAction<ParallelComponent, Action,
701  Args...>::registrar;
702  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
703  node_lock_.lock();
704  }
705  if (performing_action_) {
706  ERROR(
707  "Already performing an Action and cannot execute additional Actions "
708  "from inside of an Action. This is only possible if the "
709  "simple_action function is not invoked via a proxy, which "
710  "we do not allow.");
711  }
712  performing_action_ = true;
713  forward_tuple_to_action<Action>(std::move(args),
714  std::make_index_sequence<sizeof...(Args)>{});
715  performing_action_ = false;
716  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
717  node_lock_.unlock();
718  }
719  perform_algorithm();
720 }
721 
722 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
723 template <typename Action>
724 void AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
725  simple_action() noexcept {
726  (void)Parallel::charmxx::RegisterSimpleAction<ParallelComponent,
727  Action>::registrar;
728  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
729  node_lock_.lock();
730  }
731  if (performing_action_) {
732  ERROR(
733  "Already performing an Action and cannot execute additional Actions "
734  "from inside of an Action. This is only possible if the "
735  "simple_action function is not invoked via a proxy, which "
736  "we do not allow.");
737  }
738  performing_action_ = true;
739  Algorithm_detail::simple_action_visitor<Action, ParallelComponent>(
740  box_, *global_cache_,
741  static_cast<const array_index&>(array_index_));
742  performing_action_ = false;
743  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
744  node_lock_.unlock();
745  }
746  perform_algorithm();
747 }
748 
749 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
750 template <typename ReceiveTag, typename ReceiveDataType>
751 void AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
752  receive_data(typename ReceiveTag::temporal_id instance, ReceiveDataType&& t,
753  const bool enable_if_disabled) noexcept {
754  (void)Parallel::charmxx::RegisterReceiveData<ParallelComponent,
755  ReceiveTag>::registrar;
756  try {
757  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
758  node_lock_.lock();
759  }
760  if (enable_if_disabled) {
761  set_terminate(false);
762  }
763  ReceiveTag::insert_into_inbox(
764  make_not_null(&tuples::get<ReceiveTag>(inboxes_)), instance,
765  std::forward<ReceiveDataType>(t));
766  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
767  node_lock_.unlock();
768  }
769  } catch (std::exception& e) {
770  ERROR("Fatal error: Unexpected exception caught in receive_data: "
771  << e.what());
772  }
773  perform_algorithm();
774 }
775 
776 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
777 constexpr void AlgorithmImpl<
778  ParallelComponent,
779  tmpl::list<PhaseDepActionListsPack...>>::perform_algorithm() noexcept {
780  if (performing_action_ or get_terminate() or
781  halt_algorithm_until_next_phase_) {
782  return;
783  }
784 #ifdef SPECTRE_CHARM_PROJECTIONS
785  non_action_time_start_ = sys::wall_time();
786 #endif
787  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
788  node_lock_.lock();
789  }
790  const auto invoke_for_phase = [this](auto phase_dep_v) noexcept {
791  using PhaseDep = decltype(phase_dep_v);
792  constexpr PhaseType phase = PhaseDep::phase;
793  using actions_list = typename PhaseDep::action_list;
794  if (phase_ == phase) {
795  while (tmpl::size<actions_list>::value > 0 and not get_terminate() and
796  not halt_algorithm_until_next_phase_ and
797  iterate_over_actions<PhaseDep>(
798  std::make_index_sequence<tmpl::size<actions_list>::value>{})) {
799  }
800  }
801  };
802  // Loop over all phases, once the current phase is found we perform the
803  // algorithm in that phase until we are no longer able to because we are
804  // waiting on data to be sent or because the algorithm has been marked as
805  // terminated.
806  EXPAND_PACK_LEFT_TO_RIGHT(invoke_for_phase(PhaseDepActionListsPack{}));
807  if constexpr (std::is_same_v<Parallel::NodeLock, decltype(node_lock_)>) {
808  node_lock_.unlock();
809  }
810 #ifdef SPECTRE_CHARM_PROJECTIONS
811  traceUserBracketEvent(SPECTRE_CHARM_NON_ACTION_WALLTIME_EVENT_ID,
812  non_action_time_start_, sys::wall_time());
813 #endif
814 }
815 /// \endcond
816 
817 template <typename ParallelComponent, typename... PhaseDepActionListsPack>
818 template <typename PhaseDepActions, size_t... Is>
819 constexpr bool
820 AlgorithmImpl<ParallelComponent, tmpl::list<PhaseDepActionListsPack...>>::
821  iterate_over_actions(const std::index_sequence<Is...> /*meta*/) noexcept {
822  bool take_next_action = true;
823  const auto helper = [this, &take_next_action](auto iteration) noexcept {
824  constexpr size_t iter = decltype(iteration)::value;
825  if (not(take_next_action and not terminate_ and
826  not halt_algorithm_until_next_phase_ and algorithm_step_ == iter)) {
827  return;
828  }
829  using actions_list = typename PhaseDepActions::action_list;
830  using this_action = tmpl::at_c<actions_list, iter>;
831  const auto check_if_ready = [this](auto action,
832  const auto& check_local_box) noexcept {
833  if constexpr (Algorithm_detail::is_is_ready_callable_t<
834  decltype(action),
835  std::decay_t<decltype(check_local_box)>,
836  tuples::tagged_tuple_from_typelist<inbox_tags_list>,
837  Parallel::GlobalCache<metavariables>,
838  array_index>{}) {
839  return decltype(action)::is_ready(
840  check_local_box, std::as_const(inboxes_), *global_cache_,
841  std::as_const(array_index_));
842  } else {
843  return true;
844  }
845  };
846 
847  constexpr size_t phase_index =
848  tmpl::index_of<phase_dependent_action_lists, PhaseDepActions>::value;
849  using databox_phase_type = tmpl::at_c<databox_phase_types, phase_index>;
850  using databox_types_this_phase = typename databox_phase_type::databox_types;
851 
852  using potential_databox_indices = std::conditional_t<
853  iter == 0_st,
854  tmpl::integral_list<size_t, 0_st,
855  tmpl::size<databox_types_this_phase>::value - 1_st>,
856  tmpl::integral_list<size_t, iter>>;
857  bool box_found = false;
858  tmpl::for_each<potential_databox_indices>(
859  [this, &box_found, &take_next_action,
860  &check_if_ready](auto potential_databox_index_v) noexcept {
861  constexpr size_t potential_databox_index =
862  decltype(potential_databox_index_v)::type::value;
863  using this_databox =
864  tmpl::at_c<databox_types_this_phase, potential_databox_index>;
865  if (not box_found and
866  box_.which() ==
867  static_cast<int>(
868  tmpl::index_of<variant_boxes, this_databox>::value)) {
869  box_found = true;
870  auto& box = boost::get<this_databox>(box_);
871  if (not check_if_ready(this_action{}, box)) {
872  take_next_action = false;
873  return;
874  }
875  performing_action_ = true;
876  ++algorithm_step_;
877  invoke_iterable_action<this_action, actions_list>(box);
878  }
879  });
880  if (not box_found) {
881  ERROR(
882  "The DataBox type being retrieved at algorithm step: "
883  << algorithm_step_ << " in phase " << phase_index
884  << " corresponding to action " << pretty_type::get_name<this_action>()
885  << " is not the correct type but is of variant index " << box_.which()
886  << ". If you are using Goto and Label actions then you are using "
887  "them incorrectly.");
888  }
889 
890  performing_action_ = false;
891  // Wrap counter if necessary
892  if (algorithm_step_ >= tmpl::size<actions_list>::value) {
893  algorithm_step_ = 0;
894  }
895  };
896  // In case of no Actions avoid compiler warning.
897  (void)helper;
898  // This is a template for loop for Is
899  EXPAND_PACK_LEFT_TO_RIGHT(helper(std::integral_constant<size_t, Is>{}));
900  return take_next_action;
901 }
902 } // namespace Parallel