SpECTRE Documentation Coverage Report
Current view: top level - Parallel/ArrayCollection - DgElementArrayMemberBase.hpp
Commit: f679b1c7cc6e5bd6a5c72de114b5bff6a9facc98
Date: 2024-05-20 02:45:36
Coverage: Lines: 15 of 38 hit (39.5 %)
Legend: Lines: hit / not hit

          Line data    Source code
       1           0 : // Distributed under the MIT License.
       2             : // See LICENSE.txt for details.
       3             : 
       4             : #pragma once
       5             : 
       6             : #include <charm++.h>
       7             : #include <cstddef>
       8             : #include <limits>
       9             : #include <pup.h>
      10             : #include <string>
      11             : #include <unordered_map>
      12             : 
      13             : #include "Domain/Structure/ElementId.hpp"
      14             : #include "Parallel/NodeLock.hpp"
      15             : #include "Parallel/Phase.hpp"
      16             : #include "Utilities/Gsl.hpp"
      17             : #include "Utilities/Serialization/CharmPupable.hpp"
      18             : 
      19             : namespace Parallel {
      20             : /*!
      21             :  * \brief The base class of a member of a DG element array/map on a nodegroup.
      22             :  *
      23             :  * The nodegroup DgElementCollection stores all the elements on a node. Each
      24             :  * of those elements is a `DgElementArrayMember`, which derives from this base
      25             :  * class so that it can be accessed in a type-erased context, since
      26             :  * `DgElementArrayMember` depends on the metavariables, the phase-dependent
      27             :  * action list, and the simple tags needed from options.
      28             :  *
      29             :  * This class essentially mimics a lot of the functionality of
      30             :  * `Parallel::DistributedObject` but does not involve Charm++ beyond
      31             :  * serialization.
      32             :  */
      33             : template <size_t Dim>
      34           1 : class DgElementArrayMemberBase : public PUP::able {
      35             :  public:
      36           0 :   DgElementArrayMemberBase() = default;
      37             : 
      38           0 :   DgElementArrayMemberBase(const DgElementArrayMemberBase& /*rhs*/) = default;
      39           0 :   DgElementArrayMemberBase& operator=(const DgElementArrayMemberBase& /*rhs*/) =
      40             :       default;
      41           0 :   DgElementArrayMemberBase(DgElementArrayMemberBase&& /*rhs*/) = default;
      42           0 :   DgElementArrayMemberBase& operator=(DgElementArrayMemberBase&& /*rhs*/) =
      43             :       default;
      44           0 :   ~DgElementArrayMemberBase() override = default;
      45             : 
      46           0 :   WRAPPED_PUPable_abstract(DgElementArrayMemberBase);  // NOLINT
      47             : 
      48           0 :   explicit DgElementArrayMemberBase(CkMigrateMessage* msg);
      49             : 
      50             :   /// Start execution of the phase-dependent action list in `next_phase`. If
      51             :   /// `next_phase` has already been visited, execution will resume at the point
      52             :   /// where the previous execution of the same phase left off.
      53           1 :   virtual void start_phase(Parallel::Phase next_phase) = 0;
      54             : 
      55             :   /// Get the current phase
      56           1 :   Parallel::Phase phase() const;
      57             : 
      58             :   /// Tell the algorithm that it should no longer be executed. This does not
      59             :   /// mean that the execution of the program is terminated, but only that the
      60             :   /// algorithm has terminated. An algorithm can be restarted by passing
      61             :   /// `true` as the second argument to the `receive_data` method or by
      62             :   /// calling `perform_algorithm(true)`.
      63           1 :   void set_terminate(gsl::not_null<size_t*> number_of_elements_terminated,
      64             :                      gsl::not_null<Parallel::NodeLock*> nodegroup_lock,
      65             :                      bool terminate);
      66             : 
      67             :   /// Check if an algorithm should continue being evaluated
      68           1 :   bool get_terminate() const;
      69             : 
      70             :   /// The zero-indexed step in the algorithm.
      71           1 :   size_t algorithm_step() const;
      72             : 
      73             :   /// Start evaluating the algorithm until it is stopped by an action.
      74           1 :   virtual void perform_algorithm() = 0;
      75             : 
      76             :   /// Print the expanded type aliases
      77           1 :   virtual std::string print_types() const = 0;
      78             : 
      79             :   /// Print the current state of the algorithm
      80           1 :   std::string print_state() const;
      81             : 
      82             :   /// Print the current contents of the inboxes
      83           1 :   virtual std::string print_inbox() const = 0;
      84             : 
      85             :   /// Print the current contents of the DataBox
      86           1 :   virtual std::string print_databox() const = 0;
      87             : 
      88             :   /// \brief The `inbox_lock()` only locks the inbox, nothing else. The inbox is
      89             :   /// unsafe to access without this lock.
      90             :   ///
      91             :   /// Use `element_lock()` to lock the rest of the element.
      92             :   ///
      93             :   /// This should always be managed by `std::unique_lock` or `std::lock_guard`.
      94           1 :   Parallel::NodeLock& inbox_lock();
      95             : 
      96             :   /// \brief Locks the element, except for the inbox, which is guarded by the
      97             :   /// `inbox_lock()`.
      98             :   ///
      99             :   /// This should always be managed by `std::unique_lock` or `std::lock_guard`.
     100           1 :   Parallel::NodeLock& element_lock();
     101             : 
     102             :   /// \brief Set which core this element should pretend to be bound to.
     103           1 :   void set_core(size_t core);
     104             : 
     105             :   /// \brief Get which core this element should pretend to be bound to.
     106           1 :   size_t get_core() const;
     107             : 
     108           0 :   void pup(PUP::er& p) override;
     109             : 
     110             :  protected:
     111           0 :   DgElementArrayMemberBase(ElementId<Dim> element_id, size_t node_number);
     112             : 
     113           0 :   Parallel::NodeLock inbox_lock_{};
     114           0 :   Parallel::NodeLock element_lock_{};
     115           0 :   bool performing_action_ = false;
     116           0 :   Parallel::Phase phase_{Parallel::Phase::Initialization};
     117           0 :   std::unordered_map<Parallel::Phase, size_t> phase_bookmarks_{};
     118           0 :   std::size_t algorithm_step_ = 0;
     119             : 
     120           0 :   bool terminate_{true};
     121           0 :   bool halt_algorithm_until_next_phase_{false};
     122             : 
     123             :   // Records the name of the next action to be called so that during deadlock
     124             :   // analysis we can print this out.
     125           0 :   std::string deadlock_analysis_next_iterable_action_{};
     126           0 :   ElementId<Dim> element_id_;
     127           0 :   size_t my_node_{std::numeric_limits<size_t>::max()};
     128             :   // There is no associated core. However, we use this as a method of
     129             :   // interoperating with core-aware concepts like the interpolation
     130             :   // framework. Once that framework is core-agnostic we will remove my_core_.
     131           0 :   size_t my_core_{std::numeric_limits<size_t>::max()};
     132             : };
     133             : }  // namespace Parallel
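The class documentation above describes `DgElementArrayMemberBase` as the type-erased handle through which the `DgElementCollection` nodegroup works with the elements stored on a node. The following sketch is illustrative only: the `ElementMap` alias and the `start_phase_on_node` helper are hypothetical names, and how the real nodegroup stores and retrieves its elements is not shown in this header. It merely demonstrates driving elements through the base-class interface, taking the element lock before touching each one as the `element_lock()` documentation requires.

#include <cstddef>
#include <memory>
#include <mutex>
#include <unordered_map>

#include "Domain/Structure/ElementId.hpp"
#include "Parallel/ArrayCollection/DgElementArrayMemberBase.hpp"
#include "Parallel/Phase.hpp"

namespace {
// Hypothetical type-erased storage: every element on the node, keyed by its
// ElementId and held through the base class, so the concrete
// DgElementArrayMember type (metavariables, action list, option tags) never
// appears here.
template <size_t Dim>
using ElementMap = std::unordered_map<
    ElementId<Dim>, std::unique_ptr<Parallel::DgElementArrayMemberBase<Dim>>>;

// Switch every element on this node to `next_phase` using only the
// base-class interface.
template <size_t Dim>
void start_phase_on_node(const ElementMap<Dim>& elements,
                         const Parallel::Phase next_phase) {
  for (const auto& id_and_element : elements) {
    auto& element = *id_and_element.second;
    // Guard everything except the inbox while manipulating the element.
    const std::lock_guard element_guard{element.element_lock()};
    element.start_phase(next_phase);
  }
}
}  // namespace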

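The split between `inbox_lock()` and `element_lock()` can likewise be illustrated with a small debugging helper. The function below is a hypothetical sketch rather than part of the header: based on the doc comments above, it assumes that `print_inbox()` needs only the inbox lock while `print_state()` should be read under the element lock, and it keeps each `std::lock_guard` scoped as narrowly as possible.

#include <cstddef>
#include <mutex>
#include <string>

#include "Parallel/ArrayCollection/DgElementArrayMemberBase.hpp"

namespace {
// Hypothetical debugging helper: snapshot one element's inbox and algorithm
// state, taking only the lock that each piece of data actually requires.
template <size_t Dim>
std::string element_snapshot(
    Parallel::DgElementArrayMemberBase<Dim>& element) {
  std::string result{};
  {
    // The inbox is guarded by the inbox lock alone.
    const std::lock_guard inbox_guard{element.inbox_lock()};
    result += element.print_inbox();
  }
  {
    // Everything else on the element is guarded by the element lock.
    const std::lock_guard element_guard{element.element_lock()};
    result += element.print_state();
  }
  return result;
}
}  // namespace
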
Generated by: LCOV version 1.14