SpECTRE Documentation Coverage Report
Current view: top level - Parallel/ArrayCollection - SendDataToElement.hpp
Commit: 6e1258ccd353220e12442198913007fb6c170b6b
Date: 2024-10-23 19:54:13
Lines documented: 1 of 4 (25.0 %)

// Distributed under the MIT License.
// See LICENSE.txt for details.

#pragma once

#include <cstddef>
#include <mutex>
#include <type_traits>
#include <utility>

#include "DataStructures/DataBox/DataBox.hpp"
#include "Domain/Structure/ElementId.hpp"
#include "Evolution/DiscontinuousGalerkin/AtomicInboxBoundaryData.hpp"
#include "Parallel/ArrayCollection/ReceiveDataForElement.hpp"
#include "Parallel/ArrayCollection/Tags/ElementLocations.hpp"
#include "Parallel/GlobalCache.hpp"
#include "Parallel/Info.hpp"
#include "Parallel/NodeLock.hpp"
#include "Utilities/ErrorHandling/Assert.hpp"
#include "Utilities/Gsl.hpp"
#include "Utilities/TaggedTuple.hpp"
namespace Parallel::Actions {
/*!
 * \brief A local synchronous action where data is communicated to neighbor
 * elements.
 *
 * If the inbox tag type is an `evolution::dg::AtomicInboxBoundaryData`, then
 * remote insertion for elements on the same node is done in a lock-free
 * manner between the sender and receiver elements, and in a wait-free manner
 * between different sender elements to the same receiver element.
 *
 * The number of messages needed to take the next time step on the receiver
 * element is tracked, and a message is sent to the parallel runtime system
 * (e.g. Charm++) only when the receiver/neighbor element has all the data it
 * needs to take the next time step. Sending fewer messages reduces pressure
 * on the runtime system.
 */
struct SendDataToElement {
  using return_type = void;

  template <typename ParallelComponent, typename DbTagList, size_t Dim,
            typename ReceiveTag, typename ReceiveData, typename Metavariables>
  static return_type apply(
      db::DataBox<DbTagList>& box,
      const gsl::not_null<Parallel::NodeLock*> /*node_lock*/,
      const gsl::not_null<Parallel::GlobalCache<Metavariables>*> cache,
      const ReceiveTag& /*meta*/, const ElementId<Dim>& element_to_execute_on,
      typename ReceiveTag::temporal_id instance, ReceiveData&& receive_data) {
    const size_t my_node = Parallel::my_node<size_t>(*cache);
    // While we don't mutate the value, we want to avoid locking the DataBox
    // and the nodegroup by using `db::get_mutable_reference`. If/when we
    // start dynamically inserting and removing elements, we'll need to update
    // how we handle this. For example, we might need the containers to have
    // strong stability guarantees.
    ASSERT(db::get_mutable_reference<Parallel::Tags::ElementLocations<Dim>>(
               make_not_null(&box))
                   .count(element_to_execute_on) == 1,
           "Could not find ElementId " << element_to_execute_on
                                       << " in the list of element locations");
    const size_t node_of_element =
        db::get_mutable_reference<Parallel::Tags::ElementLocations<Dim>>(
            make_not_null(&box))
            .at(element_to_execute_on);
    auto& my_proxy =
        Parallel::get_parallel_component<ParallelComponent>(*cache);
    if (node_of_element == my_node) {
      [[maybe_unused]] size_t count = 0;
      ASSERT(db::get_mutable_reference<
                 typename ParallelComponent::element_collection_tag>(
                 make_not_null(&box))
                     .count(element_to_execute_on) == 1,
             "The element with ID "
                 << element_to_execute_on << " is not on node " << my_node
                 << ". We should be sending data to node " << node_of_element);
      auto& element = db::get_mutable_reference<
                          typename ParallelComponent::element_collection_tag>(
                          make_not_null(&box))
                          .at(element_to_execute_on);
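      // For `AtomicInboxBoundaryData` the insert below is the lock-free path
      // described in the class docs, so the element's inbox lock is never
      // taken; every other inbox type falls back to locking the inbox.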
      if constexpr (std::is_same_v<evolution::dg::AtomicInboxBoundaryData<Dim>,
                                   typename ReceiveTag::type>) {
        count = ReceiveTag::insert_into_inbox(
            make_not_null(&tuples::get<ReceiveTag>(element.inboxes())),
            instance, std::forward<ReceiveData>(receive_data));
      } else {
        // Scope so that we minimize how long we lock the inbox.
        std::lock_guard inbox_lock(element.inbox_lock());
        count = ReceiveTag::insert_into_inbox(
            make_not_null(&tuples::get<ReceiveTag>(element.inboxes())),
            instance, std::forward<ReceiveData>(receive_data));
      }
      // A lower bound for the number of neighbors is
      // `2 * Dim - number_of_block_boundaries`, which doesn't give us the
      // exact minimum number of sends we need to do, but gets us close in most
      // cases. If we really wanted to we could also add the number of
      // directions that don't have external boundaries in our neighbors block.
      // if (count >=
      //     (2 * Dim - element_to_execute_on.number_of_block_boundaries())) {
      Parallel::threaded_action<Parallel::Actions::ReceiveDataForElement<>>(
          my_proxy[node_of_element], element_to_execute_on);
      // }
    } else {
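      // The element lives on another node: forward the tag, temporal id, and
      // data to that node's collection, which performs the inbox insert
      // locally on the receiving node.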
      Parallel::threaded_action<Parallel::Actions::ReceiveDataForElement<>>(
          my_proxy[node_of_element], ReceiveTag{}, element_to_execute_on,
          instance, std::forward<ReceiveData>(receive_data));
    }
  }
};
}  // namespace Parallel::Actions
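
The lock-free/wait-free insertion described in the class documentation can be
illustrated with a small standalone sketch. This is not the actual
`evolution::dg::AtomicInboxBoundaryData` implementation; it is a minimal
model, with hypothetical names, of the idea that each sender writes its own
slot and a single atomic counter records how many neighbor messages have
arrived:

// Sketch only: each of up to MaxNeighbors senders owns a distinct slot, so
// the slot writes never race, and the single atomic fetch_add means
// concurrent senders never block each other or the receiver.
#include <array>
#include <atomic>
#include <cstddef>
#include <optional>
#include <utility>

template <typename BoundaryData, size_t MaxNeighbors>
class AtomicInboxSketch {
 public:
  // Returns the number of messages present after this insert; the caller
  // compares it against the number of expected messages to decide whether
  // the receiving element is ready to run.
  size_t insert(const size_t neighbor_index, BoundaryData data) {
    // Each sender owns a distinct slot, so this store does not race.
    slots_[neighbor_index] = std::move(data);
    // acq_rel: the release half publishes the slot write together with the
    // incremented count; fetch_add returns the previous value.
    return message_count_.fetch_add(1, std::memory_order_acq_rel) + 1;
  }

 private:
  std::array<std::optional<BoundaryData>, MaxNeighbors> slots_{};
  std::atomic<size_t> message_count_{0};
};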

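The commented-out readiness check in `apply` is the message-counting
optimization the class documentation describes: an element has `2 * Dim`
faces, and faces on block boundaries receive no neighbor data, so
`2 * Dim - number_of_block_boundaries` is a lower bound on the number of
messages the receiver needs. A hedged sketch of that dispatch decision,
using a hypothetical `notify_runtime` callback in place of the
`ReceiveDataForElement` threaded action:

#include <cstddef>
#include <functional>

template <size_t Dim>
void dispatch_if_ready(const size_t messages_received,
                       const size_t number_of_block_boundaries,
                       const std::function<void()>& notify_runtime) {
  // Lower bound on the sends needed before the receiver can take its next
  // time step; see the comment in SendDataToElement::apply.
  const size_t lower_bound = 2 * Dim - number_of_block_boundaries;
  if (messages_received >= lower_bound) {
    // Only now wake the receiver through the runtime system; earlier
    // messages sit in the inbox without scheduling any work.
    notify_runtime();
  }
}

In the current source the bound is commented out, so the threaded action is
triggered on every on-node insert; enabling the check would trade extra
bookkeeping for fewer runtime-system messages.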
Generated by: LCOV version 1.14