SpECTRE Documentation Coverage Report
Current view: top level - Domain - ElementDistribution.hpp Hit Total Coverage
Commit: d0fc80462417e83e5cddfa1b9901bb4a9b6af4d6 Lines: 5 12 41.7 %
Date: 2024-03-29 00:33:31
Legend: Lines: hit not hit

          Line data    Source code
       1           0 : // Distributed under the MIT License.
       2             : // See LICENSE.txt for details.
       3             : 
       4             : #pragma once
       5             : 
#include <array>
#include <cstddef>
#include <iosfwd>
#include <optional>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "Options/Options.hpp"
#include "Options/ParseError.hpp"
#include "Utilities/TypeTraits/CreateGetStaticMemberVariableOrDefault.hpp"
      18             : 
      19             : /// \cond
      20             : template <size_t Dim>
      21             : class Block;
      22             : 
      23             : template <size_t Dim>
      24             : class ElementId;
      25             : 
      26             : namespace Spectral {
      27             : enum class Quadrature;
      28             : }  // namespace Spectral
      29             : /// \endcond
      30             : 
      31             : namespace domain {
      32             : /// The weighting scheme for assigning computational costs to `Element`s for
       33             : /// distributing balanced computational costs per processor (see
      34             : /// `BlockZCurveProcDistribution`)
      35           1 : enum class ElementWeight {
      36             :   /// A weighting scheme where each `Element` is assigned the same computational
      37             :   /// cost
      38             :   Uniform,
      39             :   /// A weighting scheme where each `Element`'s computational cost is equal to
      40             :   /// the number of grid points in that `Element`
      41             :   NumGridPoints,
      42             :   /// A weighting scheme where each `Element`'s computational cost is weighted
      43             :   /// by both the number of grid points and minimum spacing between grid points
      44             :   /// in that `Element` (see `get_num_points_and_grid_spacing_cost()` for
      45             :   /// details)
      46             :   NumGridPointsAndGridSpacing
      47             : };
      48             : 
      49           0 : std::ostream& operator<<(std::ostream& os, ElementWeight weight);
      50             : 
      51             : /// \brief Get the cost of each `Element` in a list of `Block`s where
      52             : /// `element_weight` specifies which weight distribution scheme to use
      53             : ///
      54             : /// \details It is only necessary to pass in a value for `quadrature` if
      55             : /// the value for `element_weight` is
      56             : /// `ElementWeight::NumGridPointsAndGridSpacing`. Otherwise, the argument isn't
      57             : /// needed and will have no effect if it does have a value.
      58             : template <size_t Dim>
      59           1 : std::unordered_map<ElementId<Dim>, double> get_element_costs(
      60             :     const std::vector<Block<Dim>>& blocks,
      61             :     const std::vector<std::array<size_t, Dim>>& initial_refinement_levels,
      62             :     const std::vector<std::array<size_t, Dim>>& initial_extents,
      63             :     ElementWeight element_weight,
      64             :     const std::optional<Spectral::Quadrature>& quadrature);
      65             : 
      66             : /*!
      67             :  * \brief Distribution strategy for assigning elements to CPUs using a
      68             :  * Morton ('Z-order') space-filling curve to determine placement within each
      69             :  * block, where `Element`s are distributed across CPUs
      70             :  *
      71             :  * \details The element distribution attempts to assign a balanced total
      72             :  * computational cost to each processor that is allowed to have `Element`s.
      73             :  * First, each `Block`'s `Element`s are ordered by their Z-curve index (see more
      74             :  * below). `Element`s are traversed in this order and assigned to CPUs in order,
      75             :  * moving onto the next CPU once the target cost per CPU is met. The target cost
      76             :  * per CPU is defined as the remaining cost to distribute divided by the
      77             :  * remaining number of CPUs to distribute to. This is an important distinction
      78             :  * from simply having one constant target cost per CPU defined as the total cost
      79             :  * divided by the total number of CPUs with elements. Since the total cost of
      80             :  * `Element`s on a processor will nearly never add up to be exactly the average
      81             :  * cost per CPU, this means that we would either have to decide to overshoot or
      82             :  * undershoot the average as we iterate over the CPUs and assign `Element`s. If
      83             :  * we overshoot the average on each processor, the final processor could have a
      84             :  * much lower cost than the rest of the processors and we run the risk of
      85             :  * overshooting so much that one or more of the requested processors don't get
      86             :  * assigned any `Element`s at all. If we undershoot the average on each
      87             :  * processor, the final processor could have a much higher cost than the others
      88             :  * due to remainder cost piling up. This algorithm avoids these risks by instead
      89             :  * adjusting the target cost per CPU as we finish assigning cost to previous
      90             :  * CPUs.
      91             :  *
      92             :  * Morton curves are a simple and easily-computed space-filling curve that
      93             :  * (unlike Hilbert curves) permit diagonal traversal. See, for instance,
      94             :  * \cite Borrell2018 for a discussion of mesh partitioning using space-filling
      95             :  * curves.
      96             :  * A concrete example of the use of a Morton curve in 2d is given below.
      97             :  *
      98             :  * A sketch of a 2D block with 4x2 elements, with each element labeled according
      99             :  * to the order on the Morton curve:
     100             :  * ```
     101             :  *          x-->
     102             :  *          0   1   2   3
     103             :  *        ----------------
     104             :  *  y  0 |  0   2   4   6
     105             :  *  |    |  | / | / | / |
     106             :  *  v  1 |  1   3   5   7
     107             :  * ```
     108             :  * (forming a zig-zag path, that under some rotation/reflection has a 'Z'
     109             :  * shape).
     110             :  *
     111             :  * The Morton curve method is a quick way of getting acceptable spatial locality
     112             :  * -- usually, for approximately even distributions, it will ensure that
     113             :  * elements are assigned in large volume chunks, and the structure of the Morton
     114             :  * curve ensures that for a given processor and block, the elements will be
     115             :  * assigned in no more than two orthogonally connected clusters. In principle, a
     116             :  * Hilbert curve could potentially improve upon the gains obtained by this class
     117             :  * by guaranteeing that all elements within each block form a single
     118             :  * orthogonally connected cluster.
     119             :  *
     120             :  * The assignment of portions of blocks to processors may use partial blocks,
     121             :  * and/or multiple blocks to ensure an even distribution of elements to
     122             :  * processors.
     123             :  * We currently make no distinction between dividing elements between processors
     124             :  * within a node and dividing elements between processors across nodes. The
     125             :  * current technique aims to have a simple method of reducing communication
     126             :  * globally, though it would likely be more efficient to prioritize minimization
     127             :  * of inter-node communication, because communication across interconnects is
     128             :  * the primary cost of communication in charm++ runs.
     129             :  *
     130             :  * \warning The use of the Morton curve to generate a well-clustered element
     131             :  * distribution currently assumes that the refinement is uniform over each
     132             :  * block, with no internal structure that would be generated by, for instance
     133             :  * AMR.
     134             :  * This distribution method will need alteration to perform well for blocks with
     135             :  * internal structure from h-refinement. Morton curves can be defined
     136             :  * recursively, so a generalization of the present method is possible for blocks
      137             :  * with internal refinement.
     138             :  *
     139             :  * \tparam Dim the number of spatial dimensions of the `Block`s
     140             :  */
     141             : template <size_t Dim>
     142           1 : struct BlockZCurveProcDistribution {
     143           0 :   BlockZCurveProcDistribution() = default;
     144             : 
     145             :   /// The `number_of_procs_with_elements` argument represents how many procs
     146             :   /// will have elements. This is not necessarily equal to the total number of
     147             :   /// procs because some global procs may be ignored by the sixth argument
     148             :   /// `global_procs_to_ignore`.
     149           1 :   BlockZCurveProcDistribution(
     150             :       const std::unordered_map<ElementId<Dim>, double>& element_costs,
     151             :       size_t number_of_procs_with_elements,
     152             :       const std::vector<Block<Dim>>& blocks,
     153             :       const std::vector<std::array<size_t, Dim>>& initial_refinement_levels,
     154             :       const std::vector<std::array<size_t, Dim>>& initial_extents,
     155             :       const std::unordered_set<size_t>& global_procs_to_ignore = {});
     156             : 
     157             :   /// Gets the suggested processor number for a particular `ElementId`,
     158             :   /// determined by the Morton curve weighted element assignment described in
     159             :   /// detail in the parent class documentation.
     160           1 :   size_t get_proc_for_element(const ElementId<Dim>& element_id) const;
     161             : 
     162             :   const std::vector<std::vector<std::pair<size_t, size_t>>>&
     163           0 :   block_element_distribution() const {
     164             :     return block_element_distribution_;
     165             :   }
     166             : 
     167             :  private:
     168             :   // in this nested data structure:
     169             :   // - The block id is the first index
     170             :   // - There is an arbitrary number of CPUs per block, each with an element
     171             :   //   allowance
     172             :   // - Each element allowance is represented by a pair of proc number, number of
     173             :   //   elements in the allowance
     174             :   std::vector<std::vector<std::pair<size_t, size_t>>>
     175           0 :       block_element_distribution_;
     176             : };
     177             : }  // namespace domain
     178             : 
namespace element_weight_detail {
// Generates a trait (used below as
// `get_local_time_stepping_or_default_v<Metavariables, Default>`) that,
// presumably, reads `Metavariables::local_time_stepping` when that static
// member exists and otherwise yields the supplied default — see the macro's
// header for the exact expansion.
CREATE_GET_STATIC_MEMBER_VARIABLE_OR_DEFAULT(local_time_stepping)
}  // namespace element_weight_detail
     182             : 
     183             : template <>
     184           0 : struct Options::create_from_yaml<domain::ElementWeight> {
     185             :   template <typename Metavariables>
     186           0 :   static domain::ElementWeight create(const Options::Option& options) {
     187             :     const auto ordering = options.parse_as<std::string>();
     188             :     if (ordering == "Uniform") {
     189             :       return domain::ElementWeight::Uniform;
     190             :     } else if (ordering == "NumGridPoints") {
     191             :       return domain::ElementWeight::NumGridPoints;
     192             :     } else if (ordering == "NumGridPointsAndGridSpacing") {
     193             :       if constexpr (not element_weight_detail::
     194             :                         get_local_time_stepping_or_default_v<Metavariables,
     195             :                                                              false>) {
     196             :         PARSE_ERROR(
     197             :             options.context(),
     198             :             "When not using local time stepping, you cannot use "
     199             :             "NumGridPointsAndGridSpacing for the element distribution. Please "
     200             :             "choose another element distribution.");
     201             :       }
     202             :       return domain::ElementWeight::NumGridPointsAndGridSpacing;
     203             :     }
     204             :     PARSE_ERROR(options.context(),
     205             :                 "ElementWeight must be 'Uniform', 'NumGridPoints', or, "
     206             :                 "'NumGridPointsAndGridSpacing'");
     207             :   }
     208             : };

Generated by: LCOV version 1.14