SpECTRE Documentation Coverage Report
File: DataStructures/SparseMatrixFiller.hpp
Commit: 1f2210958b4f38fdc0400907ee7c6d5af5111418
Date: 2025-12-05 05:03:31
Coverage: 5 of 14 lines (35.7 %)

// Distributed under the MIT License.
// See LICENSE.txt for details.

#pragma once

#include <blaze/math/CompressedMatrix.h>
#include <cstddef>
#include <unordered_map>
#include <utility>
#include <vector>

#include "DataStructures/SimpleSparseMatrix.hpp"
#include "Utilities/ErrorHandling/Assert.hpp"
#include "Utilities/Gsl.hpp"

namespace sparse_matrix_filler_detail {
/// Hash on a pair of `size_t`s, used by the std::unordered_map below.
struct PairHasher {
  size_t operator()(const std::pair<size_t, size_t>& key) const {
    // Pack the two indices into a single 64-bit hash: `first` occupies the
    // low 32 bits and `second` the high 32 bits.  The magic number
    // 4294967295 is 2^32 - 1, so the ASSERTs check that each index fits
    // in its 32-bit half.
    ASSERT(key.first < 4294967295, "Key.first too large: " << key.first);
    ASSERT(key.second < 4294967295, "Key.second too large: " << key.second);
    return key.first bitor (key.second << 32);
  }
};
}  // namespace sparse_matrix_filler_detail

/*!
 * \ingroup DataStructuresGroup
 * \brief A data structure used to fill sparse matrices.
 *
 * We normally use blaze::CompressedMatrix to hold sparse matrices and to
 * operate on them.
 *
 * Unfortunately, to fill a blaze::CompressedMatrix efficiently one must
 * insert the nonzero matrix elements in a well-defined order (i.e., for a
 * row-major matrix, fill all nonzero elements of the top row in column
 * order, then do the same for each subsequent row) using
 * CompressedMatrix's reserve(), append(), and finalize() functions.
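 *
 * For example, filling a 3x3 row-major CompressedMatrix with four nonzero
 * elements proceeds roughly as follows (a minimal sketch of the blaze
 * low-level interface, with made-up values):
 *
 * \code
 * blaze::CompressedMatrix<double, blaze::rowMajor> matrix(3, 3);
 * matrix.reserve(4);         // total number of nonzero elements
 * matrix.append(0, 1, 2.0);  // row 0, columns in increasing order
 * matrix.append(0, 2, 3.0);
 * matrix.finalize(0);        // row 0 is complete
 * matrix.append(1, 0, 4.0);  // row 1
 * matrix.finalize(1);
 * matrix.append(2, 2, 5.0);  // row 2
 * matrix.finalize(2);
 * \endcode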
 *
 * However, we are interested in a use case where:
 *
 *   1. We fill the matrix elements using a complicated algorithm that
 *      computes (as opposed to systematically loops over) the matrix
 *      element indices to be filled, so we are effectively filling
 *      matrix elements in a random order.
 *
 *   2. The algorithm can visit the same matrix element hundreds of
 *      times: the first time an element is visited it should be set,
 *      and each subsequent time it should be incremented.
 *
 * SparseMatrixFiller addresses both issues: it provides a function to add
 * (or increment) matrix elements in arbitrary order, and a function to
 * copy the accumulated sparse matrix into a blaze::CompressedMatrix
 * efficiently.
 *
 * We consider two ways to keep track of unique elements (see the sketch
 * after this list):
 *
 * 1. Store matrix elements in a full vector that is indexed by
 *    matrix_elements[src_index+max_src_index*dest_index].  Set this
 *    vector to zero initially, and increment it each time 'add' is called.
 *    This is simple, but has a major issue: memory.  In SpEC, I (Mark
 *    Scheel) have measured that when computing TensorYlm filtering
 *    matrices with this method, the slope of CPU-h vs. l_max has a kink
 *    near l_max=40, beyond which CPU-h increases like l_max^2 instead of
 *    l_max.  Presumably this is because of cache misses when accessing
 *    the very large matrix_elements vector at random locations.
 *
 * 2. Have a std::unordered_map<std::pair<size_t,size_t>, size_t>
 *    called `element_index` that keeps track of the unique elements.
 *    Also have three vectors `dest_indices`, `src_indices`,
 *    and `matrix_elements` that hold the nonzero elements.
 *    element_index is indexed by
 *    element_index[{dest_index,src_index}] = index-into-matrix-elements.
 *    So each time we add a new element, we check element_index to see
 *    whether that element has already been added; if not, we push_back
 *    onto the three vectors.  If it has already been added, we simply do
 *    matrix_elements[index-into-matrix-elements] += new_element.
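 *
 * The following is an illustrative sketch of 'add' for the two
 * algorithms (not the actual implementation), using names matching the
 * private members of this class:
 *
 * \code
 * if (not use_map_method_) {
 *   // Algorithm 1: dense vector of size num_rows_ * num_cols_.
 *   matrix_elements_[src_index + num_cols_ * dest_index] += element;
 * } else {
 *   // Algorithm 2: map from (dest, src) to a slot in three parallel
 *   // vectors that hold only the nonzero elements.
 *   const std::pair<size_t, size_t> key{dest_index, src_index};
 *   const auto found = element_index_.find(key);
 *   if (found == element_index_.end()) {
 *     element_index_.emplace(key, matrix_elements_.size());
 *     dest_indices_.push_back(dest_index);
 *     src_indices_.push_back(src_index);
 *     matrix_elements_.push_back(element);
 *   } else {
 *     matrix_elements_[found->second] += element;
 *   }
 * }
 * \endcode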
 *
 * I (Mark Scheel) have found in SpEC, for computing TensorYlm filtering
 * matrices, that the CPU cost of algorithm 2 above scales linearly
 * with l_max at least up to l_max=140, but for l_max < 40 or so,
 * algorithm 2 is about a factor of 3 slower than algorithm 1.
 * So SpEC uses both algorithms, depending on the value of l_max.
 *
 * For SpECTRE, we plan to do the same: for small l_max we will use
 * algorithm 1 and for large l_max we will use algorithm 2.  The
 * cutoff for l_max will depend on other factors (like the rank of the
 * Tensor being filtered) and will be determined by whoever calls
 * SparseMatrixFiller.
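 *
 * \par Example
 * A hypothetical use might look like the following sketch (the 4x4 size,
 * the element values, and the call to make_not_null are illustrative
 * assumptions):
 *
 * \code
 * // 4x4 matrix, map-based algorithm 2, overall scale of 1.
 * SparseMatrixFiller filler(4, true, 1.0);
 * filler.add(2.5, 0, 3);  // contribution to v[0] from x[3]
 * filler.add(1.0, 2, 1);  // contribution to v[2] from x[1]
 * filler.add(0.5, 0, 3);  // revisits (0, 3), so that element becomes 3.0
 * blaze::CompressedMatrix<double, blaze::rowMajor> matrix;
 * filler.fill(make_not_null(&matrix));
 * \endcode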
 */
class SparseMatrixFiller {
 public:
  /// Assumes num_rows and num_cols are equal.
  /// If use_map_method is true, uses algorithm 2; otherwise algorithm 1.
  /// scale is the overall scale of the matrix; it is used to remove
  /// elements that are smaller than roundoff.
  SparseMatrixFiller(size_t num_cols, bool use_map_method, double scale);

  /// Add (or increment) an element such that, for v = M x, where M is
  /// the matrix and v and x are vectors, we compute the contribution
  /// to v[dest_index] from x[src_index].
  void add(double element, size_t dest_index, size_t src_index);

  /// Fills the matrix.
  /// Note that the order in which blaze matrices must be filled depends
  /// on whether the matrix is rowMajor or columnMajor, so changing
  /// the template parameter here will fail unless the implementation is
  /// changed as well.
  void fill(gsl::not_null<blaze::CompressedMatrix<double, blaze::rowMajor>*>
                matrix) const;

  /// Fills a SimpleSparseMatrix.
  void fill(gsl::not_null<SimpleSparseMatrix*> matrix) const;

 private:
  void fill_sparse_matrix_elements(
      gsl::not_null<std::vector<SparseMatrixElement>*> data) const;
  size_t num_rows_{0}, num_cols_{0};
  bool use_map_method_{true};
  double scale_{1.0};
  std::vector<double> matrix_elements_{};
  std::unordered_map<std::pair<size_t, size_t>, size_t,
                     sparse_matrix_filler_detail::PairHasher>
      element_index_{};
  std::vector<size_t> dest_indices_{};
  std::vector<size_t> src_indices_{};
};

Generated by: LCOV version 1.14