SpECTRE Documentation Coverage Report
Current view: top level - NumericalAlgorithms/LinearSolver - Gmres.hpp
Commit: a48770a280d0d66bc04cb5df30de1e3cc4985437
Date: 2021-06-17 20:55:50
Coverage: Lines: 4 hit / 44 total (9.1 %)
Legend: Lines: hit | not hit

          Line data    Source code
       1           0 : // Distributed under the MIT License.
       2             : // See LICENSE.txt for details.
       3             : 
       4             : #pragma once
       5             : 
       6             : #include <cstddef>
       7             : #include <memory>
       8             : #include <optional>
       9             : #include <pup.h>
      10             : #include <type_traits>
      11             : #include <utility>
      12             : 
      13             : #include "DataStructures/DenseMatrix.hpp"
      14             : #include "DataStructures/DenseVector.hpp"
      15             : #include "IO/Logging/Verbosity.hpp"
      16             : #include "NumericalAlgorithms/Convergence/Criteria.hpp"
      17             : #include "NumericalAlgorithms/Convergence/HasConverged.hpp"
      18             : #include "NumericalAlgorithms/Convergence/Reason.hpp"
      19             : #include "NumericalAlgorithms/LinearSolver/InnerProduct.hpp"
      20             : #include "NumericalAlgorithms/LinearSolver/LinearSolver.hpp"
      21             : #include "Options/Auto.hpp"
      22             : #include "Options/Options.hpp"
      23             : #include "Parallel/CharmPupable.hpp"
      24             : #include "Utilities/Gsl.hpp"
      25             : #include "Utilities/Registration.hpp"
      26             : #include "Utilities/TMPL.hpp"
      27             : 
      28             : namespace LinearSolver {
      29           1 : namespace gmres::detail {
      30             : 
      31             : // Perform an Arnoldi orthogonalization to find a new `operand` that is
      32             : // orthogonal to all vectors in `basis_history`. Appends a new column to the
      33             : // `orthogonalization_history` that holds the inner product of the intermediate
      34             : // `operand` with each vector in the `basis_history` and itself.
      35             : template <typename VarsType>
      36             : void arnoldi_orthogonalize(
      37             :     const gsl::not_null<VarsType*> operand,
      38             :     const gsl::not_null<DenseMatrix<double>*> orthogonalization_history,
      39             :     const std::vector<VarsType>& basis_history,
      40             :     const size_t iteration) noexcept {
       41             :   // Resize the matrix and make sure the new entries that are not filled below
       42             :   // are zero.
      43             :   orthogonalization_history->resize(iteration + 2, iteration + 1);
      44             :   for (size_t j = 0; j < iteration; ++j) {
      45             :     (*orthogonalization_history)(iteration + 1, j) = 0.;
      46             :   }
      47             :   // Arnoldi orthogonalization
      48             :   for (size_t j = 0; j < iteration + 1; ++j) {
      49             :     const double orthogonalization = inner_product(basis_history[j], *operand);
      50             :     (*orthogonalization_history)(j, iteration) = orthogonalization;
      51             :     *operand -= orthogonalization * basis_history[j];
      52             :   }
      53             :   (*orthogonalization_history)(iteration + 1, iteration) =
      54             :       sqrt(inner_product(*operand, *operand));
      55             :   // Avoid an FPE if the new operand norm is exactly zero. In that case the
      56             :   // problem is solved and the algorithm will terminate (see Proposition 9.3 in
      57             :   // \cite Saad2003). Since there will be no next iteration we don't need to
      58             :   // normalize the operand.
      59             :   if (UNLIKELY((*orthogonalization_history)(iteration + 1, iteration) == 0.)) {
      60             :     return;
      61             :   }
      62             :   *operand /= (*orthogonalization_history)(iteration + 1, iteration);
      63             : }
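
For reference, a sketch of the Arnoldi relation maintained by this orthogonalization, written for the unpreconditioned case with 0-based indices as in the code (the notation below is illustrative, not taken from the source). With \(q_0, \dots, q_k\) the orthonormal vectors in `basis_history` and the incoming `operand` equal to \(A q_k\):

\[
h_{j,k} = \langle q_j, A q_k \rangle \quad (j \le k), \qquad
w_k = A q_k - \sum_{j=0}^{k} h_{j,k}\, q_j, \qquad
h_{k+1,k} = \lVert w_k \rVert,
\]

so that after normalizing \(q_{k+1} = w_k / h_{k+1,k}\) the entries written here are those of the upper-Hessenberg matrix \(\bar{H}_k\) satisfying \(A Q_k = Q_{k+1} \bar{H}_k\); the Givens rotations in `solve_minimal_residual` below then transform this matrix in place.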
      64             : 
      65             : // Solve the linear least-squares problem `||beta - H * y||` for `y`, where `H`
      66             : // is the Hessenberg matrix given by `orthogonalization_history` and `beta` is
      67             : // the vector `(initial_residual, 0, 0, ...)` by updating the QR decomposition
      68             : // of `H` from the previous iteration with a Givens rotation.
      69             : void solve_minimal_residual(
      70             :     gsl::not_null<DenseMatrix<double>*> orthogonalization_history,
      71             :     gsl::not_null<DenseVector<double>*> residual_history,
      72             :     gsl::not_null<DenseVector<double>*> givens_sine_history,
      73             :     gsl::not_null<DenseVector<double>*> givens_cosine_history,
      74             :     size_t iteration) noexcept;
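
A sketch of the Givens update performed by this step (standard GMRES bookkeeping; the symbols are illustrative, not taken from the source): once the rotations from previous iterations have been applied to the new column, the remaining subdiagonal entry \(h_{k+1,k}\) is eliminated by

\[
c_k = \frac{h_{k,k}}{\sqrt{h_{k,k}^2 + h_{k+1,k}^2}}, \qquad
s_k = \frac{h_{k+1,k}}{\sqrt{h_{k,k}^2 + h_{k+1,k}^2}}, \qquad
\begin{pmatrix} c_k & s_k \\ -s_k & c_k \end{pmatrix}
\begin{pmatrix} h_{k,k} \\ h_{k+1,k} \end{pmatrix}
=
\begin{pmatrix} \sqrt{h_{k,k}^2 + h_{k+1,k}^2} \\ 0 \end{pmatrix}.
\]

The same rotation is applied to `residual_history`, whose last entry (in absolute value) is then the norm of the current residual; this is the quantity `solve` passes to `Convergence::HasConverged`.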
      75             : 
      76             : // Find the vector that minimizes the residual by inverting the upper
      77             : // triangular matrix obtained above.
      78             : DenseVector<double> minimal_residual_vector(
      79             :     const DenseMatrix<double>& orthogonalization_history,
      80             :     const DenseVector<double>& residual_history) noexcept;
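
With the Hessenberg matrix rotated to upper-triangular form, the least-squares problem reduces to a square triangular system solved by back substitution; a sketch, with \(R\) the triangular block of the rotated `orthogonalization_history` and \(g\) the corresponding entries of `residual_history`:

\[
y = R^{-1} g, \qquad
y_i = \frac{1}{R_{ii}} \Big( g_i - \sum_{j > i} R_{ij}\, y_j \Big).
\]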
      81             : 
      82             : }  // namespace gmres::detail
      83             : 
      84             : namespace Serial {
      85             : 
      86             : /// Disables the iteration callback at compile-time
      87           1 : struct NoIterationCallback {};
      88             : 
      89             : /// \cond
      90             : template <typename VarsType, typename Preconditioner,
      91             :           typename LinearSolverRegistrars>
      92             : struct Gmres;
      93             : /// \endcond
      94             : 
      95             : namespace Registrars {
      96             : 
      97             : /// Registers the `LinearSolver::Serial::Gmres` linear solver.
      98             : template <typename VarsType>
      99           1 : struct Gmres {
     100             :   template <typename LinearSolverRegistrars>
     101           0 :   using f = Serial::Gmres<VarsType, LinearSolver<LinearSolverRegistrars>,
     102             :                           LinearSolverRegistrars>;
     103             : };
     104             : }  // namespace Registrars
     105             : 
     106             : /*!
     107             :  * \brief A serial GMRES iterative solver for nonsymmetric linear systems of
     108             :  * equations.
     109             :  *
     110             :  * This is an iterative algorithm to solve general linear equations \f$Ax=b\f$
     111             :  * where \f$A\f$ is a linear operator. See \cite Saad2003, chapter 6.5 for a
     112             :  * description of the GMRES algorithm and Algorithm 9.6 for this implementation.
      113             :  * It is matrix-free, which means the operator \f$A\f$ need not be provided
      114             :  * explicitly as a matrix; only the operator action \f$A(x)\f$ must be
      115             :  * provided for an argument \f$x\f$.
     116             :  *
     117             :  * The GMRES algorithm does not require the operator \f$A\f$ to be symmetric or
     118             :  * positive-definite. Note that other algorithms such as conjugate gradients may
     119             :  * be more efficient for symmetric positive-definite operators.
     120             :  *
     121             :  * \par Convergence:
     122             :  * Given a set of \f$N_A\f$ equations (e.g. through an \f$N_A\times N_A\f$
     123             :  * matrix) the GMRES algorithm will converge to numerical precision in at most
     124             :  * \f$N_A\f$ iterations. However, depending on the properties of the linear
     125             :  * operator, an approximate solution can ideally be obtained in only a few
     126             :  * iterations. See \cite Saad2003, section 6.11.4 for details on the convergence
     127             :  * of the GMRES algorithm.
     128             :  *
     129             :  * \par Restarting:
     130             :  * This implementation of the GMRES algorithm supports restarting, as detailed
     131             :  * in \cite Saad2003, section 6.5.5. Since the GMRES algorithm iteratively
     132             :  * builds up an orthogonal basis of the solution space the cost of each
     133             :  * iteration increases linearly with the number of iterations. Therefore it is
     134             :  * sometimes helpful to restart the algorithm every \f$N_\mathrm{restart}\f$
     135             :  * iterations, discarding the set of basis vectors and starting again from the
     136             :  * current solution estimate. This strategy can improve the performance of the
     137             :  * solver, but note that the solver can stagnate for non-positive-definite
     138             :  * operators and is not guaranteed to converge within \f$N_A\f$ iterations
     139             :  * anymore. Set the `restart` argument of the constructor to
     140             :  * \f$N_\mathrm{restart}\f$ to activate restarting, or set it to 'None' to
     141             :  * deactivate restarting.
     142             :  *
     143             :  * \par Preconditioning:
     144             :  * This implementation of the GMRES algorithm also supports preconditioning.
     145             :  * You can provide a linear operator \f$P\f$ that approximates the inverse of
     146             :  * the operator \f$A\f$ to accelerate the convergence of the linear solve.
     147             :  * The algorithm is right-preconditioned, which allows the preconditioner to
     148             :  * change in every iteration ("flexible" variant). See \cite Saad2003, sections
     149             :  * 9.3.2 and 9.4.1 for details. This implementation follows Algorithm 9.6 in
     150             :  * \cite Saad2003.
     151             :  *
     152             :  * \par Improvements:
     153             :  * Further improvements can potentially be implemented for this algorithm, see
     154             :  * e.g. \cite Ayachour2003.
     155             :  *
     156             :  * \example
     157             :  * \snippet NumericalAlgorithms/LinearSolver/Test_Gmres.cpp gmres_example
     158             :  */
     159             : template <typename VarsType, typename Preconditioner = NoPreconditioner,
     160             :           typename LinearSolverRegistrars =
     161             :               tmpl::list<Registrars::Gmres<VarsType>>>
     162             : class Gmres final : public PreconditionedLinearSolver<Preconditioner,
     163           1 :                                                       LinearSolverRegistrars> {
     164             :  private:
     165           0 :   using Base =
     166             :       PreconditionedLinearSolver<Preconditioner, LinearSolverRegistrars>;
     167             : 
     168           0 :   struct ConvergenceCriteria {
     169           0 :     using type = Convergence::Criteria;
     170           0 :     static constexpr Options::String help =
     171             :         "Determine convergence of the algorithm";
     172             :   };
     173           0 :   struct Restart {
     174           0 :     using type = Options::Auto<size_t, Options::AutoLabel::None>;
     175           0 :     static constexpr Options::String help =
     176             :         "Iterations to run before restarting, or 'None' to disable restarting. "
     177             :         "Note that the solver is not guaranteed to converge anymore if you "
     178             :         "enable restarting.";
     179           0 :     static type suggested_value() noexcept { return {}; }
     180             :   };
     181           0 :   struct Verbosity {
     182           0 :     using type = ::Verbosity;
     183           0 :     static constexpr Options::String help = "Logging verbosity";
     184             :   };
     185             : 
     186             :  public:
     187           0 :   static constexpr Options::String help =
     188             :       "A serial GMRES iterative solver for nonsymmetric linear systems of\n"
     189             :       "equations Ax=b. It will converge to numerical precision in at most N_A\n"
     190             :       "iterations, where N_A is the number of equations represented by the\n"
     191             :       "linear operator A, but will ideally converge to a reasonable\n"
     192             :       "approximation of the solution x in only a few iterations.\n"
     193             :       "\n"
     194             :       "Preconditioning: Specify a preconditioner to run in every GMRES "
     195             :       "iteration to accelerate the solve, or 'None' to disable "
     196             :       "preconditioning. The choice of preconditioner can be crucial to obtain "
     197             :       "good convergence.\n"
     198             :       "\n"
     199             :       "Restarting: It is sometimes helpful to restart the algorithm every\n"
     200             :       "N_restart iterations to speed it up. Note that it can stagnate for\n"
     201             :       "non-positive-definite matrices and is not guaranteed to converge\n"
     202             :       "within N_A iterations anymore when restarting is activated.\n"
     203             :       "Activate restarting by setting the 'Restart' option to N_restart, or\n"
     204             :       "deactivate restarting by setting it to 'None'.";
     205           0 :   using options = tmpl::flatten<tmpl::list<
     206             :       ConvergenceCriteria, Verbosity, Restart,
     207             :       tmpl::conditional_t<std::is_same_v<Preconditioner, NoPreconditioner>,
     208             :                           tmpl::list<>, typename Base::PreconditionerOption>>>;
     209             : 
     210           0 :   Gmres(Convergence::Criteria convergence_criteria, ::Verbosity verbosity,
     211             :         std::optional<size_t> restart = std::nullopt,
     212             :         std::optional<typename Base::PreconditionerType> local_preconditioner =
     213             :             std::nullopt,
     214             :         const Options::Context& context = {});
     215             : 
     216           0 :   Gmres() = default;
     217           0 :   Gmres(Gmres&&) = default;
     218           0 :   Gmres& operator=(Gmres&&) = default;
     219           0 :   ~Gmres() override = default;
     220             : 
     221           0 :   Gmres(const Gmres& rhs) noexcept;
     222           0 :   Gmres& operator=(const Gmres& rhs) noexcept;
     223             : 
     224           0 :   std::unique_ptr<LinearSolver<LinearSolverRegistrars>> get_clone() const
     225             :       noexcept override {
     226             :     return std::make_unique<Gmres>(*this);
     227             :   }
     228             : 
     229             :   /// \cond
     230             :   explicit Gmres(CkMigrateMessage* m) noexcept;
     231             :   using PUP::able::register_constructor;
     232             :   WRAPPED_PUPable_decl_template(Gmres);  // NOLINT
     233             :   /// \endcond
     234             : 
     235           0 :   void initialize() noexcept {
     236             :     orthogonalization_history_.reserve(restart_ + 1);
     237             :     residual_history_.reserve(restart_ + 1);
     238             :     givens_sine_history_.reserve(restart_);
     239             :     givens_cosine_history_.reserve(restart_);
     240             :     basis_history_.resize(restart_ + 1);
     241             :     preconditioned_basis_history_.resize(restart_);
     242             :   }
     243             : 
     244           0 :   const Convergence::Criteria& convergence_criteria() const noexcept {
     245             :     return convergence_criteria_;
     246             :   }
     247           0 :   ::Verbosity verbosity() const noexcept { return verbosity_; }
     248           0 :   size_t restart() const noexcept { return restart_; }
     249             : 
     250           0 :   void pup(PUP::er& p) noexcept override {  // NOLINT
     251             :     Base::pup(p);
     252             :     p | convergence_criteria_;
     253             :     p | verbosity_;
     254             :     p | restart_;
     255             :     if (p.isUnpacking()) {
     256             :       initialize();
     257             :     }
     258             :   }
     259             : 
     260             :   template <typename LinearOperator, typename SourceType,
     261             :             typename IterationCallback = NoIterationCallback>
     262           0 :   Convergence::HasConverged solve(
     263             :       gsl::not_null<VarsType*> initial_guess_in_solution_out,
     264             :       const LinearOperator& linear_operator, const SourceType& source,
     265             :       const IterationCallback& iteration_callback =
     266             :           NoIterationCallback{}) const noexcept;
     267             : 
     268           0 :   void reset() noexcept override {
     269             :     // Nothing to reset. Only call into base class to reset preconditioner.
     270             :     Base::reset();
     271             :   }
     272             : 
     273             :  private:
     274           0 :   Convergence::Criteria convergence_criteria_{};
     275           0 :   ::Verbosity verbosity_{::Verbosity::Verbose};
     276           0 :   size_t restart_{};
     277             : 
     278             :   // Memory buffers to avoid re-allocating memory for successive solves:
     279             :   // The `orthogonalization_history_` is built iteratively from inner products
     280             :   // between existing and potential basis vectors and then Givens-rotated to
     281             :   // become upper-triangular.
     282           0 :   mutable DenseMatrix<double> orthogonalization_history_{};
      283             :   // The last entry of `residual_history_` holds the remaining residual. Its
      284             :   // other entries are the right-hand side `g` in `R * y = g` for the minimal-
      285             :   // residual vector `y`, with `R` the upper-triangular `orthogonalization_history_`.
     286           0 :   mutable DenseVector<double> residual_history_{};
     287             :   // These represent the accumulated Givens rotations up to the current
     288             :   // iteration.
     289           0 :   mutable DenseVector<double> givens_sine_history_{};
     290           0 :   mutable DenseVector<double> givens_cosine_history_{};
     291             :   // These represent the orthogonal Krylov-subspace basis that is constructed
     292             :   // iteratively by Arnoldi-orthogonalizing a new vector in each iteration and
     293             :   // appending it to the `basis_history_`.
     294           0 :   mutable std::vector<VarsType> basis_history_{};
     295             :   // When a preconditioner is used it is applied to each new basis vector. The
     296             :   // preconditioned basis is used to construct the solution when the algorithm
     297             :   // has converged.
     298           0 :   mutable std::vector<VarsType> preconditioned_basis_history_{};
     299             : };
     300             : 
     301             : template <typename VarsType, typename Preconditioner,
     302             :           typename LinearSolverRegistrars>
     303             : Gmres<VarsType, Preconditioner, LinearSolverRegistrars>::Gmres(
     304             :     Convergence::Criteria convergence_criteria, ::Verbosity verbosity,
     305             :     std::optional<size_t> restart,
     306             :     std::optional<typename Base::PreconditionerType> local_preconditioner,
     307             :     const Options::Context& context)
     308             :     // clang-tidy: trivially copyable
     309             :     : Base(std::move(local_preconditioner)),
     310             :       convergence_criteria_(std::move(convergence_criteria)),  // NOLINT
     311             :       verbosity_(std::move(verbosity)),                        // NOLINT
     312             :       restart_(restart.value_or(convergence_criteria_.max_iterations)) {
     313             :   if (restart_ == 0) {
     314             :     PARSE_ERROR(context,
     315             :                 "Can't restart every '0' iterations. Set to a nonzero "
     316             :                 "number, or to 'None' if you meant to disable restarting.");
     317             :   }
     318             :   initialize();
     319             : }
     320             : 
      321             : // Define the copy constructor and copy-assignment operator. They don't have to
      322             : // copy the memory buffers, only resize them, and they take care of copying the
      323             : // preconditioner by calling into the base class.
     324             : template <typename VarsType, typename Preconditioner,
     325             :           typename LinearSolverRegistrars>
     326             : Gmres<VarsType, Preconditioner, LinearSolverRegistrars>::Gmres(
     327             :     const Gmres& rhs) noexcept
     328             :     : Base(rhs),
     329             :       convergence_criteria_(rhs.convergence_criteria_),
     330             :       verbosity_(rhs.verbosity_),
     331             :       restart_(rhs.restart_) {
     332             :   initialize();
     333             : }
     334             : template <typename VarsType, typename Preconditioner,
     335             :           typename LinearSolverRegistrars>
     336             : Gmres<VarsType, Preconditioner, LinearSolverRegistrars>&
     337             : Gmres<VarsType, Preconditioner, LinearSolverRegistrars>::operator=(
     338             :     const Gmres& rhs) noexcept {
     339             :   Base::operator=(rhs);
     340             :   convergence_criteria_ = rhs.convergence_criteria_;
     341             :   verbosity_ = rhs.verbosity_;
     342             :   restart_ = rhs.restart_;
     343             :   initialize();
     344             :   return *this;
     345             : }
     346             : 
     347             : /// \cond
     348             : template <typename VarsType, typename Preconditioner,
     349             :           typename LinearSolverRegistrars>
     350             : Gmres<VarsType, Preconditioner, LinearSolverRegistrars>::Gmres(
     351             :     CkMigrateMessage* m) noexcept
     352             :     : Base(m) {}
     353             : /// \endcond
     354             : 
     355             : template <typename VarsType, typename Preconditioner,
     356             :           typename LinearSolverRegistrars>
     357             : template <typename LinearOperator, typename SourceType,
     358             :           typename IterationCallback>
     359             : Convergence::HasConverged
     360           0 : Gmres<VarsType, Preconditioner, LinearSolverRegistrars>::solve(
     361             :     const gsl::not_null<VarsType*> initial_guess_in_solution_out,
     362             :     const LinearOperator& linear_operator, const SourceType& source,
     363             :     const IterationCallback& iteration_callback) const noexcept {
     364             :   constexpr bool use_preconditioner =
     365             :       not std::is_same_v<Preconditioner, NoPreconditioner>;
     366             :   constexpr bool use_iteration_callback =
     367             :       not std::is_same_v<IterationCallback, NoIterationCallback>;
     368             : 
     369             :   // Could pre-allocate memory for the basis-history vectors here. Not doing
     370             :   // that for now because we don't know how many iterations we'll need.
     371             :   // Estimating the number of iterations and pre-allocating memory is a possible
     372             :   // performance optimization.
     373             : 
     374             :   auto& solution = *initial_guess_in_solution_out;
     375             :   Convergence::HasConverged has_converged{};
     376             :   size_t iteration = 0;
     377             : 
     378             :   while (not has_converged) {
     379             :     const auto& initial_guess = *initial_guess_in_solution_out;
     380             :     auto& initial_operand = basis_history_[0];
     381             :     linear_operator(make_not_null(&initial_operand), initial_guess);
     382             :     initial_operand *= -1.;
     383             :     initial_operand += source;
     384             :     const double initial_residual_magnitude =
     385             :         sqrt(inner_product(initial_operand, initial_operand));
     386             :     has_converged = Convergence::HasConverged{convergence_criteria_, iteration,
     387             :                                               initial_residual_magnitude,
     388             :                                               initial_residual_magnitude};
     389             :     if constexpr (use_iteration_callback) {
     390             :       iteration_callback(has_converged);
     391             :     }
     392             :     if (UNLIKELY(has_converged)) {
     393             :       break;
     394             :     }
     395             :     initial_operand /= initial_residual_magnitude;
     396             :     residual_history_.resize(1);
     397             :     residual_history_[0] = initial_residual_magnitude;
     398             :     for (size_t k = 0; k < restart_; ++k) {
     399             :       auto& operand = basis_history_[k + 1];
     400             :       if constexpr (use_preconditioner) {
     401             :         if (this->has_preconditioner()) {
     402             :           // Begin the preconditioner at an initial guess of 0. Not all
     403             :           // preconditioners take the initial guess into account.
     404             :           preconditioned_basis_history_[k] =
     405             :               make_with_value<VarsType>(initial_operand, 0.);
     406             :           this->preconditioner().solve(
     407             :               make_not_null(&preconditioned_basis_history_[k]), linear_operator,
     408             :               basis_history_[k]);
     409             :         }
     410             :       }
     411             :       linear_operator(make_not_null(&operand),
     412             :                       this->has_preconditioner()
     413             :                           ? preconditioned_basis_history_[k]
     414             :                           : basis_history_[k]);
     415             :       // Find a new orthogonal basis vector of the Krylov subspace
     416             :       gmres::detail::arnoldi_orthogonalize(
     417             :           make_not_null(&operand), make_not_null(&orthogonalization_history_),
     418             :           basis_history_, k);
     419             :       // Least-squares solve for the minimal residual
     420             :       gmres::detail::solve_minimal_residual(
     421             :           make_not_null(&orthogonalization_history_),
     422             :           make_not_null(&residual_history_),
     423             :           make_not_null(&givens_sine_history_),
     424             :           make_not_null(&givens_cosine_history_), k);
     425             :       ++iteration;
     426             :       has_converged = Convergence::HasConverged{
     427             :           convergence_criteria_, iteration, abs(residual_history_[k + 1]),
     428             :           initial_residual_magnitude};
     429             :       if constexpr (use_iteration_callback) {
     430             :         iteration_callback(has_converged);
     431             :       }
     432             :       if (UNLIKELY(has_converged)) {
     433             :         break;
     434             :       }
     435             :     }
     436             :     // Find the vector w.r.t. the constructed orthogonal basis of the Krylov
     437             :     // subspace that minimizes the residual
     438             :     const auto minres = gmres::detail::minimal_residual_vector(
     439             :         orthogonalization_history_, residual_history_);
     440             :     // Construct the solution from the orthogonal basis and the minimal residual
     441             :     // vector
     442             :     for (size_t i = 0; i < minres.size(); ++i) {
     443             :       solution += minres[i] * gsl::at(this->has_preconditioner()
     444             :                                           ? preconditioned_basis_history_
     445             :                                           : basis_history_,
     446             :                                       i);
     447             :     }
     448             :   }
     449             :   return has_converged;
     450             : }
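
For reference, the final loop in `solve` performs the standard GMRES solution update; a sketch, with \(x_0\) the initial guess, \(y\) the coefficients returned by `minimal_residual_vector`, and \(z_i\) the vectors of the (preconditioned, if a preconditioner is set) basis history:

\[
x = x_0 + \sum_i y_i\, z_i.
\]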
     451             : 
     452             : /// \cond
     453             : template <typename VarsType, typename Preconditioner,
     454             :           typename LinearSolverRegistrars>
     455             : // NOLINTNEXTLINE
     456             : PUP::able::PUP_ID
     457             :     Gmres<VarsType, Preconditioner, LinearSolverRegistrars>::my_PUP_ID = 0;
     458             : /// \endcond
     459             : 
     460             : }  // namespace Serial
     461             : }  // namespace LinearSolver
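
To complement the `gmres_example` snippet referenced in the class documentation, here is a minimal usage sketch of the interface declared above. It assumes `DenseVector<double>` as `VarsType`, that `Convergence::Criteria` can be constructed from `{max_iterations, absolute_residual, relative_residual}`, and that the wrapper function and numeric values are purely illustrative rather than taken from this file.

#include "DataStructures/DenseMatrix.hpp"
#include "DataStructures/DenseVector.hpp"
#include "NumericalAlgorithms/LinearSolver/Gmres.hpp"

void gmres_usage_sketch() {
  // A small nonsymmetric operator, applied matrix-free through a lambda.
  const DenseMatrix<double> matrix{{4., 1.}, {3., 1.}};
  const auto linear_operator =
      [&matrix](const gsl::not_null<DenseVector<double>*> result,
                const DenseVector<double>& operand) noexcept {
        *result = matrix * operand;
      };
  const DenseVector<double> source{1., 2.};

  // Construct the solver. The convergence criteria (at most 10 iterations or
  // an absolute residual of 1e-14) are assumed example values; restarting and
  // preconditioning are left at their defaults (disabled).
  const LinearSolver::Serial::Gmres<DenseVector<double>> solver{
      Convergence::Criteria{10, 1.e-14, 0.}, ::Verbosity::Verbose};

  // The initial guess is passed in and overwritten with the solution.
  DenseVector<double> solution{0., 0.};
  const auto has_converged =
      solver.solve(make_not_null(&solution), linear_operator, source);
  (void)has_converged;  // check the convergence status / reason here
}

Restarting could be enabled by passing a restart interval as the third constructor argument, and a preconditioner by supplying a non-default `Preconditioner` template parameter together with the corresponding constructor argument, as documented above.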

Generated by: LCOV version 1.14