Commit 135a8865 authored by gbalduzz's avatar gbalduzz Committed by Doak, Peter W.
Browse files

Compute MC log weight and recompute configuration at initialization and warm-up.

parent ee0de8cf
Loading
Loading
Loading
Loading
+66 −4
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@
// - multiply
// - trsm
// - determinant
// - logDeterminant
// - eigensolver (non-symmetric / symmetric / Hermitian)
// - pseudoInverse

@@ -1216,8 +1217,8 @@ void pseudoInverse(const Matrix<Scalar, CPU>& a, Matrix<Scalar, CPU>& a_inv, dou
// Computes (in place) the determinant of the matrix.
// Returns: determinant.
// Postcondition: M is its LU decomposition.
template <template <typename, DeviceType> class MatrixType, typename Scalar, DeviceType device>
double determinantIP(MatrixType<Scalar, device>& M) {
template <template <typename, DeviceType> class MatrixType, typename Scalar>
Scalar determinantIP(MatrixType<Scalar, CPU>& M) {
  assert(M.nrCols() == M.nrRows());
  const int n = M.nrCols();
  std::vector<int> ipiv(n);
@@ -1243,12 +1244,73 @@ double determinantIP(MatrixType<Scalar, device>& M) {

// Copy and computes the determinant of the matrix.
// Returns: determinant.
template <typename Scalar>
double determinant(const Matrix<Scalar, CPU>& M) {
template <typename Scalar, DeviceType device>
double determinant(const Matrix<Scalar, device>& M) {
  Matrix<Scalar, CPU> M_copy(M);
  return determinantIP(M_copy);
}

// Returns: logarithm of the absolute value of the determinant and the sign of the determinant,
//          or zero if the determinant is zero.
// Postcondition: M is its LU decomposition.
template <template <typename, DeviceType> class MatrixType, typename Scalar>
std::pair<Scalar, int> logDeterminantIP(MatrixType<Scalar, CPU>& M, std::vector<int>& ipiv) {
  assert(M.is_square());
  static_assert(std::is_same_v<Scalar, float> || std::is_same_v<Scalar, double>,
                " This function is defined only for Real numbers");

  const int dim = M.nrCols();
  ipiv.resize(dim);

  // Factorize M = P * L * U in place. A positive LAPACK info value signals an
  // exactly singular matrix, which maps to the {0, 0} return value.
  try {
    lapack::getrf(dim, dim, M.ptr(), M.leadingDimension(), ipiv.data());
  }
  catch (lapack::util::LapackException& err) {
    if (err.info() <= 0)
      throw(std::logic_error("LU decomposition failed."));
    return {0., 0};
  }

  // det(M) = det(P) * prod_i U(i, i): accumulate log|U(i, i)| and flip the sign
  // once per negative diagonal entry and once per actual row interchange.
  Scalar abs_log = 0.;
  int parity = 1;

  for (int i = 0; i < dim; ++i) {
    const auto diag = M(i, i);
    abs_log += std::log(std::abs(diag));
    if (diag < 0)
      parity = -parity;
    if (ipiv[i] != i + 1)  // LAPACK pivot indices are one-based.
      parity = -parity;
  }

  return {abs_log, parity};
}

template <template <typename, DeviceType> class MatrixType, typename Scalar, DeviceType device>
auto logDeterminant(const MatrixType<Scalar, device>& m) {
  Matrix<Scalar, CPU> m_copy(m);
  std::vector<int> ipiv;
  return logDeterminantIP(m_copy, ipiv);
}

// Replaces (in place) mat with its inverse.
// Returns: the log of the absolute value of the determinant of the inverse
//          (i.e. minus the log-determinant of the input) and the sign of the determinant.
// Throws: std::logic_error if mat is singular.
template <typename Scalar, template <typename, DeviceType> class MatrixType>
std::pair<Scalar, int> inverseAndLogDeterminant(MatrixType<Scalar, CPU>& mat) {
  std::vector<int> pivots;
  const auto [abs_log_det, det_sign] = logDeterminantIP(mat, pivots);

  if (det_sign == 0)
    throw(std::logic_error("Singular matrix"));

  // Complete the inversion from the LU factors that logDeterminantIP left in mat.
  const int work_size = util::getInverseWorkSize(mat);
  std::vector<Scalar> workspace(work_size);

  lapack::UseDevice<CPU>::getri(mat.nrRows(), mat.ptr(), mat.leadingDimension(), pivots.data(),
                                workspace.data(), work_size);

  return {-abs_log_det, det_sign};
}

template <typename Scalar>
bool areNear(const Matrix<Scalar, CPU>& A, const Matrix<Scalar, CPU>& B, const double err = 1e-16) {
  if (A.size() != B.size())
+1 −1
Original line number Diff line number Diff line
@@ -339,7 +339,7 @@ void CtauxAccumulator<device_t, Parameters, Data, Real>::updateFrom(walker_type&

  current_sign = walker.get_sign();

  const linalg::util::CudaEvent* event = walker.compute_M(M_);
  const linalg::util::CudaEvent* event = walker.computeM(M_);

  single_particle_accumulator_obj.synchronizeCopy();
  two_particle_accumulator_.synchronizeCopy();
+1 −1
Original line number Diff line number Diff line
@@ -333,7 +333,7 @@ void CtauxClusterSolver<device_t, Parameters, Data>::warmUp(Walker& walker) {
    walker.updateShell(i, parameters_.get_warm_up_sweeps());
  }

  walker.is_thermalized() = true;
  walker.markThermalized();

  if (concurrency_.id() == concurrency_.first())
    std::cout << "\n\t\t warm-up has ended\n" << std::endl;
+217 −213

File changed.

Preview size limit exceeded, changes collapsed.

+34 −12
Original line number Diff line number Diff line
@@ -71,16 +71,14 @@ public:
    return configuration_;
  }

  int getSign() const {
    return sign_;
  }

  void computeM(MatrixPair& m_accum) const;

  // Reset the counters and recompute the configuration sign and weight.
  void markThermalized();

  // Recompute the matrix M from the configuration in O(expansion_order^3) time.
  void setMFromConfig();
  // Postcondition: sign_ and mc_log_weight_ are recomputed.
  virtual void setMFromConfig();

  bool is_thermalized() const {
    return thermalized_;
@@ -92,10 +90,14 @@ public:
  double avgOrder() const {
    return order_avg_.count() ? order_avg_.mean() : order();
  }
  int sign() const {
  int get_sign() const {
    return sign_;
  }

  Real get_MC_log_weight() const {
    return mc_log_weight_;
  }

  double acceptanceRatio() const {
    return Real(n_accepted_) / Real(n_steps_);
  }
@@ -106,6 +108,10 @@ public:
    return configuration_;
  }

  const auto& get_matrix_configuration() const {
    return configuration_.get_sectors();
  }

  void updateShell(int meas_id, int meas_to_do) const;

  void printSummary() const;
@@ -196,6 +202,8 @@ protected: // Members.

  float flop_ = 0.;

  double mc_log_weight_ = 0;

private:
  linalg::Vector<int, linalg::CPU> ipiv_;
  linalg::Vector<Real, linalg::CPU> work_;
@@ -223,6 +231,9 @@ CtintWalkerBase<Parameters, Real>::CtintWalkerBase(const Parameters& parameters_
template <class Parameters, typename Real>
void CtintWalkerBase<Parameters, Real>::initialize() {
  assert(total_interaction_);
  sign_ = 1;
  mc_log_weight_ = 1.;

  if (!configuration_.size()) {  // Do not initialize config if it was read.
    while (parameters_.getInitialConfigurationSize() > configuration_.size()) {
      configuration_.insertRandom(rng_);
@@ -236,9 +247,12 @@ void CtintWalkerBase<Parameters, Real>::initialize() {

template <class Parameters, typename Real>
void CtintWalkerBase<Parameters, Real>::setMFromConfig() {
  // compute Mij = g0(t_i,t_j) - I* alpha(s_i)
  mc_log_weight_ = 1.;
  sign_ = 1;

  for (int s = 0; s < 2; ++s) {
    // compute Mij = g0(t_i,t_j) - I* alpha(s_i)

    const auto& sector = configuration_.getSector(s);
    auto& M = M_[s];
    const int n = sector.size();
@@ -249,10 +263,17 @@ void CtintWalkerBase<Parameters, Real>::setMFromConfig() {
      for (int i = 0; i < n; ++i)
        M(i, j) = d_builder_ptr_->computeD(i, j, sector);

    const Real det = linalg::matrixop::inverseAndDeterminant(M);
    if (M.nrRows()) {
      const auto [log_det, sign] = linalg::matrixop::inverseAndLogDeterminant(M);
      mc_log_weight_ += log_det;
      sign_ *= sign;
    }
  }

    // Set the initial sign
    if (det < 0)
  for (int i = 0; i < configuration_.size(); ++i) {
    const Real term = -configuration_.getStrength(i);
    mc_log_weight_ += std::log(std::abs(term));
    if (term < 0)
      sign_ *= -1;
  }
}
@@ -268,8 +289,6 @@ void CtintWalkerBase<Parameters, Real>::updateSweepAverages() {

template <class Parameters, typename Real>
void CtintWalkerBase<Parameters, Real>::markThermalized() {
  if (partial_order_avg_.mean() == 0)
    throw(std::runtime_error("The average expansion order is 0."));
  thermalized_ = true;

  nb_steps_per_sweep_ =
@@ -279,6 +298,9 @@ void CtintWalkerBase<Parameters, Real>::markThermalized() {
  sign_avg_.reset();
  n_accepted_ = 0;
  n_steps_ = 0;

  // Recompute the Monte Carlo weight.
  setMFromConfig();
}

template <class Parameters, typename Real>
Loading