Unverified Commit 8e7cac89 authored by Peter Doak's avatar Peter Doak Committed by GitHub
Browse files

Merge pull request #312 from PDoakORNL/clang_fixes

Issues revealed by Apple Clang or mainline Clang on macOS
parents e37b11f7 3829aa98
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -254,7 +254,7 @@ private:

  ValueType* data_ = nullptr;

  template <class ScalarType2, DeviceType device_name2>
  template <class ScalarType2, DeviceType device_name2, class ALLOC2>
  friend class dca::linalg::Matrix;
};

+4 −4
Original line number Diff line number Diff line
@@ -295,8 +295,8 @@ void insertRow(Matrix<Scalar, CPU, ALLOC>& mat, int i) {
// Preconditions: mat is a square matrix.
// Postconditions: ipiv and work are resized to the needed dimension.
// \todo consider doing inverse at full precision regardless of incoming Scalar precision
template <typename Scalar, DeviceType device_name, template <typename, DeviceType> class MatrixType>
void inverse(MatrixType<Scalar, device_name>& mat, Vector<int, CPU>& ipiv,
  template <typename Scalar, DeviceType device_name, class ALLOC, template <typename, DeviceType, class> class MatrixType>
  void inverse(MatrixType<Scalar, device_name, ALLOC>& mat, Vector<int, CPU>& ipiv,
             Vector<Scalar, device_name>& work) {
  assert(mat.is_square());

@@ -312,8 +312,8 @@ void inverse(MatrixType<Scalar, device_name>& mat, Vector<int, CPU>& ipiv,
                                        work.ptr(), lwork);
}

template <typename Scalar, DeviceType device_name, template <typename, DeviceType> class MatrixType>
void inverse(MatrixType<Scalar, device_name>& mat) {
  template <typename Scalar, DeviceType device_name, class ALLOC, template <typename, DeviceType, class> class MatrixType>
  void inverse(MatrixType<Scalar, device_name, ALLOC>& mat) {
  Vector<int, CPU> ipiv;
  Vector<Scalar, device_name> work;
  inverse(mat, ipiv, work);
+2 −2
Original line number Diff line number Diff line
@@ -33,8 +33,8 @@ namespace util {

// Returns optimal lwork for inverse.
// In: mat
template <typename ScalarType,  template <typename, DeviceType> class MatrixType>
int getInverseWorkSize(MatrixType<ScalarType, CPU>& mat) {
  template <typename ScalarType, class ALLOC, template <typename, DeviceType, class> class MatrixType>
int getInverseWorkSize(MatrixType<ScalarType, CPU, ALLOC>& mat) {
  assert(mat.is_square());

  ScalarType tmp;
+2 −1
Original line number Diff line number Diff line
@@ -100,7 +100,8 @@ const linalg::Matrix<dca::util::ComplexAlias<Scalar>, linalg::CPU>& SpaceTransfo
      const auto& r = RDmn::parameter_type::get_elements()[j];
      for (int i = 0; i < KDmn::dmn_size(); ++i) {
        const auto& k = KDmn::parameter_type::get_elements()[i];
	auto temp_exp = std::exp(dca::util::ComplexAlias<dca::util::RealAlias<Scalar>>{0, util::innerProduct(k, r)});
	using Real = dca::util::RealAlias<Scalar>;
	auto temp_exp = std::exp(dca::util::ComplexAlias<Real>{0, static_cast<Real>(util::innerProduct(k, r))});
        T(i, j) = typename decltype(T)::ValueType{temp_exp.real(), temp_exp.imag()};
      }
    }
+11 −3
Original line number Diff line number Diff line
@@ -113,7 +113,10 @@ public:
  void setSampleConfiguration(const io::Buffer&) {}

  /** used for testing */
  auto& getG0() { return g0_; };
  auto& getG0() {
    return g0_;
  };

protected:
  void warmUp(Walker& walker);

@@ -435,8 +438,13 @@ void CtauxClusterSolver<device_t, Parameters, Data, DIST>::computeErrorBars() {
    std::vector<typename Data::TpGreensFunction> G4 = accumulator_.get_sign_times_G4();

    for (std::size_t channel = 0; channel < G4.size(); ++channel) {
      if constexpr (dca::util::IsComplex_t<Scalar>::value)
        G4[channel] /= TpComplex{parameters_.get_beta() * parameters_.get_beta()} *
                     TpComplex{accumulator_.get_accumulated_sign().sum()};
                       TpComplex{static_cast<Scalar>(accumulator_.get_accumulated_sign().sum())};
      else
        G4[channel] /= TpComplex{parameters_.get_beta() * parameters_.get_beta()} *
                       TpComplex{static_cast<Real>(accumulator_.get_accumulated_sign().sum())};

      concurrency_.average_and_compute_stddev(G4[channel], data_.get_G4_stdv()[channel]);
    }
  }