Commit d6c3c61c authored by gbalduzz

Removed MPITypeMap::factor.

parent 71f614a2
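
The gist of the change: with factor() removed, every count handed to MPI is a plain element count. Complex scalars now map to a native complex MPI datatype (MPI_COMPLEX / MPI_DOUBLE_COMPLEX) instead of being described as factor() == 2 units of the underlying real type, so callers no longer scale their counts. A minimal sketch of the new mapping for std::complex<double>, simplified from the full header diffed below (the primary template and other specializations are omitted here):

// Sketch only: the real header specializes MPITypeMap for many more types.
#include <complex>
#include <mpi.h>

template <typename T>
struct MPITypeMap;  // primary template left out of this sketch

template <>
struct MPITypeMap<std::complex<double>> {
  // Before this commit: factor() == 2 and value() == MPI_DOUBLE, so every
  // count passed to MPI had to be multiplied by factor().
  static MPI_Datatype value() {
    return MPI_DOUBLE_COMPLEX;
  }
};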
+1 −1
@@ -29,7 +29,7 @@ public:
  void max(Scalar& value) const {
    Scalar result;

-    MPI_Allreduce(&value, &result, MPITypeMap<Scalar>::factor(), MPITypeMap<Scalar>::value(),
+    MPI_Allreduce(&value, &result, 1, MPITypeMap<Scalar>::value(),
                  MPI_MAX, MPIProcessorGrouping::get());

    value = result;
+1 −1
@@ -29,7 +29,7 @@ public:
  void min(Scalar& value) const {
    Scalar result;

-    MPI_Allreduce(&value, &result, MPITypeMap<Scalar>::factor(), MPITypeMap<Scalar>::value(),
+    MPI_Allreduce(&value, &result, 1, MPITypeMap<Scalar>::value(),
                  MPI_MIN, MPIProcessorGrouping::get());

    value = result;
+4 −4
@@ -149,8 +149,8 @@ template <typename scalar_type>
void MPICollectiveSum::sum(scalar_type& value) const {
  scalar_type result;

-  MPI_Allreduce(&value, &result, MPITypeMap<scalar_type>::factor(),
-                MPITypeMap<scalar_type>::value(), MPI_SUM, MPIProcessorGrouping::get());
+  MPI_Allreduce(&value, &result, 1, MPITypeMap<scalar_type>::value(), MPI_SUM,
+                MPIProcessorGrouping::get());

  value = result;
}
@@ -513,8 +513,8 @@ void MPICollectiveSum::sum(const T* in, T* out, std::size_t n) const {

  for (std::size_t start = 0; start < n; start += max_size) {
    const int msg_size = std::min(n - start, max_size);
-    MPI_Allreduce(in + start, out + start, MPITypeMap<T>::factor() * msg_size,
-                  MPITypeMap<T>::value(), MPI_SUM, MPIProcessorGrouping::get());
+    MPI_Allreduce(in + start, out + start, msg_size, MPITypeMap<T>::value(), MPI_SUM,
+                  MPIProcessorGrouping::get());
  }
}

+21 −24
@@ -76,8 +76,7 @@ template <typename scalar_type>
int MPIPacking::get_buffer_size(scalar_type /*item*/) const {
  int size(0);

-  MPI_Pack_size(MPITypeMap<scalar_type>::factor(), MPITypeMap<scalar_type>::value(),
-                MPIProcessorGrouping::get(), &size);
+  MPI_Pack_size(1, MPITypeMap<scalar_type>::value(), MPIProcessorGrouping::get(), &size);

  return size;
}
@@ -97,7 +96,7 @@ int MPIPacking::get_buffer_size(const std::basic_string<scalar_type>& str) const

  int result = get_buffer_size(str.size());

-  int count = str.size() * MPITypeMap<scalar_type>::factor();
+  int count = str.size();

  {
    int size(0);
@@ -113,7 +112,7 @@ template <typename scalar_type>
int MPIPacking::get_buffer_size(const std::vector<scalar_type>& v) const {
  int result = get_buffer_size(v.size());

-  int count = v.size() * MPITypeMap<scalar_type>::factor();
+  int count = v.size();

  {
    int size(0);
@@ -164,7 +163,7 @@ template <typename scalar_type, class dmn_type>
int MPIPacking::get_buffer_size(const func::function<scalar_type, dmn_type>& f) const {
  int result = get_buffer_size(f.size());

-  int count = f.size() * MPITypeMap<scalar_type>::factor();
+  int count = f.size();

  {
    int size = 0;
@@ -180,7 +179,7 @@ template <typename scalar_type>
void MPIPacking::pack(char* buffer, int size, int& off_set, scalar_type item) const {
  const scalar_type* tPtr(&item);

-  MPI_Pack(tPtr, MPITypeMap<scalar_type>::factor(), MPITypeMap<scalar_type>::value(), buffer, size,
+  MPI_Pack(tPtr, 1, MPITypeMap<scalar_type>::value(), buffer, size,
           &off_set, MPIProcessorGrouping::get());
}

@@ -201,8 +200,8 @@ void MPIPacking::pack(char* buffer, int size, int& off_set,
  int vectorSize(str.size());
  pack(buffer, size, off_set, vectorSize);

-  MPI_Pack(&str[0], vectorSize * MPITypeMap<scalar_type>::factor(),
-           MPITypeMap<scalar_type>::value(), buffer, size, &off_set, MPIProcessorGrouping::get());
+  MPI_Pack(&str[0], vectorSize, MPITypeMap<scalar_type>::value(), buffer, size, &off_set,
+           MPIProcessorGrouping::get());
}

template <typename scalar_type>
@@ -211,8 +210,8 @@ void MPIPacking::pack(char* buffer, int size, int& off_set, const std::vector<sc
  int vectorSize(v.size());
  pack(buffer, size, off_set, vectorSize);

-  MPI_Pack(&v[0], vectorSize * MPITypeMap<scalar_type>::factor(), MPITypeMap<scalar_type>::value(),
-           buffer, size, &off_set, MPIProcessorGrouping::get());
+  MPI_Pack(&v[0], vectorSize, MPITypeMap<scalar_type>::value(), buffer, size, &off_set,
+           MPIProcessorGrouping::get());
}

template <typename scalar_type>
@@ -260,16 +259,16 @@ void MPIPacking::pack(char* buffer, int size, int& off_set,
  int function_size(f.size());
  pack(buffer, size, off_set, function_size);

-  MPI_Pack(f.values(), function_size * MPITypeMap<scalar_type>::factor(),
-           MPITypeMap<scalar_type>::value(), buffer, size, &off_set, MPIProcessorGrouping::get());
+  MPI_Pack(f.values(), function_size, MPITypeMap<scalar_type>::value(), buffer, size, &off_set,
+           MPIProcessorGrouping::get());
}

template <typename scalar_type>
void MPIPacking::unpack(char* buffer, int size, int& off_set, scalar_type& item) const {
  scalar_type tmp;

-  MPI_Unpack(buffer, size, &off_set, &tmp, MPITypeMap<scalar_type>::factor(),
-             MPITypeMap<scalar_type>::value(), MPIProcessorGrouping::get());
+  MPI_Unpack(buffer, size, &off_set, &tmp, 1, MPITypeMap<scalar_type>::value(),
+             MPIProcessorGrouping::get());

  item = tmp;
}
@@ -303,9 +302,8 @@ void MPIPacking::unpack(char* buffer, int size, int& off_set,
  str.resize(vectorSize);

  // UnPack the vector
-  MPI_Unpack(buffer, size, &off_set, static_cast<scalar_type*>(&str[0]),
-             MPITypeMap<scalar_type>::factor() * vectorSize, MPITypeMap<scalar_type>::value(),
-             MPIProcessorGrouping::get());
+  MPI_Unpack(buffer, size, &off_set, static_cast<scalar_type*>(&str[0]), 1 * vectorSize,
+             MPITypeMap<scalar_type>::value(), MPIProcessorGrouping::get());
}

template <typename scalar_type>
@@ -317,9 +315,8 @@ void MPIPacking::unpack(char* buffer, int size, int& off_set, std::vector<scalar
  v.resize(vectorSize);

  // UnPack the vector
-  MPI_Unpack(buffer, size, &off_set, static_cast<scalar_type*>(&v[0]),
-             MPITypeMap<scalar_type>::factor() * vectorSize, MPITypeMap<scalar_type>::value(),
-             MPIProcessorGrouping::get());
+  MPI_Unpack(buffer, size, &off_set, static_cast<scalar_type*>(&v[0]), 1 * vectorSize,
+             MPITypeMap<scalar_type>::value(), MPIProcessorGrouping::get());
}

template <typename scalar_type>
@@ -384,11 +381,11 @@ void MPIPacking::unpack(char* buffer, int size, int& off_set,
  unpack(buffer, size, off_set, function_size);

  // UnPack the vector
-  MPI_Unpack(buffer, size, &off_set, f.values(), MPITypeMap<scalar_type>::factor() * function_size,
-             MPITypeMap<scalar_type>::value(), MPIProcessorGrouping::get());
+  MPI_Unpack(buffer, size, &off_set, f.values(), function_size, MPITypeMap<scalar_type>::value(),
+             MPIProcessorGrouping::get());
}

-}  // parallel
-}  // dca
+}  // namespace parallel
+}  // namespace dca

#endif  // DCA_PARALLEL_MPI_CONCURRENCY_MPI_PACKING_HPP
+14 −68
@@ -26,12 +26,7 @@ namespace parallel {
// dca::parallel::

template <typename T>
-class MPITypeMap {
-public:
-  static constexpr std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap {
  template <typename = std::enable_if_t<std::is_enum<T>::value>>
  static MPI_Datatype value() {
    return MPITypeMap<std::underlying_type_t<T>>::value();
@@ -39,24 +34,14 @@ public:
};

template <>
-class MPITypeMap<bool> {
-public:
-  static std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap<bool> {
  static MPI_Datatype value() {
    return MPI_CXX_BOOL;
  }
};

template <>
-class MPITypeMap<char> {
-public:
-  static std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap<char> {
  static MPI_Datatype value() {
    return MPI_CHAR;
  }
@@ -64,10 +49,6 @@ public:

template <>
struct MPITypeMap<std::uint8_t> {
-  static constexpr std::size_t factor() {
-    return 1;
-  }
-
  static MPI_Datatype value() {
    return MPI_UNSIGNED_CHAR;
  }
@@ -75,90 +56,55 @@ struct MPITypeMap<std::uint8_t> {


template <>
-class MPITypeMap<int> {
-public:
-  static std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap<int> {
  static MPI_Datatype value() {
    return MPI_INT;
  }
};

template <>
-class MPITypeMap<std::size_t> {
-public:
-  static std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap<std::size_t> {
  static MPI_Datatype value() {
    return MPI_UNSIGNED_LONG;
  }
};

template <>
-class MPITypeMap<long long int> {
-public:
-  static std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap<long long int> {
  static MPI_Datatype value() {
    return MPI_LONG_LONG_INT;
  }
};

template <>
-class MPITypeMap<float> {
-public:
-  static std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap<float> {
  static MPI_Datatype value() {
    return MPI_FLOAT;
  }
};

template <>
-class MPITypeMap<double> {
-public:
-  static std::size_t factor() {
-    return 1;
-  }
-
+struct MPITypeMap<double> {
  static MPI_Datatype value() {
    return MPI_DOUBLE;
  }
};

template <>
-class MPITypeMap<std::complex<float>> {
-public:
-  static std::size_t factor() {
-    return 2;
-  }
-
+struct MPITypeMap<std::complex<float>> {
  static MPI_Datatype value() {
-    return MPI_FLOAT;
+    return MPI_COMPLEX;
  }
};

template <>
-class MPITypeMap<std::complex<double>> {
-public:
-  static std::size_t factor() {
-    return 2;
-  }
-
+struct MPITypeMap<std::complex<double>> {
  static MPI_Datatype value() {
-    return MPI_DOUBLE;
+    return MPI_DOUBLE_COMPLEX;
  }
};

-}  // parallel
-}  // dca
+}  // namespace parallel
+}  // namespace dca

#endif  // DCA_PARALLEL_MPI_CONCURRENCY_MPI_TYPE_MAP_HPP
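
For illustration, a hedged usage example of the new count semantics when reducing a buffer of complex numbers. This is not part of the commit: the communicator, sample data, and the standalone main are placeholders rather than DCA++'s MPIProcessorGrouping.

#include <complex>
#include <vector>
#include <mpi.h>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  std::vector<std::complex<double>> local(8, {1.0, -1.0});
  std::vector<std::complex<double>> global(local.size());

  // The count is simply the number of complex elements; before this commit
  // it would have been factor() * local.size() with MPI_DOUBLE.
  MPI_Allreduce(local.data(), global.data(), static_cast<int>(local.size()),
                MPI_DOUBLE_COMPLEX, MPI_SUM, MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}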