Commit d40ea75e authored by Doak, Peter W.'s avatar Doak, Peter W.
Browse files

This API is crude, but it is what Weilei is currently using,

and we will refactor from here.
parent 30d8a648
Loading
Loading
Loading
Loading
+46 −0
Original line number Diff line number Diff line
@@ -93,9 +93,19 @@ public:
  /// Writes function `f` under its own name (delegates to the named overload).
  template <typename Scalar, typename domain_type>
  void execute(const func::function<Scalar, domain_type>& f);

  /** Experimental distributed-function interface.
   *  \param start  first linearized index owned by this rank
   *  \param end    last linearized index owned by this rank
   *  NOTE(review): whether `end` is inclusive or exclusive is not established by
   *  this hunk -- confirm against util::getComputeRange before relying on it.
   */
  template <typename Scalar, typename domain_type>
  void execute(const func::function<Scalar, domain_type>& f, uint64_t start, uint64_t end);

  /// Writes function `f` under the explicit variable name `name`.
  template <typename Scalar, typename domain_type>
  void execute(const std::string& name, const func::function<Scalar, domain_type>& f);

  /** Experimental distributed-function interface, explicit-name variant.
   *  \param start  first linearized index owned by this rank
   *  \param end    last linearized index owned by this rank
   */
  template <typename Scalar, typename domain_type>
  void execute(const std::string& name, const func::function<Scalar, domain_type>& f, uint64_t start, uint64_t end);

  /// Writes a CPU linalg vector under `name`.
  template <typename Scalar>
  void execute(const std::string& name, const dca::linalg::Vector<Scalar, dca::linalg::CPU>& A);

@@ -284,6 +294,18 @@ void ADIOS2Writer::execute(const func::function<Scalar, domain_type>& f) {
  execute(f.get_name(), f);
}

/// Distributed-write entry point: forwards `f` under its own name together
/// with the [start, end] linearized range for this rank.
template <typename Scalar, typename domain_type>
void ADIOS2Writer::execute(const func::function<Scalar, domain_type>& f, uint64_t start, uint64_t end) {
  // An empty function produces no output at all.
  if (f.size() != 0) {
    if (verbose_)
      std::cout << "\t ADIOS2Writer: Write function : " << f.get_name() << "\n";
    // Delegate to the named overload, reusing the function's own name.
    execute(f.get_name(), f, start, end);
  }
}

template <typename Scalar, typename domain_type>
void ADIOS2Writer::execute(const std::string& name, const func::function<Scalar, domain_type>& f) {
  if (f.size() == 0)
@@ -305,6 +327,30 @@ void ADIOS2Writer::execute(const std::string& name, const func::function<Scalar,
  addAttribute<size_t>(full_name, "domain-sizes", std::vector<size_t>{dims.size()}, dims.data());
}

/// Experimental distributed write of function `f` under variable `name`.
/// \param start  first linearized index owned by this rank
/// \param end    last linearized index owned by this rank
/// NOTE(review): as written, `start` and `end` are never used -- see below.
template <typename Scalar, typename domain_type>
void ADIOS2Writer::execute(const std::string& name, const func::function<Scalar, domain_type>& f, uint64_t start, uint64_t end) {
  // Empty functions are silently skipped.
  if (f.size() == 0)
    return;

  const std::string full_name = get_path(name);

  // Collect the size of every subdomain of f.
  std::vector<size_t> dims;
  for (int l = 0; l < f.signature(); ++l)
    dims.push_back(f[l]);

  // be careful --> ADIOS2 is by default row-major, while the function-class is column-major !
  std::reverse(dims.begin(), dims.end());

  // NOTE(review): `start` and `end` are accepted but never used -- this writes the
  // FULL contents of f with the global dims, not the [start, end] slab owned by
  // this rank. In an MPI run every rank will therefore write the whole function.
  // Presumably this should pass a selection (offset/count derived from start/end)
  // to an ADIOS2 Put via a suitable `write` overload -- TODO confirm and fix.
  // see test/integration/parallel/func_distribution/function_distribution_test.cpp for
  // how to get the subindices spanned on this rank.

  write<Scalar>(full_name, dims, f.values());

  // Restore column-major order before recording the domain sizes as metadata.
  std::reverse(dims.begin(), dims.end());
  addAttribute(full_name, "name", f.get_name());
  addAttribute<size_t>(full_name, "domain-sizes", std::vector<size_t>{dims.size()}, dims.data());
}

template <typename Scalar>
void ADIOS2Writer::execute(const std::string& name,
                           const dca::linalg::Vector<Scalar, dca::linalg::CPU>& V) {
+14 −4
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@
#include "gtest/gtest.h"

int rank, comm_size;
dca::parallel::MPIConcurrency* concurrencyPtr;
dca::parallel::MPIConcurrency* concurrency_ptr;

// Typed-test fixture: one instantiation per Scalar type under test.
// (The TYPED_TEST_CASE/TYPED_TEST_SUITE registration is outside this hunk -- verify there.)
template <typename Scalar>
class ADIOS2ParallelIOTest : public ::testing::Test {};
@@ -47,11 +47,21 @@ TYPED_TEST(ADIOS2ParallelIOTest, FunctionReadWrite) {
  for (auto& x : f1)
    x = ++val;

  uint64_t start = 0;
  uint64_t end = 0;
  // This returns the linearized bounds of the function for a rank.
  dca::parallel::util::getComputeRange(concurrency_ptr->id(), concurrency_ptr->number_of_processors(),
                                     f1.size(), start, end);
    
  {
    dca::io::ADIOS2Writer writer(concurrencyPtr);
    dca::io::ADIOS2Writer writer(concurrency_ptr);
    writer.open_file("test_func_" + typeStr + ".bp", true);

    writer.execute(f1);
    // Because the caller needs to know if its function is distributed or not we will assume this is so for the API as well.
    // in the future I think something more sophisticated needs to be done and the function will need to know its
    // distribution, but for now we distribute only over the fastest index

    writer.execute(f1, start, end);

    writer.close_file();

@@ -78,7 +88,7 @@ int main(int argc, char** argv) {
  dca::parallel::MPIConcurrency concurrency(argc, argv);
  rank = concurrency.id();
  comm_size = concurrency.number_of_processors();
  concurrencyPtr = &concurrency;
  concurrency_ptr = &concurrency;

  ::testing::InitGoogleTest(&argc, argv);