Commit fbbd2f5e authored by Dmitry I. Lyakh

[BROKEN]: Switching to MPICommProxy ...

parent b5209d64
#include "exatn.hpp"
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
#include <iostream>
namespace exatn {
#ifdef MPI_ENABLED
void initialize(MPI_Comm communicator,
void initialize(MPICommProxy & communicator,
const std::string & graph_executor_name,
const std::string & node_executor_name)
{
@@ -18,7 +22,9 @@ void initialize(MPI_Comm communicator,
}
return;
}
#else
#endif
void initialize(const std::string & graph_executor_name,
const std::string & node_executor_name)
{
@@ -26,12 +32,15 @@ void initialize(const std::string & graph_executor_name,
serviceRegistry->initialize();
exatnFrameworkInitialized = true;
//std::cout << "#DEBUG(exatn): ExaTN services initialized" << std::endl << std::flush;
#ifdef MPI_ENABLED
numericalServer = std::make_shared<NumServer>(???,graph_executor_name,node_executor_name);
#else
numericalServer = std::make_shared<NumServer>(graph_executor_name,node_executor_name);
#endif
//std::cout << "#DEBUG(exatn): ExaTN numerical server initialized" << std::endl << std::flush;
}
return;
}
#endif
bool isInitialized() {
......
@@ -11,13 +11,12 @@ namespace exatn {
/** Initializes ExaTN **/
#ifdef MPI_ENABLED
void initialize(MPI_Comm communicator = MPI_COMM_WORLD, //MPI communicator
void initialize(MPICommProxy & communicator, //MPI communicator proxy
const std::string & graph_executor_name = "eager-dag-executor", //DAG executor kind
const std::string & node_executor_name = "talsh-node-executor"); //DAG node executor kind
#else
#endif
void initialize(const std::string & graph_executor_name = "eager-dag-executor", //DAG executor kind
const std::string & node_executor_name = "talsh-node-executor"); //DAG node executor kind
#endif
/** Returns whether or not ExaTN has been initialized **/
bool isInitialized();
......
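For orientation, here is a minimal caller-side sketch of where this change is heading: the application wraps its own MPI communicator in the new proxy and hands it to exatn::initialize. This is not part of the commit; main(), the include set, and the assumption that the caller manages MPI_Init/MPI_Finalize are illustrative only, and any ExaTN shutdown call is omitted.

#include "exatn.hpp"
#include "mpi.h"

int main(int argc, char ** argv) {
  MPI_Init(&argc, &argv);
  MPI_Comm comm = MPI_COMM_WORLD;        // a named handle: the proxy stores a pointer, so the macro constant cannot be wrapped directly
  exatn::MPICommProxy comm_proxy(&comm); // type-erased wrapper defined in mpi_proxy.hpp below
  exatn::initialize(comm_proxy);         // defaults to "eager-dag-executor" and "talsh-node-executor"
  // ... build and execute tensor networks ...
  MPI_Finalize();
  return 0;
}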
/** ExaTN::Numerics: Numerical server
REVISION: 2020/02/28
REVISION: 2020/03/10
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -11,6 +11,10 @@ Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#include <map>
#include <future>
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
#include <cassert>
namespace exatn{
@@ -20,14 +24,14 @@ std::shared_ptr<NumServer> numericalServer {nullptr}; //initialized by exatn::in
#ifdef MPI_ENABLED
NumServer::NumServer(MPI_Comm communicator,
NumServer::NumServer(MPICommProxy & communicator,
const std::string & graph_executor_name,
const std::string & node_executor_name):
contr_seq_optimizer_("dummy"),
tensor_rt_(std::make_shared<runtime::TensorRuntime>(communicator,graph_executor_name,node_executor_name))
{
int mpi_error = MPI_Comm_size(communicator,&num_processes_); assert(mpi_error == MPI_SUCCESS);
mpi_error = MPI_Comm_rank(communicator,&process_rank_); assert(mpi_error == MPI_SUCCESS);
int mpi_error = MPI_Comm_size(*(communicator.get<MPI_Comm>()),&num_processes_); assert(mpi_error == MPI_SUCCESS);
mpi_error = MPI_Comm_rank(*(communicator.get<MPI_Comm>()),&process_rank_); assert(mpi_error == MPI_SUCCESS);
space_register_ = getSpaceRegister(); assert(space_register_);
tensor_op_factory_ = TensorOpFactory::get();
scopes_.push(std::pair<std::string,ScopeId>{"GLOBAL",0}); //GLOBAL scope 0 is automatically open (top scope)
@@ -65,7 +69,7 @@ NumServer::~NumServer()
#ifdef MPI_ENABLED
void NumServer::reconfigureTensorRuntime(MPI_Comm communicator,
void NumServer::reconfigureTensorRuntime(MPICommProxy & communicator,
const std::string & dag_executor_name,
const std::string & node_executor_name)
{
......
/** ExaTN::Numerics: Numerical server
REVISION: 2020/02/28
REVISION: 2020/03/10
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -92,7 +92,7 @@ class NumServer final {
public:
#ifdef MPI_ENABLED
NumServer(MPI_Comm communicator = MPI_COMM_WORLD, //MPI communicator
NumServer(MPICommProxy & communicator, //MPI communicator proxy
const std::string & graph_executor_name = "eager-dag-executor", //DAG executor kind
const std::string & node_executor_name = "talsh-node-executor"); //DAG node executor kind
#else
@@ -107,7 +107,7 @@ public:
/** Reconfigures tensor runtime implementation. **/
#ifdef MPI_ENABLED
void reconfigureTensorRuntime(MPI_Comm communicator,
void reconfigureTensorRuntime(MPICommProxy & communicator,
const std::string & dag_executor_name,
const std::string & node_executor_name);
#else
......
@@ -3,6 +3,10 @@
#include "exatn.hpp"
#include "talshxx.hpp"
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
#include <iostream>
#include <utility>
......
@@ -2,6 +2,10 @@
#include <gtest/gtest.h>
#include "exatn.hpp"
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
using namespace exatn::parser;
TEST(TAProLInterpreterTester, checkSimple) {
......
/** ExaTN: MPI proxy types
REVISION: 2020/03/10
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#ifndef EXATN_MPI_PROXY_HPP_
#define EXATN_MPI_PROXY_HPP_
namespace exatn {
class MPICommProxy{
public:
template<typename MPITypeName>
MPICommProxy(MPITypeName * mpi_object_ptr):
object_(static_cast<void*>(mpi_object_ptr)) {}
MPICommProxy(const MPICommProxy &) = default;
MPICommProxy & operator=(const MPICommProxy &) = default;
MPICommProxy(MPICommProxy &&) noexcept = default;
MPICommProxy & operator=(MPICommProxy &&) noexcept = default;
~MPICommProxy() = default;
bool isEmpty() const {return (object_ == nullptr);}
template<typename MPITypeName>
MPITypeName * get(){return static_cast<MPITypeName*>(object_);}
private:
void * object_;
};
} //namespace exatn
#endif //EXATN_MPI_PROXY_HPP_
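The proxy type-erases the MPI handle behind a void*, so public headers no longer need mpi.h; only translation units that actually call MPI recover the typed pointer via get<MPI_Comm>() and dereference it, exactly as the NumServer and TensorRuntime constructors do in this commit. A small sketch of that round trip (the helper function is hypothetical):

#include "mpi_proxy.hpp"
#include "mpi.h"
#include <cassert>

// Hypothetical helper illustrating the wrap/unwrap round trip used in this commit.
int rank_via_proxy(MPI_Comm comm) {
  exatn::MPICommProxy proxy(&comm);  // wrap: the address is stored as an untyped void*
  assert(!proxy.isEmpty());
  int rank = 0;
  int mpi_error = MPI_Comm_rank(*(proxy.get<MPI_Comm>()), &rank); // unwrap: cast back to MPI_Comm* and dereference
  assert(mpi_error == MPI_SUCCESS);
  return rank;
}

Note that the proxy only stores a raw pointer, so the wrapped MPI_Comm variable must outlive every proxy copy that refers to it.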
/** ExaTN:: Tensor Runtime: Task-based execution layer for tensor operations
REVISION: 2020/02/27
REVISION: 2020/03/10
Copyright (C) 2018-2020 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle)
@@ -10,6 +10,10 @@ Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle)
#include "talshxx.hpp"
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
#include <vector>
#include <iostream>
@@ -17,15 +21,15 @@ namespace exatn {
namespace runtime {
#ifdef MPI_ENABLED
TensorRuntime::TensorRuntime(MPI_Comm communicator,
TensorRuntime::TensorRuntime(MPICommProxy & communicator,
const std::string & graph_executor_name,
const std::string & node_executor_name):
communicator_(communicator),
mpi_comm_(communicator),
graph_executor_name_(graph_executor_name), node_executor_name_(node_executor_name),
current_dag_(nullptr), executing_(false), alive_(false)
{
int mpi_error = MPI_Comm_size(communicator,&num_processes_); assert(mpi_error == MPI_SUCCESS);
mpi_error = MPI_Comm_rank(communicator,&process_rank_); assert(mpi_error == MPI_SUCCESS);
int mpi_error = MPI_Comm_size(*(mpi_comm_.get<MPI_Comm>()),&num_processes_); assert(mpi_error == MPI_SUCCESS);
mpi_error = MPI_Comm_rank(*(mpi_comm_.get<MPI_Comm>()),&process_rank_); assert(mpi_error == MPI_SUCCESS);
graph_executor_ = exatn::getService<TensorGraphExecutor>(graph_executor_name_);
std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD:Process " << process_rank_
<< "]: DAG executor set to " << graph_executor_name_ << " + "
......
/** ExaTN:: Tensor Runtime: Task-based execution layer for tensor operations
REVISION: 2020/02/27
REVISION: 2020/03/10
Copyright (C) 2018-2020 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle)
@@ -49,9 +49,7 @@ Rationale:
#include "tensor_operation.hpp"
#include "tensor_method.hpp"
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
#include "mpi_proxy.hpp"
#include <map>
#include <list>
@@ -71,7 +69,7 @@ class TensorRuntime final {
public:
#ifdef MPI_ENABLED
TensorRuntime(MPI_Comm communicator = MPI_COMM_WORLD, //MPI communicator
TensorRuntime(MPICommProxy & communicator, //MPI communicator proxy
const std::string & graph_executor_name = "eager-dag-executor", //DAG executor kind
const std::string & node_executor_name = "talsh-node-executor"); //DAG node executor kind
#else
@@ -159,8 +157,8 @@ private:
inline void unlockDataReqQ(){data_req_mtx_.unlock();}
#ifdef MPI_ENABLED
/** MPI communicator **/
MPI_Comm communicator_;
/** MPI communicator proxy **/
MPICommProxy mpi_comm_;
#endif
/** Tensor graph (DAG) executor name **/
std::string graph_executor_name_;
......
@@ -13,6 +13,10 @@
#include <gtest/gtest.h>
#include "exatn.hpp"
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
TEST(TensorRuntimeTester, checkSimple) {
using exatn::numerics::Tensor;
......