Commit ecb5c690 authored by Dmitry I. Lyakh

Fixed automatic opening of the GLOBAL scope.

The TensorRuntime DAG is now built properly.
parent 6cc9dd0e
@@ -58,6 +58,16 @@ $ cmake .. -DEXATN_BUILD_TESTS=TRUE
where the choices are OPENMPI or MPICH. You may also need to set
-DMPI_BIN_PATH=<PATH_TO_MPI_BINARIES> in case they are in a different location.
$ make install
Example of a typical workstation configuration with the default Linux BLAS (found in /usr/lib)
as well as CUDA (a single command, continued across lines with backslashes):
$ cmake .. \
  -DCMAKE_BUILD_TYPE=Release \
  -DEXATN_BUILD_TESTS=TRUE \
  -DPYTHON_INCLUDE_DIR=$(python3 -c "import sysconfig; print(sysconfig.get_paths()['platinclude'])") \
  -DCUDA_HOST_COMPILER=/usr/bin/g++ \
  -DBLAS_LIB=ATLAS \
  -DBLAS_PATH=/usr/lib
```
For GPU builds, setting the CUDA_HOST_COMPILER is necessary if your default `g++` is
not compatible with the CUDA nvcc compiler on your system. For example, CUDA 10 only
......
/** ExaTN::Numerics: Numerical server
REVISION: 2019/08/26
REVISION: 2019/09/03
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -18,6 +18,7 @@ NumServer::NumServer():
tensor_rt_(std::make_shared<runtime::TensorRuntime>())
{
scopes_.push(std::pair<std::string,ScopeId>{"GLOBAL",0}); //GLOBAL scope 0 is automatically open (top scope)
tensor_rt_->openScope("GLOBAL");
}
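The `tensor_rt_->openScope("GLOBAL")` call is the actual fix: pushing "GLOBAL" onto the server's own scope stack is not enough, because the runtime's `submit` path asserts that a current scope (and hence a current DAG) is set. Below is a minimal sketch of that failure mode, using made-up `FakeRuntime`/`FakeDag` types rather than the real ExaTN classes:
```
// Hedged sketch, not ExaTN source: a runtime whose submit() requires an open
// scope. If the owning server never calls openScope("GLOBAL"), current_dag_
// stays null and the very first submit() trips its precondition.
#include <cassert>
#include <memory>
#include <string>
#include <unordered_map>

struct FakeDag {};  // stand-in for the runtime's tensor-operation DAG

class FakeRuntime {
public:
  void openScope(const std::string & name) {
    dags_[name] = std::make_unique<FakeDag>();
    current_dag_ = dags_[name].get();  // the new scope's DAG becomes current
  }
  bool currentScopeIsSet() const { return current_dag_ != nullptr; }
  void submit() {  // stand-in for submitting a tensor operation
    assert(currentScopeIsSet());  // aborts if GLOBAL was never opened here
    // ... append the operation to *current_dag_ ...
  }
private:
  std::unordered_map<std::string, std::unique_ptr<FakeDag>> dags_;
  FakeDag * current_dag_ = nullptr;
};

int main() {
  FakeRuntime rt;
  rt.openScope("GLOBAL");  // mirrors the constructor fix above
  rt.submit();             // safe: a current DAG now exists
  return 0;
}
```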
void NumServer::reconfigureTensorRuntime(const std::string & dag_executor_name,
......
@@ -48,7 +48,7 @@ void TensorRuntime::executionThreadWorkflow()
//<< node_executor_name_ << std::endl << std::flush;
while(alive_.load()){
if(executing_.load()){ //executing_ is set to TRUE by the main thread when new operations are submitted
graph_executor_->execute(*current_dag_);
//graph_executor_->execute(*current_dag_);
executing_.store(false); //executing_ is set to FALSE by the execution thread
}
}
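The `executing_` flag above implements a simple busy-wait handshake: the main thread raises it after publishing new operations, and the execution thread lowers it once the work is drained. Here is a self-contained sketch of the same pattern, with illustrative names that are not ExaTN's own:
```
// Illustrative producer/consumer handshake on two atomic flags; not ExaTN code.
#include <atomic>
#include <iostream>
#include <thread>

std::atomic<bool> alive{true};      // keeps the worker loop running
std::atomic<bool> executing{false}; // raised by the producer, lowered by the worker

void workerLoop() {
  while (alive.load()) {
    if (executing.load()) {
      std::cout << "worker: executing the DAG\n"; // stand-in for execute(*dag)
      executing.store(false);                     // report completion
    }
  }
}

int main() {
  std::thread worker(workerLoop);
  executing.store(true);                               // submit work
  while (executing.load()) std::this_thread::yield();  // wait for completion
  alive.store(false);                                  // shut the worker down
  worker.join();
  return 0;
}
```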
@@ -114,6 +114,7 @@ VertexIdType TensorRuntime::submit(std::shared_ptr<TensorOperation> op) {
assert(currentScopeIsSet());
auto node_id = current_dag_->addOperation(op);
op->setId(node_id);
current_dag_->printIt(); //debug
executing_.store(true); //signal to the execution thread to execute the DAG
return node_id;
}
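The bookkeeping in `submit` is: the DAG assigns the operation a node id, the operation remembers that id via `setId`, and the (newly added) debug print dumps the DAG before execution is signaled. A hedged, self-contained sketch of that bookkeeping, with made-up `FakeOp`/`FakeDag` types standing in for the real ones:
```
// Hedged sketch of the submit() bookkeeping; FakeOp/FakeDag are illustrative.
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct FakeOp {
  std::size_t id = 0;
  void setId(std::size_t node_id) { id = node_id; }
};

struct FakeDag {
  std::vector<std::shared_ptr<FakeOp>> nodes;
  std::size_t addOperation(std::shared_ptr<FakeOp> op) {
    nodes.push_back(op);
    return nodes.size() - 1;  // the new node's id is its position in the DAG
  }
  void printIt() const {
    std::cout << "DAG with " << nodes.size() << " node(s)\n";
  }
};

int main() {
  FakeDag dag;
  auto op = std::make_shared<FakeOp>();
  auto node_id = dag.addOperation(op);
  op->setId(node_id);  // mirrors submit(): the op remembers its DAG node
  dag.printIt();       // mirrors the debug print added in this commit
  return 0;
}
```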
......
@@ -56,17 +56,15 @@ TEST(TensorRuntimeTester, checkSimple) {
std::shared_ptr<TensorOperation> destroy_tensor0 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_tensor0->setTensorOperand(tensor0);
#if 0
//Execute all tensor operations via numerical server:
//Execute all tensor operations via the ExaTN numerical server:
exatn::numericalServer->submit(create_tensor0);
exatn::numericalServer->submit(create_tensor1);
exatn::numericalServer->submit(create_tensor2);
exatn::numericalServer->submit(contract_tensors);
auto synced = exatn::numericalServer->sync(*tensor0,true); assert(synced);
//auto synced = exatn::numericalServer->sync(*tensor0,true); assert(synced);
exatn::numericalServer->submit(destroy_tensor2);
exatn::numericalServer->submit(destroy_tensor1);
exatn::numericalServer->submit(destroy_tensor0);
#endif
}
......