Commit 33322ac3 authored by Dmitry I. Lyakh

Added TensorNetworkQueue to cuQuantum backend.


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent c6afe52e
@@ -20,6 +20,7 @@ target_include_directories(${LIBRARY_NAME}
graph_executors/lazy
../graph
${CMAKE_SOURCE_DIR}/src/exatn
cuquantum
)
set(_bundle_name exatn_runtime_executor)
......
@@ -19,12 +19,12 @@ target_include_directories(${LIBRARY_NAME}
target_link_libraries(${LIBRARY_NAME} PUBLIC exatn-numerics)
if(CUQUANTUM)
target_include_directories(${LIBRARY_NAME} PUBLIC ${CUQUANTUM_PATH}/include)
target_include_directories(${LIBRARY_NAME} PRIVATE ${CUQUANTUM_PATH}/include)
target_link_libraries(${LIBRARY_NAME} PRIVATE ${CUQUANTUM_PATH}/lib64/libcutensornet.so)
endif()
if(CUTENSOR AND NOT CUTENSOR_PATH STREQUAL ".")
target_include_directories(${LIBRARY_NAME} PUBLIC ${CUTENSOR_PATH}/include)
target_include_directories(${LIBRARY_NAME} PRIVATE ${CUTENSOR_PATH}/include)
target_link_libraries(${LIBRARY_NAME} PRIVATE ${CUTENSOR_PATH}/lib/11/libcutensor.so)
endif()
......
/** ExaTN: Tensor Runtime: Tensor network executor: NVIDIA cuQuantum
REVISION: 2021/12/14
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -10,13 +10,24 @@ Rationale:
#ifdef CUQUANTUM
#include "cuquantum_executor.hpp"
#include <cutensornet.h>
#include <cutensor.h>
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include "cuquantum_executor.hpp"
namespace exatn {
namespace runtime {
struct TensorNetworkReq {
std::shared_ptr<numerics::TensorNetwork> network;
};
CuQuantumExecutor::CuQuantumExecutor()
{
const size_t version = cutensornetGetVersion();
......
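The constructor excerpt above stops at the cuTensorNet version query (the rest of the file is collapsed). For reference, a minimal hedged sketch of pairing that query with library-handle creation and teardown; the wrapper class and its member name are illustrative, not the committed code:

#ifdef CUQUANTUM
//Illustrative sketch only, not the committed CuQuantumExecutor implementation:
#include <cutensornet.h>
#include <cstddef>
#include <iostream>
#include <stdexcept>
class CuTensorNetContextSketch {
public:
 CuTensorNetContextSketch()
 {
  const std::size_t version = cutensornetGetVersion(); //version of the linked cuTensorNet library
  std::cout << "#DEBUG(CuQuantumExecutor): cuTensorNet version " << version << std::endl;
  if(cutensornetCreate(&ctn_handle_) != CUTENSORNET_STATUS_SUCCESS) //create the library context
   throw std::runtime_error("cutensornetCreate() failed");
 }
 CuTensorNetContextSketch(const CuTensorNetContextSketch &) = delete;
 CuTensorNetContextSketch & operator=(const CuTensorNetContextSketch &) = delete;
 ~CuTensorNetContextSketch()
 {
  cutensornetDestroy(ctn_handle_); //release the library context
 }
private:
 cutensornetHandle_t ctn_handle_; //opaque cuTensorNet library handle
};
#endif //CUQUANTUM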
/** ExaTN: Tensor Runtime: Tensor network executor: NVIDIA cuQuantum
REVISION: 2021/12/20
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -16,34 +16,31 @@ Rationale:
#ifndef EXATN_RUNTIME_CUQUANTUM_EXECUTOR_HPP_
#define EXATN_RUNTIME_CUQUANTUM_EXECUTOR_HPP_
#include "tensor_network.hpp"
#include "tensor_operation.hpp"
#include <cutensornet.h>
#include <cutensor.h>
#include <cuda_runtime.h>
#include <unordered_map>
#include <vector>
#include <cassert>
#include "errors.hpp"
#include "tensor_network_queue.hpp"
namespace exatn {
namespace runtime {
struct TensorNetworkReq;
class CuQuantumExecutor {
public:
CuQuantumExecutor();
CuQuantumExecutor(const CuQuantumExecutor &) = delete;
CuQuantumExecutor & operator=(CuQuantumExecutor &) = delete;
CuQuantumExecutor(CuQuantumExecutor &&) noexcept = delete;
CuQuantumExecutor & operator=(CuQuantumExecutor &&) noexcept = delete;
virtual ~CuQuantumExecutor() = default;
int execute(numerics::TensorNetwork & network,
int execute(std::shared_ptr<numerics::TensorNetwork> network,
TensorOpExecHandle * exec_handle);
bool sync(TensorOpExecHandle op_handle,
bool sync(TensorOpExecHandle exec_handle,
int * error_code,
bool wait = true);
@@ -51,6 +48,8 @@ public:
protected:
/** Currently processed tensor networks **/
std::unordered_map<TensorOpExecHandle,std::unique_ptr<TensorNetworkReq>> active_networks_;
};
} //namespace runtime
......
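For orientation, a hedged client-side sketch of the submit/sync pair declared above, using the new std::shared_ptr-based execute() signature. The return-code conventions are not visible in this hunk and are treated as assumptions:

#ifdef CUQUANTUM
//Illustrative usage sketch; the return-code semantics below are assumptions:
#include "cuquantum_executor.hpp"
#include <memory>
namespace exatn {
namespace runtime {
void contract_whole_network_sketch(std::shared_ptr<numerics::TensorNetwork> network)
{
 CuQuantumExecutor executor;
 TensorOpExecHandle exec_handle;
 const int rc = executor.execute(network,&exec_handle); //submit the whole tensor network
 if(rc == 0){ //assumption: zero means the network was accepted for execution
  int error_code = 0;
  const bool done = executor.sync(exec_handle,&error_code,true); //block until completion
  //done == true with error_code == 0 is read here as a clean contraction
 }
}
} //namespace runtime
} //namespace exatn
#endif //CUQUANTUM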
/** ExaTN: Tensor Runtime: Tensor network executor: Execution queue
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
Rationale:
- ExaTN graph executor may accept whole tensor networks for execution
via the optional cuQuantum backend in which case the graph executor
will delegate execution of whole tensor networks to CuQuantumExecutor.
**/
#ifndef EXATN_RUNTIME_TENSOR_NETWORK_QUEUE_HPP_
#define EXATN_RUNTIME_TENSOR_NETWORK_QUEUE_HPP_
#include "tensor_network.hpp"
#include "tensor_operation.hpp"
#include <list>
#include <memory>
#include <atomic>
#include <mutex>
#include "errors.hpp"
namespace exatn {
namespace runtime {
class TensorNetworkQueue {
public:
TensorNetworkQueue() = default;
TensorNetworkQueue(const TensorNetworkQueue &) = delete;
TensorNetworkQueue & operator=(const TensorNetworkQueue &) = delete;
TensorNetworkQueue(TensorNetworkQueue &&) noexcept = delete;
TensorNetworkQueue & operator=(TensorNetworkQueue &&) noexcept = delete;
~TensorNetworkQueue() = default;
inline void lock(){queue_lock_.lock();}
inline void unlock(){queue_lock_.unlock();}
protected:
std::list<std::pair<std::shared_ptr<numerics::TensorNetwork>,
TensorOpExecHandle>> networks_;
std::mutex queue_lock_;
};
} //namespace runtime
} //namespace exatn
#endif //EXATN_RUNTIME_TENSOR_NETWORK_QUEUE_HPP_
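The queue stores (tensor network, execution handle) pairs behind a mutex but exposes only lock()/unlock() at this point. A hedged sketch of what an append member sitting on that storage could look like; this method and its counter-based handle generation are assumptions, not part of the header above:

//Hypothetical TensorNetworkQueue member, for illustration only:
TensorOpExecHandle append(std::shared_ptr<numerics::TensorNetwork> network)
{
 std::lock_guard<std::mutex> guard(queue_lock_); //same mutex exposed via lock()/unlock()
 static TensorOpExecHandle next_handle = 1;      //illustrative handle source (assumption)
 const TensorOpExecHandle exec_handle = next_handle++;
 networks_.emplace_back(network,exec_handle);    //store the (network, handle) pair
 return exec_handle;
}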
/** ExaTN:: Tensor Runtime: Tensor graph executor: Eager
REVISION: 2020/09/02
REVISION: 2021/12/21
Copyright (C) 2018-2020 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle)
Copyright (C) 2018-2021 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
**/
#include "graph_executor_eager.hpp"
@@ -87,5 +87,10 @@ void EagerGraphExecutor::execute(TensorGraph & dag) {
return;
}
void EagerGraphExecutor::execute(TensorNetworkQueue & tensor_network_queue) {
assert(false);
}
} //namespace runtime
} //namespace exatn
/** ExaTN:: Tensor Runtime: Tensor graph executor: Eager
REVISION: 2021/03/29
REVISION: 2021/12/21
Copyright (C) 2018-2021 Tiffany Mintz, Dmitry Lyakh, Alex McCaskey
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -30,6 +30,9 @@ public:
/** Traverses the DAG and executes all its nodes. **/
virtual void execute(TensorGraph & dag) override;
/** Traverses the list of tensor networks and executes them as a whole. **/
virtual void execute(TensorNetworkQueue & tensor_network_queue) override;
/** Regulates the tensor prefetch depth (0 turns prefetch off). **/
virtual void setPrefetchDepth(unsigned int depth) override {
return;
......
/** ExaTN:: Tensor Runtime: Tensor graph executor: Lazy
REVISION: 2021/03/29
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -9,6 +9,10 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
#include "talshxx.hpp"
#ifdef CUQUANTUM
#include "cuquantum_executor.hpp"
#endif
#include <iostream>
#include <iomanip>
@@ -249,5 +253,11 @@ void LazyGraphExecutor::execute(TensorGraph & dag) {
return;
}
void LazyGraphExecutor::execute(TensorNetworkQueue & tensor_network_queue) {
return;
}
} //namespace runtime
} //namespace exatn
/** ExaTN:: Tensor Runtime: Tensor graph executor: Lazy
REVISION: 2021/03/29
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh, Alex McCaskey
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -37,6 +37,9 @@ public:
/** Traverses the DAG and executes all its nodes. **/
virtual void execute(TensorGraph & dag) override;
/** Traverses the list of tensor networks and executes them as a whole. **/
virtual void execute(TensorNetworkQueue & tensor_network_queue) override;
/** Regulates the tensor prefetch depth (0 turns prefetch off). **/
virtual void setPrefetchDepth(unsigned int depth) override {
prefetch_depth_ = depth;
......
/** ExaTN:: Tensor Runtime: Tensor graph executor
REVISION: 2021/04/01
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -19,6 +19,7 @@ Rationale:
#include "Identifiable.hpp"
#include "tensor_graph.hpp"
#include "tensor_network_queue.hpp"
#include "tensor_node_executor.hpp"
#include "tensor_operation.hpp"
@@ -129,6 +130,10 @@ public:
[THREAD: This function is executed by the execution thread] **/
virtual void execute(TensorGraph & dag) = 0;
/** Traverses the list of tensor networks and executes them as a whole.
[THREAD: This function is executed by the execution thread] **/
virtual void execute(TensorNetworkQueue & tensor_network_queue) = 0;
/** Regulates the tensor prefetch depth (0 turns prefetch off). **/
virtual void setPrefetchDepth(unsigned int depth) = 0;
......
/** ExaTN:: Tensor Runtime: Task-based execution layer for tensor operations
REVISION: 2021/12/14
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -10,10 +10,6 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
#include "talshxx.hpp"
#ifdef CUQUANTUM
#include "cuquantum_executor.hpp"
#endif
#ifdef MPI_ENABLED
#include "mpi.h"
#endif
@@ -110,9 +106,10 @@ void TensorRuntime::executionThreadWorkflow()
graph_executor_->execute(*current_dag_);
processTensorDataRequests(); //process all outstanding client requests for tensor data (synchronous)
if(current_dag_->hasUnexecutedNodes()){
executing_.store(true); //reaffirm that DAG is still executing
executing_.store(true); //reaffirm that DAG is still executing
}else{
executing_.store(false); //executing_ is set to FALSE by the execution thread
graph_executor_->execute(tensor_network_queue_);
executing_.store(false); //executing_ is set to FALSE by the execution thread
}
}
processTensorDataRequests(); //process all outstanding client requests for tensor data (synchronous)
......
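Condensed, the change above makes the execution thread drain the whole-network queue exactly when the current DAG has no unexecuted nodes left. A simplified restatement of that loop follows; the outer-loop framing and the alive_ flag name are approximations of code not shown in this hunk:

//Simplified sketch of TensorRuntime::executionThreadWorkflow() after this change:
while(alive_.load()){
 while(executing_.load()){
  graph_executor_->execute(*current_dag_);          //execute ready DAG nodes
  processTensorDataRequests();                      //serve synchronous tensor data requests
  if(current_dag_->hasUnexecutedNodes()){
   executing_.store(true);                          //DAG still has pending work
  }else{
   graph_executor_->execute(tensor_network_queue_); //new: process queued whole tensor networks
   executing_.store(false);                         //DAG fully executed
  }
 }
 processTensorDataRequests();
}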
/** ExaTN:: Tensor Runtime: Task-based execution layer for tensor operations
REVISION: 2021/12/14
REVISION: 2021/12/21
Copyright (C) 2018-2021 Dmitry Lyakh, Tiffany Mintz, Alex McCaskey
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
@@ -44,8 +44,9 @@ Rationale:
#ifndef EXATN_RUNTIME_TENSOR_RUNTIME_HPP_
#define EXATN_RUNTIME_TENSOR_RUNTIME_HPP_
#include "tensor_graph_executor.hpp"
#include "tensor_graph.hpp"
#include "tensor_network_queue.hpp"
#include "tensor_graph_executor.hpp"
#include "tensor_operation.hpp"
#include "tensor_method.hpp"
@@ -199,6 +200,8 @@ private:
std::shared_ptr<TensorGraph> current_dag_; //pointer to the current DAG
/** Tensor data request queue **/
std::list<TensorDataReq> data_req_queue_;
/** List of tensor networks submitted for processing as a whole **/
TensorNetworkQueue tensor_network_queue_;
/** Logging level (0:none) **/
int logging_;
/** Current executing status (whether or not the execution thread is active) **/
......