/** ExaTN: Tensor Runtime: Tensor network executor: NVIDIA cuQuantum
REVISION: 2021/12/22

Copyright (C) 2018-2021 Dmitry Lyakh
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)

Rationale:
 - The ExaTN graph executor may accept whole tensor networks for execution
   via the optional cuQuantum backend, in which case it delegates execution
   of those tensor networks to CuQuantumExecutor (see the illustrative
   usage sketch after the class declaration below).

**/

#ifdef CUQUANTUM

#ifndef EXATN_RUNTIME_CUQUANTUM_EXECUTOR_HPP_
#define EXATN_RUNTIME_CUQUANTUM_EXECUTOR_HPP_

#include <unordered_map>
#include <vector>
#include <memory> //std::shared_ptr

#include "tensor_network_queue.hpp"

namespace exatn {
namespace runtime {

struct TensorNetworkReq;

class CuQuantumExecutor {

public:

 CuQuantumExecutor();
 CuQuantumExecutor(const CuQuantumExecutor &) = delete;
 CuQuantumExecutor & operator=(const CuQuantumExecutor &) = delete;
 CuQuantumExecutor(CuQuantumExecutor &&) noexcept = delete;
 CuQuantumExecutor & operator=(CuQuantumExecutor &&) noexcept = delete;
 virtual ~CuQuantumExecutor();

 int execute(std::shared_ptr<numerics::TensorNetwork> network,
             TensorOpExecHandle exec_handle);

 bool sync(TensorOpExecHandle exec_handle,
           int * error_code,
           bool wait = true);

 bool sync();

protected:

 /** Currently processed tensor networks **/
 std::unordered_map<TensorOpExecHandle,std::shared_ptr<TensorNetworkReq>> active_networks_;
 /** GPU Ids available to the current process **/
 std::vector<int> gpus;
 /** cuTensorNet contexts for all available GPUs **/
 std::vector<void*> ctn_handles; //cutensornetHandle_t
};
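
/** Usage sketch (illustrative only, not part of the declared API): the graph executor
    is expected to submit a whole tensor network for execution and later synchronize
    on its execution handle. The return-value conventions of execute() and the names
    `network` and `exec_handle` below are assumptions made for illustration:

     CuQuantumExecutor cuquantum_executor;
     int error_code = 0;
     cuquantum_executor.execute(network,exec_handle);                //submit the whole tensor network (assumed asynchronous)
     while(!cuquantum_executor.sync(exec_handle,&error_code,false)); //poll for completion without blocking
     cuquantum_executor.sync();                                      //synchronize on all currently active tensor networks
**/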

} //namespace runtime
} //namespace exatn

#endif //EXATN_RUNTIME_CUQUANTUM_EXECUTOR_HPP_

#endif //CUQUANTUM