Commit 9cd4c0e5 authored by Dmitry I. Lyakh

Added the possibility of switching tensor contraction order optimizers at runtime via resetContrSeqOptimizer().
parent d7dc14c8
Pipeline #86911 passed in 4 minutes and 19 seconds
/** ExaTN::Numerics: General client header
-REVISION: 2019/12/14
+REVISION: 2020/01/17
-Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
-Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
+Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
+Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
1. Vector space and subspace registration:
@@ -313,6 +313,12 @@ inline std::shared_ptr<talsh::Tensor> getLocalTensor(const std::string & name) /
{return numericalServer->getLocalTensor(name);}
+/** Resets the tensor contraction sequence optimizer that
+    is invoked when evaluating tensor networks: {dummy,heuro}. **/
+inline void resetContrSeqOptimizer(const std::string & optimizer_name)
+{return numericalServer->resetContrSeqOptimizer(optimizer_name);}
/** Resets tensor runtime logging level (0:none). **/
inline void resetRuntimeLoggingLevel(int level = 0)
{return numericalServer->resetRuntimeLoggingLevel(level);}
......
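For illustration, here is a minimal client-side usage sketch of the new call (not part of the commit). Only resetContrSeqOptimizer() and resetRuntimeLoggingLevel() are taken from the header above; the umbrella header name and the initialize/finalize calls are assumptions about the ExaTN client API.

#include "exatn.hpp" // assumed umbrella client header name

int main()
{
 exatn::initialize();                    // assumed ExaTN start-up call

 exatn::resetRuntimeLoggingLevel(1);     // 0:none, so a positive level is assumed to enable logging

 exatn::resetContrSeqOptimizer("dummy"); // trivial optimizer, e.g. for small networks
 // ... build and submit a small TensorNetwork here ...

 exatn::resetContrSeqOptimizer("heuro"); // switch to the heuristic optimizer at runtime
 // ... build and submit a larger TensorNetwork here ...

 exatn::finalize();                      // assumed ExaTN shut-down call
 return 0;
}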
/** ExaTN::Numerics: Numerical server
-REVISION: 2019/12/30
+REVISION: 2020/01/17
-Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
-Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
+Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
+Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#include "num_server.hpp"
@@ -20,7 +20,7 @@ std::shared_ptr<NumServer> numericalServer {nullptr}; //initialized by exatn::in
NumServer::NumServer():
- tensor_rt_(std::make_shared<runtime::TensorRuntime>())
+ contr_seq_optimizer_("dummy"), tensor_rt_(std::make_shared<runtime::TensorRuntime>())
{
tensor_op_factory_ = TensorOpFactory::get();
scopes_.push(std::pair<std::string,ScopeId>{"GLOBAL",0}); //GLOBAL scope 0 is automatically open (top scope)
@@ -49,6 +49,12 @@ void NumServer::reconfigureTensorRuntime(const std::string & dag_executor_name,
return;
}
+void NumServer::resetContrSeqOptimizer(const std::string & optimizer_name)
+{
+ contr_seq_optimizer_ = optimizer_name;
+ return;
+}
void NumServer::resetRuntimeLoggingLevel(int level)
{
if(tensor_rt_) tensor_rt_->resetLoggingLevel(level);
@@ -210,7 +216,7 @@ bool NumServer::submit(std::shared_ptr<TensorOperation> operation)
bool NumServer::submit(TensorNetwork & network)
{
assert(network.isValid()); //debug
- auto & op_list = network.getOperationList("heuro");
+ auto & op_list = network.getOperationList(contr_seq_optimizer_);
auto output_tensor = network.getTensor(0);
auto iter = tensors_.find(output_tensor->getName());
if(iter == tensors_.end()){ //output tensor does not exist and needs to be created and initialized to zero
......
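Design-wise, the change is minimal: the optimizer name is held as a plain string member and is only resolved when submit() passes it to TensorNetwork::getOperationList(), so a reset takes effect for networks submitted afterwards. Note also that the constructor now defaults contr_seq_optimizer_ to "dummy", whereas submit() previously hard-coded "heuro". The toy sketch below is not ExaTN code; it merely illustrates this store-now, resolve-at-submission pattern.

#include <iostream>
#include <string>

// Toy stand-in for NumServer, illustrating the deferred-resolution pattern only.
class Server {
public:
 Server(): contr_seq_optimizer_("dummy") {}            // default optimizer, as in the diff

 void resetContrSeqOptimizer(const std::string & name) // stores the name; no immediate effect
  {contr_seq_optimizer_ = name;}

 void submit(const std::string & network_name)         // stand-in for NumServer::submit(TensorNetwork &)
  {std::cout << network_name << " evaluated with optimizer '" << contr_seq_optimizer_ << "'\n";}

private:
 std::string contr_seq_optimizer_; // consumed at submission time (getOperationList in ExaTN)
};

int main()
{
 Server server;
 server.submit("net0");                  // uses the default "dummy" optimizer
 server.resetContrSeqOptimizer("heuro"); // switch at runtime
 server.submit("net1");                  // subsequent submissions use "heuro"
 return 0;
}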
/** ExaTN::Numerics: Numerical server
-REVISION: 2019/12/11
+REVISION: 2020/01/17
-Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
-Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
+Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
+Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(a) Numerical server provides basic tensor network processing functionality:
@@ -102,6 +102,10 @@ public:
void reconfigureTensorRuntime(const std::string & dag_executor_name,
const std::string & node_executor_name);
+ /** Resets the tensor contraction sequence optimizer that is
+     invoked when evaluating tensor networks. **/
+ void resetContrSeqOptimizer(const std::string & optimizer_name);
/** Resets the runtime logging level (0:none). **/
void resetRuntimeLoggingLevel(int level = 0);
@@ -324,6 +328,8 @@ private:
std::unordered_map<std::string,std::shared_ptr<Tensor>> tensors_; //registered tensors (by CREATE operation)
std::list<std::shared_ptr<Tensor>> implicit_tensors_; //tensors created implicitly by the runtime (for garbage collection)
+ std::string contr_seq_optimizer_; //tensor contraction sequence optimizer invoked when evaluating tensor networks
std::map<std::string,std::shared_ptr<TensorMethod>> ext_methods_; //external tensor methods
std::map<std::string,std::shared_ptr<BytePacket>> ext_data_; //external data
......