Commit bc35175b authored by Dmitry I. Lyakh's avatar Dmitry I. Lyakh

exatn::NumServer.submit() method now accepts a set of parallel processes

where the processing will be done.
parent f9cf7569
......@@ -246,6 +246,21 @@ bool NumServer::submit(std::shared_ptr<TensorOperation> operation)
}
bool NumServer::submit(TensorNetwork & network)
{
 // Default overload: evaluate the tensor network on every parallel process,
 // i.e. the process set [0 .. num_processes_).
 std::vector<unsigned int> all_processes;
 all_processes.reserve(num_processes_);
 for(unsigned int rank = 0; rank < num_processes_; ++rank) all_processes.push_back(rank);
 return submit(network,all_processes);
}
bool NumServer::submit(std::shared_ptr<TensorNetwork> network)
{
 // Default overload: delegate to the process-set version with all
 // parallel processes [0 .. num_processes_) selected.
 std::vector<unsigned int> every_process(num_processes_);
 unsigned int rank = 0;
 for(auto & proc : every_process) proc = rank++;
 return submit(network,every_process);
}
bool NumServer::submit(TensorNetwork & network,
const std::vector<unsigned int> & process_set)
{
assert(network.isValid()); //debug
auto & op_list = network.getOperationList(contr_seq_optimizer_);
......@@ -272,13 +287,32 @@ bool NumServer::submit(TensorNetwork & network)
return true;
}
bool NumServer::submit(std::shared_ptr<TensorNetwork> network)
bool NumServer::submit(std::shared_ptr<TensorNetwork> network,
const std::vector<unsigned int> & process_set)
{
if(network) return submit(*network);
if(network) return submit(*network,process_set);
return false;
}
// Submits a tensor expansion for numerical evaluation, accumulating the
// result into the provided accumulator tensor. Default overload: all
// parallel processes [0 .. num_processes_) participate.
// NOTE: the stale pre-change one-line signature duplicated by the diff
// scrape has been dropped; only the updated definition remains.
bool NumServer::submit(TensorExpansion & expansion,
                       std::shared_ptr<Tensor> accumulator)
{
 std::vector<unsigned int> process_set(num_processes_);
 for(unsigned int i = 0; i < num_processes_; ++i) process_set[i] = i;
 return submit(expansion,accumulator,process_set);
}
bool NumServer::submit(std::shared_ptr<TensorExpansion> expansion,
                       std::shared_ptr<Tensor> accumulator)
{
 // Default overload: build the full process set [0 .. num_processes_)
 // and forward to the process-set version.
 std::vector<unsigned int> chosen(num_processes_);
 unsigned int rank = 0;
 for(auto & proc : chosen) proc = rank++;
 return submit(expansion,accumulator,chosen);
}
bool NumServer::submit(TensorExpansion & expansion,
std::shared_ptr<Tensor> accumulator,
const std::vector<unsigned int> & process_set)
{
assert(accumulator);
std::list<std::shared_ptr<TensorOperation>> accumulations;
......@@ -305,9 +339,11 @@ bool NumServer::submit(TensorExpansion & expansion, std::shared_ptr<Tensor> accu
return true;
}
bool NumServer::submit(std::shared_ptr<TensorExpansion> expansion, std::shared_ptr<Tensor> accumulator)
bool NumServer::submit(std::shared_ptr<TensorExpansion> expansion,
std::shared_ptr<Tensor> accumulator,
const std::vector<unsigned int> & process_set)
{
if(expansion) return submit(*expansion,accumulator);
if(expansion) return submit(*expansion,accumulator,process_set);
return false;
}
......
......@@ -180,17 +180,31 @@ public:
/** Submits a tensor network for processing (evaluating the output tensor-result).
If the output (result) tensor has not been created yet, it will be created and
initialized to zero automatically, and later destroyed automatically when no longer needed. **/
bool submit(TensorNetwork & network); //in: tensor network for numerical evaluation
bool submit(std::shared_ptr<TensorNetwork> network);
initialized to zero automatically, and later destroyed automatically when no longer needed.
By default all parallel processes will be processing the tensor network,
otherwise the desired process subset needs to be explicitly specified. **/
bool submit(TensorNetwork & network); //in: tensor network for numerical evaluation
bool submit(std::shared_ptr<TensorNetwork> network); //in: tensor network for numerical evaluation
bool submit(TensorNetwork & network, //in: tensor network for numerical evaluation
const std::vector<unsigned int> & process_set); //in: chosen set of parallel processes
bool submit(std::shared_ptr<TensorNetwork> network, //in: tensor network for numerical evaluation
const std::vector<unsigned int> & process_set); //in: chosen set of parallel processes
/** Submits a tensor network expansion for processing (evaluating output tensors of all
constituting tensor networks and accumulating them in the provided accumulator tensor).
Synchronization of the tensor expansion evaluation is done via syncing on the accumulator tensor. **/
Synchronization of the tensor expansion evaluation is done via syncing on the accumulator
tensor. By default all parallel processes will be processing the tensor expansion,
otherwise the desired process subset needs to be explicitly specified. **/
bool submit(TensorExpansion & expansion, //in: tensor expansion for numerical evaluation
std::shared_ptr<Tensor> accumulator); //inout: tensor accumulator (result)
bool submit(std::shared_ptr<TensorExpansion> expansion,
std::shared_ptr<Tensor> accumulator);
bool submit(std::shared_ptr<TensorExpansion> expansion, //in: tensor expansion for numerical evaluation
std::shared_ptr<Tensor> accumulator); //inout: tensor accumulator (result)
bool submit(TensorExpansion & expansion, //in: tensor expansion for numerical evaluation
std::shared_ptr<Tensor> accumulator, //inout: tensor accumulator (result)
const std::vector<unsigned int> & process_set); //in: chosen set of parallel processes
bool submit(std::shared_ptr<TensorExpansion> expansion, //in: tensor expansion for numerical evaluation
std::shared_ptr<Tensor> accumulator, //inout: tensor accumulator (result)
const std::vector<unsigned int> & process_set); //in: chosen set of parallel processes
/** Synchronizes all update operations on a given tensor. **/
bool sync(const Tensor & tensor,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment