Commit 1fcd89fc authored by Dmitry I. Lyakh's avatar Dmitry I. Lyakh

Implemented the exatn/tests/NumServerTester test, which evaluates a tensor network.

Issue: Synchronization on the tensor network hangs (commented out for now).
parent 33ff17f6
@@ -45,6 +45,121 @@ TEST(NumServerTester, checkNumServer)
}
TEST(NumServerTester, useNumServer)
{
using exatn::TensorOpCode;
using exatn::numerics::Tensor;
using exatn::numerics::TensorShape;
using exatn::numerics::TensorOperation;
using exatn::numerics::TensorOpFactory;
using exatn::numerics::TensorNetwork;
auto & op_factory = *(TensorOpFactory::get()); //tensor operation factory
//Example of tensor network processing:
//3-site MPS closure with 2-body Hamiltonian applied to sites 0 and 1:
//Z0() = T0(a,b) * T1(b,c,d) * T2(d,e) * H0(a,c,f,g) * S0(f,h) * S1(h,g,i) * S2(i,e)
// 0 1 2 3 4 5 6 7 <-- tensor id
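//Illustrative note (not part of the original test): Z0() carries no free indices, so the
//network contracts to a single scalar. All nine internal indices (a..i) have dimension 2,
//so a naive full summation would involve 2^9 = 512 products of seven factors each; the
//pairwise contraction sequence scheduled by ExaTN evaluates the same scalar at lower cost.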
//Declare participating ExaTN tensors:
auto z0 = std::make_shared<Tensor>("Z0");
auto t0 = std::make_shared<Tensor>("T0",TensorShape{2,2});
auto t1 = std::make_shared<Tensor>("T1",TensorShape{2,2,2});
auto t2 = std::make_shared<Tensor>("T2",TensorShape{2,2});
auto h0 = std::make_shared<Tensor>("H0",TensorShape{2,2,2,2});
auto s0 = std::make_shared<Tensor>("S0",TensorShape{2,2});
auto s1 = std::make_shared<Tensor>("S1",TensorShape{2,2,2});
auto s2 = std::make_shared<Tensor>("S2",TensorShape{2,2});
//Declare a tensor network:
TensorNetwork network("{0,1} 3-site MPS closure", //tensor network name
"Z0() = T0(a,b) * T1(b,c,d) * T2(d,e) * H0(a,c,f,g) * S0(f,h) * S1(h,g,i) * S2(i,e)", //tensor network specification
std::map<std::string,std::shared_ptr<Tensor>>{
{z0->getName(),z0}, {t0->getName(),t0}, {t1->getName(),t1}, {t2->getName(),t2},
{h0->getName(),h0}, {s0->getName(),s0}, {s1->getName(),s1}, {s2->getName(),s2}
}
);
network.printIt();
//Create participating ExaTN tensors:
std::shared_ptr<TensorOperation> create_z0 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_z0->setTensorOperand(z0);
exatn::numericalServer->submit(create_z0);
std::shared_ptr<TensorOperation> create_t0 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_t0->setTensorOperand(t0);
exatn::numericalServer->submit(create_t0);
std::shared_ptr<TensorOperation> create_t1 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_t1->setTensorOperand(t1);
exatn::numericalServer->submit(create_t1);
std::shared_ptr<TensorOperation> create_t2 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_t2->setTensorOperand(t2);
exatn::numericalServer->submit(create_t2);
std::shared_ptr<TensorOperation> create_h0 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_h0->setTensorOperand(h0);
exatn::numericalServer->submit(create_h0);
std::shared_ptr<TensorOperation> create_s0 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_s0->setTensorOperand(s0);
exatn::numericalServer->submit(create_s0);
std::shared_ptr<TensorOperation> create_s1 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_s1->setTensorOperand(s1);
exatn::numericalServer->submit(create_s1);
std::shared_ptr<TensorOperation> create_s2 = op_factory.createTensorOp(TensorOpCode::CREATE);
create_s2->setTensorOperand(s2);
exatn::numericalServer->submit(create_s2);
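//Hypothetical alternative (sketch only, not part of the test): the per-tensor boilerplate
//above could be folded into a helper that submits any unary tensor operation, reusing the
//same op_factory and exatn::numericalServer calls already used in this test:
// auto submit_unary = [&op_factory](TensorOpCode opcode, std::shared_ptr<Tensor> tens){
//  std::shared_ptr<TensorOperation> op = op_factory.createTensorOp(opcode);
//  op->setTensorOperand(tens);
//  exatn::numericalServer->submit(op);
// };
// for(auto & tens: {z0,t0,t1,t2,h0,s0,s1,s2}) submit_unary(TensorOpCode::CREATE,tens);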
//Initialize participating ExaTN tensors:
//`Finish
//Evaluate the tensor network:
exatn::numericalServer->submit(network);
//auto synced = exatn::numericalServer->sync(network,true);
//assert(synced);
//Retrieve the result:
//`Finish
//Destroy participating ExaTN tensors:
std::shared_ptr<TensorOperation> destroy_s2 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_s2->setTensorOperand(s2);
exatn::numericalServer->submit(destroy_s2);
std::shared_ptr<TensorOperation> destroy_s1 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_s1->setTensorOperand(s1);
exatn::numericalServer->submit(destroy_s1);
std::shared_ptr<TensorOperation> destroy_s0 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_s0->setTensorOperand(s0);
exatn::numericalServer->submit(destroy_s0);
std::shared_ptr<TensorOperation> destroy_h0 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_h0->setTensorOperand(h0);
exatn::numericalServer->submit(destroy_h0);
std::shared_ptr<TensorOperation> destroy_t2 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_t2->setTensorOperand(t2);
exatn::numericalServer->submit(destroy_t2);
std::shared_ptr<TensorOperation> destroy_t1 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_t1->setTensorOperand(t1);
exatn::numericalServer->submit(destroy_t1);
std::shared_ptr<TensorOperation> destroy_t0 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_t0->setTensorOperand(t0);
exatn::numericalServer->submit(destroy_t0);
std::shared_ptr<TensorOperation> destroy_z0 = op_factory.createTensorOp(TensorOpCode::DESTROY);
destroy_z0->setTensorOperand(z0);
exatn::numericalServer->submit(destroy_z0);
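//With the hypothetical submit_unary helper sketched after the CREATE block, the destroys
//above (issued in reverse creation order) would likewise collapse to:
// for(auto & tens: {s2,s1,s0,h0,t2,t1,t0,z0}) submit_unary(TensorOpCode::DESTROY,tens);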
//Grab a beer!
}
int main(int argc, char **argv) {
exatn::initialize();
......
/** ExaTN::Numerics: Tensor network
REVISION: 2019/09/11
REVISION: 2019/09/12
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -13,8 +13,10 @@ Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
#include <string>
#include <vector>
#include <list>
#include <map>
#include <memory>
#include <algorithm>
namespace exatn{
@@ -886,14 +888,35 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
auto & tensor_op_factory = *(TensorOpFactory::get());
if(this->getNumTensors() > 1){ //two or more input tensors: One or more contractions
TensorNetwork net(*this);
std::list<unsigned int> intermediates;
unsigned int num_contractions = contraction_seq_.size();
for(auto contr = contraction_seq_.cbegin(); contr != contraction_seq_.cend(); ++contr){
//std::cout << "#DEBUG(TensorNetwork::getOperationList): Contracting " << contr->left_id << " * " << contr->right_id
// << " -> " << contr->result_id << std::endl; //debug
auto tensor1 = net.getTensor(contr->left_id);
auto tensor2 = net.getTensor(contr->right_id);
std::string contr_pattern;
auto merged = net.mergeTensors(contr->left_id,contr->right_id,contr->result_id,&contr_pattern);
assert(merged);
if(num_contractions > 1){ //intermediate contraction
auto merged = net.mergeTensors(contr->left_id,contr->right_id,contr->result_id,&contr_pattern);
assert(merged);
}else{ //last contraction
assert(contr->result_id == 0); //last tensor contraction accumulates into the output tensor of the tensor network
const auto * tensor1_legs = net.getTensorConnections(contr->left_id);
assert(tensor1_legs != nullptr);
const auto * tensor2_legs = net.getTensorConnections(contr->right_id);
assert(tensor2_legs != nullptr);
std::vector<TensorLeg> pattern(*tensor1_legs);
pattern.insert(pattern.end(),tensor2_legs->begin(),tensor2_legs->end());
auto generated = generate_contraction_pattern(pattern,tensor1_legs->size(),tensor2_legs->size(),contr_pattern);
assert(generated);
}
auto tensor0 = net.getTensor(contr->result_id);
if(contr->result_id != 0){ //intermediate tensors need to be created/destroyed
auto op_create = tensor_op_factory.createTensorOp(TensorOpCode::CREATE);
op_create->setTensorOperand(tensor0);
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op_create)));
intermediates.emplace_back(contr->result_id);
}
auto op = tensor_op_factory.createTensorOp(TensorOpCode::CONTRACT);
op->setTensorOperand(tensor0);
op->setTensorOperand(tensor1);
@@ -901,7 +924,23 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
op->setIndexPattern(contr_pattern);
assert(op->isSet());
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op)));
auto left_intermediate = std::find(intermediates.begin(),intermediates.end(),contr->left_id);
if(left_intermediate != intermediates.end()){
auto op_destroy = tensor_op_factory.createTensorOp(TensorOpCode::DESTROY);
op_destroy->setTensorOperand(tensor1);
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op_destroy)));
intermediates.erase(left_intermediate);
}
auto right_intermediate = std::find(intermediates.begin(),intermediates.end(),contr->right_id);
if(right_intermediate != intermediates.end()){
auto op_destroy = tensor_op_factory.createTensorOp(TensorOpCode::DESTROY);
op_destroy->setTensorOperand(tensor2);
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op_destroy)));
intermediates.erase(right_intermediate);
}
--num_contractions;
}
assert(intermediates.empty());
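//Illustration (hypothetical contraction sequence, not taken from a real plan): for a
//network Z0 = A * B * C * D contracted pairwise, the loop above would emit roughly
// CREATE I1; CONTRACT I1 = A * B;
// CREATE I2; CONTRACT I2 = I1 * C; DESTROY I1;
// CONTRACT Z0 = I2 * D; DESTROY I2;
//i.e., each intermediate is created just before the contraction that produces it and
//destroyed right after the contraction that consumes it, while the final contraction
//(result_id == 0) accumulates directly into the output tensor without an intermediate.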
}else{ //one input tensor: Single addition
std::shared_ptr<Tensor> tensor0(nullptr);
std::shared_ptr<Tensor> tensor1(nullptr);
......
@@ -151,7 +151,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
const auto tensor0_hash = tensor0.getTensorHash();
auto tens0_pos = tensors_.find(tensor0_hash);
if(tens0_pos == tensors_.end()){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Tensor operand 0 not found: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Tensor operand 0 not found: " << std::endl;
op.printIt();
assert(false);
}
@@ -161,7 +161,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
const auto tensor1_hash = tensor1.getTensorHash();
auto tens1_pos = tensors_.find(tensor1_hash);
if(tens1_pos == tensors_.end()){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Tensor operand 1 not found: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Tensor operand 1 not found: " << std::endl;
op.printIt();
assert(false);
}
@@ -171,7 +171,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
const auto tensor2_hash = tensor2.getTensorHash();
auto tens2_pos = tensors_.find(tensor2_hash);
if(tens2_pos == tensors_.end()){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Tensor operand 2 not found: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Tensor operand 2 not found: " << std::endl;
op.printIt();
assert(false);
}
@@ -181,7 +181,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
auto task_res = tasks_.emplace(std::make_pair(*exec_handle,
std::make_shared<talsh::TensorTask>()));
if(!task_res.second){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Attempt to execute the same operation twice: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Attempt to execute the same operation twice: " << std::endl;
op.printIt();
assert(false);
}
......
@@ -13,7 +13,7 @@ TensorRuntime::TensorRuntime(const std::string & graph_executor_name,
{
graph_executor_ = exatn::getService<TensorGraphExecutor>(graph_executor_name_);
std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: DAG executor set to "
<< graph_executor_name_ << "+" << node_executor_name_ << std::endl << std::flush;
<< graph_executor_name_ << " + " << node_executor_name_ << std::endl << std::flush;
launchExecutionThread();
}
......