Commit f18fb5c7 authored by Dmitry I. Lyakh's avatar Dmitry I. Lyakh

Merge branch 'devel_dil' into devel

parents 9a93a9b1 1fcd89fc
Pipeline #71038 passed with stage
in 4 minutes and 49 seconds
......@@ -45,6 +45,121 @@ TEST(NumServerTester, checkNumServer)
}
TEST(NumServerTester, useNumServer)
{
 using exatn::TensorOpCode;
 using exatn::numerics::Tensor;
 using exatn::numerics::TensorShape;
 using exatn::numerics::TensorOperation;
 using exatn::numerics::TensorOpFactory;

 auto & op_factory = *(TensorOpFactory::get()); //tensor operation factory

 //Example of tensor network processing:
 //3-site MPS closure with 2-body Hamiltonian applied to sites 0 and 1:
 //Z0() = T0(a,b) * T1(b,c,d) * T2(d,e) * H0(a,c,f,g) * S0(f,h) * S1(h,g,i) * S2(i,e)
 // 0 1 2 3 4 5 6 7 <-- tensor id

 //Declare participating ExaTN tensors:
 auto z0 = std::make_shared<Tensor>("Z0");
 auto t0 = std::make_shared<Tensor>("T0",TensorShape{2,2});
 auto t1 = std::make_shared<Tensor>("T1",TensorShape{2,2,2});
 auto t2 = std::make_shared<Tensor>("T2",TensorShape{2,2});
 auto h0 = std::make_shared<Tensor>("H0",TensorShape{2,2,2,2});
 auto s0 = std::make_shared<Tensor>("S0",TensorShape{2,2});
 auto s1 = std::make_shared<Tensor>("S1",TensorShape{2,2,2});
 auto s2 = std::make_shared<Tensor>("S2",TensorShape{2,2});

 //Declare a tensor network:
 TensorNetwork network("{0,1} 3-site MPS closure", //tensor network name
  "Z0() = T0(a,b) * T1(b,c,d) * T2(d,e) * H0(a,c,f,g) * S0(f,h) * S1(h,g,i) * S2(i,e)", //tensor network specification
  std::map<std::string,std::shared_ptr<Tensor>>{
   {z0->getName(),z0}, {t0->getName(),t0}, {t1->getName(),t1}, {t2->getName(),t2},
   {h0->getName(),h0}, {s0->getName(),s0}, {s1->getName(),s1}, {s2->getName(),s2}
  }
 );
 network.printIt();

 //Helper: builds a single-operand tensor operation (CREATE or DESTROY) for the
 //given tensor and submits it to the numerical server. Factored out to avoid
 //repeating the same three-line boilerplate once per tensor:
 auto submit_unary_op = [&op_factory](TensorOpCode opcode, const std::shared_ptr<Tensor> & tensor){
  std::shared_ptr<TensorOperation> op = op_factory.createTensorOp(opcode);
  op->setTensorOperand(tensor);
  exatn::numericalServer->submit(op);
 };

 //Create participating ExaTN tensors (same order as their declaration above):
 for(const auto & tens: {z0,t0,t1,t2,h0,s0,s1,s2}){
  submit_unary_op(TensorOpCode::CREATE,tens);
 }

 //Initialize participating ExaTN tensors:
 //`Finish

 //Evaluate the tensor network:
 exatn::numericalServer->submit(network);
 //auto synced = exatn::numericalServer->sync(network,true);
 //assert(synced);

 //Retrieve the result:
 //`Finish

 //Destroy participating ExaTN tensors (reverse order of creation):
 for(const auto & tens: {s2,s1,s0,h0,t2,t1,t0,z0}){
  submit_unary_op(TensorOpCode::DESTROY,tens);
 }

 //Grab a beer!
}
int main(int argc, char **argv) {
exatn::initialize();
......
/** ExaTN::Numerics: Abstract Tensor
REVISION: 2019/07/22
REVISION: 2019/09/11
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -82,6 +82,8 @@ public:
Tensor(const std::string & name); //tensor name
/** Create a tensor by contracting two other tensors.
The vector of tensor legs specifies the tensor contraction pattern:
contraction[] describes dimensions of both input tensors,
first left tensor dimensions, then right tensor dimensions:
contraction.size() = left_rank + right_rank;
Output tensor id = 0;
Left input tensor id = 1;
......
/** ExaTN::Numerics: Tensor network
REVISION: 2019/09/09
REVISION: 2019/09/12
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -13,8 +13,10 @@ Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
#include <string>
#include <vector>
#include <list>
#include <map>
#include <memory>
#include <algorithm>
namespace exatn{
......@@ -239,6 +241,14 @@ std::shared_ptr<Tensor> TensorNetwork::getTensor(unsigned int tensor_id)
}
const std::vector<TensorLeg> * TensorNetwork::getTensorConnections(unsigned int tensor_id)
{
 //Look the tensor up by its id; absent tensors yield nullptr instead of an error:
 const auto iter = tensors_.find(tensor_id);
 return (iter == tensors_.end()) ? nullptr : &(iter->second.getTensorLegs());
}
bool TensorNetwork::finalize(bool check_validity)
{
if(finalized_ == 0){
......@@ -767,6 +777,11 @@ bool TensorNetwork::mergeTensors(unsigned int left_id, unsigned int right_id, un
}
}
assert(res_mode == num_uncontracted);
//Generate symbolic contraction pattern if needed:
if(contr_pattern != nullptr){
auto generated = generate_contraction_pattern(pattern,left_tensor_rank,right_tensor_rank,*contr_pattern);
assert(generated);
}
//Append the tensor result:
tensors_.emplace(std::make_pair(
result_id,
......@@ -873,37 +888,80 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
auto & tensor_op_factory = *(TensorOpFactory::get());
if(this->getNumTensors() > 1){ //two or more input tensors: One or more contractions
TensorNetwork net(*this);
std::list<unsigned int> intermediates;
unsigned int num_contractions = contraction_seq_.size();
for(auto contr = contraction_seq_.cbegin(); contr != contraction_seq_.cend(); ++contr){
//std::cout << "#DEBUG(TensorNetwork::getOperationList): Contracting " << contr->left_id << " * " << contr->right_id
// << " -> " << contr->result_id << std::endl; //debug
auto tensor1 = net.getTensor(contr->left_id);
auto tensor2 = net.getTensor(contr->right_id);
//`Get index pattern for tensor contraction
auto merged = net.mergeTensors(contr->left_id,contr->right_id,contr->result_id);
assert(merged);
std::string contr_pattern;
if(num_contractions > 1){ //intermediate contraction
auto merged = net.mergeTensors(contr->left_id,contr->right_id,contr->result_id,&contr_pattern);
assert(merged);
}else{ //last contraction
assert(contr->result_id == 0); //last tensor contraction accumulates into the output tensor of the tensor network
const auto * tensor1_legs = net.getTensorConnections(contr->left_id);
assert(tensor1_legs != nullptr);
const auto * tensor2_legs = net.getTensorConnections(contr->right_id);
assert(tensor2_legs != nullptr);
std::vector<TensorLeg> pattern(*tensor1_legs);
pattern.insert(pattern.end(),tensor2_legs->begin(),tensor2_legs->end());
auto generated = generate_contraction_pattern(pattern,tensor1_legs->size(),tensor2_legs->size(),contr_pattern);
assert(generated);
}
auto tensor0 = net.getTensor(contr->result_id);
if(contr->result_id != 0){ //intermediate tensors need to be created/destroyed
auto op_create = tensor_op_factory.createTensorOp(TensorOpCode::CREATE);
op_create->setTensorOperand(tensor0);
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op_create)));
intermediates.emplace_back(contr->result_id);
}
auto op = tensor_op_factory.createTensorOp(TensorOpCode::CONTRACT);
op->setTensorOperand(tensor0);
op->setTensorOperand(tensor1);
op->setTensorOperand(tensor2);
op->setIndexPattern("`Replace with generated index pattern");
op->setIndexPattern(contr_pattern);
assert(op->isSet());
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op)));
auto left_intermediate = std::find(intermediates.begin(),intermediates.end(),contr->left_id);
if(left_intermediate != intermediates.end()){
auto op_destroy = tensor_op_factory.createTensorOp(TensorOpCode::DESTROY);
op_destroy->setTensorOperand(tensor1);
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op_destroy)));
intermediates.erase(left_intermediate);
}
auto right_intermediate = std::find(intermediates.begin(),intermediates.end(),contr->right_id);
if(right_intermediate != intermediates.end()){
auto op_destroy = tensor_op_factory.createTensorOp(TensorOpCode::DESTROY);
op_destroy->setTensorOperand(tensor2);
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op_destroy)));
intermediates.erase(right_intermediate);
}
--num_contractions;
}
assert(intermediates.empty());
}else{ //one input tensor: Single addition
std::shared_ptr<Tensor> tensor0(nullptr);
std::shared_ptr<Tensor> tensor1(nullptr);
unsigned int left_tensor_id = 0;
for(auto iter = this->begin(); iter != this->end(); ++iter){
if(iter->first == 0){
tensor0 = this->getTensor(iter->first);
}else{
tensor1 = this->getTensor(iter->first);
left_tensor_id = iter->first;
}
}
auto op = tensor_op_factory.createTensorOp(TensorOpCode::ADD);
op->setTensorOperand(tensor0);
op->setTensorOperand(tensor1);
//`Get index pattern for tensor addition
op->setIndexPattern("`Replace with generated index pattern");
const auto * tensor1_legs = this->getTensorConnections(left_tensor_id);
assert(tensor1_legs != nullptr);
std::string contr_pattern;
auto generated = generate_contraction_pattern(*tensor1_legs,tensor1_legs->size(),0,contr_pattern);
assert(generated);
op->setIndexPattern(contr_pattern);
assert(op->isSet());
operations_.emplace_back(std::shared_ptr<TensorOperation>(std::move(op)));
}
......
/** ExaTN::Numerics: Tensor network
REVISION: 2019/09/09
REVISION: 2019/09/11
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -124,6 +124,9 @@ public:
If not found, returns nullptr. **/
std::shared_ptr<Tensor> getTensor(unsigned int tensor_id);
/** Get tensor connections. **/
const std::vector<TensorLeg> * getTensorConnections(unsigned int tensor_id);
/** Begin iterator **/
inline Iterator begin() {return tensors_.begin();}
/** End iterator **/
......
/** ExaTN: Numerics: Symbolic tensor processing
REVISION: 2019/08/07
REVISION: 2019/09/12
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
#include "tensor_symbol.hpp"
#include <cassert>
namespace exatn{
bool parse_tensor(const std::string & tensor, //in: tensor as a string
......@@ -128,4 +130,71 @@ bool parse_tensor_network(const std::string & network, //in: tensor netwo
return true;
}
/** Generates the symbolic tensor contraction pattern "D(...)+=L(...)*R(...)"
    from the digital contraction pattern: pattern[0..left_rank-1] are the legs
    of the left tensor, pattern[left_rank..left_rank+right_rank-1] are the legs
    of the right tensor. Uncontracted dimensions get labels "u<i>" matching
    their position in the destination tensor; contracted dimensions get labels
    "c<j>" shared between the left and right tensors. Always returns true. **/
bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & pattern,
                                  unsigned int left_tensor_rank,
                                  unsigned int right_tensor_rank,
                                  std::string & symb_pattern)
/* pattern[left_rank + right_rank] = {left_legs + right_legs} */
{
 const std::size_t DEFAULT_STRING_CAPACITY = 256; //string capacity reserve value
 assert(pattern.size() == left_tensor_rank + right_tensor_rank);
 symb_pattern.clear();
 if(pattern.empty()){ //multiplication of scalars
  symb_pattern = "D()+=L()*R()";
 }else{ //at least one tensor is present
  symb_pattern.reserve(DEFAULT_STRING_CAPACITY); //no-op if capacity already suffices
  //Closes an index list: replaces the trailing comma with ')' or, for an
  //empty index list (scalar tensor), just appends ')':
  auto close_index_list = [&symb_pattern](){
   if(symb_pattern.back() == ','){
    symb_pattern.back() = ')';
   }else{
    symb_pattern.push_back(')');
   }
  };
  //Scratch map (std::vector instead of the original variable-length array,
  //which is a compiler extension and not standard C++). First pass: maps a
  //destination dimension id to its sequential uncontracted index label:
  std::vector<unsigned int> dest_indices(left_tensor_rank + right_tensor_rank);
  unsigned int dest_tensor_rank = 0;
  for(const auto & leg: pattern){
   if(leg.getTensorId() == 0){ //leg connected to the destination tensor
    dest_indices[leg.getDimensionId()] = dest_tensor_rank++;
   }
  }
  symb_pattern.append("D(");
  for(unsigned int i = 0; i < dest_tensor_rank; ++i){
   symb_pattern.append("u"+std::to_string(dest_indices[i])+",");
  }
  close_index_list();
  //Left tensor: reuse dest_indices[i] to map a left dimension to its
  //contracted index label (left_tensor_rank serves as the "uncontracted" sentinel):
  symb_pattern.append("+=L(");
  dest_tensor_rank = 0;
  unsigned int contr_ind = 0;
  for(unsigned int i = 0; i < left_tensor_rank; ++i){
   if(pattern[i].getTensorId() == 0){ //uncontracted dimension
    dest_indices[i] = left_tensor_rank; //sentinel: not a contracted index
    symb_pattern.append("u"+std::to_string(dest_tensor_rank++)+",");
   }else{ //dimension contracted with the right tensor
    dest_indices[i] = contr_ind;
    symb_pattern.append("c"+std::to_string(contr_ind++)+",");
   }
  }
  close_index_list();
  //Right tensor: contracted dimensions look their label up via the left
  //tensor dimension they are attached to:
  symb_pattern.append("*R(");
  for(unsigned int i = left_tensor_rank; i < left_tensor_rank + right_tensor_rank; ++i){
   if(pattern[i].getTensorId() == 0){ //uncontracted dimension
    symb_pattern.append("u"+std::to_string(dest_tensor_rank++)+",");
   }else{ //contracted dimension: leg points at the matching left tensor dimension
    contr_ind = dest_indices[pattern[i].getDimensionId()];
    assert(contr_ind < left_tensor_rank); //must be a genuinely contracted left dimension
    symb_pattern.append("c"+std::to_string(contr_ind)+",");
   }
  }
  close_index_list();
 }
 return true;
}
} //namespace exatn
/** ExaTN: Numerics: Symbolic tensor processing
REVISION: 2019/08/07
REVISION: 2019/09/11
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)
......@@ -34,6 +34,7 @@ Rationale:
#define EXATN_TENSOR_SYMBOL_HPP_
#include "tensor_basic.hpp"
#include "tensor_leg.hpp"
#include <string>
#include <vector>
......@@ -125,6 +126,20 @@ bool parse_tensor(const std::string & tensor, //in: tensor as a string
bool parse_tensor_network(const std::string & network, //in: tensor network as a string
std::vector<std::string> & tensors); //out: parsed (symbolic) tensors
/** Generates symbolic tensor contraction pattern from the digital tensor
contraction pattern used by the contraction-based Tensor constructor:
pattern[0..m-1] describes connectivity of dimensions of the left contracted tensor,
pattern[m..m+n-1] describes connectivity of dimensions of the right contracted tensor,
where m and n are the ranks of the left and right contracted tensors, respectively.
pattern[x] is a TensorLeg specifying the dimension of another tensor the described
dimension is connected to, where the result tensor is tensor 0 while the left and
right contracted tensors are tensors 1 and 2, respectively.
**/
bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & pattern,
unsigned int left_tensor_rank,
unsigned int right_tensor_rank,
std::string & symb_pattern);
} //namespace exatn
#endif //EXATN_TENSOR_SYMBOL_HPP_
......@@ -151,7 +151,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
const auto tensor0_hash = tensor0.getTensorHash();
auto tens0_pos = tensors_.find(tensor0_hash);
if(tens0_pos == tensors_.end()){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Tensor operand 0 not found: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Tensor operand 0 not found: " << std::endl;
op.printIt();
assert(false);
}
......@@ -161,7 +161,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
const auto tensor1_hash = tensor1.getTensorHash();
auto tens1_pos = tensors_.find(tensor1_hash);
if(tens1_pos == tensors_.end()){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Tensor operand 1 not found: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Tensor operand 1 not found: " << std::endl;
op.printIt();
assert(false);
}
......@@ -171,7 +171,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
const auto tensor2_hash = tensor2.getTensorHash();
auto tens2_pos = tensors_.find(tensor2_hash);
if(tens2_pos == tensors_.end()){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Tensor operand 2 not found: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Tensor operand 2 not found: " << std::endl;
op.printIt();
assert(false);
}
......@@ -181,7 +181,7 @@ int TalshNodeExecutor::execute(numerics::TensorOpContract & op,
auto task_res = tasks_.emplace(std::make_pair(*exec_handle,
std::make_shared<talsh::TensorTask>()));
if(!task_res.second){
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): ADD: Attempt to execute the same operation twice: " << std::endl;
std::cout << "#ERROR(exatn::runtime::node_executor_talsh): CONTRACT: Attempt to execute the same operation twice: " << std::endl;
op.printIt();
assert(false);
}
......
......@@ -13,7 +13,7 @@ TensorRuntime::TensorRuntime(const std::string & graph_executor_name,
{
graph_executor_ = exatn::getService<TensorGraphExecutor>(graph_executor_name_);
std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: DAG executor set to "
<< graph_executor_name_ << "+" << node_executor_name_ << std::endl << std::flush;
<< graph_executor_name_ << " + " << node_executor_name_ << std::endl << std::flush;
launchExecutionThread();
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment