Commit 949a5264 authored by Dmitry I. Lyakh

Bug fix: Tensor contraction patterns inside Tensor Network evaluation

now properly reproduce tensor complex conjugation.
parent fc77064b
Pipeline #85228 passed with stage
in 4 minutes and 2 seconds
......@@ -433,6 +433,86 @@ TEST(NumServerTester, circuitNumServer)
}
TEST(NumServerTester, circuitConjugateNumServer)
{
 using exatn::numerics::Tensor;
 using exatn::numerics::TensorShape;
 using exatn::numerics::TensorNetwork;
 using exatn::TensorElementType;

 exatn::resetRuntimeLoggingLevel(0); //debug

 //Define the initial qubit state vector |0>:
 std::vector<std::complex<double>> qzero {
  {1.0,0.0}, {0.0,0.0}
 };
 //Define quantum gates: *NEGATIVE* imaginary
 std::vector<std::complex<double>> unitary {
  {1.0, 0.0}, {0.0,-1.0},
  {0.0,-1.0}, {1.0, 0.0}
 };

 //Create tensors:
 bool created = exatn::createTensor("Q0", TensorElementType::COMPLEX64, TensorShape{2}); assert(created);
 created = exatn::createTensor("U", TensorElementType::COMPLEX64, TensorShape{2,2}); assert(created);
 bool registered = exatn::registerTensorIsometry("U", {0}, {1}); assert(registered);

 //Initialize tensors:
 bool initialized = exatn::initTensorData("Q0", qzero); assert(initialized);
 initialized = exatn::initTensorData("U", unitary); assert(initialized);

 {//Open a new scope:
  //Helper: prints the flattened body of the output (id 0) tensor of a network.
  //NOTE: size type chosen to match getVolume() to avoid a signed/unsigned
  //comparison (the original used int) -- assumes getVolume() returns an
  //unsigned integral type; the decltype keeps it correct either way.
  auto print_output_tensor = [](const TensorNetwork & network){
   auto local_tensor = exatn::getLocalTensor(network.getTensor(0)->getName());
   const std::complex<double> * body_ptr;
   if(local_tensor->getDataAccessHostConst(&body_ptr)){
    std::cout << "[";
    for(decltype(local_tensor->getVolume()) i = 0; i < local_tensor->getVolume(); ++i){
     std::cout << body_ptr[i];
    }
    std::cout << "]\n";
   }
  };

  //Build a tensor network representing the quantum circuit:
  TensorNetwork circuit("QuantumCircuit");
  bool appended = circuit.appendTensor(1, exatn::getTensor("Q0"), {}); assert(appended);
  appended = circuit.appendTensorGate(2, exatn::getTensor("U"), {0}); assert(appended);
  circuit.printIt(); //debug

  //Build a conjugated tensor network (must apply U^+ instead of U):
  TensorNetwork conj_circuit(circuit);
  conj_circuit.rename("ConjugatedCircuit");
  conj_circuit.conjugate();
  conj_circuit.printIt(); //debug

  //Evaluate both the original and the conjugated network:
  bool evaluated = exatn::evaluateSync(circuit); assert(evaluated);
  evaluated = exatn::evaluateSync(conj_circuit); assert(evaluated);

  //Synchronize:
  exatn::sync();

  //Retrieve and print the results (deduplicated via the helper above):
  print_output_tensor(circuit);
  print_output_tensor(conj_circuit);
 }

 //Destroy tensors:
 bool destroyed = exatn::destroyTensor("U"); assert(destroyed);
 destroyed = exatn::destroyTensor("Q0"); assert(destroyed);

 //Synchronize:
 exatn::sync();
 //Grab a coffee!
}
TEST(NumServerTester, largeCircuitNumServer)
{
using exatn::numerics::Tensor;
......
/** ExaTN::Numerics: Tensor network
REVISION: 2019/12/30
REVISION: 2019/12/31
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -334,6 +334,8 @@ const std::string & TensorNetwork::getName() const
/** Renames the tensor network. The output tensor is reset to a new one as well. **/
void TensorNetwork::rename(const std::string & name)
{
assert(finalized_ != 0); //renaming is only valid on a finalized network
resetOutputTensor(); //replace the output tensor; called before name_ is updated -- NOTE(review): presumably it must see/ignore the old name, confirm the intended ordering
name_ = name; //install the new network name
return;
}
......@@ -1101,10 +1103,12 @@ bool TensorNetwork::mergeTensors(unsigned int left_id, unsigned int right_id, un
auto * left_tensor = this->getTensorConn(left_id);
assert(left_tensor != nullptr);
auto left_tensor_rank = left_tensor->getNumLegs();
auto left_tensor_conj = left_tensor->isComplexConjugated();
const auto & left_legs = left_tensor->getTensorLegs();
auto * right_tensor = this->getTensorConn(right_id);
assert(right_tensor != nullptr);
auto right_tensor_rank = right_tensor->getNumLegs();
auto right_tensor_conj = right_tensor->isComplexConjugated();
const auto & right_legs = right_tensor->getTensorLegs();
//Count contracted and uncontracted legs:
unsigned int num_contracted = 0;
......@@ -1134,7 +1138,8 @@ bool TensorNetwork::mergeTensors(unsigned int left_id, unsigned int right_id, un
assert(res_mode == num_uncontracted);
//Generate symbolic contraction pattern if needed:
if(contr_pattern != nullptr){
auto generated = generate_contraction_pattern(pattern,left_tensor_rank,right_tensor_rank,*contr_pattern);
auto generated = generate_contraction_pattern(pattern,left_tensor_rank,right_tensor_rank,
*contr_pattern,left_tensor_conj,right_tensor_conj);
assert(generated);
}
//Append the tensor result:
......@@ -1470,7 +1475,8 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
assert(tensor2_legs != nullptr);
std::vector<TensorLeg> pattern(*tensor1_legs);
pattern.insert(pattern.end(),tensor2_legs->begin(),tensor2_legs->end());
auto generated = generate_contraction_pattern(pattern,tensor1_legs->size(),tensor2_legs->size(),contr_pattern);
auto generated = generate_contraction_pattern(pattern,tensor1_legs->size(),tensor2_legs->size(),
contr_pattern,conj1,conj2);
assert(generated);
}
auto tensor0 = net.getTensor(contr->result_id);
......@@ -1533,7 +1539,7 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
const auto * tensor1_legs = this->getTensorConnections(left_tensor_id);
assert(tensor1_legs != nullptr);
std::string contr_pattern;
auto generated = generate_contraction_pattern(*tensor1_legs,tensor1_legs->size(),0,contr_pattern);
auto generated = generate_contraction_pattern(*tensor1_legs,tensor1_legs->size(),0,contr_pattern,conj1);
assert(generated);
op->setIndexPattern(contr_pattern);
assert(op->isSet());
......
/** ExaTN::Numerics: Tensor network
REVISION: 2019/12/23
REVISION: 2019/12/31
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -130,7 +130,7 @@ public:
/** Returns the name of the tensor network. **/
const std::string & getName() const;
/** Renames the tensor network. **/
/** Renames the tensor network. The output tensor is reset to a new one as well. **/
void rename(const std::string & name);
/** Returns a given tensor of the tensor network without its connections (legs).
......
/** ExaTN: Numerics: Symbolic tensor processing
REVISION: 2019/12/10
REVISION: 2019/12/31
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -136,7 +136,9 @@ bool parse_tensor_network(const std::string & network, //in: tensor netwo
bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & pattern,
unsigned int left_tensor_rank,
unsigned int right_tensor_rank,
std::string & symb_pattern)
std::string & symb_pattern,
bool left_conjugated,
bool right_conjugated)
/* pattern[left_rank + right_rank] = {left_legs + right_legs} */
{
const std::size_t DEFAULT_STRING_CAPACITY = 256; //string capacity reserve value
......@@ -144,7 +146,15 @@ bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & patte
assert(pattern.size() == left_tensor_rank + right_tensor_rank);
symb_pattern.clear();
if(pattern.empty()){ //multiplication of scalars
symb_pattern = "D()+=L()*R()";
if(left_conjugated && right_conjugated){
symb_pattern = "D()+=L+()*R+()";
}else if(left_conjugated && !right_conjugated){
symb_pattern = "D()+=L+()*R()";
}else if(!left_conjugated && right_conjugated){
symb_pattern = "D()+=L()*R+()";
}else{
symb_pattern = "D()+=L()*R()";
}
}else{ //at least one tensor is present
if(symb_pattern.capacity() < DEFAULT_STRING_CAPACITY) symb_pattern.reserve(DEFAULT_STRING_CAPACITY);
unsigned int dest_indices[left_tensor_rank + right_tensor_rank];
......@@ -163,7 +173,11 @@ bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & patte
}else{
symb_pattern.append(")");
}
symb_pattern.append("+=L(");
if(left_conjugated){
symb_pattern.append("+=L+(");
}else{
symb_pattern.append("+=L(");
}
dest_tensor_rank = 0;
unsigned int contr_ind = 0;
for(unsigned int i = 0; i < left_tensor_rank; ++i){
......@@ -180,7 +194,11 @@ bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & patte
}else{
symb_pattern.append(")");
}
symb_pattern.append("*R(");
if(right_conjugated){
symb_pattern.append("*R+(");
}else{
symb_pattern.append("*R(");
}
for(unsigned int i = left_tensor_rank; i < left_tensor_rank + right_tensor_rank; ++i){
if(pattern[i].getTensorId() == 0){
symb_pattern.append("u"+std::to_string(dest_tensor_rank++)+",");
......@@ -206,11 +224,12 @@ bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & patte
bool generate_addition_pattern(const std::vector<numerics::TensorLeg> & pattern,
std::string & symb_pattern)
std::string & symb_pattern,
bool conjugated)
/* pattern[left_rank] = {left_legs} */
{
unsigned int rank = pattern.size();
auto generated = generate_contraction_pattern(pattern,rank,0,symb_pattern);
auto generated = generate_contraction_pattern(pattern,rank,0,symb_pattern,conjugated,false);
if(generated){
auto pos = symb_pattern.rfind("*R()");
generated = (pos != std::string::npos);
......@@ -223,12 +242,13 @@ bool generate_addition_pattern(const std::vector<numerics::TensorLeg> & pattern,
/* Generates the trivial tensor addition pattern. */
bool generate_addition_pattern(unsigned int tensor_rank,
std::string & symb_pattern)
std::string & symb_pattern,
bool conjugated)
{
std::vector<numerics::TensorLeg> pattern(tensor_rank);
unsigned int dim = 0;
for(auto & leg: pattern) leg = numerics::TensorLeg(0,dim++);
return generate_addition_pattern(pattern,symb_pattern);
return generate_addition_pattern(pattern,symb_pattern,conjugated);
}
} //namespace exatn
/** ExaTN: Numerics: Symbolic tensor processing
REVISION: 2019/12/30
REVISION: 2019/12/31
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)
......@@ -149,25 +149,27 @@ bool parse_tensor_network(const std::string & network, //in: tensor netw
where m and n are the ranks of the left and right contracted tensors, respectively.
pattern[x] is a TensorLeg specifying the dimension of another tensor the described
dimension is connected to, where the result tensor is tensor 0 while the left and
right contracted tensors are tensors 1 and 2, respectively.
**/
right contracted tensors are tensors 1 and 2, respectively. **/
bool generate_contraction_pattern(const std::vector<numerics::TensorLeg> & pattern,
unsigned int left_tensor_rank,
unsigned int right_tensor_rank,
std::string & symb_pattern);
std::string & symb_pattern,
bool left_conjugated = false,
bool right_conjugated = false);
/** Generates symbolic tensor addition pattern from the digital tensor addition pattern:
pattern[0..m-1] describes connectivity of dimensions of the left tensor,
where m is the rank of the left tensor.
pattern[x] is a TensorLeg specifying the dimension of the result tensor the described
dimension is connected to, where the result tensor is tensor 0 and the left tensor is tensor 1.
**/
dimension is connected to, where the result tensor is tensor 0 and the left tensor is tensor 1. **/
bool generate_addition_pattern(const std::vector<numerics::TensorLeg> & pattern,
std::string & symb_pattern);
std::string & symb_pattern,
bool conjugated = false);
/** Generates the trivial tensor addition pattern. **/
bool generate_addition_pattern(unsigned int tensor_rank,
std::string & symb_pattern);
std::string & symb_pattern,
bool conjugated = false);
} //namespace exatn
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.