Commit 4d9da1a9 authored by Dmitry I. Lyakh

Fixed tensor operator to tensor expansion converter, plus reconstruction test


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent 443f7af5
/** ExaTN::Numerics: General client header (free function API)
REVISION: 2021/09/30
REVISION: 2021/10/17
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -476,6 +476,14 @@ inline bool initTensorsRndSync(TensorNetwork & tensor_network) //inout: tensor n
{return numericalServer->initTensorsRndSync(tensor_network);}
/** Initializes special tensors present in the tensor network. **/
inline bool initTensorsSpecial(TensorNetwork & tensor_network) //inout: tensor network
{return numericalServer->initTensorsSpecial(tensor_network);}
inline bool initTensorsSpecialSync(TensorNetwork & tensor_network) //inout: tensor network
{return numericalServer->initTensorsSpecialSync(tensor_network);}
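A minimal usage sketch of the new Sync variant (assumptions: `net` is a previously built and allocated-name tensor network object, any tensor whose name starts with "_d" is treated as a Kronecker Delta, and TENS_ELEM_TYPE stands for a chosen exatn::TensorElementType, as in the test further below):
  //Sketch: allocate the network tensors, then fill in the special ones:
  bool ok = exatn::createTensorsSync(net,TENS_ELEM_TYPE);
  if(ok) ok = exatn::initTensorsSpecialSync(net); //initializes "_d" Kronecker Delta tensors
  assert(ok);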
/** Computes max-abs norm of a tensor. **/
inline bool computeMaxAbsSync(const std::string & name, //in: tensor name
double & norm) //out: tensor norm
@@ -501,6 +509,12 @@ inline bool computePartialNormsSync(const std::string & name, //in: t
{return numericalServer->computePartialNormsSync(name,tensor_dimension,partial_norms);}
/** Computes 2-norms of all tensors in a tensor network. **/
inline bool computeNorms2Sync(const TensorNetwork & network, //in: tensor network
std::map<std::string,double> & norms) //out: tensor norms: tensor_name --> norm
{return numericalServer->computeNorms2Sync(network,norms);}
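Usage sketch (the tensor network object `net` is assumed to exist with its tensors allocated):
  std::map<std::string,double> norms;
  bool ok = exatn::computeNorms2Sync(net,norms); //2-norms of all tensors in `net`
  if(ok) for(const auto & kv: norms) std::cout << kv.first << " : " << kv.second << std::endl;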
/** Replicates a tensor within the given process group, which defaults to all MPI processes.
Only the root_process_rank within the given process group is required to have the tensor,
that is, the tensor will automatically be created in those MPI processes which do not have it. **/
......
/** ExaTN::Numerics: Numerical server
REVISION: 2021/10/06
REVISION: 2021/10/17
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -530,37 +530,43 @@ bool NumServer::submitOp(std::shared_ptr<TensorOperation> operation)
bool NumServer::submit(std::shared_ptr<TensorOperation> operation, std::shared_ptr<TensorMapper> tensor_mapper)
{
bool success = true;
//Determine tensor element type:
std::stack<unsigned int> deltas;
const auto opcode = operation->getOpcode();
const auto num_operands = operation->getNumOperands();
auto elem_type = TensorElementType::VOID;
for(unsigned int i = 0; i < num_operands; ++i){
auto operand = operation->getTensorOperand(i);
elem_type = operand->getElementType();
if(elem_type != TensorElementType::VOID) break;
}
//Create and initialize implicit Kronecker Delta tensors:
std::stack<unsigned int> deltas;
for(unsigned int i = 0; i < num_operands; ++i){
auto operand = operation->getTensorOperand(i);
const auto & tensor_name = operand->getName();
if(tensor_name.length() >= 2){
if(tensor_name[0] == '_' && tensor_name[1] == 'd'){ //_d: explicit Kronecker Delta tensor
assert(elem_type != TensorElementType::VOID);
std::shared_ptr<TensorOperation> op0 = tensor_op_factory_->createTensorOp(TensorOpCode::CREATE);
op0->setTensorOperand(operand);
std::dynamic_pointer_cast<numerics::TensorOpCreate>(op0)->resetTensorElementType(elem_type);
success = submitOp(op0);
if(success){
deltas.push(i);
std::shared_ptr<TensorOperation> op1 = tensor_op_factory_->createTensorOp(TensorOpCode::TRANSFORM);
op1->setTensorOperand(operand);
std::dynamic_pointer_cast<numerics::TensorOpTransform>(op1)->
resetFunctor(std::shared_ptr<TensorMethod>(new numerics::FunctorInitDelta()));
success = submitOp(op1);
if(opcode != TensorOpCode::CREATE && opcode != TensorOpCode::DESTROY){
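   //Note: implicit Kronecker Delta creation is skipped for CREATE/DESTROY operations,
   //presumably to avoid recursing when the operand being created/destroyed is itself a Delta tensor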
auto elem_type = TensorElementType::VOID;
for(unsigned int i = 0; i < num_operands; ++i){
auto operand = operation->getTensorOperand(i);
elem_type = operand->getElementType();
if(elem_type != TensorElementType::VOID) break;
}
for(unsigned int i = 0; i < num_operands; ++i){
auto operand = operation->getTensorOperand(i);
const auto & tensor_name = operand->getName();
if(tensor_name.length() >= 2){
if(tensor_name[0] == '_' && tensor_name[1] == 'd'){ //_d: explicit Kronecker Delta tensor
if(!tensorAllocated(tensor_name)){
//std::cout << "#DEBUG(exatn::NumServer::submitOp): Kronecker Delta tensor creation: "
// << tensor_name << ": Element type = " << static_cast<int>(elem_type) << std::endl; //debug
assert(elem_type != TensorElementType::VOID);
std::shared_ptr<TensorOperation> op0 = tensor_op_factory_->createTensorOp(TensorOpCode::CREATE);
op0->setTensorOperand(operand);
std::dynamic_pointer_cast<numerics::TensorOpCreate>(op0)->resetTensorElementType(elem_type);
success = submitOp(op0);
if(success){
deltas.push(i);
std::shared_ptr<TensorOperation> op1 = tensor_op_factory_->createTensorOp(TensorOpCode::TRANSFORM);
op1->setTensorOperand(operand);
std::dynamic_pointer_cast<numerics::TensorOpTransform>(op1)->
resetFunctor(std::shared_ptr<TensorMethod>(new numerics::FunctorInitDelta()));
success = submitOp(op1);
}
}
}
}
if(!success) break;
}
if(!success) break;
}
if(success){
//Submit the main tensor operation:
@@ -1595,6 +1601,50 @@ bool NumServer::initTensorsRndSync(TensorNetwork & tensor_network)
return success;
}
bool NumServer::initTensorsSpecial(TensorNetwork & tensor_network)
{
bool success = true;
for(auto tens = tensor_network.begin(); tens != tensor_network.end(); ++tens){
auto tensor = tens->second.getTensor();
const auto & tens_name = tensor->getName();
if(tens->first != 0){ //input tensor
if(tens_name.length() >= 2){
if(tens_name[0] == '_' && tens_name[1] == 'd'){
if(tensorAllocated(tens_name)){
success = transformTensor(tens_name,std::shared_ptr<TensorMethod>(new numerics::FunctorInitDelta()));
}else{
success = false;
}
}
}
}
if(!success) break;
}
return success;
}
bool NumServer::initTensorsSpecialSync(TensorNetwork & tensor_network)
{
bool success = true;
for(auto tens = tensor_network.begin(); tens != tensor_network.end(); ++tens){
auto tensor = tens->second.getTensor();
const auto & tens_name = tensor->getName();
if(tens->first != 0){ //input tensor
if(tens_name.length() >= 2){
if(tens_name[0] == '_' && tens_name[1] == 'd'){
if(tensorAllocated(tens_name)){
success = transformTensorSync(tens_name,std::shared_ptr<TensorMethod>(new numerics::FunctorInitDelta()));
}else{
success = false;
}
}
}
}
if(!success) break;
}
return success;
}
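For reference, the intended element-wise semantics of FunctorInitDelta, sketched below (an assumption based on the Kronecker Delta naming; for rank 2 this yields the identity matrix):
  //Sketch: delta[i0,i1,...,ik] = 1 if all indices coincide, else 0
  #include <vector>
  double delta_element(const std::vector<unsigned int> & idx) {
    for(std::size_t i = 1; i < idx.size(); ++i) if(idx[i] != idx[0]) return 0.0;
    return 1.0;
  }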
bool NumServer::computeMaxAbsSync(const std::string & name,
double & norm)
{
@@ -1758,6 +1808,21 @@ bool NumServer::computePartialNormsSync(const std::string & name, //i
return submitted;
}
bool NumServer::computeNorms2Sync(const TensorNetwork & network,
std::map<std::string,double> & norms)
{
bool success = true;
norms.clear();
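  //Note: std::map::emplace fails for a name already present, so each distinct tensor norm is computed once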
for(auto tens = network.cbegin(); tens != network.cend(); ++tens){
auto res = norms.emplace(std::make_pair(tens->second.getName(),0.0));
if(res.second){
success = computeNorm2Sync(tens->second.getName(),res.first->second);
if(!success) break;
}
}
return success;
}
bool NumServer::replicateTensor(const std::string & name, int root_process_rank)
{
return replicateTensor(getDefaultProcessGroup(),name,root_process_rank);
@@ -3108,7 +3173,8 @@ bool NumServer::balanceNorm2Sync(const ProcessGroup & process_group,
break;
}
}else{
std::cout << "#WARNING(exatn::balanceNorm2): Tensor has zero norm, thus cannot be renormalized!" << std::endl;
std::cout << "#WARNING(exatn::balanceNorm2): Tensor " << tens->second.getName()
<< " has zero norm, thus cannot be renormalized!" << std::endl;
}
}else{
std::cout << "#ERROR(exatn::balanceNorm2): Unable to compute the norm of input tensor "
......
/** ExaTN::Numerics: Numerical server
REVISION: 2021/09/30
REVISION: 2021/10/17
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -664,6 +664,11 @@ public:
bool initTensorsRndSync(TensorNetwork & tensor_network); //inout: tensor network
/** Initializes special tensors present in the tensor network. **/
bool initTensorsSpecial(TensorNetwork & tensor_network); //inout: tensor network
bool initTensorsSpecialSync(TensorNetwork & tensor_network); //inout: tensor network
/** Computes max-abs norm of a tensor. **/
bool computeMaxAbsSync(const std::string & name, //in: tensor name
double & norm); //out: tensor norm
@@ -681,6 +686,10 @@ public:
unsigned int tensor_dimension, //in: chosen tensor dimension
std::vector<double> & partial_norms); //out: partial 2-norms over the chosen tensor dimension
/** Computes 2-norms of all tensors in a tensor network. **/
bool computeNorms2Sync(const TensorNetwork & network, //in: tensor network
std::map<std::string,double> & norms); //out: tensor norms: tensor_name --> norm
/** Replicates a tensor within the given process group, which defaults to all MPI processes.
Only the root_process_rank within the given process group is required to have the tensor,
that is, the tensor will automatically be created in those MPI processes which do not have it. **/
......
@@ -44,11 +44,11 @@
#define EXATN_TEST23
#define EXATN_TEST24
#define EXATN_TEST25
#define EXATN_TEST26*/
//#define EXATN_TEST27 //requires input file from source
//#define EXATN_TEST28 //requires input file from source
//#define EXATN_TEST29
//#define EXATN_TEST30
#define EXATN_TEST26
#define EXATN_TEST27 //requires input file from source
#define EXATN_TEST28 //requires input file from source
#define EXATN_TEST29
#define EXATN_TEST30*/
#ifdef EXATN_TEST0
@@ -1549,7 +1549,7 @@ TEST(NumServerTester, IsingTNO)
bool success = true;
//exatn::resetLoggingLevel(2,2); //debug
exatn::resetLoggingLevel(2,2); //debug
//Define Ising Hamiltonian constants:
constexpr std::complex<double> ZERO{0.0,0.0};
@@ -1601,10 +1601,11 @@ TEST(NumServerTester, IsingTNO)
//Build a tensor network operator:
auto ket_tensor = exatn::makeSharedTensor("TensorSpace",std::vector<int>(num_sites,2));
auto vec_net = exatn::makeSharedTensorNetwork("VectorNet",ket_tensor,*tn_builder,false);
vec_net->printIt(); //debug
//auto vec_net = exatn::makeSharedTensorNetwork("VectorNet",ket_tensor,*tn_builder,false);
//vec_net->printIt(); //debug
auto space_tensor = exatn::makeSharedTensor("TensorSpaceMap",std::vector<int>(num_sites*2,2));
auto ham_net = exatn::makeSharedTensorNetwork("HamiltonianNet",space_tensor,*tn_builder,true);
ham_net->markOptimizableAllTensors();
ham_net->printIt(); //debug
TensorOperator ham_tno("HamiltonianTNO");
success = ham_tno.appendComponent(ham_net,{{0,0},{1,1},{2,2},{3,3}},{{0,4},{1,5},{2,6},{3,7}},{1.0,0.0});
@@ -1628,18 +1629,42 @@ TEST(NumServerTester, IsingTNO)
success = exatn::initTensorDataSync("U22",hamu); assert(success);
success = exatn::initTensorDataSync("U33",hamu); assert(success);
//Remap the Ising Hamiltonian as a tensor network operator:
//Create and initialize tensor network operator tensors:
success = exatn::createTensorsSync(*ham_net,TENS_ELEM_TYPE); assert(success);
success = exatn::initTensorsRndSync(*ham_net); assert(success);
//Remap tensor operators as tensor expansions:
auto ham_expansion = makeSharedTensorExpansion(ham,*ket_tensor);
ham_expansion->printIt(); //debug
auto ham_tno_expansion = makeSharedTensorExpansion(ham_tno,*ket_tensor);
ham_tno_expansion->printIt(); //debug
//Reconstruct the Ising Hamiltonian as a tensor network operator:
//Create and initialize special tensors in the Hamiltonian tensor expansion:
for(auto net = ham_expansion->begin(); net != ham_expansion->end(); ++net){
success = exatn::createTensorsSync(*(net->network),TENS_ELEM_TYPE); assert(success);
success = exatn::initTensorsSpecialSync(*(net->network)); assert(success);
}
//Reconstruct the Ising Hamiltonian as a tensor network operator:
success = exatn::balanceNormalizeNorm2Sync(*ham_expansion,1.0,1.0,false); assert(success);
success = exatn::balanceNorm2Sync(*ham_tno_expansion,1.0,true); assert(success);
ham_tno_expansion->conjugate();
exatn::TensorNetworkReconstructor::resetDebugLevel(1); //debug
exatn::TensorNetworkReconstructor reconstructor(ham_expansion,ham_tno_expansion,1e-4);
success = exatn::sync(); assert(success);
double residual_norm, fidelity;
bool reconstructed = reconstructor.reconstruct(&residual_norm,&fidelity);
success = exatn::sync(); assert(success);
if(reconstructed){
std::cout << "Reconstruction succeeded: Residual norm = " << residual_norm
<< "; Fidelity = " << fidelity << std::endl;
}else{
std::cout << "Reconstruction failed!" << std::endl; assert(false);
}
//Destroy all tensors:
success = exatn::sync(); assert(success);
success = exatn::destroyTensorsSync(*ham_net); assert(success);
success = exatn::destroyTensorSync("U33"); assert(success);
success = exatn::destroyTensorSync("U22"); assert(success);
success = exatn::destroyTensorSync("U11"); assert(success);
......
/** ExaTN::Numerics: Tensor network builder: Tree: Tree Tensor Network
REVISION: 2021/10/07
REVISION: 2021/10/15
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -100,7 +100,11 @@ void NetworkBuilderTTN::build(TensorNetwork & network, bool tensor_operator)
}else{
for(unsigned int i = 0; i < (tens_rank - end_decr); ++i){
unsigned int below_tensor_id = (tensor_id_base - num_dims + extent_id + i);
tens_legs[i] = TensorLeg(below_tensor_id,(network.getTensor(below_tensor_id)->getRank() - 1));
if(tensor_operator){
tens_legs[i] = TensorLeg(below_tensor_id,(network.getTensor(below_tensor_id)->getRank() / 2));
}else{
tens_legs[i] = TensorLeg(below_tensor_id,(network.getTensor(below_tensor_id)->getRank() - 1));
}
}
if(end_decr){
tens_legs[tens_rank - 1] = TensorLeg(tensor_id_base + num_tensors_in_layer + (num_dims_new / arity_),
@@ -114,7 +118,7 @@ void NetworkBuilderTTN::build(TensorNetwork & network, bool tensor_operator)
false,false);
assert(appended);
if(tensor_operator && layer == 0){
auto * tens_conn = network.getTensorConn(tensor_id_base+num_dims_new);
auto * tens_conn = network.getTensorConn(tensor_id_base + num_dims_new);
for(unsigned int i = 0; i < (tens_rank - end_decr); ++i){
const unsigned int output_dim_id = output_tensor_rank + extent_id + i;
tens_conn->appendLeg(output_dim_extents[output_dim_id],TensorLeg{0,output_dim_id});
@@ -130,6 +134,7 @@ void NetworkBuilderTTN::build(TensorNetwork & network, bool tensor_operator)
++layer;
}
}
//std::cout << "#DEBUG(exatn::network_builder_ttn): Network built:\n"; network.printIt(); //debug
return;
}
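The leg-wiring rule introduced above, restated as a hypothetical helper (assumption: an operator TTN node of rank r keeps its ket legs at positions [0, r/2) and its bra legs at [r/2, r), so the upward leg of the node below sits at r/2, whereas for a plain TTN vector it remains the last leg, r - 1):
  //Sketch of the leg-position rule used in NetworkBuilderTTN::build():
  unsigned int upward_leg_position(unsigned int rank, bool tensor_operator) {
    return tensor_operator ? rank / 2 : rank - 1;
  }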
......
/** ExaTN::Numerics: Tensor
REVISION: 2021/08/21
REVISION: 2021/10/15
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -102,8 +102,11 @@ name_(name), element_type_(TensorElementType::VOID)
//Set the tensor element type:
auto left_tensor_type = left_tensor.getElementType();
auto right_tensor_type = right_tensor.getElementType();
assert(left_tensor_type == right_tensor_type);
this->setElementType(left_tensor_type);
if(static_cast<int>(left_tensor_type) <= static_cast<int>(right_tensor_type)){
this->setElementType(left_tensor_type);
}else{
this->setElementType(right_tensor_type);
}
}
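The new rule replaces the hard assert with a deterministic choice, sketched below: the operand whose element type has the smaller enum value wins (in particular, a VOID operand keeps the composite tensor VOID):
  //Sketch of the resolution rule above:
  TensorElementType resolve_type(TensorElementType l, TensorElementType r) {
    return (static_cast<int>(l) <= static_cast<int>(r)) ? l : r;
  }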
Tensor::Tensor(BytePacket & byte_packet)
......
/** ExaTN: Tensor basic types and parameters
REVISION: 2021/08/20
REVISION: 2021/10/15
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -31,6 +31,17 @@ constexpr SpaceId SOME_SPACE = 0; //any unregistered (anonymous) space (all regi
constexpr SubspaceId FULL_SUBSPACE = 0; //every space has its trivial (full) subspace automatically registered as subspace 0
constexpr SubspaceId UNREG_SUBSPACE = 0xFFFFFFFFFFFFFFFF; //id of any unregistered subspace
//Possible types of tensor elements:
enum class TensorElementType{
VOID,
REAL16,
REAL32,
REAL64,
COMPLEX16,
COMPLEX32,
COMPLEX64
};
//Direction of a leg (directed edge) in a tensor network:
enum class LegDirection{
UNDIRECT, //no direction
@@ -50,6 +61,7 @@ enum class IndexKind{
RTRACE //traced index in the right tensor operand
};
//Basic tensor operations:
enum class TensorOpCode{
NOOP, //no operation
CREATE, //tensor creation
@@ -69,15 +81,6 @@ enum class TensorOpCode{
ALLREDUCE //tensor allreduce (parallel execution only)
};
enum class TensorElementType{
VOID,
REAL16,
REAL32,
REAL64,
COMPLEX16,
COMPLEX32,
COMPLEX64
};
//TensorElementTypeSize<enum TensorElementType>() --> Size in bytes:
template <TensorElementType> constexpr std::size_t TensorElementTypeSize();
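Plausible specializations of this template, assuming the usual ExaTN convention that REALn is an n-bit real and COMPLEXn a pair of n-bit reals (an assumption; the actual definitions are elided here):
  //Sketch (assumed sizes in bytes):
  template <> constexpr std::size_t TensorElementTypeSize<TensorElementType::REAL32>() {return 4;}
  template <> constexpr std::size_t TensorElementTypeSize<TensorElementType::REAL64>() {return 8;}
  template <> constexpr std::size_t TensorElementTypeSize<TensorElementType::COMPLEX32>() {return 8;}
  template <> constexpr std::size_t TensorElementTypeSize<TensorElementType::COMPLEX64>() {return 16;}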
......
/** ExaTN::Numerics: Tensor network expansion
REVISION: 2021/10/14
REVISION: 2021/10/15
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -140,13 +140,13 @@ TensorExpansion::TensorExpansion(const TensorOperator & tensor_operator,
if(comp_ket_rank == comp_bra_rank){
if(comp_rank <= space_rank){
auto output_tensor = network->getTensor(0);
std::vector<int> space_leg_ids(space_rank,-1);
std::vector<unsigned int> space_leg_ids(space_rank,space_rank);
for(unsigned int i = 0; i < comp_ket_rank; ++i){
const auto & ket_leg = component->ket_legs[i];
const auto & bra_leg = component->bra_legs[i];
if(ket_leg.first < ket_space_rank && bra_leg.first < bra_space_rank){
assert(space_leg_ids[ket_leg.first] < 0);
assert(space_leg_ids[ket_space_rank + bra_leg.first] < 0);
assert(space_leg_ids[ket_leg.first] >= space_rank);
assert(space_leg_ids[ket_space_rank + bra_leg.first] >= space_rank);
assert(tensor_dims_conform(ket_subspace,*output_tensor,ket_leg.first,ket_leg.second));
assert(tensor_dims_conform(bra_subspace,*output_tensor,bra_leg.first,bra_leg.second));
space_leg_ids[ket_leg.first] = ket_leg.second; //global leg id --> local network leg id
@@ -158,9 +158,9 @@ TensorExpansion::TensorExpansion(const TensorOperator & tensor_operator,
}
unsigned int bi = 0, ki = 0, out_rank = comp_rank;
while(ki < ket_space_rank && bi < bra_space_rank){
if(space_leg_ids[ki] >= 0) ++ki;
if(space_leg_ids[ket_space_rank + bi] >= 0) ++bi;
if(space_leg_ids[ki] < 0 && space_leg_ids[ket_space_rank + bi] < 0){
if(space_leg_ids[ki] < space_rank) ++ki;
if(space_leg_ids[ket_space_rank + bi] < space_rank) ++bi;
if(space_leg_ids[ki] >= space_rank && space_leg_ids[ket_space_rank + bi] >= space_rank){
space_leg_ids[ki] = out_rank++;
space_leg_ids[ket_space_rank + bi] = out_rank++;
auto identity_tensor = makeSharedTensor("_d",
@@ -172,7 +172,8 @@ TensorExpansion::TensorExpansion(const TensorOperator & tensor_operator,
}
}
assert(out_rank == space_rank);
auto success = this->appendComponent(network,component->coefficient); assert(success);
auto success = network->reorderOutputModes(space_leg_ids); assert(success);
success = this->appendComponent(network,component->coefficient); assert(success);
}else{
std::cout << "ERROR(exatn::TensorExpansion::ctor): The combined rank of provided tensor subspaces is too low for the given tensor operator!" << std::endl;
assert(false);
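The signedness change above in brief: space_leg_ids became std::vector<unsigned int>, which cannot hold the old -1 sentinel, so "unassigned" is now encoded as space_rank (one past any valid leg id) and every sentinel test flips from `< 0` to `>= space_rank`:
  //Sketch of the new sentinel convention:
  std::vector<unsigned int> space_leg_ids(space_rank,space_rank); //space_rank means unassigned
  bool assigned = (space_leg_ids[i] < space_rank);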
......
/** ExaTN::Numerics: Tensor network
REVISION: 2021/10/13
REVISION: 2021/10/17
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -335,7 +335,7 @@ bool TensorNetwork::isValid()
unsigned int TensorNetwork::getRank() const
{
assert(this->isFinalized());
//assert(this->isFinalized());
return tensors_.at(0).getNumLegs(); //output tensor
}
@@ -359,7 +359,10 @@ TensorElementType TensorNetwork::getTensorElementType() const
{
assert(this->isFinalized());
for(const auto & tens: tensors_){
if(tens.first != 0) return tens.second.getElementType();
if(tens.first != 0){
const auto elem_type = tens.second.getElementType();
if(elem_type != TensorElementType::VOID) return elem_type;
}
}
return TensorElementType::VOID;
}
@@ -2244,6 +2247,7 @@ double TensorNetwork::getContractionCost(unsigned int left_id, unsigned int righ
std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(const std::string & contr_seq_opt_name,
bool universal_indices)
{
const auto default_elem_type = getTensorElementType();
if(operations_.empty()){
//Determine the pseudo-optimal sequence of tensor contractions:
max_intermediate_presence_volume_ = 0.0;
@@ -2287,8 +2291,11 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
max_intermediate_presence_volume_ = std::max(max_intermediate_presence_volume_,static_cast<double>(intermediates_vol));
auto op_create = tensor_op_factory.createTensorOpShared(TensorOpCode::CREATE); //create intermediate
op_create->setTensorOperand(tensor0);
if(tensor0->getElementType() != TensorElementType::VOID)
if(tensor0->getElementType() != TensorElementType::VOID){
std::dynamic_pointer_cast<TensorOpCreate>(op_create)->resetTensorElementType(tensor0->getElementType());
}else{
std::dynamic_pointer_cast<TensorOpCreate>(op_create)->resetTensorElementType(default_elem_type);
}
operations_.emplace_back(op_create);
intermediates.emplace_back(contr->result_id);
if(ACCUMULATIVE_CONTRACTIONS){
@@ -2299,7 +2306,7 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
operations_.emplace_back(op_init);
}
}else{ //make sure the output tensor has its type set
if(tensor0->getElementType() == TensorElementType::VOID) tensor0->setElementType(tensor1->getElementType());
if(tensor0->getElementType() == TensorElementType::VOID) tensor0->setElementType(default_elem_type);
}
auto op = tensor_op_factory.createTensorOpShared(TensorOpCode::CONTRACT);
op->setTensorOperand(tensor0);
@@ -2343,7 +2350,7 @@ std::list<std::shared_ptr<TensorOperation>> & TensorNetwork::getOperationList(co
left_tensor_id = iter->first;
}
}
if(tensor0->getElementType() == TensorElementType::VOID) tensor0->setElementType(tensor1->getElementType());
if(tensor0->getElementType() == TensorElementType::VOID) tensor0->setElementType(default_elem_type);
auto op = tensor_op_factory.createTensorOp(TensorOpCode::ADD);
op->setTensorOperand(tensor0);
op->setTensorOperand(tensor1,conj1);
......
/** ExaTN::Numerics: Tensor network
REVISION: 2021/10/13
REVISION: 2021/10/17
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -599,6 +599,7 @@ protected:
If the tensor operation list is empty, does nothing. **/
void establishUniversalIndexNumeration();
private:
/** Resets the output tensor in a finalized tensor network to a new
......