Commit 934d704d authored by Dmitry I. Lyakh

Implemented tensor isometrization functor and isometric tensor tree builder


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent 0cf5e194
/** ExaTN::Numerics: Numerical server
REVISION: 2022/01/26
REVISION: 2022/01/29
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -1678,8 +1678,14 @@ bool NumServer::initTensorRnd(const std::string & name)
std::cout << "#ERROR(exatn::initTensorRnd): Random initialization of composite tensors is not implemented yet!\n";
assert(false);
}else{
const auto & process_group = getTensorProcessGroup(name);
success = broadcastTensor(process_group,name,0);
if(tensor->hasIsometries()){
const auto & isometries = tensor->retrieveIsometries();
success = transformTensor(name,std::shared_ptr<TensorMethod>(new numerics::FunctorIsometrize(*(isometries.cbegin()))));
}
if(success){
const auto & process_group = getTensorProcessGroup(name);
success = broadcastTensor(process_group,name,0);
}
}
}
}
......@@ -1696,8 +1702,14 @@ bool NumServer::initTensorRndSync(const std::string & name)
std::cout << "#ERROR(exatn::initTensorRndSync): Random initialization of composite tensors is not implemented yet!\n";
assert(false);
}else{
const auto & process_group = getTensorProcessGroup(name);
success = broadcastTensorSync(process_group,name,0);
if(tensor->hasIsometries()){
const auto & isometries = tensor->retrieveIsometries();
success = transformTensorSync(name,std::shared_ptr<TensorMethod>(new numerics::FunctorIsometrize(*(isometries.cbegin()))));
}
if(success){
const auto & process_group = getTensorProcessGroup(name);
success = broadcastTensorSync(process_group,name,0);
}
}
}
}
......
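With the change above, NumServer::initTensorRnd (and its Sync variant) isometrizes any randomly initialized tensor that carries registered isometries before broadcasting it across its process group. A minimal usage sketch, mirroring the test added in this commit (tensor name, shape, and element type are illustrative):

bool ok = exatn::createTensor("A",exatn::TensorElementType::COMPLEX64,
                              exatn::TensorShape{4,2,8,2,4}); assert(ok);
exatn::getTensor("A")->registerIsometry({0,2,4}); //dimensions 0, 2, 4 form the isometric group
ok = exatn::initTensorRnd("A"); assert(ok);       //random init + Gram-Schmidt isometrization + broadcast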
/** ExaTN::Numerics: Numerical server
REVISION: 2022/01/25
REVISION: 2022/01/28
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -59,6 +59,7 @@ Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
#include "functor_init_delta.hpp"
#include "functor_init_proj.hpp"
#include "functor_init_file.hpp"
#include "functor_isometrize.hpp"
#include "functor_scale.hpp"
#include "functor_maxabs.hpp"
#include "functor_norm1.hpp"
......@@ -109,6 +110,7 @@ using numerics::FunctorInitDat;
using numerics::FunctorInitDelta;
using numerics::FunctorInitProj;
using numerics::FunctorInitFile;
using numerics::FunctorIsometrize;
using numerics::FunctorScale;
using numerics::FunctorMaxAbs;
using numerics::FunctorNorm1;
......
......@@ -19,7 +19,7 @@
//Test activation:
#define EXATN_TEST0
/*#define EXATN_TEST0
#define EXATN_TEST1
#define EXATN_TEST2
#define EXATN_TEST3
......@@ -51,7 +51,7 @@
#define EXATN_TEST29
#define EXATN_TEST30
#define EXATN_TEST31 //requires input file from source
#define EXATN_TEST32
#define EXATN_TEST32*/
#define EXATN_TEST33
#define EXATN_TEST34
......@@ -4140,7 +4140,7 @@ TEST(NumServerTester, IsometricAIEM) {
const auto TENS_ELEM_TYPE = TensorElementType::COMPLEX64;
//exatn::resetLoggingLevel(1,2); //debug
exatn::resetLoggingLevel(2,2); //debug
std::size_t free_mem = 0;
auto used_mem = exatn::getMemoryUsage(&free_mem);
......@@ -4151,21 +4151,26 @@ TEST(NumServerTester, IsometricAIEM) {
bool success = true;
//Create tensors:
success = exatn::createTensor("A",TENS_ELEM_TYPE,TensorShape{}); assert(success);
success = exatn::createTensor("B",TENS_ELEM_TYPE,TensorShape{4,4}); assert(success);
success = exatn::createTensor("C",TENS_ELEM_TYPE,TensorShape{4,4}); assert(success);
success = exatn::createTensor("A",TENS_ELEM_TYPE,TensorShape{4,2,8,2,4}); assert(success);
exatn::getTensor("A")->registerIsometry({0,2,4});
success = exatn::createTensor("B",TENS_ELEM_TYPE,TensorShape{2,2,2,2}); assert(success);
//Init tensors:
success = exatn::initTensorRnd("A"); assert(success);
success = exatn::initTensorRnd("B"); assert(success);
success = exatn::initTensor("C",0.0); assert(success);
//success = exatn::transformTensor("A",std::shared_ptr<exatn::TensorMethod>(
// new exatn::FunctorIsometrize(std::vector<unsigned int>{0,2,4})));
//assert(success);
success = exatn::initTensor("B",0.0); assert(success);
//Contract tensors:
success = exatn::contractTensors("C(u1,u0)+=A()*B(u0,u1)",1.0); assert(success);
success = exatn::contractTensorsSync("B(j1,j2,j3,j4)+=A(i1,j1,i2,j2,i3)*A+(i1,j3,i2,j4,i3)",1.0); assert(success);
//success = exatn::printTensor("B"); assert(success);
double nrm1 = 0.0;
success = exatn::computeNorm1Sync("B",nrm1); assert(success);
std::cout << "1-norm of the identity tensor = " << nrm1 << " VS correct = 4" << std::endl;
//Destroy tensors:
success = exatn::sync(); assert(success);
success = exatn::destroyTensor("C"); assert(success);
success = exatn::destroyTensor("B"); assert(success);
success = exatn::destroyTensor("A"); assert(success);
......
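For reference, the value checked in the test above follows directly from the isometry enforced on A over dimensions {0,2,4}: contracting A with its conjugate over those dimensions yields a Kronecker delta over the remaining dimensions of extent 2 and 2, i.e.

B_{j_1 j_2 j_3 j_4} = \sum_{i_1 i_2 i_3} A_{i_1 j_1 i_2 j_2 i_3}\, A^{*}_{i_1 j_3 i_2 j_4 i_3} = \delta_{j_1 j_3}\,\delta_{j_2 j_4},
\qquad
\|B\|_1 = \sum_{j_1 j_2 j_3 j_4} |B_{j_1 j_2 j_3 j_4}| = 2 \times 2 = 4 .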
......@@ -49,6 +49,7 @@ add_library(${LIBRARY_NAME}
functor_init_delta.cpp
functor_init_proj.cpp
functor_init_file.cpp
functor_isometrize.cpp
functor_scale.cpp
functor_maxabs.cpp
functor_norm1.cpp
......
/** ExaTN::Numerics: Tensor Functor: Initialization of Ordering Projection tensors
REVISION: 2021/09/21
REVISION: 2022/01/28
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
#include "functor_init_proj.hpp"
......@@ -16,12 +16,14 @@ namespace numerics{
void FunctorInitProj::pack(BytePacket & packet)
{
//`Finish
return;
}
void FunctorInitProj::unpack(BytePacket & packet)
{
//`Finish
return;
}
......
/** ExaTN::Numerics: Tensor Functor: Tensor Isometrization
REVISION: 2022/01/29
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
#include "functor_isometrize.hpp"
#include "tensor_range.hpp"
#include "talshxx.hpp"
namespace exatn{
namespace numerics{
void FunctorIsometrize::pack(BytePacket & packet)
{
//`Finish
return;
}
void FunctorIsometrize::unpack(BytePacket & packet)
{
//`Finish
return;
}
template <typename NumericType>
inline NumericType conjugated(NumericType value)
{
return value;
}
template <typename NumericType>
inline std::complex<NumericType> conjugated(std::complex<NumericType> value)
{
return std::conj(value);
}
int FunctorIsometrize::apply(talsh::Tensor & local_tensor) //tensor slice (in general)
{
unsigned int tens_rank;
const auto * extents = local_tensor.getDimExtents(tens_rank); //rank is returned by reference
const auto tensor_volume = local_tensor.getVolume(); //volume of the given tensor slice
const auto & offsets = local_tensor.getDimOffsets(); //base offsets of the given tensor slice
for(const auto & offs: offsets) assert(offs == 0); //`tensor slices are not allowed (replicated tensors only)
std::vector<DimExtent> strides(tens_rank);
DimExtent stride = 1;
for(int i = 0; i < tens_rank; ++i){
strides[i] = stride;
stride *= extents[i];
}
auto enforce_isometry = [&](auto * tensor_body,
const std::vector<unsigned int> & iso_dims){
using tensor_body_type = typename std::remove_pointer<decltype(tensor_body)>::type;
unsigned int rankx = iso_dims.size();
unsigned int ranky = tens_rank - rankx;
if(rankx > 0 && ranky > 0){
//Set up ranges:
std::vector<DimExtent> extx(rankx), strx(rankx);
std::vector<DimExtent> exty(ranky), stry(ranky);
std::vector<int> dim_mask(tens_rank,1);
for(const auto & dim: iso_dims){
assert(dim >= 0 && dim < tens_rank);
dim_mask[dim] = 0;
}
int x = 0, y = 0;
for(int i = 0; i < tens_rank; ++i){
if(dim_mask[i] == 0){
extx[x] = extents[i];
strx[x++] = strides[i];
}else{
exty[y] = extents[i];
stry[y++] = strides[i];
}
}
assert(x == rankx && y == ranky);
TensorRange rngx(std::vector<DimOffset>(rankx,0),extx,strx);
TensorRange rngy(std::vector<DimOffset>(ranky,0),exty,stry);
DimExtent volx = rngx.localVolume();
DimExtent voly = rngy.localVolume();
//Allocate a temporary matricized buffer:
tensor_body_type * buf = new tensor_body_type[voly*volx];
//Load data into the temporary matricized buffer:
rngy.reset();
for(DimOffset j = 0; j < voly; ++j){
rngx.reset();
for(DimOffset i = 0; i < volx; ++i){
buf[volx*j + i] = tensor_body[rngy.globalOffset() + rngx.globalOffset()];
rngx.next();
}
rngy.next();
}
//Modified Gram-Schmidt procedure:
for(DimOffset j = 0; j < voly; ++j){
double nrm2 = 0.0;
for(DimOffset i = 0; i < volx; ++i){
const auto elem = std::abs(buf[volx*j + i]);
nrm2 += elem * elem;
}
nrm2 = std::sqrt(nrm2);
for(DimOffset i = 0; i < volx; ++i){
buf[volx*j + i] /= tensor_body_type(nrm2);
}
for(DimOffset k = j+1; k < voly; ++k){
tensor_body_type dpr(0.0);
for(DimOffset i = 0; i < volx; ++i){
dpr += conjugated(buf[volx*j + i]) * buf[volx*k + i];
}
for(DimOffset i = 0; i < volx; ++i){
buf[volx*k + i] -= dpr * buf[volx*j + i];
}
}
}
//Copy the result back into the tensor:
rngy.reset();
for(DimOffset j = 0; j < voly; ++j){
rngx.reset();
for(DimOffset i = 0; i < volx; ++i){
tensor_body[rngy.globalOffset() + rngx.globalOffset()] = buf[volx*j + i];
rngx.next();
}
rngy.next();
}
//Deallocate the temporary matricized buffer:
delete [] buf;
}
return 0;
};
auto access_granted = false;
{//Try REAL32:
float * body;
access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return enforce_isometry(body,isometry1_);
}
{//Try REAL64:
double * body;
access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return enforce_isometry(body,isometry1_);
}
{//Try COMPLEX32:
std::complex<float> * body;
access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return enforce_isometry(body,isometry1_);
}
{//Try COMPLEX64:
std::complex<double> * body;
access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return enforce_isometry(body,isometry1_);
}
std::cout << "#ERROR(exatn::numerics::FunctorIsometrize): Unknown data kind in talsh::Tensor!" << std::endl;
return 1;
}
} //namespace numerics
} //namespace exatn
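For reference, the enforce_isometry lambda above flattens the registered isometric dimensions of the tensor slice into a single row index x of extent volx and the remaining dimensions into a column index of extent voly, then orthonormalizes the voly columns of the resulting volx-by-voly matrix M by modified Gram-Schmidt, so that on exit

\sum_{x=0}^{\mathrm{volx}-1} M^{*}_{x j}\, M_{x k} = \delta_{j k}, \qquad j,k = 0,\dots,\mathrm{voly}-1 ,

which is exactly the isometry condition over the registered dimensions. Note that this implicitly assumes volx >= voly (one cannot have more than volx orthonormal columns of length volx).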
/** ExaTN::Numerics: Tensor Functor: Tensor Isometrization
REVISION: 2022/01/28
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(A) This tensor functor (method) performs the modified Gram-Schmidt
procedure on a tensor in order to enforce its isometries.
**/
#ifndef EXATN_NUMERICS_FUNCTOR_ISOMETRIZE_HPP_
#define EXATN_NUMERICS_FUNCTOR_ISOMETRIZE_HPP_
#include "Identifiable.hpp"
#include "tensor_basic.hpp"
#include "tensor_method.hpp" //from TAL-SH
#include <string>
#include <vector>
#include <complex>
#include "errors.hpp"
namespace exatn{
namespace numerics{
class FunctorIsometrize: public talsh::TensorFunctor<Identifiable>{
public:
FunctorIsometrize(const std::vector<unsigned int> & isometry1):
isometry1_(isometry1)
{
for(int i = 1; i < isometry1_.size(); ++i) assert(isometry1_[i] > isometry1_[i-1]);
}
FunctorIsometrize(const std::vector<unsigned int> & isometry1,
const std::vector<unsigned int> & isometry2):
isometry1_(isometry1), isometry2_(isometry2)
{
for(int i = 1; i < isometry1_.size(); ++i) assert(isometry1_[i] > isometry1_[i-1]);
for(int i = 1; i < isometry2_.size(); ++i) assert(isometry2_[i] > isometry2_[i-1]);
}
virtual ~FunctorIsometrize() = default;
virtual const std::string name() const override
{
return "TensorFunctorIsometrize";
}
virtual const std::string description() const override
{
return "Enforces tensor isometries";
}
/** Packs data members into a byte packet. **/
virtual void pack(BytePacket & packet) override;
/** Unpacks data members from a byte packet. **/
virtual void unpack(BytePacket & packet) override;
/** Initializes the local tensor slice with external data.
Returns zero on success, or an error code otherwise.
The talsh::Tensor slice is identified by its signature and
shape that both can be accessed by talsh::Tensor methods. **/
virtual int apply(talsh::Tensor & local_tensor) override;
private:
std::vector<unsigned int> isometry1_;
std::vector<unsigned int> isometry2_;
};
} //namespace numerics
} //namespace exatn
#endif //EXATN_NUMERICS_FUNCTOR_ISOMETRIZE_HPP_
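The functor can also be applied directly through the generic tensor transform entry point, mirroring the commented-out lines in the test above (tensor name and dimension list are illustrative):

bool ok = exatn::transformTensorSync("A",
            std::shared_ptr<exatn::TensorMethod>(
              new exatn::numerics::FunctorIsometrize(std::vector<unsigned int>{0,2,4})));
assert(ok);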
/** ExaTN::Numerics: Tensor network builder: Tree: Tree Tensor Network
REVISION: 2021/10/15
REVISION: 2022/01/29
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
#include "network_builder_ttn.hpp"
#include "tensor_network.hpp"
......@@ -16,7 +16,7 @@ namespace exatn{
namespace numerics{
NetworkBuilderTTN::NetworkBuilderTTN():
max_bond_dim_(1), arity_(2)
max_bond_dim_(1), arity_(2), isometric_(0)
{
}
......@@ -28,6 +28,8 @@ bool NetworkBuilderTTN::getParameter(const std::string & name, long long * value
*value = max_bond_dim_;
}else if(name == "arity"){
*value = arity_;
}else if(name == "isometric"){
*value = isometric_;
}else{
found = false;
}
......@@ -42,6 +44,8 @@ bool NetworkBuilderTTN::setParameter(const std::string & name, long long value)
max_bond_dim_ = value;
}else if(name == "arity"){
arity_ = value;
}else if(name == "isometric"){
isometric_ = value;
}else{
found = false;
}
......@@ -124,7 +128,15 @@ void NetworkBuilderTTN::build(TensorNetwork & network, bool tensor_operator)
tens_conn->appendLeg(output_dim_extents[output_dim_id],TensorLeg{0,output_dim_id});
}
}
network.getTensor(tensor_id_base + num_dims_new)->rename();
auto tens = network.getTensor(tensor_id_base + num_dims_new);
tens->rename();
if(isometric_ != 0){
if(tens->getRank() == arity_ + 1){ //TTN vector
tens->registerIsometry({0,1});
}else if(tens->getRank() == arity_ + 3){ //TTN operator
tens->registerIsometry({0,1,3,4});
}
}
++num_dims_new; //next tensor within the layer (each tensor supplies one new dimension)
}
tensor_id_base += num_tensors_in_layer;
......
/** ExaTN::Numerics: Tensor network builder: Tree: Tree Tensor Network
REVISION: 2021/10/14
REVISION: 2022/01/29
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(a) Builds a tree tensor network.
Parameters:
* max_bond_dim: Maximal internal bond dimension;
* arity: Tree arity;
* max_bond_dim: >0: Maximal internal bond dimension;
* arity: >1: Tree arity;
* isometric: {0/1}: Whether or not all tensor factors except the root are isometric;
(b) Leg numeration (tensor network vector):
[0] [1] [2] [3] ...
......@@ -80,6 +81,7 @@ private:
long long max_bond_dim_; //maximal internal bond dimension
long long arity_; //tree arity
int isometric_; //whether all non-root tensor factors are isometric (0: no, 1: yes)
};
} //namespace numerics
......
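A minimal sketch of requesting an isometric tree with the new parameter (only calls that appear in this diff are used; direct construction of the builder is assumed to be accessible here, and the parameter values are illustrative):

exatn::numerics::NetworkBuilderTTN builder;
bool ok = builder.setParameter("max_bond_dim",16); assert(ok);
ok = builder.setParameter("arity",2);              assert(ok);
ok = builder.setParameter("isometric",1);          assert(ok); //new parameter introduced by this commit
//builder.build(network,false) will then register isometry {0,1} on every rank-(arity+1) TTN vector factor
//and {0,1,3,4} on every rank-(arity+3) TTN operator factor, per the build() change above.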
/** ExaTN::Numerics: Tensor
REVISION: 2021/10/15
REVISION: 2022/01/28
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
#include "tensor.hpp"
#include "tensor_symbol.hpp"
......@@ -421,6 +421,11 @@ void Tensor::registerIsometry(const std::vector<unsigned int> & isometry)
return;
}
bool Tensor::hasIsometries() const
{
return !(isometries_.empty());
}
const std::list<std::vector<unsigned int>> & Tensor::retrieveIsometries() const
{
return isometries_;
......
/** ExaTN::Numerics: Abstract Tensor
REVISION: 2021/10/13
REVISION: 2022/01/28
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
/** NOTES:
Tensor specification requires:
......@@ -225,6 +225,9 @@ public:
/** Registers an isometry in the tensor. **/
void registerIsometry(const std::vector<unsigned int> & isometry);
/** Queries whether the tensor has any isometries. **/
bool hasIsometries() const;
/** Retrieves the list of all registered isometries in the tensor. **/
const std::list<std::vector<unsigned int>> & retrieveIsometries() const;
......
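A short sketch of the new query API together with the pre-existing registerIsometry (the tensor name is illustrative; the shared pointer is obtained via exatn::getTensor as in the test above):

auto tensor = exatn::getTensor("A");     //assumes tensor "A" has already been created
tensor->registerIsometry({0,2,4});       //dimensions 0, 2, 4 form one isometric group
if(tensor->hasIsometries()){
  const auto & isometries = tensor->retrieveIsometries(); //std::list<std::vector<unsigned int>>
  //NumServer::initTensorRnd passes *(isometries.cbegin()) to FunctorIsometrize (see above)
}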
/** ExaTN::Numerics: Tensor connected to other tensors inside a tensor network
REVISION: 2021/04/27
REVISION: 2022/01/28
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
#include "tensor_connected.hpp"
#include "tensor_symbol.hpp"
......@@ -222,6 +222,11 @@ void TensorConn::replaceStoredTensor(std::shared_ptr<Tensor> tensor)
return;
}
bool TensorConn::hasIsometries() const
{
return tensor_->hasIsometries();
}
const std::list<std::vector<unsigned int>> & TensorConn::retrieveIsometries() const
{
return tensor_->retrieveIsometries();
......
/** ExaTN::Numerics: Tensor connected to other tensors in a tensor network
REVISION: 2021/04/27
REVISION: 2022/01/28
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(a) A tensor inside a tensor network is generally connected
......@@ -153,6 +153,9 @@ public:
/** Replaces the stored tensor with a new one provided explicitly. **/
void replaceStoredTensor(std::shared_ptr<Tensor> tensor);
/** Queries whether the tensor has any isometries. **/
bool hasIsometries() const;
/** Retrieves the list of all registered isometries in the tensor. **/
const std::list<std::vector<unsigned int>> & retrieveIsometries() const;
......
/** ExaTN::Numerics: Tensor network
REVISION: 2021/12/22
REVISION: 2022/01/28
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2022 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2022 Oak Ridge National Laboratory (UT-Battelle) **/
#include "tensor_network.hpp"
#include "tensor_symbol.hpp"
......@@ -121,7 +121,7 @@ void printContractionSequence(std::ofstream & output_file, const std::list<numer
//Main:
TensorNetwork::TensorNetwork():
explicit_output_(0), finalized_(1), max_tensor_id_(0),
explicit_output_(0), finalized_(1), has_isometries_(0), max_tensor_id_(0),
contraction_seq_flops_(0.0), max_intermediate_presence_volume_(0.0),
max_intermediate_volume_(0.0), max_intermediate_rank_(0), universal_indexing_(false)
{
......@@ -136,7 +136,7 @@ TensorNetwork::TensorNetwork():