Commit 976fc441 authored by Dmitry I. Lyakh
Browse files

Implemented TensorOperatorRemapper class.


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent 10f14d7a
......@@ -7,6 +7,7 @@ add_library(${LIBRARY_NAME}
quantum.cpp
num_server.cpp
reconstructor.cpp
remapper.cpp
linear_solver.cpp
optimizer.cpp
eigensolver.cpp)
......
/** ExaTN:: Extreme eigenvalue/eigenvector Krylov solver over tensor networks
REVISION: 2021/01/16
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
#include "eigensolver.hpp"
#include <iostream>
namespace exatn{
unsigned int TensorNetworkEigenSolver::debug{0};
......
/** ExaTN:: Linear solver over tensor network manifolds
REVISION: 2021/10/21
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -7,6 +7,8 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
#include "linear_solver.hpp"
#include "reconstructor.hpp"
#include <iostream>
namespace exatn{
unsigned int TensorNetworkLinearSolver::debug{0};
......@@ -20,10 +22,11 @@ TensorNetworkLinearSolver::TensorNetworkLinearSolver(std::shared_ptr<TensorOpera
tensor_operator_(tensor_operator), rhs_expansion_(rhs_expansion), vector_expansion_(vector_expansion),
max_iterations_(DEFAULT_MAX_ITERATIONS), tolerance_(tolerance),
#ifdef MPI_ENABLED
parallel_(true)
parallel_(true),
#else
parallel_(false)
parallel_(false),
#endif
residual_norm_(0.0), fidelity_(0.0)
{
if(!rhs_expansion_->isKet()){
std::cout << "#ERROR(exatn:TensorNetworkLinearSolver): The rhs tensor network vector expansion must be a ket!"
......@@ -52,20 +55,26 @@ void TensorNetworkLinearSolver::resetMaxIterations(unsigned int max_iterations)
}
std::shared_ptr<TensorExpansion> TensorNetworkLinearSolver::getSolution() const
std::shared_ptr<TensorExpansion> TensorNetworkLinearSolver::getSolution(double * residual_norm,
double * fidelity) const
{
if(residual_norm != nullptr) *residual_norm = residual_norm_;
if(fidelity != nullptr) *fidelity = fidelity_;
return vector_expansion_;
}
bool TensorNetworkLinearSolver::solve()
bool TensorNetworkLinearSolver::solve(double * residual_norm, double * fidelity)
{
return solve(exatn::getDefaultProcessGroup());
return solve(exatn::getDefaultProcessGroup(),residual_norm,fidelity);
}
bool TensorNetworkLinearSolver::solve(const ProcessGroup & process_group)
bool TensorNetworkLinearSolver::solve(const ProcessGroup & process_group, double * residual_norm, double * fidelity)
{
if(residual_norm != nullptr) *residual_norm = 0.0;
if(fidelity != nullptr) *fidelity = 0.0;
unsigned int local_rank; //local process rank within the process group
if(!process_group.rankIsIn(exatn::getProcessRank(),&local_rank)) return true; //process is not in the group: Do nothing
unsigned int num_procs = 1;
......@@ -90,24 +99,26 @@ bool TensorNetworkLinearSolver::solve(const ProcessGroup & process_group)
//Solve for <x| via reconstruction (<x||A*|b>):
vector_expansion_->conjugate();
vector_expansion_->markOptimizableAllTensors();
exatn::TensorNetworkReconstructor::resetDebugLevel(1,0); //debug
exatn::TensorNetworkReconstructor::resetDebugLevel(TensorNetworkLinearSolver::debug,
TensorNetworkLinearSolver::focus);
exatn::TensorNetworkReconstructor reconstructor(opvec_expansion_,vector_expansion_,tolerance_);
success = exatn::sync(); assert(success);
double residual_norm, fidelity;
bool reconstructed = reconstructor.reconstruct(process_group,&residual_norm,&fidelity,true,true);
bool reconstructed = reconstructor.reconstruct(process_group,&residual_norm_,&fidelity_,true,true);
success = exatn::sync(); assert(success);
if(reconstructed){
if(TensorNetworkLinearSolver::debug > 0)
std::cout << "Linear solve reconstruction succeeded: Residual norm = " << residual_norm
<< "; Fidelity = " << fidelity << std::endl;
std::cout << "Linear solve reconstruction succeeded: Residual norm = " << residual_norm_
<< "; Fidelity = " << fidelity_ << std::endl;
}else{
std::cout << "#ERROR(exatn::TensorNetworkLinearSolver): Reconstruction failed!" << std::endl;
}
//Rescale the solution:
vector_expansion_->conjugate();
vector_expansion_->rescale(std::complex<double>{original_norm,0.0});
if(residual_norm != nullptr) *residual_norm = residual_norm_;
if(fidelity != nullptr) *fidelity = fidelity_;
return reconstructed;
}
......
/** ExaTN:: Linear solver over tensor network manifolds
REVISION: 2021/10/21
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -15,7 +15,7 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
#include "exatn_numerics.hpp"
#include <vector>
#include <memory>
#include <complex>
#include "errors.hpp"
......@@ -50,11 +50,15 @@ public:
void resetMaxIterations(unsigned int max_iterations = DEFAULT_MAX_ITERATIONS);
/** Solves the linear system over tensor network manifolds. **/
bool solve();
bool solve(const ProcessGroup & process_group); //in: executing process group
bool solve(double * residual_norm, //out: 2-norm of the residual tensor (error)
double * fidelity); //out: squared normalized overlap (fidelity)
bool solve(const ProcessGroup & process_group, //in: executing process group
double * residual_norm, //out: 2-norm of the residual tensor (error)
double * fidelity); //out: squared normalized overlap (fidelity)
/** Returns the found tensor network expansion. **/
std::shared_ptr<TensorExpansion> getSolution() const;
std::shared_ptr<TensorExpansion> getSolution(double * residual_norm, //out: 2-norm of the residual tensor (error)
double * fidelity) const; //out: squared normalized overlap (fidelity)
/** Enables/disables coarse-grain parallelization over tensor networks. **/
void enableParallelization(bool parallel = true);
......@@ -71,6 +75,9 @@ private:
double tolerance_; //numerical convergence tolerance (for the gradient)
bool parallel_; //enables/disables coarse-grain parallelization over tensor networks
double residual_norm_; //2-norm of the residual tensor after optimization (error)
double fidelity_; //achieved reconstruction fidelity (normalized squared overlap)
std::shared_ptr<TensorExpansion> opvec_expansion_; //A * x tensor network expansion
};
......
/** ExaTN:: Variational optimizer of a closed symmetric tensor network expansion functional
REVISION: 2021/10/21
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -10,6 +10,7 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
#include <unordered_set>
#include <string>
#include <iostream>
namespace exatn{
......
/** ExaTN:: Reconstructs an approximate tensor network expansion for a given tensor network expansion
REVISION: 2021/10/18
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -10,6 +10,7 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
#include <unordered_set>
#include <string>
#include <iostream>
#include <cmath>
namespace exatn{
......
/** ExaTN:: Reconstructs an approximate tensor network expansion for a given tensor network expansion
REVISION: 2021/10/02
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -119,7 +119,7 @@ private:
double input_norm_; //2-norm of the input tensor expansion
double output_norm_; //2-norm of the approximant tensor expansion
double residual_norm_; //2-norm of the residual tensor after optimization (error)
double fidelity_; //achieved reconstruction fidelity (squared overlap)
double fidelity_; //achieved reconstruction fidelity (normalized squared overlap)
std::vector<Environment> environments_; //optimization environments for each optimizable tensor
};
......
/** ExaTN:: Reconstructs an approximate tensor network operator for a given tensor network operator
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
#include "remapper.hpp"
#include "reconstructor.hpp"
#include <string>
#include <iostream>
namespace exatn{
unsigned int TensorOperatorRemapper::debug{0}; //debug verbosity level (0: quiet by default)
int TensorOperatorRemapper::focus{-1}; //rank of the process to focus debug output on (-1: all processes)
/** Constructs a remapper with distinct ket and bra spaces.
    @param ket_space Tensor defining the ket space of the mapping.
    @param bra_space Tensor defining the bra space of the mapping.
    @param target Tensor network operator to be reconstructed (kept constant).
    @param approximant Reconstructing tensor network operator (optimized in place).
    @param tolerance Desired reconstruction convergence tolerance. **/
TensorOperatorRemapper::TensorOperatorRemapper(std::shared_ptr<Tensor> ket_space,
std::shared_ptr<Tensor> bra_space,
std::shared_ptr<TensorOperator> target,
std::shared_ptr<TensorOperator> approximant,
double tolerance):
ket_space_(ket_space), bra_space_(bra_space), target_(target), approximant_(approximant),
max_iterations_(DEFAULT_MAX_ITERATIONS), tolerance_(tolerance),
#ifdef MPI_ENABLED
parallel_(true), //MPI build: coarse-grain parallelization over tensor networks is on by default
#else
parallel_(false), //non-MPI build: serial by default
#endif
residual_norm_(0.0), fidelity_(0.0) //no reconstruction performed yet
{
}
/** Constructs a remapper for a symmetric mapping: the bra space
    is taken to be the same as the ket space.
    @param ket_space Tensor defining both the ket and the bra space.
    @param target Tensor network operator to be reconstructed (kept constant).
    @param approximant Reconstructing tensor network operator (optimized in place).
    @param tolerance Desired reconstruction convergence tolerance. **/
TensorOperatorRemapper::TensorOperatorRemapper(std::shared_ptr<Tensor> ket_space,
std::shared_ptr<TensorOperator> target,
std::shared_ptr<TensorOperator> approximant,
double tolerance):
ket_space_(ket_space), bra_space_(ket_space), target_(target), approximant_(approximant),
max_iterations_(DEFAULT_MAX_ITERATIONS), tolerance_(tolerance),
#ifdef MPI_ENABLED
parallel_(true), //MPI build: coarse-grain parallelization over tensor networks is on by default
#else
parallel_(false), //non-MPI build: serial by default
#endif
residual_norm_(0.0), fidelity_(0.0) //no reconstruction performed yet
{
}
/** Updates the numerical convergence tolerance used by subsequent reconstructions. **/
void TensorOperatorRemapper::resetTolerance(double tolerance)
{
 tolerance_ = tolerance;
}
/** Updates the maximal number of macro-iterations (sweeping epochs). **/
void TensorOperatorRemapper::resetMaxIterations(unsigned int max_iterations)
{
 max_iterations_ = max_iterations;
}
/** Returns the reconstructing (optimized) tensor network operator, or nullptr
    if no successful reconstruction has been performed yet (fidelity is zero).
    @param residual_norm Out: 2-norm of the residual tensor (error); may be nullptr.
    @param fidelity Out: achieved fidelity (normalized squared overlap); may be nullptr. **/
std::shared_ptr<TensorOperator> TensorOperatorRemapper::getSolution(double * residual_norm,
                                                                    double * fidelity) const
{
 if(fidelity_ == 0.0) return std::shared_ptr<TensorOperator>(nullptr); //no reconstruction has succeeded yet
 //Bug fix: guard the output pointers against nullptr, consistent with
 //TensorNetworkLinearSolver::getSolution (previously dereferenced unconditionally):
 if(residual_norm != nullptr) *residual_norm = residual_norm_;
 if(fidelity != nullptr) *fidelity = fidelity_;
 return approximant_;
}
/** Convenience overload: runs the reconstruction over the default process group. **/
bool TensorOperatorRemapper::reconstruct(double * residual_norm,
                                         double * fidelity,
                                         bool rnd_init,
                                         bool nesterov,
                                         double acceptable_fidelity)
{
 const auto & all_procs = exatn::getDefaultProcessGroup();
 return reconstruct(all_procs, residual_norm, fidelity, rnd_init, nesterov, acceptable_fidelity);
}
/** Approximately reconstructs the target tensor network operator via the approximant
    by remapping both operators into tensor network expansions over the given ket/bra
    spaces and optimizing all tensor factors of the approximant to maximize the overlap
    with the (unity-normalized) target. Upon success, the approximant's expansion
    coefficients are rescaled back by the original target norm. Returns TRUE on success.
    Processes outside the given process group return TRUE without doing anything.
    @param process_group Executing process group.
    @param residual_norm Out: 2-norm of the residual tensor (error); may be nullptr.
    @param fidelity Out: achieved fidelity (normalized squared overlap); may be nullptr.
    @param rnd_init Random initialization flag (forwarded to the reconstructor).
    @param nesterov Nesterov acceleration flag (forwarded to the reconstructor).
    @param acceptable_fidelity Minimally acceptable fidelity.
           NOTE(review): currently not forwarded/enforced here — TODO confirm intent. **/
bool TensorOperatorRemapper::reconstruct(const ProcessGroup & process_group,
                                         double * residual_norm,
                                         double * fidelity,
                                         bool rnd_init,
                                         bool nesterov,
                                         double acceptable_fidelity)
{
 //Bug fix: initialize the output arguments (they were never written before):
 if(residual_norm != nullptr) *residual_norm = 0.0;
 if(fidelity != nullptr) *fidelity = 0.0;
 unsigned int local_rank; //local process rank within the process group
 if(!process_group.rankIsIn(exatn::getProcessRank(),&local_rank)) return true; //process is not in the group: Do nothing
 unsigned int num_procs = 1;
 if(parallel_) num_procs = process_group.getSize(); //NOTE(review): num_procs is currently unused (kept for parity with other solvers)
 if(TensorOperatorRemapper::focus >= 0){
  if(getProcessRank() != TensorOperatorRemapper::focus) TensorOperatorRemapper::debug = 0; //mute debug output on non-focused processes
 }
 //Remap tensor network operators as tensor network expansions in the given space:
 auto target_expansion = makeSharedTensorExpansion(*target_,*ket_space_,*bra_space_);
 auto approx_expansion = makeSharedTensorExpansion(*approximant_,*ket_space_,*bra_space_);
 //Normalize the target tensor network expansion to unity (remember the original norm):
 double original_norm = 0.0;
 bool success = normalizeNorm2Sync(process_group,*target_expansion,1.0,&original_norm); assert(success);
 if(TensorOperatorRemapper::debug > 0)
  std::cout << "#DEBUG(exatn::TensorOperatorRemapper): Original target norm = " << original_norm << std::endl;
 //Reconstruct the target via the approximant (optimization operates on the conjugated bra side):
 approx_expansion->conjugate();
 approx_expansion->markOptimizableAllTensors();
 exatn::TensorNetworkReconstructor::resetDebugLevel(TensorOperatorRemapper::debug,
                                                    TensorOperatorRemapper::focus);
 exatn::TensorNetworkReconstructor reconstructor(target_expansion,approx_expansion,tolerance_);
 success = exatn::sync(); assert(success);
 //Bug fix: forward the caller-provided rnd_init/nesterov flags instead of hard-coded true,true:
 bool reconstructed = reconstructor.reconstruct(process_group,&residual_norm_,&fidelity_,rnd_init,nesterov);
 success = exatn::sync(); assert(success);
 if(reconstructed){
  if(TensorOperatorRemapper::debug > 0)
   std::cout << "Tensor operator reconstruction succeeded: Residual norm = " << residual_norm_
             << "; Fidelity = " << fidelity_ << std::endl;
 }else{
  std::cout << "#ERROR(exatn::TensorOperatorRemapper): Reconstruction failed!" << std::endl;
 }
 //Undo the conjugation and restore the original target norm in the solution:
 approx_expansion->conjugate();
 approx_expansion->rescale(std::complex<double>{original_norm,0.0});
 //Copy the updated expansion coefficients back into the approximant operator:
 const auto num_components = approximant_->getNumComponents();
 assert(approx_expansion->getNumComponents() == num_components);
 for(std::size_t i = 0; i < num_components; ++i){
  (*approximant_)[i].coefficient = (*approx_expansion)[i].coefficient;
 }
 //Bug fix: export the achieved residual norm and fidelity to the caller
 //(consistent with TensorNetworkLinearSolver::solve):
 if(residual_norm != nullptr) *residual_norm = residual_norm_;
 if(fidelity != nullptr) *fidelity = fidelity_;
 return reconstructed;
}
/** Enables/disables coarse-grain parallelization over tensor networks. **/
void TensorOperatorRemapper::enableParallelization(bool parallel)
{
 parallel_ = parallel;
}
void TensorOperatorRemapper::resetDebugLevel(unsigned int level, int focus_process)
{
TensorOperatorRemapper::debug = level;
TensorOperatorRemapper::focus = focus_process;
return;
}
} //namespace exatn
/** ExaTN:: Reconstructs an approximate tensor network operator for a given tensor network operator
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(A) Given a tensor network operator of some form, the tensor network operator remapper
optimizes its tensor factors to maximize the overlap with another given constant
tensor network operator, thus providing an approximation to it. The reconstruction
fidelity is the normalized squared overlap between the two tensor network operators.
The reconstruction tolerance is a numerical tolerance used for checking convergence
of the underlying linear algebra procedures.
**/
#ifndef EXATN_REMAPPER_HPP_
#define EXATN_REMAPPER_HPP_
#include "exatn_numerics.hpp"
#include <memory>
#include "errors.hpp"
namespace exatn{
/** Optimizes the tensor factors of a reconstructing tensor network operator
    (the approximant) to maximize its overlap with a given constant target
    tensor network operator within the specified ket/bra spaces. **/
class TensorOperatorRemapper{

public:

 static unsigned int debug; //debug verbosity level (0: quiet)
 static int focus; //rank of the process to focus debug output on (-1: all processes)

 static constexpr const double DEFAULT_TOLERANCE = 1e-5; //default reconstruction convergence tolerance
 static constexpr const unsigned int DEFAULT_MAX_ITERATIONS = 1000; //default max number of macro-iterations
 static constexpr const double DEFAULT_ACCEPTABLE_FIDELITY = 0.01; //default minimally acceptable reconstruction fidelity

 /** Constructor with distinct ket and bra spaces. **/
 TensorOperatorRemapper(std::shared_ptr<Tensor> ket_space, //in: tensor defining the ket space
                        std::shared_ptr<Tensor> bra_space, //in: tensor defining the bra space
                        std::shared_ptr<TensorOperator> target, //in: tensor network operator to be reconstructed (constant)
                        std::shared_ptr<TensorOperator> approximant, //inout: reconstructing tensor network operator
                        double tolerance = DEFAULT_TOLERANCE); //in: desired reconstruction convergence tolerance

 /** Constructor for a symmetric mapping (bra space = ket space). **/
 TensorOperatorRemapper(std::shared_ptr<Tensor> ket_space, //in: tensor defining the ket space (if symmetric to bra)
                        std::shared_ptr<TensorOperator> target, //in: tensor network operator to be reconstructed (constant)
                        std::shared_ptr<TensorOperator> approximant, //inout: reconstructing tensor network operator
                        double tolerance = DEFAULT_TOLERANCE); //in: desired reconstruction convergence tolerance

 TensorOperatorRemapper(const TensorOperatorRemapper &) = default;
 TensorOperatorRemapper & operator=(const TensorOperatorRemapper &) = default;
 TensorOperatorRemapper(TensorOperatorRemapper &&) noexcept = default;
 TensorOperatorRemapper & operator=(TensorOperatorRemapper &&) noexcept = default;
 ~TensorOperatorRemapper() = default;

 /** Resets the reconstruction tolerance. **/
 void resetTolerance(double tolerance = DEFAULT_TOLERANCE);

 /** Resets the max number of macro-iterations (sweeping epochs). **/
 void resetMaxIterations(unsigned int max_iterations = DEFAULT_MAX_ITERATIONS);

 /** Approximately reconstructs a tensor network operator via another tensor network
     operator. Upon success, returns the achieved fidelity of the reconstruction,
     that is, the normalized squared overlap between the two tensor network operators: [0..1]. **/
 bool reconstruct(double * residual_norm, //out: 2-norm of the residual tensor (error)
                  double * fidelity, //out: squared normalized overlap (fidelity)
                  bool rnd_init = true, //in: random initialization flag
                  bool nesterov = true, //in: Nesterov acceleration
                  double acceptable_fidelity = DEFAULT_ACCEPTABLE_FIDELITY); //in: acceptable fidelity
 bool reconstruct(const ProcessGroup & process_group, //in: executing process group
                  double * residual_norm, //out: 2-norm of the residual tensor (error)
                  double * fidelity, //out: squared normalized overlap (fidelity)
                  bool rnd_init = true, //in: random initialization flag
                  bool nesterov = true, //in: Nesterov acceleration
                  double acceptable_fidelity = DEFAULT_ACCEPTABLE_FIDELITY); //in: acceptable fidelity

 /** Returns the reconstructing (optimized) tensor network operator;
     returns nullptr if no successful reconstruction has been performed yet. **/
 std::shared_ptr<TensorOperator> getSolution(double * residual_norm, //out: 2-norm of the residual tensor (error)
                                             double * fidelity) const; //out: squared normalized overlap (fidelity)

 /** Enables/disables coarse-grain parallelization over tensor networks. **/
 void enableParallelization(bool parallel = true);

 static void resetDebugLevel(unsigned int level = 0, //in: debug level
                             int focus_process = -1); //in: process to focus on (-1: all)

private:

 std::shared_ptr<Tensor> ket_space_; //tensor defining the ket space
 std::shared_ptr<Tensor> bra_space_; //tensor defining the bra space
 std::shared_ptr<TensorOperator> target_; //tensor network operator to reconstruct
 std::shared_ptr<TensorOperator> approximant_; //reconstructing tensor network operator
 unsigned int max_iterations_; //max number of macro-iterations
 double tolerance_; //numerical reconstruction convergence tolerance (for the gradient)
 bool parallel_; //enables/disables coarse-grain parallelization over tensor networks
 double residual_norm_; //2-norm of the residual tensor after optimization (error)
 double fidelity_; //achieved reconstruction fidelity (normalized squared overlap)
};
} //namespace exatn
#endif //EXATN_REMAPPER_HPP_
......@@ -1699,10 +1699,13 @@ TEST(NumServerTester, IsingTNO)
std::cout << "Reconstruction failed!" << std::endl;
assert(false);
}
const auto tno_exp_coefs = ham_tno_expansion->getCoefficients();
assert(tno_exp_coefs.size() == 1);
const auto eig_scaling = ham_norm * tno_exp_coefs[0];
std::cout << "Eigenvalue scaling coefficient = " << eig_scaling << std::endl;
ham_tno_expansion->conjugate();
ham_tno_expansion->rescale(std::complex<double>{ham_norm,0.0});
const auto num_components = ham_tno->getNumComponents();
assert(ham_tno_expansion->getNumComponents() == num_components);
for(std::size_t i = 0; i < num_components; ++i){
(*ham_tno)[i].coefficient = (*ham_tno_expansion)[i].coefficient;
}
//Ground state search for the tensor network Hamiltonian:
std::cout << "Ground state search for the tensor network Hamiltonian:" << std::endl;
......@@ -1719,7 +1722,7 @@ TEST(NumServerTester, IsingTNO)
}
const auto expect_val2 = optimizer2.getExpectationValue();
std::cout << "Relative eigenvalue error due to reconstruction is "
<< std::abs(expect_val1 - (expect_val2 * eig_scaling)) / std::abs(expect_val1) * 1e2 << " %\n";
<< std::abs(expect_val1 - expect_val2) / std::abs(expect_val1) * 1e2 << " %\n";
//Destroy all tensors:
success = exatn::sync(); assert(success);
......
/** ExaTN::Numerics: Tensor operator
REVISION: 2021/10/21
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -179,6 +179,13 @@ void TensorOperator::conjugate()
}
void TensorOperator::rescale(std::complex<double> scaling_factor)
{
for(auto & component: components_) component.coefficient *= scaling_factor;
return;
}
std::vector<std::complex<double>> TensorOperator::getCoefficients() const
{
std::vector<std::complex<double>> coefs(components_.size(),{0.0,0.0});
......
/** ExaTN::Numerics: Tensor operator
REVISION: 2021/10/21
REVISION: 2021/10/22
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -143,6 +143,9 @@ public:
complex linear expansion coefficients are complex conjugated. **/
void conjugate();
/** Multiplies all components of the tensor network operator by a given complex number. **/
void rescale(std::complex<double> scaling_factor);
/** Returns linear combination coefficients for all components. **/
std::vector<std::complex<double>> getCoefficients() const;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment