Commit 10f14d7a authored by Dmitry I. Lyakh
Browse files

Implemented tensor network linear solver, needs testing ...


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent 38402c98
......@@ -7,6 +7,7 @@ add_library(${LIBRARY_NAME}
quantum.cpp
num_server.cpp
reconstructor.cpp
linear_solver.cpp
optimizer.cpp
eigensolver.cpp)
......
/** ExaTN:: Linear solver over tensor network manifolds
REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
#include "linear_solver.hpp"
#include "reconstructor.hpp"
namespace exatn{
//Static debug controls (set via resetDebugLevel()):
unsigned int TensorNetworkLinearSolver::debug{0}; //debug verbosity level (0: no debug output)
int TensorNetworkLinearSolver::focus{-1}; //process rank to focus debug output on (-1: all processes)
/** Constructs a linear solver for A * x = b, where A (tensor_operator) is a
    tensor network operator, b (rhs_expansion) is a given ket tensor network
    expansion, and x (vector_expansion) is the initial guess for the unknown
    ket tensor network expansion sought for. Both expansions must be kets
    (hard requirement: aborts via assert otherwise). **/
TensorNetworkLinearSolver::TensorNetworkLinearSolver(std::shared_ptr<TensorOperator> tensor_operator,
std::shared_ptr<TensorExpansion> rhs_expansion,
std::shared_ptr<TensorExpansion> vector_expansion,
double tolerance):
tensor_operator_(tensor_operator), rhs_expansion_(rhs_expansion), vector_expansion_(vector_expansion),
max_iterations_(DEFAULT_MAX_ITERATIONS), tolerance_(tolerance),
#ifdef MPI_ENABLED
parallel_(true) //MPI build: coarse-grain parallelization defaults to enabled
#else
parallel_(false) //non-MPI build: no coarse-grain parallelization
#endif
{
//Validate that the right-hand side is a ket expansion:
if(!rhs_expansion_->isKet()){
std::cout << "#ERROR(exatn:TensorNetworkLinearSolver): The rhs tensor network vector expansion must be a ket!"
<< std::endl << std::flush;
assert(false); //NOTE(review): no-op under NDEBUG; execution would continue with an invalid state
}
//Validate that the sought-for solution is a ket expansion:
if(!vector_expansion_->isKet()){
std::cout << "#ERROR(exatn:TensorNetworkLinearSolver): The tensor network vector expansion sought for must be a ket!"
<< std::endl << std::flush;
assert(false); //NOTE(review): no-op under NDEBUG; execution would continue with an invalid state
}
}
/** Updates the numerical convergence tolerance used by the solver. **/
void TensorNetworkLinearSolver::resetTolerance(double tolerance)
{
 tolerance_ = tolerance;
}
/** Updates the maximal number of macro-iterations. **/
void TensorNetworkLinearSolver::resetMaxIterations(unsigned int max_iterations)
{
 max_iterations_ = max_iterations;
}
/** Returns the tensor network expansion holding the (solved-for) solution x. **/
std::shared_ptr<TensorExpansion> TensorNetworkLinearSolver::getSolution() const
{
 auto solution = vector_expansion_;
 return solution;
}
/** Solves the linear system within the default process group. **/
bool TensorNetworkLinearSolver::solve()
{
 const auto & all_processes = exatn::getDefaultProcessGroup();
 return solve(all_processes);
}
/** Solves the linear system A * x = b over tensor network manifolds within
    the given process group. Returns true on successful reconstruction
    (also true for processes outside the group, which do nothing).
    Algorithm: builds |y> = A*|b>, normalizes it to unity, reconstructs
    <x| against it, then rescales the solution by the original norm. **/
bool TensorNetworkLinearSolver::solve(const ProcessGroup & process_group)
{
 unsigned int local_rank; //local process rank within the process group
 if(!process_group.rankIsIn(exatn::getProcessRank(),&local_rank)) return true; //process is not in the group: Do nothing
 //NOTE(review): parallel_/process-group size is not yet used here (dead `num_procs` local removed)
 //Silence debug output on all processes except the focused one, if requested:
 if(TensorNetworkLinearSolver::focus >= 0){
  if(getProcessRank() != TensorNetworkLinearSolver::focus) TensorNetworkLinearSolver::debug = 0;
 }
 //Construct the |A*|b> tensor network expansion (apply the operator to the conjugated rhs, then conjugate back):
 rhs_expansion_->conjugate();
 opvec_expansion_ = std::make_shared<TensorExpansion>(*rhs_expansion_,*tensor_operator_);
 opvec_expansion_->conjugate();
 rhs_expansion_->conjugate(); //restore the original rhs expansion
 //Normalize |A*|b> to unity (<b|A|A*|b> = 1), remembering its original norm for the final rescale:
 double original_norm = 0.0;
 bool success = normalizeNorm2Sync(process_group,*opvec_expansion_,1.0,&original_norm); assert(success);
 if(TensorNetworkLinearSolver::debug > 0)
  std::cout << "#DEBUG(exatn::TensorNetworkLinearSolver): Original <b|A| norm = " << original_norm << std::endl;
 //Solve for <x| via reconstruction (<x||A*|b>):
 vector_expansion_->conjugate();
 vector_expansion_->markOptimizableAllTensors();
 //Propagate this solver's debug settings to the reconstructor
 //(was a hard-coded resetDebugLevel(1,0) leftover marked "//debug"):
 exatn::TensorNetworkReconstructor::resetDebugLevel(TensorNetworkLinearSolver::debug,
                                                   TensorNetworkLinearSolver::focus);
 //NOTE(review): max_iterations_ is currently not forwarded to the reconstructor — confirm intended.
 exatn::TensorNetworkReconstructor reconstructor(opvec_expansion_,vector_expansion_,tolerance_);
 success = exatn::sync(); assert(success);
 double residual_norm, fidelity;
 bool reconstructed = reconstructor.reconstruct(process_group,&residual_norm,&fidelity,true,true);
 success = exatn::sync(); assert(success);
 if(reconstructed){
  if(TensorNetworkLinearSolver::debug > 0)
   std::cout << "Linear solve reconstruction succeeded: Residual norm = " << residual_norm
             << "; Fidelity = " << fidelity << std::endl;
 }else{
  std::cout << "#ERROR(exatn::TensorNetworkLinearSolver): Reconstruction failed!" << std::endl;
 }
 //Rescale the solution by the original norm of |A*|b>:
 //NOTE(review): vector_expansion_ is left conjugated (bra) here — confirm getSolution() callers expect that.
 vector_expansion_->rescale(std::complex<double>{original_norm,0.0});
 return reconstructed;
}
/** Enables (true) or disables (false) coarse-grain parallelization over tensor networks. **/
void TensorNetworkLinearSolver::enableParallelization(bool parallel)
{
 parallel_ = parallel;
}
void TensorNetworkLinearSolver::resetDebugLevel(unsigned int level, int focus_process)
{
TensorNetworkLinearSolver::debug = level;
TensorNetworkLinearSolver::focus = focus_process;
return;
}
} //namespace exatn
/** ExaTN:: Linear solver over tensor network manifolds
REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(A) Solves a linear system A * x = b, where A is a tensor network operator,
b is a given tensor network expansion, and x is the unknown tensor
network expansion sought for.
**/
#ifndef EXATN_LINEAR_SOLVER_HPP_
#define EXATN_LINEAR_SOLVER_HPP_
#include "exatn_numerics.hpp"
#include <vector>
#include <complex>
#include "errors.hpp"
namespace exatn{
/** Solves a linear system A * x = b over tensor network manifolds:
    A is a tensor network operator, b is a given ket tensor network expansion,
    x is the unknown ket tensor network expansion sought for. **/
class TensorNetworkLinearSolver{
public:
 static unsigned int debug; //debug verbosity level (0: no debug output)
 static int focus;          //process rank to focus debug output on (-1: all)
 static constexpr const double DEFAULT_TOLERANCE = 1e-4;             //default numerical convergence tolerance
 static constexpr const unsigned int DEFAULT_MAX_ITERATIONS = 1000;  //default max number of macro-iterations
 TensorNetworkLinearSolver(std::shared_ptr<TensorOperator> tensor_operator, //in: tensor network operator (A)
                           std::shared_ptr<TensorExpansion> rhs_expansion, //in: right-hand-side ket tensor network expansion (b)
                           std::shared_ptr<TensorExpansion> vector_expansion, //inout: ket tensor network expansion sought for (x); serves as the initial guess
                           double tolerance = DEFAULT_TOLERANCE); //in: desired numerical convergence tolerance
 TensorNetworkLinearSolver(const TensorNetworkLinearSolver &) = default;
 TensorNetworkLinearSolver & operator=(const TensorNetworkLinearSolver &) = default;
 TensorNetworkLinearSolver(TensorNetworkLinearSolver &&) noexcept = default;
 TensorNetworkLinearSolver & operator=(TensorNetworkLinearSolver &&) noexcept = default;
 ~TensorNetworkLinearSolver() = default;
 /** Resets the numerical tolerance. **/
 void resetTolerance(double tolerance = DEFAULT_TOLERANCE);
 /** Resets the max number of macro-iterations. **/
 void resetMaxIterations(unsigned int max_iterations = DEFAULT_MAX_ITERATIONS);
 /** Solves the linear system over tensor network manifolds.
     Returns true on success (also for processes outside the group). **/
 bool solve();
 bool solve(const ProcessGroup & process_group); //in: executing process group
 /** Returns the found tensor network expansion (x). **/
 std::shared_ptr<TensorExpansion> getSolution() const;
 /** Enables/disables coarse-grain parallelization over tensor networks. **/
 void enableParallelization(bool parallel = true);
 /** Sets the static debug level and focus process for all instances. **/
 static void resetDebugLevel(unsigned int level = 0, //in: debug level
                             int focus_process = -1); //in: process to focus on (-1: all)
private:
 std::shared_ptr<TensorOperator> tensor_operator_; //tensor network operator (A)
 std::shared_ptr<TensorExpansion> rhs_expansion_; //right-hand-side tensor network expansion (b)
 std::shared_ptr<TensorExpansion> vector_expansion_; //unknown tensor network expansion sought for (x)
 unsigned int max_iterations_; //max number of macro-iterations
 double tolerance_; //numerical convergence tolerance (for the gradient)
 bool parallel_; //enables/disables coarse-grain parallelization over tensor networks
 std::shared_ptr<TensorExpansion> opvec_expansion_; //A*|b> tensor network expansion (operator applied to the rhs; built by solve())
};
} //namespace exatn
#endif //EXATN_LINEAR_SOLVER_HPP_
......@@ -10,7 +10,9 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
vectors formed by the same tensor network expansion, this tensor network
variational optimizer will optimize the tensor factors constituting the
bra/ket tensor network vectors to arrive at an extremum of that functional,
specifically targeting its minimum.
specifically targeting its minimum:
E = <x|H|x> / <x|x>, where H is a tensor network operator, and x is a
tensor network expansion that delivers an extremum to the functional.
**/
#ifndef EXATN_OPTIMIZER_HPP_
......@@ -93,7 +95,7 @@ private:
std::complex<double> expect_value; //current expectation value
};
std::shared_ptr<TensorOperator> tensor_operator_; //tensor operator
std::shared_ptr<TensorOperator> tensor_operator_; //tensor network operator
std::shared_ptr<TensorExpansion> vector_expansion_; //tensor network expansion to optimize (bra/ket vector)
unsigned int max_iterations_; //max number of macro-iterations
unsigned int micro_iterations_; //number of microiterations per optimized tensor
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment