Commit 38402c98 authored by Dmitry I. Lyakh

Added ability to retrieve the expectation value in TensorNetworkOptimizer.


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent aef19982
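
For orientation, a minimal usage sketch of the accessor this commit adds (hypothetical, not part of the commit); the construction of hamiltonian (std::shared_ptr<TensorOperator>) and ansatz (std::shared_ptr<TensorExpansion>) is assumed, as is the public header name:

  #include "exatn.hpp" // assumed ExaTN public header
  #include <complex>
  #include <iostream>

  // Run the optimizer and retrieve the averaged expectation value
  // after convergence (constructor arguments per the signature below):
  exatn::TensorNetworkOptimizer optimizer(hamiltonian, ansatz, 1e-5);
  bool converged = optimizer.optimize();
  if(converged){
    const std::complex<double> expect_val = optimizer.getExpectationValue(); // new accessor
    std::cout << "Converged expectation value = " << expect_val << std::endl;
  }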
/** ExaTN:: Variational optimizer of a closed symmetric tensor network expansion functional
-REVISION: 2021/10/18
+REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -24,10 +24,11 @@ TensorNetworkOptimizer::TensorNetworkOptimizer(std::shared_ptr<TensorOperator> t
max_iterations_(DEFAULT_MAX_ITERATIONS), micro_iterations_(DEFAULT_MICRO_ITERATIONS),
epsilon_(DEFAULT_LEARN_RATE), tolerance_(tolerance),
#ifdef MPI_ENABLED
-parallel_(true)
+parallel_(true),
#else
-parallel_(false)
+parallel_(false),
#endif
+average_expect_val_({0.0,0.0})
{
if(!vector_expansion_->isKet()){
std::cout << "#ERROR(exatn:TensorNetworkOptimizer): The tensor network vector expansion must be a ket!"
@@ -65,12 +66,19 @@ void TensorNetworkOptimizer::resetMicroIterations(unsigned int micro_iterations)
}
-std::shared_ptr<TensorExpansion> TensorNetworkOptimizer::getSolution() const
+std::shared_ptr<TensorExpansion> TensorNetworkOptimizer::getSolution(std::complex<double> * average_expect_val) const
{
+if(average_expect_val != nullptr) *average_expect_val = average_expect_val_;
return vector_expansion_;
}
+std::complex<double> TensorNetworkOptimizer::getExpectationValue() const
+{
+ return average_expect_val_;
+}
bool TensorNetworkOptimizer::optimize()
{
return optimize(exatn::getDefaultProcessGroup());
@@ -206,7 +214,7 @@ bool TensorNetworkOptimizer::optimize_sd(const ProcessGroup & process_group)
std::cout << "#DEBUG(exatn::TensorNetworkOptimizer): Iteration " << iteration << std::endl;
converged = true;
double max_convergence = 0.0;
-std::complex<double> average_expect_val{0.0,0.0};
+average_expect_val_ = std::complex<double>{0.0,0.0};
for(auto & environment: environments_){
//Create the gradient tensors:
done = createTensorSync(environment.gradient,environment.tensor->getElementType()); assert(done);
@@ -241,7 +249,7 @@ bool TensorNetworkOptimizer::optimize_sd(const ProcessGroup & process_group)
default:
assert(false);
}
-if(micro_iteration == (micro_iterations_ - 1)) average_expect_val += expect_val;
+if(micro_iteration == (micro_iterations_ - 1)) average_expect_val_ += expect_val;
if(TensorNetworkOptimizer::debug > 1) std::cout << " Operator expectation value w.r.t. " << environment.tensor->getName()
<< " = " << std::scientific << expect_val << std::endl;
//Update the expectation value in the gradient expansion:
@@ -361,9 +369,9 @@ bool TensorNetworkOptimizer::optimize_sd(const ProcessGroup & process_group)
done = destroyTensorSync(environment.gradient_aux->getName()); assert(done);
done = destroyTensorSync(environment.gradient->getName()); assert(done);
}
-average_expect_val /= static_cast<double>(environments_.size());
+average_expect_val_ /= static_cast<double>(environments_.size());
if(TensorNetworkOptimizer::debug > 0){
std::cout << "Average expectation value = " << average_expect_val
std::cout << "Average expectation value = " << average_expect_val_
<< "; Max convergence residual = " << max_convergence << std::endl;
}
++iteration;
......
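
A note on the accumulation in the hunks above: during each macro-iteration, average_expect_val_ collects the expectation value measured at the last micro-iteration of every optimization environment and is then divided by environments_.size(), so the stored value is the arithmetic mean (1/N) * sum_k expect_val_k over the N optimized tensor factors.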
/** ExaTN:: Variational optimizer of a closed symmetric tensor network expansion functional
-REVISION: 2021/10/02
+REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -64,7 +64,10 @@ public:
bool optimize(const ProcessGroup & process_group); //in: executing process group
/** Returns the optimized tensor network expansion forming the optimal bra/ket vectors. **/
-std::shared_ptr<TensorExpansion> getSolution() const;
+std::shared_ptr<TensorExpansion> getSolution(std::complex<double> * average_expect_val = nullptr) const;
+/** Returns the achieved expectation value. **/
+std::complex<double> getExpectationValue() const;
/** Enables/disables coarse-grain parallelization over tensor networks. **/
void enableParallelization(bool parallel = true);
@@ -98,6 +101,8 @@ private:
double tolerance_; //numerical convergence tolerance (for the gradient)
bool parallel_; //enables/disables coarse-grain parallelization over tensor networks
+std::complex<double> average_expect_val_; //average expectation value (across all optimized tensor factors)
std::vector<Environment> environments_; //optimization environments for each optimizable tensor
};
......
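
Because the new average_expect_val parameter of getSolution() defaults to nullptr, existing call sites compile unchanged; a caller opts in by passing an address (hypothetical snippet, optimizer assumed to be a converged TensorNetworkOptimizer):

  std::complex<double> avg_val;
  auto solution = optimizer.getSolution(&avg_val); // avg_val now holds the cached average expectation value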
@@ -1677,6 +1677,7 @@ TEST(NumServerTester, IsingTNO)
std::cout << "Search failed!" << std::endl;
assert(false);
}
+const auto expect_val1 = optimizer1.getExpectationValue();
//Reconstruct the Ising Hamiltonian as a tensor network operator:
std::cout << "Reconstructing Ising Hamiltonian with a tensor network operator:" << std::endl;
@@ -1700,7 +1701,8 @@ TEST(NumServerTester, IsingTNO)
}
const auto tno_exp_coefs = ham_tno_expansion->getCoefficients();
assert(tno_exp_coefs.size() == 1);
std::cout << "Eigenvalue scaling coefficient = " << ham_norm * tno_exp_coefs[0] << std::endl;
const auto eig_scaling = ham_norm * tno_exp_coefs[0];
std::cout << "Eigenvalue scaling coefficient = " << eig_scaling << std::endl;
//Ground state search for the tensor network Hamiltonian:
std::cout << "Ground state search for the tensor network Hamiltonian:" << std::endl;
@@ -1715,6 +1717,9 @@ TEST(NumServerTester, IsingTNO)
std::cout << "Search failed!" << std::endl;
assert(false);
}
+const auto expect_val2 = optimizer2.getExpectationValue();
+std::cout << "Relative eigenvalue error due to reconstruction is "
+          << std::abs(expect_val1 - (expect_val2 * eig_scaling)) / std::abs(expect_val1) * 1e2 << " %\n";
//Destroy all tensors:
success = exatn::sync(); assert(success);
......
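
The new check above compares the two ground-state estimates: expect_val1 comes from the original Ising Hamiltonian, while expect_val2 comes from the reconstructed tensor network operator, whose eigenvalues are rescaled by eig_scaling = ham_norm * tno_exp_coefs[0]; the printed quantity is the percent relative deviation |expect_val1 - eig_scaling * expect_val2| / |expect_val1| * 100.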