Commit 832d18ee authored by Dmitry I. Lyakh
Browse files

Fixed parallelization inconsistency in Optimizer and Reconstructor


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent 4d9da1a9
/** ExaTN:: Variational optimizer of a closed symmetric tensor network expansion functional
REVISION: 2021/10/02
REVISION: 2021/10/18
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -22,7 +22,12 @@ TensorNetworkOptimizer::TensorNetworkOptimizer(std::shared_ptr<TensorOperator> t
double tolerance):
tensor_operator_(tensor_operator), vector_expansion_(vector_expansion),
max_iterations_(DEFAULT_MAX_ITERATIONS), micro_iterations_(DEFAULT_MICRO_ITERATIONS),
epsilon_(DEFAULT_LEARN_RATE), tolerance_(tolerance), parallel_(true)
epsilon_(DEFAULT_LEARN_RATE), tolerance_(tolerance),
#ifdef MPI_ENABLED
parallel_(true)
#else
parallel_(false)
#endif
{
if(!vector_expansion_->isKet()){
std::cout << "#ERROR(exatn:TensorNetworkOptimizer): The tensor network vector expansion must be a ket!"
......@@ -85,7 +90,8 @@ bool TensorNetworkOptimizer::optimize_sd(const ProcessGroup & process_group)
unsigned int local_rank; //local process rank within the process group
if(!process_group.rankIsIn(exatn::getProcessRank(),&local_rank)) return true; //process is not in the group: Do nothing
const auto num_procs = process_group.getSize();
unsigned int num_procs = 1;
if(parallel_) num_procs = process_group.getSize();
if(TensorNetworkOptimizer::focus >= 0){
if(getProcessRank() != TensorNetworkOptimizer::focus) TensorNetworkOptimizer::debug = 0;
......
/** ExaTN:: Reconstructs an approximate tensor network expansion for a given tensor network expansion
REVISION: 2021/10/02
REVISION: 2021/10/18
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -22,7 +22,12 @@ TensorNetworkReconstructor::TensorNetworkReconstructor(std::shared_ptr<TensorExp
std::shared_ptr<TensorExpansion> approximant,
double tolerance):
expansion_(expansion), approximant_(approximant),
max_iterations_(DEFAULT_MAX_ITERATIONS), epsilon_(DEFAULT_LEARN_RATE), tolerance_(tolerance), parallel_(true),
max_iterations_(DEFAULT_MAX_ITERATIONS), epsilon_(DEFAULT_LEARN_RATE), tolerance_(tolerance),
#ifdef MPI_ENABLED
parallel_(true),
#else
parallel_(false),
#endif
input_norm_(0.0), output_norm_(0.0), residual_norm_(0.0), fidelity_(0.0)
{
if(!expansion_->isKet()){
......@@ -126,7 +131,8 @@ bool TensorNetworkReconstructor::reconstruct_sd(const ProcessGroup & process_gro
{
unsigned int local_rank; //local process rank within the process group
if(!process_group.rankIsIn(exatn::getProcessRank(),&local_rank)) return true; //process is not in the group: Do nothing
const auto num_procs = process_group.getSize();
unsigned int num_procs = 1;
if(parallel_) num_procs = process_group.getSize();
assert(residual_norm != nullptr);
assert(fidelity != nullptr);
......
......@@ -1646,10 +1646,10 @@ TEST(NumServerTester, IsingTNO)
}
//Reconstruct the Ising Hamiltonian as a tensor network operator:
success = exatn::balanceNormalizeNorm2Sync(*ham_expansion,1.0,1.0,false); assert(success);
success = exatn::normalizeNorm2Sync(*ham_expansion,1.0); assert(success);
success = exatn::balanceNorm2Sync(*ham_tno_expansion,1.0,true); assert(success);
ham_tno_expansion->conjugate();
exatn::TensorNetworkReconstructor::resetDebugLevel(1); //debug
exatn::TensorNetworkReconstructor::resetDebugLevel(1,0); //debug
exatn::TensorNetworkReconstructor reconstructor(ham_expansion,ham_tno_expansion,1e-4);
success = exatn::sync(); assert(success);
double residual_norm, fidelity;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment