src/exatn/CMakeLists.txt  +1 −1

@@ -17,7 +17,7 @@
 add_dependencies(${LIBRARY_NAME} exatensor-build)

 target_include_directories(${LIBRARY_NAME} PUBLIC . ${CMAKE_BINARY_DIR})
 target_link_libraries(${LIBRARY_NAME}
-                      PUBLIC CppMicroServices exatn-numerics exatn-runtime)
+                      PUBLIC CppMicroServices exatn-numerics exatn-runtime PRIVATE ExaTensor::ExaTensor)

 exatn_configure_library_rpath(exatn)
src/exatn/optimizer.cpp  +195 −1

 /** ExaTN:: Variational optimizer of a closed symmetric tensor network expansion functional
-REVISION: 2021/10/30
+REVISION: 2021/11/17

 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
 **/

@@ -12,6 +12,22 @@
 #include <string>
 #include <iostream>

+//LAPACK zggev:
+extern "C" {
+void zggev_(char const * jobvl, char const * jobvr, int const * n,
+            void * A, int const * lda, void * B, int const * ldb,
+            void * alpha, void * beta,
+            void * VL, int const * ldvl, void * VR, int const * ldvr,
+            void * work, int const * lwork, void * rwork, int * info);
+}
+
 namespace exatn{

 unsigned int TensorNetworkOptimizer::debug{0};

@@ -170,6 +186,9 @@ bool TensorNetworkOptimizer::optimize_sd(const ProcessGroup & process_group)
   if(getProcessRank() != TensorNetworkOptimizer::focus) TensorNetworkOptimizer::debug = 0;
  }

+ //Pre-optimize the initial tensor network vector expansion guess:
+ if(PREOPTIMIZE_INITIAL_GUESS) computeInitialGuess(process_group);
+
  //Balance-normalize the tensor network vector expansion:
  //bool success = balanceNormalizeNorm2Sync(*vector_expansion_,1.0,1.0,true); assert(success);
  bool success = balanceNorm2Sync(*vector_expansion_,1.0,true); assert(success);

@@ -453,6 +472,181 @@ bool TensorNetworkOptimizer::optimize_sd(const ProcessGroup & process_group)
 }

+void TensorNetworkOptimizer::computeInitialGuess(const ProcessGroup & process_group,
+                                                 bool highest,
+                                                 unsigned int guess_dim)
+{
+ assert(tensor_operator_ && vector_expansion_);
+ assert(guess_dim > 1);
+ bool success = true;
+ //Generate a random non-orthogonal tensor network basis:
+ auto tn_builder = exatn::getTensorNetworkBuilder("MPS");
+ success = tn_builder->setParameter("max_bond_dim",DEFAULT_GUESS_MAX_BOND_DIM); assert(success);
+ auto ket_tensor = vector_expansion_->getSpaceTensor();
+ const auto elem_type = vector_expansion_->cbegin()->network->getTensorElementType();
+ assert(elem_type != TensorElementType::VOID);
+ std::vector<std::shared_ptr<TensorExpansion>> basis_ket(guess_dim);
+ std::vector<std::shared_ptr<TensorExpansion>> basis_bra(guess_dim);
+ success = exatn::sync(process_group); assert(success);
+ for(unsigned int i = 0; i < guess_dim; ++i){
+  basis_ket[i] = makeSharedTensorExpansion("_BasisVector"+std::to_string(i)); //construct the (empty) ket expansion before use
+  auto basis_vector = makeSharedTensorNetwork("_VectorNet"+std::to_string(i),ket_tensor,*tn_builder,false);
+  success = basis_ket[i]->appendComponent(basis_vector,std::complex<double>{1.0,0.0}); assert(success);
+  basis_bra[i] = makeSharedTensorExpansion(*(basis_ket[i]));
+  basis_bra[i]->conjugate();
+  basis_bra[i]->rename("_ConjVectorNet"+std::to_string(i));
+  success = exatn::createTensors(process_group,*basis_vector,elem_type); assert(success);
+  success = exatn::initTensorsRnd(*basis_vector); assert(success);
+ }
+ success = exatn::sync(process_group); assert(success);
+ //Build the operator matrix:
+ std::vector<std::complex<double>> oper_matrix(guess_dim*guess_dim);
+ std::vector<std::shared_ptr<Tensor>> oper_scalar(guess_dim*guess_dim);
+ std::vector<std::shared_ptr<TensorExpansion>> oper_elems(guess_dim*guess_dim);
+ for(unsigned int j = 0; j < guess_dim; ++j){
+  for(unsigned int i = 0; i < guess_dim; ++i){
+   oper_elems[j*guess_dim + i] = makeSharedTensorExpansion(*(basis_ket[j]),*(basis_bra[i]),*tensor_operator_);
+   oper_scalar[j*guess_dim + i] = makeSharedTensor("_");
+   oper_scalar[j*guess_dim + i]->rename("_OperScalarElem_"+std::to_string(i)+"_"+std::to_string(j));
+   success = exatn::createTensor(process_group,oper_scalar[j*guess_dim + i],elem_type); assert(success);
+   success = exatn::initTensor(oper_scalar[j*guess_dim + i]->getName(),0.0); assert(success);
+   success = exatn::evaluate(process_group,*(oper_elems[j*guess_dim + i]),oper_scalar[j*guess_dim + i]); assert(success);
+  }
+ }
+ //Build the metric matrix:
+ std::vector<std::complex<double>> metr_matrix(guess_dim*guess_dim);
+ std::vector<std::shared_ptr<Tensor>> metr_scalar(guess_dim*guess_dim);
+ std::vector<std::shared_ptr<TensorExpansion>> metr_elems(guess_dim*guess_dim);
+ for(unsigned int j = 0; j < guess_dim; ++j){
+  for(unsigned int i = 0; i < guess_dim; ++i){
+   metr_elems[j*guess_dim + i] = makeSharedTensorExpansion(*(basis_ket[j]),*(basis_bra[i]));
+   metr_scalar[j*guess_dim + i] = makeSharedTensor("_");
+   metr_scalar[j*guess_dim + i]->rename("_MetrScalarElem_"+std::to_string(i)+"_"+std::to_string(j));
+   success = exatn::createTensor(process_group,metr_scalar[j*guess_dim + i],elem_type); assert(success);
+   success = exatn::initTensor(metr_scalar[j*guess_dim + i]->getName(),0.0); assert(success);
+   success = exatn::evaluate(process_group,*(metr_elems[j*guess_dim + i]),metr_scalar[j*guess_dim + i]); assert(success);
+  }
+ }
+ success = exatn::sync(process_group); assert(success);
+ //Retrieve the computed scalar matrix elements:
+ for(unsigned int j = 0; j < guess_dim; ++j){
+  for(unsigned int i = 0; i < guess_dim; ++i){
+   const auto & oper_name = oper_scalar[j*guess_dim + i]->getName();
+   const auto & metr_name = metr_scalar[j*guess_dim + i]->getName();
+   switch(elem_type){
+   case TensorElementType::REAL32:
+    oper_matrix[j*guess_dim + i] = std::complex<double>(
+     exatn::getLocalTensor(oper_name)->getSliceView<float>()[std::initializer_list<int>{}],0.0);
+    metr_matrix[j*guess_dim + i] = std::complex<double>(
+     exatn::getLocalTensor(metr_name)->getSliceView<float>()[std::initializer_list<int>{}],0.0);
+    break;
+   case TensorElementType::REAL64:
+    oper_matrix[j*guess_dim + i] = std::complex<double>(
+     exatn::getLocalTensor(oper_name)->getSliceView<double>()[std::initializer_list<int>{}],0.0);
+    metr_matrix[j*guess_dim + i] = std::complex<double>(
+     exatn::getLocalTensor(metr_name)->getSliceView<double>()[std::initializer_list<int>{}],0.0);
+    break;
+   case TensorElementType::COMPLEX32:
+    oper_matrix[j*guess_dim + i] = std::complex<double>(
+     exatn::getLocalTensor(oper_name)->getSliceView<std::complex<float>>()[std::initializer_list<int>{}]);
+    metr_matrix[j*guess_dim + i] = std::complex<double>(
+     exatn::getLocalTensor(metr_name)->getSliceView<std::complex<float>>()[std::initializer_list<int>{}]);
+    break;
+   case TensorElementType::COMPLEX64:
+    oper_matrix[j*guess_dim + i] =
+     exatn::getLocalTensor(oper_name)->getSliceView<std::complex<double>>()[std::initializer_list<int>{}];
+    metr_matrix[j*guess_dim + i] =
+     exatn::getLocalTensor(metr_name)->getSliceView<std::complex<double>>()[std::initializer_list<int>{}];
+    break;
+   default:
+    assert(false);
+   }
+  }
+ }
+ //Print matrices (debug):
+ std::cout << "#DEBUG(exatn::TensorNetworkOptimizer::computeInitialGuess): Operator matrix:\n" << std::scientific;
+ for(unsigned int i = 0; i < guess_dim; ++i){
+  for(unsigned int j = 0; j < guess_dim; ++j){
+   std::cout << " " << oper_matrix[j*guess_dim + i];
+  }
+  std::cout << std::endl;
+ }
+ std::cout << "#DEBUG(exatn::TensorNetworkOptimizer::computeInitialGuess): Metric matrix:\n" << std::scientific;
+ for(unsigned int i = 0; i < guess_dim; ++i){
+  for(unsigned int j = 0; j < guess_dim; ++j){
+   std::cout << " " << metr_matrix[j*guess_dim + i];
+  }
+  std::cout << std::endl;
+ }
+ //Solve the projected eigen-problem:
+ int info = 0;
+ const char job_type = 'V';
+ const int matrix_dim = guess_dim;
+ const int lwork = std::max(2*guess_dim,guess_dim*guess_dim);
+ std::vector<std::complex<double>> work_space(lwork);
+ std::vector<std::complex<double>> left_vecs(guess_dim*guess_dim,std::complex<double>{0.0,0.0});
+ std::vector<std::complex<double>> right_vecs(guess_dim*guess_dim,std::complex<double>{0.0,0.0});
+ std::vector<std::complex<double>> alpha(guess_dim,std::complex<double>{0.0,0.0});
+ std::vector<std::complex<double>> beta(guess_dim,std::complex<double>{0.0,0.0});
+ std::vector<double> rwork_space(guess_dim*8);
+ zggev_(&job_type,&job_type,&matrix_dim,
+        (void*)oper_matrix.data(),&matrix_dim,
+        (void*)metr_matrix.data(),&matrix_dim,
+        (void*)alpha.data(),(void*)beta.data(),
+        (void*)left_vecs.data(),&matrix_dim,
+        (void*)right_vecs.data(),&matrix_dim,
+        (void*)work_space.data(),&lwork,
+        (void*)rwork_space.data(),&info);
+ if(info == 0){
+  //Find the min/max eigenvalue:
+  int min_entry = 0, max_entry = 0;
+  for(int i = 0; i < matrix_dim; ++i){
+   if(std::abs(beta[i]) != 0.0){
+    const auto lambda = alpha[i] / beta[i];
+    if(std::abs(beta[min_entry]) != 0.0){
+     const auto min_val = alpha[min_entry] / beta[min_entry];
+     if(lambda.real() < min_val.real()) min_entry = i;
+    }else{
+     min_entry = i;
+    }
+    if(std::abs(beta[max_entry]) != 0.0){
+     const auto max_val = alpha[max_entry] / beta[max_entry];
+     if(lambda.real() > max_val.real()) max_entry = i;
+    }else{
+     max_entry = i;
+    }
+   }
+  }
+  //Generate the target tensor eigen-expansion:
+  auto target_root = min_entry;
+  if(highest) target_root = max_entry;
+  auto root_expansion = makeSharedTensorExpansion("_RootExpansion");
+  for(int i = 0; i < matrix_dim; ++i){
+   const auto coef = right_vecs[target_root*matrix_dim + i];
+   for(auto net = basis_ket[i]->begin(); net != basis_ket[i]->end(); ++net){
+    success = root_expansion->appendComponent(net->network,coef*(net->coefficient)); assert(success);
+   }
+  }
+  //Reconstruct the eigen-expansion as an initial guess:
+  vector_expansion_->conjugate();
+  TensorNetworkReconstructor::resetDebugLevel(1,0); //debug
+  TensorNetworkReconstructor reconstructor(root_expansion,vector_expansion_,DEFAULT_GUESS_TOLERANCE);
+  success = exatn::sync(process_group); assert(success);
+  double residual_norm, fidelity;
+  bool reconstructed = reconstructor.reconstruct(process_group,&residual_norm,&fidelity);
+  success = exatn::sync(process_group); assert(success);
+  vector_expansion_->conjugate();
+ }
+ //Destroy temporaries:
+ for(unsigned int j = 0; j < guess_dim; ++j){
+  success = exatn::destroyTensors(*(basis_ket[j]->begin()->network)); assert(success);
+  for(unsigned int i = 0; i < guess_dim; ++i){
+   success = exatn::destroyTensor(oper_scalar[j*guess_dim + i]->getName()); assert(success);
+   success = exatn::destroyTensor(metr_scalar[j*guess_dim + i]->getName()); assert(success);
+  }
+ }
+ success = exatn::sync(process_group); assert(success);
+ return;
+}
+

 void TensorNetworkOptimizer::enableParallelization(bool parallel)
 {
  parallel_ = parallel;
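Note on the LAPACK call: computeInitialGuess hands the projected eigen-problem to zggev, which solves A·x = λ·B·x for a general complex matrix pencil stored column-major and returns each eigenvalue as the ratio alpha[i]/beta[i], with right eigenvectors in the columns of VR. The following standalone sketch is not part of this change set; the file name, the 2×2 test matrices, and the build flags (e.g. g++ zggev_sketch.cpp -llapack) are illustrative assumptions. It exercises the same extern "C" declaration on a pencil whose eigenvalues are known to be 1 and 3.

// zggev_sketch.cpp -- minimal generalized eigen-solve with LAPACK zggev (illustrative only)
#include <algorithm>
#include <complex>
#include <iostream>
#include <vector>

extern "C" {
void zggev_(char const * jobvl, char const * jobvr, int const * n,
            void * A, int const * lda, void * B, int const * ldb,
            void * alpha, void * beta,
            void * VL, int const * ldvl, void * VR, int const * ldvr,
            void * work, int const * lwork, void * rwork, int * info);
}

int main()
{
 const int n = 2;
 //Column-major 2x2 pencil: A = diag(1,3), B = identity, so the eigenvalues are 1 and 3:
 std::vector<std::complex<double>> a{{1.0,0.0},{0.0,0.0},{0.0,0.0},{3.0,0.0}};
 std::vector<std::complex<double>> b{{1.0,0.0},{0.0,0.0},{0.0,0.0},{1.0,0.0}};
 std::vector<std::complex<double>> alpha(n), beta(n), vl(n*n), vr(n*n);
 const char job = 'V';
 const int lwork = std::max(2*n,n*n);
 std::vector<std::complex<double>> work(lwork);
 std::vector<double> rwork(8*n);
 int info = 0;
 zggev_(&job,&job,&n,a.data(),&n,b.data(),&n,
        alpha.data(),beta.data(),vl.data(),&n,vr.data(),&n,
        work.data(),&lwork,rwork.data(),&info);
 if(info != 0) return info;
 for(int i = 0; i < n; ++i){
  //Generalized eigenvalue i is alpha[i]/beta[i]; column i of vr holds its right eigenvector:
  if(std::abs(beta[i]) != 0.0) std::cout << "lambda_" << i << " = " << alpha[i]/beta[i] << std::endl;
 }
 return 0;
}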
src/exatn/optimizer.hpp  +11 −1

 /** ExaTN:: Variational optimizer of a closed symmetric tensor network expansion functional
-REVISION: 2021/10/27
+REVISION: 2021/11/17

 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
 **/

@@ -19,6 +19,7 @@
 #define EXATN_OPTIMIZER_HPP_

 #include "exatn_numerics.hpp"
+#include "reconstructor.hpp"

 #include <vector>
 #include <complex>

@@ -38,6 +39,10 @@ public:
 static constexpr const double DEFAULT_LEARN_RATE = 0.5;
 static constexpr const unsigned int DEFAULT_MAX_ITERATIONS = 1000;
 static constexpr const unsigned int DEFAULT_MICRO_ITERATIONS = 1;
+static constexpr const bool PREOPTIMIZE_INITIAL_GUESS = true;
+static constexpr const unsigned int DEFAULT_KRYLOV_GUESS_DIM = 8;
+static constexpr const unsigned int DEFAULT_GUESS_MAX_BOND_DIM = DEFAULT_KRYLOV_GUESS_DIM;
+static constexpr const double DEFAULT_GUESS_TOLERANCE = 1e-2;

 TensorNetworkOptimizer(std::shared_ptr<TensorOperator> tensor_operator,   //in: hermitian tensor network operator
                        std::shared_ptr<TensorExpansion> vector_expansion, //inout: tensor network expansion forming the bra/ket vectors

@@ -96,6 +101,11 @@ public:
 protected:

+ //Generates a pre-optimized initial guess for the extreme eigen-root (lowest by default):
+ void computeInitialGuess(const ProcessGroup & process_group,
+                          bool highest = false,
+                          unsigned int guess_dim = DEFAULT_KRYLOV_GUESS_DIM);
+
  //Implementation based on the steepest descent algorithm:
  bool optimize_sd(const ProcessGroup & process_group); //in: executing process group
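For reference, the guess machinery declared here amounts to a Rayleigh-Ritz projection onto the random tensor network basis $\{|\psi_i\rangle\}$ built in optimizer.cpp; the notation below is an editorial summary rather than something taken from the source. The "operator" and "metric" scalars assembled there are the matrix elements

$$H_{ij} = \langle \psi_i | \hat{H} | \psi_j \rangle, \qquad S_{ij} = \langle \psi_i | \psi_j \rangle,$$

and zggev solves the projected generalized eigen-problem

$$H\,c = \lambda\,S\,c, \qquad \lambda_k = \alpha_k/\beta_k \;\; (\beta_k \neq 0).$$

The eigenvector $c$ with the lowest (or, when highest is requested, the highest) real eigenvalue is re-expanded as $\sum_i c_i\,|\psi_i\rangle$ and then reconstructed into vector_expansion_ as the starting point for the steepest-descent optimization.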
src/exatn/tests/NumServerTester.cpp  +8 −7

@@ -19,7 +19,7 @@
 //Test activation:
 #define EXATN_TEST0
-#define EXATN_TEST1
+/*#define EXATN_TEST1
 #define EXATN_TEST2
 #define EXATN_TEST3
 #define EXATN_TEST4

@@ -48,9 +48,9 @@
 //#define EXATN_TEST27 //requires input file from source
 //#define EXATN_TEST28 //requires input file from source
 #define EXATN_TEST29
-#define EXATN_TEST30
-//#define EXATN_TEST31 //requires input file from source
-#define EXATN_TEST32
+#define EXATN_TEST30*/
+#define EXATN_TEST31 //requires input file from source
+//#define EXATN_TEST32

 #ifdef EXATN_TEST0

@@ -3605,15 +3605,16 @@ TEST(NumServerTester, ExcitedMCVQE) {
 const int max_bond_dim = std::min(static_cast<int>(std::pow(2,num_spin_sites/2)),bond_dim_lim);
 const int arity = 2;
 const std::string tn_type = "TTN"; //MPS or TTN
-const double accuracy = 2e-4;
+const double accuracy = 1e-4;

-//exatn::resetLoggingLevel(2,2); //debug
+//exatn::resetLoggingLevel(1,2); //debug

 bool success = true;
 bool root = (exatn::getProcessRank() == 0);

 //Read the MCVQE Hamiltonian in spin representation:
-auto hamiltonian0 = exatn::quantum::readSpinHamiltonian("MCVQEHamiltonian","mcvqe_8q.qcw.txt",TENS_ELEM_TYPE,"QCWare");
+auto hamiltonian0 = exatn::quantum::readSpinHamiltonian("MCVQEHamiltonian",
+                     "mcvqe_"+std::to_string(num_spin_sites)+"q.qcw.txt",TENS_ELEM_TYPE,"QCWare");
 success = hamiltonian0->deleteComponent(0); assert(success);

 //Configure the tensor network builder:
src/numerics/tensor_expansion.cpp  +10 −1

 /** ExaTN::Numerics: Tensor network expansion
-REVISION: 2021/10/21
+REVISION: 2021/11/15

 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle)
 **/

@@ -346,6 +346,15 @@ void TensorExpansion::markOptimizableAllTensors()
 }

+std::shared_ptr<Tensor> TensorExpansion::getSpaceTensor() const
+{
+ assert(getNumComponents() > 0);
+ auto space_tensor = makeSharedTensor(*(cbegin()->network->getTensor(0)));
+ space_tensor->rename();
+ return space_tensor;
+}
+

 std::vector<std::complex<double>> TensorExpansion::getCoefficients() const
 {
  std::vector<std::complex<double>> coefs(components_.size(),{0.0,0.0});
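A minimal usage sketch of the new accessor, not part of the change set: getSpaceTensor() returns an unnamed tensor carrying the shape of the expansion's vector space, which can seed a tensor network builder to create fresh guess networks over the same space, as computeInitialGuess does above. The helper function name, the header choice, and the fixed bond dimension are assumptions; the exatn calls mirror those appearing in the diff.

//Sketch: build one randomly initialized MPS network over the space of an existing expansion.
#include "exatn.hpp"
#include <cassert>
#include <memory>

std::shared_ptr<exatn::TensorNetwork> makeRandomGuessNetwork(const exatn::ProcessGroup & process_group,
                                                             const exatn::TensorExpansion & expansion)
{
 auto space_tensor = expansion.getSpaceTensor(); //tensor with the shape of the expansion's space
 auto tn_builder = exatn::getTensorNetworkBuilder("MPS");
 bool ok = tn_builder->setParameter("max_bond_dim",8); assert(ok);
 auto net = exatn::makeSharedTensorNetwork("_GuessNet",space_tensor,*tn_builder,false);
 ok = exatn::createTensors(process_group,*net,expansion.cbegin()->network->getTensorElementType()); assert(ok);
 ok = exatn::initTensorsRnd(*net); assert(ok);
 return net;
}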