Commit 2645209a authored by Dmitry I. Lyakh's avatar Dmitry I. Lyakh

Implemented Ising Hamiltonian test for ground and excited states.


Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent c22d1949
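For context: the test below builds the two pieces of a 1D transverse-field Ising Hamiltonian, i.e. (assuming the conventional roles of the j_param nearest-neighbor coupling and the h_param on-site field visible in the diff) H = J * sum_i Z(i) Z(i+1) + h * sum_i X(i), and then variationally finds the ground state and the first two excited states. Each excited state is obtained by deflating the operator with -E_k * |psi_k><psi_k| constructed from the previously converged tensor network state.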
@@ -3416,7 +3416,7 @@ TEST(NumServerTester, SpinHamiltonians) {
bool success = true;
-//Define the 1D-Ising Hamiltonian generator:
+//Define the 1D Ising Hamiltonian generator:
TensorRange spin_sites({num_spin_sites});
auto ising_generator = [j_param,
spin_sites,
@@ -3438,11 +3438,38 @@ TEST(NumServerTester, SpinHamiltonians) {
return pauli_product;
};
-//Construct the 1D-Ising Hamiltonian using the generator:
-auto ising_hamiltonian = exatn::quantum::generateSpinHamiltonian("IsingHamiltonian",
-                                                                 ising_generator,
-                                                                 TENS_ELEM_TYPE);
-//ising_hamiltonian->printIt(); //debug
+//Construct the 1D Ising Hamiltonian using the generator:
+auto ising_hamiltonian0 = exatn::quantum::generateSpinHamiltonian("IsingHamiltonian",
+                                                                  ising_generator,
+                                                                  TENS_ELEM_TYPE);
+//ising_hamiltonian0->printIt(); //debug
+//Define the 1D transverse field generator:
+spin_sites.reset();
+auto transverse_generator = [h_param,
+                             spin_sites,
+                             num_sites = spin_sites.localVolume(),
+                             finished = false] () mutable -> PauliProduct {
+ assert(num_sites > 1);
+ PauliProduct pauli_product;
+ if(!finished){
+  const auto spin_site = spin_sites.localOffset();
+  pauli_product.product.emplace_back(PauliMap{Gate::gate_X,spin_site});
+  pauli_product.coefficient = h_param;
+  if(spin_site < (num_sites - 1)){
+   spin_sites.next();
+  }else{
+   finished = true;
+  }
+ }
+ return pauli_product;
+};
+//Construct the 1D transverse field Hamiltonian using the generator:
+auto transverse_field = exatn::quantum::generateSpinHamiltonian("TransverseField",
+                                                                transverse_generator,
+                                                                TENS_ELEM_TYPE);
+//transverse_field->printIt(); //debug
//Configure the tensor network builder:
auto tn_builder = exatn::getTensorNetworkBuilder(tn_type);
@@ -3457,10 +3484,15 @@ TEST(NumServerTester, SpinHamiltonians) {
//Build tensor network vectors:
auto ket_tensor = exatn::makeSharedTensor("TensorSpace",std::vector<int>(num_spin_sites,2));
-auto vec_net = exatn::makeSharedTensorNetwork("VectorNet",ket_tensor,*tn_builder,false);
-vec_net->markOptimizableAllTensors();
-//vec_net->printIt(); //debug
-auto vec_tns = exatn::makeSharedTensorExpansion("VectorTNS",vec_net,std::complex<double>{1.0,0.0});
+auto vec_net0 = exatn::makeSharedTensorNetwork("VectorNet",ket_tensor,*tn_builder,false);
+vec_net0->markOptimizableAllTensors();
+auto vec_tns0 = exatn::makeSharedTensorExpansion("VectorTNS",vec_net0,std::complex<double>{1.0,0.0});
+auto vec_net1 = exatn::makeSharedTensorNetwork("VectorNet",ket_tensor,*tn_builder,false);
+vec_net1->markOptimizableAllTensors();
+auto vec_tns1 = exatn::makeSharedTensorExpansion("VectorTNS",vec_net1,std::complex<double>{1.0,0.0});
+auto vec_net2 = exatn::makeSharedTensorNetwork("VectorNet",ket_tensor,*tn_builder,false);
+vec_net2->markOptimizableAllTensors();
+auto vec_tns2 = exatn::makeSharedTensorExpansion("VectorTNS",vec_net2,std::complex<double>{1.0,0.0});
auto rhs_net = exatn::makeSharedTensorNetwork("RightHandSideNet",ket_tensor,*tn_builder,false);
auto rhs_tns = exatn::makeSharedTensorExpansion("RightHandSideTNS",rhs_net,std::complex<double>{1.0,0.0});
@@ -3468,8 +3500,12 @@ TEST(NumServerTester, SpinHamiltonians) {
{
//Create and initialize tensor network vector tensors:
std::cout << "Creating and initializing tensor network vector tensors ... ";
-success = exatn::createTensorsSync(*vec_net,TENS_ELEM_TYPE); assert(success);
-success = exatn::initTensorsRndSync(*vec_net); assert(success);
+success = exatn::createTensorsSync(*vec_net0,TENS_ELEM_TYPE); assert(success);
+success = exatn::initTensorsRndSync(*vec_net0); assert(success);
+success = exatn::createTensorsSync(*vec_net1,TENS_ELEM_TYPE); assert(success);
+success = exatn::initTensorsRndSync(*vec_net1); assert(success);
+success = exatn::createTensorsSync(*vec_net2,TENS_ELEM_TYPE); assert(success);
+success = exatn::initTensorsRndSync(*vec_net2); assert(success);
success = exatn::createTensorsSync(*rhs_net,TENS_ELEM_TYPE); assert(success);
success = exatn::initTensorsRndSync(*rhs_net); assert(success);
std::cout << "Ok" << std::endl;
@@ -3477,9 +3513,55 @@ TEST(NumServerTester, SpinHamiltonians) {
//Ground state search for the original Hamiltonian:
std::cout << "Ground state search for the original Hamiltonian:" << std::endl;
exatn::TensorNetworkOptimizer::resetDebugLevel(1,0);
-exatn::TensorNetworkOptimizer optimizer(ising_hamiltonian,vec_tns,1e-5);
+exatn::TensorNetworkOptimizer optimizer0(ising_hamiltonian0,vec_tns0,1e-4);
success = exatn::sync(); assert(success);
-bool converged = optimizer.optimize();
+bool converged = optimizer0.optimize();
success = exatn::sync(); assert(success);
+if(converged){
+ std::cout << "Search succeeded: ";
+}else{
+ std::cout << "Search failed!" << std::endl;
+ assert(false);
+}
+const auto expect_val0 = optimizer0.getExpectationValue();
+std::cout << "Expectation value = " << expect_val0 << std::endl;
+//First excited state search for the projected Hamiltonian:
+std::cout << "1st excited state search for the projected Hamiltonian:" << std::endl;
+vec_net0->markOptimizableNoTensors();
+std::vector<std::pair<unsigned int, unsigned int>> ket_pairing(num_spin_sites);
+for(unsigned int i = 0; i < num_spin_sites; ++i) ket_pairing[i] = std::make_pair(i,i);
+std::vector<std::pair<unsigned int, unsigned int>> bra_pairing(num_spin_sites);
+for(unsigned int i = 0; i < num_spin_sites; ++i) bra_pairing[i] = std::make_pair(i,i);
+auto projector0 = exatn::makeSharedTensorOperator("Projector0",vec_net0,vec_net0,
+                                                  ket_pairing,bra_pairing,-expect_val0);
+auto ising_hamiltonian1 = exatn::combineTensorOperators(*ising_hamiltonian0,*projector0);
+//ising_hamiltonian1->printIt(); //debug
+exatn::TensorNetworkOptimizer::resetDebugLevel(1,0);
+exatn::TensorNetworkOptimizer optimizer1(ising_hamiltonian1,vec_tns1,1e-4);
+success = exatn::sync(); assert(success);
+converged = optimizer1.optimize();
+success = exatn::sync(); assert(success);
+if(converged){
+ std::cout << "Search succeeded: ";
+}else{
+ std::cout << "Search failed!" << std::endl;
+ assert(false);
+}
+const auto expect_val1 = optimizer1.getExpectationValue();
+std::cout << "Expectation value = " << expect_val1 << std::endl;
+//Second excited state search for the projected Hamiltonian:
+std::cout << "2nd excited state search for the projected Hamiltonian:" << std::endl;
+vec_net1->markOptimizableNoTensors();
+auto projector1 = exatn::makeSharedTensorOperator("Projector1",vec_net1,vec_net1,
+                                                  ket_pairing,bra_pairing,-expect_val1);
+auto ising_hamiltonian2 = exatn::combineTensorOperators(*ising_hamiltonian1,*projector1);
+//ising_hamiltonian2->printIt(); //debug
+exatn::TensorNetworkOptimizer::resetDebugLevel(1,0);
+exatn::TensorNetworkOptimizer optimizer2(ising_hamiltonian2,vec_tns2,1e-4);
+success = exatn::sync(); assert(success);
+converged = optimizer2.optimize();
+success = exatn::sync(); assert(success);
if(converged){
std::cout << "Search succeeded: ";
@@ -3487,8 +3569,8 @@ TEST(NumServerTester, SpinHamiltonians) {
std::cout << "Search failed!" << std::endl;
assert(false);
}
-const auto expect_val = optimizer.getExpectationValue();
-std::cout << "Expectation value = " << expect_val << std::endl;
+const auto expect_val2 = optimizer2.getExpectationValue();
+std::cout << "Expectation value = " << expect_val2 << std::endl;
}
//Synchronize:
......
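The excited-state logic above reduces to a small deflation pattern. The fragment below is a condensed sketch that reuses only names and exatn calls already present in this commit (vec_net0, vec_tns1, ising_hamiltonian0, expect_val0, num_spin_sites) and assumes the surrounding scope of the test; the names "deflated" and "optimizer_next" are illustrative:

 //After the ground-state search converged with energy expect_val0 for the state held in vec_net0:
 vec_net0->markOptimizableNoTensors(); //freeze the converged state so it enters the projector as a constant
 std::vector<std::pair<unsigned int, unsigned int>> ket_pairing(num_spin_sites), bra_pairing(num_spin_sites);
 for(unsigned int i = 0; i < num_spin_sites; ++i) ket_pairing[i] = bra_pairing[i] = std::make_pair(i,i);
 //Rank-1 component |psi0><psi0| scaled by -E0, giving H1 = H0 - E0 * |psi0><psi0|:
 auto projector0 = exatn::makeSharedTensorOperator("Projector0",vec_net0,vec_net0,
                                                   ket_pairing,bra_pairing,-expect_val0);
 auto deflated = exatn::combineTensorOperators(*ising_hamiltonian0,*projector0);
 //Optimize a fresh ansatz against the deflated operator to reach the next state:
 exatn::TensorNetworkOptimizer optimizer_next(deflated,vec_tns1,1e-4);
 if(optimizer_next.optimize()) std::cout << optimizer_next.getExpectationValue() << std::endl;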
/** ExaTN::Numerics: Tensor network
-REVISION: 2021/10/17
+REVISION: 2021/10/26
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -2220,6 +2220,12 @@ void TensorNetwork::markOptimizableAllTensors()
}
+void TensorNetwork::markOptimizableNoTensors()
+{
+ return markOptimizableTensors([](const Tensor &){return false;});
+}
double TensorNetwork::getContractionCost(unsigned int left_id, unsigned int right_id,
double * total_volume, double * diff_volume,
double * arithm_intensity, bool adjust_cost)
......
/** ExaTN::Numerics: Tensor network
-REVISION: 2021/10/17
+REVISION: 2021/10/26
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -441,6 +441,7 @@ public:
will become subject to optimization when optimizing the tensor network. **/
void markOptimizableTensors(std::function<bool (const Tensor &)> predicate);
void markOptimizableAllTensors(); //marks all input tensors as optimizable
+void markOptimizableNoTensors(); //marks no input tensors as optimizable
/** Returns the FMA flop count for a given contraction of two tensors identified by their ids
in the tensor network. Optionally returns the volume difference. Optionally also returns
......
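A brief illustration of the three marking methods declared above (the tensor name "Q0" is hypothetical; this is not part of the commit):

 vec_net0->markOptimizableAllTensors(); //every input tensor becomes a variational parameter
 vec_net0->markOptimizableNoTensors();  //freezes the network, as done for converged states in the test
 //Fine-grained selection through the generic predicate overload:
 vec_net0->markOptimizableTensors([](const exatn::numerics::Tensor & tensor){
  return tensor.getName() != "Q0";      //hypothetical: optimize every input tensor except "Q0"
 });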
@@ -35,6 +35,7 @@ TensorOperator::TensorOperator(const std::string & name,
const auto shift = ket_network->getRank();
for(auto & pairing: shifted_bra_pairing) pairing.second += shift;
auto combined_network = makeSharedTensorNetwork(*ket_network,true,ket_network->getName());
+combined_network->conjugate();
auto success = combined_network->appendTensorNetwork(TensorNetwork(*bra_network,true,bra_network->getName()),{});
assert(success);
success = appendComponent(combined_network,ket_pairing,shifted_bra_pairing,coefficient);
@@ -247,4 +248,22 @@ void TensorOperator::printIt() const
} //namespace numerics
+std::shared_ptr<numerics::TensorOperator> combineTensorOperators(const numerics::TensorOperator & operator1,
+                                                                 const numerics::TensorOperator & operator2)
+{
+ auto operator_result = makeSharedTensorOperator(operator1.getName() + "+" + operator2.getName());
+ for(auto component = operator1.cbegin(); component != operator1.cend(); ++component){
+  auto success = operator_result->appendComponent(component->network,
+                                                  component->ket_legs,component->bra_legs,component->coefficient);
+  assert(success);
+ }
+ for(auto component = operator2.cbegin(); component != operator2.cend(); ++component){
+  auto success = operator_result->appendComponent(component->network,
+                                                  component->ket_legs,component->bra_legs,component->coefficient);
+  assert(success);
+ }
+ return operator_result;
+}
} //namespace exatn
@@ -186,6 +186,10 @@ inline std::shared_ptr<numerics::TensorOperator> makeSharedTensorOperator(Args&&
return std::make_shared<numerics::TensorOperator>(std::forward<Args>(args)...);
}
+/** Combines two tensor network operators together. **/
+std::shared_ptr<numerics::TensorOperator> combineTensorOperators(const numerics::TensorOperator & operator1,
+                                                                 const numerics::TensorOperator & operator2);
} //namespace exatn
#endif //EXATN_NUMERICS_TENSOR_OPERATOR_HPP_
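A minimal usage sketch for the new free function, with the operand names taken from the test in this commit (illustrative only):

 //Aggregates the components of both operands into a new operator named "IsingHamiltonian+Projector0"
 //(the result's name is operator1.getName() + "+" + operator2.getName(), per the definition above):
 auto combined = exatn::combineTensorOperators(*ising_hamiltonian0,*projector0);
 //combined->printIt(); //debug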