Commit 4eeccedc authored by Thien Nguyen

QAOA to use observable grouping if possible



Note: need to figure out how to handle gradients in this case (the grouped path currently skips the gradient strategy).
Signed-off-by: Thien Nguyen <nguyentm@ornl.gov>
parent c94a5dc0
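In outline, the change makes QAOA::execute hand the target accelerator to PauliOperator::observe so that an observable whose non-identity terms all commute (such as a max-cut cost Hamiltonian) can be measured with a single circuit on shots-based backends. A minimal sketch of that dispatch, using only calls that appear in the hunks below; the free function and its name are illustrative, not part of the commit:

#include <memory>
#include <vector>
#include "Observable.hpp"
#include "PauliOperator.hpp"
#include "xacc.hpp"

// Illustrative helper: grouped observation is only attempted for
// PauliOperator, which needs the accelerator to check whether it runs in
// 'shots' mode; every other observable keeps the per-term path. Gradient
// strategies also keep the per-term path for now (see the note above).
std::vector<std::shared_ptr<xacc::CompositeInstruction>>
observeMaybeGrouped(xacc::Observable *costHamObs,
                    std::shared_ptr<xacc::Accelerator> qpu,
                    std::shared_ptr<xacc::CompositeInstruction> kernel) {
  if (dynamic_cast<xacc::quantum::PauliOperator *>(costHamObs)) {
    // Grouping path: at most one measured circuit for all commuting terms.
    return costHamObs->observe(kernel, {{"accelerator", qpu}});
  }
  // Fallback: one measured circuit per non-identity term.
  return costHamObs->observe(kernel);
}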
@@ -297,9 +297,8 @@ PauliOperator::observe(std::shared_ptr<CompositeInstruction> function, const Het
if (!shots_enabled) {
// Log that we cannot do grouping (since shots is not set)
std::cout << qpu->name()
<< " accelerator is not running 'shots' execution. Observable "
"grouping will be ignored.\n";
xacc::info(qpu->name() + " accelerator is not running 'shots' execution. "
"Observable grouping will be ignored.");
return observe(function);
}
// For this grouping, we only support *single* grouping,
@@ -431,7 +430,7 @@ PauliOperator::observe(std::shared_ptr<CompositeInstruction> function, const Het
meas->setParameter(0, classicalIdx);
gateFunction->addInstruction(meas);
}
std::cout << "Group observed circuit:\n" << gateFunction->toString() << "\n";
// std::cout << "Group observed circuit:\n" << gateFunction->toString() << "\n";
return {gateFunction};
}
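The fallback above is visible from client code: with a shots-based accelerator the two-argument observe returns a single grouped kernel, while a backend without shots logs the message above and should return one kernel per non-identity term. A standalone sketch under those assumptions (the expected counts are inferred from the hunks, not measured here):

#include <iostream>
#include "xacc.hpp"
#include "xacc_observable.hpp"

int main(int argc, char **argv) {
  xacc::Initialize(argc, argv);
  // Triangle-graph max-cut Hamiltonian, as used in the new tests below.
  auto H = xacc::quantum::getObservable(
      "pauli", std::string("1.5 I - 0.5 Z0 Z1 - 0.5 Z0 Z2 - 0.5 Z1 Z2"));
  // Empty placeholder ansatz; any CompositeInstruction would do here.
  auto ansatz = xacc::getIRProvider("quantum")->createComposite("ansatz");

  // Shots-based backend: grouping should apply, yielding a single circuit.
  auto shotsQpu = xacc::getAccelerator("qpp", {{"shots", 8192}});
  std::cout << "grouped kernels: "
            << H->observe(ansatz, {{"accelerator", shotsQpu}}).size() << "\n";

  // No 'shots' option: grouping is ignored (per the log message above) and
  // observation should fall back to one circuit per non-identity term.
  auto exactQpu = xacc::getAccelerator("qpp");
  std::cout << "per-term kernels: "
            << H->observe(ansatz, {{"accelerator", exactQpu}}).size() << "\n";

  xacc::Finalize();
  return 0;
}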
@@ -152,10 +152,67 @@ void QAOA::execute(const std::shared_ptr<AcceleratorBuffer> buffer) const {
kernel->expand(m);
}
// Handle Max-cut optimization on shots-based backends (including physical
// backends). We only want to execute a single circuit for observable with all
// commuting terms such as the maxcut Hamiltonian.
// Limitation: this grouping cannot handle gradient strategy at the moment.
// Observe the cost Hamiltonian:
auto kernels = m_costHamObs->observe(kernel);
auto kernels = [&] {
if (dynamic_cast<xacc::quantum::PauliOperator *>(m_costHamObs)) {
return m_costHamObs->observe(kernel, {{"accelerator", m_qpu}});
} else {
return m_costHamObs->observe(kernel);
}
}();
// Grouping is possible (no gradient strategy)
// TODO: Gradient strategy to handle grouping as well.
int iterCount = 0;
if (m_costHamObs->getNonIdentitySubTerms().size() > 1 &&
kernels.size() == 1 && !gradientStrategy) {
OptFunction f(
[&, this](const std::vector<double> &x, std::vector<double> &dx) {
auto tmpBuffer = xacc::qalloc(buffer->size());
std::vector<std::shared_ptr<CompositeInstruction>> fsToExec{
kernels[0]->operator()(x)};
m_qpu->execute(tmpBuffer, fsToExec);
double energy = m_costHamObs->postProcess(tmpBuffer);
// We will only have one child buffer for each parameter set.
assert(tmpBuffer->getChildren().size() == 1);
auto result_buf = tmpBuffer->getChildren()[0];
result_buf->addExtraInfo("parameters", x);
result_buf->addExtraInfo("energy", energy);
buffer->appendChild("Iter" + std::to_string(iterCount), result_buf);
std::stringstream ss;
ss << "Iter " << iterCount << ": E("
<< (!x.empty() ? std::to_string(x[0]) : "");
for (int i = 1; i < x.size(); i++) {
ss << "," << std::setprecision(3) << x[i];
if (i > 4) {
// Don't print too many params
ss << ", ...";
break;
}
}
ss << ") = " << std::setprecision(12) << energy;
xacc::info(ss.str());
iterCount++;
if (m_maximize)
energy *= -1.0;
return energy;
}, kernel->nVariables());
auto result = m_optimizer->optimize(f);
// Reports the final cost:
double finalCost = result.first;
if (m_maximize)
finalCost *= -1.0;
buffer->addExtraInfo("opt-val", ExtraInfo(finalCost));
buffer->addExtraInfo("opt-params", ExtraInfo(result.second));
return;
}
// Construct the optimizer/minimizer:
OptFunction f(
[&, this](const std::vector<double> &x, std::vector<double> &dx) {
@@ -315,8 +372,28 @@ QAOA::execute(const std::shared_ptr<AcceleratorBuffer> buffer,
m_single_exec_kernel = kernel;
}
// Observe the cost Hamiltonian:
auto kernels = m_costHamObs->observe(kernel);
// Observe the cost Hamiltonian, with the input Accelerator:
// i.e. perform grouping (e.g. max-cut QAOA, Pauli) if possible:
auto kernels = [&] {
if (dynamic_cast<xacc::quantum::PauliOperator *>(m_costHamObs)) {
return m_costHamObs->observe(kernel, {{"accelerator", m_qpu}});
} else {
return m_costHamObs->observe(kernel);
}
}();
if (m_costHamObs->getNonIdentitySubTerms().size() > 1 &&
kernels.size() == 1) {
// Grouping was done:
// just execute the single observed kernel:
std::vector<std::shared_ptr<CompositeInstruction>> fsToExec{
kernels[0]->operator()(x)};
m_qpu->execute(buffer, fsToExec);
const double finalCost = m_costHamObs->postProcess(buffer);
// std::cout << "Compute energy from grouping: " << finalCost << "\n";
return { finalCost };
}
std::vector<double> coefficients;
std::vector<std::string> kernelNames;
std::vector<std::shared_ptr<CompositeInstruction>> fsToExec;
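In the grouped path above, the energy is obtained from m_costHamObs->postProcess(...) on the buffer of measured bitstrings rather than from per-term expectation values. A conceptual sketch of what that post-processing amounts to for an all-Z observable; this is an assumption about the idea, not the actual XACC implementation, and it further assumes the character at position q of a bitstring corresponds to qubit q and that the counts map is non-empty:

#include <map>
#include <string>
#include <utility>
#include <vector>

double expectationFromCounts(
    const std::map<std::string, int> &counts,
    // Each Z...Z term as the qubit indices it acts on plus its coefficient,
    // e.g. {{{0, 1}, -0.5}, {{0, 2}, -0.5}, {{1, 2}, -0.5}} for the triangle.
    const std::vector<std::pair<std::vector<int>, double>> &zTerms,
    double identityCoeff) {
  int totalShots = 0;
  for (const auto &kv : counts)
    totalShots += kv.second;
  double energy = identityCoeff;
  for (const auto &term : zTerms) {
    double expZ = 0.0;
    for (const auto &kv : counts) {
      // Parity of the measured bits the term acts on: even -> +1, odd -> -1.
      int ones = 0;
      for (int q : term.first) {
        if (kv.first[q] == '1')
          ones++;
      }
      expZ += ((ones % 2 == 0) ? 1.0 : -1.0) * kv.second;
    }
    energy += term.second * (expZ / totalShots);
  }
  return energy;
}

For the triangle Hamiltonian 1.5 I - 0.5 (Z0 Z1 + Z0 Z2 + Z1 Z2) used in the new tests, a bitstring that cuts two edges (e.g. "011") makes two terms odd-parity and one even, giving 1.5 + 0.5 + 0.5 - 0.5 = 2.0, matching the opt-val the checkMaxCutGrouping test expects.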
@@ -124,7 +124,7 @@ TEST(QAOATester, checkInitialStateConstruction) {
auto acc = xacc::getAccelerator("qpp");
auto buffer = xacc::qalloc(2);
auto optimizer = xacc::getOptimizer("nlopt", {{"initial-parameters", random_vector(-2., 2., 8)}});
auto optimizer = xacc::getOptimizer("nlopt", {{"initial-parameters", random_vector(-2., 2., 12)}});
auto qaoa = xacc::getService<Algorithm>("QAOA");
// Create deuteron Hamiltonian
auto H_N_2 = xacc::quantum::getObservable(
@@ -142,7 +142,7 @@ TEST(QAOATester, checkInitialStateConstruction) {
std::make_pair("optimizer", optimizer),
std::make_pair("observable", H_N_2),
// number of time steps (p) param
std::make_pair("steps", 4),
std::make_pair("steps", 6),
std::make_pair("initial-state", initial_program),
std::make_pair("parameter-scheme", "Standard")}));
qaoa->execute(buffer);
@@ -243,6 +243,75 @@ TEST(QAOATester, checkMaxCut) {
// EXPECT_NEAR((*buffer)["opt-val"].as<double>(), 2.0, 1e-3);
}
TEST(QAOATester, checkMaxCutGrouping) {
auto acc = xacc::getAccelerator("qpp", {{"shots", 8192}});
auto buffer = xacc::qalloc(3);
xacc::set_verbose(true);
auto optimizer = xacc::getOptimizer(
"nlopt", {{"ftol", 0.001},
{"maximize", true},
{"initial-parameters", random_vector(-2., 2., 2)}});
auto qaoa = xacc::getService<Algorithm>("maxcut-qaoa");
auto graph = xacc::getService<xacc::Graph>("boost-digraph");
// Triangle graph
for (int i = 0; i < 3; i++) {
graph->addVertex();
}
graph->addEdge(0, 1);
graph->addEdge(0, 2);
graph->addEdge(1, 2);
const bool initOk = qaoa->initialize(
{std::make_pair("accelerator", acc),
std::make_pair("optimizer", optimizer),
std::make_pair("graph", graph),
// number of time steps (p) param
std::make_pair("steps", 1),
// "Standard" or "Extended"
std::make_pair("parameter-scheme", "Standard")});
qaoa->execute(buffer);
buffer->print();
std::cout << "Opt-val: " << (*buffer)["opt-val"].as<double>() << "\n";
// There seems to be a local minimum at 1.5 as well...
EXPECT_NEAR((*buffer)["opt-val"].as<double>(), 2.0, 0.1);
}
TEST(QAOATester, checkP1TriangleGraphGroupingExpVal) {
auto acc = xacc::getAccelerator("aer", {{"shots", 8192}});
auto optimizer = xacc::getOptimizer(
"nlopt",
{{"maximize", true}, {"initial-parameters", random_vector(-2., 2., 2)}});
auto H = xacc::quantum::getObservable(
"pauli", std::string("1.5 I - 0.5 Z0 Z1 - 0.5 Z0 Z2 - 0.5 Z1 Z2"));
auto qaoa = xacc::getAlgorithm("QAOA", {{"accelerator", acc},
{"optimizer", optimizer},
{"observable", H},
{"steps", 1},
{"parameter-scheme", "Standard"}});
auto all_betas =
xacc::linspace(-xacc::constants::pi / 4., xacc::constants::pi / 4., 20);
auto all_gammas =
xacc::linspace(-xacc::constants::pi, xacc::constants::pi, 20);
for (auto gamma : all_gammas) {
for (auto beta : all_betas) {
auto buffer = xacc::qalloc(3);
auto cost = qaoa->execute(buffer, std::vector<double>{gamma, beta})[0];
auto d = 1;
auto e = 1;
auto f = 1;
auto theory = 3 * (.5 +
.25 * std::sin(4 * beta) * std::sin(gamma) *
(std::cos(gamma) + std::cos(gamma)) -
.25 * std::sin(2 * beta) * std::sin(2 * beta) *
(std::pow(std::cos(gamma), d + e - 2 * f)) *
(1 - std::cos(2 * gamma)));
// std::cout << "Cost = " << cost << "; expected = " << theory << "\n";
EXPECT_NEAR(cost, theory, 0.1);
}
}
}
int main(int argc, char **argv) {
xacc::Initialize(argc, argv);
::testing::InitGoogleTest(&argc, argv);
@@ -50,7 +50,9 @@ std::string hex_string_to_binary_string(std::string hex) {
HeterogeneousMap AerAccelerator::getProperties() {
auto props = physical_backend_properties;
// Insert 'shots' data
props.insert("shots", m_shots);
if (m_simtype == "qasm") {
props.insert("shots", m_shots);
}
return props;
}
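The reason 'shots' is now reported only for the "qasm" simulator type: the grouping check in PauliOperator::observe (first hunk of this commit) decides whether a backend counts as shots-based from the accelerator's reported properties, so a statevector run must not advertise a shot count. A sketch of that check under the assumption that it simply keys off an integer 'shots' property, as the props.insert call above suggests; the helper name is illustrative:

#include <memory>
#include "Accelerator.hpp"
#include "xacc.hpp"

// Assumed logic, not the exact implementation: a backend is treated as
// shots-based only if its properties expose an integer 'shots' entry.
bool isShotsEnabled(const std::shared_ptr<xacc::Accelerator> &qpu) {
  auto props = qpu->getProperties();
  // With the AerAccelerator change above, the statevector simulator no longer
  // reports 'shots', so PauliOperator::observe falls back to per-term mode.
  return props.keyExists<int>("shots");
}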