Commit c301710a authored by Mccaskey, Alex's avatar Mccaskey, Alex
Browse files

major update to objective function / taskInitiate to better line up with spec + xacc algorithms

parent cbb4ead7
Loading
Loading
Loading
Loading
+9 −23
Original line number Diff line number Diff line
#include "qcor.hpp"

__qpu__ void ansatz(qreg q, double theta, std::shared_ptr<xacc::Observable> H) {
__qpu__ void ansatz(qreg q, double theta) {
  X(q[0]);
  exp_i_theta(q, theta, H);
  auto exponent_op = X(0) * Y(1) - Y(0) * X(1);
  exp_i_theta(q, theta, exponent_op);
}

int main(int argc, char **argv) {
@@ -10,31 +10,17 @@ int main(int argc, char **argv) {
  auto q = qalloc(2);

  // Create the Deuteron Hamiltonian
  auto H = qcor::createObservable(
  auto H = createObservable(
      "5.907 - 2.1433 X0X1 - 2.1433 Y0Y1 + .21829 Z0 - 6.125 Z1");

  // Create the Ansatz exponent Operator
  auto ansatz_exponent = qcor::createObservable("X0 Y1 - Y0 X1");

  // Create an objective function to optimize
  // Each call evaluates E(theta) = <ansatz(theta) | H | ansatz(theta)>
  qcor::OptFunction opt_func(
      [&](const std::vector<double> &x, std::vector<double> &grad) -> double {
        
        // Perform Observable observation and evaluate at the given ansatz parameters
        auto e = qcor::observe(ansatz, H, q, x[0], ansatz_exponent);

        // Need to clean the current qubit register
        q.reset();
        return e;
      },
      1);
  // Create the objective function
  auto objective = createObjectiveFunction(ansatz, H, q, 1);
  
  // Create a qcor Optimizer
  auto optimizer = qcor::createOptimizer("nlopt");
  auto optimizer = createOptimizer("nlopt");

  // Optimize the above function
  auto result = optimizer->optimize(opt_func);
  auto result = optimizer->optimize(*objective.get());

  // Print the result
  printf("energy = %f\n", result.first);
+8 −16
Original line number Diff line number Diff line
#include "qcor.hpp"

__qpu__ void ansatz(qreg q, double theta) {
  X(q[0]);
@@ -10,31 +9,24 @@ int main(int argc, char **argv) {
  // Allocate 2 qubits
  auto q = qalloc(2);

  // Create the Deuteron Hamiltonian (Observable)
  auto H = 5.907 - 2.1433 * qcor::X(0) * qcor::X(1) -
           2.1433 * qcor::Y(0) * qcor::Y(1) + .21829 * qcor::Z(0) -
           6.125 * qcor::Z(1);
  // Create the Deuteron Hamiltonian
  auto H = 5.907 - 2.1433 * X(0) * X(1) - 2.1433 * Y(0) * Y(1) + .21829 * Z(0) -
           6.125 * Z(1);

  // Create the ObjectiveFunction, here we want to run VQE
  // need to provide ansatz and the Observable
  auto objective = qcor::createObjectiveFunction("vqe", ansatz, H);
  // Create the ObjectiveFunction
  auto objective = qcor::createObjectiveFunction(ansatz, H, q, 1);

  // Create the Optimizer
  auto optimizer = qcor::createOptimizer("nlopt");
  auto optimizer = createOptimizer("nlopt");

  // Call taskInitiate, kick off optimization of the given
  // functor dependent on the ObjectiveFunction, async call
  auto handle = qcor::taskInitiate(
      objective, optimizer,
      [&](const std::vector<double> x, std::vector<double> &dx) {
        return (*objective)(q, x[0]);
      },
      1);
  auto handle = taskInitiate(objective, optimizer);

  // Go do other work...

  // Query results when ready.
  auto results = qcor::sync(handle);
  auto results = sync(handle);

  // Print the optimal value.
  printf("<H> = %f\n", results.opt_val);
+0 −5
Original line number Diff line number Diff line
@@ -27,11 +27,6 @@ int main() {
  // Create the QAOA instance
  QAOA qaoa(H, ref_ham, steps);

  // we're using a gradient-based optimizer, 
  // by default QAOA will use parameter-shift-rule, 
  // here we set the scale factor. 
  qaoa.set_parameter_shift_scale_factor(.1);

  // Execute synchronously and display
  const auto [energy, params] = qaoa.execute(lbfgs);

+9 −8
Original line number Diff line number Diff line
@@ -26,7 +26,7 @@ __qpu__ void ansatz(qreg q, double x) {
// using the exp_i_theta instruction
__qpu__ void ansatz_vec(qreg q, std::vector<double> x) {
  X(q[0]);
  auto ansatz_exponent = qcor::X(0) * qcor::Y(1) - qcor::Y(0) * qcor::X(1);
  auto ansatz_exponent = X(0) * Y(1) - Y(0) * X(1);
  exp_i_theta(q, x[0], ansatz_exponent);
}

@@ -44,13 +44,13 @@ __qpu__ void xasm_open_qasm_mixed_ansatz(qreg q, double xx) {
int main() {

  // Define the Hamiltonian using the QCOR API
  auto H = 5.907 - 2.1433 * qcor::X(0) * qcor::X(1) -
           2.1433 * qcor::Y(0) * qcor::Y(1) + .21829 * qcor::Z(0) -
           6.125 * qcor::Z(1);
  auto H = 5.907 - 2.1433 * X(0) * X(1) -
           2.1433 * Y(0) * Y(1) + .21829 * Z(0) -
           6.125 * Z(1);

  // Create a VQE instance, must give it
  // the parameterized ansatz functor and Observable
  qcor::VQE vqe(ansatz, H);
  VQE vqe(ansatz, H);

  // Execute synchronously, providing the initial parameters to
  // start the optimization at
@@ -59,7 +59,7 @@ int main() {

  // Now do the same for the vector double ansatz, but
  // also demonstrate the async interface
  qcor::VQE vqe_vec(ansatz_vec, H);
  VQE vqe_vec(ansatz_vec, H);
  auto handle = vqe_vec.execute_async(std::vector<double>{0.0});

  // Can go do other work, quantum execution is happening on
@@ -73,10 +73,11 @@ int main() {
  // Now run with the mixed language kernel,
  // initialize the optimization to x = .55, also
  // use a custom Optimizer (gradient enabled)
  auto optimizer = qcor::createOptimizer(
  auto optimizer = createOptimizer(
      "nlopt", {std::make_pair("nlopt-optimizer", "l-bfgs"),
                std::make_pair("nlopt-maxeval", 20)});
  qcor::VQE vqe_openqasm(xasm_open_qasm_mixed_ansatz, H);
  VQE vqe_openqasm(xasm_open_qasm_mixed_ansatz, H,
                         {{"gradient-strategy", "central"}});
  const auto [energy_oq, params_oq] = vqe_openqasm.execute(optimizer, .55);

  std::cout << "<H>(" << params_oq[0] << ") = " << energy_oq << "\n";
+31 −33
Original line number Diff line number Diff line
#include <random>

__qpu__ void qaoa_ansatz(qreg q, int n_steps, std::vector<double> gamma,
                         std::vector<double> beta,
                         qcor::PauliOperator& cost_ham) {
                         std::vector<double> beta, std::string cost_ham_str) {

  // Local Declarations
  auto nQubits = q.size();
@@ -14,6 +13,9 @@ __qpu__ void qaoa_ansatz(qreg q, int n_steps, std::vector<double> gamma,
    H(q[0]);
  }

  auto cost_ham_ptr = createObservable(cost_ham_str);
  auto& cost_ham = *cost_ham_ptr.get();

  // Get all non-identity hamiltonian terms
  // for the following exp(H_i) trotterization
  auto cost_terms = cost_ham.getNonIdentitySubTerms();
@@ -38,7 +40,7 @@ __qpu__ void qaoa_ansatz(qreg q, int n_steps, std::vector<double> gamma,

    // Add the reference hamiltonian term
    for (int i = 0; i < nQubits; i++) {
      auto ref_ham_term = qcor::X(i);
      auto ref_ham_term = X(i);
      auto m_beta = beta[beta_counter];
      exp_i_theta(q, m_beta, ref_ham_term);
      beta_counter++;
@@ -58,51 +60,47 @@ int main(int argc, char **argv) {
  int total_params = nSteps * nParamsPerStep;

  // Generate a random initial parameter set
  std::random_device rnd_device;
  std::mt19937 mersenne_engine{rnd_device()}; // Generates random integers
  std::uniform_real_distribution<double> dist{0, 1};
  auto gen = [&dist, &mersenne_engine]() { return dist(mersenne_engine); };
  std::vector<double> initial_params(total_params);
  std::generate(initial_params.begin(), initial_params.end(), gen);
  auto initial_params = random_vector(0., 1., total_params);

  // Construct the cost hamiltonian
  auto cost_ham =
      -5.0 - 0.5 * (qcor::Z(0) - qcor::Z(3) - qcor::Z(1) * qcor::Z(2)) -
      qcor::Z(2) + 2 * qcor::Z(0) * qcor::Z(2) + 2.5 * qcor::Z(2) * qcor::Z(3);

  // Create the VQE ObjectiveFunction, giving it the
  // ansatz and Observable (cost hamiltonian)
  auto objective = qcor::createObjectiveFunction("vqe", qaoa_ansatz, cost_ham);

  // Create the classical Optimizer
  auto optimizer = qcor::createOptimizer(
      "nlopt", {std::make_pair("initial-parameters", initial_params),
                std::make_pair("nlopt-maxeval", 100)});

  // Create mechanism for mapping Optimizer std::vector<double> parameters
  // to the ObjectiveFunction variadic arguments corresponding to the above
  // quantum kernel (qreg, int, vec<double>, vec<double>, PauliOperator)
  auto args_translation =
      qcor::TranslationFunctor<qreg, int, std::vector<double>,
                               std::vector<double>, qcor::PauliOperator>(
  auto cost_ham = -5.0 - 0.5 * (Z(0) - Z(3) - Z(1) * Z(2)) - Z(2) +
                  2 * Z(0) * Z(2) + 2.5 * Z(2) * Z(3);

  // FIXME, currently with args translator and make_tuple we aren't able 
  // to directly pass Observable&, so here we just map to a string and 
  // read the string to an Observable in the kernel
  auto args_translator =
      std::make_shared<ArgsTranslator<qreg, int, std::vector<double>,
                                      std::vector<double>, std::string>>(
          [&](const std::vector<double> x) {
            // split x into gamma and beta sets
            std::vector<double> gamma(x.begin(), x.begin() + nSteps * nGamma),
                beta(x.begin() + nSteps * nGamma,
                     x.begin() + nSteps * nGamma + nSteps * nBeta);
            return std::make_tuple(q, nSteps, gamma, beta, cost_ham);
            return std::make_tuple(q, nSteps, gamma, beta, cost_ham.toString());
          });
  qcor::set_verbose(true);

  // Create the VQE ObjectiveFunction
  auto objective = createObjectiveFunction(qaoa_ansatz, cost_ham,
                                           args_translator, q, total_params);

  // Create the classical Optimizer
  auto optimizer = createOptimizer(
      "nlopt", {std::make_pair("initial-parameters", initial_params),
                std::make_pair("nlopt-maxeval", 100)});

  
  set_verbose(true);

  // Launch the job asynchronously
  auto handle =
      qcor::taskInitiate(objective, optimizer, args_translation, total_params);
      taskInitiate(objective, optimizer);

  // Go do other work... if you want

  // Query results when ready.
  auto results = qcor::sync(handle);
  auto results = sync(handle);

  // Print the optimal value.
  printf("Min QUBO value = %f\n", results.opt_val);

}
Loading