Unverified Commit 1345cc04 authored by Mccaskey, Alex's avatar Mccaskey, Alex Committed by GitHub
Browse files

Merge pull request #159 from tnguyen-ornl/tnguyen/gradients

First pass of better support for gradients
parents a3390ac6 442c8f17
Loading
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -17,7 +17,6 @@ add_test(NAME qrt_simple-demo COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURREN
add_test(NAME qrt_deuteron_exp_inst COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURRENT_SOURCE_DIR}/deuteron/deuteron_exp_inst.cpp)
add_test(NAME qrt_deuteron_task_initiate COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURRENT_SOURCE_DIR}/deuteron/deuteron_task_initiate.cpp)
add_test(NAME qrt_qaoa_example COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURRENT_SOURCE_DIR}/qaoa/qaoa_example.cpp)
add_test(NAME qrt_simple-objective-function COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURRENT_SOURCE_DIR}/simple/simple-objective-function.cpp)
add_test(NAME qrt_qpe_example COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURRENT_SOURCE_DIR}/qpe/qpe_example_qrt.cpp)
add_test(NAME qrt_kernel_include COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURRENT_SOURCE_DIR}/simple/multiple_kernels.cpp)
add_test(NAME qrt_grover COMMAND ${CMAKE_BINARY_DIR}/qcor -c ${CMAKE_CURRENT_SOURCE_DIR}/grover/grover.cpp)
@@ -40,6 +39,9 @@ add_test(NAME quasimo_verified_qpe COMMAND ${CMAKE_BINARY_DIR}/qcor ${CMAKE_CURR
add_test(NAME hadamard_ctrl_test COMMAND ${CMAKE_BINARY_DIR}/qcor ${CMAKE_CURRENT_SOURCE_DIR}/ctrl-gates/simple_hadamard_test.cpp)
add_test(NAME multi_ctrl_test COMMAND ${CMAKE_BINARY_DIR}/qcor ${CMAKE_CURRENT_SOURCE_DIR}/ctrl-gates/multiple_controls.cpp)

add_qcor_compile_and_exe_test(qrt_obj_func_simple simple/simple-objective-function.cpp)
add_qcor_compile_and_exe_test(qrt_kernel_autograd_simple simple/gradients_optimization.cpp)
add_qcor_compile_and_exe_test(qrt_hybrid_vqe_exe hybrid/deuteron_h2_vqe.cpp)
add_qcor_compile_and_exe_test(qrt_bell_ctrl bell/bell_control.cpp)

# Lambda tests
+4 −1
Original line number Diff line number Diff line
@@ -56,12 +56,14 @@ int main() {
  // start the optimization at
  const auto [energy, params] = vqe.execute(0.0);
  std::cout << "<H>(" << params[0] << ") = " << energy << "\n";
  qcor_expect(std::abs(energy + 1.74886) < 0.1);

  // Now do the same for the vector double ansatz, but
  // also demonstrate the async interface
  VQE vqe_vec(ansatz_vec, H);
  const auto [energy_vec, params_vec] = vqe_vec.execute(std::vector<double>{0.0});
  std::cout << "<H>(" << params_vec[0] << ") = " << energy_vec << "\n";
  qcor_expect(std::abs(energy_vec + 1.74886) < 0.1);

  // Now run with the mixed language kernel,
  // initialize the optimization to x = .55, also
@@ -74,6 +76,7 @@ int main() {
  const auto [energy_oq, params_oq] = vqe_openqasm.execute(optimizer, .55);

  std::cout << "<H>(" << params_oq[0] << ") = " << energy_oq << "\n";
  qcor_expect(std::abs(energy_vec + 1.74886) < 0.1);
  
  // Can query information about the vqe run
  // Here, we get all parameter sets executed and corresponding energies seen
+97 −0
Original line number Diff line number Diff line
// Variational ansatz circuit taking a single rotation angle.
// Applies X on qubit 0, a Y-rotation by `theta` on qubit 1,
// then a CNOT with qubit 1 as control and qubit 0 as target.
__qpu__ void ansatz(qreg q, double theta) {
  X(q[0]);
  Ry(q[1], theta);
  CX(q[1], q[0]);
}

// Ansatz that takes a vector
// Ansatz that takes a vector
// Same circuit as ansatz(), but the rotation angle is supplied as
// angles[0] of a std::vector<double> parameter. Only the first element
// is read; extra elements are ignored.
__qpu__ void ansatz_vec(qreg q, std::vector<double> angles) {
  X(q[0]);
  Ry(q[1], angles[0]);
  CX(q[1], q[0]);
}

// Ansatz with an arbitrary signature
// Ansatz with an arbitrary signature
// Same circuit again, but the rotation angle is selected out of `angles`
// by the extra `idx` argument. Used below to demonstrate the
// ArgsTranslator path of the autograd helper, which maps a flat
// parameter vector onto this non-standard signature.
// NOTE(review): no bounds check on angles[idx] — caller must ensure
// idx < angles.size().
__qpu__ void ansatz_complex(qreg q, int idx, std::vector<double> angles) {
  X(q[0]);
  Ry(q[1], angles[idx]);
  CX(q[1], q[0]);
}

// Demonstrates the kernel autograd helper for three ansatz signatures:
// (1) a single double, (2) a std::vector<double>, and (3) an arbitrary
// signature bridged through an ArgsTranslator. Each case runs a
// gradient-based (L-BFGS) optimization of the deuteron Hamiltonian and
// checks the known ground-state energy of about -1.74886.
int main(int argc, char **argv) {
  // Programmer needs to set
  // the number of variational params
  auto n_variational_params = 1;

  // Create the Deuteron Hamiltonian
  auto H = 5.907 - 2.1433 * X(0) * X(1) - 2.1433 * Y(0) * Y(1) + .21829 * Z(0) -
           6.125 * Z(1);

  // Simple case 1: variational ansatz takes a single double
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    OptFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          // Fresh 2-qubit register per objective evaluation.
          auto q = qalloc(2);
          // Using kernel auto-gradient helper
          // Note: ansatz takes a single double argument => x[0]
          // Compile error if using the wrong signature.
          auto exp = ansatz::autograd(H, dx, q, x[0]);
          print("<E(", x[0], ") = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }

  // Simple case 2: variational ansatz takes a vector<double>
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    OptFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          auto q = qalloc(2);
          // Using kernel auto-gradient helper
          // Note: ansatz_vec takes a vector of double; hence just forward the
          // whole vector.
          auto exp = ansatz_vec::autograd(H, dx, q, x);
          print("<E(", x[0], ") = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }

  // Case 3: ansatz with an arbitrary signature, bridged by an
  // ArgsTranslator that maps the optimizer's flat parameter vector
  // onto the kernel's (qreg, int, vector<double>) arguments.
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    OptFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          // Using kernel auto-gradient helper
          // Needs to provide args translator for this complex kernel.
          auto quantum_reg = qalloc(2);
          int index = 0;
          // Take the parameters by const reference (no copy) and use a
          // distinct name to avoid shadowing the outer `x`.
          ArgsTranslator<qreg, int, std::vector<double>> args_translation(
              [&](const std::vector<double> &params) {
                return std::tuple(quantum_reg, index, params);
              });
          auto exp = ansatz_complex::autograd(H, dx, x, args_translation);
          print("<E(", x[0], ") = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }
}
+4 −4
Original line number Diff line number Diff line
@@ -25,9 +25,8 @@ int main(int argc, char **argv) {

  // Create the ObjectiveFunction, here we want to run VQE
  // need to provide ansatz, Operator, and qreg
  auto objective = createObjectiveFunction(
      ansatz, H, q, n_variational_params,
      {{"gradient-strategy", "parameter-shift"}});
  auto objective = createObjectiveFunction(ansatz, H, q, n_variational_params,
                                           {{"gradient-strategy", "central"}});

  // Create the Optimizer.
  auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
@@ -40,6 +39,7 @@ int main(int argc, char **argv) {
  // Query results when ready.
  auto results = sync(handle);
  printf("vqe-energy from taskInitiate = %f\n", results.opt_val);
  qcor_expect(std::abs(results.opt_val + 1.74886) < 0.1);
  
  for (auto &x : linspace(-constants::pi, constants::pi, 20)) {
    std::cout << x << ", " << (*objective)({x}) << "\n";
+29 −0
Original line number Diff line number Diff line
@@ -743,6 +743,35 @@ PYBIND11_MODULE(_pyqcor, m) {
        return qcor::observe(kernel, observable, q);
      },
      "");
  // Expose kernel auto-differentiation to Python: given a Python callable
  // that evaluates a kernel for a parameter vector, an observable, and the
  // current parameters x, return (cost value, gradient vector) computed with
  // the default gradient method.
  m.def(
      "internal_autograd",
      [](py::function &kernel_eval, qcor::PauliOperator &obs,
         std::vector<double> x) -> std::tuple<double, std::vector<double>> {
        try {
          // Wrap the Python callable as a C++ kernel evaluator; the cast
          // throws if the callable does not return a CompositeInstruction.
          std::function<std::shared_ptr<xacc::CompositeInstruction>(
              std::vector<double>)>
              cpp_kernel_eval = [&](std::vector<double> x_vec) {
                auto ret = kernel_eval(x_vec);
                auto kernel = ret.cast<std::shared_ptr<CompositeInstruction>>();
                return kernel;
              };

          // (typo fix: was `gradiend_method`)
          auto gradient_method = qcor::__internal__::get_gradient_method(
              qcor::__internal__::DEFAULT_GRADIENT_METHOD, cpp_kernel_eval,
              obs);

          auto program = cpp_kernel_eval(x);
          // Register must be large enough for both the observable and the
          // compiled program.
          auto q = ::qalloc(
              std::max((int)obs.nBits(), (int)program->nPhysicalBits()));
          auto cost_val = qcor::observe(program, obs, q);
          auto dx = (*gradient_method)(x, cost_val);
          return std::make_tuple(cost_val, dx);
        } catch (const std::exception &) {
          // Any failure (bad callable, bad cast, evaluation error) is
          // reported uniformly; qcor::error raises, so the return below is
          // only a fallback for non-throwing error handlers.
          qcor::error("Invalid kernel evaluator.");
          return std::make_tuple(0.0, std::vector<double>{});
        }
      },
      "");

  m.def("internal_get_all_instructions", []() -> std::vector<py::tuple> {
    auto insts = xacc::getServices<xacc::Instruction>();
Loading