Commit 50ec48a0 authored by Nguyen, Thien Minh

Added autograd with args_translator

This adds a QuantumKernel::autograd overload that takes an ArgsTranslator, so
observable gradients can be computed for kernels with arbitrary signatures
(not just a flat std::vector<double>), along with a test driving it through a
gradient-based (l-bfgs) NLopt optimizer.

Signed-off-by: Thien Nguyen <nguyentm@ornl.gov>
parent 0e373195
+1 −0
@@ -40,6 +40,7 @@ add_test(NAME hadamard_ctrl_test COMMAND ${CMAKE_BINARY_DIR}/qcor ${CMAKE_CURREN
add_test(NAME multi_ctrl_test COMMAND ${CMAKE_BINARY_DIR}/qcor ${CMAKE_CURRENT_SOURCE_DIR}/ctrl-gates/multiple_controls.cpp)

add_qcor_compile_and_exe_test(qrt_obj_func_simple simple/simple-objective-function.cpp)
add_qcor_compile_and_exe_test(qrt_kernel_autograd_simple simple/gradients_optimization.cpp)
add_qcor_compile_and_exe_test(qrt_bell_ctrl bell/bell_control.cpp)

# Lambda tests
+30 −0
@@ -11,6 +11,13 @@ __qpu__ void ansatz_vec(qreg q, std::vector<double> angles) {
  CX(q[1], q[0]);
}

// Ansatz with an arbitrary signature
__qpu__ void ansatz_complex(qreg q, int idx, std::vector<double> angles) {
  X(q[0]);
  Ry(q[1], angles[idx]);
  CX(q[1], q[0]);
}

int main(int argc, char **argv) {
  // Allocate 2 qubits
  auto q = qalloc(2);
@@ -64,4 +71,27 @@ int main(int argc, char **argv) {
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    OptFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          // Use the kernel's auto-gradient helper. Since this kernel's
          // signature takes more than a flat parameter vector, we must
          // provide an args translator.
          // Fixed (non-variational) kernel arguments:
          auto quantum_reg = qalloc(2);
          int index = 0;
          ArgsTranslator<qreg, int, std::vector<double>> args_translation(
              [&](const std::vector<double> &x_vals) {
                return std::tuple(quantum_reg, index, x_vals);
              });
          auto exp = ansatz_complex::autograd(H, dx, x, args_translation);
          print("<E(", x[0], ") = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }
}
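
For reference, the args translator's only job is to map the optimizer's flat
parameter vector onto the kernel's full argument tuple, capturing the fixed
arguments. A minimal illustrative sketch (names are hypothetical and mirror
the test above; this snippet is not part of the commit):

// Illustrative only: inside a qcor program.
auto q_sketch = qalloc(2);
int idx = 0;
ArgsTranslator<qreg, int, std::vector<double>> tr(
    [&](const std::vector<double> &x) {
      return std::tuple(q_sketch, idx, x);
    });
auto [reg, i, angles] = tr({0.5}); // yields (q_sketch, 0, {0.5})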
+31 −0
@@ -231,6 +231,37 @@ class QuantumKernel {
    return cost_val;
  }

  static double autograd(Observable &obs, std::vector<double> &dx,
                         std::vector<double> x,
                         ArgsTranslator<Args...> args_translator) {
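    // Evaluate the kernel (without executing it) into a CompositeInstruction
    // for a given flat parameter vector; the args translator rebuilds the
    // kernel's full argument tuple from that vector.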
    std::function<std::shared_ptr<xacc::CompositeInstruction>(
        std::vector<double>)>
        kernel_eval = [&](std::vector<double> x_vec) {
          auto eval_lambda = [&](Args... args) {
            auto tempKernel =
                qcor::__internal__::create_composite("__temp__autograd__");
            Derived derived(args...);
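            // Prevent execution on destruction; we only want to harvest
            // the kernel's instructions into tempKernel.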
            derived.disable_destructor = true;
            derived(args...);
            tempKernel->addInstructions(
                derived.parent_kernel->getInstructions());
            return tempKernel;
          };
          auto args_tuple = args_translator(x_vec);
          return std::apply(eval_lambda, args_tuple);
        };

    auto gradient_method = qcor::__internal__::get_gradient_method(
        qcor::__internal__::DEFAULT_GRADIENT_METHOD, kernel_eval, obs);

    // Compute the cost E(x) via observation, then have the gradient method
    // fill dx at the same point.
    auto kernel_observe = [&](Args... args) { return observe(obs, args...); };

    auto args_tuple = args_translator(x);
    const double cost_val = std::apply(kernel_observe, args_tuple);
    dx = (*gradient_method)(x, cost_val);
    return cost_val;
  }

  static std::string openqasm(Args... args) {
    Derived derived(args...);
    KernelSignature<Args...> callable(derived);
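
A note on the gradient step in autograd above: get_gradient_method resolves
qcor's DEFAULT_GRADIENT_METHOD, and the result is invoked as
dx = (*gradient_method)(x, cost_val). As a rough mental model only (not
qcor's implementation, which may use parameter-shift or other schemes), a
finite-difference gradient with that calling shape could look like the
following, where energy(x) stands for the observable expectation of the
evaluated kernel:

#include <functional>
#include <vector>

// Hypothetical sketch: central-difference gradient. qcor passes the
// already-computed cost value, which a forward-difference scheme would
// reuse; this central-difference variant recomputes both sides instead.
std::vector<double> central_difference_grad(
    const std::function<double(const std::vector<double> &)> &energy,
    std::vector<double> x, double step = 1e-4) {
  std::vector<double> dx(x.size(), 0.0);
  for (std::size_t i = 0; i < x.size(); ++i) {
    const double saved = x[i];
    x[i] = saved + step;
    const double plus = energy(x);  // E(x + step * e_i)
    x[i] = saved - step;
    const double minus = energy(x); // E(x - step * e_i)
    x[i] = saved;
    dx[i] = (plus - minus) / (2.0 * step);
  }
  return dx;
}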