Commit 8a20c22e authored by Nguyen, Thien Minh

Added autograd helper for double and vector

Also, fixed a copy-and-paste error.

Signed-off-by: Thien Nguyen <nguyentm@ornl.gov>
parent af2778fe
+1 −6
@@ -16,11 +16,6 @@ int main(int argc, char **argv) {
  auto H = 5.907 - 2.1433 * X(0) * X(1) - 2.1433 * Y(0) * Y(1) + .21829 * Z(0) -
           6.125 * Z(1);

-  // Create the ObjectiveFunction, here we want to run VQE
-  // need to provide ansatz, Operator, and qreg
-  auto objective = createObjectiveFunction(ansatz, H, q, n_variational_params,
-                                           {{"gradient-strategy", "central"}});
-
  // Create the Optimizer.
  auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
  OptFunction opt_function(
@@ -30,7 +25,7 @@ int main(int argc, char **argv) {
        print("<E(", x[0], ") = ", exp);
        return exp;
      },
-      1);
+      n_variational_params);

  auto [energy, opt_params] = optimizer->optimize(opt_function);
  print(energy);
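
Why the dimension fix matters: OptFunction's second argument is the problem size reported to the nlopt backend, so a hard-coded 1 would let the optimizer vary only x[0] of a multi-parameter ansatz (the dropped ObjectiveFunction block above was apparently unused once this path took over). A sketch of the corrected pattern; objective_value_and_gradient is an assumed placeholder, not this file's actual lambda body:

  auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
  OptFunction opt_function(
      [&](const std::vector<double> &x, std::vector<double> &dx) -> double {
        return objective_value_and_gradient(x, dx); // assumed helper
      },
      n_variational_params); // must match the ansatz's parameter count
  auto [energy, opt_params] = optimizer->optimize(opt_function);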
+27 −1
@@ -194,8 +194,13 @@ class QuantumKernel {
    std::function<std::shared_ptr<xacc::CompositeInstruction>(
        std::vector<double>)>
        kernel_eval = [q](std::vector<double> x) {
+          auto tempKernel =
+              qcor::__internal__::create_composite("__temp__autograd__");
           Derived derived(q, x[0]);
-          return derived.parent_kernel;
+          derived.disable_destructor = true;
+          derived(q, x[0]);
+          tempKernel->addInstructions(derived.parent_kernel->getInstructions());
+          return tempKernel;
        };

    auto gradiend_method = qcor::__internal__::get_gradient_method(
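
The rebuilt lambda fixes the earlier body, which returned derived.parent_kernel from a kernel that had never been invoked, so the composite it handed back was empty and, judging by the disable_destructor flag, the kernel would still submit itself at scope exit. Setting disable_destructor before calling derived(q, x[0]) turns that call into a pure circuit-construction step, and the instructions are copied into a fresh composite that outlives derived. A standard-library-only sketch of the build-don't-run pattern (every name below is illustrative, not qcor's):

  #include <string>
  #include <vector>

  struct KernelLike {                // stand-in for a generated kernel class
    bool disable_destructor = false;
    std::vector<std::string> instructions;
    void operator()(double x) {      // "building" just records instructions
      instructions.push_back("Ry(" + std::to_string(x) + ")");
    }
    ~KernelLike() {
      if (!disable_destructor) {
        // a real kernel would submit `instructions` for execution here
      }
    }
  };

  std::vector<std::string> build_only(double x) {
    KernelLike k;
    k.disable_destructor = true;     // build the circuit, never execute it
    k(x);
    return k.instructions;           // harvested before k's destructor runs
  }

  int main() { return build_only(0.5).size() == 1 ? 0 : 1; }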
@@ -205,6 +210,27 @@ class QuantumKernel {
    return cost_val;
  }

+  static double autograd(Observable &obs, std::vector<double> &dx, qreg q,
+                         std::vector<double> x) {
+    std::function<std::shared_ptr<xacc::CompositeInstruction>(
+        std::vector<double>)>
+        kernel_eval = [q](std::vector<double> x) {
+          auto tempKernel =
+              qcor::__internal__::create_composite("__temp__autograd__");
+          Derived derived(q, x);
+          derived.disable_destructor = true;
+          derived(q, x);
+          tempKernel->addInstructions(derived.parent_kernel->getInstructions());
+          return tempKernel;
+        };
+
+    auto gradiend_method = qcor::__internal__::get_gradient_method(
+        qcor::__internal__::DEFAULT_GRADIENT_METHOD, kernel_eval, obs);
+    const double cost_val = observe(obs, q, x);
+    dx = (*gradiend_method)(x, cost_val);
+    return cost_val;
+  }
+
  static std::string openqasm(Args... args) {
    Derived derived(args...);
    KernelSignature<Args...> callable(derived);
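
As a usage sketch (not part of this commit), the new vector overload would be called like this, assuming a qcor __qpu__ kernel ansatz(qreg, std::vector<double>) and the H and q from the deuteron-style example above:

  std::vector<double> x{0.1, 0.2};  // current variational parameters (hypothetical)
  std::vector<double> dx(x.size()); // autograd fills in dE/dx_i
  const double energy = ansatz::autograd(H, dx, q, x);
  // energy is <H> at x; dx now holds the gradient from the default method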
+1 −1
@@ -144,7 +144,7 @@ public:
    m_obs = xacc::as_shared_ptr(&obs);
    gradient_func = [&](const std::vector<double> &x,
                        double cost_val) -> std::vector<double> {
-      return run_gradient_strategy(x, cost_val, "forward", m_step, m_obs,
+      return run_gradient_strategy(x, cost_val, "central", m_step, m_obs,
                                   m_kernel_eval);
    };
  }
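
This one-word change appears to be the copy-and-paste error named in the commit message: a gradient strategy wired up as central-difference was delegating to the forward-difference rule. The distinction matters for both cost and accuracy: forward differencing reuses the already-computed cost_val and adds one evaluation per parameter with O(h) error, while central differencing spends two evaluations per parameter for O(h^2) error. A self-contained comparison, independent of qcor:

  #include <cmath>
  #include <cstdio>
  #include <vector>

  int main() {
    auto f = [](const std::vector<double> &x) { return std::sin(x[0]); };
    const std::vector<double> x{1.0};
    const double h = 1e-4;
    // forward: (f(x+h) - f(x)) / h reuses the known cost, O(h) error
    const double forward = (f({x[0] + h}) - f(x)) / h;
    // central: (f(x+h) - f(x-h)) / (2h), one extra sweep, O(h^2) error
    const double central = (f({x[0] + h}) - f({x[0] - h})) / (2 * h);
    std::printf("exact   %.10f\nforward %.10f\ncentral %.10f\n",
                std::cos(x[0]), forward, central);
  }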