Unverified commit 23581f9a, authored by Mccaskey, Alex and committed by GitHub

Merge pull request #233 from tnguyen-ornl/tnguyen/add-option-to-change-default-autograd-method

Added compiler switch to select kernel's autograd method
parents 61b96424 bb6513e0
+12 −3
// Compile with:
// qcor gradients_optimization.cpp
// By default, kernel::autograd will use `central` gradient strategy.
// This can be changed by using the `-autograd` qcor compiler flag:
// e.g.,
// qcor -autograd forward gradients_optimization.cpp
// Options: forward, backward, central
// Note: users may need to adjust the optimizer step size to guarantee
// convergence.
__qpu__ void ansatz(qreg q, double theta) {
  X(q[0]);
  Ry(q[1], theta);
@@ -33,7 +42,7 @@ int main(int argc, char **argv) {
  // Simple case 1: variational ansatz takes a single double
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
    ObjectiveFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          auto q = qalloc(2);
@@ -54,7 +63,7 @@ int main(int argc, char **argv) {
  // Simple case 2: variational ansatz takes a vector<double>
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
    ObjectiveFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          auto q = qalloc(2);
@@ -73,7 +82,7 @@ int main(int argc, char **argv) {
  }
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
    ObjectiveFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          // Using kernel auto-gradient helper
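For reference, the three `-autograd` options map onto the standard finite-difference estimators; central is second-order accurate in the step size, which is why it is the default. The sketch below is illustrative only (`fd_gradient` is an invented name, not the qcor API):

```cpp
#include <cmath>
#include <cstdio>
#include <functional>
#include <initializer_list>
#include <string>

// Illustrative only: standard finite-difference estimators for df/dx,
// selectable the way `-autograd` selects a kernel gradient strategy.
double fd_gradient(const std::function<double(double)> &f, double x, double h,
                   const std::string &method) {
  if (method == "forward") return (f(x + h) - f(x)) / h;   // O(h) error
  if (method == "backward") return (f(x) - f(x - h)) / h;  // O(h) error
  return (f(x + h) - f(x - h)) / (2.0 * h);                // "central": O(h^2) error
}

int main() {
  auto f = [](double x) { return std::sin(x); };  // exact gradient: cos(x)
  const double x = 0.5, h = 1e-4;
  for (const char *m : {"forward", "backward", "central"})
    std::printf("%-8s: % .10f (exact % .10f)\n", m, fd_gradient(f, x, h, m),
                std::cos(x));
}
```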
+1 −1
@@ -296,7 +296,7 @@ std::string run_token_collector(
          mutable_parameters.begin(),
          "__" + kernel_name + "_" + counter_str + "__compute_functor");

-      auto src_code = __internal__::qcor::construct_kernel_subtype(
+      auto src_code = ::__internal__::qcor::construct_kernel_subtype(
          tmpss.str(), internal_kernel_function_name, mutable_arg_types,
          mutable_parameters, bufferNames);
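The one-character fix above anchors the lookup of `__internal__` at the global namespace. Since this call sits inside `namespace qcor`, which (as the next hunk shows) declares its own `qcor::__internal__`, the unqualified name binds to the wrong namespace. A minimal reproduction of the ambiguity, with helper names invented for illustration:

```cpp
// Global-namespace entity, analogous to ::__internal__::qcor::construct_kernel_subtype.
namespace __internal__ { namespace qcor { void construct() {} } }

namespace qcor {
namespace __internal__ {}  // qcor also declares its own __internal__

void run_token_collector_sketch() {
  // __internal__::qcor::construct();  // error: binds to qcor::__internal__,
  //                                   // which has no nested `qcor`
  ::__internal__::qcor::construct();   // OK: lookup starts at the global scope
}
} // namespace qcor
```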

+0 −2
@@ -15,8 +15,6 @@
#include "xacc_internal_compiler.hpp"
namespace qcor {
namespace __internal__ {
-std::string DEFAULT_GRADIENT_METHOD = "central";
-
std::shared_ptr<GradientFunction>
get_gradient_method(const std::string &type,
                    std::shared_ptr<ObjectiveFunction> obj_func,
+0 −1
@@ -39,7 +39,6 @@ public:
};

namespace __internal__ {
-extern std::string DEFAULT_GRADIENT_METHOD;
std::shared_ptr<GradientFunction>
get_gradient_method(const std::string &type,
                    std::shared_ptr<ObjectiveFunction> obj_func,
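With both the definition and the extern declaration of DEFAULT_GRADIENT_METHOD removed, the default must now come from the compiler switch itself. One plausible lowering is sketched below; the macro name QCOR_AUTOGRAD_DEFAULT and the `-D` handoff are assumptions for illustration, not qcor's documented mechanism:

```cpp
#include <string>

// Hypothetical: the driver could translate `-autograd forward` into
// -DQCOR_AUTOGRAD_DEFAULT='"forward"' on the underlying compiler invocation.
#ifndef QCOR_AUTOGRAD_DEFAULT
#define QCOR_AUTOGRAD_DEFAULT "central"
#endif

namespace sketch {
// A compile-time constant replaces the old mutable global default.
inline const std::string &default_gradient_method() {
  static const std::string method = QCOR_AUTOGRAD_DEFAULT;
  return method;
}
} // namespace sketch
```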
+3 −3
@@ -87,7 +87,7 @@ class KernelForwardDifferenceGradient : public KernelGradientService {
    m_kernel_eval = _kernel_eval;
    gradient_func = [&](const std::vector<double> &x,
                        double cost_val) -> std::vector<double> {
      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
      return run_gradient_strategy(x, cost_val, "forward", -m_step, obs,
                                   m_kernel_eval);
    };
  }
@@ -101,7 +101,7 @@ class KernelForwardDifferenceGradient : public KernelGradientService {
    gradient_func = [&](const std::vector<double> &x,
                        double cost_val) -> std::vector<double> {
      auto obs = m_objFunc->get_observable();
      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
      return run_gradient_strategy(x, cost_val, "forward", -m_step, obs,
                                   m_objFunc->get_kernel_evaluator());
    };
  }
@@ -129,7 +129,7 @@ class KernelBackwardDifferenceGradient : public KernelGradientService {
    m_kernel_eval = kernel_eval;
    gradient_func = [&](const std::vector<double> &x,
                        double cost_val) -> std::vector<double> {
      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
      return run_gradient_strategy(x, cost_val, "backward", m_step, obs,
                                   m_kernel_eval);
    };
  }
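These last three fixes all hinge on keeping the strategy string and the step sign consistent: a forward difference evaluated with step −h is identically the backward difference with step +h, so mislabeling either one silently changes which estimator runs. A standalone check of that identity (plain C++, no qcor dependencies):

```cpp
#include <cassert>
#include <cmath>
#include <functional>

// Verify: forward difference with step -h == backward difference with step +h.
double forward_diff(const std::function<double(double)> &f, double x, double h) {
  return (f(x + h) - f(x)) / h;
}
double backward_diff(const std::function<double(double)> &f, double x, double h) {
  return (f(x) - f(x - h)) / h;
}

int main() {
  auto f = [](double x) { return std::exp(x); };
  const double x = 1.3, h = 1e-5;
  // The two estimators agree to floating-point roundoff.
  assert(std::fabs(forward_diff(f, x, -h) - backward_diff(f, x, h)) < 1e-9);
}
```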