examples/simple/gradients_optimization.cpp (+12 −3)

```diff
@@ -1,3 +1,12 @@
+// Compile with:
+// qcor gradients_optimization.cpp
+// By default, kernel::autograd will use the `central` gradient strategy.
+// This can be changed by using the `-autograd` qcor compiler flag:
+// e.g.,
+// qcor -autograd forward gradients_optimization.cpp
+// Options: forward, backward, central
+// Note: users may need to adjust the optimizer step size to guarantee
+// convergence.
 __qpu__ void ansatz(qreg q, double theta) {
   X(q[0]);
   Ry(q[1], theta);
@@ -33,7 +42,7 @@ int main(int argc, char **argv) {
   // Simple case 1: variational ansatz takes a single double
   {
     // Create the Optimizer (gradient-based)
-    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
+    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
     ObjectiveFunction opt_function(
         [&](const std::vector<double> &x, std::vector<double> &dx) {
           auto q = qalloc(2);
@@ -54,7 +63,7 @@ int main(int argc, char **argv) {
   // Simple case 2: variational ansatz takes a vector<double>
   {
     // Create the Optimizer (gradient-based)
-    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
+    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
     ObjectiveFunction opt_function(
         [&](const std::vector<double> &x, std::vector<double> &dx) {
           auto q = qalloc(2);
@@ -73,7 +82,7 @@ int main(int argc, char **argv) {
   }
   {
     // Create the Optimizer (gradient-based)
-    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
+    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
     ObjectiveFunction opt_function(
         [&](const std::vector<double> &x, std::vector<double> &dx) {
           // Using kernel auto-gradient helper
```
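For context, here is a minimal sketch of how one of these patched blocks plausibly reads end to end. Only the lambda shape, `qalloc(2)`, and the `createOptimizer` call are taken from the diff; the observable `H`, the `ansatz::autograd` signature, the parameter count, and the `optimizer->optimize(...)` call follow common qcor/xacc usage but are assumptions here, not part of this PR:

```cpp
// Sketch only: H's construction, the autograd signature, and optimize() are
// assumed from typical qcor/xacc patterns, not copied from this diff.
auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
ObjectiveFunction opt_function(
    [&](const std::vector<double> &x, std::vector<double> &dx) {
      auto q = qalloc(2);
      // kernel::autograd evaluates the cost and fills dx with the gradient,
      // using the strategy selected by -autograd (central by default).
      return ansatz::autograd(H, dx, q, x[0]); // hypothetical signature
    },
    1 /* assumed: one variational parameter */);
auto [opt_val, opt_params] = optimizer->optimize(opt_function);
```

The mlpack optimizer exposes an explicit `step-size` knob, which is why the new header comment warns that users may need to tune it for convergence.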
handlers/token_collector/token_collector_util.cpp (+1 −1)

```diff
@@ -296,7 +296,7 @@ std::string run_token_collector(
       mutable_parameters.begin(),
       "__" + kernel_name + "_" + counter_str + "__compute_functor");
-  auto src_code = __internal__::qcor::construct_kernel_subtype(
+  auto src_code = ::__internal__::qcor::construct_kernel_subtype(
       tmpss.str(), internal_kernel_function_name, mutable_arg_types,
       mutable_parameters, bufferNames);
```
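The added leading `::` roots the qualified name at the global namespace. That matters because this call site sits inside namespace `qcor`, which has its own nested `__internal__` namespace (see the next two files): unqualified lookup binds `__internal__` to `qcor::__internal__` first, and the lookup of `qcor` inside it then fails. A minimal standalone illustration of the lookup rule; the names mirror the PR, but the surrounding declarations are assumptions for demonstration:

```cpp
#include <iostream>

namespace __internal__ {
namespace qcor {
void construct_kernel_subtype() { std::cout << "::__internal__::qcor\n"; }
} // namespace qcor
} // namespace __internal__

namespace qcor {
namespace __internal__ {
// Sibling namespace that shadows ::__internal__ from inside qcor.
} // namespace __internal__

void run_token_collector() {
  // __internal__::qcor::construct_kernel_subtype();
  //   ^ error: lookup finds qcor::__internal__ first, which has no `qcor`.
  ::__internal__::qcor::construct_kernel_subtype(); // rooted at global scope
}
} // namespace qcor

int main() { qcor::run_token_collector(); }
```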
runtime/objectives/gradient_function.cpp (+0 −2)

```diff
@@ -15,8 +15,6 @@
 #include "xacc_internal_compiler.hpp"
 namespace qcor {
 namespace __internal__ {
-std::string DEFAULT_GRADIENT_METHOD = "central";
-
 std::shared_ptr<GradientFunction>
 get_gradient_method(const std::string &type,
                     std::shared_ptr<ObjectiveFunction> obj_func,
```
runtime/objectives/gradient_function.hpp (+0 −1)

```diff
@@ -39,7 +39,6 @@ public:
 };

 namespace __internal__ {
-extern std::string DEFAULT_GRADIENT_METHOD;
 std::shared_ptr<GradientFunction>
 get_gradient_method(const std::string &type,
                     std::shared_ptr<ObjectiveFunction> obj_func,
```
runtime/objectives/gradients/FiniteDifference/FiniteDifferenceGradients.cpp (+3 −3)

```diff
@@ -87,7 +87,7 @@ class KernelForwardDifferenceGradient : public KernelGradientService {
     m_kernel_eval = _kernel_eval;
     gradient_func = [&](const std::vector<double> &x,
                         double cost_val) -> std::vector<double> {
-      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
+      return run_gradient_strategy(x, cost_val, "forward", -m_step, obs,
                                    m_kernel_eval);
     };
   }
@@ -101,7 +101,7 @@ class KernelForwardDifferenceGradient : public KernelGradientService {
     gradient_func = [&](const std::vector<double> &x,
                         double cost_val) -> std::vector<double> {
       auto obs = m_objFunc->get_observable();
-      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
+      return run_gradient_strategy(x, cost_val, "forward", -m_step, obs,
                                    m_objFunc->get_kernel_evaluator());
     };
   }
@@ -129,7 +129,7 @@ class KernelBackwardDifferenceGradient : public KernelGradientService {
     m_kernel_eval = kernel_eval;
     gradient_func = [&](const std::vector<double> &x,
                         double cost_val) -> std::vector<double> {
-      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
+      return run_gradient_strategy(x, cost_val, "backward", m_step, obs,
                                    m_kernel_eval);
     };
   }
```
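The sign flips in the first two hunks are consistent with the finite-difference identity that a forward difference evaluated with step −h is exactly the backward difference: (f(x − h) − f(x)) / (−h) = (f(x) − f(x − h)) / h. A standalone numerical check of that identity, under the assumption (not shown in this diff) that `run_gradient_strategy` divides by the signed step it is given:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  auto f = [](double x) { return std::sin(x); };
  const double x = 0.7, h = 1e-5;
  // The three standard finite-difference stencils for f'(x).
  double forward       = (f(x + h) - f(x)) / h;
  double forward_neg_h = (f(x - h) - f(x)) / -h; // "forward" with step -h
  double backward      = (f(x) - f(x - h)) / h;
  double central       = (f(x + h) - f(x - h)) / (2 * h);
  std::printf("forward      %.10f\n", forward);
  std::printf("forward(-h)  %.10f  (identical to backward)\n", forward_neg_h);
  std::printf("backward     %.10f\n", backward);
  std::printf("central      %.10f  (exact f'(x) = %.10f)\n", central,
              std::cos(x));
  return 0;
}
```

The third hunk is a direct fix: `KernelBackwardDifferenceGradient` previously dispatched to the `"forward"` strategy and now requests `"backward"` as its name implies.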