Unverified commit 23581f9a authored by Mccaskey, Alex; committed by GitHub

Merge pull request #233 from tnguyen-ornl/tnguyen/add-option-to-change-default-autograd-method

Added compiler switch to select kernel's autograd method
parents 61b96424 bb6513e0
Pipeline #169389 passed in 62 minutes and 59 seconds
+// Compile with:
+//   qcor gradients_optimization.cpp
+// By default, kernel::autograd will use the `central` gradient strategy.
+// This can be changed via the `-autograd` qcor compiler flag:
+// e.g.,
+//   qcor -autograd forward gradients_optimization.cpp
+// Options: forward, backward, central
+// Note: users may need to adjust the optimizer step size to guarantee
+// convergence.
 __qpu__ void ansatz(qreg q, double theta) {
   X(q[0]);
   Ry(q[1], theta);
@@ -33,7 +42,7 @@ int main(int argc, char **argv) {
   // Simple case 1: variational ansatz takes a single double
   {
     // Create the Optimizer (gradient-based)
-    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
+    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
     ObjectiveFunction opt_function(
         [&](const std::vector<double> &x, std::vector<double> &dx) {
           auto q = qalloc(2);
@@ -54,7 +63,7 @@ int main(int argc, char **argv) {
   // Simple case 2: variational ansatz takes a vector<double>
   {
     // Create the Optimizer (gradient-based)
-    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
+    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
     ObjectiveFunction opt_function(
         [&](const std::vector<double> &x, std::vector<double> &dx) {
           auto q = qalloc(2);
@@ -73,7 +82,7 @@ int main(int argc, char **argv) {
   }
   {
     // Create the Optimizer (gradient-based)
-    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
+    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
     ObjectiveFunction opt_function(
         [&](const std::vector<double> &x, std::vector<double> &dx) {
           // Using kernel auto-gradient helper
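For orientation, the elided objective-function bodies in these three blocks all follow the same pattern. Below is a minimal sketch of the first case; the deuteron-style Hamiltonian, the `ansatz::autograd` call signature, and the `optimizer->optimize(...)` entry point are taken from typical qcor usage rather than from this diff, so treat them as assumptions:

// Sketch only: the Hamiltonian, the autograd signature, and the optimize()
// call are assumptions based on common qcor examples, not lines from this diff.
auto H = 5.907 - 2.1433 * X(0) * X(1) - 2.1433 * Y(0) * Y(1) +
         0.21829 * Z(0) - 6.125 * Z(1);
auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
ObjectiveFunction opt_function(
    [&](const std::vector<double> &x, std::vector<double> &dx) {
      auto q = qalloc(2);
      // Evaluate <H> at x; kernel autograd fills dx using the strategy
      // selected at compile time via -autograd (central by default).
      return ansatz::autograd(H, dx, q, x[0]);
    },
    1 /* number of variational parameters */);
auto [opt_val, opt_params] = optimizer->optimize(opt_function);

The switch from nlopt/l-bfgs to mlpack gradient descent is presumably what motivates the new step-size note in the header comment: a fixed 1e-2 step works for this example, but other ansätze may need tuning.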
@@ -296,7 +296,7 @@ std::string run_token_collector(
       mutable_parameters.begin(),
       "__" + kernel_name + "_" + counter_str + "__compute_functor");
-  auto src_code = __internal__::qcor::construct_kernel_subtype(
+  auto src_code = ::__internal__::qcor::construct_kernel_subtype(
       tmpss.str(), internal_kernel_function_name, mutable_arg_types,
       mutable_parameters, bufferNames);
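The leading `::` added here is not cosmetic: this same commit introduces a `qcor::__internal__` namespace (see the hunks below), so if the generated call site sits inside namespace `qcor`, unqualified lookup of `__internal__` now finds `qcor::__internal__` first and the call fails to resolve. A self-contained illustration of the lookup problem (hypothetical file, not from the repo):

#include <iostream>

namespace __internal__ {
namespace qcor {
void construct_kernel_subtype() { std::cout << "::__internal__::qcor\n"; }
} // namespace qcor
} // namespace __internal__

namespace qcor {
namespace __internal__ {} // analogous to the namespace introduced by this commit

void generated_code() {
  // Unqualified, `__internal__` resolves to qcor::__internal__, which has no
  // nested `qcor`, so the following would not compile:
  //   __internal__::qcor::construct_kernel_subtype();
  // Anchoring the lookup at the global namespace fixes it:
  ::__internal__::qcor::construct_kernel_subtype();
}
} // namespace qcor

int main() { qcor::generated_code(); }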
@@ -15,8 +15,6 @@
 #include "xacc_internal_compiler.hpp"
 namespace qcor {
 namespace __internal__ {
-std::string DEFAULT_GRADIENT_METHOD = "central";
 std::shared_ptr<GradientFunction>
 get_gradient_method(const std::string &type,
                     std::shared_ptr<ObjectiveFunction> obj_func,
@@ -39,7 +39,6 @@ public:
 };
 namespace __internal__ {
-extern std::string DEFAULT_GRADIENT_METHOD;
 std::shared_ptr<GradientFunction>
 get_gradient_method(const std::string &type,
                     std::shared_ptr<ObjectiveFunction> obj_func,
@@ -87,7 +87,7 @@ class KernelForwardDifferenceGradient : public KernelGradientService {
     m_kernel_eval = _kernel_eval;
     gradient_func = [&](const std::vector<double> &x,
                         double cost_val) -> std::vector<double> {
-      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
+      return run_gradient_strategy(x, cost_val, "forward", -m_step, obs,
                                    m_kernel_eval);
     };
   }
@@ -101,7 +101,7 @@ class KernelForwardDifferenceGradient : public KernelGradientService {
     gradient_func = [&](const std::vector<double> &x,
                         double cost_val) -> std::vector<double> {
       auto obs = m_objFunc->get_observable();
-      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
+      return run_gradient_strategy(x, cost_val, "forward", -m_step, obs,
                                    m_objFunc->get_kernel_evaluator());
     };
   }
@@ -129,7 +129,7 @@ class KernelBackwardDifferenceGradient : public KernelGradientService {
     m_kernel_eval = kernel_eval;
     gradient_func = [&](const std::vector<double> &x,
                         double cost_val) -> std::vector<double> {
-      return run_gradient_strategy(x, cost_val, "forward", m_step, obs,
+      return run_gradient_strategy(x, cost_val, "backward", m_step, obs,
                                    m_kernel_eval);
     };
   }
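For reference, the three selectable strategies are the standard finite-difference stencils: forward and backward are first-order accurate, central is second-order. The plain-C++ sketch below only illustrates the math, not the xacc gradient plugin these classes delegate to (the `-m_step` sign change above presumably compensates for that plugin's internal step convention):

#include <cmath>
#include <cstdio>
#include <functional>
#include <string>

// Standard finite-difference stencils matching the forward/backward/central
// options; illustration only, not the xacc plugin implementation.
double fd_gradient(const std::function<double(double)> &f, double x, double h,
                   const std::string &method) {
  if (method == "forward")
    return (f(x + h) - f(x)) / h; // O(h) error
  if (method == "backward")
    return (f(x) - f(x - h)) / h; // O(h) error
  return (f(x + h) - f(x - h)) / (2.0 * h); // central, O(h^2) error
}

int main() {
  auto f = [](double x) { return std::sin(x); };
  for (const char *m : {"forward", "backward", "central"})
    std::printf("%8s: %.8f (exact %.8f)\n", m, fd_gradient(f, 1.0, 1e-4, m),
                std::cos(1.0));
}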
@@ -64,6 +64,9 @@
 #endif
 #ifdef __internal__qcor__validate__execution
     xacc::internal_compiler::__validate_nisq_execution = true;
 #endif
+#ifdef __internal__qcor__autograd__method
+    xacc::internal_compiler::set_autograd_method(__internal__qcor__autograd__method);
+#endif
   }
 };
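This hook closes the loop with the driver change at the bottom of the diff: the qcor driver turns `-autograd forward` into `-D__internal__qcor__autograd__method="forward"`, so the macro expands to a string literal that the static initializer hands to set_autograd_method before main() runs. A stripped-down, hypothetical demo of the same define-to-runtime pattern (names are illustrative only):

// Compile with, e.g.:  g++ -DMY_METHOD=\"forward\" hook_demo.cpp
// The -D value is a quoted string literal, usable wherever a
// const char* is expected.
#include <iostream>
#include <string>

static std::string g_method = "central"; // built-in default

void set_method(const std::string &name) { g_method = name; }

// Mimics the qcor initializer pattern: a static object whose constructor
// runs before main() and applies the compile-time selection, if any.
struct Init {
  Init() {
#ifdef MY_METHOD
    set_method(MY_METHOD);
#endif
  }
} the_init;

int main() { std::cout << "gradient method: " << g_method << "\n"; }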
@@ -23,6 +23,12 @@
 #include "xacc_service.hpp"
 #include "qcor_observable.hpp"
+namespace qcor {
+namespace __internal__ {
+std::string DEFAULT_GRADIENT_METHOD = "central";
+}
+} // namespace qcor
 namespace xacc {
 namespace internal_compiler {
 // Extern vars:
@@ -101,6 +107,10 @@ std::string get_native_code(std::shared_ptr<qcor::CompositeInstruction> program,
   return get_qpu()->getNativeCode(program->as_xacc(), options);
 }
+void set_autograd_method(const std::string &method_name) {
+  qcor::__internal__::DEFAULT_GRADIENT_METHOD = method_name;
+}
 std::pair<bool, xacc::HeterogeneousMap>
 validate_backend_execution(std::shared_ptr<qcor::CompositeInstruction> program,
                            xacc::HeterogeneousMap options) {
@@ -268,6 +268,9 @@ void execute_pass_manager(
     std::shared_ptr<qcor::CompositeInstruction> optional_composite = nullptr);
 std::string get_native_code(std::shared_ptr<qcor::CompositeInstruction> program,
                             xacc::HeterogeneousMap options);
+void set_autograd_method(const std::string &method_name);
 // Hook to validate backend execution of a circuit,
 // e.g., via the circuit-mirror technique;
 // returns a pass/fail bool along with untyped result data.
@@ -278,6 +281,10 @@ std::pair<bool, xacc::HeterogeneousMap> validate_backend_execution(
 } // namespace internal_compiler
 } // namespace xacc
 namespace qcor {
+namespace __internal__ {
+// Default gradient method to use with a kernel's autograd method
+extern std::string DEFAULT_GRADIENT_METHOD;
+} // namespace __internal__
 // Ancilla qubit allocator:
 class AncQubitAllocator : public AllocEventListener, public QubitAllocator {
 public:
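Taken together, the header changes leave one mutable default (`qcor::__internal__::DEFAULT_GRADIENT_METHOD`, initialized to "central") and one public setter (`xacc::internal_compiler::set_autograd_method`). Assuming the kernel autograd path reads the default on each invocation (not shown in this diff), a program could also rebind it at runtime; a hedged sketch:

// Sketch only: assumes these declarations are reachable via qcor.hpp and
// that kernel autograd consults DEFAULT_GRADIENT_METHOD per invocation.
#include "qcor.hpp"
#include <iostream>

int main() {
  // "central" unless the program was compiled with -autograd <method>.
  std::cout << qcor::__internal__::DEFAULT_GRADIENT_METHOD << "\n";
  // Equivalent to compiling with `-autograd forward`, but done at runtime.
  xacc::internal_compiler::set_autograd_method("forward");
  std::cout << qcor::__internal__::DEFAULT_GRADIENT_METHOD << "\n";
}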
@@ -379,6 +379,8 @@ def main(argv=None):
     # Currently, this means enabling Pauli-randomized mirror-circuit benchmarking,
     # i.e., executing a set of benchmarking circuits, related to the original circuit, for which we know the expected bitstrings.
     parser.add_argument('-validate', metavar='', help='Enables backend execution validation via the mirror circuit technique.')
+    parser.add_argument('-autograd', metavar='', help='Specifies the kernel autograd method. Options: central (default), forward, backward.')
     args = parser.parse_args(sys.argv)
@@ -646,7 +648,13 @@ def main(argv=None):
     if '-D_QCOR_FTQC_RUNTIME' in sys.argv:
         print('Execution validation cannot be used with FTQC runtime.')
         exit(1)
+    if '-autograd' in sys.argv[1:]:
+        sidx = sys.argv.index('-autograd')
+        autogradMethodName = sys.argv[sidx+1]
+        sys.argv.remove(autogradMethodName)
+        sys.argv.remove('-autograd')
+        sys.argv += ['-D__internal__qcor__autograd__method=\"'+autogradMethodName+'\"']
     # if not empty
     if '@QCOR_APPEND_PLUGIN_PATH@':
         sys.argv += ['-D__internal__qcor__compile__plugin__path=\"@QCOR_APPEND_PLUGIN_PATH@\"']