runtime/objectives/gradient_function.cpp  +4 −5

@@ -5,14 +5,13 @@
 namespace qcor {
 namespace __internal__ {
-std::shared_ptr<GradientFunction> get_gradient_method(
-    const std::string &type, std::shared_ptr<ObjectiveFunction> obj_func,
-    std::function<std::shared_ptr<xacc::CompositeInstruction>(
-        std::vector<double>)> &kernel_eval) {
+std::shared_ptr<GradientFunction>
+get_gradient_method(const std::string &type,
+                    std::shared_ptr<ObjectiveFunction> obj_func) {
   if (!xacc::isInitialized())
     xacc::internal_compiler::compiler_InitializeXACC();
   auto service = xacc::getService<KernelGradientService>(type);
-  service->initialize(obj_func, kernel_eval);
+  service->initialize(obj_func);
   return service;
 }
 } // namespace __internal__

runtime/objectives/gradient_function.hpp  +6 −10

 #pragma once
-#include "heterogeneous.hpp"
 #include "Identifiable.hpp"
+#include "heterogeneous.hpp"
 #include <functional>
 #include <memory>
 #include <vector>

@@ -32,9 +32,7 @@ public:
 namespace __internal__ {
 std::shared_ptr<GradientFunction>
 get_gradient_method(const std::string &type,
-                    std::shared_ptr<ObjectiveFunction> obj_func,
-                    std::function<std::shared_ptr<xacc::CompositeInstruction>(
-                        std::vector<double>)> &kernel_eval);
+                    std::shared_ptr<ObjectiveFunction> obj_func);
 } // namespace __internal__

 // Interface for gradient calculation services.
@@ -42,12 +40,10 @@ get_gradient_method(const std::string &type,
 // thin wrapper around std::function, i.e. C++ lambda) so that users can define
 // it in-place if need be. We also provide a set of registered gradient
 // services implementing this interface.
 class KernelGradientService : public GradientFunction,
                               public xacc::Identifiable {
 public:
   virtual void initialize(std::shared_ptr<ObjectiveFunction> obj_func,
-                          std::function<std::shared_ptr<xacc::CompositeInstruction>(
-                              std::vector<double>)> &kernel_eval,
                           xacc::HeterogeneousMap &&options = {}) = 0;
 };
 } // namespace qcor
\ No newline at end of file
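Taken together, these two files shrink the gradient API: callers no longer thread a kernel evaluator through get_gradient_method and initialize, because the service can now pull it from the ObjectiveFunction itself. A minimal usage sketch under that reading follows; obj_func stands in for any initialized objective, and the include path and the direct getService call are illustrative assumptions, not part of this PR:

    #include "gradient_function.hpp" // assumed local include path
    #include "xacc_service.hpp"

    void configure_gradients(std::shared_ptr<qcor::ObjectiveFunction> obj_func) {
      // Simple path: type string + objective, options left at their defaults.
      auto grad = qcor::__internal__::get_gradient_method("central", obj_func);

      // get_gradient_method takes no options, so overriding the default 1e-7
      // step means fetching the service directly and initializing it with a
      // HeterogeneousMap carrying the "step" key the services below read.
      auto service = xacc::getService<qcor::KernelGradientService>("central");
      xacc::HeterogeneousMap opts;
      opts.insert("step", 1e-5);
      service->initialize(obj_func, std::move(opts));
    }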
runtime/objectives/gradients/FiniteDifference/FiniteDifferenceGradients.cpp  +98 −29

@@ -8,6 +8,37 @@
 #include "xacc_service.hpp"

 using namespace cppmicroservices;

+namespace {
+// Wrapper to call XACC numerical AlgorithmGradientStrategy:
+// TODO: implement QCOR native methods
+std::vector<double> run_gradient_strategy(
+    const std::vector<double> &x, double cost_val, const std::string &name,
+    double step, std::shared_ptr<xacc::Observable> observable,
+    std::function<std::shared_ptr<xacc::CompositeInstruction>(
+        std::vector<double>)>
+        kernel_eval) {
+  std::vector<double> gradients(x.size(), 0.0);
+  auto gradient_strategy =
+      xacc::getService<xacc::AlgorithmGradientStrategy>(name);
+  if (gradient_strategy->isNumerical() && observable->getIdentitySubTerm()) {
+    gradient_strategy->setFunctionValue(
+        cost_val - std::real(observable->getIdentitySubTerm()->coefficient()));
+  }
+  auto kernel = kernel_eval(x);
+  gradient_strategy->initialize({{"observable", observable},
+                                 {"step", step},
+                                 {"kernel-evaluator", kernel_eval}});
+  auto grad_kernels = gradient_strategy->getGradientExecutions(kernel, x);
+  const size_t nb_qubits = std::max(static_cast<size_t>(observable->nBits()),
+                                    kernel->nPhysicalBits());
+  auto tmp_grad = qalloc(nb_qubits);
+  xacc::internal_compiler::execute(tmp_grad.results(), grad_kernels);
+  auto tmp_grad_children = tmp_grad.results()->getChildren();
+  gradient_strategy->compute(gradients, tmp_grad_children);
+  return gradients;
+}
+} // namespace
+
 namespace qcor {
 class KernelForwardDifferenceGradient : public KernelGradientService {
 protected:
@@ -17,42 +48,80 @@ protected:
 public:
   const std::string name() const override { return "forward"; }
   const std::string description() const override { return ""; }
-  void initialize(
-      std::shared_ptr<ObjectiveFunction> obj_func,
-      std::function<std::shared_ptr<CompositeInstruction>(std::vector<double>)>
-          &kernel_eval,
-      HeterogeneousMap &&options) override {
+  void initialize(std::shared_ptr<ObjectiveFunction> obj_func,
+                  HeterogeneousMap &&options) override {
     m_objFunc = obj_func;
     if (options.keyExists<double>("step")) {
       m_step = options.get<double>("step");
     }
-    auto observable = m_objFunc->get_observable();
     gradient_func = [&](const std::vector<double> &x,
                         double cost_val) -> std::vector<double> {
-      std::vector<double> gradients(x.size(), 0.0);
-      // TODO: port the implementation here as well.
-      auto gradient_strategy =
-          xacc::getService<xacc::AlgorithmGradientStrategy>("forward");
-      if (gradient_strategy->isNumerical() &&
-          observable->getIdentitySubTerm()) {
-        gradient_strategy->setFunctionValue(
-            cost_val -
-            std::real(observable->getIdentitySubTerm()->coefficient()));
-      }
-      auto kernel = kernel_eval(x);
-      gradient_strategy->initialize({{"observable", observable},
-                                     {"step", m_step},
-                                     {"kernel-evaluator", kernel_eval}});
-      auto grad_kernels = gradient_strategy->getGradientExecutions(kernel, x);
-      const size_t nb_qubits = std::max(
-          static_cast<size_t>(observable->nBits()), kernel->nPhysicalBits());
-      auto tmp_grad = qalloc(nb_qubits);
-      xacc::internal_compiler::execute(tmp_grad.results(), grad_kernels);
-      auto tmp_grad_children = tmp_grad.results()->getChildren();
-      gradient_strategy->compute(gradients, tmp_grad_children);
-      return gradients;
+      return run_gradient_strategy(x, cost_val, "forward", m_step,
+                                   m_objFunc->get_observable(),
+                                   m_objFunc->get_kernel_evaluator());
     };
   }
 };

+class KernelBackwardDifferenceGradient : public KernelGradientService {
+protected:
+  std::shared_ptr<ObjectiveFunction> m_objFunc;
+  double m_step = 1e-7;
+
+public:
+  const std::string name() const override { return "backward"; }
+  const std::string description() const override { return ""; }
+  void initialize(std::shared_ptr<ObjectiveFunction> obj_func,
+                  HeterogeneousMap &&options) override {
+    m_objFunc = obj_func;
+    if (options.keyExists<double>("step")) {
+      m_step = options.get<double>("step");
+    }
+    gradient_func = [&](const std::vector<double> &x,
+                        double cost_val) -> std::vector<double> {
+      return run_gradient_strategy(x, cost_val, "backward", m_step,
+                                   m_objFunc->get_observable(),
+                                   m_objFunc->get_kernel_evaluator());
+    };
+  }
+};
+
+class KernelCentralDifferenceGradient : public KernelGradientService {
+protected:
+  std::shared_ptr<ObjectiveFunction> m_objFunc;
+  double m_step = 1e-7;
+
+public:
+  const std::string name() const override { return "central"; }
+  const std::string description() const override { return ""; }
+  void initialize(std::shared_ptr<ObjectiveFunction> obj_func,
+                  HeterogeneousMap &&options) override {
+    m_objFunc = obj_func;
+    if (options.keyExists<double>("step")) {
+      m_step = options.get<double>("step");
+    }
+    gradient_func = [&](const std::vector<double> &x,
+                        double cost_val) -> std::vector<double> {
+      return run_gradient_strategy(x, cost_val, "central", m_step,
+                                   m_objFunc->get_observable(),
+                                   m_objFunc->get_kernel_evaluator());
+    };
+  }
+};
 } // namespace qcor

-REGISTER_PLUGIN(qcor::KernelForwardDifferenceGradient, qcor::KernelGradientService)
+namespace {
+// Register all three diff plugins
+class US_ABI_LOCAL FiniteDiffActivator : public BundleActivator {
+public:
+  FiniteDiffActivator() {}
+  void Start(BundleContext context) {
+    context.RegisterService<qcor::KernelGradientService>(
+        std::make_shared<qcor::KernelForwardDifferenceGradient>());
+    context.RegisterService<qcor::KernelGradientService>(
+        std::make_shared<qcor::KernelBackwardDifferenceGradient>());
+    context.RegisterService<qcor::KernelGradientService>(
+        std::make_shared<qcor::KernelCentralDifferenceGradient>());
+  }
+  void Stop(BundleContext /*context*/) {}
+};
+} // namespace
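All three services now funnel into run_gradient_strategy and differ only in the strategy name handed to XACC ("forward", "backward", "central"); the quantum work of building and executing the shifted kernels lives in the AlgorithmGradientStrategy plugins. For orientation only, here is a plain classical sketch of the rule the "central" strategy implements per parameter. This is not the XACC implementation, which evaluates shifted quantum kernels rather than a host-side cost function:

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Illustrative only: central finite difference of a classical cost
    // function f with step h, mirroring the "step" semantics above.
    std::vector<double> central_difference(
        const std::function<double(const std::vector<double> &)> &f,
        std::vector<double> x, double h) {
      std::vector<double> grad(x.size(), 0.0);
      for (std::size_t i = 0; i < x.size(); ++i) {
        const double xi = x[i];
        x[i] = xi + h;
        const double f_plus = f(x);  // f(x + h * e_i)
        x[i] = xi - h;
        const double f_minus = f(x); // f(x - h * e_i)
        x[i] = xi;                   // restore the parameter
        grad[i] = (f_plus - f_minus) / (2.0 * h);
      }
      return grad;
    }

Forward and backward differ only in using (f(x+h) − f(x))/h and (f(x) − f(x−h))/h respectively, which is why a single wrapper parameterized by the strategy name suffices.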
runtime/objectives/objective_function.hpp  +47 −0

@@ -64,6 +64,14 @@ class ObjectiveFunction : public xacc::OptFunction, public xacc::Identifiable {
     throw std::bad_function_call();
     return qalloc(1);
   }
+
+  virtual std::function<std::shared_ptr<CompositeInstruction>(
+      std::vector<double>)>
+  get_kernel_evaluator() {
+    // Derived classes (ObjectiveFunctionImpl) only
+    error("Illegal call to get_kernel_evaluator().");
+    return {};
+  };
 };

 namespace __internal__ {

@@ -220,6 +228,45 @@ public:
     helper->set_options(opts);
   }

+  // Construct the kernel evaluator of this ObjectiveFunctionImpl
+  std::function<std::shared_ptr<CompositeInstruction>(std::vector<double>)>
+  get_kernel_evaluator() override {
+    // Define a function pointer type for the quantum kernel
+    void (*kernel_functor)(std::shared_ptr<CompositeInstruction>,
+                           KernelArgs...);
+    // Cast to the function pointer type
+    if (kernel_ptr) {
+      kernel_functor = reinterpret_cast<void (*)(
+          std::shared_ptr<CompositeInstruction>, KernelArgs...)>(kernel_ptr);
+    }
+    // Turn kernel evaluation into a functor that we can use here and
+    // share with the helper ObjectiveFunction for gradient evaluation
+    std::function<std::shared_ptr<CompositeInstruction>(std::vector<double>)>
+        kernel_evaluator =
+            lambda_kernel_evaluator.has_value()
+                ? lambda_kernel_evaluator.value()
+                : [&](std::vector<double> x)
+                      -> std::shared_ptr<CompositeInstruction> {
+      // Create a new CompositeInstruction, and create a tuple from it
+      // so we can concatenate it with the tuple of args
+      auto m_kernel = create_new_composite();
+      auto kernel_composite_tuple = std::make_tuple(m_kernel);
+      // Translate the x parameters into kernel args (represented as a tuple)
+      auto translated_tuple = (*args_translator)(x);
+      // Concatenate the two to make the args list: (kernel, args...)
+      auto concatenated =
+          std::tuple_cat(kernel_composite_tuple, translated_tuple);
+      // Call the kernel functor with those arguments
+      qcor::__internal__::evaluate_function_with_tuple_args(kernel_functor,
+                                                            concatenated);
+      return m_kernel;
+    };
+    return kernel_evaluator;
+  }
+
   // This will not be called on this class... It will only be called
   // on helpers...
   double operator()(xacc::internal_compiler::qreg &qreg,
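This accessor is the other half of the API change above: gradient services can rebuild the circuit at shifted parameter values without ever seeing the kernel's typed argument list, because the returned evaluator closes over the args_translator and kernel_ptr plumbing. A short consumer-side sketch; obj_func again stands in for an initialized objective, and the include path and parameter values are illustrative:

    #include "objective_function.hpp" // assumed local include path

    void sketch_evaluator_use(std::shared_ptr<qcor::ObjectiveFunction> obj_func) {
      // Hand the evaluator a flat parameter vector, get back the
      // corresponding CompositeInstruction -- e.g. at a point shifted
      // by the finite-difference step.
      auto kernel_eval = obj_func->get_kernel_evaluator();
      auto shifted_circuit = kernel_eval({0.5 + 1e-7, -0.25});
    }

Centralizing the evaluator in ObjectiveFunction is what made it possible to drop the kernel_eval parameter from get_gradient_method and KernelGradientService::initialize in the earlier files.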