Commit aea38287 authored by Nguyen, Thien Minh
Browse files

Simple binding for kernel autograd



Does not yet handle complex argument types...

Signed-off-by: Thien Nguyen <nguyentm@ornl.gov>
parent 50ec48a0
Loading
Loading
Loading
Loading
+29 −0
Original line number Diff line number Diff line
@@ -743,6 +743,35 @@ PYBIND11_MODULE(_pyqcor, m) {
        return qcor::observe(kernel, observable, q);
      },
      "");
  m.def(
      "internal_autograd",
      // Evaluate the expectation value of `obs` for the circuit produced by
      // the Python `kernel_eval` callable at parameters `x`, along with its
      // gradient (via the default gradient method).
      // Returns: (cost value, gradient vector).
      [](py::function &kernel_eval, qcor::PauliOperator &obs,
         std::vector<double> x) -> std::tuple<double, std::vector<double>> {
        try {
          // Bridge the Python callable into a C++ functor: invoke it with the
          // parameter vector and cast the returned object back to a
          // CompositeInstruction shared pointer.
          std::function<std::shared_ptr<xacc::CompositeInstruction>(
              std::vector<double>)>
              cpp_kernel_eval = [&](std::vector<double> x_vec) {
                auto ret = kernel_eval(x_vec);
                return ret.cast<std::shared_ptr<CompositeInstruction>>();
              };

          // (fixed typo: was `gradiend_method`)
          auto gradient_method = qcor::__internal__::get_gradient_method(
              qcor::__internal__::DEFAULT_GRADIENT_METHOD, cpp_kernel_eval,
              obs);

          auto program = cpp_kernel_eval(x);
          // Size the register to cover both the observable and the circuit.
          auto q = ::qalloc(
              std::max((int)obs.nBits(), (int)program->nPhysicalBits()));
          auto cost_val = qcor::observe(program, obs, q);
          auto dx = (*gradient_method)(x, cost_val);
          return std::make_tuple(cost_val, dx);
        } catch (const std::exception &e) {
          // NOTE(review): this catch-all also swallows failures from observe()
          // and the gradient service, not only a bad evaluator — consider
          // surfacing e.what() in the error message.
          qcor::error("Invalid kernel evaluator.");
          return std::make_tuple(0.0, std::vector<double>{});
        }
      },
      "");

  m.def("internal_get_all_instructions", []() -> std::vector<py::tuple> {
    auto insts = xacc::getServices<xacc::Instruction>();
+15 −0
Original line number Diff line number Diff line
@@ -606,6 +606,21 @@ class qjit(object):
        program = self.extract_composite(*args)
        return internal_observe(program, observable)

    def autograd(self, observable, qreg, x_vec):
        """
        Return the expectation value and gradients of <observable> with
        respect to the state given by this qjit kernel evaluated
        at the given arguments.

        Args:
            observable: operator whose expectation value (and gradient) is computed.
            qreg: qubit register the kernel is applied to.
            x_vec: kernel parameter(s); a bare scalar is promoted to a
                single-element list.

        Returns:
            Tuple of (expectation value, gradient vector), as produced by
            internal_autograd.
        """
        def kernel_eval(x):
            # Lower this kernel to a composite instruction at parameters x.
            return self.extract_composite(qreg, x)

        # Accept any numeric scalar: the original only promoted floats, so an
        # integer argument (e.g. x_vec=0) slipped through unlisted.
        if isinstance(x_vec, (int, float)):
            x_vec = [x_vec]

        return internal_autograd(kernel_eval, observable, x_vec)

    def openqasm(self, *args):
        """
        Return an OpenQasm string representation of this 
+7 −1
Original line number Diff line number Diff line
@@ -88,3 +88,9 @@ add_test (NAME qcor_python_bug_89
set_tests_properties(qcor_python_bug_89
  PROPERTIES ENVIRONMENT "PYTHONPATH=${CMAKE_INSTALL_PREFIX}:$ENV{PYTHONPATH}")
 
# Regression test for the qjit kernel autograd binding (gradient-driven VQE).
add_test (NAME qcor_python_kernel_autograd
  COMMAND ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_autograd_opt.py
  WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
)
# Make the installed qcor Python module importable from the test process.
set_tests_properties(qcor_python_kernel_autograd
  PROPERTIES ENVIRONMENT "PYTHONPATH=${CMAKE_INSTALL_PREFIX}:$ENV{PYTHONPATH}")
 No newline at end of file
+30 −0
Original line number Diff line number Diff line
import faulthandler
faulthandler.enable()

import unittest
from qcor import *

class TestQCORKernelAutoGrad(unittest.TestCase):
    """Exercises qjit.autograd() by driving a gradient-based optimizer."""

    def test_simple_deuteron(self):
        # Two-qubit deuteron ansatz with a single variational parameter.
        @qjit
        def ansatz1(q: qreg, theta: List[float]):
            X(q[0])
            Ry(q[1], theta[0])
            CX(q[1], q[0])

        # Deuteron N=2 Hamiltonian.
        H = -2.1433 * X(0) * X(1) - 2.1433 * \
            Y(0) * Y(1) + .21829 * Z(0) - 6.125 * Z(1) + 5.907

        def objective_function(x):
            # autograd returns (energy, gradients), which l-bfgs consumes.
            # (normalized to the file's 4-space indentation; removed the
            # unused module-scope qalloc from the original.)
            q = qalloc(2)
            return ansatz1.autograd(H, q, x)

        # l-bfgs is gradient-based, so it validates the gradient path.
        optimizer = createOptimizer('nlopt', {'algorithm': 'l-bfgs'})
        (energy, opt_params) = optimizer.optimize(objective_function, 1)
        print("Energy =", energy)
        print("Opt params =", opt_params)
        self.assertAlmostEqual(energy, -1.74886, places=1)

if __name__ == '__main__':
    unittest.main()