Commit 442c8f17 authored by Nguyen, Thien Minh

Added a Python test for autograd with args translate and code cleanup



Signed-off-by: Thien Nguyen <nguyentm@ornl.gov>
parent 8b72b681
+1 −0
@@ -41,6 +41,7 @@ add_test(NAME multi_ctrl_test COMMAND ${CMAKE_BINARY_DIR}/qcor ${CMAKE_CURRENT_S

add_qcor_compile_and_exe_test(qrt_obj_func_simple simple/simple-objective-function.cpp)
add_qcor_compile_and_exe_test(qrt_kernel_autograd_simple simple/gradients_optimization.cpp)
add_qcor_compile_and_exe_test(qrt_hybrid_vqe_exe hybrid/deuteron_h2_vqe.cpp)
add_qcor_compile_and_exe_test(qrt_bell_ctrl bell/bell_control.cpp)

# Lambda tests
+4 −1
@@ -56,12 +56,14 @@ int main() {
  // start the optimization at
  const auto [energy, params] = vqe.execute(0.0);
  std::cout << "<H>(" << params[0] << ") = " << energy << "\n";
  qcor_expect(std::abs(energy + 1.74886) < 0.1);

  // Now do the same for the vector double ansatz, but
  // also demonstrate the async interface
  VQE vqe_vec(ansatz_vec, H);
  const auto [energy_vec, params_vec] = vqe_vec.execute(std::vector<double>{0.0});
  std::cout << "<H>(" << params_vec[0] << ") = " << energy_vec << "\n";
  qcor_expect(std::abs(energy_vec + 1.74886) < 0.1);

  // Now run with the mixed language kernel,
  // initialize the optimization to x = .55, also
@@ -74,6 +76,7 @@ int main() {
  const auto [energy_oq, params_oq] = vqe_openqasm.execute(optimizer, .55);

  std::cout << "<H>(" << params_oq[0] << ") = " << energy_oq << "\n";
  qcor_expect(std::abs(energy_oq + 1.74886) < 0.1);
  
  // Can query information about the vqe run
  // Here, we get all parameter sets executed and corresponding energies seen
+2 −1
@@ -39,6 +39,7 @@ int main(int argc, char **argv) {
  // Query results when ready.
  auto results = sync(handle);
  printf("vqe-energy from taskInitiate = %f\n", results.opt_val);
  qcor_expect(std::abs(results.opt_val + 1.74886) < 0.1);
  
  for (auto &x : linspace(-constants::pi, constants::pi, 20)) {
    std::cout << x << ", " << (*objective)({x}) << "\n";
+34 −0
@@ -26,5 +26,39 @@ class TestQCORKernelAutoGrad(unittest.TestCase):
        print("Opt params =", opt_params)
        self.assertAlmostEqual(energy, -1.74886, places=1)
    
    def test_autograd_args_translate(self):
        set_qpu('qpp')
        @qjit
        def ansatz_args_translate(q: qreg, x: List[float], exp_args: List[FermionOperator]):
            X(q[0])
            for i, exp_arg in enumerate(exp_args):
                exp_i_theta(q, x[i], exp_arg)

        exp_args = [adag(0) * a(1) - adag(1)*a(0), adag(0)*a(2) - adag(2)*a(0)]
        H = createOperator('5.907 - 2.1433 X0X1 - 2.1433 Y0Y1 + .21829 Z0 - 6.125 Z1 + 9.625 - 9.625 Z2 - 3.91 X1 X2 - 3.91 Y1 Y2')
        
        # Custom arg_translator in a Pythonic way
        def ansatz_translate(self, q: qreg, x: List[float]):
            # Map the (q, x) signature the optimizer sees onto the kernel's full argument list.
            return {"q": q, "x": x, "exp_args": exp_args}

        ansatz_args_translate.translate = MethodType(ansatz_translate, qjit)
        
        def objective_function(x):
            q = qalloc(3)
            # Autograd the kernel with the custom args translate.
            return ansatz_args_translate.autograd(H, q, x)
        
        optimizer = createOptimizer('nlopt', {'algorithm':'l-bfgs'})
        (energy, opt_params) = optimizer.optimize(objective_function, 2)
        print("Energy =", energy)
        print("Opt params =", opt_params)
        self.assertAlmostEqual(energy, -2.044, places=1)
        self.assertAlmostEqual(opt_params[0], 0.7118, places=1)
        self.assertAlmostEqual(opt_params[1], 0.2739, places=1)

if __name__ == '__main__':
    unittest.main()
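
For context, the translate hook bound above is how autograd recovers the kernel's full argument list from the (q, x) pair the optimizer supplies. Conceptually the call flow looks like the following (a sketch only, not qcor's actual internals):

    # Sketch (hypothetical call flow enabled by a custom translate):
    kwargs = ansatz_args_translate.translate(q, x)
    # -> {"q": q, "x": x, "exp_args": exp_args}
    # qcor can then evaluate <H> for these arguments and differentiate
    # with respect to x, by default via central finite differences
    # (see the gradient change in the next file).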
+2 −0
@@ -5,6 +5,8 @@

namespace qcor {
namespace __internal__ {
std::string DEFAULT_GRADIENT_METHOD = "central";

std::shared_ptr<GradientFunction>
get_gradient_method(const std::string &type,
                    std::shared_ptr<ObjectiveFunction> obj_func,