Commit 0e373195 authored by Nguyen, Thien Minh
Browse files

Update the test to cover both the double and the vector<double> cases



Signed-off-by: Thien Nguyen <nguyentm@ornl.gov>
parent 8a20c22e
Loading
Loading
Loading
Loading
+48 −13
Original line number Diff line number Diff line
@@ -4,6 +4,13 @@ __qpu__ void ansatz(qreg q, double theta) {
  CX(q[1], q[0]);
}

// Ansatz that takes a vector of angles instead of a single double.
// Counterpart to `ansatz(qreg q, double theta)` above; added so the test
// exercises the kernel auto-gradient helper with a std::vector<double>
// argument as well. Only angles[0] is read by this circuit.
__qpu__ void ansatz_vec(qreg q, std::vector<double> angles) {
  X(q[0]);             // Pauli-X on qubit 0
  Ry(q[1], angles[0]); // variational Y-rotation on qubit 1 by angles[0]
  CX(q[1], q[0]);      // CNOT: control q[1], target q[0]
}

int main(int argc, char **argv) {
  // Allocate 2 qubits
  auto q = qalloc(2);
@@ -16,11 +23,16 @@ int main(int argc, char **argv) {
  auto H = 5.907 - 2.1433 * X(0) * X(1) - 2.1433 * Y(0) * Y(1) + .21829 * Z(0) -
           6.125 * Z(1);

  // Create the Optimizer.
  // Simple case 1: variational ansatz takes a single double
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    OptFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          auto q = qalloc(2);
          // Using kernel auto-gradient helper
          // Note: ansatz takes a single double argument => x[0]
          // Compile error if using the wrong signature.
          auto exp = ansatz::autograd(H, dx, q, x[0]);
          print("<E(", x[0], ") = ", exp);
          return exp;
@@ -29,4 +41,27 @@ int main(int argc, char **argv) {

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }

  // Simple case 2: variational ansatz takes a vector<double>
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("nlopt", {{"nlopt-optimizer", "l-bfgs"}});
    OptFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          auto q = qalloc(2);
          // Using kernel auto-gradient helper
          // Note: ansatz_vec takes a vector of double; hence just forward the
          // whole vector.
          auto exp = ansatz_vec::autograd(H, dx, q, x);
          print("<E(", x[0], ") = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }
}