// Compile with:
// qcor gradients_optimization.cpp
// By default, kernel::autograd will use the `central` gradient strategy.
// This can be changed by using the `-autograd` qcor compiler flag:
// e.g.,
// qcor -autograd forward gradients_optimization.cpp
// Options: forward, backward, central
// Note: users may need to adjust the optimizer step size to guarantee
// convergence.
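//
// This example minimizes <H> for the two-qubit deuteron Hamiltonian three
// times, with three ansatz signatures that prepare the same one-parameter
// state: (1) a single double, (2) a std::vector<double>, and (3) an
// arbitrary signature handled via an ArgsTranslator.
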
__qpu__ void ansatz(qreg q, double theta) {
  X(q[0]);
  Ry(q[1], theta);
  CX(q[1], q[0]);
}

// Ansatz that takes a vector
__qpu__ void ansatz_vec(qreg q, std::vector<double> angles) {
  X(q[0]);
  Ry(q[1], angles[0]);
  CX(q[1], q[0]);
}

// Ansatz with an arbitrary signature
__qpu__ void ansatz_complex(qreg q, int idx, std::vector<double> angles) {
  X(q[0]);
  Ry(q[1], angles[idx]);
  CX(q[1], q[0]);
}
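
// All three kernels above prepare the same state, so each optimization in
// main() should converge to the same deuteron ground-state energy (about
// -1.74886, checked via qcor_expect below).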

int main(int argc, char **argv) {
  // Allocate 2 qubits
  auto q = qalloc(2);

  // The programmer needs to set the number
  // of variational parameters.
  auto n_variational_params = 1;

  // Create the Deuteron Hamiltonian
  auto H = 5.907 - 2.1433 * X(0) * X(1) - 2.1433 * Y(0) * Y(1) + .21829 * Z(0) -
           6.125 * Z(1);

  // Simple case 1: variational ansatz takes a single double
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
    ObjectiveFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          auto q = qalloc(2);
          // Using the kernel auto-gradient helper.
          // Note: ansatz takes a single double argument, so pass x[0];
          // using the wrong signature is a compile-time error.
          auto exp = ansatz::autograd(H, dx, q, x[0]);
          print("<E(", x[0], ")> = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }
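
  // For reference, a standalone gradient query outside the optimizer loop
  // (a minimal sketch: the 0.5 radian angle is illustrative; the autograd
  // signature is the same one used above).
  {
    auto q_probe = qalloc(2);
    std::vector<double> dE(n_variational_params);
    auto energy_at_probe = ansatz::autograd(H, dE, q_probe, 0.5);
    print("E(0.5) = ", energy_at_probe, ", dE/dtheta = ", dE[0]);
  }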

  // Simple case 2: variational ansatz takes a vector<double>
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
    ObjectiveFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          auto q = qalloc(2);
          // Using the kernel auto-gradient helper.
          // Note: ansatz_vec takes a vector of doubles, so forward the
          // whole vector.
          auto exp = ansatz_vec::autograd(H, dx, q, x);
          print("<E(", x[0], ")> = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }
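
  // Case 3: variational ansatz with an arbitrary signature. The optimizer
  // still hands us a plain std::vector<double>, so an ArgsTranslator maps
  // the parameter vector onto the kernel's (qreg, int, std::vector<double>)
  // argument list.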
  {
    // Create the Optimizer (gradient-based)
    auto optimizer = createOptimizer("mlpack", {{"step-size", 1e-2}});
    ObjectiveFunction opt_function(
        [&](const std::vector<double> &x, std::vector<double> &dx) {
          // Using the kernel auto-gradient helper.
          // A kernel with an arbitrary signature needs an ArgsTranslator
          // that maps the optimizer's parameter vector onto the kernel's
          // arguments.
          auto quantum_reg = qalloc(2);
          int index = 0;
          ArgsTranslator<qreg, int, std::vector<double>> args_translation(
              [&](const std::vector<double> &x) {
                return std::tuple(quantum_reg, index, x);
              });
          auto exp = ansatz_complex::autograd(H, dx, x, args_translation);
          print("<E(", x[0], ")> = ", exp);
          return exp;
        },
        n_variational_params);

    auto [energy, opt_params] = optimizer->optimize(opt_function);
    print(energy);
    qcor_expect(std::abs(energy + 1.74886) < 0.1);
  }
}
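
// Build and run (a sketch: the `qcor` invocation is from the header comment;
// the a.out output name assumes the underlying clang default):
//   qcor gradients_optimization.cpp
//   ./a.out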