/*******************************************************************************
 * Copyright (c) 2019 UT-Battelle, LLC.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * and Eclipse Distribution License v1.0 which accompanies this
 * distribution. The Eclipse Public License is available at
 * http://www.eclipse.org/legal/epl-v10.html and the Eclipse Distribution
 *License is available at https://eclipse.org/org/documents/edl-v10.php
 *
 * Contributors:
 *   Alexander J. McCaskey - initial API and implementation
 *******************************************************************************/
#include "vqe.hpp"

#include "Observable.hpp"
#include "Utils.hpp"
#include "xacc.hpp"
#include "xacc_service.hpp"
#include "AcceleratorDecorator.hpp"

#include <algorithm> // std::reverse
#include <complex>   // std::complex
#include <iomanip>   // std::setprecision
#include <limits>    // std::numeric_limits
#include <memory>
#include <sstream>   // std::stringstream
#include <string>
#include <vector>

using namespace xacc;

namespace xacc {
namespace algorithm {
bool VQE::initialize(const HeterogeneousMap &parameters) {
  if (!parameters.pointerLikeExists<Observable>("observable")) {
    std::cout << "Obs was false\n";
    return false;
  }

  if (!parameters.pointerLikeExists<CompositeInstruction>("ansatz")) {
    std::cout << "Ansatz was false\n";
    return false;
  }

  if (!parameters.pointerLikeExists<Accelerator>("accelerator")) {
    std::cout << "Acc was false\n";
    return false;
  }

  observable = parameters.getPointerLike<Observable>("observable");
  if (parameters.pointerLikeExists<Optimizer>("optimizer")) {
    optimizer = parameters.getPointerLike<Optimizer>("optimizer");
  }
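  // Cache raw pointers to the provided accelerator and ansatz; they are used
  // later by execute().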
  accelerator = parameters.getPointerLike<Accelerator>("accelerator");
  kernel = parameters.getPointerLike<CompositeInstruction>("ansatz");

  // If a gradient strategy is provided explicitly, use it.
  if (parameters.pointerLikeExists<AlgorithmGradientStrategy>(
          "gradient_strategy")) {
    gradientStrategy =
        parameters.get<std::shared_ptr<AlgorithmGradientStrategy>>(
            "gradient_strategy");
    // gradientStrategy->initialize({std::make_pair("observable",
    // xacc::as_shared_ptr(observable))});
  }

  if (parameters.stringExists("gradient_strategy") &&
      !parameters.pointerLikeExists<AlgorithmGradientStrategy>(
          "gradient_strategy") &&
      optimizer->isGradientBased()) {
    gradientStrategy = xacc::getService<AlgorithmGradientStrategy>(
        parameters.getString("gradient_strategy"));
    gradientStrategy->initialize(parameters);
  }

  if ((parameters.stringExists("gradient_strategy") ||
       parameters.pointerLikeExists<AlgorithmGradientStrategy>(
           "gradient_strategy")) &&
      !optimizer->isGradientBased()) {
    xacc::warning(
        "Chosen optimizer does not support gradients. Using default.");
  }

  if (optimizer && optimizer->isGradientBased() &&
      gradientStrategy == nullptr) {
    // No gradient strategy was provided, just use autodiff.
    gradientStrategy = xacc::getService<AlgorithmGradientStrategy>("autodiff");
    gradientStrategy->initialize(parameters);
  }
  return true;
}

const std::vector<std::string> VQE::requiredParameters() const {
  return {"observable", "optimizer", "accelerator", "ansatz"};
}
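
// Usage sketch (illustrative only; the ansatz, observable, accelerator,
// optimizer, and nQubits below are assumed to be set up elsewhere):
//
//   auto vqe = xacc::getAlgorithm(
//       "vqe", {{"ansatz", ansatz},
//               {"observable", observable},
//               {"accelerator", accelerator},
//               {"optimizer", optimizer}});
//   auto buffer = xacc::qalloc(nQubits);
//   vqe->execute(buffer);
//   auto minEnergy = (*buffer)["opt-val"].as<double>();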

void VQE::execute(const std::shared_ptr<AcceleratorBuffer> buffer) const {

  if (!optimizer) {
    xacc::error("VQE Algorithm Error - Optimizer was null. Please provide a "
                "valid Optimizer.");
  }

  std::vector<std::shared_ptr<AcceleratorBuffer>> min_child_buffers;
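  // Caches the child buffers from the best (lowest-energy) iteration so they
  // can be reported once optimization finishes.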

  // auto kernels = observable->observe(xacc::as_shared_ptr(kernel));
  // Cache of energy values during iterations.
  std::vector<double> energies;
  double last_energy = std::numeric_limits<double>::max();

  // Construct the objective function to optimize; it evaluates the ansatz at
  // the given parameters and executes the resulting circuits on the targeted
  // QPU.
  OptFunction f(
      [&, this](const std::vector<double> &x, std::vector<double> &dx) {
        std::vector<double> coefficients;
        std::vector<std::string> kernelNames;
        std::vector<std::shared_ptr<CompositeInstruction>> fsToExec;

        // Evaluate the ansatz via CompositeInstruction::operator()(); note
        // the parameter vector is reversed before substitution.
        auto tmp_x = x;
        std::reverse(tmp_x.begin(), tmp_x.end());
        auto evaled = kernel->operator()(tmp_x);
        // observe
        auto kernels = observable->observe(evaled);

        double identityCoeff = 0.0;
        int nInstructionsEnergy = kernels.size(), nInstructionsGradient = 0;
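        // Partition the observed sub-kernels: a kernel that adds no
        // instructions beyond the ansatz corresponds to the identity term and
        // contributes only its coefficient; all others are queued for
        // execution on the accelerator.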
        for (auto &f : kernels) {
          kernelNames.push_back(f->name());
          std::complex<double> coeff = f->getCoefficient();

          int nFunctionInstructions;
          if (f->getInstruction(0)->isComposite()) {
            nFunctionInstructions =
                kernel->nInstructions() + f->nInstructions() - 1;
          } else {
            nFunctionInstructions = f->nInstructions();
          }

          if (nFunctionInstructions > kernel->nInstructions()) {
            fsToExec.push_back(f);
            coefficients.push_back(std::real(coeff));
          } else {
            identityCoeff += std::real(coeff);
            coefficients.push_back(std::real(coeff));
          }
        }

        // Retrieve instructions for gradient, if a pointer of type
        // AlgorithmGradientStrategy is given
        if (gradientStrategy) {

          auto gradFsToExec = gradientStrategy->getGradientExecutions(
              xacc::as_shared_ptr(kernel), x);
          // Add gradient instructions to be sent to the qpu
          nInstructionsEnergy = fsToExec.size();
          nInstructionsGradient = gradFsToExec.size();
          for (auto inst : gradFsToExec) {
            fsToExec.push_back(inst);
          }
          xacc::info("Number of instructions for energy calculation: " +
                     std::to_string(nInstructionsEnergy));
          xacc::info("Number of instructions for gradient calculation: " +
                     std::to_string(nInstructionsGradient));
        }

        auto tmpBuffer = xacc::qalloc(buffer->size());
        accelerator->execute(tmpBuffer, fsToExec);
        auto buffers = tmpBuffer->getChildren();
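        // The children come back in the same order as fsToExec:
        // energy circuits first, followed by any gradient circuits.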

        // Tag any gradient buffers.
        for (int i = nInstructionsEnergy; i < buffers.size(); i++) {
          buffers[i]->addExtraInfo("is-gradient-calc", true);
        }

        auto tmp_buffer_extra_info = tmpBuffer->getInformation();
        for (auto &[k, v] : tmp_buffer_extra_info) {
          buffer->addExtraInfo(k, v);
        }

        // Create buffer child for the Identity term
        auto idBuffer = xacc::qalloc(buffer->size());
        idBuffer->addExtraInfo("coefficient", identityCoeff);
        idBuffer->setName("I");
        idBuffer->addExtraInfo("kernel", "I");
        idBuffer->addExtraInfo("parameters", x);
        idBuffer->addExtraInfo("exp-val-z", 1.0);
        if (accelerator->name() == "ro-error")
          idBuffer->addExtraInfo("ro-fixed-exp-val-z", 1.0);
        buffer->appendChild("I", idBuffer);

        // Add information about the variational parameters to the child
        // buffers. Other energy (observable-related) information will be
        // populated by Observable::postProcess().
        for (auto &childBuffer : buffers) {
          childBuffer->addExtraInfo("parameters", x);
        }

        // Special key to indicate that the buffer was processed by an
        // HPC virtualization decorator.
        const std::string aggregate_key =
            "__internal__decorator_aggregate_vqe__";
        const double energy = [&]() {
          // Compute the energy. We can do this manually, or we may have a
          // case where an accelerator decorator posts the energy
          // automatically at a given key in the buffer's extra info.
          if (std::dynamic_pointer_cast<xacc::AcceleratorDecorator>(
                  xacc::as_shared_ptr(accelerator)) &&
              buffer->hasExtraInfoKey(aggregate_key)) {
            // Handles VQE that was executed on a virtualized Accelerator,
            // i.e. the energy has been aggregated by the Decorator.
            double resultEnergy = identityCoeff;
            // This is a decorator that has an aggregate value
            auto aggregate_value = (*buffer)[aggregate_key].as<double>();
            resultEnergy += aggregate_value;
            return resultEnergy;
          } else {
            // Normal VQE: post-process the result with the Observable.
            // This also populates metadata on the child buffers of the
            // main buffer.
            return observable->postProcess(tmpBuffer);
          }
        }();

        // Compute the variance and populate any variance-related
        // information on the child buffers.
        const double variance = [&]() {
          if (std::dynamic_pointer_cast<xacc::AcceleratorDecorator>(
                  xacc::as_shared_ptr(accelerator)) &&
              buffer->hasExtraInfoKey(aggregate_key)) {
            // HPC decorator doesn't support variance...
            return 0.0;

          } else {
            // This will also populate information about variance to each child
            // buffer.
            return observable->postProcess(
                tmpBuffer, Observable::PostProcessingTask::VARIANCE_CALC);
          }
        }();

        if (gradientStrategy) {
          // gradient-based optimization
          // If the gradient strategy is numerical, pass it the current
          // function value. We subtract identityCoeff from the energy because
          // the gradient kernels only carry the coefficients of the
          // parameterized (non-identity) instructions.
          if (gradientStrategy->isNumerical()) {
            gradientStrategy->setFunctionValue(energy - identityCoeff);
          }

          // Update the gradient vector dx from the gradient child buffers.
          gradientStrategy->compute(
              dx, std::vector<std::shared_ptr<AcceleratorBuffer>>(
                      buffers.begin() + nInstructionsEnergy, buffers.end()));
        }

        // Append the child buffers from the temporary buffer to the main
        // buffer. These child buffers have extra information populated by
        // the post-processing steps above.
        for (auto &b : buffers) {
          buffer->appendChild(b->name(), b);
        }
        std::stringstream ss;
        ss << "E(" << (!x.empty() ? std::to_string(x[0]) : "");
        for (int i = 1; i < x.size(); i++)
          ss << "," << x[i];
        ss << ") = " << std::setprecision(12) << energy;
        xacc::info(ss.str());
        // Saves the energy value.
        energies.emplace_back(energy);

        if (energy < last_energy) {
          min_child_buffers.clear();
          min_child_buffers.push_back(idBuffer);
          for (auto b : buffers) {
            min_child_buffers.push_back(b);
          } 
          last_energy = energy;
        }

        return energy;
      },
      kernel->nVariables());

  auto result = optimizer->optimize(f);
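  // result.first is the optimal (minimum) energy found; result.second holds
  // the parameters at which it was attained.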

  // Get the children at the opt-params
  std::vector<double> opt_exp_vals, children_coeffs;
  std::vector<std::string> children_names;
  for (auto &child : min_child_buffers) {
    if (!child->hasExtraInfoKey("is-gradient-calc")) {
      opt_exp_vals.push_back(child->getInformation("exp-val-z").as<double>());

      // Record the coefficient and kernel name for this (non-gradient) child.
      children_coeffs.push_back(
          child->getInformation("coefficient").as<double>());
      children_names.push_back(child->name());
    } 
  }

  buffer->addExtraInfo("opt-exp-vals", opt_exp_vals);
  buffer->addExtraInfo("coefficients", children_coeffs);
  buffer->addExtraInfo("kernel-names", children_names);

  buffer->addExtraInfo("opt-val", ExtraInfo(result.first));
  buffer->addExtraInfo("opt-params", ExtraInfo(result.second));
  // Add the per-iteration energies so that users can examine convergence.
  buffer->addExtraInfo("params-energy", ExtraInfo(energies));
  return;
}

std::vector<double>
VQE::execute(const std::shared_ptr<AcceleratorBuffer> buffer,
             const std::vector<double> &x) {
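  // Single-point evaluation: compute the energy at the given parameters x
  // without invoking the optimizer.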

  std::vector<double> coefficients;
  std::vector<std::string> kernelNames;
  std::vector<std::shared_ptr<CompositeInstruction>> fsToExec;

  double identityCoeff = 0.0;
  auto tmp_x = x;
  std::reverse(tmp_x.begin(), tmp_x.end());
  auto evaled = xacc::as_shared_ptr(kernel)->operator()(tmp_x);
  auto kernels = observable->observe(evaled);
  for (auto &f : kernels) {
    kernelNames.push_back(f->name());
    std::complex<double> coeff = f->getCoefficient();

    int nFunctionInstructions = 0;
    if (f->getInstruction(0)->isComposite()) {
      nFunctionInstructions = kernel->nInstructions() + f->nInstructions() - 1;
    } else {
      nFunctionInstructions = f->nInstructions();
    }

    if (nFunctionInstructions > kernel->nInstructions()) {
      fsToExec.push_back(f);
      coefficients.push_back(std::real(coeff));
    } else {
      identityCoeff += std::real(coeff);
    }
  }

  auto tmpBuffer = xacc::qalloc(buffer->size());
  accelerator->execute(tmpBuffer, fsToExec);
  auto buffers = tmpBuffer->getChildren();
  for (auto &b : buffers) {
    b->addExtraInfo("parameters", x);
  }
  auto tmp_buffer_extra_info = tmpBuffer->getInformation();
  for (auto &[k, v] : tmp_buffer_extra_info) {
    buffer->addExtraInfo(k, v);
  }

  // Create buffer child for the Identity term
  auto idBuffer = xacc::qalloc(buffer->size());
  idBuffer->addExtraInfo("coefficient", identityCoeff);
  idBuffer->setName("I");
  idBuffer->addExtraInfo("kernel", "I");
  idBuffer->addExtraInfo("parameters", x);
  idBuffer->addExtraInfo("exp-val-z", 1.0);
  if (accelerator->name() == "ro-error")
    idBuffer->addExtraInfo("ro-fixed-exp-val-z", 1.0);
  buffer->appendChild("I", idBuffer);
  const std::string aggregate_key = "__internal__decorator_aggregate_vqe__";
  const double energy = [&]() {
    // Compute the energy. We can do this manually, or we may have a case
    // where an accelerator decorator posts the energy automatically at a
    // given key in the buffer's extra info.
    if (std::dynamic_pointer_cast<xacc::AcceleratorDecorator>(
            xacc::as_shared_ptr(accelerator)) &&
        buffer->hasExtraInfoKey(aggregate_key)) {
      // Handles VQE that was executed on a virtualized Accelerator,
      // i.e. the energy has been aggregated by the Decorator.
      double resultEnergy = identityCoeff;
      // This is a decorator that has an aggregate value
      auto aggregate_value = (*buffer)[aggregate_key].as<double>();
      resultEnergy += aggregate_value;
      return resultEnergy;
    } else {
      // Normal VQE: post-process the result with the Observable.
      // This also populates metadata on the child buffers of the
      // main buffer.
      return observable->postProcess(tmpBuffer);
    }
  }();

  // Compute the variance and populate any variance-related information
  // on the child buffers.
  const double variance = [&]() {
    if (std::dynamic_pointer_cast<xacc::AcceleratorDecorator>(
            xacc::as_shared_ptr(accelerator)) &&
        buffer->hasExtraInfoKey(aggregate_key)) {
      // HPC decorator doesn't support variance...
      return 0.0;

    } else {
      // This will also populate information about variance to each child
      // buffer.
      return observable->postProcess(
          tmpBuffer, Observable::PostProcessingTask::VARIANCE_CALC);
    }
  }();
  // Append the child buffers from the temporary buffer to the main buffer.
  // These child buffers have extra information populated by the
  // post-processing steps above.
  for (auto &b : buffers) {
    buffer->appendChild(b->name(), b);
  }
  std::stringstream ss;
  ss << "E(" << (!x.empty() ? std::to_string(x[0]) : "");
  for (int i = 1; i < x.size(); i++)
    ss << "," << x[i];
  ss << ") = " << std::setprecision(12) << energy;
  xacc::info(ss.str());
  return {energy};
}

} // namespace algorithm
} // namespace xacc