Unverified commit ad126660, authored by Mccaskey, Alex; committed by GitHub

Merge pull request #148 from tnguyen-ornl/tnguyen/lambda-obj-fun

createObjectiveFunction to support qpu_lambda ansatz
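
This adds createObjectiveFunction overloads that accept a qpu_lambda ansatz directly, either with an explicit ArgsTranslator or, for VQE-compatible signatures, with none at all. A minimal sketch of the new usage, mirroring the test added below:

    auto ansatz = qpu_lambda([](qreg q, double x) {
      X(q[0]);
      Ry(q[1], x);
      CX(q[1], q[0]);
    });
    auto q = qalloc(2);
    auto objective = createObjectiveFunction(ansatz, H, q, 1);
    auto [optval, opt_params] =
        createOptimizer("nlopt")->optimize(*objective);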
parents 3a6b5f6c 89d33cec
+1 −1
@@ -48,7 +48,7 @@ add_qcor_compile_and_exe_test(qrt_qpu_lambda_bell qpu_lambda/lambda_test_bell.cp
add_qcor_compile_and_exe_test(qrt_qpu_lambda_grover qpu_lambda/grover_lambda_oracle.cpp)
add_qcor_compile_and_exe_test(qrt_qpu_lambdas_in_loop qpu_lambda/deuteron_lambda.cpp)
add_qcor_compile_and_exe_test(qrt_qpu_lambda_deuteron qpu_lambda/deuteron_vqe.cpp)

add_qcor_compile_and_exe_test(qrt_qpu_lambda_objfunc qpu_lambda/deuteron_vqe_obj_func.cpp)

# Arithmetic tests
add_qcor_compile_and_exe_test(qrt_qpu_arith_adder arithmetic/simple.cpp)
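
(With the target registered above, the new test should be runnable after a build via standard CTest filtering, e.g. ctest -R qrt_qpu_lambda_objfunc, assuming tests are enabled in the build.)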
+41 −0 (new test: qpu_lambda/deuteron_vqe_obj_func.cpp)
#include "qcor.hpp"

int main() {
  // Create the Hamiltonian
  auto H = -2.1433 * X(0) * X(1) - 2.1433 * Y(0) * Y(1) + .21829 * Z(0) -
           6.125 * Z(1) + 5.907;
  int iter_count = 0;
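  // Define the ansatz as a qpu_lambda; iter_count is captured by
  // listing it after the lambda body (qpu_lambda capture convention)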
  auto ansatz = qpu_lambda(
      [](qreg q, double x) {
        X(q[0]);
        Ry(q[1], x);
        CX(q[1], q[0]);
        print("Iter", iter_count, "; angle = ", x);
        iter_count++;
      },
      iter_count);

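  // Build the objective function directly from the qpu_lambda ansatz
  // and the Hamiltonian (no ArgsTranslator needed)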
  auto q = qalloc(2);
  auto objective = createObjectiveFunction(ansatz, H, q, 1);
  // Create a qcor Optimizer
  auto optimizer = createOptimizer("nlopt");

  // Optimize the above function
  auto [optval, opt_params] = optimizer->optimize(*objective.get());
  std::cout << "Energy: " << optval << "\n";
  qcor_expect(std::abs(optval + 1.74886) < 0.1);

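  // Same ansatz, but taking the variational parameters as a
  // std::vector<double> (the Vec_Double variational signature)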
  auto ansatz_vec_param = qpu_lambda([](qreg q, std::vector<double> x) {
    X(q[0]);
    Ry(q[1], x[0]);
    CX(q[1], q[0]);
  });

  auto q1 = qalloc(2);
  auto objective_vec = createObjectiveFunction(ansatz_vec_param, H, q1, 1);

  // Optimize the above function
  auto [optval_vec, opt_params_vec] = optimizer->optimize(*objective_vec.get());
  std::cout << "Energy: " << optval_vec << "\n";
  qcor_expect(std::abs(optval_vec + 1.74886) < 0.1);
}
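
The two ansatz variants above exercise both VQE-compatible signatures that _qpu_lambda now detects: (qreg, double) maps to Variational_Arg_Type::Double, and (qreg, std::vector<double>) to Variational_Arg_Type::Vec_Double. A lambda with any other signature still needs an explicit ArgsTranslator (see the overloads below).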
+30 −0
@@ -300,6 +300,11 @@ private:
  QJIT qjit;

public:
  // Variational information, i.e., whether this lambda is VQE-compatible
  // (takes a single double or a single std::vector<double> input).
  enum class Variational_Arg_Type { Double, Vec_Double, None };
  Variational_Arg_Type var_type = Variational_Arg_Type::None;
  
  // Constructor; capture variable types should be deduced without
  // being specified, since we're using C++17
  _qpu_lambda(std::string &&ff, std::string &&_capture_var_names,
@@ -359,6 +364,31 @@ public:
      return result;
    }(tt);


    // Determine if this lambda has a VQE-compatible signature:
    // a qreg followed by the variational parameter(s).
    if (arg_type_and_names.size() == 2) {
      const auto trim_space = [](std::string &stripString) {
        while (!stripString.empty() && std::isspace(*stripString.begin())) {
          stripString.erase(stripString.begin());
        }

        while (!stripString.empty() && std::isspace(*stripString.rbegin())) {
          stripString.erase(stripString.length() - 1);
        }
      };

      auto type_name = arg_type_and_names[1].first;
      trim_space(type_name);
      // Use a relaxed substring search to handle "using namespace std".
      // FIXME: this is quite hacky.
      if (type_name.find("vector<double>") != std::string::npos) {
        var_type = Variational_Arg_Type::Vec_Double;
      } else if (type_name == "double") {
        var_type = Variational_Arg_Type::Double;
      }
    }

    // Map simple types to their reference types so that we can use
    // consistent type forwarding when casting the JIT raw function pointer.
+104 −3
@@ -130,6 +130,10 @@ class ObjectiveFunctionImpl : public ObjectiveFunction {
  std::shared_ptr<LocalArgsTranslator> args_translator;
  std::shared_ptr<ObjectiveFunction> helper;
  xacc::internal_compiler::qreg qreg;
  // Kernel evaluator from qpu_lambda
  std::optional<
      std::function<std::shared_ptr<CompositeInstruction>(std::vector<double>)>>
      lambda_kernel_evaluator;

public:
  ObjectiveFunctionImpl(void *k_ptr, std::shared_ptr<Observable> obs,
@@ -168,6 +172,44 @@ class ObjectiveFunctionImpl : public ObjectiveFunction {
    helper->set_options(options);
  }

  ObjectiveFunctionImpl(
      std::function<void(std::shared_ptr<CompositeInstruction>, KernelArgs...)>
          &functor,
      std::shared_ptr<Observable> obs, xacc::internal_compiler::qreg &qq,
      std::shared_ptr<LocalArgsTranslator> translator,
      std::shared_ptr<ObjectiveFunction> obj_helper, const int dim,
      HeterogeneousMap opts)
      : qreg(qq) {
    // std::cout << "Constructed from lambda\n";
    lambda_kernel_evaluator =
        [&, functor](std::vector<double> x) -> std::shared_ptr<CompositeInstruction> {
      // std::cout << "HOWDY:\n";
      // Create a new CompositeInstruction, and create a tuple
      // from it so we can concatenate with the tuple args
      auto m_kernel = create_new_composite();
      auto kernel_composite_tuple = std::make_tuple(m_kernel);

      // Translate x parameters into kernel args (represented as a tuple)
      auto translated_tuple = (*args_translator)(x);

      // Concatenate the two to make the args list (kernel, args...)
      auto concatenated =
          std::tuple_cat(kernel_composite_tuple, translated_tuple);
      std::apply(functor, concatenated);
      return m_kernel;
    };
    observable = obs;
    args_translator = translator;
    helper = obj_helper;
    _dim = dim;
    _function = *this;
    options = opts;
    options.insert("observable", observable);
    helper->update_observable(observable);
    helper->set_options(options);
  }

  void set_options(HeterogeneousMap &opts) override {
    options = opts;
    helper->set_options(opts);
@@ -198,7 +240,9 @@ class ObjectiveFunctionImpl : public ObjectiveFunction {
    // Turn kernel evaluation into a functor that we can use here
    // and share with the helper ObjectiveFunction for gradient evaluation
    std::function<std::shared_ptr<CompositeInstruction>(std::vector<double>)>
        kernel_evaluator = lambda_kernel_evaluator.has_value()
                               ? lambda_kernel_evaluator.value()
                               : [&](std::vector<double> x)
        -> std::shared_ptr<CompositeInstruction> {
      // Create a new CompositeInstruction, and create a tuple
      // from it so we can concatenate with the tuple args
@@ -503,4 +547,61 @@ std::shared_ptr<ObjectiveFunction> createObjectiveFunction(
      options);
}

template <typename... CaptureArgs, typename... Args>
std::shared_ptr<ObjectiveFunction> createObjectiveFunction(
    _qpu_lambda<CaptureArgs...> &lambda,
    std::shared_ptr<ArgsTranslator<Args...>> args_translator,
    Observable &observable, qreg &q, const int nParams,
    HeterogeneousMap &&options = {}) {
  auto helper = qcor::__internal__::get_objective("vqe");
  std::function<void(std::shared_ptr<CompositeInstruction>, Args...)> kernel_fn =
      [&lambda](std::shared_ptr<CompositeInstruction> comp, Args... args) -> void {
        return lambda.eval_with_parent(comp, args...);
      };

  return std::make_shared<ObjectiveFunctionImpl<Args...>>(
      kernel_fn, __internal__::qcor_as_shared(&observable), q,
      args_translator, helper, nParams, options);
}

// Create an ObjectiveFunction from a qpu_lambda without an explicit
// args_translator. Assumes the lambda has a VQE-compatible signature.
template <typename... CaptureArgs>
std::shared_ptr<ObjectiveFunction>
createObjectiveFunction(_qpu_lambda<CaptureArgs...> &lambda,
                        Observable &observable, qreg &q, const int nParams,
                        HeterogeneousMap &&options = {}) {
  if (lambda.var_type ==
      _qpu_lambda<CaptureArgs...>::Variational_Arg_Type::None) {
    error("qpu_lambda has an incompatible signature. Please provide an "
          "ArgsTranslator.");
  }
  auto helper = qcor::__internal__::get_objective("vqe");
  std::function<void(std::shared_ptr<CompositeInstruction>, qreg,
                     std::vector<double>)>
      kernel_fn = [&lambda](std::shared_ptr<CompositeInstruction> comp, qreg q,
                            std::vector<double> params) -> void {
    if (lambda.var_type ==
        _qpu_lambda<CaptureArgs...>::Variational_Arg_Type::Vec_Double) {
      return lambda.eval_with_parent(comp, q, params);
    }
    if (lambda.var_type ==
        _qpu_lambda<CaptureArgs...>::Variational_Arg_Type::Double) {
      if (params.size() != 1) {
        error("Invalid number of parameters. Expected 1, got " +
              std::to_string(params.size()));
      }
      return lambda.eval_with_parent(comp, q, params[0]);
    }
    error("Internal error: invalid qpu lambda type encountered.");
  };

  auto args_translator =
      std::make_shared<ArgsTranslator<qreg, std::vector<double>>>(
          [&](const std::vector<double> x) { return std::make_tuple(q, x); });

  return std::make_shared<ObjectiveFunctionImpl<qreg, std::vector<double>>>(
      kernel_fn, __internal__::qcor_as_shared(&observable), q, args_translator,
      helper, nParams, options);
}
} // namespace qcor
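
For reference, both new overloads in use (a sketch based on the test above; the translator variant shows a hypothetical mapping of the optimizer's parameter vector onto a (qreg, double) lambda):

    // No translator: the signature is deduced via var_type
    auto objective = createObjectiveFunction(ansatz, H, q, 1);

    // Explicit translator, e.g. for a (qreg, double) lambda
    auto xlate = std::make_shared<ArgsTranslator<qreg, double>>(
        [&](const std::vector<double> x) { return std::make_tuple(q, x[0]); });
    auto objective2 = createObjectiveFunction(ansatz, xlate, H, q, 1);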