Unverified Commit 887bd178 authored by Mccaskey, Alex's avatar Mccaskey, Alex Committed by GitHub

Merge pull request #338 from tnguyen-ornl/tnguyen/autodiff-obj-func

Support kernel_evaluator in autodiff
parents da9e6487 303f5ab0
Pipeline #126580 passed with stage
in 19 minutes and 50 seconds
......@@ -356,14 +356,23 @@ bool Autodiff::initialize(const HeterogeneousMap parameters) {
}
auto obs = parameters.getPointerLike<Observable>("observable");
fromObservable(xacc::as_shared_ptr(obs));
if (parameters.keyExists<std::function<std::shared_ptr<CompositeInstruction>(
std::vector<double>)>>("kernel-evaluator")) {
kernel_evaluator =
parameters.get<std::function<std::shared_ptr<CompositeInstruction>(
std::vector<double>)>>("kernel-evaluator");
if (parameters.keyExists<double>("step")) {
m_stepSize = parameters.get<double>("step");
}
}
return true;
}
std::vector<double>
Autodiff::derivative(std::shared_ptr<CompositeInstruction> CompositeInstruction,
const std::vector<double> &x,
double *optional_out_fn_val) {
// std::cout << "Observable: \n" << m_obsMat << "\n";
std::vector<double> Autodiff::computeDerivative(
std::shared_ptr<CompositeInstruction> CompositeInstruction,
const MatrixXcdual &obsMat, const std::vector<double> &x, size_t nbQubits,
double *optional_out_fn_val) {
// std::cout << "Observable: \n" << obsMat << "\n";
// std::cout << "Circuit: \n" << CompositeInstruction->toString() << "\n";
// std::cout << "Number of arguments = " << CompositeInstruction->nVariables()
// << "\n";
......@@ -379,7 +388,7 @@ Autodiff::derivative(std::shared_ptr<CompositeInstruction> CompositeInstruction,
varMap.emplace(CompositeInstruction->getVariables()[i], vars[i]);
}
AutodiffCircuitVisitor visitor(m_nbQubits, varMap);
AutodiffCircuitVisitor visitor(nbQubits, varMap);
InstructionIterator iter(CompositeInstruction);
while (iter.hasNext()) {
......@@ -393,7 +402,7 @@ Autodiff::derivative(std::shared_ptr<CompositeInstruction> CompositeInstruction,
VectorXcdual finalState = circMat.col(0);
// std::cout << "Final state:\n" << finalState << "\n";
VectorXcdual ket = m_obsMat * finalState;
VectorXcdual ket = obsMat * finalState;
VectorXcdual bra = VectorXcdual::Zero(finalState.size());
for (int i = 0; i < finalState.size(); ++i) {
bra[i] = conj(finalState[i]);
......@@ -425,6 +434,67 @@ Autodiff::derivative(std::shared_ptr<CompositeInstruction> CompositeInstruction,
return gradients;
}
std::vector<double>
Autodiff::derivative(std::shared_ptr<CompositeInstruction> CompositeInstruction,
                     const std::vector<double> &x,
                     double *optional_out_fn_val) {
  // Convenience overload: forwards to the static helper using the member
  // observable matrix (m_obsMat) and qubit count (m_nbQubits). When
  // optional_out_fn_val is non-null the function value is written there too.
  auto grads = computeDerivative(CompositeInstruction, m_obsMat, x, m_nbQubits,
                                 optional_out_fn_val);
  return grads;
}
void Autodiff::compute(
    std::vector<double> &dx,
    std::vector<std::shared_ptr<AcceleratorBuffer>> results) {
  // Gradients are evaluated locally (analytically or by finite differences),
  // so no remote-execution buffers are expected.
  // The list must be empty, i.e. no remote evaluation.
  assert(results.empty());
  if (kernel_evaluator) {
    // Black-box kernel evaluator functor: the Composite cannot be traversed
    // symbolically, so use central finite differences:
    //   df/dx_i ~= (f(x_i + h) - f(x_i - h)) / (2h),  h = m_stepSize.
    std::vector<double> valuesPlus(m_currentParams.size());
    std::vector<double> valuesMinus(m_currentParams.size());
    // All 2*N (param index, +/- shift) evaluations are independent;
    // collapse(2) lets OpenMP distribute the flattened iteration space.
#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
    for (int i = 0; i < static_cast<int>(m_currentParams.size()); ++i) {
      for (int j = 0; j < 2; ++j) {
        // j == 0 -> forward (+h) shift; j == 1 -> backward (-h) shift.
        const double shift = (j == 0) ? m_stepSize : -m_stepSize;
        auto shiftedParams = m_currentParams;
        shiftedParams[i] += shift;
        auto kernel = kernel_evaluator(shiftedParams);
        // The evaluator must return a fully-resolved (non-variational) circuit.
        assert(kernel->getVariables().empty());
        double funcVal = 0.0;
        computeDerivative(kernel, m_obsMat, {} /* no params */, m_nbQubits,
                          &funcVal);
        ((j == 0) ? valuesPlus : valuesMinus)[i] = funcVal;
      }
    }
    // Central-difference quotient per parameter.
    dx.clear();
    dx.reserve(m_currentParams.size());
    for (size_t i = 0; i < m_currentParams.size(); ++i) {
      dx.emplace_back((valuesPlus[i] - valuesMinus[i]) / (2.0 * m_stepSize));
    }
  } else {
    // Variational composite, perform analytical autodiff.
    dx = derivative(m_varKernel, m_currentParams);
  }
  // Invalidate per-call state so stale kernels/params are never reused.
  m_varKernel.reset();
  m_currentParams.clear();
}
} // namespace quantum
} // namespace xacc
......
......@@ -25,6 +25,12 @@ public:
derivative(std::shared_ptr<CompositeInstruction> CompositeInstruction,
const std::vector<double> &x,
double *optional_out_fn_val = nullptr);
// Static helper to evaluate the derivatives and optionally the function value.
static std::vector<double>
computeDerivative(std::shared_ptr<CompositeInstruction> CompositeInstruction,
const MatrixXcdual &obsMat, const std::vector<double> &x,
size_t nbQubits, double *optional_out_fn_val = nullptr);
// AlgorithmGradientStrategy implementation:
virtual bool isNumerical() const override { return true; }
......@@ -43,19 +49,17 @@ public:
}
virtual void
compute(std::vector<double> &dx,
std::vector<std::shared_ptr<AcceleratorBuffer>> results) override {
// The list must be empty, i.e. no evaluation.
assert(results.empty());
dx = derivative(m_varKernel, m_currentParams);
m_varKernel.reset();
m_currentParams.clear();
}
std::vector<std::shared_ptr<AcceleratorBuffer>> results) override;
private:
MatrixXcdual m_obsMat;
size_t m_nbQubits;
std::shared_ptr<CompositeInstruction> m_varKernel;
std::vector<double> m_currentParams;
// Support for QCOR kernel evaluator
std::function<std::shared_ptr<CompositeInstruction>(std::vector<double>)> kernel_evaluator;
// Step size if using kernel_evaluator (black-box Composite)
double m_stepSize = 1e-5;
};
} // namespace quantum
} // namespace xacc
\ No newline at end of file
......@@ -12,6 +12,12 @@ add_library(${LIBRARY_NAME} SHARED ${SRC})
target_include_directories(${LIBRARY_NAME} PUBLIC . ${CMAKE_SOURCE_DIR}/tpls/autodiff ${CMAKE_SOURCE_DIR}/tpls/eigen)
target_link_libraries(${LIBRARY_NAME} PUBLIC xacc xacc-quantum-gate)
find_package(OpenMP)
if(OpenMP_CXX_FOUND)
target_compile_definitions(${LIBRARY_NAME} PUBLIC WITH_OPENMP_)
target_link_libraries(${LIBRARY_NAME} PUBLIC OpenMP::OpenMP_CXX)
endif()
set(_bundle_name xacc_autodiff_gradient)
set_target_properties(${LIBRARY_NAME} PROPERTIES
......
......@@ -246,6 +246,60 @@ TEST(AutodiffTester, checkQaoaMaxCutGradient) {
EXPECT_NEAR((*buffer)["opt-val"].as<double>(), 2.0, 0.1);
}
// Exercises the kernel-evaluator (black-box) gradient path: Autodiff is given
// a functor instead of a variational Composite, so gradients come from finite
// differences (see Autodiff::compute). Verified by driving a plain
// gradient-descent loop on the deuteron H3 Hamiltonian to its known minimum.
TEST(AutodiffTester, checkGradientKernelEvaluator) {
// Create the N=3 deuteron Hamiltonian
auto H_N_3 = xacc::quantum::getObservable(
"pauli",
std::string("5.907 - 2.1433 X0X1 - 2.1433 Y0Y1 + .21829 Z0 - 6.125 Z1 + "
"9.625 - 9.625 Z2 - 3.91 X1 X2 - 3.91 Y1 Y2"));
// JIT map Quil QASM Ansatz to IR
xacc::qasm(R"(
.compiler xasm
.circuit ansatz_h3_test
.parameters t0, t1
.qbit q
X(q[0]);
exp_i_theta(q, t0, {{"pauli", "X0 Y1 - Y0 X1"}});
exp_i_theta(q, t1, {{"pauli", "X0 Z1 Y2 - X2 Z1 Y0"}});
)");
auto ansatz = xacc::getCompiled("ansatz_h3_test");
// Create a kernel evaluator
// Binds the two-parameter ansatz: given concrete params, returns the
// fully-resolved (non-variational) circuit Autodiff requires.
std::function<std::shared_ptr<xacc::CompositeInstruction>(
std::vector<double>)>
kernel_evaluator =
[&](std::vector<double> x) { return ansatz->operator()(x); };
auto autodiff = std::make_shared<xacc::quantum::Autodiff>();
// Passing "kernel-evaluator" (and no "step") selects the finite-difference
// path with the default step size.
autodiff->initialize(
{{"observable", H_N_3}, {"kernel-evaluator", kernel_evaluator}});
const std::vector<double> initialParams{0.0, 0.0};
const int nbIterms = 100;
// gradient-descent step size
const double stepSize = 0.01;
// grad starts at zero, so the very first parameter update below is a no-op.
std::vector<double> grad{0.0, 0.0};
auto currentParams = initialParams;
double energy = 0.0;
for (int i = 0; i < nbIterms; ++i) {
// Steepest-descent update using the gradient from the previous iteration.
for (int paramId = 0; paramId < 2; ++paramId) {
currentParams[paramId] =
currentParams[paramId] - stepSize * grad[paramId];
}
// getGradientExecutions() caches the kernel/params; compute() must follow
// it to produce the gradient for the cached point.
autodiff->getGradientExecutions(kernel_evaluator(currentParams), currentParams);
autodiff->compute(grad, {});
EXPECT_EQ(grad.size(), 2);
// Resolved circuit -> empty param list; energy returned via the out-param.
autodiff->derivative(kernel_evaluator(currentParams), {}, &energy);
std::cout << "Energy: " << energy << "; Grads = ";
for (const auto &g : grad) {
std::cout << g << " ";
}
std::cout << "\n";
}
std::cout << "Energy: " << energy << "\n";
// Known deuteron H3 ground-state energy (approx. -2.044) within tolerance.
EXPECT_NEAR(energy, -2.044, 0.1);
}
int main(int argc, char **argv) {
xacc::Initialize(argc, argv);
::testing::InitGoogleTest(&argc, argv);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment