Commit a23edad2 authored by leonid

add python example too

parent 680fce2c
import xacc
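# Request the IBM accelerator. With 'backend' set to 'lowest-queue-count', XACC scans the
# available devices and picks the one with the shortest job queue (see the
# IBMAccelerator::selectBackend / processBackendCandidate changes further down in this commit);
# 'check-jobs-limit' appears to enable the jobs-limit filter (filterByJobsLimit / verifyJobsLimit
# below), skipping backends whose job limit has already been reached.
# Note: 'n-qubits': 5 is presumably the minimum qubit count required of the chosen device.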
qpu = xacc.getAccelerator(
    'ibm', {'shots': 256, 'backend': 'lowest-queue-count', 'n-qubits': 5, 'check-jobs-limit': True})
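# Inspect which backend was actually chosen; the 'total-json' property appears to hold the
# raw JSON description that the IBM API returned for the selected backend.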
print(qpu.getProperties()["total-json"])
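# Build a two-qubit Bell circuit named 'bell' in the XASM dialect: Hadamard on q[0],
# CNOT from q[0] to q[1], then measure both qubits.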
xacc.qasm('''.compiler xasm
.circuit bell
.qbit q
H(q[0]);
CX(q[0],q[1]);
Measure(q[0]);
Measure(q[1]);
''')
bell = xacc.getCompiled('bell')
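# Allocate a two-qubit buffer, run the compiled circuit on the selected IBM backend,
# and print the buffer holding the measurement results.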
q = xacc.qalloc(2)
qpu.execute(q, bell)
print(q)
@@ -16,6 +16,7 @@ target_link_libraries(bell_quil_ibm_local PRIVATE xacc)
add_executable(bell_xasm_ibm_local bell_xasm_ibm_local.cpp)
target_link_libraries(bell_xasm_ibm_local PRIVATE xacc)
add_executable(bell_xasm_ibm_select_backend bell_xasm_ibm_select_backend.cpp)
target_link_libraries(bell_xasm_ibm_select_backend PRIVATE xacc)
......
@@ -212,16 +212,16 @@ void IBMAccelerator::processBackendCandidate(nlohmann::json& backend_json) {
// Get current backend status
auto status_response = get(IBM_API_URL, getStatusPath, {},
{std::make_pair("version", "1"),
- std::make_pair("access_token", currentApiToken)});
+ std::make_pair("access_token", currentApiToken)});
auto status_response_json = json::parse(status_response);
auto queue_lenght = status_response_json["lengthQueue"].get<int>();
auto state = status_response_json["state"].get<bool>();
- if( state && (backend_queue_lenght < 0 || backend_queue_lenght > queue_lenght)) {
+ if (state && (selected_backend_queue_lenght < 0 || selected_backend_queue_lenght > queue_lenght)) {
if (filterByJobsLimit && !verifyJobsLimit(curr_backend)) {
return;
}
- backend_queue_lenght = queue_lenght;
+ selected_backend_queue_lenght = queue_lenght;
auto old_backend = backend;
backend = curr_backend;
availableBackends.clear();
@@ -231,7 +231,7 @@ void IBMAccelerator::processBackendCandidate(nlohmann::json& backend_json) {
void IBMAccelerator::selectBackend(std::vector<std::string>& all_available_backends) {
bool lowest_queue_backend = false;
- if( backend == "lowest-queue-count" ) {
+ if (backend == "lowest-queue-count") {
lowest_queue_backend = true;
}
@@ -246,8 +246,8 @@ void IBMAccelerator::selectBackend(std::vector<std::string>& all_available_backends) {
}
}
// Simple case: select by backend_name
- if( !lowest_queue_backend ) {
- if(b["backend_name"].get<std::string>() == backend) {
+ if (!lowest_queue_backend) {
+ if (b["backend_name"].get<std::string>() == backend) {
availableBackends.insert(std::make_pair(backend, b));
}
} else {
@@ -311,7 +311,7 @@ void IBMAccelerator::initialize(const HeterogeneousMap &params) {
get(IBM_API_URL, getBackendPropertiesPath, {},
{std::make_pair("version", "1"),
std::make_pair("access_token", currentApiToken)});
- //xacc::info("Backend property:\n" + backend_props_response);
+ xacc::info("Backend property:\n" + backend_props_response);
auto props = json::parse(backend_props_response);
backendProperties.insert({backend, props});
......
@@ -182,7 +182,7 @@ private:
int shots = 1024;
std::string backend = DEFAULT_IBM_BACKEND;
- int backend_queue_lenght = -1;
+ int selected_backend_queue_lenght = -1;
bool jobIsRunning = false;
std::string currentJobId = "";
......