ORNL Quantum Computing Institute / xacc

Commit 1e1c65e3, authored May 10, 2017 by Mccaskey, Alex
Parent: 1f8e0ef5
Changes: 48

Started on QIR JSON persistence
quantum/CMakeLists.txt
@@ -29,6 +29,7 @@
#
#**********************************************************************************/
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/qir)
include_directories(${CMAKE_SOURCE_DIR}/tpls/rapidjson/include)
add_subdirectory(qir)
add_subdirectory(gate)
...
quantum/gate/accelerators/firetensoraccelerator/FireTensorAccelerator.cpp
@@ -222,11 +222,9 @@ void FireTensorAccelerator::execute(std::shared_ptr<AcceleratorBuffer> buffer,

    // Create a Visitor that will execute our lambdas when
    // we encounter one
//    auto visitor = std::make_shared<GateInstructionVisitor>(hadamard, cnot, x,
//            measure, z, cond);
    auto visitor = std::make_shared<FunctionalGateInstructionVisitor>(hadamard, cnot, x,
            measure, z, cond);

    // Our QIR is really a tree structure
    // so create a pre-order tree traversal
    // InstructionIterator to walk it
...
quantum/gate/accelerators/firetensoraccelerator/FireTensorAccelerator.hpp
@@ -70,9 +70,11 @@ protected:

    std::function<void(ConditionalFunction&)> condAction;

public:

    template<typename HF, typename CNF, typename XF, typename MF, typename ZF, typename CF>
    FunctionalGateInstructionVisitor(HF h, CNF cn, XF x, MF m, ZF z, CF c) :
            hAction(h), cnotAction(cn), xAction(x), zAction(z), measureAction(m), condAction(c) {
    }

    void visit(Hadamard& h) {
...
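The templated constructor above simply forwards six callables into the stored std::function members; only condAction's std::function<void(ConditionalFunction&)> type is visible in this hunk. A minimal sketch of how an accelerator might wire lambdas into it follows; the lambda parameter types and bodies are assumptions for illustration, not code from this commit:

    // Hypothetical wiring; only FunctionalGateInstructionVisitor, Hadamard,
    // CNOT and ConditionalFunction appear in this diff.
    auto hadamard = [](Hadamard& h) { /* apply H to the simulator state */ };
    auto cnot     = [](CNOT& cn) { /* apply CNOT */ };
    auto x        = [](auto& xGate) { /* apply X; exact gate type not shown in this hunk */ };
    auto measure  = [](auto& m) { /* measure the target qubit */ };
    auto z        = [](auto& zGate) { /* apply Z */ };
    auto cond     = [](ConditionalFunction& c) { /* enable instructions conditioned on the result */ };

    auto visitor = std::make_shared<FunctionalGateInstructionVisitor>(
            hadamard, cnot, x, measure, z, cond);

This mirrors the call made in FireTensorAccelerator::execute above.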
quantum/gate/gateqir/GateFunction.hpp
@@ -136,6 +136,7 @@
    }

    DEFINE_VISITABLE()

    /**
     * This method should simply be implemented to invoke the
     * visit() method on the provided QInstructionVisitor.
...
quantum/gate/gateqir/GateInstruction.hpp
@@ -122,6 +122,23 @@
        enabled = true;
    }

    DEFINE_VISITABLE()

//    virtual void serializeJson(PrettyWriter<StringBuffer> writer) {
//        writer.StartObject();
//        writer.String("gate");
//        writer.String(gateName.c_str());
//        writer.String("enabled");
//        writer.Bool(enabled);
//        writer.String("qubits");
//        writer.StartArray();
//        for (auto qi : bits()) {
//            writer.Int(qi);
//        }
//        writer.EndArray();
//        writer.EndObject();
//    }

    /**
     * The destructor
     */
...
quantum/gate/gateqir/GateQIR.cpp
@@ -32,12 +32,17 @@
#include <boost/algorithm/string.hpp>
#include <regex>
#include "rapidjson/prettywriter.h"

using namespace rapidjson;

namespace xacc {
namespace quantum {

-void GateQIR::generateGraph() {
-    // Local Declarations
-    auto flatQasmStr = toString();
+void GateQIR::generateGraph(const std::string& kernelName) {
+    auto flatQasmStr = toAssemblyString(kernelName, "qreg");

    std::map<std::string, int> qubitVarNameToId;
    std::vector<std::string> qasmLines;
...

@@ -233,21 +238,45 @@ bool GateQIR::incrementLayer(const std::vector<std::string>& gateCommand,
}

-std::string GateQIR::toString() {
-    int nQubits = buffer->size();
-    auto bufVarName = buffer->name();
+std::string GateQIR::toAssemblyString(const std::string& kernelName,
+        const std::string& accBufferVarName) {

    std::string retStr = "";

-    for (int i = 0; i < nQubits; i++) {
-        retStr += "qubit " + bufVarName + std::to_string(i) + "\n";
+    auto kernel = getKernel(kernelName);
+
+    std::set<int> qubitsUsed;
+    InstructionIterator it(kernel);
+    while (it.hasNext()) {
+        // Get the next node in the tree
+        auto nextInst = it.next();
+        // If enabled, invoke the accept
+        // method which kicks off the visitor
+        // to execute the appropriate lambda.
+        if (nextInst->isEnabled() && !nextInst->isComposite()) {
+            for (auto qi : nextInst->bits()) {
+                qubitsUsed.insert(qi);
+            }
+        }
+    }
+
+    for (auto qi : qubitsUsed) {
+        retStr += "qubit " + accBufferVarName + std::to_string(qi) + "\n";
    }

    for (auto f : kernels) {
-        retStr += f->toString(bufVarName);
+        retStr += f->toString(accBufferVarName);
    }

    return retStr;
}

void GateQIR::persist(std::ostream& outStream) {
-    write(outStream);
+    StringBuffer sb;
+    PrettyWriter<StringBuffer> writer(sb);
+    serializeJson(writer);
+    outStream << sb.GetString();
    return;
}

// FOR IR
...
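persist() now renders the IR through RapidJSON's SAX-style writer instead of the old graph write(). For reference, a minimal self-contained sketch of that StringBuffer/PrettyWriter pattern, mirroring the commented-out serializeJson() in GateInstruction.hpp above; the gate record here is illustrative, not XACC output:

    #include <iostream>
    #include "rapidjson/stringbuffer.h"
    #include "rapidjson/prettywriter.h"

    int main() {
        rapidjson::StringBuffer sb;
        rapidjson::PrettyWriter<rapidjson::StringBuffer> writer(sb);

        // In object context a String() call acts as the key,
        // and the following call supplies its value.
        writer.StartObject();
        writer.String("gate");
        writer.String("H");
        writer.String("enabled");
        writer.Bool(true);
        writer.String("qubits");
        writer.StartArray();
        writer.Int(0);
        writer.EndArray();
        writer.EndObject();

        std::cout << sb.GetString() << "\n"; // pretty-printed JSON text
        return 0;
    }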
quantum/gate/gateqir/GateQIR.hpp
@@ -32,6 +32,14 @@
#define QUANTUM_GATE_GATEQIR_HPP_

#include "QIR.hpp"
#include "GateInstruction.hpp"
#include "GateFunction.hpp"
#include "InstructionIterator.hpp"
#include "Hadamard.hpp"
#include "CNOT.hpp"

#define RAPIDJSON_HAS_STDSTRING 1

namespace xacc {
namespace quantum {
...

@@ -62,21 +70,69 @@ public:
    }
};

template<typename Writer>
class JsonSerializerGateVisitor:
        public BaseInstructionVisitor,
        public InstructionVisitor<GateFunction>,
        public InstructionVisitor<Hadamard>,
        public InstructionVisitor<CNOT> {

protected:

    Writer& writer;
    int nFuncInsts = 0;

public:

    JsonSerializerGateVisitor(Writer& w) : writer(w) {}

    void baseGateInst(GateInstruction& inst) {
        writer.StartObject();
        writer.String("gate");
        writer.String(inst.getName().c_str());
        writer.String("enabled");
        writer.Bool(inst.isEnabled());
        writer.String("qubits");
        writer.StartArray();
        for (auto qi : inst.bits()) {
            writer.Int(qi);
        }
        writer.EndArray();
        writer.EndObject();

        nFuncInsts--;
        if (nFuncInsts == 0) {
            endFunction();
        }
    }

    void visit(Hadamard& h) {
        baseGateInst(dynamic_cast<GateInstruction&>(h));
    }

    void visit(CNOT& cn) {
        baseGateInst(dynamic_cast<GateInstruction&>(cn));
    }

    void visit(GateFunction& function) {
        writer.StartObject();
        writer.String("function");
        writer.String(function.getName());
        writer.String("instructions");
        writer.StartArray();
        nFuncInsts = function.nInstructions();
    }

private:

    void endFunction() {
        writer.EndArray();
        writer.EndObject();
    }
};

/**
 * The GateQIR is an implementation of the QIR for gate model quantum
 * computing. It provides a Graph node type that models a quantum
 * circuit gate (CircuitNode).
 *
 */
-class GateQIR: public virtual xacc::quantum::QIR<xacc::quantum::CircuitNode> {
-
-protected:
-
-    /**
-     * Reference to the AcceleratorBuffer that this
-     * QIR operates on.
-     */
-    std::shared_ptr<AcceleratorBuffer> buffer;
+class GateQIR: public virtual QIR<xacc::quantum::CircuitNode> {

public:
...

@@ -86,35 +142,19 @@ public:
    GateQIR() {
    }

-    /**
-     * The constructor, takes an accelerator buffer at construction.
-     * @param buf
-     */
-    GateQIR(std::shared_ptr<AcceleratorBuffer> buf) : buffer(buf) {
-    }
-
-    /**
-     * Provide a new AcceleratorBuffer for this Gate QIR.
-     * @param buf
-     */
-    virtual void setAcceleratorBuffer(std::shared_ptr<AcceleratorBuffer> buf) {
-        buffer = buf;
-    }

    /**
     * This method takes the list of quantum instructions that this
     * QIR contains and creates a graph representation of the
     * quantum circuit.
     */
-    virtual void generateGraph();
+    virtual void generateGraph(const std::string& kernelName);

    /**
     * Return a string representation of this
     * intermediate representation
     * @return
     */
-    virtual std::string toString();
+    virtual std::string toAssemblyString(const std::string& kernelName,
+            const std::string& accBufferVarName);

    /**
     * Persist this IR instance to the given
...

@@ -150,6 +190,27 @@ public:
private:

    template<typename Writer>
    void serializeJson(Writer& writer) {
        std::string retStr = "";
        auto visitor = std::make_shared<JsonSerializerGateVisitor<Writer>>(writer);
        writer.StartArray();
        for (auto kernel : kernels) {
            InstructionIterator it(kernel);
            while (it.hasNext()) {
                // Get the next node in the tree
                auto nextInst = it.next();
                if (nextInst->isEnabled()) {
                    nextInst->accept(visitor);
                }
            }
        }
        writer.EndArray();
    }

    /**
     * This method determines if a new layer should be added to the circuit.
     *
...
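Putting the pieces together, serializeJson() should emit a top-level array with one object per kernel, each carrying the function name and an "instructions" array of gate records. For a kernel "foo" holding a Hadamard on qubit 1 and a CNOT on qubits 1 and 2, the output would look roughly like the following; the exact gate-name strings depend on what getName() returns, and the pretty-printed layout will differ:

    [
        {
            "function": "foo",
            "instructions": [
                { "gate": "H", "enabled": true, "qubits": [1] },
                { "gate": "CNOT", "enabled": true, "qubits": [1, 2] }
            ]
        }
    ]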
quantum/gate/gateqir/ParameterizedGateInstruction.hpp
@@ -102,6 +102,9 @@
        return str;
    }

    DEFINE_VISITABLE()

};

/**
 */
...
quantum/gate/gateqir/tests/GateQIRTester.cpp
@@ -51,14 +51,30 @@ BOOST_AUTO_TEST_CASE(checkCreationToString) {
            "CNOT qreg0,qreg1\n"
            "H qreg0\n";

-    auto buf = std::make_shared<AcceleratorBuffer>("qreg", 3);
-    auto qir = std::make_shared<GateQIR>(buf);
+    auto qir = std::make_shared<GateQIR>();
    auto f = std::make_shared<GateFunction>("foo");
    auto h = std::make_shared<Hadamard>(1);
    auto cn1 = std::make_shared<CNOT>(1, 2);
    auto cn2 = std::make_shared<CNOT>(0, 1);
    auto h2 = std::make_shared<Hadamard>(0);
    f->addInstruction(h);
    f->addInstruction(cn1);
    f->addInstruction(cn2);
    f->addInstruction(h2);
    qir->addKernel(f);

    BOOST_VERIFY(qir->toAssemblyString("foo", "qreg") == expectedQasm);
}

BOOST_AUTO_TEST_CASE(checkSerialization) {

    auto qir = std::make_shared<GateQIR>();
    auto f = std::make_shared<GateFunction>("foo");
    auto h = std::make_shared<Hadamard>(1);
    auto cn1 = std::make_shared<CNOT>(1, 2);
    auto cn2 = std::make_shared<CNOT>(0, 1);
    auto h2 = std::make_shared<Hadamard>(0);
    f->addInstruction(h);
    f->addInstruction(cn1);
    f->addInstruction(cn2);
...

@@ -66,7 +82,11 @@ BOOST_AUTO_TEST_CASE(checkCreationToString) {
    qir->addKernel(f);

-    BOOST_VERIFY(qir->toString() == expectedQasm);
+    std::stringstream ss;
+    qir->persist(ss);
+
+    std::cout << "HELLO:\n" << ss.str() << "\n";
}

BOOST_AUTO_TEST_CASE(checkReadGraph) {
...

@@ -207,8 +227,7 @@ BOOST_AUTO_TEST_CASE(checkReadGraph) {
}

BOOST_AUTO_TEST_CASE(checkGenerateGraph) {

-    auto buf = std::make_shared<AcceleratorBuffer>("qreg", 3);
-    auto qir = std::make_shared<GateQIR>(buf);
+    auto qir = std::make_shared<GateQIR>();
    auto f = std::make_shared<GateFunction>("foo");
    auto h = std::make_shared<Hadamard>(1);
...

@@ -222,10 +241,10 @@ BOOST_AUTO_TEST_CASE(checkGenerateGraph) {
    qir->addKernel(f);

-    qir->generateGraph();
+    qir->generateGraph("foo");

    std::stringstream ss;
-    qir->persist(ss);
+    qir->write(ss);

    std::string expected = "graph G {\n"
            "{\n"
...
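checkSerialization currently only prints the persisted JSON. Once the format settles, one way to tighten the test (a sketch, not part of this commit) is to parse the stream back with rapidjson::Document and assert on the structure sketched above:

    #include "rapidjson/document.h"

    // ...inside checkSerialization, after qir->persist(ss):
    rapidjson::Document doc;
    doc.Parse(ss.str().c_str());
    BOOST_VERIFY(!doc.HasParseError());
    BOOST_VERIFY(doc.IsArray());                      // serializeJson wraps the kernels in one array
    const rapidjson::Value& kernel = *doc.Begin();    // the "foo" kernel object
    BOOST_VERIFY(kernel["function"] == "foo");        // written by visit(GateFunction&)
    BOOST_VERIFY(kernel["instructions"].Size() == 4); // H, CNOT, CNOT, H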
quantum/qir/QIR.hpp
@@ -61,20 +61,11 @@
    QIR() {
    }

-    /**
-     * The constructor, takes the AcceleratorBuffer
-     * this IR works on.
-     * @param buf
-     */
-    QIR(std::shared_ptr<AcceleratorBuffer> buf) : IR(buf) {
-    }

    /**
     * From this IR's list of instructions, construct an
     * equivalent graph representation.
     */
-    virtual void generateGraph() = 0;
+    virtual void generateGraph(const std::string& kernelName) = 0;

    /**
     * Add a quantum function to this intermediate representation.
...
tpls/rapidjson/allocators.h
0 → 100644
// Tencent is pleased to support the open source community by making RapidJSON available.
//
// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
//
// Licensed under the MIT License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// http://opensource.org/licenses/MIT
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef RAPIDJSON_ALLOCATORS_H_
#define RAPIDJSON_ALLOCATORS_H_

#include "rapidjson.h"

RAPIDJSON_NAMESPACE_BEGIN

///////////////////////////////////////////////////////////////////////////////
// Allocator

/*! \class rapidjson::Allocator
    \brief Concept for allocating, resizing and freeing memory block.

    Note that Malloc() and Realloc() are non-static but Free() is static.

    So if an allocator need to support Free(), it needs to put its pointer in
    the header of memory block.

\code
concept Allocator {
    static const bool kNeedFree;    //!< Whether this allocator needs to call Free().

    // Allocate a memory block.
    // \param size of the memory block in bytes.
    // \returns pointer to the memory block.
    void* Malloc(size_t size);

    // Resize a memory block.
    // \param originalPtr The pointer to current memory block. Null pointer is permitted.
    // \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.)
    // \param newSize the new size in bytes.
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize);

    // Free a memory block.
    // \param pointer to the memory block. Null pointer is permitted.
    static void Free(void *ptr);
};
\endcode
*/

///////////////////////////////////////////////////////////////////////////////
// CrtAllocator

//! C-runtime library allocator.
/*! This class is just wrapper for standard C library memory routines.
    \note implements Allocator concept
*/
class CrtAllocator {
public:
    static const bool kNeedFree = true;
    void* Malloc(size_t size) {
        if (size) //  behavior of malloc(0) is implementation defined.
            return std::malloc(size);
        else
            return NULL; // standardize to returning NULL.
    }
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        (void)originalSize;
        if (newSize == 0) {
            std::free(originalPtr);
            return NULL;
        }
        return std::realloc(originalPtr, newSize);
    }
    static void Free(void *ptr) { std::free(ptr); }
};

///////////////////////////////////////////////////////////////////////////////
// MemoryPoolAllocator

//! Default memory allocator used by the parser and DOM.
/*! This allocator allocate memory blocks from pre-allocated memory chunks.

    It does not free memory blocks. And Realloc() only allocate new memory.

    The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default.

    User may also supply a buffer as the first chunk.

    If the user-buffer is full then additional chunks are allocated by BaseAllocator.

    The user-buffer is not deallocated by this allocator.

    \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator.
    \note implements Allocator concept
*/
template <typename BaseAllocator = CrtAllocator>
class MemoryPoolAllocator {
public:
    static const bool kNeedFree = false;    //!< Tell users that no need to call Free() with this allocator. (concept Allocator)

    //! Constructor with chunkSize.
    /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
    {
    }

    //! Constructor with user-supplied buffer.
    /*! The user buffer will be used firstly. When it is full, memory pool allocates new chunk with chunk size.

        The user buffer will not be deallocated when this allocator is destructed.

        \param buffer User supplied buffer.
        \param size Size of the buffer in bytes. It must at least larger than sizeof(ChunkHeader).
        \param chunkSize The size of memory chunk. The default is kDefaultChunkSize.
        \param baseAllocator The allocator for allocating memory chunks.
    */
    MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) :
        chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0)
    {
        RAPIDJSON_ASSERT(buffer != 0);
        RAPIDJSON_ASSERT(size > sizeof(ChunkHeader));
        chunkHead_ = reinterpret_cast<ChunkHeader*>(buffer);
        chunkHead_->capacity = size - sizeof(ChunkHeader);
        chunkHead_->size = 0;
        chunkHead_->next = 0;
    }

    //! Destructor.
    /*! This deallocates all memory chunks, excluding the user-supplied buffer.
    */
    ~MemoryPoolAllocator() {
        Clear();
        RAPIDJSON_DELETE(ownBaseAllocator_);
    }

    //! Deallocates all memory chunks, excluding the user-supplied buffer.
    void Clear() {
        while (chunkHead_ && chunkHead_ != userBuffer_) {
            ChunkHeader* next = chunkHead_->next;
            baseAllocator_->Free(chunkHead_);
            chunkHead_ = next;
        }
        if (chunkHead_ && chunkHead_ == userBuffer_)
            chunkHead_->size = 0; // Clear user buffer
    }

    //! Computes the total capacity of allocated memory chunks.
    /*! \return total capacity in bytes.
    */
    size_t Capacity() const {
        size_t capacity = 0;
        for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
            capacity += c->capacity;
        return capacity;
    }

    //! Computes the memory blocks allocated.
    /*! \return total used bytes.
    */
    size_t Size() const {
        size_t size = 0;
        for (ChunkHeader* c = chunkHead_; c != 0; c = c->next)
            size += c->size;
        return size;
    }

    //! Allocates a memory block. (concept Allocator)
    void* Malloc(size_t size) {
        if (!size)
            return NULL;

        size = RAPIDJSON_ALIGN(size);
        if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity)
            if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size))
                return NULL;

        void *buffer = reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size;
        chunkHead_->size += size;
        return buffer;
    }

    //! Resizes a memory block (concept Allocator)
    void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) {
        if (originalPtr == 0)
            return Malloc(newSize);

        if (newSize == 0)
            return NULL;

        originalSize = RAPIDJSON_ALIGN(originalSize);
        newSize = RAPIDJSON_ALIGN(newSize);

        // Do not shrink if new size is smaller than original
        if (originalSize >= newSize)
            return originalPtr;

        // Simply expand it if it is the last allocation and there is sufficient space
        if (originalPtr == reinterpret_cast<char *>(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) {
            size_t increment = static_cast<size_t>(newSize - originalSize);
            if (chunkHead_->size + increment <= chunkHead_->capacity) {
                chunkHead_->size += increment;
                return originalPtr
;