ORNL Quantum Computing Institute / exatn

Commit 8338380c (unverified), authored Mar 31, 2021 by Dmitry I. Lyakh, committed via GitHub on Mar 31, 2021

Merge pull request #46 from tnguyen-ornl/tnguyen/cotengra-integration

Wrapper for Cotengra Contraction Sequence Optimizer

Parents: 42b531f9, 2910a893
Changes: 9 files
src/numerics/CMakeLists.txt
@@ -63,6 +63,12 @@ target_include_directories(${LIBRARY_NAME}
                            ${CMAKE_SOURCE_DIR}/src/exatn)

target_link_libraries(${LIBRARY_NAME} PUBLIC ExaTensor::ExaTensor exatn-utils metis GKlib)

find_package(Python COMPONENTS Interpreter Development)
if(Python_FOUND)
  # Link to the Cotengra wrapper as well if it was built (Python found)
  target_link_libraries(${LIBRARY_NAME} PRIVATE exatn-cotengra-optimizer)
endif()

exatn_configure_plugin_rpath(${LIBRARY_NAME})
add_dependencies(${LIBRARY_NAME} exatensor-build)

@@ -75,3 +81,5 @@ file (GLOB HEADERS *.hpp)
install(FILES ${HEADERS} DESTINATION include/exatn)
install(TARGETS ${LIBRARY_NAME} DESTINATION lib)
add_subdirectory(cotengra)
\ No newline at end of file
src/numerics/contraction_seq_optimizer.hpp
@@ -31,6 +31,12 @@ struct ContrTriple{
 unsigned int right_id; //id of the right input tensor (old)
};

// Tensor slice info:
struct SliceIndex {
 unsigned int tensor_id; // id of the tensor
 unsigned int leg_id;    // leg id of the tensor that should be sliced
};

class TensorNetwork;
class MetisGraph;
...
@@ -59,6 +65,20 @@ public:
                                             std::list<ContrTriple> & contr_seq,
                                             std::function<unsigned int ()> intermediate_num_generator) = 0;

 /** Determines the pseudo-optimal tensor contraction sequence required for
     evaluating a given tensor network, with slicing:
     - target_slice_size (in bytes): slice until the largest tensor is at most this size;
     - slice_inds: list of slice indices (tensor id and leg id pairs).
     If a ContractionSeqOptimizer subclass does not explicitly implement this
     method, it falls back to the non-slicing method above. **/
 virtual double determineContractionSequence(const TensorNetwork & network,
                                             std::list<ContrTriple> & contr_seq,
                                             std::function<unsigned int ()> intermediate_num_generator,
                                             uint64_t target_slice_size,
                                             std::list<SliceIndex> & slice_inds)
 {
   return determineContractionSequence(network, contr_seq, intermediate_num_generator);
 }

 /** Caches the determined pseudo-optimal tensor contraction sequence for a given
     tensor network for later retrieval with the same tensor network. Returns TRUE
     on success, FALSE if this tensor network has already been cached. **/
...
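To make the new interface concrete, here is a minimal usage sketch of the slicing-aware overload declared above. It is hypothetical: the optimizer and network are assumed to be constructed elsewhere, and next_id is an illustrative counter for naming intermediate tensors.

// Hypothetical sketch: invoking the slicing-aware overload declared above.
#include "contraction_seq_optimizer.hpp"
#include <cstdint>
#include <list>

double planWithSlicing(exatn::numerics::ContractionSeqOptimizer & optimizer,
                       const exatn::numerics::TensorNetwork & network)
{
  std::list<exatn::numerics::ContrTriple> contr_seq; // output: pairwise contraction steps
  std::list<exatn::numerics::SliceIndex> slice_inds; // output: (tensor id, leg id) pairs to slice
  unsigned int next_id = 1000; // illustrative generator state for intermediate tensor ids
  const uint64_t target_slice_size = 1ULL << 30; // slice until the largest tensor is <= 1 GiB
  // Returns the estimated FLOP cost of the best contraction order found:
  return optimizer.determineContractionSequence(
      network, contr_seq, [&]() -> unsigned int { return ++next_id; },
      target_slice_size, slice_inds);
}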
src/numerics/contraction_seq_optimizer_factory.cpp
@@ -5,6 +5,7 @@ Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/

#include "contraction_seq_optimizer_factory.hpp"
#include "cotengra/contraction_seq_optimizer_cotengra.hpp"

namespace exatn {
...
@@ -16,6 +17,7 @@ ContractionSeqOptimizerFactory::ContractionSeqOptimizerFactory()
 registerContractionSeqOptimizer("heuro", &ContractionSeqOptimizerHeuro::createNew);
 registerContractionSeqOptimizer("greed", &ContractionSeqOptimizerGreed::createNew);
 registerContractionSeqOptimizer("metis", &ContractionSeqOptimizerMetis::createNew);
 registerContractionSeqOptimizer("cotengra", &ContractionSeqOptimizerCotengra::createNew);
}

void ContractionSeqOptimizerFactory::registerContractionSeqOptimizer(const std::string & name,
...
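Once registered under the "cotengra" name, the optimizer can be fetched through ExaTN's service registry, as the new CotengraTester below also demonstrates. A brief sketch, assuming exatn::getService returns a shared pointer (consistent with its arrow-operator usage in the test):

// Sketch: looking up the newly registered optimizer by name.
#include "contraction_seq_optimizer.hpp"
#include "exatn.hpp"

auto getCotengraOptimizer()
{
  // Resolves the plugin registered above under the "cotengra" key:
  return exatn::getService<exatn::numerics::ContractionSeqOptimizer>("cotengra");
}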
src/numerics/cotengra/CMakeLists.txt (new file)
# Find Python
find_package(Python COMPONENTS Interpreter Development)
if(Python_FOUND)
  set(LIBRARY_NAME exatn-cotengra-optimizer)
  get_filename_component(PYTHON_LIB_NAME ${Python_LIBRARIES} NAME)
  configure_file(py-cotengra-optimizer.in.cpp
                 ${CMAKE_BINARY_DIR}/src/numerics/cotengra/py-cotengra-optimizer.cpp)
  file(GLOB SRC ${CMAKE_BINARY_DIR}/src/numerics/cotengra/py-cotengra-optimizer.cpp)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing -O2 -g -pipe -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wformat -fexceptions --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=native -D_GNU_SOURCE -fPIC -fwrapv")
  usfunctiongetresourcesource(TARGET ${LIBRARY_NAME} OUT SRC)
  usfunctiongeneratebundleinit(TARGET ${LIBRARY_NAME} OUT SRC)
  add_library(${LIBRARY_NAME} SHARED ${SRC})
  target_include_directories(${LIBRARY_NAME} PUBLIC . .. ${Python_INCLUDE_DIRS}
                             ${CMAKE_SOURCE_DIR}/tpls/pybind11/include
                             ${CMAKE_SOURCE_DIR}/src/exatn)
  target_link_libraries(${LIBRARY_NAME} PRIVATE CppMicroServices Python::Python exatn-utils)
  set(_bundle_name exatn_cotengra_optimizer)
  set_target_properties(${LIBRARY_NAME} PROPERTIES
                        COMPILE_DEFINITIONS US_BUNDLE_NAME=${_bundle_name}
                        US_BUNDLE_NAME ${_bundle_name})
  usfunctionembedresources(TARGET ${LIBRARY_NAME}
                           WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
                           FILES manifest.json)
  exatn_configure_plugin_rpath(${LIBRARY_NAME})
  install(TARGETS ${LIBRARY_NAME} DESTINATION ${CMAKE_INSTALL_PREFIX}/plugins)
  install(TARGETS ${LIBRARY_NAME} DESTINATION lib)
  # if(EXATN_BUILD_TESTS)
  #   add_subdirectory(tests)
  # endif()
else()
  message(STATUS "${BoldYellow}Python Interpreter or Libs not found, skipping Cotengra Plugin Build.${ColorReset}")
endif()
\ No newline at end of file
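One detail worth noting: get_filename_component stores the bare libpython file name in PYTHON_LIB_NAME, and configure_file substitutes it into the @PYTHON_LIB_NAME@ placeholder inside py-cotengra-optimizer.in.cpp (shown further below). A minimal sketch of what the generated code effectively does, with "libpython3.8.so" as a purely hypothetical substitution result:

// Sketch of the substituted dlopen call in the generated wrapper source.
#include <dlfcn.h>

void *loadLibPythonGlobally()
{
  // RTLD_GLOBAL re-exports libpython's symbols so that Python extension
  // modules imported later (networkx, opt_einsum, cotengra) can resolve them.
  return dlopen("libpython3.8.so", RTLD_LAZY | RTLD_GLOBAL);
}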
src/numerics/cotengra/contraction_seq_optimizer_cotengra.hpp (new file)
#ifndef EXATN_NUMERICS_CONTRACTION_SEQ_OPTIMIZER_COTENGRA_HPP_
#define EXATN_NUMERICS_CONTRACTION_SEQ_OPTIMIZER_COTENGRA_HPP_

#include "contraction_seq_optimizer.hpp"
#include "Identifiable.hpp"

namespace exatn {
namespace numerics {

class ContractionSeqOptimizerCotengra : public ContractionSeqOptimizer, public Identifiable {
public:
 virtual double determineContractionSequence(const TensorNetwork & network,
                                             std::list<ContrTriple> & contr_seq,
                                             std::function<unsigned int ()> intermediate_num_generator,
                                             uint64_t target_slice_size,
                                             std::list<SliceIndex> & slice_inds) override;

 virtual double determineContractionSequence(const TensorNetwork & network,
                                             std::list<ContrTriple> & contr_seq,
                                             std::function<unsigned int ()> intermediate_num_generator) override
 {
   // Default slice size is 2 GiB:
   constexpr uint64_t DEFAULT_SLICE_SIZE = 2 * (1ULL << 30);
   // The slice_inds result is ignored when using this overload:
   std::list<SliceIndex> ignored_slice_inds;
   return determineContractionSequence(network, contr_seq, intermediate_num_generator,
                                       DEFAULT_SLICE_SIZE, ignored_slice_inds);
 }

 const std::string name() const override { return "cotengra"; }
 const std::string description() const override { return ""; }

 static std::unique_ptr<ContractionSeqOptimizer> createNew();
};

} // namespace numerics
} // namespace exatn

#endif // EXATN_NUMERICS_CONTRACTION_SEQ_OPTIMIZER_COTENGRA_HPP_
src/numerics/cotengra/manifest.json (new file)
{
  "bundle.symbolic_name" : "exatn_cotengra_optimizer",
  "bundle.activator" : true,
  "bundle.name" : "ExaTN Runtime wrapper of cotengra library",
  "bundle.description" : ""
}
\ No newline at end of file
src/numerics/cotengra/py-cotengra-optimizer.in.cpp (new file)
#include "contraction_seq_optimizer.hpp"
#include "contraction_seq_optimizer_cotengra.hpp"
#include "cppmicroservices/BundleActivator.h"
#include "cppmicroservices/BundleContext.h"
#include "tensor_network.hpp"
#include <dlfcn.h>
#include <pybind11/embed.h>
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
using
namespace
cppmicroservices
;
using
namespace
pybind11
::
literals
;
namespace
py
=
pybind11
;
namespace
exatn
{
namespace
numerics
{
double
ContractionSeqOptimizerCotengra
::
determineContractionSequence
(
const
TensorNetwork
&
network
,
std
::
list
<
ContrTriple
>
&
contr_seq
,
std
::
function
<
unsigned
int
()
>
intermediate_num_generator
,
uint64_t
target_slice_size
,
std
::
list
<
SliceIndex
>
&
slice_inds
)
{
static
bool
initialized
=
false
;
static
std
::
shared_ptr
<
pybind11
::
scoped_interpreter
>
guard
;
static
void
*
libpython_handle
=
nullptr
;
if
(
!
initialized
)
{
guard
=
std
::
make_shared
<
py
::
scoped_interpreter
>
();
libpython_handle
=
dlopen
(
"@PYTHON_LIB_NAME@"
,
RTLD_LAZY
|
RTLD_GLOBAL
);
initialized
=
true
;
}
const
auto
time_beg
=
std
::
chrono
::
high_resolution_clock
::
now
();
// network.printIt();
auto
nx
=
py
::
module
::
import
(
"networkx"
);
auto
oe
=
py
::
module
::
import
(
"opt_einsum"
);
auto
ctg
=
py
::
module
::
import
(
"cotengra"
);
auto
graph
=
nx
.
attr
(
"Graph"
)();
auto
tensor_rank_map
=
py
::
dict
();
for
(
auto
it
=
network
.
cbegin
();
it
!=
network
.
cend
();
++
it
)
{
const
auto
tensorId
=
it
->
first
;
auto
tensor_conn
=
it
->
second
;
// std::cout << "Tensor ID: " << tensorId << "\n";
graph
.
attr
(
"add_node"
)(
tensorId
);
auto
legs
=
tensor_conn
.
getTensorLegs
();
tensor_rank_map
[
py
::
int_
(
tensorId
)]
=
tensor_conn
.
getDimExtents
();
if
(
tensorId
>
0
)
{
for
(
const
auto
&
leg
:
legs
)
{
graph
.
attr
(
"add_edge"
)(
tensorId
,
leg
.
getTensorId
());
}
}
}
// py::print(tensor_rank_map);
auto
globals
=
py
::
globals
();
globals
[
"network_gragh"
]
=
graph
;
globals
[
"tensor_rank_data"
]
=
tensor_rank_map
;
globals
[
"oe"
]
=
oe
;
globals
[
"ctg"
]
=
ctg
;
{
auto
py_src
=
R"#(
graph = globals()['network_gragh']
edge2ind_map = {tuple(sorted(e)): oe.get_symbol(i) for i, e in enumerate(graph.edges)}
)#"
;
auto
locals
=
py
::
dict
();
py
::
exec
(
py_src
,
py
::
globals
(),
locals
);
// py::print(locals["edge2ind_map"]);
globals
[
"edge2ind"
]
=
locals
[
"edge2ind_map"
];
}
auto
py_src
=
R"#(
graph = globals()['network_gragh']
rank_data = globals()['tensor_rank_data']
shape_map = {}
for tensor_id in rank_data:
shape_map[tensor_id] = tuple(rank_data[tensor_id])
from opt_einsum.contract import Shaped
inputs = []
output = {}
node_list = []
for nd in graph.nodes:
if nd == 0:
output = {edge2ind[tuple(sorted(e))] for e in graph.edges(nd)}
else:
inputs.append({edge2ind[tuple(sorted(e))] for e in graph.edges(nd)})
node_list.append(int(nd))
eq = (",".join(["".join(i) for i in inputs]) + "->{}".format("".join(output)))
shapes = []
for nd in graph.nodes:
if nd != 0:
shapes.append(shape_map[nd])
views = list(map(Shaped, shapes))
)#"
;
// Execute and get the fixed expectation value.
auto
locals
=
py
::
dict
();
py
::
exec
(
py_src
,
py
::
globals
(),
locals
);
auto
eq
=
locals
[
"eq"
];
auto
arrays
=
locals
[
"views"
];
locals
[
"target_size"
]
=
target_slice_size
;
// Cotengra number of iterations
const
int
max_repeats
=
1000
;
locals
[
"max_repeats"
]
=
max_repeats
;
// Create Cotengra HyperOptimizer:
// Note: this config is for Sycamore circuits.
py
::
exec
(
R"#(
opt = ctg.HyperOptimizer(slicing_reconf_opts={'target_size': locals()['target_size']}, max_repeats=locals()['max_repeats'], parallel='auto', progbar=True))#"
,
py
::
globals
(),
locals
);
// auto optimizer = ctg.attr("HyperOptimizer")();
auto
optimizer
=
locals
[
"opt"
];
locals
[
"optimizer"
]
=
optimizer
;
locals
[
"arrays"
]
=
arrays
;
py
::
exec
(
R"#(path_list, path_info = oe.contract_path(eq, *arrays, optimize=optimizer))#"
,
py
::
globals
(),
locals
);
auto
contract_path
=
locals
[
"path_list"
];
// py::print(contract_path);
auto
path_vec
=
contract_path
.
cast
<
std
::
vector
<
std
::
pair
<
int
,
int
>>>
();
auto
node_list
=
locals
[
"node_list"
].
cast
<
std
::
vector
<
int
>>
();
const
auto
remove_index
=
[](
std
::
vector
<
int
>
&
vector
,
const
std
::
vector
<
int
>
&
to_remove
)
{
auto
vector_base
=
vector
.
begin
();
std
::
vector
<
int
>::
size_type
down_by
=
0
;
for
(
auto
iter
=
to_remove
.
cbegin
();
iter
<
to_remove
.
cend
();
iter
++
,
down_by
++
)
{
std
::
vector
<
int
>::
size_type
next
=
(
iter
+
1
==
to_remove
.
cend
()
?
vector
.
size
()
:
*
(
iter
+
1
));
std
::
move
(
vector_base
+
*
iter
+
1
,
vector_base
+
next
,
vector_base
+
*
iter
-
down_by
);
}
vector
.
resize
(
vector
.
size
()
-
to_remove
.
size
());
};
contr_seq
.
clear
();
for
(
const
auto
&
path_pair
:
path_vec
)
{
const
auto
lhs_node
=
node_list
[
path_pair
.
first
];
const
auto
rhs_node
=
node_list
[
path_pair
.
second
];
// Remove these 2 nodes:
remove_index
(
node_list
,
{
path_pair
.
first
,
path_pair
.
second
});
const
auto
intermediate_tensor_id
=
intermediate_num_generator
();
// std::cout << "Contract: " << lhs_node << " and " << rhs_node << " --> "
// << intermediate_tensor_id << "\n";
node_list
.
emplace_back
(
intermediate_tensor_id
);
ContrTriple
contrTriple
;
contrTriple
.
result_id
=
intermediate_tensor_id
;
contrTriple
.
left_id
=
lhs_node
;
contrTriple
.
right_id
=
rhs_node
;
contr_seq
.
emplace_back
(
contrTriple
);
}
auto
&
lastSeq
=
contr_seq
.
back
();
lastSeq
.
result_id
=
0
;
auto
tree
=
optimizer
.
attr
(
"best"
)[
"tree"
];
// py::print(tree);
const
double
flops
=
tree
.
attr
(
"contraction_cost"
)().
cast
<
double
>
();
// std::cout << "Contraction cost: " << flops << "\n";
const
auto
time_end
=
std
::
chrono
::
high_resolution_clock
::
now
();
const
auto
time_total
=
std
::
chrono
::
duration_cast
<
std
::
chrono
::
duration
<
double
>>
(
time_end
-
time_beg
);
std
::
cout
<<
"#DEBUG(ContractionSeqOptimizerCotengra): Done ("
<<
time_total
.
count
()
<<
" sec): "
<<
flops
<<
" flops.
\n
"
;
auto
slice_ids
=
tree
.
attr
(
"sliced_inds"
);
// py::print(slice_ids);
auto
iter
=
py
::
iter
(
slice_ids
);
std
::
vector
<
std
::
pair
<
int
,
int
>>
slice_edges
;
while
(
iter
!=
py
::
iterator
::
sentinel
())
{
locals
[
"sliceIdx"
]
=
*
iter
;
py
::
exec
(
R"#(slice_edge = list(edge2ind.keys())[list(edge2ind.values()).index(locals()['sliceIdx'])])#"
,
py
::
globals
(),
locals
);
const
auto
slice_edge
=
locals
[
"slice_edge"
].
cast
<
std
::
pair
<
int
,
int
>>
();
// std::cout << slice_edge.first << ":" << slice_edge.second << "\n";
slice_edges
.
emplace_back
(
slice_edge
);
++
iter
;
}
for
(
const
auto
&
slice_edge
:
slice_edges
)
{
const
int
lhs_tensor_id
=
slice_edge
.
first
;
const
int
rhs_tensor_id
=
slice_edge
.
second
;
const
auto
lhsTensor
=
network
.
getTensor
(
lhs_tensor_id
);
const
auto
rhsTensor
=
network
.
getTensor
(
rhs_tensor_id
);
// lhsTensor->printIt();
// std::cout << "\n";
// rhsTensor->printIt();
const
auto
lhsTensorLegs
=
*
(
network
.
getTensorConnections
(
lhs_tensor_id
));
const
auto
rhsTensorLegs
=
*
(
network
.
getTensorConnections
(
rhs_tensor_id
));
bool
foundLeg
=
false
;
for
(
const
auto
&
lhsTensorLeg
:
lhsTensorLegs
)
{
if
(
lhsTensorLeg
.
getTensorId
()
==
rhs_tensor_id
)
{
foundLeg
=
true
;
SliceIndex
slice_index
;
slice_index
.
tensor_id
=
lhsTensorLeg
.
getTensorId
();
slice_index
.
leg_id
=
lhsTensorLeg
.
getDimensionId
();
slice_inds
.
emplace_back
(
slice_index
);
// std::cout << "Tensor ID: " << slice_index.tensor_id
// << "; Leg: " << slice_index.leg_id << "\n";
break
;
}
}
assert
(
foundLeg
);
foundLeg
=
false
;
for
(
const
auto
&
rhsTensorLeg
:
rhsTensorLegs
)
{
if
(
rhsTensorLeg
.
getTensorId
()
==
lhs_tensor_id
)
{
foundLeg
=
true
;
SliceIndex
slice_index
;
slice_index
.
tensor_id
=
rhsTensorLeg
.
getTensorId
();
slice_index
.
leg_id
=
rhsTensorLeg
.
getDimensionId
();
slice_inds
.
emplace_back
(
slice_index
);
// std::cout << "Tensor ID: " << slice_index.tensor_id
// << "; Leg: " << slice_index.leg_id << "\n";
break
;
}
}
assert
(
foundLeg
);
}
return
flops
;
}
std
::
unique_ptr
<
ContractionSeqOptimizer
>
ContractionSeqOptimizerCotengra
::
createNew
()
{
return
std
::
make_unique
<
ContractionSeqOptimizerCotengra
>
();
}
}
// namespace numerics
}
// namespace exatn
namespace
{
/**
*/
class
US_ABI_LOCAL
CotengraActivator
:
public
BundleActivator
{
public:
CotengraActivator
()
{}
void
Start
(
BundleContext
context
)
{
context
.
RegisterService
<
exatn
::
numerics
::
ContractionSeqOptimizer
>
(
std
::
make_shared
<
exatn
::
numerics
::
ContractionSeqOptimizerCotengra
>
());
}
void
Stop
(
BundleContext
/*context*/
)
{}
};
}
// namespace
CPPMICROSERVICES_EXPORT_BUNDLE_ACTIVATOR
(
CotengraActivator
)
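Besides service-registry lookup, the wrapper can be selected globally for all subsequent contraction-order searches, as the checkEvaluate test below does. A minimal sketch, assuming the usual exatn::initialize()/exatn::finalize() bootstrap calls:

// Sketch: selecting the Cotengra optimizer globally.
#include "exatn.hpp"

int main()
{
  exatn::initialize(); // assumed ExaTN bootstrap
  // Route subsequent contraction-sequence searches to the Cotengra plugin:
  exatn::resetContrSeqOptimizer("cotengra");
  // ... build and evaluate tensor networks here ...
  exatn::finalize();
  return 0;
}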
src/numerics/cotengra/tests/CMakeLists.txt (new file)
exatn_add_test(CotengraTester CotengraTester.cpp)
target_link_libraries(CotengraTester PRIVATE exatn-runtime exatn-numerics exatn)
src/numerics/cotengra/tests/CotengraTester.cpp (new file)
#include "contraction_seq_optimizer.hpp"
#include "exatn.hpp"
#include "talshxx.hpp"
#include <gtest/gtest.h>
TEST
(
CotengraTester
,
checkContractPath
)
{
using
exatn
::
Tensor
;
using
exatn
::
TensorElementType
;
using
exatn
::
TensorNetwork
;
using
exatn
::
TensorShape
;
const
unsigned
int
num_qubits
=
53
;
std
::
vector
<
std
::
pair
<
unsigned
int
,
unsigned
int
>>
sycamore_8_cnot
{
{
1
,
4
},
{
3
,
7
},
{
5
,
9
},
{
6
,
13
},
{
8
,
15
},
{
10
,
17
},
{
12
,
21
},
{
14
,
23
},
{
16
,
25
},
{
18
,
27
},
{
20
,
30
},
{
22
,
32
},
{
24
,
34
},
{
26
,
36
},
{
29
,
37
},
{
31
,
39
},
{
33
,
41
},
{
35
,
43
},
{
38
,
44
},
{
40
,
46
},
{
42
,
48
},
{
45
,
49
},
{
47
,
51
},
{
50
,
52
},
{
0
,
3
},
{
2
,
6
},
{
4
,
8
},
{
7
,
14
},
{
9
,
16
},
{
11
,
20
},
{
13
,
22
},
{
15
,
24
},
{
17
,
26
},
{
19
,
29
},
{
21
,
31
},
{
23
,
33
},
{
25
,
35
},
{
30
,
38
},
{
32
,
40
},
{
34
,
42
},
{
39
,
45
},
{
41
,
47
},
{
46
,
50
}};
std
::
cout
<<
"Building the circuit ...
\n
"
;
TensorNetwork
circuit
(
"Sycamore8_CNOT"
);
unsigned
int
tensor_counter
=
0
;
// Left qubit tensors:
unsigned
int
first_q_tensor
=
tensor_counter
+
1
;
for
(
unsigned
int
i
=
0
;
i
<
num_qubits
;
++
i
)
{
bool
success
=
circuit
.
appendTensor
(
++
tensor_counter
,
std
::
make_shared
<
Tensor
>
(
"Q"
+
std
::
to_string
(
i
),
TensorShape
{
2
}),
{});
assert
(
success
);
}
unsigned
int
last_q_tensor
=
tensor_counter
;
// CNOT gates:
auto
cnot
=
std
::
make_shared
<
Tensor
>
(
"CNOT"
,
TensorShape
{
2
,
2
,
2
,
2
});
for
(
unsigned
int
i
=
0
;
i
<
sycamore_8_cnot
.
size
();
++
i
)
{
bool
success
=
circuit
.
appendTensorGate
(
++
tensor_counter
,
cnot
,
{
sycamore_8_cnot
[
i
].
first
,
sycamore_8_cnot
[
i
].
second
});
assert
(
success
);
}
// Right qubit tensors:
unsigned
int
first_p_tensor
=
tensor_counter
+
1
;
for
(
unsigned
int
i
=
0
;
i
<
num_qubits
;
++
i
)
{
bool
success
=
circuit
.
appendTensor
(
++
tensor_counter
,
std
::
make_shared
<
Tensor
>
(
"P"
+
std
::
to_string
(
i
),
TensorShape
{
2
}),
{{
0
,
0
}});
assert
(
success
);
}
circuit
.
printIt
();
auto
cotengra
=
exatn
::
getService
<
exatn
::
numerics
::
ContractionSeqOptimizer
>
(
"cotengra"
);
std
::
list
<
exatn
::
numerics
::
ContrTriple
>
results
;
cotengra
->
determineContractionSequence
(
circuit
,
results
,
[
&
]()
->
unsigned
int
{
return
++
tensor_counter
;
});
for
(
const
auto
&
ctrTrip
:
results
)
{
std
::
cout
<<
"Contract: "
<<
ctrTrip
.
left_id
<<
" and "
<<
ctrTrip
.
right_id
<<
" --> "
<<
ctrTrip
.
result_id
<<
"
\n
"
;
}
}
TEST
(
CotengraTester
,
checkEvaluate
)
{
using
exatn
::
Tensor
;
using
exatn
::
TensorElementType
;
using
exatn
::
TensorNetwork
;
using
exatn
::
TensorShape
;
// Use cotengra
exatn
::
resetContrSeqOptimizer
(
"cotengra"
);
// Quantum Circuit:
// Q0----H---------
// Q1----H----C----
// Q2----H----N----
// Define the initial qubit state vector:
std
::
vector
<
std
::
complex
<
double
>>
qzero
{{
1.0
,
0.0
},
{
0.0
,
0.0
}};
// Define quantum gates:
std
::
vector
<
std
::
complex
<
double
>>
hadamard
{
{
1.0
,
0.0
},
{
1.0
,
0.0
},
{
1.0
,
0.0
},
{
-
1.0
,
0.0
}};
std
::
vector
<
std
::
complex
<
double
>>
identity
{
{
1.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
1.0
,
0.0
}};
std
::
vector
<
std
::
complex
<
double
>>
cnot
{
{
1.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
1.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
1.0
,
0.0
},
{
0.0
,
0.0
},
{
0.0
,
0.0
},
{
1.0
,
0.0
},
{
0.0
,
0.0
}};
// Create qubit tensors:
auto
created
=
false
;
created
=
exatn
::
createTensor
(
"Q0"
,
TensorElementType
::
COMPLEX64
,
TensorShape
{
2
});
assert
(
created
);
created
=
exatn
::
createTensor
(
"Q1"
,
TensorElementType
::
COMPLEX64
,
TensorShape
{
2
});
assert
(
created
);
created
=
exatn
::
createTensor
(
"Q2"
,
TensorElementType
::
COMPLEX64
,
TensorShape
{
2
});
assert
(
created
);
// Create gate tensors:
auto
registered
=
false
;
created
=
exatn
::
createTensor
(
"H"
,
TensorElementType
::
COMPLEX64
,
TensorShape
{
2
,
2
});
assert
(
created
);
registered
=
exatn
::
registerTensorIsometry
(
"H"
,
{
0
},
{
1
});
assert
(
registered
);
created
=
exatn
::
createTensor
(
"I"
,
TensorElementType
::
COMPLEX64
,
TensorShape
{
2
,
2
});
assert
(
created
);
registered
=
exatn
::
registerTensorIsometry
(
"I"
,
{
0
},
{
1
});
assert
(
registered
);
created
=
exatn
::
createTensor
(
"CNOT"
,
TensorElementType
::
COMPLEX64
,
TensorShape
{
2
,
2
,
2
,
2
});
assert
(
created
);
registered
=
exatn
::
registerTensorIsometry
(
"CNOT"
,
{
0
,
1
},
{
2
,
3
});
assert
(
registered
);
// Initialize qubit tensors to zero state:
auto
initialized
=
false
;
initialized
=
exatn
::
initTensorData
(
"Q0"
,
qzero
);
assert
(
initialized
);
initialized
=
exatn
::
initTensorData
(
"Q1"
,
qzero
);
assert
(
initialized
);
initialized
=
exatn
::
initTensorData
(
"Q2"
,
qzero
);
assert
(
initialized
);
// Initialize necessary gate tensors:
initialized
=
exatn
::
initTensorData
(
"H"
,
hadamard
);
assert
(
initialized
);
initialized
=
exatn
::
initTensorData
(
"CNOT"
,
cnot
);
assert
(
initialized
);
initialized
=
exatn
::
initTensorData
(
"I"
,
identity
);
assert
(
initialized
);
{
// Open a new scope:
// Build a tensor network from the quantum circuit:
TensorNetwork
circuit
(
"QuantumCircuit"
);
auto
appended
=
false
;
appended
=
circuit
.
appendTensor
(
1
,
exatn
::
getTensor
(
"Q0"
),
{});
assert
(
appended
);
appended
=
circuit
.
appendTensor
(
2
,
exatn
::
getTensor
(
"Q1"
),
{});
assert
(
appended
);
appended
=
circuit
.
appendTensor
(
3
,
exatn
::
getTensor
(
"Q2"
),
{});
assert
(
appended
);
appended
=
circuit
.
appendTensorGate
(
4
,
exatn
::
getTensor
(
"H"
),
{
0
});
assert
(
appended
);
appended
=
circuit
.
appendTensorGate
(
5
,
exatn
::
getTensor
(
"CNOT"
),
{
1
,
0
});
assert
(
appended
);
appended
=
circuit
.
appendTensorGate
(
6
,
exatn
::
getTensor
(
"CNOT"
),
{
2
,
0
});
assert
(
appended
);
appended
=
circuit
.
appendTensorGate
(
7
,
exatn
::
getTensor
(
"I"
),
{
0
});
assert
(
appended
);
appended
=
circuit
.
appendTensorGate
(
8
,
exatn
::
getTensor
(
"I"
),
{
1
});
assert
(
appended
);
appended
=
circuit
.
appendTensorGate
(
9
,
exatn
::
getTensor
(
"I"
),
{
2
});
assert
(
appended
);
circuit
.
printIt