ORNL Quantum Computing Institute / exatn / Commits

Commit f1ce5c88, authored Oct 13, 2021 by Dmitry I. Lyakh

Implemented tensor operator builders and converter to tensor expansion

Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent ccc58f0d

Changes: 9 files
src/exatn/tests/NumServerTester.cpp
@@ -18,7 +18,7 @@
 #include "errors.hpp"
 //Test activation:
-#define EXATN_TEST0
 /*
+#define EXATN_TEST0
 #define EXATN_TEST1
 #define EXATN_TEST2
 #define EXATN_TEST3
@@ -44,10 +44,11 @@
 #define EXATN_TEST23
 #define EXATN_TEST24
 #define EXATN_TEST25
 #define EXATN_TEST26
 */
 //#define EXATN_TEST27 //requires input file from source
 //#define EXATN_TEST28 //requires input file from source
-#define EXATN_TEST30
+#define EXATN_TEST29
+//#define EXATN_TEST30

 #ifdef EXATN_TEST0
@@ -3131,6 +3132,8 @@ TEST(NumServerTester, MCVQEHamiltonian) {
 const int num_sites = 8;
 const int bond_dim_lim = 1;
 const int max_bond_dim = std::min(static_cast<int>(std::pow(2,num_sites/2)),bond_dim_lim);
+const int arity = 2;
+const std::string tn_type = "MPS"; //MPS or TTN

 //Read the Hamiltonian in spin representation:
 auto hamiltonian_operator = exatn::quantum::readSpinHamiltonian("MCVQEHam",
...
@@ -3139,10 +3142,17 @@ TEST(NumServerTester, MCVQEHamiltonian) {
 hamiltonian_operator->printIt();

 //Create tensor network ansatz:
-auto mps_builder = exatn::getTensorNetworkBuilder("MPS");
-success = mps_builder->setParameter("max_bond_dim",max_bond_dim); assert(success);
+auto tn_builder = exatn::getTensorNetworkBuilder(tn_type);
+if(tn_type == "MPS"){
+ success = tn_builder->setParameter("max_bond_dim",max_bond_dim); assert(success);
+}else if(tn_type == "TTN"){
+ success = tn_builder->setParameter("max_bond_dim",max_bond_dim); assert(success);
+ success = tn_builder->setParameter("arity",arity); assert(success);
+}else{
+ assert(false);
+}
 auto ansatz_tensor = exatn::makeSharedTensor("AnsatzTensor",std::vector<int>(num_sites,2));
-auto ansatz_net = exatn::makeSharedTensorNetwork("Ansatz",ansatz_tensor,*mps_builder);
+auto ansatz_net = exatn::makeSharedTensorNetwork("Ansatz",ansatz_tensor,*tn_builder);
 ansatz_net->markOptimizableTensors([](const Tensor & tensor){return true;});
 auto ansatz = exatn::makeSharedTensorExpansion("Ansatz",ansatz_net,std::complex<double>{1.0,0.0});
 //ansatz->printIt(); //debug
...
@@ -3186,6 +3196,51 @@ TEST(NumServerTester, MCVQEHamiltonian) {
 }
 #endif

+#ifdef EXATN_TEST29
+TEST(NumServerTester, TensorOperatorReconstruction) {
+ using exatn::TensorShape;
+ using exatn::TensorSignature;
+ using exatn::Tensor;
+ using exatn::TensorComposite;
+ using exatn::TensorNetwork;
+ using exatn::TensorExpansion;
+ using exatn::TensorOperator;
+ using exatn::TensorElementType;
+
+ const auto TENS_ELEM_TYPE = TensorElementType::COMPLEX64;
+
+ //exatn::resetLoggingLevel(2,2); //debug
+
+ bool success = true;
+
+ const int num_sites = 8;
+ const int bond_dim_lim = 1;
+ const int max_bond_dim = std::min(static_cast<int>(std::pow(2,num_sites/2)),bond_dim_lim);
+ const int arity = 2;
+ const std::string tn_type = "MPS"; //MPS or TTN
+
+ auto tn_builder = exatn::getTensorNetworkBuilder(tn_type);
+ if(tn_type == "MPS"){
+  success = tn_builder->setParameter("max_bond_dim",max_bond_dim); assert(success);
+ }else if(tn_type == "TTN"){
+  success = tn_builder->setParameter("max_bond_dim",max_bond_dim); assert(success);
+  success = tn_builder->setParameter("arity",arity); assert(success);
+ }else{
+  assert(false);
+ }
+
+ //Build a tensor network operator:
+ auto ansatz_tensor = exatn::makeSharedTensor("TensorSpaceMap",std::vector<int>(num_sites*2,2));
+ auto ansatz_net = exatn::makeSharedTensorNetwork("Ansatz",ansatz_tensor,*tn_builder,true);
+ ansatz_net->printIt(); //debug
+
+ //Synchronize:
+ success = exatn::sync(); assert(success);
+ exatn::resetLoggingLevel(0,0);
+ //Grab a beer!
+}
+#endif
+
 #ifdef EXATN_TEST30
 TEST(NumServerTester, TensorComposite) {
  using exatn::TensorShape;
...
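Note (not part of the commit): the MPS/TTN builder-configuration block above is now duplicated between the MCVQEHamiltonian and TensorOperatorReconstruction tests. A small helper along the following lines could express it once; it only uses calls that already appear in the tests, while the function name and the includes are my own (the main header name "exatn.hpp" is assumed):

#include "exatn.hpp" //assumed to be the main ExaTN header used by the test suite
#include <cassert>
#include <string>

//Hypothetical helper: configure an MPS or TTN tensor network builder the way both tests do.
inline auto makeConfiguredBuilder(const std::string & tn_type, int max_bond_dim, int arity)
{
 auto tn_builder = exatn::getTensorNetworkBuilder(tn_type);
 bool success = tn_builder->setParameter("max_bond_dim",max_bond_dim); assert(success);
 if(tn_type == "TTN"){
  success = tn_builder->setParameter("arity",arity); assert(success);
 }else{
  assert(tn_type == "MPS"); //only MPS and TTN builders are exercised here
 }
 return tn_builder;
}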
src/numerics/network_builder.hpp
 /** ExaTN::Numerics: Tensor network builder
-REVISION: 2021/06/25
+REVISION: 2021/10/07
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -45,9 +45,9 @@ public:
     network must only contain the output tensor with dummy legs.
     If tensor_operator = TRUE, the tensor network operator will
     be built instead of the tensor network vector. In that case,
-    the first half legs correspond to ket while the rest to bra. **/
+    the first half open legs correspond to ket while the rest to bra. **/
 virtual void build(TensorNetwork & network,           //inout: tensor network
-                   bool tensor_operator = false) = 0; //in: tensor network vector or operator
+                   bool tensor_operator = false) = 0; //in: tensor network vector or operator to build
 };
...
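Note (usage sketch, not part of the commit): the tensor_operator flag is normally passed through the extended TensorNetwork constructor declared in tensor_network.hpp below rather than by calling build() directly. Assuming an initialized ExaTN runtime, with illustrative tensor and network names:

//Build a tensor network operator over 3 ket + 3 bra open legs of extent 2.
//Per the comment above, output legs 0..2 are ket and legs 3..5 are bra, and the
//builders assert that the paired extents match.
auto builder = exatn::getTensorNetworkBuilder("MPS");
bool ok = builder->setParameter("max_bond_dim",2); assert(ok);
auto op_space = exatn::makeSharedTensor("OpSpace",std::vector<int>(6,2));
exatn::TensorNetwork op_net("OpNet",op_space,*builder,true); //tensor_operator = true
op_net.printIt();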
src/numerics/network_builder_mps.cpp
 /** ExaTN::Numerics: Tensor network builder: MPS: Matrix Product State
-REVISION: 2021/08/12
+REVISION: 2021/10/07
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -48,8 +48,16 @@ void NetworkBuilderMPS::build(TensorNetwork & network, bool tensor_operator)
 bool appended = true;
 //Inspect the output tensor:
 auto output_tensor = network.getTensor(0);
-const auto output_tensor_rank = output_tensor->getRank();
+auto output_tensor_rank = output_tensor->getRank();
 assert(output_tensor_rank > 0);
 const auto & output_dim_extents = output_tensor->getDimExtents();
+if(tensor_operator){
+ assert(output_tensor_rank % 2 == 0); //tensor operators are assumed to be of even rank here
+ output_tensor_rank /= 2;
+ for(unsigned int i = 0; i < output_tensor_rank; ++i){
+  assert(output_dim_extents[i] == output_dim_extents[output_tensor_rank + i]);
+ }
+}
 if(output_tensor_rank == 1){
  appended = network.placeTensor(1, //tensor id
                                 std::make_shared<Tensor>("_T" + std::to_string(1), //tensor name
...
@@ -149,6 +157,12 @@ void NetworkBuilderMPS::build(TensorNetwork & network, bool tensor_operator)
   tensor.rename(generateTensorName(tensor,"t"));
  }
 }
+if(tensor_operator){
+ for(unsigned int i = 0; i < output_tensor_rank; ++i){
+  auto * tens_conn = network.getTensorConn(1 + i);
+  tens_conn->appendLeg(output_dim_extents[output_tensor_rank + i],TensorLeg{0,output_tensor_rank + i});
+ }
+}
 return;
 }
...
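Note (my reading of the change above, offered as a sketch rather than a definitive description): with tensor_operator = true the MPS builder halves the output rank, lays out a regular N-site MPS over the first N (ket) legs, and then appends to site tensor i one extra leg wired to output leg N+i, giving an MPO-like layout with one ket and one bra physical leg per site. Assuming an initialized ExaTN runtime, with illustrative names:

const unsigned int n = 4;
auto mps_builder = exatn::getTensorNetworkBuilder("MPS");
bool ok = mps_builder->setParameter("max_bond_dim",2); assert(ok);
auto space = exatn::makeSharedTensor("Space",std::vector<int>(n*2,2)); //n ket + n bra legs
exatn::TensorNetwork mpo("MPO",space,*mps_builder,true);
assert(mpo.getTensor(0)->getRank() == n*2); //the output tensor keeps all 2n open legs
mpo.printIt(); //each site tensor should show one extra (bra) physical leg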
src/numerics/network_builder_ttn.cpp
 /** ExaTN::Numerics: Tensor network builder: Tree: Tree Tensor Network
-REVISION: 2021/06/25
+REVISION: 2021/10/07
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -9,6 +9,7 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
 #include <initializer_list>
 #include <vector>
+#include <algorithm>

 namespace exatn {
...
@@ -52,12 +53,20 @@ void NetworkBuilderTTN::build(TensorNetwork & network, bool tensor_operator)
 {
  //Inspect the output tensor:
  auto output_tensor = network.getTensor(0);
- const auto output_tensor_rank = output_tensor->getRank();
+ auto output_tensor_rank = output_tensor->getRank();
  assert(output_tensor_rank > 0);
  const auto & output_dim_extents = output_tensor->getDimExtents();
+ if(tensor_operator){
+  assert(output_tensor_rank % 2 == 0); //tensor operators are assumed to be of even rank here
+  output_tensor_rank /= 2;
+  for(unsigned int i = 0; i < output_tensor_rank; ++i){
+   assert(output_dim_extents[i] == output_dim_extents[output_tensor_rank + i]);
+  }
+ }
  //Build tensor tree by layers:
- std::vector<DimExtent> extents(output_dim_extents);
- unsigned int num_dims = extents.size(); assert(num_dims > 0);
+ std::vector<DimExtent> extents(output_tensor_rank);
+ std::copy(output_dim_extents.cbegin(),output_dim_extents.cbegin()+output_tensor_rank,extents.begin());
+ unsigned int num_dims = extents.size(); assert(num_dims > 0);
  unsigned int tensor_id_base = 1, layer = 0;
  //Loop over layers:
  bool not_done = true;
...
@@ -104,6 +113,13 @@ void NetworkBuilderTTN::build(TensorNetwork & network, bool tensor_operator)
                                 tens_legs,false,false);
  assert(appended);
+ if(tensor_operator && layer == 0){
+  auto * tens_conn = network.getTensorConn(tensor_id_base + num_dims_new);
+  for(unsigned int i = 0; i < (tens_rank - end_decr); ++i){
+   const unsigned int output_dim_id = output_tensor_rank + extent_id + i;
+   tens_conn->appendLeg(output_dim_extents[output_dim_id],TensorLeg{0,output_dim_id});
+  }
+ }
  network.getTensor(tensor_id_base + num_dims_new)->rename();
  ++num_dims_new; //next tensor within the layer (each tensor supplies one new dimension)
 }
...
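Note (sketch, not part of the commit): the TTN builder mirrors the MPS change, except that, per the layer == 0 branch added above, only the leaf-layer tensors receive the extra bra legs; the upper layers of the tree are left unchanged. With the arity parameter used by the tests and an initialized runtime assumed:

const unsigned int n = 8;
auto ttn_builder = exatn::getTensorNetworkBuilder("TTN");
bool ok = ttn_builder->setParameter("max_bond_dim",4); assert(ok);
ok = ttn_builder->setParameter("arity",2); assert(ok);
auto space = exatn::makeSharedTensor("Space",std::vector<int>(n*2,2)); //n ket + n bra legs
exatn::TensorNetwork tto("TTO",space,*ttn_builder,true);
tto.printIt(); //leaf tensors carry ket and bra legs; internal tensors are unchanged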
src/numerics/tensor.hpp
 /** ExaTN::Numerics: Abstract Tensor
-REVISION: 2021/08/21
+REVISION: 2021/10/13
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -265,6 +265,16 @@ inline std::size_t tensor_element_type_size(TensorElementType tensor_element_type)
 std::string generateTensorName(const Tensor & tensor,       //in: tensor stored on heap
                                const std::string & prefix); //in: desired name prefix

+/** Compares a specific dimension of two tensors. **/
+inline bool tensor_dims_conform(const Tensor & tensor1,
+                                const Tensor & tensor2,
+                                unsigned int dim1,
+                                unsigned int dim2)
+{
+ return (tensor1.getDimSpaceAttr(dim1) == tensor2.getDimSpaceAttr(dim2) &&
+         tensor1.getDimExtent(dim1) == tensor2.getDimExtent(dim2));
+}
+
 //TEMPLATES:
 template <typename T>
...
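Note (usage sketch, not part of the commit): tensor_dims_conform declares two tensor dimensions conformant when both their space/subspace attributes and their extents coincide. I qualify it with exatn::numerics:: here since the diff does not show whether it is re-exported into the exatn namespace, and the tensor names are illustrative:

auto t1 = exatn::makeSharedTensor("T1",std::vector<int>{2,3,4});
auto t2 = exatn::makeSharedTensor("T2",std::vector<int>{4,2});
bool conforms = exatn::numerics::tensor_dims_conform(*t1,*t2,0,1); //extent 2 vs 2: true if both legs carry the default space attributes
bool differs  = exatn::numerics::tensor_dims_conform(*t1,*t2,1,0); //extent 3 vs 4: always false
assert(!differs);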
src/numerics/tensor_expansion.cpp
 /** ExaTN::Numerics: Tensor network expansion
-REVISION: 2021/09/20
+REVISION: 2021/10/13
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -122,11 +122,71 @@ TensorExpansion::TensorExpansion(const TensorExpansion & another,
 }

-TensorExpansion::TensorExpansion(const TensorOperator & tensor_operator):
+TensorExpansion::TensorExpansion(const TensorOperator & tensor_operator,
+                                 const Tensor & ket_subspace,
+                                 const Tensor & bra_subspace):
  ket_(true)
 {
- //`Implement
- assert(false);
+ const auto ket_space_rank = ket_subspace.getRank();
+ const auto bra_space_rank = bra_subspace.getRank();
+ const auto space_rank = ket_space_rank + bra_space_rank;
+ if(ket_space_rank == bra_space_rank){
+  for(auto component = tensor_operator.cbegin(); component != tensor_operator.cend(); ++component){
+   auto network = makeSharedTensorNetwork(*(component->network));
+   network->rename("_" + component->network->getName());
+   const auto comp_ket_rank = component->ket_legs.size();
+   const auto comp_bra_rank = component->bra_legs.size();
+   const auto comp_rank = comp_ket_rank + comp_bra_rank;
+   assert(comp_rank == network->getRank());
+   if(comp_ket_rank == comp_bra_rank){
+    if(comp_rank <= space_rank){
+     auto output_tensor = network->getTensor(0);
+     std::vector<int> space_leg_ids(space_rank,-1);
+     for(unsigned int i = 0; i < comp_ket_rank; ++i){
+      const auto & ket_leg = component->ket_legs[i];
+      const auto & bra_leg = component->bra_legs[i];
+      if(ket_leg.first < ket_space_rank && bra_leg.first < bra_space_rank){
+       assert(space_leg_ids[ket_leg.first] < 0);
+       assert(space_leg_ids[ket_space_rank + bra_leg.first] < 0);
+       assert(tensor_dims_conform(ket_subspace,*output_tensor,ket_leg.first,ket_leg.second));
+       assert(tensor_dims_conform(bra_subspace,*output_tensor,bra_leg.first,bra_leg.second));
+       space_leg_ids[ket_leg.first] = ket_leg.second; //global leg id --> local network leg id
+       space_leg_ids[ket_space_rank + bra_leg.first] = bra_leg.second; //global leg id --> local network leg id
+      }else{
+       std::cout << "ERROR(exatn::TensorExpansion::ctor): The ranks of provided tensor subspaces are too low for the given tensor operator!" << std::endl;
+       assert(false);
+      }
+     }
+     unsigned int bi = 0, ki = 0, out_rank = comp_rank;
+     while(ki < ket_space_rank && bi < bra_space_rank){
+      if(space_leg_ids[ki] >= 0) ++ki;
+      if(space_leg_ids[ket_space_rank + bi] >= 0) ++bi;
+      if(space_leg_ids[ki] < 0 && space_leg_ids[ket_space_rank + bi] < 0){
+       space_leg_ids[ki] = out_rank++;
+       space_leg_ids[ket_space_rank + bi] = out_rank++;
+       auto identity_tensor = makeSharedTensor("_d",
+        std::initializer_list<DimExtent>{ket_subspace.getDimExtent(ki),bra_subspace.getDimExtent(bi)},
+        std::initializer_list<std::pair<SpaceId,SubspaceId>>{ket_subspace.getDimSpaceAttr(ki),bra_subspace.getDimSpaceAttr(bi)});
+       identity_tensor->rename(tensor_hex_name("_d",identity_tensor->getTensorHash()));
+       auto success = network->appendTensor(identity_tensor,{});
+       assert(success);
+       ++ki; ++bi;
+      }
+     }
+     assert(out_rank == space_rank);
+     auto success = this->appendComponent(network,component->coefficient);
+     assert(success);
+    }else{
+     std::cout << "ERROR(exatn::TensorExpansion::ctor): The combined rank of provided tensor subspaces is too low for the given tensor operator!" << std::endl;
+     assert(false);
+    }
+   }else{
+    std::cout << "ERROR(exatn::TensorExpansion::ctor): A tensor operator component maps different bra and ket spaces!" << std::endl;
+    assert(false);
+   }
+  }
+ }else{
+  std::cout << "ERROR(exatn::TensorExpansion::ctor): Provided bra and ket tensor subspaces have different rank!" << std::endl;
+  assert(false);
+ }
+ this->rename("_" + tensor_operator.getName());
 }
...
src/numerics/tensor_expansion.hpp
 /** ExaTN::Numerics: Tensor network expansion
-REVISION: 2021/09/20
+REVISION: 2021/10/13
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -128,8 +128,17 @@ public:
                 bool reset_output_tensors,          //in: whether or not to replace output tensors in all cloned tensor networks
                 const std::string & new_name = ""); //in: new name for the cloned tensor network expansion

-/** Generates a tensor network expansion from a tensor network operator. **/
-TensorExpansion(const TensorOperator & tensor_operator);
+/** Generates a tensor network expansion from a tensor network operator by joining
+    its ket and bra legs together, bra legs following the ket legs. The shape/signature
+    of the ket and bra subspaces from the chosen tensor operator map must be explicitly
+    supplied in the form of tensors (they need to be compatible with the given
+    tensor network operator). The tensor network operator must have its
+    ket and bra ranks equal for each component separately. Note that explicit
+    identity tensors may be added to some components of the produced tensor
+    network expansion (for inactive dimensions). **/
+TensorExpansion(const TensorOperator & tensor_operator, //in: tensor network operator
+                const Tensor & ket_subspace,            //in: tensor defining the ket subspace from the tensor operator map
+                const Tensor & bra_subspace);           //in: tensor defining the bra subspace from the tensor operator map

 TensorExpansion(const TensorExpansion &) = default;
 TensorExpansion & operator=(const TensorExpansion &) = default;
...
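Note (usage sketch, not part of the commit): converting a tensor network operator into a tensor network expansion with the new constructor. It assumes a tensor network operator `ham` (a std::shared_ptr<exatn::TensorOperator> obtained elsewhere, e.g. via exatn::quantum::readSpinHamiltonian as in NumServerTester.cpp) acting on num_sites qubits; the subspace tensor names are illustrative:

const int num_sites = 8;
auto ket_space = exatn::makeSharedTensor("KetSpace",std::vector<int>(num_sites,2));
auto bra_space = exatn::makeSharedTensor("BraSpace",std::vector<int>(num_sites,2));
//`ham` is assumed: std::shared_ptr<exatn::TensorOperator> built elsewhere.
exatn::TensorExpansion ham_expansion(*ham,*ket_space,*bra_space);
//Ket legs come first, bra legs second; components that do not touch some sites get
//rank-2 identity ("_d") tensors appended for those inactive dimensions (see tensor_expansion.cpp above).
ham_expansion.printIt();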
src/numerics/tensor_network.cpp
 /** ExaTN::Numerics: Tensor network
-REVISION: 2021/09/03
+REVISION: 2021/10/13
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -251,7 +251,8 @@
 TensorNetwork::TensorNetwork(const std::string & name,
                              std::shared_ptr<Tensor> output_tensor,
-                             NetworkBuilder & builder):
+                             NetworkBuilder & builder,
+                             bool tensor_operator):
  explicit_output_(1), finalized_(0), name_(name), max_tensor_id_(0),
  contraction_seq_flops_(0.0), max_intermediate_presence_volume_(0.0),
  max_intermediate_volume_(0.0), max_intermediate_rank_(0), universal_indexing_(false)
...
@@ -264,7 +265,7 @@ TensorNetwork::TensorNetwork(const std::string & name,
   std::cout << "#ERROR(exatn::numerics::TensorNetwork::TensorNetwork): Tensor id already in use!" << std::endl;
   assert(false);
  }
- builder.build(*this); //create and link input tensors of the tensor network
+ builder.build(*this,tensor_operator); //create and link input tensors of the tensor network
 finalized_ = 1;
 updateConnectionsFromInputTensors(); //update output tensor legs
 }
...
src/numerics/tensor_network.hpp
 /** ExaTN::Numerics: Tensor network
-REVISION: 2021/09/03
+REVISION: 2021/10/13
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
@@ -173,7 +173,8 @@ public:
 /** Builds a named tensor network from a template implemented by a custom tensor network builder. **/
 TensorNetwork(const std::string & name,              //in: tensor network name
               std::shared_ptr<Tensor> output_tensor, //in: output tensor of the tensor network
-              NetworkBuilder & builder);             //in: specific tensor network builder
+              NetworkBuilder & builder,              //in: specific tensor network builder
+              bool tensor_operator = false);         //in: whether or not to build a tensor network operator instead of a vector
 /** Clones a tensor network with an optional replacement of the output tensor. **/
 TensorNetwork(const TensorNetwork & another, //in: another tensor network
               bool replace_output,           //in: whether or not to replace the output tensor
...
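Note (sketch, not part of the commit): because tensor_operator defaults to false, existing call sites keep compiling and keep building tensor network vectors, while the same kind of output tensor can now also be built as an operator. Assuming an initialized runtime, with illustrative names:

auto builder = exatn::getTensorNetworkBuilder("MPS");
bool ok = builder->setParameter("max_bond_dim",2); assert(ok);
auto out_vec = exatn::makeSharedTensor("OutVec",std::vector<int>(8,2));
auto out_op  = exatn::makeSharedTensor("OutOp",std::vector<int>(8,2));
exatn::TensorNetwork vec_net("VecNet",out_vec,*builder);   //8-site MPS vector (previous behavior)
exatn::TensorNetwork op_net("OpNet",out_op,*builder,true); //4-site MPS operator (new)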