Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
Menu
Open sidebar
ORNL Quantum Computing Institute
exatn
Commits
aef19982
Commit
aef19982
authored
Oct 21, 2021
by
Dmitry I. Lyakh
Browse files
Tensor operator to tensor expansion conversion test passes.
Signed-off-by:
Dmitry I. Lyakh
<
quant4me@gmail.com
>
parent
832d18ee
Changes
8
Hide whitespace changes
Inline
Side-by-side
src/exatn/exatn_numerics.hpp
View file @
aef19982
/** ExaTN::Numerics: General client header (free function API)
REVISION: 2021/10/19
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -902,22 +902,25 @@ inline bool sync(const ProcessGroup & process_group, //in: chosen group of MPI
/** Normalizes a tensor to a given 2-norm. **/
inline
bool
normalizeNorm2Sync
(
const
std
::
string
&
name
,
//in: tensor name
double
norm
=
1.0
)
//in: desired 2-norm
{
return
numericalServer
->
normalizeNorm2Sync
(
name
,
norm
);}
/** Normalizes a tensor to a given 2-norm; optionally returns the
    tensor's original 2-norm through `original_norm`. **/
inline bool normalizeNorm2Sync(const std::string & name,         //in: tensor name
                               double norm = 1.0,                //in: desired 2-norm
                               double * original_norm = nullptr) //out: original 2-norm
{
 return numericalServer->normalizeNorm2Sync(name,norm,original_norm);
}
/** Normalizes a tensor network expansion to a given 2-norm by rescaling
all tensor network components by the same factor: Only the tensor
network expansion coeffcients are affected, not tensors. **/
inline
bool
normalizeNorm2Sync
(
TensorExpansion
&
expansion
,
//inout: tensor network expansion
double
norm
=
1.0
)
//in: desired 2-norm
{
return
numericalServer
->
normalizeNorm2Sync
(
expansion
,
norm
);}
/** Normalizes a tensor network expansion to a given 2-norm by rescaling
    only the expansion coefficients (component tensors are untouched);
    optionally returns the original 2-norm through `original_norm`. **/
inline bool normalizeNorm2Sync(TensorExpansion & expansion,      //inout: tensor network expansion
                               double norm = 1.0,                //in: desired 2-norm
                               double * original_norm = nullptr) //out: original 2-norm
{
 return numericalServer->normalizeNorm2Sync(expansion,norm,original_norm);
}
inline
bool
normalizeNorm2Sync
(
const
ProcessGroup
&
process_group
,
//in: chosen group of MPI processes
TensorExpansion
&
expansion
,
//inout: tensor network expansion
double
norm
=
1.0
)
//in: desired 2-norm
{
return
numericalServer
->
normalizeNorm2Sync
(
process_group
,
expansion
,
norm
);}
double
norm
=
1.0
,
//in: desired 2-norm
double
*
original_norm
=
nullptr
)
//out: original 2-norm
{
return
numericalServer
->
normalizeNorm2Sync
(
process_group
,
expansion
,
norm
,
original_norm
);}
/** Normalizes all input tensors in a tensor network to a given 2-norm.
...
...
src/exatn/num_server.cpp
View file @
aef19982
/** ExaTN::Numerics: Numerical server
REVISION: 2021/10/19
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -3076,23 +3076,25 @@ bool NumServer::evaluateTensorNetworkSync(const ProcessGroup & process_group,
return
parsed
;
}
bool
NumServer
::
normalizeNorm2Sync
(
const
std
::
string
&
name
,
double
norm
)
/** Normalizes tensor `name` to the desired 2-norm by rescaling its body.
    If `original_norm` is non-null, the tensor's original 2-norm is
    returned through it. Returns TRUE on success, FALSE otherwise.
    NOTE(fix): guards against a zero original norm — the previous code
    divided by `old_norm` unconditionally, producing inf/NaN scaling for
    a zero-norm tensor; this mirrors the zero-norm warning path used by
    the tensor-expansion overload. **/
bool NumServer::normalizeNorm2Sync(const std::string & name, //in: tensor name
                                   double norm,              //in: desired 2-norm
                                   double * original_norm)   //out: original 2-norm
{
 double old_norm = 0.0;
 bool success = computeNorm2Sync(name,old_norm);
 if(original_norm != nullptr) *original_norm = old_norm;
 if(success){
  if(old_norm > 0.0){
   success = scaleTensorSync(name,norm/old_norm);
  }else{
   std::cout << "#WARNING(exatn::normalizeNorm2Sync): Tensor has zero norm, thus cannot be normalized!"
             << std::endl;
   success = false;
  }
 }
 return success;
}
bool
NumServer
::
normalizeNorm2Sync
(
TensorExpansion
&
expansion
,
double
norm
)
double
norm
,
double
*
original_
norm
)
{
return
normalizeNorm2Sync
(
getDefaultProcessGroup
(),
expansion
,
norm
);
return
normalizeNorm2Sync
(
getDefaultProcessGroup
(),
expansion
,
norm
,
original_norm
);
}
bool
NumServer
::
normalizeNorm2Sync
(
const
ProcessGroup
&
process_group
,
TensorExpansion
&
expansion
,
double
norm
)
double
norm
,
double
*
original_norm
)
{
//Determine parallel execution configuration:
unsigned
int
local_rank
;
//local process rank within the process group
...
...
@@ -3121,6 +3123,7 @@ bool NumServer::normalizeNorm2Sync(const ProcessGroup & process_group,
if
(
success
){
//std::cout << "#DEBUG(exatn::NormalizeNorm2Sync): Original squared 2-norm = " << original_norm2 << std::endl; //debug
if
(
original_norm2
>
0.0
){
if
(
original_norm
!=
nullptr
)
*
original_norm
=
std
::
sqrt
(
original_norm2
);
expansion
.
rescale
(
std
::
complex
<
double
>
(
norm
/
std
::
sqrt
(
original_norm2
)));
}
else
{
std
::
cout
<<
"#WARNING(exatn::normalizeNorm2): Tensor expansion has zero norm, thus cannot be normalized!"
<<
std
::
endl
;
...
...
src/exatn/num_server.hpp
View file @
aef19982
/** ExaTN::Numerics: Numerical server
REVISION: 2021/10/19
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -907,18 +907,21 @@ public:
const
std
::
string
&
network
);
//in: symbolic tensor network specification
/** Normalizes a tensor to a given 2-norm. **/
bool
normalizeNorm2Sync
(
const
std
::
string
&
name
,
//in: tensor name
double
norm
);
//in: desired 2-norm
bool
normalizeNorm2Sync
(
const
std
::
string
&
name
,
//in: tensor name
double
norm
=
1.0
,
//in: desired 2-norm
double
*
original_norm
=
nullptr
);
//out: original 2-norm
/** Normalizes a tensor network expansion to a given 2-norm by rescaling
all tensor network components by the same factor: Only the tensor
network expansion coefficients are affected. **/
bool
normalizeNorm2Sync
(
TensorExpansion
&
expansion
,
//inout: tensor network expansion
double
norm
);
//in: desired 2-norm
bool
normalizeNorm2Sync
(
TensorExpansion
&
expansion
,
//inout: tensor network expansion
double
norm
=
1.0
,
//in: desired 2-norm
double
*
original_norm
=
nullptr
);
//out: original 2-norm
bool
normalizeNorm2Sync
(
const
ProcessGroup
&
process_group
,
//in: chosen group of MPI processes
TensorExpansion
&
expansion
,
//inout: tensor network expansion
double
norm
);
//in: desired 2-norm
double
norm
=
1.0
,
//in: desired 2-norm
double
*
original_norm
=
nullptr
);
//out: original 2-norm
/** Normalizes all input tensors in a tensor network to a given 2-norm.
If only_optimizable is TRUE, only optimizable tensors will be normalized. **/
...
...
src/exatn/tests/NumServerTester.cpp
View file @
aef19982
...
...
@@ -1552,9 +1552,9 @@ TEST(NumServerTester, IsingTNO)
exatn
::
resetLoggingLevel
(
2
,
2
);
//debug
//Define Ising Hamiltonian constants:
constexpr
std
::
complex
<
double
>
ZERO
{
0.0
,
0.0
};
constexpr
std
::
complex
<
double
>
HAMT
{
-
1.0
,
0.0
};
constexpr
std
::
complex
<
double
>
HAMU
{
-
2.0
,
0.0
};
constexpr
std
::
complex
<
double
>
ZERO
{
0.0
,
0.0
};
constexpr
std
::
complex
<
double
>
HAMT
{
-
1.0
,
0.0
};
constexpr
std
::
complex
<
double
>
HAMU
{
-
2.0
,
0.0
};
//Declare Ising Hamiltonian tensors:
auto
t01
=
std
::
make_shared
<
Tensor
>
(
"T01"
,
TensorShape
{
2
,
2
,
2
,
2
});
...
...
@@ -1578,15 +1578,15 @@ TEST(NumServerTester, IsingTNO)
};
//Declare the Ising Hamiltonian operator:
TensorOperator
ham
(
"Hamiltonian"
);
success
=
ham
.
appendComponent
(
t01
,{{
0
,
0
},{
1
,
1
}},{{
0
,
2
},{
1
,
3
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
.
appendComponent
(
t12
,{{
1
,
0
},{
2
,
1
}},{{
1
,
2
},{
2
,
3
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
.
appendComponent
(
t23
,{{
2
,
0
},{
3
,
1
}},{{
2
,
2
},{
3
,
3
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
.
appendComponent
(
u00
,{{
0
,
0
}},{{
0
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
.
appendComponent
(
u11
,{{
1
,
0
}},{{
1
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
.
appendComponent
(
u22
,{{
2
,
0
}},{{
2
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
.
appendComponent
(
u33
,{{
3
,
0
}},{{
3
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
ham
.
printIt
();
//debug
auto
ham
=
exatn
::
makeShared
TensorOperator
(
"Hamiltonian"
);
success
=
ham
->
appendComponent
(
t01
,{{
0
,
0
},{
1
,
1
}},{{
0
,
2
},{
1
,
3
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
->
appendComponent
(
t12
,{{
1
,
0
},{
2
,
1
}},{{
1
,
2
},{
2
,
3
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
->
appendComponent
(
t23
,{{
2
,
0
},{
3
,
1
}},{{
2
,
2
},{
3
,
3
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
->
appendComponent
(
u00
,{{
0
,
0
}},{{
0
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
->
appendComponent
(
u11
,{{
1
,
0
}},{{
1
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
->
appendComponent
(
u22
,{{
2
,
0
}},{{
2
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
success
=
ham
->
appendComponent
(
u33
,{{
3
,
0
}},{{
3
,
1
}},{
1.0
,
0.0
});
assert
(
success
);
//
ham
->
printIt(); //debug
//Configure the tensor network builder:
auto
tn_builder
=
exatn
::
getTensorNetworkBuilder
(
tn_type
);
...
...
@@ -1599,19 +1599,24 @@ TEST(NumServerTester, IsingTNO)
assert
(
false
);
}
//Build a tensor network vector:
auto
ket_tensor
=
exatn
::
makeSharedTensor
(
"TensorSpace"
,
std
::
vector
<
int
>
(
num_sites
,
2
));
//auto vec_net = exatn::makeSharedTensorNetwork("VectorNet",ket_tensor,*tn_builder,false);
auto
vec_net
=
exatn
::
makeSharedTensorNetwork
(
"VectorNet"
,
ket_tensor
,
*
tn_builder
,
false
);
vec_net
->
markOptimizableAllTensors
();
//vec_net->printIt(); //debug
auto
vec_tns
=
exatn
::
makeSharedTensorExpansion
(
"VectorTNS"
,
vec_net
,
std
::
complex
<
double
>
{
1.0
,
0.0
});
//Build a tensor network operator:
auto
space_tensor
=
exatn
::
makeSharedTensor
(
"TensorSpaceMap"
,
std
::
vector
<
int
>
(
num_sites
*
2
,
2
));
auto
ham_net
=
exatn
::
makeSharedTensorNetwork
(
"HamiltonianNet"
,
space_tensor
,
*
tn_builder
,
true
);
ham_net
->
markOptimizableAllTensors
();
ham_net
->
printIt
();
//debug
TensorOperator
ham_tno
(
"HamiltonianTNO"
);
success
=
ham_tno
.
appendComponent
(
ham_net
,{{
0
,
0
},{
1
,
1
},{
2
,
2
},{
3
,
3
}},{{
0
,
4
},{
1
,
5
},{
2
,
6
},{
3
,
7
}},{
1.0
,
0.0
});
//
ham_net->printIt(); //debug
auto
ham_tno
=
exatn
::
makeShared
TensorOperator
(
"HamiltonianTNO"
);
success
=
ham_tno
->
appendComponent
(
ham_net
,{{
0
,
0
},{
1
,
1
},{
2
,
2
},{
3
,
3
}},{{
0
,
4
},{
1
,
5
},{
2
,
6
},{
3
,
7
}},{
1.0
,
0.0
});
assert
(
success
);
{
//Numerical evaluation:
//Create Hamiltonian tensors:
//Create and initialize Hamiltonian tensors:
std
::
cout
<<
"Creating Hamiltonian tensors ... "
;
success
=
exatn
::
createTensorSync
(
t01
,
TENS_ELEM_TYPE
);
assert
(
success
);
success
=
exatn
::
createTensorSync
(
t12
,
TENS_ELEM_TYPE
);
assert
(
success
);
success
=
exatn
::
createTensorSync
(
t23
,
TENS_ELEM_TYPE
);
assert
(
success
);
...
...
@@ -1619,8 +1624,9 @@ TEST(NumServerTester, IsingTNO)
success
=
exatn
::
createTensorSync
(
u11
,
TENS_ELEM_TYPE
);
assert
(
success
);
success
=
exatn
::
createTensorSync
(
u22
,
TENS_ELEM_TYPE
);
assert
(
success
);
success
=
exatn
::
createTensorSync
(
u33
,
TENS_ELEM_TYPE
);
assert
(
success
);
std
::
cout
<<
"Ok"
<<
std
::
endl
;
//
Initializ
e
Hamiltonian tensors
:
std
::
cout
<<
"
Initializ
ing
Hamiltonian tensors
... "
;
success
=
exatn
::
initTensorDataSync
(
"T01"
,
hamt
);
assert
(
success
);
success
=
exatn
::
initTensorDataSync
(
"T12"
,
hamt
);
assert
(
success
);
success
=
exatn
::
initTensorDataSync
(
"T23"
,
hamt
);
assert
(
success
);
...
...
@@ -1628,29 +1634,59 @@ TEST(NumServerTester, IsingTNO)
success
=
exatn
::
initTensorDataSync
(
"U11"
,
hamu
);
assert
(
success
);
success
=
exatn
::
initTensorDataSync
(
"U22"
,
hamu
);
assert
(
success
);
success
=
exatn
::
initTensorDataSync
(
"U33"
,
hamu
);
assert
(
success
);
std
::
cout
<<
"Ok"
<<
std
::
endl
;
//Create and initialize tensor network vector tensors:
std
::
cout
<<
"Creating and initializing tensor network vector tensors ... "
;
success
=
exatn
::
createTensorsSync
(
*
vec_net
,
TENS_ELEM_TYPE
);
assert
(
success
);
success
=
exatn
::
initTensorsRndSync
(
*
vec_net
);
assert
(
success
);
std
::
cout
<<
"Ok"
<<
std
::
endl
;
//Create and initialize tensor network operator tensors:
std
::
cout
<<
"Creating and initializing tensor network operator tensors ... "
;
success
=
exatn
::
createTensorsSync
(
*
ham_net
,
TENS_ELEM_TYPE
);
assert
(
success
);
success
=
exatn
::
initTensorsRndSync
(
*
ham_net
);
assert
(
success
);
std
::
cout
<<
"Ok"
<<
std
::
endl
;
//Remap tensor operators as tensor expansions:
auto
ham_expansion
=
makeSharedTensorExpansion
(
ham
,
*
ket_tensor
);
auto
ham_expansion
=
exatn
::
makeSharedTensorExpansion
(
*
ham
,
*
ket_tensor
);
ham_expansion
->
printIt
();
//debug
auto
ham_tno_expansion
=
makeSharedTensorExpansion
(
ham_tno
,
*
ket_tensor
);
auto
ham_tno_expansion
=
exatn
::
makeSharedTensorExpansion
(
*
ham_tno
,
*
ket_tensor
);
ham_tno_expansion
->
printIt
();
//debug
//Create and initialize special tensors in the Hamiltonian tensor expansion:
std
::
cout
<<
"Creating and initializing special Hamiltonian tensors ... "
;
for
(
auto
net
=
ham_expansion
->
begin
();
net
!=
ham_expansion
->
end
();
++
net
){
success
=
exatn
::
createTensorsSync
(
*
(
net
->
network
),
TENS_ELEM_TYPE
);
assert
(
success
);
success
=
exatn
::
initTensorsSpecialSync
(
*
(
net
->
network
));
assert
(
success
);
}
std
::
cout
<<
"Ok"
<<
std
::
endl
;
//Ground state search for the original Hamiltonian:
std
::
cout
<<
"Ground state search for the original Hamiltonian:"
<<
std
::
endl
;
//success = exatn::sync(); assert(success);
//success = exatn::balanceNorm2Sync(*vec_tns,1.0,true); assert(success);
exatn
::
TensorNetworkOptimizer
::
resetDebugLevel
(
1
,
0
);
exatn
::
TensorNetworkOptimizer
optimizer1
(
ham
,
vec_tns
,
1e-5
);
success
=
exatn
::
sync
();
assert
(
success
);
bool
converged
=
optimizer1
.
optimize
();
success
=
exatn
::
sync
();
assert
(
success
);
if
(
converged
){
std
::
cout
<<
"Search succeeded!"
<<
std
::
endl
;
}
else
{
std
::
cout
<<
"Search failed!"
<<
std
::
endl
;
assert
(
false
);
}
//Reconstruct the Ising Hamiltonian as a tensor network operator:
success
=
exatn
::
normalizeNorm2Sync
(
*
ham_expansion
,
1.0
);
assert
(
success
);
success
=
exatn
::
balanceNorm2Sync
(
*
ham_tno_expansion
,
1.0
,
true
);
assert
(
success
);
std
::
cout
<<
"Reconstructing Ising Hamiltonian with a tensor network operator:"
<<
std
::
endl
;
double
ham_norm
=
0.0
;
success
=
exatn
::
normalizeNorm2Sync
(
*
ham_expansion
,
1.0
,
&
ham_norm
);
assert
(
success
);
std
::
cout
<<
"Original Hamiltonian operator norm = "
<<
ham_norm
<<
std
::
endl
;
//success = exatn::balanceNorm2Sync(*ham_tno_expansion,1.0,true); assert(success);
ham_tno_expansion
->
conjugate
();
exatn
::
TensorNetworkReconstructor
::
resetDebugLevel
(
1
,
0
);
//debug
exatn
::
TensorNetworkReconstructor
reconstructor
(
ham_expansion
,
ham_tno_expansion
,
1e-
4
);
exatn
::
TensorNetworkReconstructor
reconstructor
(
ham_expansion
,
ham_tno_expansion
,
1e-
6
);
success
=
exatn
::
sync
();
assert
(
success
);
double
residual_norm
,
fidelity
;
bool
reconstructed
=
reconstructor
.
reconstruct
(
&
residual_norm
,
&
fidelity
);
...
...
@@ -1659,12 +1695,31 @@ TEST(NumServerTester, IsingTNO)
std
::
cout
<<
"Reconstruction succeeded: Residual norm = "
<<
residual_norm
<<
"; Fidelity = "
<<
fidelity
<<
std
::
endl
;
}
else
{
std
::
cout
<<
"Reconstruction failed!"
<<
std
::
endl
;
assert
(
false
);
std
::
cout
<<
"Reconstruction failed!"
<<
std
::
endl
;
assert
(
false
);
}
const
auto
tno_exp_coefs
=
ham_tno_expansion
->
getCoefficients
();
assert
(
tno_exp_coefs
.
size
()
==
1
);
std
::
cout
<<
"Eigenvalue scaling coefficient = "
<<
ham_norm
*
tno_exp_coefs
[
0
]
<<
std
::
endl
;
//Ground state search for the tensor network Hamiltonian:
std
::
cout
<<
"Ground state search for the tensor network Hamiltonian:"
<<
std
::
endl
;
exatn
::
TensorNetworkOptimizer
::
resetDebugLevel
(
1
,
0
);
exatn
::
TensorNetworkOptimizer
optimizer2
(
ham_tno
,
vec_tns
,
1e-5
);
success
=
exatn
::
sync
();
assert
(
success
);
converged
=
optimizer2
.
optimize
();
success
=
exatn
::
sync
();
assert
(
success
);
if
(
converged
){
std
::
cout
<<
"Search succeeded!"
<<
std
::
endl
;
}
else
{
std
::
cout
<<
"Search failed!"
<<
std
::
endl
;
assert
(
false
);
}
//Destroy all tensors:
success
=
exatn
::
sync
();
assert
(
success
);
success
=
exatn
::
destroyTensorsSync
(
*
ham_net
);
assert
(
success
);
success
=
exatn
::
destroyTensorsSync
(
*
vec_net
);
assert
(
success
);
success
=
exatn
::
destroyTensorSync
(
"U33"
);
assert
(
success
);
success
=
exatn
::
destroyTensorSync
(
"U22"
);
assert
(
success
);
success
=
exatn
::
destroyTensorSync
(
"U11"
);
assert
(
success
);
...
...
src/numerics/tensor_expansion.cpp
View file @
aef19982
/** ExaTN::Numerics: Tensor network expansion
REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -346,6 +346,15 @@ void TensorExpansion::markOptimizableAllTensors()
}
/** Returns the linear combination coefficients of all expansion components,
    in component order. **/
std::vector<std::complex<double>> TensorExpansion::getCoefficients() const
{
 std::vector<std::complex<double>> result;
 result.reserve(components_.size());
 for(const auto & comp: components_) result.push_back(comp.coefficient);
 return result;
}
void
TensorExpansion
::
printIt
()
const
{
if
(
ket_
){
...
...
src/numerics/tensor_expansion.hpp
View file @
aef19982
/** ExaTN::Numerics: Tensor network expansion
REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -246,6 +246,9 @@ public:
void
markOptimizableTensors
(
std
::
function
<
bool
(
const
Tensor
&
)
>
predicate
);
void
markOptimizableAllTensors
();
//marks all input tensors as optimizable
/** Returns linear combination coefficients for all components. **/
std
::
vector
<
std
::
complex
<
double
>>
getCoefficients
()
const
;
/** Prints. **/
void
printIt
()
const
;
...
...
src/numerics/tensor_operator.cpp
View file @
aef19982
/** ExaTN::Numerics: Tensor operator
REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -179,6 +179,15 @@ void TensorOperator::conjugate()
}
/** Returns the linear combination coefficients of all operator components,
    in component order. **/
std::vector<std::complex<double>> TensorOperator::getCoefficients() const
{
 std::vector<std::complex<double>> result;
 result.reserve(components_.size());
 for(const auto & comp: components_) result.push_back(comp.coefficient);
 return result;
}
void
TensorOperator
::
printIt
()
const
{
std
::
cout
<<
"TensorNetworkOperator("
<<
this
->
getName
()
...
...
src/numerics/tensor_operator.hpp
View file @
aef19982
/** ExaTN::Numerics: Tensor operator
REVISION: 2021/10/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -143,6 +143,9 @@ public:
complex linear expansion coefficients are complex conjugated. **/
void
conjugate
();
/** Returns linear combination coefficients for all components. **/
std
::
vector
<
std
::
complex
<
double
>>
getCoefficients
()
const
;
/** Prints. **/
void
printIt
()
const
;
...
...
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment