ORNL Quantum Computing Institute / exatn

Commit ccc58f0d authored Oct 06, 2021 by Dmitry I. Lyakh
Fixed initTensorRnd, only implemented for simple replicated tensors
Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent d963244e
Pipeline #170118 failed with stage in 5 minutes and 59 seconds
Changes: 2, Pipelines: 18
src/exatn/num_server.cpp
 /** ExaTN::Numerics: Numerical server
-REVISION: 2021/10/05
+REVISION: 2021/10/06
 Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
 Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...
...
@@ -1124,8 +1124,8 @@ std::shared_ptr<Tensor> NumServer::getTensor(const std::string & name)
 {
  auto iter = tensors_.find(name);
  if(iter == tensors_.end()){
-  std::cout << "#ERROR(exatn::NumServer::getTensor): Tensor " << name << " not found!" << std::endl;
-  assert(false);
+  //std::cout << "#ERROR(exatn::NumServer::getTensor): Tensor " << name << " not found!" << std::endl;
+  return std::shared_ptr<Tensor>(nullptr);
  }
  return iter->second;
 }
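With this change a failed lookup hands back an empty shared pointer instead of aborting the process, so call sites are expected to test the result before dereferencing it. A minimal caller-side sketch, assuming the exatn:: free-function wrapper around NumServer::getTensor that the test suite uses, and assuming Tensor exposes getRank():

#include "exatn.hpp"
#include <iostream>
#include <string>

void inspect(const std::string & name){
 auto tensor = exatn::getTensor(name); // may now be empty instead of asserting
 if(tensor){
  std::cout << "Tensor " << name << " has rank " << tensor->getRank() << std::endl;
 }else{
  std::cout << "Tensor " << name << " does not exist; handle gracefully\n";
 }
}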
...
...
@@ -1521,17 +1521,37 @@ bool NumServer::initTensorFileSync(const std::string & name,
 bool NumServer::initTensorRnd(const std::string & name)
 {
- const auto & process_group = getTensorProcessGroup(name);
  bool success = transformTensor(name,std::shared_ptr<TensorMethod>(new numerics::FunctorInitRnd()));
- if(success) success = broadcastTensor(process_group,name,0);
+ if(success){
+  auto tensor = getTensor(name);
+  if(tensor != nullptr){
+   if(tensor->isComposite()){
+    std::cout << "#ERROR(exatn::initTensorRnd): Random initialization of composite tensors is not implemented yet!\n";
+    assert(false);
+   }else{
+    const auto & process_group = getTensorProcessGroup(name);
+    success = broadcastTensor(process_group,name,0);
+   }
+  }
+ }
  return success;
 }

 bool NumServer::initTensorRndSync(const std::string & name)
 {
- const auto & process_group = getTensorProcessGroup(name);
  bool success = transformTensorSync(name,std::shared_ptr<TensorMethod>(new numerics::FunctorInitRnd()));
- if(success) success = broadcastTensorSync(process_group,name,0);
+ if(success){
+  auto tensor = getTensor(name);
+  if(tensor != nullptr){
+   if(tensor->isComposite()){
+    std::cout << "#ERROR(exatn::initTensorRndSync): Random initialization of composite tensors is not implemented yet!\n";
+    assert(false);
+   }else{
+    const auto & process_group = getTensorProcessGroup(name);
+    success = broadcastTensorSync(process_group,name,0);
+   }
+  }
+ }
  return success;
 }
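The reason a broadcast follows the random fill: FunctorInitRnd draws random numbers independently on every process, so the replicated copies initially disagree, and broadcasting rank 0's data restores consistency across the replicas. Composite (process-distributed) tensors would need a different scheme, hence the new explicit guard. A usage sketch, under the assumption that the exatn:: wrappers (initialize, createTensor, initTensorRndSync, destroyTensor, finalize) mirror the NumServer methods as in the test suite; the tensor name and shape are illustrative:

#include "exatn.hpp"
#include <cassert>

int main(int argc, char ** argv){
 exatn::initialize();
 {
  // A simple (non-composite) tensor is replicated on every MPI process:
  bool success = exatn::createTensor("T",exatn::TensorElementType::REAL64,
                                     exatn::numerics::TensorShape{16,16});
  assert(success);
  // Each process fills its replica with local random numbers; the fixed
  // routine then broadcasts rank 0's data so all replicas agree:
  success = exatn::initTensorRndSync("T"); assert(success);
  success = exatn::destroyTensor("T"); assert(success);
 }
 exatn::finalize();
 return 0;
}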
...
...
@@ -1761,7 +1781,7 @@ bool NumServer::replicateTensor(const ProcessGroup & process_group, const std::s
 if(iter->second->isComposite()){
  std::cout << "#ERROR(exatn::NumServer::replicateTensor): Tensor " << name << " is composite, replication not allowed!" << std::endl << std::flush;
-  return false;
+  assert(false);
 }
 iter->second->pack(byte_packet_);
 byte_packet_len = static_cast<int>(byte_packet_.size_bytes); assert(byte_packet_len > 0);
...
...
@@ -1817,7 +1837,7 @@ bool NumServer::replicateTensorSync(const ProcessGroup & process_group, const st
 if(iter->second->isComposite()){
  std::cout << "#ERROR(exatn::NumServer::replicateTensorSync): Tensor " << name << " is composite, replication not allowed!" << std::endl << std::flush;
-  return false;
+  assert(false);
 }
 iter->second->pack(byte_packet_);
 byte_packet_len = static_cast<int>(byte_packet_.size_bytes); assert(byte_packet_len > 0);
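Both replicate paths now fail fast on composite tensors with assert(false) rather than quietly returning false: replicating a distributed tensor is a caller error, not a recoverable condition. Separately, the context lines show the packed size being narrowed to int because MPI count arguments are plain int. A standalone sketch of a defensive version of that narrowing; checked_mpi_count is hypothetical, not ExaTN code:

#include <cassert>
#include <climits>
#include <cstddef>

// MPI takes message counts as int, so a byte packet larger than INT_MAX
// cannot be sent in a single call; guard the narrowing conversion:
int checked_mpi_count(std::size_t nbytes){
 assert(nbytes > 0 && nbytes <= static_cast<std::size_t>(INT_MAX));
 return static_cast<int>(nbytes);
}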
...
...
@@ -1962,20 +1982,21 @@ bool NumServer::broadcastTensor(const ProcessGroup & process_group, const std::s
  bool success = true;
  auto tensor_mapper = getTensorMapper(process_group);
  auto iter = tensors_.find(name);
- if(iter == tensors_.end()){
+ if(iter != tensors_.end()){
+  if(iter->second->isComposite()){
+   std::cout << "#ERROR(exatn::NumServer::broadcastTensor): Tensor " << name << " is composite, broadcast not implemented!" << std::endl << std::flush;
+   assert(false);
+  }else{
+   std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::BROADCAST);
+   op->setTensorOperand(iter->second);
+   std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetMPICommunicator(process_group.getMPICommProxy());
+   std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetRootRank(root_process_rank);
+   success = submit(op,tensor_mapper);
+  }
+ }else{
   std::cout << "#ERROR(exatn::NumServer::broadcastTensor): Tensor " << name << " not found!" << std::endl;
   return false;
  }
- if(iter->second->isComposite()){
-  std::cout << "#ERROR(exatn::NumServer::broadcastTensor): Tensor " << name << " is composite, broadcast not implemented!" << std::endl << std::flush;
-  assert(false);
- }else{
-  std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::BROADCAST);
-  op->setTensorOperand(iter->second);
-  std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetMPICommunicator(process_group.getMPICommProxy());
-  std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetRootRank(root_process_rank);
-  success = submit(op,tensor_mapper);
- }
  return success;
 }
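The restructured body follows a factory-then-configure pattern: createTensorOp returns the abstract TensorOperation, and the broadcast-specific knobs (MPI communicator, root rank) are reached through std::dynamic_pointer_cast. A self-contained sketch of that pattern with stand-in types; Op, BcastOp, and make_op are all hypothetical:

#include <cassert>
#include <memory>

struct Op { virtual ~Op() = default; };   // abstract operation (stand-in)
struct BcastOp : Op {                     // broadcast-specific subtype
 int root = 0;
 void resetRoot(int r){ root = r; }
};

std::shared_ptr<Op> make_op(){ return std::make_shared<BcastOp>(); }

int main(){
 std::shared_ptr<Op> op = make_op();                   // factory hands back the base type
 auto bcast = std::dynamic_pointer_cast<BcastOp>(op);  // downcast to reach the setters
 assert(bcast);                                        // a failed cast yields an empty pointer
 bcast->resetRoot(0);
 return 0;
}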
...
...
@@ -1986,26 +2007,27 @@ bool NumServer::broadcastTensorSync(const ProcessGroup & process_group, const st
  bool success = true;
  auto tensor_mapper = getTensorMapper(process_group);
  auto iter = tensors_.find(name);
- if(iter == tensors_.end()){
-  std::cout << "#ERROR(exatn::NumServer::broadcastTensorSync): Tensor " << name << " not found!" << std::endl;
-  return false;
- }
- if(iter->second->isComposite()){
-  std::cout << "#ERROR(exatn::NumServer::broadcastTensorSync): Tensor " << name << " is composite, broadcast not implemented!" << std::endl << std::flush;
-  assert(false);
- }else{
-  std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::BROADCAST);
-  op->setTensorOperand(iter->second);
-  std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetMPICommunicator(process_group.getMPICommProxy());
-  std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetRootRank(root_process_rank);
-  success = submit(op,tensor_mapper);
-  if(success){
-   success = sync(*op);
+ if(iter != tensors_.end()){
+  if(iter->second->isComposite()){
+   std::cout << "#ERROR(exatn::NumServer::broadcastTensorSync): Tensor " << name << " is composite, broadcast not implemented!" << std::endl << std::flush;
+   assert(false);
+  }else{
+   std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::BROADCAST);
+   op->setTensorOperand(iter->second);
+   std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetMPICommunicator(process_group.getMPICommProxy());
+   std::dynamic_pointer_cast<numerics::TensorOpBroadcast>(op)->resetRootRank(root_process_rank);
+   success = submit(op,tensor_mapper);
+   if(success){
+    success = sync(*op);
 #ifdef MPI_ENABLED
-   if(success) success = sync(process_group);
+    if(success) success = sync(process_group);
 #endif
-  }
- }
+   }
+  }
+ }else{
+  std::cout << "#ERROR(exatn::NumServer::broadcastTensorSync): Tensor " << name << " not found!" << std::endl;
+  assert(false);
+ }
  return success;
 }
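The Sync variant layers two waits on top of the asynchronous path: first it blocks on the submitted operation itself, then (when MPI is enabled) it synchronizes the whole process group so no rank exits the call early. A schematic of that control flow with stub functions standing in for ExaTN's submit()/sync(); all names here are illustrative:

#include <iostream>

// Stubs standing in for the real calls (hypothetical):
bool submit_op()  { return true; } // enqueue the asynchronous tensor operation
bool wait_op()    { return true; } // block until the local operation completes
bool wait_group() { return true; } // barrier across the whole process group

bool collective_sync(){
 bool success = submit_op();
 if(success) success = wait_op();
#ifdef MPI_ENABLED
 if(success) success = wait_group();
#endif
 return success;
}

int main(){ std::cout << std::boolalpha << collective_sync() << std::endl; }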
...
...
@@ -2026,19 +2048,20 @@ bool NumServer::allreduceTensor(const ProcessGroup & process_group, const std::s
  bool success = true;
  auto tensor_mapper = getTensorMapper(process_group);
  auto iter = tensors_.find(name);
- if(iter == tensors_.end()){
+ if(iter != tensors_.end()){
+  if(iter->second->isComposite()){
+   std::cout << "#ERROR(exatn::NumServer::allreduceTensor): Tensor " << name << " is composite, allreduce not implemented!" << std::endl << std::flush;
+   assert(false);
+  }else{
+   std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::ALLREDUCE);
+   op->setTensorOperand(iter->second);
+   std::dynamic_pointer_cast<numerics::TensorOpAllreduce>(op)->resetMPICommunicator(process_group.getMPICommProxy());
+   success = submit(op,tensor_mapper);
+  }
+ }else{
   std::cout << "#ERROR(exatn::NumServer::allreduceTensor): Tensor " << name << " not found!" << std::endl;
   return false;
  }
- if(iter->second->isComposite()){
-  std::cout << "#ERROR(exatn::NumServer::allreduceTensor): Tensor " << name << " is composite, allreduce not implemented!" << std::endl << std::flush;
-  assert(false);
- }else{
-  std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::ALLREDUCE);
-  op->setTensorOperand(iter->second);
-  std::dynamic_pointer_cast<numerics::TensorOpAllreduce>(op)->resetMPICommunicator(process_group.getMPICommProxy());
-  success = submit(op,tensor_mapper);
- }
  return success;
 }
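Unlike BROADCAST, which overwrites every replica with the root's data, ALLREDUCE combines the replicas elementwise (here a sum) so that every rank ends up holding the same reduced values; note that no root rank is configured. A minimal plain-MPI illustration of those semantics, not ExaTN code:

#include <mpi.h>
#include <cstdio>

int main(int argc, char ** argv){
 MPI_Init(&argc,&argv);
 int rank = 0;
 MPI_Comm_rank(MPI_COMM_WORLD,&rank);
 double local[2] = {1.0 * rank, 2.0 * rank}; // each rank contributes its own data
 double summed[2] = {0.0, 0.0};
 MPI_Allreduce(local,summed,2,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
 std::printf("rank %d holds the elementwise sum {%g, %g}\n",rank,summed[0],summed[1]);
 MPI_Finalize();
 return 0;
}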
...
...
@@ -2049,25 +2072,26 @@ bool NumServer::allreduceTensorSync(const ProcessGroup & process_group, const st
  bool success = true;
  auto tensor_mapper = getTensorMapper(process_group);
  auto iter = tensors_.find(name);
- if(iter == tensors_.end()){
-  std::cout << "#ERROR(exatn::NumServer::allreduceTensorSync): Tensor " << name << " not found!" << std::endl;
-  return false;
- }
- if(iter->second->isComposite()){
-  std::cout << "#ERROR(exatn::NumServer::allreduceTensorSync): Tensor " << name << " is composite, allreduce not implemented!" << std::endl << std::flush;
-  assert(false);
- }else{
-  std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::ALLREDUCE);
-  op->setTensorOperand(iter->second);
-  std::dynamic_pointer_cast<numerics::TensorOpAllreduce>(op)->resetMPICommunicator(process_group.getMPICommProxy());
-  success = submit(op,tensor_mapper);
-  if(success){
-   success = sync(*op);
+ if(iter != tensors_.end()){
+  if(iter->second->isComposite()){
+   std::cout << "#ERROR(exatn::NumServer::allreduceTensorSync): Tensor " << name << " is composite, allreduce not implemented!" << std::endl << std::flush;
+   assert(false);
+  }else{
+   std::shared_ptr<TensorOperation> op = tensor_op_factory_->createTensorOp(TensorOpCode::ALLREDUCE);
+   op->setTensorOperand(iter->second);
+   std::dynamic_pointer_cast<numerics::TensorOpAllreduce>(op)->resetMPICommunicator(process_group.getMPICommProxy());
+   success = submit(op,tensor_mapper);
+   if(success){
+    success = sync(*op);
 #ifdef MPI_ENABLED
-   if(success) success = sync(process_group);
+    if(success) success = sync(process_group);
 #endif
-  }
- }
+   }
+  }
+ }else{
+  std::cout << "#ERROR(exatn::NumServer::allreduceTensorSync): Tensor " << name << " not found!" << std::endl;
+  assert(false);
+ }
  return success;
 }
...
...
src/exatn/tests/NumServerTester.cpp
...
...
@@ -45,8 +45,8 @@
 #define EXATN_TEST24
 #define EXATN_TEST25
 #define EXATN_TEST26
-#define EXATN_TEST27 //requires input file from source
-#define EXATN_TEST28 //requires input file from source
+//#define EXATN_TEST27 //requires input file from source
+//#define EXATN_TEST28 //requires input file from source
 #define EXATN_TEST30
...
...
@@ -3256,7 +3256,7 @@ TEST(NumServerTester, TensorComposite) {
  // << exatn::withinTensorExistenceDomain("A","B","C") << std::endl; //debug
  //Initialize composite tensors:
- success = exatn::initTensorRnd("A"); assert(success);
+ success = exatn::initTensor("A",std::complex<float>{1e-3,0.0}); assert(success);
  success = exatn::initTensor("B",std::complex<float>{0.0,0.0}); assert(success);
  success = exatn::initTensor("C",std::complex<float>{1e-4,0.0}); assert(success);
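This test adjustment follows directly from the num_server.cpp change above: "A" is a composite tensor, and initTensorRnd now asserts on composite tensors, so the test switches to a deterministic constant fill. A sketch of the updated initialization in user-code form, mirroring the diff and assuming tensors "A", "B", "C" were created as in the TensorComposite test:

#include "exatn.hpp"
#include <cassert>
#include <complex>

void init_composite_inputs(){
 // exatn::initTensorRnd("A") would now trip the new guard, because "A" is
 // composite; deterministic constants keep the test running instead:
 bool success = exatn::initTensor("A",std::complex<float>{1e-3,0.0}); assert(success);
 success = exatn::initTensor("B",std::complex<float>{0.0,0.0}); assert(success);
 success = exatn::initTensor("C",std::complex<float>{1e-4,0.0}); assert(success);
}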
...
...