Commit 2ef74b8c authored by Dmitry I. Lyakh

Implemented exatn::replicateTensor() API.

parent 31eb6aaa
Pipeline #109618 passed with stage in 14 minutes and 21 seconds
/** ExaTN::Numerics: General client header
REVISION: 2020/06/04
REVISION: 2020/06/30
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -288,6 +288,90 @@ inline bool computePartialNormsSync(const std::string & name, //in: t
{return numericalServer->computePartialNormsSync(name,tensor_dimension,partial_norms);}
/** Replicates a tensor within the given process group, which defaults to all MPI processes.
Only the root process (local rank root_process_rank) within the given process group is required
to have the tensor; it will automatically be created in those MPI processes which do not have it. **/
inline bool replicateTensor(const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->replicateTensor(name,root_process_rank);}
inline bool replicateTensor(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->replicateTensor(process_group,name,root_process_rank);}
inline bool replicateTensorSync(const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->replicateTensorSync(name,root_process_rank);}
inline bool replicateTensorSync(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->replicateTensorSync(process_group,name,root_process_rank);}
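A minimal usage sketch of the new API (editorial, not part of this diff; the tensor name and shape are illustrative, and the createTensor/initTensorRnd/getProcessRank calls and the include path are recalled from the ExaTN client API and should be checked against the actual header):

#include "exatn.hpp" // ExaTN client header (path assumed)

// Only the root process (local rank 0 of the default all-process group) creates and initializes "T".
if (exatn::getProcessRank() == 0) {
  exatn::createTensor("T", exatn::TensorElementType::REAL64, exatn::numerics::TensorShape{16,16});
  exatn::initTensorRnd("T");
}
// replicateTensorSync() then creates "T" on every other MPI process and copies the data over.
bool done = exatn::replicateTensorSync("T", 0); // 0 = local rank of the root within the group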
/** Broadcasts a tensor among all MPI processes within a given process group,
which defaults to all MPI processes. This function is needed when a tensor
is updated in an operation submitted to only a subset of MPI processes,
so that the excluded MPI processes can receive the updated version of the tensor.
Note that the tensor must exist in all participating MPI processes. **/
inline bool broadcastTensor(const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->broadcastTensor(name,root_process_rank);}
inline bool broadcastTensor(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->broadcastTensor(process_group,name,root_process_rank);}
inline bool broadcastTensorSync(const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->broadcastTensorSync(name,root_process_rank);}
inline bool broadcastTensorSync(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank) //in: local rank of the root process within the given process group
{return numericalServer->broadcastTensorSync(process_group,name,root_process_rank);}
inline bool broadcastTensor(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name, //in: tensor name
int root_process_rank) //in: rank of the root process within the MPI intra-communicator
{return numericalServer->broadcastTensor(intra_comm,name,root_process_rank);}
inline bool broadcastTensorSync(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name, //in: tensor name
int root_process_rank) //in: rank of the root process within the MPI intra-communicator
{return numericalServer->broadcastTensorSync(intra_comm,name,root_process_rank);}
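A hedged sketch of the broadcast path (editorial, not part of this diff; the tensor name "W" is illustrative and getDefaultProcessGroup() is assumed to be the existing accessor for the all-process group):

// Precondition: "W" already exists on every process of 'group' (see the comment above).
// Suppose only local rank 0 of 'group' executed the update; it now pushes the result out.
const exatn::ProcessGroup & group = exatn::getDefaultProcessGroup();
bool done = exatn::broadcastTensorSync(group, "W", 0); // 0 = local rank of the root within 'group'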
/** Performs a global sum reduction on a tensor among all MPI processes within a given
process group, which defaults to all MPI processes. This function is needed when
multiple MPI processes compute their own local updates to the tensor; the global
sum reduction then ensures that each MPI process ends up with the same final tensor
value. Note that the tensor must exist in all participating MPI processes. **/
inline bool allreduceTensor(const std::string & name) //in: tensor name
{return numericalServer->allreduceTensor(name);}
inline bool allreduceTensor(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name) //in: tensor name
{return numericalServer->allreduceTensor(process_group,name);}
inline bool allreduceTensorSync(const std::string & name) //in: tensor name
{return numericalServer->allreduceTensorSync(name);}
inline bool allreduceTensorSync(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name) //in: tensor name
{return numericalServer->allreduceTensorSync(process_group,name);}
inline bool allreduceTensor(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name) //in: tensor name
{return numericalServer->allreduceTensor(intra_comm,name);}
inline bool allreduceTensorSync(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name) //in: tensor name
{return numericalServer->allreduceTensorSync(intra_comm,name);}
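Likewise for the sum reduction, a short illustrative sketch (not part of this diff; the tensor name "G" is hypothetical): each process accumulates its local contribution into the same tensor, then the allreduce makes every copy identical.

// Every participating process already holds its own partial result in "G".
// ... each process performs its local accumulation into "G" here ...
bool done = exatn::allreduceTensorSync("G"); // element-wise global sum over the default group
// After this call every MPI process observes the same, fully reduced "G".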
/** Scales a tensor by a scalar value. **/
template<typename NumericType>
inline bool scaleTensor(const std::string & name, //in: tensor name
......@@ -300,34 +384,6 @@ inline bool scaleTensorSync(const std::string & name, //in: tensor name
{return numericalServer->scaleTensorSync(name,value);}
/** Broadcast a tensor among all MPI processes within an intra-communicator.
This function is needed when a tensor is updated in an operation
submitted to a subset of MPI processes such that the excluded
MPI processes can receive an updated version of the tensor. **/
inline bool broadcastTensor(const std::string & name, //in: tensor name
int root_process_rank, //in: rank of the root process within the MPI intra-communicator
MPICommProxy intra_comm = MPICommProxy()) //in: MPI intra-communicator, defaults to all processes
{return numericalServer->broadcastTensor(name,root_process_rank,intra_comm);}
inline bool broadcastTensorSync(const std::string & name, //in: tensor name
int root_process_rank, //in: rank of the root process within the MPI intra-communicator
MPICommProxy intra_comm = MPICommProxy()) //in: MPI intra-communicator, defaults to all processes
{return numericalServer->broadcastTensorSync(name,root_process_rank,intra_comm);}
/** Performs a global sum reduction on a tensor among all MPI processes within
an intra-communicator. This function is needed when multiple MPI processes
compute their local updates to the tensor, thus requiring a global sum
reduction such that each MPI process will get the final (same) tensor value. **/
inline bool allreduceTensor(const std::string & name, //in: tensor name
MPICommProxy intra_comm = MPICommProxy()) //in: MPI intra-communicator, defaults to all processes
{return numericalServer->allreduceTensor(name,intra_comm);}
inline bool allreduceTensorSync(const std::string & name, //in: tensor name
MPICommProxy intra_comm = MPICommProxy()) //in: MPI intra-communicator, defaults to all processes
{return numericalServer->allreduceTensorSync(name,intra_comm);}
/** Transforms (updates) a tensor according to a user-defined tensor functor. **/
inline bool transformTensor(const std::string & name, //in: tensor name
std::shared_ptr<TensorMethod> functor) //in: functor defining tensor transformation
......
/** ExaTN::Numerics: Numerical server
REVISION: 2020/06/28
REVISION: 2020/06/30
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -359,27 +359,70 @@ public:
unsigned int tensor_dimension, //in: chosen tensor dimension
std::vector<double> & partial_norms); //out: partial 2-norms over the chosen tensor dimension
/** Broadcast a tensor among all MPI processes within an intra-communicator.
This function is needed when a tensor is updated in an operation
submitted to a subset of MPI processes such that the excluded
MPI processes can receive an updated version of the tensor. **/
bool broadcastTensor(const std::string & name, //in: tensor name
int root_process_rank, //in: rank of the root process within the MPI intra-communicator
MPICommProxy intra_comm = MPICommProxy()); //in: MPI intra-communicator, defaults to all processes
/** Replicates a tensor within the given process group, which defaults to all MPI processes.
Only the root process (local rank root_process_rank) within the given process group is required
to have the tensor; it will automatically be created in those MPI processes which do not have it. **/
bool replicateTensor(const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
bool broadcastTensorSync(const std::string & name, //in: tensor name
int root_process_rank, //in: rank of the root process within the MPI intra-communicator
MPICommProxy intra_comm = MPICommProxy()); //in: MPI intra-communicator, defaults to all processes
bool replicateTensor(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
/** Performs a global sum reduction on a tensor among all MPI processes within
an intra-communicator. This function is needed when multiple MPI processes
compute their local updates to the tensor, thus requiring a global sum
reduction such that each MPI process will get the final (same) tensor value. **/
bool allreduceTensor(const std::string & name, //in: tensor name
MPICommProxy intra_comm = MPICommProxy()); //in: MPI intra-communicator, defaults to all processes
bool replicateTensorSync(const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
bool allreduceTensorSync(const std::string & name, //in: tensor name
MPICommProxy intra_comm = MPICommProxy()); //in: MPI intra-communicator, defaults to all processes
bool replicateTensorSync(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
/** Broadcasts a tensor among all MPI processes within a given process group,
which defaults to all MPI processes. This function is needed when a tensor
is updated in an operation submitted to only a subset of MPI processes,
so that the excluded MPI processes can receive the updated version of the tensor.
Note that the tensor must exist in all participating MPI processes. **/
bool broadcastTensor(const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
bool broadcastTensor(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
bool broadcastTensorSync(const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
bool broadcastTensorSync(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name, //in: tensor name
int root_process_rank); //in: local rank of the root process within the given process group
bool broadcastTensor(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name, //in: tensor name
int root_process_rank); //in: rank of the root process within the MPI intra-communicator
bool broadcastTensorSync(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name, //in: tensor name
int root_process_rank); //in: rank of the root process within the MPI intra-communicator
/** Performs a global sum reduction on a tensor among all MPI processes within a given
process group, which defaults to all MPI processes. This function is needed when
multiple MPI processes compute their own local updates to the tensor; the global
sum reduction then ensures that each MPI process ends up with the same final tensor
value. Note that the tensor must exist in all participating MPI processes. **/
bool allreduceTensor(const std::string & name); //in: tensor name
bool allreduceTensor(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name); //in: tensor name
bool allreduceTensorSync(const std::string & name); //in: tensor name
bool allreduceTensorSync(const ProcessGroup & process_group, //in: chosen group of MPI processes
const std::string & name); //in: tensor name
bool allreduceTensor(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name); //in: tensor name
bool allreduceTensorSync(MPICommProxy intra_comm, //in: explicit MPI intra-communicator
const std::string & name); //in: tensor name
/** Scales a tensor by a scalar value. **/
template<typename NumericType>
......@@ -541,6 +584,7 @@ private:
std::shared_ptr<ProcessGroup> process_world_; //default process group comprising all MPI processes and their communicator
std::shared_ptr<ProcessGroup> process_self_; //current process group comprising solely the current MPI process and its own communicator
std::shared_ptr<runtime::TensorRuntime> tensor_rt_; //tensor runtime (for actual execution of tensor operations)
BytePacket byte_packet_; //byte packet for exchanging tensor meta-data
};
/** Numerical service singleton (numerical server) **/
......
/** ExaTN::Numerics: Tensor
REVISION: 2020/06/25
REVISION: 2020/06/30
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -94,6 +94,11 @@ name_(name), element_type_(TensorElementType::VOID)
this->setElementType(left_tensor_type);
}
Tensor::Tensor(BytePacket & byte_packet)
{
unpack(byte_packet);
}
Tensor::Tensor(const Tensor & another,
const std::vector<unsigned int> & order):
name_(another.getName()),
......
/** ExaTN::Numerics: Abstract Tensor
REVISION: 2020/06/25
REVISION: 2020/06/30
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -104,6 +104,8 @@ public:
const Tensor & left_tensor, //left tensor
const Tensor & right_tensor, //right tensor
const std::vector<TensorLeg> & contraction); //tensor contraction pattern
/** Create a tensor from a byte packet. **/
Tensor(BytePacket & byte_packet);
/** Create a tensor by permuting another tensor. **/
Tensor(const Tensor & another, //in: another tensor
......
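The byte-packet constructor above, together with the new byte_packet_ member of the numerical server, is what lets replicateTensor() ship tensor meta-data to processes that lack the tensor. A rough receiving-side sketch (editorial, not part of this diff; the packet is assumed to have been filled and transmitted by the root, and the exact BytePacket header and namespace should be checked against the sources):

#include "tensor.hpp" // header providing exatn::numerics::Tensor (include path assumed)
#include <iostream>

// 'packet' is assumed to carry the tensor meta-data serialized by the root process;
// the sending/serialization side is not shown in this commit's visible hunks.
void rebuildFromPacket(BytePacket & packet) // BytePacket qualification follows the diff; adjust namespace as needed
{
  // New constructor from this commit: restores the tensor's meta-data via unpack(packet).
  exatn::numerics::Tensor replica(packet);
  // Existing accessors (getName(), getRank()) can then be used to check the round trip.
  std::cout << "Rebuilt tensor " << replica.getName() << " of rank " << replica.getRank() << "\n";
}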
Subproject commit 25d5feddbf3276052f2f1a33fc7415eda7b0dbf0
Subproject commit c0a96b2fca285b4994cae7635ea9cb0ed3e470cd