Commit b8d459a1 authored by Dmitry I. Lyakh's avatar Dmitry I. Lyakh
Browse files

Fixed initialization of ordering projection tensors



Signed-off-by: Dmitry I. Lyakh <quant4me@gmail.com>
parent 5e622c07
Pipeline #165650 failed with stage
in 5 minutes and 52 seconds
/** ExaTN::Numerics: Tensor Functor: Initialization of Ordering Projection tensors /** ExaTN::Numerics: Tensor Functor: Initialization of Ordering Projection tensors
REVISION: 2021/02/16 REVISION: 2021/09/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...@@ -35,7 +35,7 @@ int FunctorInitProj::apply(talsh::Tensor & local_tensor) //tensor slice (in gene ...@@ -35,7 +35,7 @@ int FunctorInitProj::apply(talsh::Tensor & local_tensor) //tensor slice (in gene
assert(rank%2 == 0); //only even-order tensors assert(rank%2 == 0); //only even-order tensors
auto init_proj = [&](auto * tensor_body){ auto init_proj_strong = [&](auto * tensor_body){
std::vector<DimOffset> bas(rank); std::vector<DimOffset> bas(rank);
for(unsigned int i = 0; i < rank; ++i) bas[i] = offsets[i]; //tensor slice dimension base offsets for(unsigned int i = 0; i < rank; ++i) bas[i] = offsets[i]; //tensor slice dimension base offsets
std::vector<DimExtent> ext(rank); std::vector<DimExtent> ext(rank);
...@@ -53,29 +53,71 @@ int FunctorInitProj::apply(talsh::Tensor & local_tensor) //tensor slice (in gene ...@@ -53,29 +53,71 @@ int FunctorInitProj::apply(talsh::Tensor & local_tensor) //tensor slice (in gene
return 0; return 0;
}; };
auto init_proj_weak = [&](auto * tensor_body){ //initializes a weak-ordering projection: element = 1 iff both index halves coincide and each is in non-decreasing order
std::vector<DimOffset> bas(rank);
for(unsigned int i = 0; i < rank; ++i) bas[i] = offsets[i]; //tensor slice dimension base offsets
std::vector<DimExtent> ext(rank);
for(unsigned int i = 0; i < rank; ++i) ext[i] = extents[i]; //tensor slice dimension extents
TensorRange rng(bas,ext); //multi-index iterator over the local tensor slice
bool not_over = true;
while(not_over){ //visit every element of the slice exactly once
if(rng.nondecreasingOrderDiag()){ //diagonal test: halves match and are non-decreasing (weak ordering allows repeated indices)
tensor_body[rng.localOffset()] = 1.0;
}else{
tensor_body[rng.localOffset()] = 0.0; //all other elements are zero
}
not_over = rng.next(); //advance to the next multi-index value; false once exhausted
}
return 0; //success
};
auto access_granted = false; auto access_granted = false;
{//Try REAL32: {//Try REAL32:
float * body; float * body;
access_granted = local_tensor.getDataAccessHost(&body); access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return init_proj(body); if(access_granted){
if(weak_ordering_){
return init_proj_weak(body);
}else{
return init_proj_strong(body);
}
}
} }
{//Try REAL64: {//Try REAL64:
double * body; double * body;
access_granted = local_tensor.getDataAccessHost(&body); access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return init_proj(body); if(access_granted){
if(weak_ordering_){
return init_proj_weak(body);
}else{
return init_proj_strong(body);
}
}
} }
{//Try COMPLEX32: {//Try COMPLEX32:
std::complex<float> * body; std::complex<float> * body;
access_granted = local_tensor.getDataAccessHost(&body); access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return init_proj(body); if(access_granted){
if(weak_ordering_){
return init_proj_weak(body);
}else{
return init_proj_strong(body);
}
}
} }
{//Try COMPLEX64: {//Try COMPLEX64:
std::complex<double> * body; std::complex<double> * body;
access_granted = local_tensor.getDataAccessHost(&body); access_granted = local_tensor.getDataAccessHost(&body);
if(access_granted) return init_proj(body); if(access_granted){
if(weak_ordering_){
return init_proj_weak(body);
}else{
return init_proj_strong(body);
}
}
} }
std::cout << "#ERROR(exatn::numerics::FunctorInitProj): Unknown data kind in talsh::Tensor!" << std::endl; std::cout << "#ERROR(exatn::numerics::FunctorInitProj): Unknown data kind in talsh::Tensor!" << std::endl;
......
/** ExaTN::Numerics: Tensor Functor: Initialization of Ordering Projection tensors /** ExaTN::Numerics: Tensor Functor: Initialization of Ordering Projection tensors
REVISION: 2021/02/16 REVISION: 2021/09/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...@@ -8,7 +8,7 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/ ...@@ -8,7 +8,7 @@ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
(A) This tensor functor (method) is used to initialize an ordering projection tensor. (A) This tensor functor (method) is used to initialize an ordering projection tensor.
An ordering projection tensor is an even-order tensor with the following elements: An ordering projection tensor is an even-order tensor with the following elements:
- Element = 1 if: The first half and the second half of the indices are the same and - Element = 1 if: The first half and the second half of the indices are the same and
both are in a monotonically increasing order; both are in a monotonically increasing (or non-decreasing) order;
- Element = 0 otherwise. - Element = 0 otherwise.
**/ **/
...@@ -33,7 +33,9 @@ namespace numerics{ ...@@ -33,7 +33,9 @@ namespace numerics{
class FunctorInitProj: public talsh::TensorFunctor<Identifiable>{ class FunctorInitProj: public talsh::TensorFunctor<Identifiable>{
public: public:
FunctorInitProj() = default; FunctorInitProj(bool weak_ordering = false): //strong VS weak index ordering
weak_ordering_(weak_ordering)
{}
virtual ~FunctorInitProj() = default; virtual ~FunctorInitProj() = default;
...@@ -61,6 +63,7 @@ public: ...@@ -61,6 +63,7 @@ public:
private: private:
bool weak_ordering_;
}; };
} //namespace numerics } //namespace numerics
......
/** ExaTN::Numerics: Tensor range /** ExaTN::Numerics: Tensor range
REVISION: 2021/02/16 REVISION: 2021/09/21
Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh) Copyright (C) 2018-2021 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/ Copyright (C) 2018-2021 Oak Ridge National Laboratory (UT-Battelle) **/
...@@ -108,6 +108,14 @@ public: ...@@ -108,6 +108,14 @@ public:
each is in a monotonically decreasing order. **/ each is in a monotonically decreasing order. **/
inline bool decreasingOrderDiag() const; inline bool decreasingOrderDiag() const;
/** Tests whether both halves of the indices of the current multi-index are the same and
each is in a monotonically non-decreasing order. **/
inline bool nondecreasingOrderDiag() const;
/** Tests whether both halves of the indices of the current multi-index are the same and
each is in a monotonically non-increasing order. **/
inline bool nonincreasingOrderDiag() const;
/** Returns the flat offset produced by the current multi-index value per se. **/ /** Returns the flat offset produced by the current multi-index value per se. **/
inline DimOffset localOffset() const; //little endian inline DimOffset localOffset() const; //little endian
...@@ -353,7 +361,7 @@ inline bool TensorRange::increasingOrderDiag() const ...@@ -353,7 +361,7 @@ inline bool TensorRange::increasingOrderDiag() const
if(indeed && half_size > 0){ if(indeed && half_size > 0){
for(int i = 1; i < half_size; ++i){ for(int i = 1; i < half_size; ++i){
if(((bases_[i] + mlndx_[i]) <= (bases_[i-1] + mlndx_[i-1])) || if(((bases_[i] + mlndx_[i]) <= (bases_[i-1] + mlndx_[i-1])) ||
((bases_[i] + mlndx_[i]) == (bases_[half_size+i] + mlndx_[half_size+i]))){ ((bases_[i] + mlndx_[i]) != (bases_[half_size+i] + mlndx_[half_size+i]))){
indeed = false; indeed = false;
break; break;
} }
...@@ -371,7 +379,43 @@ inline bool TensorRange::decreasingOrderDiag() const ...@@ -371,7 +379,43 @@ inline bool TensorRange::decreasingOrderDiag() const
if(indeed && half_size > 0){ if(indeed && half_size > 0){
for(int i = 1; i < half_size; ++i){ for(int i = 1; i < half_size; ++i){
if(((bases_[i] + mlndx_[i]) >= (bases_[i-1] + mlndx_[i-1])) || if(((bases_[i] + mlndx_[i]) >= (bases_[i-1] + mlndx_[i-1])) ||
((bases_[i] + mlndx_[i]) == (bases_[half_size+i] + mlndx_[half_size+i]))){ ((bases_[i] + mlndx_[i]) != (bases_[half_size+i] + mlndx_[half_size+i]))){
indeed = false;
break;
}
}
indeed = indeed && ((bases_[0] + mlndx_[0]) == (bases_[half_size] + mlndx_[half_size]));
}
return indeed;
}
/** Returns true iff the current multi-index has an even number of components,
    its second half exactly mirrors its first half, and the first half is in
    monotonically non-decreasing order (repeated values allowed).
    An empty multi-index trivially satisfies the condition. **/
inline bool TensorRange::nondecreasingOrderDiag() const
{
 const auto order = mlndx_.size();
 if(order % 2 != 0) return false; //only even-order multi-indices qualify
 const int half = order / 2;
 if(half == 0) return true; //empty multi-index: trivially ordered
 //Index 0 must equal its mirror in the second half:
 if((bases_[0] + mlndx_[0]) != (bases_[half] + mlndx_[half])) return false;
 for(int i = 1; i < half; ++i){
  const auto cur = bases_[i] + mlndx_[i];
  if(cur < (bases_[i-1] + mlndx_[i-1])) return false; //violates non-decreasing order
  if(cur != (bases_[half+i] + mlndx_[half+i])) return false; //halves do not mirror
 }
 return true;
}
inline bool TensorRange::nonincreasingOrderDiag() const
{
bool indeed = (mlndx_.size() % 2 == 0);
int half_size = mlndx_.size() / 2;
if(indeed && half_size > 0){
for(int i = 1; i < half_size; ++i){
if(((bases_[i] + mlndx_[i]) > (bases_[i-1] + mlndx_[i-1])) ||
((bases_[i] + mlndx_[i]) != (bases_[half_size+i] + mlndx_[half_size+i]))){
indeed = false; indeed = false;
break; break;
} }
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment