Commit e8bdb1f9 authored by Dmitry I. Lyakh

Introduced TensorNetwork::Iterator.

Implemented ContractionSeqOptimizerDummy.
parent 3f37904e
Pipeline #70476 passed with stage in 4 minutes and 28 seconds
/** ExaTN::Numerics: Tensor contraction sequence optimizer
REVISION: 2019/09/05
REVISION: 2019/09/08
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -34,7 +34,8 @@ class ContractionSeqOptimizer{
public:
virtual double determineContractionSequence(const TensorNetwork & network,
std::list<ContrTriple> & contr_seq) = 0;
std::list<ContrTriple> & contr_seq,
unsigned int intermediate_num_begin) = 0;
};
using createContractionSeqOptimizerFn = std::unique_ptr<ContractionSeqOptimizer> (*)(void);
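For illustration, a minimal sketch of how the factory alias and the extended virtual method are meant to be used together; the network variable is assumed to be an already finalized TensorNetwork, and the starting intermediate id mirrors the call added in TensorNetwork::determineContractionSequence below:
 //Sketch only: obtain a concrete optimizer through the factory type and request a contraction sequence
 createContractionSeqOptimizerFn create_optimizer = &ContractionSeqOptimizerDummy::createNew;
 std::unique_ptr<ContractionSeqOptimizer> optimizer = create_optimizer();
 std::list<ContrTriple> contr_seq;
 double flops = optimizer->determineContractionSequence(network,contr_seq,network.getNumTensors()+1);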
......
/** ExaTN::Numerics: Tensor contraction sequence optimizer: Dummy
REVISION: 2019/09/05
REVISION: 2019/09/08
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -12,10 +12,34 @@ namespace exatn{
namespace numerics{
double ContractionSeqOptimizerDummy::determineContractionSequence(const TensorNetwork & network,
std::list<ContrTriple> & contr_seq)
std::list<ContrTriple> & contr_seq,
unsigned int intermediate_num_begin)
{
 contr_seq.clear();
 double flops = 0.0;
 //`Finish: Requires TensorNetwork::iterator
 const auto num_tensors = network.getNumTensors();
 if(num_tensors > 1){
  TensorNetwork net(network); //work on a local copy of the tensor network
  unsigned int ids[num_tensors], i = 0;
  for(auto iter = net.begin(); iter != net.end(); ++iter){ //collect the ids of all input tensors
   if(iter->first != 0) ids[i++] = iter->first; //skip the output tensor (id 0)
  }
  assert(i == num_tensors);
  unsigned int prev_tensor = ids[0];
  for(unsigned int j = 1; j < num_tensors; ++j){ //contract the input tensors pairwise in the order encountered
   unsigned int curr_tensor = ids[j];
   if(j == (num_tensors - 1)){ //last tensor contraction: the result is the output tensor (id 0)
    contr_seq.emplace_back(ContrTriple{0,curr_tensor,prev_tensor});
    flops += net.getContractionCost(curr_tensor,prev_tensor);
   }else{ //intermediate tensor contraction
    contr_seq.emplace_back(ContrTriple{intermediate_num_begin,curr_tensor,prev_tensor});
    flops += net.getContractionCost(curr_tensor,prev_tensor);
    auto merged = net.mergeTensors(curr_tensor,prev_tensor,intermediate_num_begin);
    assert(merged);
    prev_tensor = intermediate_num_begin++;
   }
  }
 }
 return flops;
}
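As a worked illustration (hypothetical: assuming a network whose input tensors happen to be visited in the order 1, 2, 3, whose output tensor has id 0, and whose intermediate ids start at 4), the dummy optimizer produces:
 //step 1: ContrTriple{4,2,1} -- contract input tensors 2 and 1 into intermediate tensor 4
 //step 2: ContrTriple{0,3,4} -- contract input tensor 3 with intermediate 4 into the output tensor 0
 //flops accumulates net.getContractionCost(...) over both steps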
......
/** ExaTN::Numerics: Tensor contraction sequence optimizer: Dummy
REVISION: 2019/09/05
REVISION: 2019/09/08
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -21,7 +21,8 @@ class ContractionSeqOptimizerDummy: public ContractionSeqOptimizer{
public:
virtual double determineContractionSequence(const TensorNetwork & network,
std::list<ContrTriple> & contr_seq) override;
std::list<ContrTriple> & contr_seq,
unsigned int intermediate_num_begin) override;
static std::unique_ptr<ContractionSeqOptimizer> createNew();
};
......
/** ExaTN::Numerics: Tensor network
REVISION: 2019/09/05
REVISION: 2019/09/08
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -314,7 +314,7 @@ double TensorNetwork::determineContractionSequence(ContractionSeqOptimizer & con
{
 assert(finalized_ != 0); //tensor network must be in finalized state
 if(contraction_seq_.empty()){
  contraction_seq_flops_ = contr_seq_optimizer.determineContractionSequence(*this,contraction_seq_);
  contraction_seq_flops_ = contr_seq_optimizer.determineContractionSequence(*this,contraction_seq_,this->getNumTensors()+1); //intermediate tensor ids start right past the ids already present in the network
 }
 return contraction_seq_flops_;
}
......@@ -750,6 +750,26 @@ bool TensorNetwork::mergeTensors(unsigned int left_id, unsigned int right_id, un
}
bool TensorNetwork::splitTensor(unsigned int tensor_id,
const TensorShape & contracted_dims,
const std::vector<bool> & left_dims)
{
 if(tensor_id == 0){
  std::cout << "#ERROR(TensorNetwork::splitTensor): Invalid request: " <<
   "Splitting the output tensor of the tensor network is forbidden!" << std::endl;
  return false;
 }
 if(finalized_ == 0){
  std::cout << "#ERROR(TensorNetwork::splitTensor): Invalid request: " <<
   "Splitting a tensor in an unfinalized tensor network is forbidden!" << std::endl;
  return false;
 }
 //`Finish
 invalidateContractionSequence(); //invalidate previously cached tensor contraction sequence
 return true;
}
double TensorNetwork::getContractionCost(unsigned int left_id, unsigned int right_id,
double * arithm_intensity, bool adjust_cost)
{
......
/** ExaTN::Numerics: Tensor network
REVISION: 2019/09/05
REVISION: 2019/09/08
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -75,6 +75,7 @@ public:
static constexpr unsigned int NUM_WALKERS = 1024; //default number of walkers for tensor contraction sequence optimization
using ContractionSequence = std::vector<std::pair<unsigned int, unsigned int>>; //pairs of contracted tensor id's
using Iterator = typename std::unordered_map<unsigned int,TensorConn>::iterator; //iterator
/** Creates an unnamed empty tensor network with a single scalar output tensor named "_SMOKY_TENSOR_" **/
TensorNetwork();
......@@ -122,6 +123,11 @@ public:
If not found, returns nullptr. **/
std::shared_ptr<Tensor> getTensor(unsigned int tensor_id);
/** Returns an iterator to the beginning of the stored tensors (including the output tensor, id 0) **/
inline Iterator begin() {return tensors_.begin();}
/** Returns an iterator to the end of the stored tensors **/
inline Iterator end() {return tensors_.end();}
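A minimal usage sketch of the new iterator (the net variable and the loop body are illustrative only; the element type follows from the Iterator alias above):
 for(auto iter = net.begin(); iter != net.end(); ++iter){
  unsigned int tensor_id = iter->first;    //key: id of the tensor within the network (0 = output tensor)
  TensorConn & tensor_conn = iter->second; //value: the stored tensor connection object
  if(tensor_id != 0){
   //process input tensors only, as done in ContractionSeqOptimizerDummy above
  }
 }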
/** Finalizes the explicit construction of the tensor network (construction with advance knowledge).
The tensor network cannot be empty. **/
bool finalize(bool check_validity = false);
......@@ -178,6 +184,13 @@ public:
unsigned int right_id, //in: right tensor id (present in the tensor network)
unsigned int result_id); //in: result tensor id (absent in the tensor network, to be appended)
/** Splits a given tensor in a finalized tensor network into two tensors by introducing new dimensions
across the cutting boundary. Each original tensor dimension is assigned to either the left or
the right tensor; the new (connecting) dimensions are appended at the end of both tensors. **/
bool splitTensor(unsigned int tensor_id, //in: id of the tensor to be split into two tensors
const TensorShape & contracted_dims, //in: dimension extents of the contracted (new) dimensions connecting two tensors after splitting
const std::vector<bool> & left_dims); //in: assignment of original tensor dimensions to new tensors (true: belongs to left, false: belongs to right tensor)
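A hypothetical call, sketch only: the tensor id, the extent, the network variable, and the assumption that TensorShape accepts a braced list of extents are all illustrative, and splitTensor itself is still marked //`Finish in this commit:
 //Split rank-4 tensor 3 into two rank-3 tensors connected by one new dimension of extent 16;
 //the first two original dimensions go to the left tensor, the last two to the right tensor:
 bool split = network.splitTensor(3,TensorShape{16},{true,true,false,false});
 assert(split);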
/** Returns the FMA flop count for a given contraction of two tensors identified by their ids
in the tensor network. Optionally returns the arithmetic intensity of the tensor contraction as well.
Additionally, it also allows rescaling of the tensor contraction cost with the adjustment
......