Commit fc77064b authored by Dmitry I. Lyakh

Made heuristic tensor contraction sequence optimizer work properly.

parent 95034047
Pipeline #85159 passed with stage in 4 minutes and 20 seconds
/** ExaTN::Numerics: Numerical server
- REVISION: 2019/12/12
+ REVISION: 2019/12/30
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -210,7 +210,7 @@ bool NumServer::submit(std::shared_ptr<TensorOperation> operation)
bool NumServer::submit(TensorNetwork & network)
{
assert(network.isValid()); //debug
- auto & op_list = network.getOperationList();
+ auto & op_list = network.getOperationList("heuro");
auto output_tensor = network.getTensor(0);
auto iter = tensors_.find(output_tensor->getName());
if(iter == tensors_.end()){ //output tensor does not exist and needs to be created and initialized to zero
......
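For context, getOperationList() takes the name of a registered contraction-sequence optimizer; this change switches NumServer::submit() from the default to the heuristic one ("heuro"). A minimal caller-side sketch of the path this exercises (the umbrella header name is an assumption; evaluateSync() is the public entry point used in the tests below):

#include "exatn.hpp" //assumed umbrella header
#include <cassert>

//Any network submitted for evaluation now has its contraction order
//chosen by the "heuro" optimizer inside NumServer::submit():
void evaluateWithHeuristicOrder(exatn::numerics::TensorNetwork & network)
{
 const bool evaluated = exatn::evaluateSync(network); //internally: network.getOperationList("heuro")
 assert(evaluated);
}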
@@ -433,6 +433,150 @@ TEST(NumServerTester, circuitNumServer)
}
TEST(NumServerTester, largeCircuitNumServer)
{
using exatn::numerics::Tensor;
using exatn::numerics::TensorShape;
using exatn::numerics::TensorNetwork;
using exatn::TensorElementType;
exatn::resetRuntimeLoggingLevel(0); //debug
//Quantum Circuit:
//Q00---H-----
//Q01---H-----
// |
//Q09---H-----
const unsigned int nbQubits = 10;
//Define the initial qubit state vector:
std::vector<std::complex<double>> qzero {
{1.0,0.0}, {0.0,0.0}
};
//Define quantum gates:
std::vector<std::complex<double>> hadamard {
{1.0,0.0}, {1.0,0.0},
{1.0,0.0}, {-1.0,0.0}
};
//Create qubit tensors:
for (unsigned int i = 0; i < nbQubits; ++i) {
const bool created = exatn::createTensor("Q" + std::to_string(i),TensorElementType::COMPLEX64,TensorShape{2});
assert(created);
}
//Create gate tensors:
{
const bool created = exatn::createTensor("H",TensorElementType::COMPLEX64,TensorShape{2,2});
assert(created);
const bool registered = exatn::registerTensorIsometry("H",{0},{1});
assert(registered);
}
//Initialize qubit tensors to zero state:
for (unsigned int i = 0; i < nbQubits; ++i) {
const bool initialized = exatn::initTensorData("Q" + std::to_string(i),qzero);
assert(initialized);
}
//Initialize necessary gate tensors:
{
const bool initialized = exatn::initTensorData("H",hadamard);
assert(initialized);
}
{//Open a new scope:
//Build a tensor network from the quantum circuit:
TensorNetwork circuit("QuantumCircuit");
unsigned int tensorCounter = 1;
// Qubit tensors:
for (unsigned int i = 0; i < nbQubits; ++i) {
const bool appended = circuit.appendTensor(tensorCounter, exatn::getTensor("Q" + std::to_string(i)),{});
assert(appended);
++tensorCounter;
}
// Copy the qubit register network (used later to fully close the network with the bra):
TensorNetwork qubitReg(circuit);
qubitReg.rename("QubitKet");
// Hadamard tensors:
for (unsigned int i = 0; i < nbQubits; ++i) {
const bool appended = circuit.appendTensorGate(tensorCounter,exatn::getTensor("H"),{i});
assert(appended);
++tensorCounter;
}
circuit.printIt(); //debug
//Contract the circuit tensor network with its conjugate:
TensorNetwork inverse(circuit);
inverse.rename("InverseCircuit");
for (unsigned int i = 0; i < nbQubits; ++i) {
const bool appended = inverse.appendTensorGate(tensorCounter,exatn::getTensor("H"),{nbQubits - i - 1}, true);
assert(appended);
++tensorCounter;
}
const bool collapsed = inverse.collapseIsometries();
assert(collapsed);
inverse.printIt(); //debug
{// Closing the tensor network with the bra
auto bra = qubitReg;
bra.conjugate();
bra.rename("QubitBra");
std::vector<std::pair<unsigned int, unsigned int>> pairings;
for (unsigned int i = 0; i < nbQubits; ++i) {
pairings.emplace_back(std::make_pair(i, i));
}
inverse.appendTensorNetwork(std::move(bra), pairings);
}
inverse.printIt(); //debug
{
const bool rankEqualZero = (inverse.getRank() == 0);
assert(rankEqualZero);
}
//Evaluate the quantum circuit expressed as a tensor network:
// NOTE: We evaluate the *inverse* network, which should be fully closed.
const bool evaluated = exatn::evaluateSync(inverse);
assert(evaluated);
//Synchronize:
exatn::sync();
auto talsh_tensor = exatn::getLocalTensor(inverse.getTensor(0)->getName());
const std::complex<double>* body_ptr;
if (talsh_tensor->getDataAccessHostConst(&body_ptr)) {
std::cout << "Fina result is " << *body_ptr << "\n";
}
}
//Destroy all tensors:
{
const bool destroyed = exatn::destroyTensor("H");
assert(destroyed);
}
for (unsigned int i = 0; i < nbQubits; ++i) {
const bool destroyed = exatn::destroyTensor("Q" + std::to_string(i));
assert(destroyed);
}
//Synchronize:
exatn::sync();
//Grab a coffee!
}
TEST(NumServerTester, HamiltonianNumServer)
{
using exatn::numerics::Tensor;
......
/** ExaTN::Numerics: Tensor contraction sequence optimizer: Heuristics
- REVISION: 2019/11/08
+ REVISION: 2019/12/30
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -19,7 +19,7 @@ namespace exatn{
namespace numerics{
- static constexpr unsigned int NUM_WALKERS = 1024; //default number of walkers for tensor contraction sequence optimization
+ static constexpr unsigned int NUM_WALKERS = 128; //default number of walkers for tensor contraction sequence optimization
ContractionSeqOptimizerHeuro::ContractionSeqOptimizerHeuro():
@@ -48,7 +48,7 @@ double ContractionSeqOptimizerHeuro::determineContractionSequence(const TensorNe
auto numContractions = network.getNumTensors() - 1; //number of contractions is one less than the number of r.h.s. tensors
if(numContractions == 0) return flops;
std::cout << "#DEBUG(ContractionSeqOptimizerHeuro): Determining a pseudo-optimal tensor contraction sequence ... "; //debug
//std::cout << "#DEBUG(ContractionSeqOptimizerHeuro): Determining a pseudo-optimal tensor contraction sequence ... " << std::endl; //debug
auto timeBeg = std::chrono::high_resolution_clock::now();
ContractionSequence contrSeqEmpty;
@@ -59,10 +59,12 @@ double ContractionSeqOptimizerHeuro::determineContractionSequence(const TensorNe
std::priority_queue<ContrPath, std::vector<ContrPath>, decltype(cmpPaths)> priq(cmpPaths); //prioritized contraction paths
for(decltype(numContractions) pass = 0; pass < numContractions; ++pass){
- unsigned int intermediate_id = 0;
- if(pass < numContractions - 1) intermediate_id = intermediate_num_generator(); //id of the next intermediate tensor
- unsigned int numPassCands = 0;
+ //std::cout << "#DEBUG(ContractionSeqOptimizerHeuro): Pass " << pass << " started with "
+ //          << inputPaths.size() << " candidates" << std::endl; //debug
+ unsigned int intermediate_id = intermediate_num_generator(); //id of the next intermediate tensor
+ unsigned int numPassCands = 0, candid = 0;
for(auto & contrPath: inputPaths){
+ //std::cout << " #DEBUG(ContractionSeqOptimizerHeuro): Processing candidate " << candid++ << std::endl; //debug
auto & parentTensNet = std::get<0>(contrPath); //parental tensor network
const auto numTensors = parentTensNet.getNumTensors(); //number of r.h.s. tensors in the parental tensor network
const auto & parentContrSeq = std::get<1>(contrPath); //contraction sequence in the parental tensor network
@@ -77,7 +79,11 @@ double ContractionSeqOptimizerHeuro::determineContractionSequence(const TensorNe
TensorNetwork tensNet(parentTensNet);
auto contracted = tensNet.mergeTensors(i,j,intermediate_id); assert(contracted);
auto cSeq = parentContrSeq;
- cSeq.emplace_back(ContrTriple{intermediate_id,i,j}); //append a new pair of contracted tensors
+ if(pass == numContractions - 1){ //the very last tensor contraction writes into the output tensor #0
+  cSeq.emplace_back(ContrTriple{0,i,j}); //append the last pair of contracted tensors
+ }else{
+  cSeq.emplace_back(ContrTriple{intermediate_id,i,j}); //append a new pair of contracted tensors
+ }
priq.emplace(std::make_tuple(tensNet, cSeq, contrCost + std::get<2>(contrPath))); //cloning tensor network and contraction sequence
if(priq.size() > num_walkers_) priq.pop();
numPassCands++;
@@ -86,14 +92,14 @@ double ContractionSeqOptimizerHeuro::determineContractionSequence(const TensorNe
}
}
}
std::cout << std::endl << "Pass " << pass << ": Total number of candidates considered = " << numPassCands; //debug
//std::cout << "Pass " << pass << ": Total number of candidates considered = " << numPassCands << std::endl; //debug
inputPaths.clear();
if(pass == numContractions - 1){ //last pass
while(priq.size() > 1) priq.pop();
contr_seq = std::get<1>(priq.top());
flops = std::get<2>(priq.top());
priq.pop();
std::cout << std::endl << "Best tensor contraction sequence found with cost (flops) = " << flops; //debug
//std::cout << "Best tensor contraction sequence found with cost (flops) = " << flops << std::endl; //debug
}else{
while(priq.size() > 0){
inputPaths.emplace_back(priq.top());
@@ -104,9 +110,10 @@ double ContractionSeqOptimizerHeuro::determineContractionSequence(const TensorNe
auto timeEnd = std::chrono::high_resolution_clock::now();
auto timeTot = std::chrono::duration_cast<std::chrono::duration<double>>(timeEnd - timeBeg);
std::cout << std::endl << "Done (" << timeTot.count() << " sec):"; //debug
for(const auto & cPair: contr_seq) std::cout << " {" << cPair.left_id << "," << cPair.right_id << "}"; //debug
std::cout << std::endl; //debug
//std::cout << "Done (" << timeTot.count() << " sec):"; //debug
//for(const auto & cPair: contr_seq) std::cout << " {" << cPair.left_id << "," << cPair.right_id
// << "->" << cPair.result_id <<"}"; //debug
//std::cout << std::endl; //debug
return flops;
}
......
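The routine above is a beam search over partial contraction paths: each pass extends every surviving path by one pairwise contraction, and a bounded priority queue retains only the NUM_WALKERS cheapest candidates; the commit's fix makes the final contraction target output tensor #0. A stripped-down sketch of the same scheme, with ExaTN types replaced by standard containers and a stand-in cost function (all names here are illustrative, not the library's API):

#include <algorithm>
#include <cstddef>
#include <iterator>
#include <queue>
#include <set>
#include <tuple>
#include <vector>

struct Triple { unsigned result, left, right; };  //mirrors ContrTriple
using Path = std::tuple<std::set<unsigned>,       //ids of tensors not yet contracted
                        std::vector<Triple>,      //contraction sequence so far
                        double>;                  //accumulated cost (flops)

std::vector<Triple> beamSearch(const std::set<unsigned> & tensors,
                               double (*cost)(unsigned, unsigned), //stand-in flop estimate
                               std::size_t num_walkers)
{
 if(tensors.size() < 2) return {};
 auto cheaper = [](const Path & a, const Path & b){ return std::get<2>(a) < std::get<2>(b); };
 std::vector<Path> input{Path{tensors, {}, 0.0}};
 unsigned next_id = 1000;                       //stand-in intermediate-id generator
 const std::size_t passes = tensors.size() - 1; //one pairwise contraction per pass
 for(std::size_t pass = 0; pass < passes; ++pass){
  //max-heap by cost: top() is the most expensive surviving walker
  std::priority_queue<Path, std::vector<Path>, decltype(cheaper)> best(cheaper);
  //one fresh intermediate id per pass; the last pass writes output tensor #0 (the fix):
  const unsigned res = (pass == passes - 1) ? 0 : next_id++;
  for(const auto & path: input){
   const auto & ids = std::get<0>(path);
   for(auto i = ids.begin(); i != ids.end(); ++i){
    for(auto j = std::next(i); j != ids.end(); ++j){
     auto rest = ids; rest.erase(*i); rest.erase(*j); rest.insert(res);
     auto seq = std::get<1>(path); seq.push_back(Triple{res, *i, *j});
     best.push(Path{std::move(rest), std::move(seq), std::get<2>(path) + cost(*i, *j)});
     if(best.size() > num_walkers) best.pop(); //keep only the cheapest num_walkers paths
    }
   }
  }
  input.clear();
  while(!best.empty()){ input.push_back(best.top()); best.pop(); }
 }
 //the cheapest surviving path carries the chosen contraction sequence:
 return std::get<1>(*std::min_element(input.begin(), input.end(), cheaper));
}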
/** ExaTN::Numerics: Tensor
- REVISION: 2019/12/08
+ REVISION: 2019/12/30
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -69,19 +69,21 @@ name_(name), element_type_(TensorElementType::VOID)
++inp_mode;
if(argt == 1 && inp_mode == left_rank){inp_mode = 0; argt = 2;};
}
- assert(max_out_dim < out_mode);
//Form the output tensor shape/signature:
- for(unsigned int i = 0; i <= max_out_dim; ++i){
-  inp_mode = contr[i][1];
-  if(contr[i][0] == 1){
-   shape_.appendDimension(left_tensor.getDimExtent(inp_mode));
-   signature_.appendDimension(left_tensor.getDimSpaceAttr(inp_mode));
-  }else if(contr[i][0] == 2){
-   shape_.appendDimension(right_tensor.getDimExtent(inp_mode));
-   signature_.appendDimension(right_tensor.getDimSpaceAttr(inp_mode));
-  }else{
-   std::cout << "#ERROR(Tensor::Tensor): Invalid function argument: contraction: Missing output tensor mode!" << std::endl;
-   assert(false); //missing output tensor dimension
+ if(out_mode > 0){ //output tensor is not a scalar
+  assert(max_out_dim < out_mode);
+  for(unsigned int i = 0; i <= max_out_dim; ++i){
+   inp_mode = contr[i][1];
+   if(contr[i][0] == 1){
+    shape_.appendDimension(left_tensor.getDimExtent(inp_mode));
+    signature_.appendDimension(left_tensor.getDimSpaceAttr(inp_mode));
+   }else if(contr[i][0] == 2){
+    shape_.appendDimension(right_tensor.getDimExtent(inp_mode));
+    signature_.appendDimension(right_tensor.getDimSpaceAttr(inp_mode));
+   }else{
+    std::cout << "#ERROR(Tensor::Tensor): Invalid function argument: contraction: Missing output tensor mode!" << std::endl;
+    assert(false); //missing output tensor dimension
+   }
+  }
}
}
......
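The new guard matters when the contraction consumes every mode of both inputs: the output then has rank zero (out_mode == 0), and the old unconditional assert fired on a valid scalar result. A hedged illustration via the public calls used in the tests above (the tensor names, the pairing argument of appendTensor, and the umbrella header are assumptions for this sketch):

#include "exatn.hpp" //assumed umbrella header
#include <cassert>

void scalarContractionSketch()
{
 using exatn::numerics::TensorNetwork;
 using exatn::numerics::TensorShape;
 using exatn::TensorElementType;
 //D() = A(a,b) * B(a,b): a fully closed two-tensor network with a scalar output
 bool ok = exatn::createTensor("A",TensorElementType::COMPLEX64,TensorShape{2,2}); assert(ok);
 ok = exatn::createTensor("B",TensorElementType::COMPLEX64,TensorShape{2,2}); assert(ok);
 TensorNetwork dot("DotProduct");
 ok = dot.appendTensor(1,exatn::getTensor("A"),{}); assert(ok);
 ok = dot.appendTensor(2,exatn::getTensor("B"),{{0,0},{1,1}}); assert(ok); //pair both open legs
 ok = exatn::evaluateSync(dot); assert(ok); //output tensor #0 is rank-0; the assert no longer trips
}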
/** ExaTN::Numerics: Tensor network
- REVISION: 2019/12/23
+ REVISION: 2019/12/30
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
@@ -1141,7 +1141,7 @@ bool TensorNetwork::mergeTensors(unsigned int left_id, unsigned int right_id, un
auto res = emplaceTensorConnDirect(true,
result_id,
std::make_shared<Tensor>(
- left_tensor->getTensor()->getName() + right_tensor->getTensor()->getName(),
+ "_y" + std::to_string(result_id),
*(left_tensor->getTensor()),
*(right_tensor->getTensor()),
pattern
......
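Intermediate tensors are now named from their network id instead of by concatenating the parents' names, which doubled in length with every merge during the optimizer's many trial contractions. A one-function sketch of the new scheme:

#include <string>

//Flat names like "_y12" regardless of how deep the merge tree gets
//(mirrors the "_y" + std::to_string(result_id) expression above):
std::string intermediateTensorName(unsigned int result_id)
{
 return "_y" + std::to_string(result_id);
}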
/** ExaTN: Numerics: Symbolic tensor processing
- REVISION: 2019/12/10
+ REVISION: 2019/12/30
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle)
@@ -118,7 +118,7 @@ template <typename Integer>
std::string tensor_hex_name(Integer hash)
{
static_assert(std::is_integral<Integer>::value,"#FATAL(tensor_hex_name): Non-integer type passed!");
- char digit[] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};
+ static const char digit[] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};
auto n = hash; if(n < 0) n = -n;
std::string name("_z");
while(n > 0){
......
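Declaring the digit table static const builds it once per program instead of re-initializing it on every call. For illustration, a standalone re-creation of the helper, assuming the elided loop body appends digit[n % 16] and then divides n by 16 (so digits come out least-significant first; hash == 0 yields just "_z"):

#include <string>
#include <type_traits>

template <typename Integer>
std::string tensor_hex_name_sketch(Integer hash)
{
 static_assert(std::is_integral<Integer>::value,"#FATAL(tensor_hex_name_sketch): Non-integer type passed!");
 static const char digit[] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};
 auto n = hash; if(n < 0) n = -n;
 std::string name("_z");
 while(n > 0){
  name += digit[n % 16]; //append next hex digit of |hash|
  n /= 16;
 }
 return name;
}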