Commit 307d4060 authored by Dmitry I. Lyakh's avatar Dmitry I. Lyakh

Introduced a method to TensorNetwork class to alter tensor optimizability.

parent de638c6f
/** ExaTN:: Extreme eigenvalue/vector solver over tensor networks
REVISION: 2019/12/18
/** ExaTN:: Extreme eigenvalue/eigenvector solver over tensor networks
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#include "eigensolver.hpp"
......@@ -13,7 +13,7 @@ namespace exatn{
TensorNetworkEigenSolver::TensorNetworkEigenSolver(std::shared_ptr<TensorOperator> tensor_operator,
std::shared_ptr<TensorExpansion> tensor_expansion,
double tolerance):
tensor_operator_(tensor_operator), tensor_expansion_(tensor_expansion), tolerance_(tolerance)
tensor_operator_(tensor_operator), tensor_expansion_(tensor_expansion), tolerance_(tolerance), num_roots_(0)
{
}
......@@ -35,6 +35,8 @@ bool TensorNetworkEigenSolver::solve(unsigned int num_roots, const std::vector<d
{
assert(accuracy != nullptr);
if(num_roots == 0) return false;
num_roots_ = num_roots;
for(unsigned int i = 0; i < num_roots; ++i) accuracy_.emplace_back(-1.0);
//`Finish
*accuracy = &accuracy_;
return true;
......
/** ExaTN:: Extreme eigenvalue/vector solver over tensor networks
REVISION: 2019/12/18
/** ExaTN:: Extreme eigenvalue/eigenvector solver over tensor networks
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(a) Finds the approximate extreme eigenvalues and corresponding eigenvectors
expanded in the Krylov subspace spanned by tensor networks using the
Davidson-Nakatsuji-Hirao algorithm for non-Hermitian tensor operators.
(a) The tensor network expansion eigensolver finds the approximate extreme
eigenvalues and their corresponding eigenvectors expanded in the Krylov
subspace spanned by tensor network expansions. The procedure is derived
from the Davidson-Nakatsuji-Hirao algorithm for non-Hermitian matrices,
which in turn is based on the Arnoldi algorithm.
**/
#ifndef EXATN_EIGENSOLVER_HPP_
......@@ -27,8 +29,8 @@ class TensorNetworkEigenSolver{
public:
TensorNetworkEigenSolver(std::shared_ptr<TensorOperator> tensor_operator, //in: tensor operator the extreme eigenroots of which need to be found
std::shared_ptr<TensorExpansion> tensor_expansion, //in: tensor expansion form that will be used for each eigenvector
TensorNetworkEigenSolver(std::shared_ptr<TensorOperator> tensor_operator, //in: tensor operator the extreme eigenroots of which are to be found
std::shared_ptr<TensorExpansion> tensor_expansion, //in: tensor network expansion form that will be used for each eigenvector
double tolerance); //in: desired numerical convergence tolerance
TensorNetworkEigenSolver(const TensorNetworkEigenSolver &) = default;
......@@ -37,8 +39,9 @@ public:
TensorNetworkEigenSolver & operator=(TensorNetworkEigenSolver &&) noexcept = default;
~TensorNetworkEigenSolver() = default;
/** Runs the eigensolver for one or more extreme eigenroots.
Upon success, returns the achieved accuracy for each eigenroot. **/
/** Runs the tensor network eigensolver for one or more extreme eigenroots
of the underlying tensor operator. Upon success, returns the achieved
accuracy for each eigenroot. **/
bool solve(unsigned int num_roots, //in: number of extreme eigenroots to find
const std::vector<double> ** accuracy); //out: achieved accuracy for each root: accuracy[num_roots]
......@@ -49,12 +52,12 @@ public:
private:
std::shared_ptr<TensorOperator> tensor_operator_; //tensor operator the extreme eigenroots of which need to be found
std::shared_ptr<TensorExpansion> tensor_expansion_; //desired form of the eigenvector as a tensor expansion
std::vector<std::shared_ptr<TensorExpansion>> eigenvector_; //tensor expansion approximating each requested eigenvector
std::shared_ptr<TensorOperator> tensor_operator_; //tensor operator the extreme eigenroots of which are to be found
std::shared_ptr<TensorExpansion> tensor_expansion_; //desired form of the eigenvector as a tensor network expansion
std::vector<std::shared_ptr<TensorExpansion>> eigenvector_; //tensor network expansion approximating each requested eigenvector
std::vector<std::complex<double>> eigenvalue_; //computed eigenvalues
std::vector<double> accuracy_; //actually achieved accuracy for each eigenroot
double tolerance_; //desired numerical convergence tolerance
double tolerance_; //desired numerical convergence tolerance for each eigenroot
unsigned int num_roots_; //number of extreme eigenroots requested
};
......
/** ExaTN:: Optimizer of a closed tensor expansion functional
REVISION: 2019/12/14
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#include "optimizer.hpp"
......
/** ExaTN:: Optimizer of a closed tensor expansion functional
REVISION: 2019/12/18
/** ExaTN:: Optimizer of a closed tensor network expansion functional
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(A) Given a closed tensor network expansion functional, the tensor network
expansion optimizer optimizes tensor factors to achieve an extremum of
that functional.
**/
#ifndef EXATN_OPTIMIZER_HPP_
......@@ -21,7 +23,7 @@ class TensorNetworkOptimizer{
public:
TensorNetworkOptimizer(std::shared_ptr<TensorExpansion> expansion, //in: closed tensor expansion functional to be optimized
TensorNetworkOptimizer(std::shared_ptr<TensorExpansion> expansion, //inout: closed tensor network expansion functional to optimize
double tolerance); //in: desired numerical convergence tolerance
TensorNetworkOptimizer(const TensorNetworkOptimizer &) = default;
......@@ -30,17 +32,17 @@ public:
TensorNetworkOptimizer & operator=(TensorNetworkOptimizer &&) noexcept = default;
~TensorNetworkOptimizer() = default;
/** Optimizes the given closed tensor expansion functional. Upon success,
returns the achieved accuracy of the optimization. **/
/** Optimizes the given closed tensor network expansion functional.
Upon success, returns the achieved accuracy of the optimization. **/
bool optimize(double * accuracy);
/** Returns the optimized tensor expansion. **/
/** Returns the optimized tensor network expansion functional. **/
std::shared_ptr<TensorExpansion> getSolution(double * accuracy = nullptr);
private:
std::shared_ptr<TensorExpansion> expansion_; //closed tensor expansion functional to optimize
double tolerance_; //optimization convergence tolerance
std::shared_ptr<TensorExpansion> expansion_; //closed tensor network expansion functional to optimize
double tolerance_; //numerical optimization convergence tolerance
double accuracy_; //actually achieved optimization accuracy
};
......
/** ExaTN:: Reconstructor of an approximate tensor expansion from a given tensor expansion
REVISION: 2019/12/18
/** ExaTN:: Reconstructor of an approximate tensor network expansion from a given tensor network expansion
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#include "reconstructor.hpp"
......
/** ExaTN:: Reconstructor of an approximate tensor expansion from a given tensor expansion
REVISION: 2019/12/18
/** ExaTN:: Reconstructor of an approximate tensor network expansion from a given tensor network expansion
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(A) Given a tensor network expansion of some form, the tensor network reconstructor
optimizes its tensor factors to maximize the overlap with another given constant
tensor network expansion, thus providing an approximation to it.
The reconstruction fidelity is the overlap between the two tensor network expansions.
The reconstruction tolerance is a numerical tolerance used for checking convergence
of the underlying linear algebra procedures.
**/
#ifndef EXATN_RECONSTRUCTOR_HPP_
......@@ -21,9 +26,9 @@ class TensorNetworkReconstructor{
public:
TensorNetworkReconstructor(std::shared_ptr<TensorExpansion> expansion, //in: tensor expansion to be reconstructed
std::shared_ptr<TensorExpansion> approximant, //in: reconstructing tensor expansion (unoptimized)
double tolerance); //in: reconstruction convergence tolerance
TensorNetworkReconstructor(std::shared_ptr<TensorExpansion> expansion, //in: tensor expansion to be reconstructed (constant)
std::shared_ptr<TensorExpansion> approximant, //inout: reconstructing tensor expansion (unoptimized)
double tolerance); //in: desired reconstruction convergence tolerance
TensorNetworkReconstructor(const TensorNetworkReconstructor &) = default;
TensorNetworkReconstructor & operator=(const TensorNetworkReconstructor &) = default;
......@@ -31,18 +36,19 @@ public:
TensorNetworkReconstructor & operator=(TensorNetworkReconstructor &&) noexcept = default;
~TensorNetworkReconstructor() = default;
/** Reconstructs a tensor expansion via another tensor expansion approximately.
Upon success, returns the achieved fidelity of the reconstruction. **/
/** Approximately reconstructs a tensor network expansion via another tensor network
expansion. Upon success, returns the achieved fidelity of the reconstruction,
that is, the overlap between the two tensor network expansions, [0..1]. **/
bool reconstruct(double * fidelity);
/** Returns the reconstructing (optimized) tensor expansion. **/
/** Returns the reconstructing (optimized) tensor network expansion. **/
std::shared_ptr<TensorExpansion> getSolution(double * fidelity = nullptr);
private:
std::shared_ptr<TensorExpansion> expansion_; //tensor expansion to reconstruct
std::shared_ptr<TensorExpansion> approximant_; //reconstructing tensor expansion
double tolerance_; //reconstruction convergence tolerance
double tolerance_; //numerical reconstruction convergence tolerance
double fidelity_; //actually achieved reconstruction fidelity
};
......
/** ExaTN::Numerics: Tensor connected to other tensors inside a tensor network
REVISION: 2019/12/22
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#include "tensor_connected.hpp"
#include "tensor_symbol.hpp"
......@@ -20,7 +20,7 @@ TensorConn::TensorConn(std::shared_ptr<Tensor> tensor,
unsigned int id,
const std::vector<TensorLeg> & legs,
bool conjugated):
tensor_(tensor), id_(id), legs_(legs), conjugated_(conjugated)
tensor_(tensor), id_(id), legs_(legs), conjugated_(conjugated), optimizable_(false)
{
}
......@@ -179,6 +179,17 @@ const std::list<std::vector<unsigned int>> & TensorConn::retrieveIsometries() co
return tensor_->retrieveIsometries();
}
bool TensorConn::isOptimizable() const
{
 // Reports whether this connected tensor (in its current position inside
 // the tensor network) has been flagged as subject to optimization.
 return optimizable_;
}
void TensorConn::resetOptimizability(bool optimizable)
{
 // Flags or unflags this connected tensor as a target of optimization.
 // The attribute applies to the tensor in its current connected position
 // within the tensor network, not to the underlying tensor per se.
 optimizable_ = optimizable;
}
} //namespace numerics
} //namespace exatn
/** ExaTN::Numerics: Tensor connected to other tensors in a tensor network
REVISION: 2019/12/22
REVISION: 2020/01/24
Copyright (C) 2018-2019 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2019 Oak Ridge National Laboratory (UT-Battelle) **/
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
/** Rationale:
(a) A tensor inside a tensor network is generally connected
......@@ -110,12 +110,23 @@ public:
/** Retrieves the list of all registered isometries in the tensor. **/
const std::list<std::vector<unsigned int>> & retrieveIsometries() const;
/** Returns whether this connected tensor is optimizable or not (whether or not this
connected tensor should be optimized during the tensor network functional optimization).**/
bool isOptimizable() const;
/** Resets the optimizability attribute (whether or not this connected tensor
should be optimized during the tensor network functional optimization).
Note that this attribute specifically applies to the tensor in its current
connected position within the tensor network, not to the tensor per se. **/
void resetOptimizability(bool optimizable);
private:
std::shared_ptr<Tensor> tensor_; //co-owned pointer to the tensor
unsigned int id_; //tensor id in the tensor network
std::vector<TensorLeg> legs_; //tensor legs: Connections to other tensors
bool conjugated_; //complex conjugation flag
bool optimizable_; //whether or not the tensor is subject to optimization as part of the optimized tensor network
};
} //namespace numerics
......
/** ExaTN::Numerics: Tensor network
REVISION: 2020/01/13
REVISION: 2020/01/24
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -1412,6 +1412,16 @@ bool TensorNetwork::collapseIsometries()
}
void TensorNetwork::markOptimizableTensors(std::function<bool (const Tensor &)> predicate)
{
 // Walk every connected tensor in the network and set its optimizability
 // attribute according to the user-supplied predicate evaluated on the tensor.
 for(auto & entry: *this){
  auto & connected_tensor = entry.second;
  const bool mark = predicate(*(connected_tensor.getTensor()));
  connected_tensor.resetOptimizability(mark);
 }
 return;
}
double TensorNetwork::getContractionCost(unsigned int left_id, unsigned int right_id,
double * arithm_intensity, bool adjust_cost)
{
......
/** ExaTN::Numerics: Tensor network
REVISION: 2020/01/16
REVISION: 2020/01/24
Copyright (C) 2018-2020 Dmitry I. Lyakh (Liakh)
Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
......@@ -58,6 +58,7 @@ Copyright (C) 2018-2020 Oak Ridge National Laboratory (UT-Battelle) **/
#include "network_build_factory.hpp"
#include "contraction_seq_optimizer.hpp"
#include <functional>
#include <unordered_map>
#include <map>
#include <vector>
......@@ -269,6 +270,12 @@ public:
of the output tensor it should be able to handle spectators (orphaned tensor legs). **/
bool collapseIsometries();
/** Traverses the tensor network and marks certain tensors as optimizable
based on the user-provided predicate function. If marked optimizable,
these specific tensors (in their specific positions within the tensor network)
will become subject to optimization when optimizing the tensor network. **/
void markOptimizableTensors(std::function<bool (const Tensor &)> predicate);
/** Returns the FMA flop count for a given contraction of two tensors identified by their ids
in the tensor network. Optionally returns the arithmetic intensity of the tensor contraction as well.
Additionally, it also allows rescaling of the tensor contraction cost with the adjustment
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment