examples/sycamore/sycamore_circ_approx.cpp (+3 −3)

@@ -17,9 +17,9 @@ std::string bitStringVecToString(const std::vector<int>& in_vec)
 int main(int argc, char **argv)
 {
   xacc::Initialize();
-  //xacc::set_verbose(true);
+  xacc::set_verbose(true);
   //xacc::logToFile(true);
-  //xacc::setLoggingLevel(1);
+  xacc::setLoggingLevel(1);
   // Options: 4, 5, 6, 8, 10, 12, 14, 16, 18, 20:
   const int CIRCUIT_DEPTH = 4;

@@ -45,7 +45,7 @@ int main(int argc, char **argv)
   // Note:
   // (1) "exatn" == "exatn:double" uses double (64-bit) type;
   // (1) "exatn:float" uses float (32-bit) type;
-  constexpr int NB_LAYERS = 23;
+  constexpr int NB_LAYERS = 1;
   constexpr double RECONSTRUCTION_TOL = 1e-3;
   constexpr int MAX_BOND_DIM = 16;
   auto qpu = xacc::getAccelerator("tnqvm",
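Note: the hunk above is truncated mid-call, so the remaining options the example passes to xacc::getAccelerator are not visible here. As a point of reference, the sketch below shows how the reconstruction cadence introduced in this PR is typically selected at accelerator-request time. It uses only option keys confirmed elsewhere in this diff ("tnqvm-visitor", "reconstruct-gates", "reconstruct-layers", "reconstruct-tolerance"); the values, and any max-bond-dimension key the example actually passes, are illustrative assumptions, not PR code.

#include "xacc.hpp"

int main(int argc, char **argv) {
  xacc::Initialize(argc, argv);
  // New mode: compress the tensor expansion after every N two-body gates.
  auto byGates = xacc::getAccelerator(
      "tnqvm", {{"tnqvm-visitor", "exatn-gen:float"},
                {"reconstruct-gates", 8},        // illustrative value
                {"reconstruct-tolerance", 1e-3}});
  // Existing mode: compress after every N parallel layers of two-body gates.
  auto byLayers = xacc::getAccelerator(
      "tnqvm", {{"tnqvm-visitor", "exatn-gen:float"},
                {"reconstruct-layers", 1},       // matches NB_LAYERS = 1 above
                {"reconstruct-tolerance", 1e-3}});
  xacc::Finalize();
  return 0;
}

If both keys are supplied, "reconstruct-gates" wins: the initialize() hunk below checks it first and only falls back to "reconstruct-layers" in the else branch.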
tnqvm/visitors/exatn-gen/ExatnGenVisitor.cpp (+79 −36)

@@ -15,14 +15,14 @@
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT,
+ *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contributors:
  * Implementation - Thien Nguyen;

@@ -223,7 +223,8 @@ void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::initialize(
   // Note: If xacc::verbose is not set, we always set ExaTN logging level to
   // 0.
   exatn::resetClientLoggingLevel(xacc::verbose ? xacc::getLoggingLevel() : 0);
-  exatn::resetRuntimeLoggingLevel(xacc::verbose ? xacc::getLoggingLevel() : 0);
+  exatn::resetRuntimeLoggingLevel(xacc::verbose ? xacc::getLoggingLevel()
+                                                : 0);
   xacc::subscribeLoggingLevel([](int level) {
     exatn::resetClientLoggingLevel(xacc::verbose ? level : 0);

@@ -250,11 +251,21 @@ void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::initialize(
   // Default number of layers
   m_layersReconstruct = 4;
-  if (options.keyExists<int>("reconstruct-layers")) {
-    m_layersReconstruct = options.get<int>("reconstruct-layers");
-  }
+  m_countByGates = false;
+  m_layerTracker.clear();
+  if (options.keyExists<int>("reconstruct-gates")) {
+    m_layersReconstruct = options.get<int>("reconstruct-gates");
+    xacc::info("Reconstruct tensor network every " +
+               std::to_string(m_layersReconstruct) + " 2-body gates.");
+    m_countByGates = true;
+  } else {
+    if (options.keyExists<int>("reconstruct-layers")) {
+      m_layersReconstruct = options.get<int>("reconstruct-layers");
+    }
+    xacc::info("Reconstruct tensor network every " +
+               std::to_string(m_layersReconstruct) + " layers.");
+  }
   m_reconstructTol = 1e-3;
   m_maxBondDim = 512;
   m_reconstructionFidelity = 1.0;

@@ -589,8 +600,10 @@ void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::appendGateTensor(
     const xacc::Instruction &in_gateInstruction, GateParams &&... in_params) {
   // Count gate layer if this is a multi-qubit gate.
   if (in_gateInstruction.nRequiredBits() > 1) {
-    ++m_layerCounter;
+    updateLayerCounter(in_gateInstruction);
+    reconstructCircuitTensor();
   }
+
   const auto gateName = GetGateName(GateType);
   const GateInstanceIdentifier gateInstanceId(gateName, in_params...);
   const std::string uniqueGateName = gateInstanceId.toNameString();

@@ -665,8 +678,6 @@ void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::appendGateTensor(
     xacc::error("Failed to append tensor for gate " + in_gateInstruction.name() +
                 ", pairing = " + gatePairingString);
   }
-
-  reconstructCircuitTensor();
 }

 template <typename TNQVM_COMPLEX_TYPE>

@@ -674,7 +685,7 @@ void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::reconstructCircuitTensor() {
   if (m_layersReconstruct <= 0) {
     return;
   }
-  if (m_layerCounter > m_layersReconstruct) {
+  if (m_layerCounter >= m_layersReconstruct) {
     xacc::info("Reconstruct Tensor Expansion");
     auto target = std::make_shared<exatn::TensorExpansion>(m_tensorExpansion);
     // List of Approximate tensors to delete:
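The two behavioral changes in the hunks above interact: the trigger comparison is relaxed from > to >=, and the reconstructCircuitTensor() call moves from the end of appendGateTensor() to before the incoming gate tensor is appended. The net effect is that, with a threshold of N, compression now fires when the N-th counted unit arrives and compresses only the work already appended. A minimal sketch of that control flow, assuming the counter is reset inside the reconstruction routine (the reset is not visible in these hunks):

#include <iostream>

int main() {
  const int layersReconstruct = 4; // e.g. {"reconstruct-gates", 4}
  int layerCounter = 0;
  for (int gate = 1; gate <= 8; ++gate) {
    ++layerCounter; // updateLayerCounter() in gate-count mode
    if (layersReconstruct > 0 && layerCounter >= layersReconstruct) {
      // reconstructCircuitTensor(): fires on gates 4 and 8 here; with the
      // old "append first, compare with >" logic it fired after gate 5.
      std::cout << "compress before appending gate " << gate << "\n";
      layerCounter = 0; // assumed reset after a successful reconstruction
    }
    // ... the gate tensor is appended to the expansion after this point ...
  }
  return 0;
}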
@@ -685,18 +696,20 @@ void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::reconstructCircuitTensor() {
     const std::vector<int> qubitTensorDim(m_buffer->size(), 2);
     auto rootTensor = std::make_shared<exatn::Tensor>("ROOT", qubitTensorDim);
     auto &networkBuildFactory = *(exatn::numerics::NetworkBuildFactory::get());
-    auto builder = networkBuildFactory.createNetworkBuilderShared(m_reconstructBuilder);
+    auto builder =
+        networkBuildFactory.createNetworkBuilderShared(m_reconstructBuilder);
     builder->setParameter("max_bond_dim", m_maxBondDim);
     auto approximant = [&]() {
       if (m_initReconstructionRandom || !m_previousOptExpansion) {
-        auto approximantTensorNetwork = exatn::makeSharedTensorNetwork("Approx", rootTensor, *builder);
+        auto approximantTensorNetwork =
+            exatn::makeSharedTensorNetwork("Approx", rootTensor, *builder);
         for (auto iter = approximantTensorNetwork->cbegin();
              iter != approximantTensorNetwork->cend(); ++iter) {
           const auto &tensorName = iter->second.getTensor()->getName();
           if (tensorName != "ROOT") {
             auto tensor = iter->second.getTensor();
-            const bool created = exatn::createTensorSync(
-                tensor, getExatnElementType());
+            const bool created =
+                exatn::createTensorSync(tensor, getExatnElementType());
             assert(created);
             const bool initialized = exatn::initTensorRnd(tensor->getName());
             assert(initialized);

@@ -706,8 +719,10 @@ void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::reconstructCircuitTensor() {
         }
       }
       approximantTensorNetwork->markOptimizableAllTensors();
-      auto approximant_expansion = std::make_shared<exatn::TensorExpansion>("Approx");
-      approximant_expansion->appendComponent(approximantTensorNetwork, TNQVM_COMPLEX_TYPE{1.0, 0.0});
+      auto approximant_expansion =
+          std::make_shared<exatn::TensorExpansion>("Approx");
+      approximant_expansion->appendComponent(approximantTensorNetwork,
+                                             TNQVM_COMPLEX_TYPE{1.0, 0.0});
       approximant_expansion->conjugate();
       return approximant_expansion;
     } else {

@@ -919,7 +934,8 @@ const double ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::getExpectationValueZ(
       // std::cout << "Component coeff: " << component.coefficient << "\n";
       const std::complex<double> renormalizedComponentExpVal =
           tensor_body_val * component.coefficient;
-      // std::cout << "renormalizedComponentExpVal: " << renormalizedComponentExpVal << "\n";
+      // std::cout << "renormalizedComponentExpVal: " <<
+      // renormalizedComponentExpVal << "\n";
       return renormalizedComponentExpVal.real();
     }
   xacc::error("Unable to map execution data for sub-composite: " +

@@ -944,9 +960,9 @@ ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::computeWaveFuncSlice(
     const auto bitVal = in_bitString[i];
     const std::string braQubitName = "QB" + std::to_string(i);
     if (bitVal == 0) {
-      const bool created = exatn::createTensor(
-          in_processGroup, braQubitName, getExatnElementType(),
-          exatn::TensorShape{2});
+      const bool created = exatn::createTensor(in_processGroup, braQubitName,
+                                               getExatnElementType(),
+                                               exatn::TensorShape{2});
       assert(created);
       // Bit = 0
       const bool initialized = exatn::initTensorData(

@@ -955,9 +971,9 @@ ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::computeWaveFuncSlice(
       assert(initialized);
       pairings.emplace_back(std::make_pair(i, i + nbOpenLegs));
     } else if (bitVal == 1) {
-      const bool created = exatn::createTensor(
-          in_processGroup, braQubitName, getExatnElementType(),
-          exatn::TensorShape{2});
+      const bool created = exatn::createTensor(in_processGroup, braQubitName,
+                                               getExatnElementType(),
+                                               exatn::TensorShape{2});
       assert(created);
       // Bit = 1
       const bool initialized = exatn::initTensorData(

@@ -967,8 +983,8 @@ ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::computeWaveFuncSlice(
       pairings.emplace_back(std::make_pair(i, i + nbOpenLegs));
     } else if (bitVal == -1) {
       // Add an Id tensor
-      const bool created = exatn::createTensor(
-          in_processGroup, braQubitName, getExatnElementType(),
+      const bool created = exatn::createTensor(in_processGroup, braQubitName,
+                                               getExatnElementType(),
           exatn::TensorShape{2, 2});
       assert(created);
       const bool initialized = exatn::initTensorData(

@@ -1018,5 +1034,32 @@ ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::computeWaveFuncSlice(
   }
   return waveFnSlice;
 }
+
+template <typename TNQVM_COMPLEX_TYPE>
+void ExatnGenVisitor<TNQVM_COMPLEX_TYPE>::updateLayerCounter(
+    const xacc::Instruction &in_gateInstruction) {
+  auto &gate = const_cast<xacc::Instruction &>(in_gateInstruction);
+  assert(gate.bits().size() == 2);
+  if (m_countByGates) {
+    ++m_layerCounter;
+  } else {
+    bool canCombine = true;
+    const auto q1 = gate.bits()[0];
+    const auto q2 = gate.bits()[1];
+    for (const auto &[bit1, bit2] : m_layerTracker) {
+      if ((q1 == bit1 || q1 == bit2) || (q2 == bit1 || q2 == bit2)) {
+        canCombine = false;
+        break;
+      }
+    }
+    if (canCombine) {
+      m_layerTracker.emplace(std::make_pair(q1, q2));
+    } else {
+      ++m_layerCounter;
+      m_layerTracker.clear();
+      m_layerTracker.emplace(std::make_pair(q1, q2));
+    }
+  }
+}
 } // end namespace tnqvm
 #endif // TNQVM_HAS_EXATN
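The new updateLayerCounter() above implements, in layer mode, a greedy layering rule: consecutive two-qubit gates acting on disjoint qubit pairs are folded into the same layer (tracked in m_layerTracker), and the first gate that touches an already-occupied qubit closes the layer, bumps m_layerCounter, and seeds the next layer. A standalone illustration of the same rule (not PR code; plain C++17 with no XACC types):

#include <cstddef>
#include <iostream>
#include <set>
#include <utility>
#include <vector>

int main() {
  std::set<std::pair<std::size_t, std::size_t>> layerTracker;
  int layerCounter = 0;
  // CX(0,1) and CX(2,3) act on disjoint pairs and share one layer;
  // CX(1,2) collides with CX(0,1), closing layer 1 and opening layer 2;
  // CX(0,3) is disjoint from CX(1,2) and joins layer 2.
  const std::vector<std::pair<std::size_t, std::size_t>> gates = {
      {0, 1}, {2, 3}, {1, 2}, {0, 3}};
  for (const auto &[q1, q2] : gates) {
    bool canCombine = true;
    for (const auto &[b1, b2] : layerTracker) {
      if (q1 == b1 || q1 == b2 || q2 == b1 || q2 == b2) {
        canCombine = false;
        break;
      }
    }
    if (canCombine) {
      layerTracker.emplace(q1, q2);
    } else {
      ++layerCounter; // a layer just completed
      layerTracker.clear();
      layerTracker.emplace(q1, q2);
    }
  }
  // Only completed layers are counted; the still-open layer 2 is not.
  std::cout << "Completed layers: " << layerCounter << "\n"; // prints 1
  return 0;
}

One consequence worth noting: the counter only advances when a layer completes, so reconstructCircuitTensor() in layer mode always compresses whole layers, never a partially built one.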
tnqvm/visitors/exatn-gen/ExatnGenVisitor.hpp (+4 −1)

@@ -32,7 +32,7 @@
 // +-----------------------------+------------------------------------------------------------------------+-------------+--------------------------+
 // | Initialization Parameter    | Parameter Description                                                  | type        | default                  |
 // +=============================+========================================================================+=============+==========================+
-// | reconstruct-layers          | Perform reconstruction after this number of consecutive 2-q gates     | int         | -1 (no reconstruct)      |
+// | reconstruct-gates           | Perform reconstruction after this number of consecutive 2-q gates     | int         | -1 (no reconstruct)      |
 // +-----------------------------+------------------------------------------------------------------------+-------------+--------------------------+
 // | reconstruct-tolerance       | Reconstruction convergence tolerance                                   | double      | 1e-4                     |
 // +-----------------------------+------------------------------------------------------------------------+-------------+--------------------------+

@@ -141,10 +141,13 @@
       const exatn::ProcessGroup &in_processGroup) const;

 private:
+  void updateLayerCounter(const xacc::Instruction &in_gateInstruction);
+  std::set<std::pair<size_t, size_t>> m_layerTracker;
   std::shared_ptr<exatn::TensorNetwork> m_qubitNetwork;
   exatn::TensorExpansion m_tensorExpansion;
   std::shared_ptr<exatn::TensorExpansion> m_previousOptExpansion;
   int m_layersReconstruct;
+  bool m_countByGates;
   double m_reconstructTol;
   int m_layerCounter;
   int m_maxBondDim;

tnqvm/visitors/exatn-gen/tests/ExaTnGenTester.cpp (+5 −5)

@@ -120,7 +120,7 @@ TEST(ExaTnGenTester, checkVqeH2) {
 TEST(ExaTnGenTester, checkVqeH3) {
   auto accelerator = xacc::getAccelerator(
-      "tnqvm", {{"tnqvm-visitor", "exatn-gen"}, {"reconstruct-layers", -1}});
+      "tnqvm", {{"tnqvm-visitor", "exatn-gen"}, {"reconstruct-gates", -1}});
   // Create the N=3 deuteron Hamiltonian
   auto H_N_3 = xacc::quantum::getObservable(
       "pauli",

@@ -172,7 +172,7 @@ TEST(ExaTnGenTester, checkBitstringAmpl) {
   auto program = ir->getComposite("test1");
   auto accelerator =
       xacc::getAccelerator("tnqvm", {{"tnqvm-visitor", "exatn-gen:float"},
-                                     {"reconstruct-layers", 2},
+                                     {"reconstruct-gates", 2},
                                      {"reconstruct-tolerance", 0.01},
                                      {"bitstring", bitstring}});
   auto qreg = xacc::qalloc(8);

@@ -196,7 +196,7 @@ TEST(ExaTnGenTester, checkWavefunctionSlice) {
   auto program = ir->getComposite("test1");
   auto accelerator =
       xacc::getAccelerator("tnqvm", {{"tnqvm-visitor", "exatn-gen:float"},
-                                     {"reconstruct-layers", 2},
+                                     {"reconstruct-gates", 2},
                                      {"reconstruct-tolerance", 0.01},
                                      {"bitstring", bitstring}});
   auto qreg = xacc::qalloc(8);

@@ -220,7 +220,7 @@ TEST(ExaTnGenTester, checkVqeH3Approx) {
   // Use very high tolerance to save test time
   auto accelerator =
       xacc::getAccelerator("tnqvm", {{"tnqvm-visitor", "exatn-gen"},
-                                     {"reconstruct-layers", 4},
+                                     {"reconstruct-gates", 4},
                                      {"reconstruct-tolerance", 0.01}});
   xacc::set_verbose(true);
   xacc::qasm(R"(

@@ -250,7 +250,7 @@ TEST(ExaTnGenTester, checkVqeH3Approx) {
   auto energies = vqe->execute(buffer, {0.0684968, 0.17797});
   buffer->print();
   std::cout << "Energy = " << energies[0] << "\n";
-  EXPECT_NEAR(energies[0], -2.04482, 0.1);
+  EXPECT_NEAR(energies[0], -2.04482, 0.25);
 }

 int main(int argc, char **argv) {