Commit 59c53736 authored by Alvarez, Gonzalo's avatar Alvarez, Gonzalo
Browse files

tensorEval

parent cce6625d
Loading
Loading
Loading
Loading
+7 −5
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@ public:
	typedef std::map<PairStringSizeType,SizeType> MapPairStringSizeType;
	typedef typename PsimagLite::Vector<PairStringSizeType>::Type VectorPairStringSizeType;
	typedef SymmetryLocal SymmetryLocalType;
	typedef typename TensorType::TensorBlobType TensorBlobType;

	TensorEval(const SrepStatementType& tSrep,
	           const VectorTensorType&,
@@ -60,15 +61,16 @@ public:
		//Evaluate a tensor network:
		auto evaluated = exatn::evaluateTensorNetwork("srepStatement_.sRep()" , copy);
		TensorType::checkTalshErrorCode(evaluated, "evaluateTensorNetwork");
		PsimagLite::String lhs = srepStatement_.lhs().sRep();
		filterForExatn(lhs);
		auto synced = exatn::sync(lhs);

		auto synced = exatn::sync(srepStatement_.nameIdOfOutput());
		return synced;
	}

	void printResult(std::ostream& os) const
	void printResult(std::ostream& os, TensorType& outtensor) const
	{
		throw PsimagLite::RuntimeError("TensorEvalNew::printResult: Not implemented yet\n");
		TensorBlobType data = outtensor.data();
		for (SizeType i = 0; i < data.size(); ++i)
			os<<data[i]<<"\n";
	}

private:
+4 −0
Original line number Diff line number Diff line
@@ -59,6 +59,10 @@ public:
			return ptr_[ind];
		}

		SizeType size() const { return size_; }

	private:

		SizeType size_;
		const ComplexOrRealType* ptr_;
	};

src/exatn_exec_thread.log

deleted 100644 → 0
+0 −48
Original line number Diff line number Diff line
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 0: Opcode = 1: Details:
TensorOperation(1){
 u0{0:0,0:0}{3,5}
}
: Status = 0: Syncing ... Success
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 1: Opcode = 1: Details:
TensorOperation(1){
 u1{0:0}{5}
}
: Status = 0: Syncing ... Success
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 2: Opcode = 1: Details:
TensorOperation(1){
 r0{0:0}{3}
}
: Status = 0: Syncing ... Success
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 3: Opcode = 3: Details:
TensorOperation(3){
 u0{0:0,0:0}{3,5}
 (0,0)
}
: Status = 0: Syncing ... Success
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 4: Opcode = 3: Details:
TensorOperation(3){
 u1{0:0}{5}
 (0,0)
}
: Status = 0: Syncing ... Success
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 5: Opcode = 3: Details:
TensorOperation(3){
 r0{0:0}{3}
 (0,0)
}
: Status = 0: Syncing ... Success
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 6: Opcode = 3: Details:
TensorOperation(3){
 u1{0:0}{5}
 (0,0)
}
: Status = 0: Syncing ... Success
#DEBUG(EagerGraphExecutor)[EXEC_THREAD]: Submitting tensor operation 7: Opcode = 5: Details:
TensorOperation(5){
 D(u0)+=L(u0,c0)*R(c0)
 r0{0:0}{3}
 u0{0:0,0:0}{3,5}
 u1{0:0}{5}
 (1,0) (1,0)
}
: Status = 0: Syncing ... Success
+2 −1
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ int main(int argc, char **argv)
	}

	vt[1]->setToConstant(1.5);
	vt[2]->setToConstant(3.3);

	Mera::SrepStatement<double> srepEq(str);
	Mera::NameToIndexLut<TensorType> nameToIndexLut(vt);
@@ -49,7 +50,7 @@ int main(int argc, char **argv)

	while (!handle);

	tensorEval.printResult(std::cout);
	tensorEval.printResult(std::cout, *vt[2]);

	for (SizeType i = 0; i < vt.size(); ++i) {
		delete vt[i];