Commit 230fc068 authored by Dmitry I. Lyakh

Removed debug output during normal execution.

parent f048a185
Pipeline #86638 passed with stage
in 4 minutes and 24 seconds
......@@ -8,9 +8,9 @@ void initialize() {
if(!exatnFrameworkInitialized){
serviceRegistry->initialize();
exatnFrameworkInitialized = true;
std::cout << "#DEBUG(exatn): ExaTN services initialized" << std::endl << std::flush;
//std::cout << "#DEBUG(exatn): ExaTN services initialized" << std::endl << std::flush;
numericalServer = std::make_shared<NumServer>();
std::cout << "#DEBUG(exatn): ExaTN numerical server initialized" << std::endl << std::flush;
//std::cout << "#DEBUG(exatn): ExaTN numerical server initialized" << std::endl << std::flush;
}
return;
}
......@@ -24,7 +24,7 @@ bool isInitialized() {
void finalize() {
numericalServer.reset();
exatnFrameworkInitialized = false;
std::cout << "#DEBUG(exatn): ExaTN numerical server shut down" << std::endl << std::flush;
//std::cout << "#DEBUG(exatn): ExaTN numerical server shut down" << std::endl << std::flush;
return;
}
......
......@@ -22,8 +22,8 @@ TensorRuntime::TensorRuntime(const std::string & graph_executor_name,
current_dag_(nullptr), executing_(false), alive_(false)
{
graph_executor_ = exatn::getService<TensorGraphExecutor>(graph_executor_name_);
std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: DAG executor set to "
<< graph_executor_name_ << " + " << node_executor_name_ << std::endl << std::flush;
// std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: DAG executor set to "
// << graph_executor_name_ << " + " << node_executor_name_ << std::endl << std::flush;
launchExecutionThread();
}
......@@ -32,9 +32,9 @@ TensorRuntime::~TensorRuntime()
{
if(alive_.load()){
alive_.store(false); //signal for the execution thread to finish
std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: Waiting Execution Thread ... " << std::flush;
// std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: Waiting Execution Thread ... " << std::flush;
exec_thread_.join(); //wait until the execution thread has finished
std::cout << "Joined" << std::endl << std::flush;
// std::cout << "Joined" << std::endl << std::flush;
}
}
......@@ -43,9 +43,9 @@ void TensorRuntime::launchExecutionThread()
{
if(!(alive_.load())){
alive_.store(true);
std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: Launching Execution Thread ... " << std::flush;
// std::cout << "#DEBUG(exatn::runtime::TensorRuntime)[MAIN_THREAD]: Launching Execution Thread ... " << std::flush;
exec_thread_ = std::thread(&TensorRuntime::executionThreadWorkflow,this);
std::cout << "Done" << std::endl << std::flush;
// std::cout << "Done" << std::endl << std::flush;
}
return; //only the main thread returns to the client
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.