From 98ca403f4e6907554ae064d50b920ed1f9fc8aa5 Mon Sep 17 00:00:00 2001 From: Sam Jenkins <s.jenkins@stfc.ac.uk> Date: Thu, 23 May 2019 11:07:08 +0100 Subject: [PATCH] Re #25465 Replaced raw pointers with Shared for threading --- .../DataHandling/src/DefaultEventLoader.cpp | 2 +- .../DataHandling/src/LoadBankFromDiskTask.cpp | 4 +- .../inc/MantidDataObjects/MDGridBox.tcc | 6 +-- .../Kernel/inc/MantidKernel/ThreadPool.h | 2 +- .../Kernel/inc/MantidKernel/ThreadScheduler.h | 30 ++++++------ .../inc/MantidKernel/ThreadSchedulerMutexes.h | 20 ++++---- Framework/Kernel/src/ThreadPool.cpp | 2 +- Framework/Kernel/src/ThreadPoolRunnable.cpp | 5 +- Framework/Kernel/test/LoggerTest.h | 2 +- Framework/Kernel/test/MemoryTest.h | 2 +- Framework/Kernel/test/MutexTest.h | 8 ++-- .../Kernel/test/ThreadPoolRunnableTest.h | 4 +- Framework/Kernel/test/ThreadPoolTest.h | 30 ++++++------ .../Kernel/test/ThreadSchedulerMutexesTest.h | 46 ++++++++----------- Framework/Kernel/test/ThreadSchedulerTest.h | 22 ++++----- 15 files changed, 87 insertions(+), 98 deletions(-) diff --git a/Framework/DataHandling/src/DefaultEventLoader.cpp b/Framework/DataHandling/src/DefaultEventLoader.cpp index d23921d20a5..9874ccce0e4 100644 --- a/Framework/DataHandling/src/DefaultEventLoader.cpp +++ b/Framework/DataHandling/src/DefaultEventLoader.cpp @@ -42,7 +42,7 @@ void DefaultEventLoader::load(LoadEventNexus *alg, EventWorkspaceCollection &ws, for (size_t i = bankRange.first; i < bankRange.second; i++) { if (bankNumEvents[i] > 0) - pool.schedule(new LoadBankFromDiskTask( + pool.schedule(std::make_shared<LoadBankFromDiskTask>( loader, bankNames[i], classType, bankNumEvents[i], oldNeXusFileNames, prog.get(), diskIOMutex, *scheduler, periodLog)); } diff --git a/Framework/DataHandling/src/LoadBankFromDiskTask.cpp b/Framework/DataHandling/src/LoadBankFromDiskTask.cpp index c7b7dfafd36..715887908a2 100644 --- a/Framework/DataHandling/src/LoadBankFromDiskTask.cpp +++ b/Framework/DataHandling/src/LoadBankFromDiskTask.cpp @@ -482,13 +482,13 @@ void LoadBankFromDiskTask::run() { auto event_index_shrd = boost::make_shared<std::vector<uint64_t>>(std::move(event_index)); - ProcessBankData *newTask1 = new ProcessBankData( + std::shared_ptr<Task> newTask1 = std::make_shared<ProcessBankData>( m_loader, entry_name, prog, event_id_shrd, event_time_of_flight_shrd, numEvents, startAt, event_index_shrd, thisBankPulseTimes, m_have_weight, event_weight_shrd, m_min_id, mid_id); scheduler.push(newTask1); if (m_loader.splitProcessing && (mid_id < m_max_id)) { - ProcessBankData *newTask2 = new ProcessBankData( + std::shared_ptr<Task> newTask2 = std::make_shared<ProcessBankData>( m_loader, entry_name, prog, event_id_shrd, event_time_of_flight_shrd, numEvents, startAt, event_index_shrd, thisBankPulseTimes, m_have_weight, event_weight_shrd, (mid_id + 1), m_max_id); diff --git a/Framework/DataObjects/inc/MantidDataObjects/MDGridBox.tcc b/Framework/DataObjects/inc/MantidDataObjects/MDGridBox.tcc index 26f7c3cd07a..b1feafa7677 100644 --- a/Framework/DataObjects/inc/MantidDataObjects/MDGridBox.tcc +++ b/Framework/DataObjects/inc/MantidDataObjects/MDGridBox.tcc @@ -707,7 +707,7 @@ TMDE(void MDGridBox)::splitContents(size_t index, Kernel::ThreadScheduler *ts) { if (ts) { // Create a task to split the newly created MDGridBox. 
- ts->push(new Kernel::FunctionTask( + ts->push(std::make_shared<Kernel::FunctionTask>( boost::bind(&MDGridBox<MDE, nd>::splitAllIfNeeded, &*gridbox, ts))); } else { gridbox->splitAllIfNeeded(nullptr); @@ -760,7 +760,7 @@ TMDE(void MDGridBox)::splitAllIfNeeded(Kernel::ThreadScheduler *ts) { // ------ Perform split in parallel (using ThreadPool) ------ // So we create a task to split this MDBox, // Task is : this->splitContents(i, ts); - ts->push(new Kernel::FunctionTask( + ts->push(std::make_shared<Kernel::FunctionTask>( boost::bind(&MDGridBox<MDE, nd>::splitContents, &*this, i, ts))); } } else { @@ -789,7 +789,7 @@ TMDE(void MDGridBox)::splitAllIfNeeded(Kernel::ThreadScheduler *ts) { else // Go parallel if this is a big enough gridbox. // Task is : gridBox->splitAllIfNeeded(ts); - ts->push(new Kernel::FunctionTask(boost::bind( + ts->push(std::make_shared<Kernel::FunctionTask>(boost::bind( &MDGridBox<MDE, nd>::splitAllIfNeeded, &*gridBox, ts))); } } diff --git a/Framework/Kernel/inc/MantidKernel/ThreadPool.h b/Framework/Kernel/inc/MantidKernel/ThreadPool.h index 7dd51777ba3..25810709c2b 100644 --- a/Framework/Kernel/inc/MantidKernel/ThreadPool.h +++ b/Framework/Kernel/inc/MantidKernel/ThreadPool.h @@ -43,7 +43,7 @@ public: void start(double waitSec = 0.0); - void schedule(Task *task, bool start = false); + void schedule(std::shared_ptr<Task> task, bool start = false); void joinAll(); diff --git a/Framework/Kernel/inc/MantidKernel/ThreadScheduler.h b/Framework/Kernel/inc/MantidKernel/ThreadScheduler.h index 94ec9749710..8b632b9ffee 100644 --- a/Framework/Kernel/inc/MantidKernel/ThreadScheduler.h +++ b/Framework/Kernel/inc/MantidKernel/ThreadScheduler.h @@ -43,14 +43,14 @@ public: /** Add a Task to the queue. * @param newTask :: Task to add to queue */ - virtual void push(Task *newTask) = 0; + virtual void push(std::shared_ptr<Task> newTask) = 0; //----------------------------------------------------------------------------------- /** Retrieves the next Task to execute. * @param threadnum :: ID of the calling thread. * @return a Task pointer to execute. */ - virtual Task *pop(size_t threadnum) = 0; + virtual std::shared_ptr<Task> pop(size_t threadnum) = 0; //----------------------------------------------------------------------------------- /** Signal to the scheduler that a task is complete. @@ -139,7 +139,7 @@ public: } //------------------------------------------------------------------------------- - void push(Task *newTask) override { + void push(std::shared_ptr<Task> newTask) override { // Cache the total cost m_queueLock.lock(); m_cost += newTask->cost(); @@ -148,9 +148,9 @@ public: } //------------------------------------------------------------------------------- - Task *pop(size_t threadnum) override { + std::shared_ptr<Task> pop(size_t threadnum) override { UNUSED_ARG(threadnum); - Task *temp = nullptr; + std::shared_ptr<Task> temp = nullptr; m_queueLock.lock(); // Check the size within the same locking block; otherwise the size may // change before you get the next item. @@ -174,9 +174,7 @@ public: //------------------------------------------------------------------------------- void clear() override { m_queueLock.lock(); - // Empty out the queue and delete the pointers! 
-    for (auto &task : m_queue)
-      delete task;
+    // Empty out the queue
     m_queue.clear();
     m_cost = 0;
     m_costExecuted = 0;
@@ -185,7 +183,7 @@
 
 protected:
   /// Queue of tasks
-  std::deque<Task *> m_queue;
+  std::deque<std::shared_ptr<Task>> m_queue;
 };
 
 //===========================================================================
@@ -200,9 +198,9 @@ protected:
 class MANTID_KERNEL_DLL ThreadSchedulerLIFO : public ThreadSchedulerFIFO {
 
   //-------------------------------------------------------------------------------
-  Task *pop(size_t threadnum) override {
+  std::shared_ptr<Task> pop(size_t threadnum) override {
     UNUSED_ARG(threadnum);
-    Task *temp = nullptr;
+    std::shared_ptr<Task> temp = nullptr;
     m_queueLock.lock();
     // Check the size within the same locking block; otherwise the size may
     // change before you get the next item.
@@ -243,7 +241,7 @@ public:
   }
 
   //-------------------------------------------------------------------------------
-  void push(Task *newTask) override {
+  void push(std::shared_ptr<Task> newTask) override {
     // Cache the total cost
     m_queueLock.lock();
     m_cost += newTask->cost();
@@ -252,9 +250,9 @@ public:
   }
 
   //-------------------------------------------------------------------------------
-  Task *pop(size_t threadnum) override {
+  std::shared_ptr<Task> pop(size_t threadnum) override {
     UNUSED_ARG(threadnum);
-    Task *temp = nullptr;
+    std::shared_ptr<Task> temp = nullptr;
     m_queueLock.lock();
     // Check the size within the same locking block; otherwise the size may
     // change before you get the next item.
@@ -281,8 +279,6 @@ public:
   void clear() override {
     m_queueLock.lock();
-    // Empty out the queue and delete the pointers!
-    for (auto &taskPair : m_map)
-      delete taskPair.second;
+    // Empty out the queue
     m_map.clear();
     m_cost = 0;
     m_costExecuted = 0;
@@ -291,7 +287,7 @@ public:
 
 protected:
   /// A multimap keeps tasks sorted by the key (cost)
-  std::multimap<double, Task *> m_map;
+  std::multimap<double, std::shared_ptr<Task>> m_map;
 };
 
 } // namespace Kernel
diff --git a/Framework/Kernel/inc/MantidKernel/ThreadSchedulerMutexes.h b/Framework/Kernel/inc/MantidKernel/ThreadSchedulerMutexes.h
index 7e2f2c97da7..39c56c7646a 100644
--- a/Framework/Kernel/inc/MantidKernel/ThreadSchedulerMutexes.h
+++ b/Framework/Kernel/inc/MantidKernel/ThreadSchedulerMutexes.h
@@ -9,6 +9,8 @@
 
 #include "MantidKernel/DllConfig.h"
 #include "MantidKernel/ThreadScheduler.h"
+
+#include <memory>
 #include <mutex>
 #include <numeric>
 #include <set>
@@ -39,7 +41,7 @@ public:
   ~ThreadSchedulerMutexes() override { clear(); }
 
   //-------------------------------------------------------------------------------
-  void push(Task *newTask) override {
+  void push(std::shared_ptr<Task> newTask) override {
     // Cache the total cost
     std::lock_guard<std::mutex> lock(m_queueLock);
     m_cost += newTask->cost();
@@ -49,10 +51,10 @@ public:
   }
 
   //-------------------------------------------------------------------------------
-  Task *pop(size_t threadnum) override {
+  std::shared_ptr<Task> pop(size_t threadnum) override {
     UNUSED_ARG(threadnum);
 
-    Task *temp = nullptr;
+    std::shared_ptr<Task> temp = nullptr;
     std::lock_guard<std::mutex> lock(m_queueLock);
     // Check the size within the same locking block; otherwise the size may
     // change before you get the next item.
@@ -73,7 +75,7 @@ public:
         auto it2 = map.end();
         --it2;
         // Great, we found something.
-        temp = it2->second;
+        temp = std::move(it2->second);
         // Take it out of the map (popped)
         map.erase(it2);
         break;
@@ -87,7 +89,7 @@ public:
       if (!mutexedMap.second.empty()) {
         InnerMap &map = mutexedMap.second;
         // Use the first one
-        temp = std::move(map.begin()->second);
+        temp = std::move(map.begin()->second);
         // And erase that item (pop it)
         map.erase(map.begin());
         break;
@@ -130,7 +132,7 @@ public:
     return std::accumulate(
         m_supermap.cbegin(), m_supermap.cend(), size_t{0},
         [](size_t total,
-           const std::pair<boost::shared_ptr<std::mutex>, InnerMap>
+           const SuperMap::value_type
               &mutexedMap) { return total + mutexedMap.second.size(); });
   }
@@ -140,7 +142,7 @@ public:
     std::lock_guard<std::mutex> lock(m_queueLock);
     auto mapWithTasks = std::find_if_not(
         m_supermap.cbegin(), m_supermap.cend(),
-        [](const std::pair<boost::shared_ptr<std::mutex>, InnerMap>
+        [](const SuperMap::value_type
               &mutexedMap) { return mutexedMap.second.empty(); });
     return mapWithTasks == m_supermap.cend();
   }
@@ -152,8 +154,6 @@ public:
   void clear() override {
     std::lock_guard<std::mutex> lock(m_queueLock);
-    // Empty out the queue and delete the pointers!
     for (auto &it : m_supermap) {
       InnerMap &map = it.second;
-      for (auto &it2 : map)
-        delete it2.second;
+      // Empty out the queue
       map.clear();
     }
     m_supermap.clear();
@@ -163,7 +163,7 @@ protected:
 
 protected:
   /// Map to tasks, sorted by cost
-  using InnerMap = std::multimap<double, Task *>;
+  using InnerMap = std::multimap<double, std::shared_ptr<Task>>;
   /// Map to maps, sorted by Mutex*
   using SuperMap = std::map<boost::shared_ptr<std::mutex>, InnerMap>;
diff --git a/Framework/Kernel/src/ThreadPool.cpp b/Framework/Kernel/src/ThreadPool.cpp
index f381b9f7f3b..400f25afccd 100644
--- a/Framework/Kernel/src/ThreadPool.cpp
+++ b/Framework/Kernel/src/ThreadPool.cpp
@@ -125,7 +125,7 @@ void ThreadPool::start(double waitSec) {
- * @param task :: pointer to a Task object to run.
+ * @param task :: shared pointer to the Task object to run.
  * @param start :: start the thread at the same time; default false
  */
-void ThreadPool::schedule(Task *task, bool start) {
+void ThreadPool::schedule(std::shared_ptr<Task> task, bool start) {
   if (task) {
     m_scheduler->push(task);
     // Start all the threads if they were not already.
diff --git a/Framework/Kernel/src/ThreadPoolRunnable.cpp b/Framework/Kernel/src/ThreadPoolRunnable.cpp
index 05bf20ad5b3..332673bf537 100644
--- a/Framework/Kernel/src/ThreadPoolRunnable.cpp
+++ b/Framework/Kernel/src/ThreadPoolRunnable.cpp
@@ -44,7 +44,7 @@ void ThreadPoolRunnable::clearWait() { m_waitSec = 0.0; }
 * as scheduled to it.
 */
 void ThreadPoolRunnable::run() {
-  Task *task;
+  std::shared_ptr<Task> task;
 
   // If there are no tasks yet, wait up to m_waitSec for them to come up
   while (m_scheduler->empty() && m_waitSec > 0.0) {
@@ -74,7 +74,7 @@ void ThreadPoolRunnable::run() {
       }
 
       // Tell the scheduler that we finished this task
-      m_scheduler->finished(task, m_threadnum);
+      m_scheduler->finished(task.get(), m_threadnum);
 
       // Report progress, if specified.
       if (m_prog)
@@ -85,7 +85,6 @@ void ThreadPoolRunnable::run() {
       mutex->unlock();
 
-      // We now delete the task to free up memory
-      delete task;
+      // The shared_ptr releases the task here; the last owner frees it
     } else {
       // No appropriate task for this thread (perhaps a mutex is locked)
      // but there are more tasks.
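The ownership contract after this change, as a minimal sketch for review (a simplified fragment, not the exact Mantid sources): schedulers store and hand out std::shared_ptr<Task>, notification hooks such as ThreadScheduler::finished() still take a non-owning raw pointer, and no code path deletes a Task by hand any more.

    // Simplified worker-loop fragment, following ThreadPoolRunnable::run() above
    std::shared_ptr<Task> task = m_scheduler->pop(m_threadnum); // shared ownership leaves the queue
    if (task) {
      task->run();                                    // execute the work item
      m_scheduler->finished(task.get(), m_threadnum); // notify only; raw pointer, no ownership
    }
    // leaving scope releases 'task'; the last owner frees the Task,
    // which replaces the old explicit 'delete task;'
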
diff --git a/Framework/Kernel/test/LoggerTest.h b/Framework/Kernel/test/LoggerTest.h index 1c57502ed4b..eae3879505e 100644 --- a/Framework/Kernel/test/LoggerTest.h +++ b/Framework/Kernel/test/LoggerTest.h @@ -93,7 +93,7 @@ public: void test_ThreadPool_ParallelLogging() { ThreadPool tp; for (int i = 0; i < 1000; i++) - tp.schedule(new FunctionTask( + tp.schedule(std::make_shared<FunctionTask>( boost::bind(&LoggerTest::doLogInParallel, &*this, i))); tp.joinAll(); } diff --git a/Framework/Kernel/test/MemoryTest.h b/Framework/Kernel/test/MemoryTest.h index 05aa8c5a376..f140f163c45 100644 --- a/Framework/Kernel/test/MemoryTest.h +++ b/Framework/Kernel/test/MemoryTest.h @@ -56,7 +56,7 @@ public: void test_parallel_threadpool() { ThreadPool pool; for (int i = 0; i < 500; i++) { - pool.schedule(new FunctionTask(&MemoryTest_myTaskFunction, 1.0)); + pool.schedule(std::make_shared<FunctionTask>(&MemoryTest_myTaskFunction, 1.0)); } pool.joinAll(); } diff --git a/Framework/Kernel/test/MutexTest.h b/Framework/Kernel/test/MutexTest.h index 4130d1a554c..fe515e02dff 100644 --- a/Framework/Kernel/test/MutexTest.h +++ b/Framework/Kernel/test/MutexTest.h @@ -62,7 +62,7 @@ public: CPUTimer tim; size_t numTasks = 50; for (size_t i = 0; i < numTasks; i++) - pool.schedule(new FunctionTask(reader)); + pool.schedule(std::make_shared<FunctionTask>(reader)); pool.joinAll(); std::cout << tim << " to execute all " << numTasks << " tasks\n"; } @@ -73,7 +73,7 @@ public: CPUTimer tim; size_t numTasks = 10; for (size_t i = 0; i < numTasks; i++) - pool.schedule(new FunctionTask(unconditional_writer)); + pool.schedule(std::make_shared<FunctionTask>(unconditional_writer)); pool.joinAll(); std::cout << tim << " to execute all " << numTasks << " tasks\n"; TSM_ASSERT_EQUALS("The writers were all called", shared_data.size(), @@ -87,9 +87,9 @@ public: size_t numTasks = 50; for (size_t i = 0; i < numTasks; i++) { if (i % 10 == 0) - pool.schedule(new FunctionTask(unconditional_writer)); + pool.schedule(std::make_shared<FunctionTask>(unconditional_writer)); else - pool.schedule(new FunctionTask(reader)); + pool.schedule(std::make_shared<FunctionTask>(reader)); } pool.joinAll(); std::cout << tim << " to execute all " << numTasks << " tasks\n"; diff --git a/Framework/Kernel/test/ThreadPoolRunnableTest.h b/Framework/Kernel/test/ThreadPoolRunnableTest.h index d112072b13a..731f2c014f5 100644 --- a/Framework/Kernel/test/ThreadPoolRunnableTest.h +++ b/Framework/Kernel/test/ThreadPoolRunnableTest.h @@ -35,7 +35,7 @@ public: std::unique_ptr<ThreadPoolRunnable> tpr; std::unique_ptr<ThreadScheduler> sc = std::make_unique<ThreadSchedulerFIFO>(); tpr = std ::make_unique<ThreadPoolRunnable>(0, sc.get()); - sc->push(new SimpleTask()); + sc->push(std::make_shared<SimpleTask>()); TS_ASSERT_EQUALS(sc->size(), 1); // Run it @@ -66,7 +66,7 @@ public: // Put 10 tasks in for (size_t i = 0; i < 10; i++) - sc->push(new TaskThatThrows()); + sc->push(std::make_shared<TaskThatThrows>()); // The task throws but the runnable just aborts instead ThreadPoolRunnableTest_value = 0; diff --git a/Framework/Kernel/test/ThreadPoolTest.h b/Framework/Kernel/test/ThreadPoolTest.h index 5927ad00a17..caa2bdfee5e 100644 --- a/Framework/Kernel/test/ThreadPoolTest.h +++ b/Framework/Kernel/test/ThreadPoolTest.h @@ -100,7 +100,7 @@ public: if (depth < 4) { // Add ten tasks (one level deeper) for (size_t i = 0; i < 10; i++) { - m_scheduler->push(new TaskThatAddsTasks(m_scheduler, depth + 1)); + m_scheduler->push(std::make_shared<TaskThatAddsTasks>(m_scheduler, depth + 1)); } } 
else { // Lock to ensure you don't step on yourself. @@ -131,7 +131,7 @@ public: for (int i = 0; i < 16; i++) { double cost = i; // time is exactly i // Bind to a member function of mywaster - p.schedule(new FunctionTask( + p.schedule(std::make_shared<FunctionTask>( boost::bind(&TimeWaster::waste_time_with_lock, &mywaster, i), cost)); } @@ -147,7 +147,7 @@ public: void test_schedule() { ThreadPool p; TS_ASSERT_EQUALS(threadpooltest_check, 0); - p.schedule(new FunctionTask(threadpooltest_function)); + p.schedule(std::make_shared<FunctionTask>(threadpooltest_function)); TS_ASSERT_EQUALS(threadpooltest_check, 0); TS_ASSERT_THROWS_NOTHING(p.joinAll()); TS_ASSERT_EQUALS(threadpooltest_check, 12); @@ -184,7 +184,7 @@ public: new MyTestProgress(0.0, 1.0, 10, this)); for (int i = 0; i < 10; i++) { double cost = i; - p.schedule(new FunctionTask(threadpooltest_function, cost)); + p.schedule(std::make_shared<FunctionTask>(threadpooltest_function, cost)); } TS_ASSERT_THROWS_NOTHING(p.joinAll()); // The test reporter was called @@ -207,7 +207,7 @@ public: Poco::Thread::sleep(40); // Now you add the task - p.schedule(new FunctionTask(threadpooltest_function)); + p.schedule(std::make_shared<FunctionTask>(threadpooltest_function)); // Simulate doing more work (this allows the task to run) Poco::Thread::sleep(40); @@ -218,7 +218,7 @@ public: // Reset and try again. The threads are still waiting, it has been less than // 1 second. threadpooltest_check = 0; - p.schedule(new FunctionTask(threadpooltest_function)); + p.schedule(std::make_shared<FunctionTask>(threadpooltest_function)); Poco::Thread::sleep(40); TS_ASSERT_EQUALS(threadpooltest_check, 12); @@ -241,7 +241,7 @@ public: // But it takes too long before the task is actually added Poco::Thread::sleep(100); - p.schedule(new FunctionTask(threadpooltest_function)); + p.schedule(std::make_shared<FunctionTask>(threadpooltest_function)); Poco::Thread::sleep(30); // So the task has not run, since the threads exited before! TS_ASSERT_EQUALS(threadpooltest_check, 0); @@ -260,14 +260,14 @@ public: void test_schedule_resume_tasks() { ThreadPool p; // Makes a default scheduler threadpooltest_check = 0; - p.schedule(new FunctionTask(threadpooltest_function)); + p.schedule(std::make_shared<FunctionTask>(threadpooltest_function)); TS_ASSERT_THROWS_NOTHING(p.joinAll()); // Ok, the task did execute. TS_ASSERT_EQUALS(threadpooltest_check, 12); // Now we reset. 
     threadpooltest_check = 0;
-    p.schedule(new FunctionTask(threadpooltest_function));
+    p.schedule(std::make_shared<FunctionTask>(threadpooltest_function));
     TS_ASSERT_THROWS_NOTHING(p.joinAll());
     TS_ASSERT_EQUALS(threadpooltest_check, 12);
   }
@@ -280,7 +280,7 @@
     for (int i = 0; i < 10; i++) {
       double cost = i;
-      p.schedule(
-          new FunctionTask(boost::bind(threadpooltest_adding_stuff, i), cost));
+      p.schedule(std::make_shared<FunctionTask>(
+          boost::bind(threadpooltest_adding_stuff, i), cost));
     }
     TS_ASSERT_THROWS_NOTHING(p.joinAll());
     TS_ASSERT_EQUALS(threadpooltest_vec.size(), 10);
@@ -299,7 +299,7 @@
     for (int i = 0; i < 10; i++) {
       double cost = i;
-      p.schedule(
-          new FunctionTask(boost::bind(threadpooltest_adding_stuff, i), cost));
+      p.schedule(std::make_shared<FunctionTask>(
+          boost::bind(threadpooltest_adding_stuff, i), cost));
     }
     TS_ASSERT_THROWS_NOTHING(p.joinAll());
     TS_ASSERT_EQUALS(threadpooltest_vec.size(), 10);
@@ -317,7 +317,7 @@
     for (int i = 0; i < 10; i++) {
       double cost = i;
-      p.schedule(
-          new FunctionTask(boost::bind(threadpooltest_adding_stuff, i), cost));
+      p.schedule(std::make_shared<FunctionTask>(
+          boost::bind(threadpooltest_adding_stuff, i), cost));
     }
     TS_ASSERT_THROWS_NOTHING(p.joinAll());
     TS_ASSERT_EQUALS(threadpooltest_vec.size(), 10);
@@ -340,7 +340,7 @@
     mywaster.total = 0;
     boost::shared_ptr<std::mutex> lastMutex;
     for (size_t i = 0; i <= num; i++) {
-      Task *task = new FunctionTask(
+      auto task = std::make_shared<FunctionTask>(
           boost::bind(&TimeWaster::add_to_number, &mywaster, i),
           static_cast<double>(i));
       // Create a new mutex every 1000 tasks. This is more relevant to the
@@ -386,7 +386,7 @@
   void do_StressTest_TasksThatCreateTasks(ThreadScheduler *sched) {
     ThreadPool *p = new ThreadPool(sched, 0);
     // Create the first task, depth 0, that will recursively create 10000
-    TaskThatAddsTasks *task = new TaskThatAddsTasks(sched, 0);
+    auto task = std::make_shared<TaskThatAddsTasks>(sched, 0);
     p->schedule(task);
 
     // Reset the total
@@ -429,7 +429,7 @@
     ThreadPool p(new ThreadSchedulerFIFO(), 1); // one core
     ThreadPoolTest_TaskThatThrows_counter = 0;
     for (int i = 0; i < 10; i++) {
-      p.schedule(new TaskThatThrows());
+      p.schedule(std::make_shared<TaskThatThrows>());
     }
     // joinAll rethrows
     TS_ASSERT_THROWS(p.joinAll(), const std::runtime_error &);
diff --git a/Framework/Kernel/test/ThreadSchedulerMutexesTest.h b/Framework/Kernel/test/ThreadSchedulerMutexesTest.h
index c9657d85612..6ffe833612e 100644
--- a/Framework/Kernel/test/ThreadSchedulerMutexesTest.h
+++ b/Framework/Kernel/test/ThreadSchedulerMutexesTest.h
@@ -41,8 +41,8 @@ public:
     ThreadSchedulerMutexes sc;
     auto mut1 = boost::make_shared<std::mutex>();
     auto mut2 = boost::make_shared<std::mutex>();
-    TaskWithMutex *task1 = new TaskWithMutex(mut1, 10.0);
-    TaskWithMutex *task2 = new TaskWithMutex(mut2, 9.0);
+    auto task1 = std::make_shared<TaskWithMutex>(mut1, 10.0);
+    auto task2 = std::make_shared<TaskWithMutex>(mut2, 9.0);
     sc.push(task1);
     TS_ASSERT_EQUALS(sc.size(), 1);
@@ -55,20 +55,20 @@ public:
     auto mut1 = boost::make_shared<std::mutex>();
     auto mut2 = boost::make_shared<std::mutex>();
     auto mut3 = boost::make_shared<std::mutex>();
-    TaskWithMutex *task1 = new TaskWithMutex(mut1, 10.0);
-    TaskWithMutex *task2 = new TaskWithMutex(mut1, 9.0);
-    TaskWithMutex *task3 = new TaskWithMutex(mut1, 8.0);
-    TaskWithMutex *task4 = new TaskWithMutex(mut2, 7.0);
-    TaskWithMutex *task5 = new TaskWithMutex(mut2, 6.0);
-    TaskWithMutex *task6 = new TaskWithMutex(mut3, 5.0);
-    TaskWithMutex *task7 =
-        new TaskWithMutex(boost::shared_ptr<std::mutex>(), 4.0);
+    auto task1 = std::make_shared<TaskWithMutex>(mut1, 10.0);
+    auto task2 = std::make_shared<TaskWithMutex>(mut1, 9.0);
+    auto task3 = std::make_shared<TaskWithMutex>(mut1, 8.0);
+    auto task4 = std::make_shared<TaskWithMutex>(mut2, 7.0);
+    auto task5 = std::make_shared<TaskWithMutex>(mut2, 6.0);
+    auto task6 = std::make_shared<TaskWithMutex>(mut3, 5.0);
+    auto task7 =
+        std::make_shared<TaskWithMutex>(boost::shared_ptr<std::mutex>(), 4.0);
     sc.push(task1);
     sc.push(task2);
     sc.push(task3);
     TS_ASSERT_EQUALS(sc.size(), 3);
-    Task *task;
+    std::shared_ptr<Task> task;
     // Run the first task. mut1 becomes busy
     task = sc.pop(0);
     TS_ASSERT_EQUALS(task, task1);
     TS_ASSERT_EQUALS(sc.size(), 2);
@@ -99,11 +99,11 @@ public:
     TS_ASSERT_EQUALS(sc.size(), 3);
 
     // Now we release task1, allowing task2 to come next
-    sc.finished(task1, 0);
+    sc.finished(task1.get(), 0);
     task = sc.pop(0);
     TS_ASSERT_EQUALS(task, task2);
     TS_ASSERT_EQUALS(sc.size(), 2);
-    sc.finished(task2, 0); // Have to complete task2 before task3 comes
+    sc.finished(task2.get(), 0); // Have to complete task2 before task3 comes
     task = sc.pop(0);
     TS_ASSERT_EQUALS(task, task3);
     TS_ASSERT_EQUALS(sc.size(), 1);
@@ -115,20 +115,14 @@ public:
     // (for this task, the thread pool would have to wait till the mutex is
     // released)
 
-    delete task1;
-    delete task2;
-    delete task3;
-    delete task4;
-    delete task5;
-    delete task6;
-    delete task7;
+
   }
 
   void test_clear() {
     ThreadSchedulerMutexes sc;
     for (size_t i = 0; i < 10; i++) {
-      TaskWithMutex *task =
-          new TaskWithMutex(boost::make_shared<std::mutex>(), 10.0);
+      auto task =
+          std::make_shared<TaskWithMutex>(boost::make_shared<std::mutex>(), 10.0);
       sc.push(task);
     }
     TS_ASSERT_EQUALS(sc.size(), 10);
@@ -145,14 +139,14 @@ public:
     auto mut1 = boost::make_shared<std::mutex>();
     size_t num = 500;
     for (size_t i = 0; i < num; i++) {
-      sc.push(new TaskWithMutex(mut1, 10.0));
+      sc.push(std::make_shared<TaskWithMutex>(mut1, 10.0));
     }
     // std::cout << tim0.elapsed() << " secs to push.\n";
     TS_ASSERT_EQUALS(sc.size(), num);
 
     Timer tim1;
     for (size_t i = 0; i < num; i++) {
-      delete sc.pop(0);
+      sc.pop(0);
     }
     // std::cout << tim1.elapsed() << " secs to pop.\n";
     TS_ASSERT_EQUALS(sc.size(), 0);
@@ -163,14 +157,14 @@ public:
     Timer tim0;
     size_t num = 500;
     for (size_t i = 0; i < num; i++) {
-      sc.push(new TaskWithMutex(boost::make_shared<std::mutex>(), 10.0));
+      sc.push(std::make_shared<TaskWithMutex>(boost::make_shared<std::mutex>(), 10.0));
     }
     // std::cout << tim0.elapsed() << " secs to push.\n";
     TS_ASSERT_EQUALS(sc.size(), num);
 
     Timer tim1;
     for (size_t i = 0; i < num; i++) {
-      delete sc.pop(0);
+      sc.pop(0);
     }
     // std::cout << tim1.elapsed() << " secs to pop.\n";
     TS_ASSERT_EQUALS(sc.size(), 0);
diff --git a/Framework/Kernel/test/ThreadSchedulerTest.h b/Framework/Kernel/test/ThreadSchedulerTest.h
index f83c90d41d1..e6f9474246b 100644
--- a/Framework/Kernel/test/ThreadSchedulerTest.h
+++ b/Framework/Kernel/test/ThreadSchedulerTest.h
@@ -39,9 +39,9 @@ public:
     TS_ASSERT_EQUALS(std::string(sc->getAbortException().what()), "");
     TS_ASSERT_EQUALS(sc->size(), 0);
 
-    sc->push(new TaskDoNothing());
+    sc->push(std::make_shared<TaskDoNothing>());
     TS_ASSERT_EQUALS(sc->size(), 1);
-    sc->push(new TaskDoNothing());
+    sc->push(std::make_shared<TaskDoNothing>());
     TS_ASSERT_EQUALS(sc->size(), 2);
 
     // Clear empties the queue
@@ -70,18 +70,20 @@ public:
     ThreadSchedulerTest_numDestructed = 0;
 
     // Create and push them in order
     TaskDoNothing *tasks[4];
     for (size_t i = 0; i < 4; i++)
     {
-      tasks[i] = new TaskDoNothing(costs[i]);
-      sc->push(tasks[i]);
+      auto task = std::make_shared<TaskDoNothing>(costs[i]);
+      sc->push(task);
+      tasks[i] = task.get();
     }
-
+    std::vector<std::shared_ptr<TaskDoNothing>> poppedTasks(4);
+
     // Pop them, and check that we get them in the order we expected
     for (size_t i = 0; i < 4; i++) {
-      TaskDoNothing *task = dynamic_cast<TaskDoNothing *>(sc->pop(0));
+      poppedTasks[i] = std::dynamic_pointer_cast<TaskDoNothing>(sc->pop(0));
       size_t index = 0;
       for (index = 0; index < 4; index++)
-        if (task == tasks[index])
+        if (poppedTasks[i].get() == tasks[index])
           break;
       TS_ASSERT_EQUALS(index, poppedIndices[i]);
     }
@@ -91,9 +93,6 @@ public:
     // And ThreadScheduler does not delete popped tasks in this way
     TS_ASSERT_EQUALS(ThreadSchedulerTest_numDestructed, 0);
 
-    for (auto &task : tasks) {
-      delete task;
-    }
   }
 
   void test_ThreadSchedulerFIFO() {
-- 
GitLab
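For reviewers, a minimal usage sketch of the patched scheduling API; the work() function, the task count, and the example() wrapper are illustrative placeholders rather than part of the patch:

    #include "MantidKernel/FunctionTask.h"
    #include "MantidKernel/ThreadPool.h"

    #include <memory>

    using Mantid::Kernel::FunctionTask;
    using Mantid::Kernel::ThreadPool;

    namespace {
    void work() { /* placeholder unit of work */ }
    } // namespace

    void example() {
      ThreadPool pool; // default FIFO scheduler, one thread per core
      for (int i = 0; i < 8; ++i)
        pool.schedule(std::make_shared<FunctionTask>(&work)); // was: pool.schedule(new FunctionTask(&work))
      pool.joinAll(); // runs everything; each Task is freed by its shared_ptr, never by delete
    }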