#ifndef MANTID_KERNEL_THREADSCHEDULERMUTEXESTEST_H_
#define MANTID_KERNEL_THREADSCHEDULERMUTEXESTEST_H_

#include <cxxtest/TestSuite.h>

#include "MantidKernel/System.h"
#include "MantidKernel/ThreadSchedulerMutexes.h"
#include "MantidKernel/Timer.h"

#include <iomanip>
#include <iostream>

using namespace Mantid::Kernel;
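/** Unit tests for ThreadSchedulerMutexes: a scheduler that, when handing out
 * tasks, prefers ones whose mutex is not currently held by a running task. */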
class ThreadSchedulerMutexesTest : public CxxTest::TestSuite {
public:
  /** A custom implementation of Task
   * that holds the mutex it needs to run. */
  class TaskWithMutex : public Task {
  public:
    TaskWithMutex(Mutex *mutex, double cost) {
      m_mutex = mutex;
      m_cost = cost;
    }

    /// The task body is irrelevant here; only the scheduling order is tested.
    void run() {
      // TaskTestNamespace::my_check_value = 123;
    }
  };
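  /// Pushing tasks should be reflected in the scheduler's reported size.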
  void test_push() {
    ThreadSchedulerMutexes sc;
    Mutex *mut1 = new Mutex();
    Mutex *mut2 = new Mutex();
    TaskWithMutex *task1 = new TaskWithMutex(mut1, 10.0);
    TaskWithMutex *task2 = new TaskWithMutex(mut2, 9.0);

    sc.push(task1);
    TS_ASSERT_EQUALS(sc.size(), 1);
    sc.push(task2);
    TS_ASSERT_EQUALS(sc.size(), 2);
  }
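  /** Verify the pop() order: the scheduler should skip over tasks whose
   * mutex is already in use, as long as an alternative task exists.
   * (The leading "x" hides this method from CxxTest's test discovery,
   * which only runs methods whose names start with "test".) */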
  void xtest_queue() {
    ThreadSchedulerMutexes sc;
    Mutex *mut1 = new Mutex();
    Mutex *mut2 = new Mutex();
    Mutex *mut3 = new Mutex();
    TaskWithMutex *task1 = new TaskWithMutex(mut1, 10.0);
    TaskWithMutex *task2 = new TaskWithMutex(mut1, 9.0);
    TaskWithMutex *task3 = new TaskWithMutex(mut1, 8.0);
    TaskWithMutex *task4 = new TaskWithMutex(mut2, 7.0);
    TaskWithMutex *task5 = new TaskWithMutex(mut2, 6.0);
    TaskWithMutex *task6 = new TaskWithMutex(mut3, 5.0);
    sc.push(task1);
    sc.push(task2);
    sc.push(task3);
    sc.push(task4);
    sc.push(task5);
    sc.push(task6);

    Task *task;
    // Pop the first task: the highest-cost one whose mutex is free
    task = sc.pop(0);
    TS_ASSERT_EQUALS(task, task1);
    // task->getMutex()->lock();
    // The next one will be task4, since mut1 is locked
    task = sc.pop(0);
    TS_ASSERT_EQUALS(task, task4);
    // task->getMutex()->lock();
    // The next one will be task6, since mut1 and mut2 are locked
    task = sc.pop(0);
    TS_ASSERT_EQUALS(task, task6);
    // Now we mark task1 as finished, releasing mut1 and allowing task2 to come next
    sc.finished(task1, 0);
    task = sc.pop(0);
    TS_ASSERT_EQUALS(task, task2);
    sc.finished(task2, 0); // Have to complete task2 before task3 comes
    task = sc.pop(0);
    TS_ASSERT_EQUALS(task, task3);
    // mut2 is still locked, but since task5 is the last one, it is returned anyway
    task = sc.pop(0);
    TS_ASSERT_EQUALS(task, task5);
    // (at this point, the thread pool would have to wait until the mutex is released)
    TS_ASSERT_EQUALS(sc.size(), 0);
  }
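  /** Rough timing of push() and pop() with 5000 tasks that all share a
   * single, locked mutex. (Also disabled via the "x" prefix.) */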
  void xtest_performance() {
    ThreadSchedulerMutexes sc;
    Timer tim0;
    Mutex *mut1 = new Mutex();
    size_t num = 5000;
    for (size_t i = 0; i < num; i++) {
      sc.push(new TaskWithMutex(mut1, 10.0));
    }
    std::cout << tim0.elapsed() << " secs to push." << std::endl;

    // Now lock the only mutex, so pop() never finds a task with a free mutex
    mut1->lock();

    Timer tim1;
    for (size_t i = 0; i < num; i++) {
      Task *task = sc.pop(0);
      (void)task; // the popped tasks are deliberately neither run nor freed here
    }
    std::cout << tim1.elapsed() << " secs to pop." << std::endl;

    // Finally, the mutex would be unlocked
    // mut1->unlock();
  }
};

#endif /* MANTID_KERNEL_THREADSCHEDULERMUTEXESTEST_H_ */