#ifndef MUTEXTEST_H
#define MUTEXTEST_H
#include <cxxtest/TestSuite.h>
#include "MantidKernel/MultiThreaded.h"
#include "MantidKernel/ThreadPool.h"
#include "MantidKernel/FunctionTask.h"
#include <iostream>
#include "MantidKernel/CPUTimer.h"
using namespace Mantid::Kernel;
#define DATA_SIZE 10000000
std::vector<double> shared_data;
//#include <boost/thread.hpp>
//boost::shared_mutex _access;
//void reader()
//{
// boost::shared_lock< boost::shared_mutex > lock(_access);
// // do work here, without anyone having exclusive access
// for (size_t i=0; i<shared_data.size(); i++)
// {
// double val = shared_data[i];
// }
//}
//
//void conditional_writer()
//{
// boost::upgrade_lock< boost::shared_mutex > lock(_access);
// // do work here, without anyone having exclusive access
//
// if (true)
// {
// boost::upgrade_to_unique_lock< boost::shared_mutex > uniqueLock(lock);
// // do work here, but now you have exclusive access
// for (size_t i=0; i<shared_data.size(); i++)
// {
// shared_data[i] = 2.345;
// }
// }
//
// // do more work here, without anyone having exclusive access
//}
//
//void unconditional_writer()
//{
// boost::unique_lock< boost::shared_mutex > lock( _access );
// // do work here, with exclusive access
// shared_data.resize(shared_data.size()+1, 2.345);
// // Dumb thing to slow down the writer
// for (size_t i=0; i<shared_data.size(); i++)
// shared_data[i] = 4.567;
//}
/** Test suite exercising concurrent access to a shared std::vector.
 *
 *  The active tests are currently placeholders; the commented-out tests
 *  below (and the boost::shared_mutex helpers commented out above) were
 *  used to benchmark simultaneous readers/writers via ThreadPool and are
 *  kept for reference.
 */
class MutexTest : public CxxTest::TestSuite
{
public:
  /// Called before each test: fill the shared vector with DATA_SIZE elements.
  void setUp()
  {
    shared_data.resize(DATA_SIZE, 1.000);
  }

  /// Called after each test: nothing to clean up.
  void tearDown()
  {
  }

  /// Placeholder so the suite always has at least one runnable test.
  void test_nothing()
  {
  }

//  /** Launch a bunch of reading threads */
//  void test_simultaneous_read()
//  {
//    ThreadPool pool;
//    CPUTimer tim;
//    size_t numTasks = 100;
//    for (size_t i=0; i<numTasks; i++)
//      pool.schedule( new FunctionTask(reader) );
//    pool.joinAll();
//    std::cout << tim << " to execute all " << numTasks << " tasks" << std::endl;
//  }
//
//  /** Launch a bunch of writing threads */
//  void test_simultaneous_write()
//  {
//    ThreadPool pool;
//    CPUTimer tim;
//    size_t numTasks = 10;
//    for (size_t i=0; i<numTasks; i++)
//      pool.schedule( new FunctionTask(unconditional_writer) );
//    pool.joinAll();
//    std::cout << tim << " to execute all " << numTasks << " tasks" << std::endl;
//    TSM_ASSERT_EQUALS( "The writers were all called", shared_data.size(), DATA_SIZE + numTasks)
//  }
//
//  /** Mix 1 writing thread for 9 reading threads */
//  void test_write_blocks_readers()
//  {
//    ThreadPool pool;
//    CPUTimer tim;
//    size_t numTasks = 100;
//    for (size_t i=0; i<numTasks; i++)
//    {
//      if (i%10 == 0)
//        pool.schedule( new FunctionTask(unconditional_writer) );
//      else
//        pool.schedule( new FunctionTask(reader) );
//    }
//    pool.joinAll();
//    std::cout << tim << " to execute all " << numTasks << " tasks" << std::endl;
//    TSM_ASSERT_EQUALS( "The writers were all called", shared_data.size(), DATA_SIZE + numTasks/10)
//  }
};
#endif /* MUTEXTEST_H */