//===- llvm/Support/Parallel.h - Parallel algorithms ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PARALLEL_H
#define LLVM_SUPPORT_PARALLEL_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Threading.h"

#include <algorithm>
#include <condition_variable>
#include <functional>
#include <mutex>

namespace llvm {

namespace parallel {

// Strategy for the default executor used by the parallel routines provided by
// this file. It defaults to using all hardware threads and should be
// initialized before the first use of parallel routines.
extern ThreadPoolStrategy strategy;
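//
// A minimal configuration sketch (the thread count of 4 is illustrative only):
//
//   // Must run before the first call into any of the parallel routines below.
//   llvm::parallel::strategy = llvm::hardware_concurrency(4);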

namespace detail {

#if LLVM_ENABLE_THREADS

class Latch {
  uint32_t Count;
  mutable std::mutex Mutex;
  mutable std::condition_variable Cond;

public:
  explicit Latch(uint32_t Count = 0) : Count(Count) {}
  ~Latch() {
    // Ensure at least that sync() was called.
    assert(Count == 0);
  }

  void inc() {
    std::lock_guard<std::mutex> lock(Mutex);
    ++Count;
  }

  void dec() {
    std::lock_guard<std::mutex> lock(Mutex);
    if (--Count == 0)
      Cond.notify_all();
  }

  void sync() const {
    std::unique_lock<std::mutex> lock(Mutex);
    Cond.wait(lock, [&] { return Count == 0; });
  }
};

class TaskGroup {
  Latch L;
  bool Parallel;

public:
  TaskGroup();
  ~TaskGroup();

  void spawn(std::function<void()> f);

  void sync() const { L.sync(); }
};
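// Typical TaskGroup usage (a minimal sketch; doLeft and doRight are
// illustrative callables):
//
//   TaskGroup TG;
//   TG.spawn([&] { doLeft(); }); // Queue one half of the work.
//   doRight();                   // Do the other half on the current thread.
//   TG.sync();                   // Block until every spawned task has finished.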

const ptrdiff_t MinParallelSize = 1024;

/// Inclusive median.
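/// For example, over the range {3, 9, 5} with std::less this returns the
/// iterator to 5, the median of the first, middle, and last elements.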
template <class RandomAccessIterator, class Comparator>
RandomAccessIterator medianOf3(RandomAccessIterator Start,
                               RandomAccessIterator End,
                               const Comparator &Comp) {
  RandomAccessIterator Mid = Start + (std::distance(Start, End) / 2);
  return Comp(*Start, *(End - 1))
             ? (Comp(*Mid, *(End - 1)) ? (Comp(*Start, *Mid) ? Mid : Start)
                                       : End - 1)
             : (Comp(*Mid, *Start) ? (Comp(*(End - 1), *Mid) ? Mid : End - 1)
                                   : Start);
}

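// Parallel quicksort: partitions [Start, End) around a median-of-3 pivot,
// spawns the left half onto the TaskGroup, and recurses into the right half on
// the current thread. Depth bounds the recursion; once it reaches zero, or the
// range drops below MinParallelSize, the range is sorted sequentially.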
template <class RandomAccessIterator, class Comparator>
void parallel_quick_sort(RandomAccessIterator Start, RandomAccessIterator End,
                         const Comparator &Comp, TaskGroup &TG, size_t Depth) {
  // Do a sequential sort for small inputs.
  if (std::distance(Start, End) < detail::MinParallelSize || Depth == 0) {
    llvm::sort(Start, End, Comp);
    return;
  }

  // Partition.
  auto Pivot = medianOf3(Start, End, Comp);
  // Move Pivot to End.
  std::swap(*(End - 1), *Pivot);
  Pivot = std::partition(Start, End - 1, [&Comp, End](decltype(*Start) V) {
    return Comp(V, *(End - 1));
  });
  // Move Pivot to middle of partition.
  std::swap(*Pivot, *(End - 1));

  // Recurse.
  TG.spawn([=, &Comp, &TG] {
    parallel_quick_sort(Start, Pivot, Comp, TG, Depth - 1);
  });
  parallel_quick_sort(Pivot + 1, End, Comp, TG, Depth - 1);
}

template <class RandomAccessIterator, class Comparator>
void parallel_sort(RandomAccessIterator Start, RandomAccessIterator End,
                   const Comparator &Comp) {
  TaskGroup TG;
  parallel_quick_sort(Start, End, Comp, TG,
                      llvm::Log2_64(std::distance(Start, End)) + 1);
}

// TaskGroup has a relatively high overhead, so we want to reduce
// the number of spawn() calls. We'll create up to 1024 tasks here.
// (Note that 1024 is an arbitrary number. This code probably needs
// improving to take the number of available cores into account.)
enum { MaxTasksPerGroup = 1024 };

template <class IterTy, class ResultTy, class ReduceFuncTy,
          class TransformFuncTy>
ResultTy parallel_transform_reduce(IterTy Begin, IterTy End, ResultTy Init,
                                   ReduceFuncTy Reduce,
                                   TransformFuncTy Transform) {
  // Limit the number of tasks to MaxTasksPerGroup to limit job scheduling
  // overhead on large inputs.
  size_t NumInputs = std::distance(Begin, End);
  if (NumInputs == 0)
    return std::move(Init);
  size_t NumTasks = std::min(static_cast<size_t>(MaxTasksPerGroup), NumInputs);
  std::vector<ResultTy> Results(NumTasks, Init);
  {
    // Each task processes either TaskSize or TaskSize+1 inputs. Any inputs
    // remaining after dividing them equally amongst tasks are distributed as
    // one extra input over the first tasks.
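    // For example, with 10 inputs and 3 tasks, TaskSize is 3 and
    // RemainingInputs is 1, so the tasks process 4, 3, and 3 inputs.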
    TaskGroup TG;
    size_t TaskSize = NumInputs / NumTasks;
    size_t RemainingInputs = NumInputs % NumTasks;
    IterTy TBegin = Begin;
    for (size_t TaskId = 0; TaskId < NumTasks; ++TaskId) {
      IterTy TEnd = TBegin + TaskSize + (TaskId < RemainingInputs ? 1 : 0);
      TG.spawn([=, &Transform, &Reduce, &Results] {
        // Reduce the result of transformation eagerly within each task.
        ResultTy R = Init;
        for (IterTy It = TBegin; It != TEnd; ++It)
          R = Reduce(R, Transform(*It));
        Results[TaskId] = R;
      });
      TBegin = TEnd;
    }
    assert(TBegin == End);
  }

  // Do a final reduction. There are at most 1024 tasks, so this only adds
  // constant single-threaded overhead for large inputs. Hopefully most
  // reductions are cheaper than the transformation.
  ResultTy FinalResult = std::move(Results.front());
  for (ResultTy &PartialResult :
       makeMutableArrayRef(Results.data() + 1, Results.size() - 1))
    FinalResult = Reduce(FinalResult, std::move(PartialResult));
  return std::move(FinalResult);
}

#endif

} // namespace detail
} // namespace parallel

template <class RandomAccessIterator,
          class Comparator = std::less<
              typename std::iterator_traits<RandomAccessIterator>::value_type>>
void parallelSort(RandomAccessIterator Start, RandomAccessIterator End,
                  const Comparator &Comp = Comparator()) {
#if LLVM_ENABLE_THREADS
  if (parallel::strategy.ThreadsRequested != 1) {
    parallel::detail::parallel_sort(Start, End, Comp);
    return;
  }
#endif
  llvm::sort(Start, End, Comp);
}
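// Example usage (a minimal sketch):
//
//   std::vector<int> V = {3, 1, 2};
//   llvm::parallelSort(V.begin(), V.end());                      // ascending
//   llvm::parallelSort(V.begin(), V.end(), std::greater<int>()); // descending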

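// Calls Fn(I) for each index I in the half-open range [Begin, End),
// potentially in parallel and in no particular order.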
void parallelForEachN(size_t Begin, size_t End, function_ref<void(size_t)> Fn);

template <class IterTy, class FuncTy>
void parallelForEach(IterTy Begin, IterTy End, FuncTy Fn) {
  parallelForEachN(0, End - Begin, [&](size_t I) { Fn(Begin[I]); });
}
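// Example usage (a minimal sketch):
//
//   std::vector<int> V(100, 1);
//   llvm::parallelForEach(V.begin(), V.end(), [](int &X) { X *= 2; });
//   llvm::parallelForEachN(0, V.size(), [&](size_t I) { ++V[I]; });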

template <class IterTy, class ResultTy, class ReduceFuncTy,
          class TransformFuncTy>
ResultTy parallelTransformReduce(IterTy Begin, IterTy End, ResultTy Init,
                                 ReduceFuncTy Reduce,
                                 TransformFuncTy Transform) {
#if LLVM_ENABLE_THREADS
  if (parallel::strategy.ThreadsRequested != 1) {
    return parallel::detail::parallel_transform_reduce(Begin, End, Init, Reduce,
                                                       Transform);
  }
#endif
  for (IterTy I = Begin; I != End; ++I)
    Init = Reduce(std::move(Init), Transform(*I));
  return std::move(Init);
}
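// Example usage (a minimal sketch): compute a parallel sum of squares.
//
//   std::vector<int> V = {1, 2, 3, 4};
//   int SumOfSquares = llvm::parallelTransformReduce(
//       V.begin(), V.end(), 0,
//       [](int A, int B) { return A + B; }, // Reduce
//       [](int X) { return X * X; });       // Transform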

// Range wrappers.
template <class RangeTy,
          class Comparator = std::less<decltype(*std::begin(RangeTy()))>>
void parallelSort(RangeTy &&R, const Comparator &Comp = Comparator()) {
  parallelSort(std::begin(R), std::end(R), Comp);
}

template <class RangeTy, class FuncTy>
void parallelForEach(RangeTy &&R, FuncTy Fn) {
  parallelForEach(std::begin(R), std::end(R), Fn);
}

template <class RangeTy, class ResultTy, class ReduceFuncTy,
          class TransformFuncTy>
ResultTy parallelTransformReduce(RangeTy &&R, ResultTy Init,
                                 ReduceFuncTy Reduce,
                                 TransformFuncTy Transform) {
  return parallelTransformReduce(std::begin(R), std::end(R), Init, Reduce,
                                 Transform);
}
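// Example usage of the range forms (a minimal sketch):
//
//   std::vector<int> V = {1, 2, 3};
//   llvm::parallelForEach(V, [](int &X) { X *= 2; });
//   int Sum = llvm::parallelTransformReduce(
//       V, 0, [](int A, int B) { return A + B; }, [](int X) { return X; });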

// Parallel for-each, but with error handling.
template <class RangeTy, class FuncTy>
Error parallelForEachError(RangeTy &&R, FuncTy Fn) {
  // The transform_reduce algorithm requires that the initial value be copyable.
  // Error objects are uncopyable. We only need to copy initial success values,
  // so work around this mismatch via the C API. The C API represents success
// values with a null pointer. joinErrors discards null values and joins
// multiple errors into an ErrorList.
  return unwrap(parallelTransformReduce(
      std::begin(R), std::end(R), wrap(Error::success()),
      [](LLVMErrorRef Lhs, LLVMErrorRef Rhs) {
        return wrap(joinErrors(unwrap(Lhs), unwrap(Rhs)));
      },
      [&Fn](auto &&V) { return wrap(Fn(V)); }));
}
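// Example usage (a minimal sketch; Paths and verifyFile are illustrative
// names, with verifyFile returning llvm::Error):
//
//   std::vector<std::string> Paths = {"a.o", "b.o"};
//   if (llvm::Error E = llvm::parallelForEachError(
//           Paths, [](const std::string &P) { return verifyFile(P); }))
//     return E; // Failures from all tasks, joined (possibly into an ErrorList).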

} // namespace llvm

#endif // LLVM_SUPPORT_PARALLEL_H