//===- llvm/Support/Parallel.cpp - Parallel algorithms --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/Parallel.h"
#include "llvm/Config/llvm-config.h"
#include <atomic>
#include <stack>
#include <thread>
using namespace llvm;

namespace {

/// \brief An abstract class that takes closures and runs them asynchronously.
class Executor {
public:
  virtual ~Executor() = default;
  virtual void add(std::function<void()> func) = 0;

  static Executor *getDefaultExecutor();
};

#if !LLVM_ENABLE_THREADS
class SyncExecutor : public Executor {
public:
  virtual void add(std::function<void()> F) { F(); }
};

Executor *Executor::getDefaultExecutor() {
  static SyncExecutor Exec;
  return &Exec;
}

#elif defined(_MSC_VER)
/// \brief An Executor that runs tasks via ConcRT.
class ConcRTExecutor : public Executor {
  struct Taskish {
    Taskish(std::function<void()> Task) : Task(Task) {}

    std::function<void()> Task;

    static void run(void *P) {
      Taskish *Self = static_cast<Taskish *>(P);
      Self->Task();
      concurrency::Free(Self);
    }
  };

public:
  virtual void add(std::function<void()> F) {
    Concurrency::CurrentScheduler::ScheduleTask(
        Taskish::run, new (concurrency::Alloc(sizeof(Taskish))) Taskish(F));
  }
};

Executor *Executor::getDefaultExecutor() {
  static ConcRTExecutor exec;
  return &exec;
}

#else
/// \brief An implementation of an Executor that runs closures on a thread pool
/// in FILO order.
class ThreadPoolExecutor : public Executor {
public:
  explicit ThreadPoolExecutor(
      unsigned ThreadCount = std::thread::hardware_concurrency())
      : Done(ThreadCount) {
    // Spawn all but one of the threads in another thread as spawning threads
    // can take a while.
    std::thread([&, ThreadCount] {
      for (size_t i = 1; i < ThreadCount; ++i) {
        std::thread([=] { work(); }).detach();
      }
      work();
    }).detach();
  }

  ~ThreadPoolExecutor() override {
    std::unique_lock<std::mutex> Lock(Mutex);
    Stop = true;
    Lock.unlock();
    Cond.notify_all();
    // Wait for ~Latch.
  }

  void add(std::function<void()> F) override {
    std::unique_lock<std::mutex> Lock(Mutex);
    WorkStack.push(F);
    Lock.unlock();
    Cond.notify_one();
  }

private:
  void work() {
    while (true) {
      std::unique_lock<std::mutex> Lock(Mutex);
      Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });
      if (Stop)
        break;
      auto Task = WorkStack.top();
      WorkStack.pop();
      Lock.unlock();
      Task();
    }
    // Signal the Latch; ~Latch (run when Done is destroyed) blocks until
    // every worker has exited.
    Done.dec();
  }

  std::atomic<bool> Stop{false};
  std::stack<std::function<void()>> WorkStack;
  std::mutex Mutex;
  std::condition_variable Cond;
  parallel::detail::Latch Done;
};

Executor *Executor::getDefaultExecutor() {
  static ThreadPoolExecutor exec;
  return &exec;
}
#endif
}

#if LLVM_ENABLE_THREADS
void parallel::detail::TaskGroup::spawn(std::function<void()> F) {
  L.inc();
  Executor::getDefaultExecutor()->add([&, F] {
    F();
    L.dec();
  });
}
#endif