我正试图在C++中实现一种基于 future 的调用机制。虽然这只是一段写得有点匆忙的测试代码，但我打算在我正在开发的语言的运行时中使用类似的机制来实现透明并行——把执行从一个线程移到另一个线程，以完成任务并行的调用。
我已经精简了代码，使它小了一些，但它仍然比较长：
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <future>
#include <iostream>
#include <queue>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>
using namespace std;
using namespace std::chrono;
//------------------------------------------------------------------------------
// Simple locked printer
static std::recursive_mutex print_lock;

/// Base case of the variadic printer: nothing left to print.
inline void print_() {}

/// Print every argument to std::cout as one atomic sequence.
/// The recursive_mutex is acquired at every recursion level, so the
/// outermost call holds it for the whole argument list and concurrent
/// calls cannot interleave their output.  A lock_guard (instead of the
/// original manual lock()/unlock() pair) guarantees the mutex is
/// released even if a stream insertion throws.
template<typename T, typename... Args>
inline void print_(T t, Args... args) {
    std::lock_guard<std::recursive_mutex> guard(print_lock);
    std::cout << t;
    print_(args...);
}
//------------------------------------------------------------------------------
// A task that may be executed either by a pool worker or by the thread
// that waits on it — whoever grabs the mutex first runs it; the loser
// blocks on the mutex until the winner is done.
template<typename R>
class PooledTask {
public:
    explicit PooledTask(function<R()>);
    // Possibly execute the task and return (a reference to) the value.
    // If the task threw, the stored exception is rethrown to every
    // caller instead.
    R &operator()() {
        if(lock.try_lock()) {
            // We own the lock, so nobody is executing the task.
            // Run it now — unless a previous call already did.
            if(!done) {
                try {
                    result = task();
                } catch(...) {
                    // Save the exception so waiters can rethrow it.
                    eptr = current_exception();
                    failed = true;
                }
                done = true;
            }
        } else {
            // Another thread is executing the task right now; block
            // until it releases the mutex, i.e. until it has finished.
            lock.lock();
        }
        lock.unlock();
        // Maybe we got an exception!
        if(failed)
            rethrow_exception(eptr);
        // Otherwise, just return the result.
        return result;
    }
private:
    exception_ptr eptr;
    function<R()> task;
    bool done = false;     // task has been executed (defensive default)
    bool failed = false;   // task threw; eptr holds the exception
    mutex lock;
    R result;              // NOTE: requires R to be default-constructible
};
extern class TaskPool pool;
class TaskPool {
public:
TaskPool() noexcept: TaskPool(thread::hardware_concurrency() - 1) {
return;
};
TaskPool(const TaskPool &) = delete;
TaskPool(TaskPool &&) = delete;
template<typename T>
void push(PooledTask<T> *task) noexcept {
lock_guard<mutex> guard(lock);
builders.push([=] {
try {
(*task)();
} catch(...) {
// Ignore it here! The task will save it. :)
};
});
};
~TaskPool() {
// TODO: wait for all tasks to finish...
};
private:
queue<thread *> threads;
queue<function<void()>> builders;
mutex lock;
TaskPool(signed N) noexcept {
while(N --> 0)
threads.push(new thread([this, N] {
for(;;) {
pop_task();
};
}));
};
void pop_task() noexcept {
lock.lock();
if(builders.size()) {
auto task = builders.front();
builders.pop();
lock.unlock();
task();
} else
lock.unlock();
};
} pool;
// Out-of-class constructor: stores the callable and immediately hands
// this task to the global pool, so a worker may start it concurrently.
// All members are initialized before push(), so a worker racing ahead
// of the constructor body still sees a fully-formed object.
template<typename R>
PooledTask<R>::PooledTask(function<R()> fun):
    task(move(fun)),   // move the by-value parameter instead of copying it
    done(false),
    failed(false)
{
    pool.push(this);
}
// Launch `fun(args...)` as a pooled task and return it.
// NOTE(review): returns an OWNING raw pointer the caller must delete —
// a std::unique_ptr/shared_ptr would be safer; kept for compatibility.
// The original was marked noexcept, but `new` and `bind` can throw
// (e.g. std::bad_alloc), which would have called std::terminate.
template<typename F, typename... Args>
auto byfuture(F fun, Args&&... args) ->
    PooledTask<decltype(fun(args...))> *
{
    using R = decltype(fun(args...));
    // Bind the arguments now; the pool invokes the closure later.
    return new PooledTask<R> {
        bind(fun, forward<Args>(args)...)
    };
}
//------------------------------------------------------------------------------
#include <map>
// Get the current thread id as a small sequential number (first caller
// gets 1, second distinct thread gets 2, ...).  Thread-safe.
static int myid() noexcept {
    static unsigned N = 0;
    static std::map<std::thread::id, unsigned> ids;
    static std::mutex lock;
    // Serialize access to the shared counter and map.
    std::lock_guard<std::mutex> guard(lock);
    // operator[] default-inserts 0 for an unseen thread; keep the
    // reference so we do one lookup instead of the original three.
    auto &id = ids[std::this_thread::get_id()];
    if(id == 0)
        id = ++N;
    return id;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// The fibonacci test implementation
// Parallel fibonacci: fork both subproblems onto the pool, then join.
// `parent` is only used for the trace output.
int future_fib(int x, int parent) {
    // Base case: fib(1) = fib(2) = 1.
    if(x < 3)
        return 1;
    print_("future_fib(", x, ")", " on thread ", myid(),
           ", asked by thread ", parent, "\n");
    // Spawn the two halves; each may run on another pool thread.
    auto left  = byfuture(future_fib, x - 1, myid());
    auto right = byfuture(future_fib, x - 2, myid());
    // Joining blocks (or executes the task ourselves if it has not
    // started yet).
    int sum = (*left)() + (*right)();
    delete left;
    delete right;
    return sum;
}
//------------------------------------------------------------------------------
int main() {
// Force main thread to get id 1
myid();
// Get task
auto f = byfuture(future_fib, 8, myid());
// Make sure it starts on the task pool
this_thread::sleep_for(seconds(1));
// Blocks
(*f)();
// Simply wait to be sure all threads are clean
this_thread::sleep_for(seconds(2));
//
return EXIT_SUCCESS;
};
此程序的输出如下（我的机器是四核，所以线程池中有 3 个线程）：
future_fib(8) on thread 2, asked by thread 1
future_fib(7) on thread 3, asked by thread 2
future_fib(6) on thread 4, asked by thread 2
future_fib(6) on thread 3, asked by thread 3
future_fib(5) on thread 4, asked by thread 4
future_fib(5) on thread 3, asked by thread 3
future_fib(4) on thread 4, asked by thread 4
future_fib(4) on thread 3, asked by thread 3
future_fib(3) on thread 4, asked by thread 4
future_fib(3) on thread 3, asked by thread 3
future_fib(3) on thread 4, asked by thread 4
future_fib(3) on thread 3, asked by thread 3
future_fib(4) on thread 4, asked by thread 4
future_fib(4) on thread 3, asked by thread 3
future_fib(3) on thread 4, asked by thread 4
future_fib(3) on thread 3, asked by thread 3
future_fib(5) on thread 3, asked by thread 3
future_fib(4) on thread 3, asked by thread 3
future_fib(3) on thread 3, asked by thread 3
future_fib(3) on thread 3, asked by thread 3
这个实现本身甚至比普通的（串行）斐波那契函数还要慢。
问题在这里：当线程池运行 fib(8) 时，它会创建两个将在接下来的线程上运行的任务；但当执行到 auto res = (*f1)() + (*f2)(); 时，两个任务都已经开始运行了，于是当前线程只能阻塞在 f1 上（f1 运行在线程 3 上）。
为了提高速度，我需要的是：让线程 2 在等待 f1 时不要睡眠阻塞，而是去接手线程 3 本来要执行的其他任务，使自己随时可以接受新任务——这样就不会有线程在还有计算要做时闲置睡觉。
这篇文章 http://bartoszmilewski.com/2011/10/10/async-tasks-in-c11-not-quite-there-yet/ 提到有必要做我想做的这件事，但没有说明具体该怎么实现。
我的疑问是:我怎么可能做到这一点?
有没有其他的选择做我想要的?
[Threading Building Blocks(TBB)library](https://www.threadingbuildingblocks.org/)怎么样?它提供了带有线程池的并发任务系统。 – yohjp
看看C++ 1z的'.then()'方案吗? 'return pooled_fib(x-2).then([x](auto && r1){auto r2 = pooled_fib(x-1); return r1.get()+ r2.get();});'或者somesuch。 – Yakk