PIThreadPoolExecutor & PIBlockingDequeue improvements

- add support for move & copy semantics
- introduce submit() method for the executor, returning a future result
8 changed files with 193 additions and 83 deletions
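A minimal usage sketch of the new submit()/execute() API (not part of the commit; the header name and the pool-size constructor are assumptions, the call signatures follow the diff below):

#include "pithreadpoolexecutor.h"  // assumed header name
#include <future>
#include <iostream>

int main() {
    PIThreadPoolExecutor executor(4);  // assumed: constructor taking the number of worker threads

    // submit() wraps the callable in a std::packaged_task and returns its std::future
    std::future<int> answer = executor.submit([] { return 6 * 7; });

    // execute() enqueues a fire-and-forget task with no result
    executor.execute([] { std::cout << "background work" << std::endl; });

    std::cout << answer.get() << std::endl;  // prints 42
    executor.shutdown();
    return 0;
}

Note that when the executor is already shut down, submit() returns a default-constructed (invalid) std::future, so callers that may race with shutdown() should check valid() before calling get().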


@@ -22,8 +22,47 @@
#include "piblockingdequeue.h"
#include <atomic>
#include <future>
template <typename Thread_, typename Dequeue_>
/**
* @brief Type-erasing wrapper for arbitrary callables; movable but not copyable, so it can
* hold move-only task types such as std::packaged_task, which std::function cannot store.
* @note Source: Anthony Williams, "C++ Concurrency in Action: Practical Multithreading"
* (Russian edition, trans. A. A. Slinkin, Moscow: DMK Press, 2012, 672 pp.), page 387.
*/
class FunctionWrapper {
struct ImplBase {
virtual void call() = 0;
virtual ~ImplBase() = default;
};
std::unique_ptr<ImplBase> impl;
template<typename F>
struct ImplType: ImplBase {
F f;
explicit ImplType(F&& f): f(std::forward<F>(f)) {}
void call() final { f(); }
};
public:
template<typename F, typename = typename std::enable_if<!std::is_same<typename std::decay<F>::type, FunctionWrapper>::value>::type>
explicit FunctionWrapper(F&& f): impl(new ImplType<F>(std::forward<F>(f))) {}
void operator()() { impl->call(); }
explicit operator bool() const noexcept { return static_cast<bool>(impl); }
FunctionWrapper() = default;
FunctionWrapper(FunctionWrapper&& other) noexcept : impl(std::move(other.impl)) {}
FunctionWrapper& operator=(FunctionWrapper&& other) noexcept {
impl = std::move(other.impl);
return *this;
}
FunctionWrapper(const FunctionWrapper& other) = delete;
FunctionWrapper& operator=(const FunctionWrapper&) = delete;
};
template <typename Thread_, template<typename> class Dequeue_>
class PIThreadPoolExecutorTemplate {
public:
NO_COPY_CLASS(PIThreadPoolExecutorTemplate)
@@ -34,8 +73,27 @@ public:
while (threadPool.size() > 0) delete threadPool.take_back();
}
void execute(const std::function<void()> & runnable) {
if (!isShutdown_) taskQueue.offer(runnable);
template<typename FunctionType>
std::future<typename std::result_of<FunctionType()>::type> submit(FunctionType&& callable) {
typedef typename std::result_of<FunctionType()>::type ResultType;
if (!isShutdown_) {
std::packaged_task<ResultType()> callable_task(std::forward<FunctionType>(callable));
auto future = callable_task.get_future();
FunctionWrapper functionWrapper(std::move(callable_task));
taskQueue.offer(std::move(functionWrapper));
return future;
} else {
return std::future<ResultType>();
}
}
template<typename FunctionType>
void execute(FunctionType&& runnable) {
if (!isShutdown_) {
FunctionWrapper function_wrapper(std::forward<FunctionType>(runnable));
taskQueue.offer(std::move(function_wrapper));
}
}
void shutdown() {
@@ -63,15 +121,15 @@ public:
protected:
std::atomic_bool isShutdown_;
Dequeue_ taskQueue;
Dequeue_<FunctionWrapper> taskQueue;
PIVector<Thread_*> threadPool;
template<typename Function>
PIThreadPoolExecutorTemplate(size_t corePoolSize, Function onBeforeStart) : isShutdown_(false) {
makePool(corePoolSize, onBeforeStart);
PIThreadPoolExecutorTemplate(size_t corePoolSize, Function&& onBeforeStart) : isShutdown_(false) {
makePool(corePoolSize, std::forward<Function>(onBeforeStart));
}
void makePool(size_t corePoolSize, std::function<void(Thread_*)> onBeforeStart = [](Thread_*){}) {
void makePool(size_t corePoolSize, std::function<void(Thread_*)>&& onBeforeStart = [](Thread_*){}) {
for (size_t i = 0; i < corePoolSize; ++i) {
auto* thread = new Thread_([&, i](){
auto runnable = taskQueue.poll(100);
@@ -87,7 +145,7 @@ protected:
}
};
typedef PIThreadPoolExecutorTemplate<PIThread, PIBlockingDequeue<std::function<void()> > > PIThreadPoolExecutor;
typedef PIThreadPoolExecutorTemplate<PIThread, PIBlockingDequeue> PIThreadPoolExecutor;
#ifdef DOXYGEN
/**