refactor concurrent module code

3 changed files with 58 additions and 35 deletions


@@ -23,16 +23,10 @@
#include "piblockingdequeue.h"
#include <atomic>
/**
* @brief Thread pools address two different problems: they usually provide improved performance when executing large
* numbers of asynchronous tasks, due to reduced per-task invocation overhead, and they provide a means of bounding and
* managing the resources, including threads, consumed when executing a collection of tasks.
*
* TODO adapt documentation to template
*/
template <typename Thread_, typename Dequeue_>
class PIThreadPoolExecutorTemplate {
public:
NO_COPY_CLASS(PIThreadPoolExecutorTemplate)
explicit PIThreadPoolExecutorTemplate(size_t corePoolSize = 1) : isShutdown_(false) { makePool(corePoolSize); }
virtual ~PIThreadPoolExecutorTemplate() {
@@ -40,22 +34,10 @@ public:
while (threadPool.size() > 0) delete threadPool.take_back();
}
/**
* @brief Executes the given task sometime in the future. The task executes in an existing pooled thread. If the task
* cannot be submitted for execution, either because this executor has been shut down or because its capacity has been
* reached, the task is silently discarded.
*
* @param runnable non-empty function to be executed by the thread pool
*/
void execute(const std::function<void()> & runnable) {
if (!isShutdown_) taskQueue.offer(runnable);
}
/**
* @brief Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be
* accepted. Invocation has no additional effect if already shut down. This method does not wait for previously
* submitted tasks to complete execution. Use awaitTermination to do that.
*/
void shutdown() {
isShutdown_ = true;
}
@@ -107,5 +89,42 @@ protected:
typedef PIThreadPoolExecutorTemplate<PIThread, PIBlockingDequeue<std::function<void()> > > PIThreadPoolExecutor;
#ifdef DOXYGEN
/**
* @brief Thread pools address two different problems: they usually provide improved performance when executing large
* numbers of asynchronous tasks, due to reduced per-task invocation overhead, and they provide a means of bounding and
* managing the resources, including threads, consumed when executing a collection of tasks.
*
* TODO adapt documentation to template
*/
class PIThreadPoolExecutor {
public:
explicit PIThreadPoolExecutor(size_t corePoolSize);
virtual ~PIThreadPoolExecutor();
/**
* @brief Executes the given task sometime in the future. The task executes in an existing pooled thread. If the task
* cannot be submitted for execution, either because this executor has been shut down or because its capacity has been
* reached, the task is silently discarded.
*
* @param runnable non-empty function to be executed by the thread pool
*/
void execute(const std::function<void()> & runnable);
/**
* @brief Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be
* accepted. Invocation has no additional effect if already shut down. This method does not wait for previously
* submitted tasks to complete execution. Use awaitTermination to do that.
*/
void shutdown();
void shutdownNow();
bool isShutdown() const;
bool awaitTermination(int timeoutMs);
};
#endif //DOXYGEN
#endif //PIEXECUTOR_H
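
A minimal usage sketch of the PIThreadPoolExecutor API shown in this diff. It assumes the header is named piexecutor.h (matching its include guard); the pool size, task bodies, and timeout value are illustrative, not taken from the commit.

#include "piexecutor.h"  // assumed header name, matching the PIEXECUTOR_H guard
#include <cstdio>

int main() {
    // Pool with 4 worker threads (corePoolSize chosen for illustration).
    PIThreadPoolExecutor pool(4);

    // Submit a few tasks; each runs later on one of the pooled threads.
    for (int i = 0; i < 8; ++i) {
        pool.execute([i]() { std::printf("task %d done\n", i); });
    }

    // Stop accepting new tasks, then wait up to 1000 ms for the
    // already-submitted tasks to finish, as the shutdown() docs suggest.
    pool.shutdown();
    pool.awaitTermination(1000);
    return 0;
}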