refactor concurrent module code

3 changed files with 58 additions and 35 deletions

View File

@@ -23,16 +23,10 @@
 #include "piblockingdequeue.h"
 #include <atomic>
 
-/**
- * @brief Thread pools address two different problems: they usually provide improved performance when executing large
- * numbers of asynchronous tasks, due to reduced per-task invocation overhead, and they provide a means of bounding and
- * managing the resources, including threads, consumed when executing a collection of tasks.
- *
- * TODO adapt documentation to template
- */
 template <typename Thread_, typename Dequeue_>
 class PIThreadPoolExecutorTemplate {
 public:
+    NO_COPY_CLASS(PIThreadPoolExecutorTemplate)
     explicit PIThreadPoolExecutorTemplate(size_t corePoolSize = 1) : isShutdown_(false) { makePool(corePoolSize); }
 
     virtual ~PIThreadPoolExecutorTemplate() {
@@ -40,22 +34,10 @@ public:
         while (threadPool.size() > 0) delete threadPool.take_back();
     }
 
-    /**
-     * @brief Executes the given task sometime in the future. The task execute in an existing pooled thread. If the task
-     * cannot be submitted for execution, either because this executor has been shutdown or because its capacity has been
-     * reached.
-     *
-     * @param runnable not empty function for thread pool execution
-     */
     void execute(const std::function<void()> & runnable) {
         if (!isShutdown_) taskQueue.offer(runnable);
     }
 
-    /**
-     * @brief Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be
-     * accepted. Invocation has no additional effect if already shut down. This method does not wait for previously
-     * submitted tasks to complete execution. Use awaitTermination to do that.
-     */
     void shutdown() {
         isShutdown_ = true;
     }
@@ -107,5 +89,42 @@ protected:
 typedef PIThreadPoolExecutorTemplate<PIThread, PIBlockingDequeue<std::function<void()> > > PIThreadPoolExecutor;
 
+#ifdef DOXYGEN
+/**
+ * @brief Thread pools address two different problems: they usually provide improved performance when executing large
+ * numbers of asynchronous tasks, due to reduced per-task invocation overhead, and they provide a means of bounding and
+ * managing the resources, including threads, consumed when executing a collection of tasks.
+ *
+ * TODO adapt documentation to template
+ */
+class PIThreadPoolExecutor {
+public:
+    explicit PIThreadPoolExecutor(size_t corePoolSize);
+    virtual ~PIThreadPoolExecutor();
+
+    /**
+     * @brief Executes the given task sometime in the future. The task executes in an existing pooled thread. If the task
+     * cannot be submitted for execution, either because this executor has been shut down or because its capacity has been
+     * reached, the task is silently discarded.
+     *
+     * @param runnable non-empty function to be executed by the thread pool
+     */
+    void execute(const std::function<void()> & runnable);
+
+    /**
+     * @brief Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be
+     * accepted. Invocation has no additional effect if already shut down. This method does not wait for previously
+     * submitted tasks to complete execution. Use awaitTermination to do that.
+     */
+    void shutdown();
+    void shutdownNow();
+    bool isShutdown() const;
+    bool awaitTermination(int timeoutMs);
+};
+#endif //DOXYGEN
 
 #endif //PIEXECUTOR_H
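
Aside (not part of the commit): a minimal usage sketch of the PIThreadPoolExecutor API documented above. The header name piexecutor.h is an assumption inferred from the PIEXECUTOR_H include guard; the pool size, timeout, and task bodies are illustrative only.

```cpp
#include "piexecutor.h"   // assumed header name, inferred from the PIEXECUTOR_H guard
#include <iostream>

int main() {
    PIThreadPoolExecutor executor(4);        // core pool of 4 worker threads
    for (int i = 0; i < 8; ++i) {
        executor.execute([i]() {             // queued task runs on a pooled thread
            std::cout << "task " << i << " done\n";
        });
    }
    executor.shutdown();                     // stop accepting new tasks
    executor.awaitTermination(1000);         // wait up to 1000 ms for queued tasks
    return 0;
}
```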

View File

@@ -71,10 +71,6 @@ TEST(ExecutorUnitTest, is_corePool_started) {
             .WillOnce(Return(true));
     });
     EXPECT_EQ(THREAD_COUNT, executor.getThreadPool()->size());
-    executor.getThreadPool()->forEach([](MockThread* thread){
-        EXPECT_CALL(*thread, stop())
-            .WillOnce(Return());
-    });
 }
 
 TEST(ExecutorUnitTest, execute_is_added_to_taskQueue) {
@@ -94,9 +90,19 @@ TEST(ExecutorUnitTest, is_corePool_execute_queue_elements) {
     executor.getThreadPool()->at(0)->runnnable();
     ASSERT_TRUE(is_executed);
 }
-/* FIXME
 TEST(ExecutorUnitTest, shutdown_is_stop_threads) {
-    PIThreadPoolExecutorMoc executor(THREAD_COUNT);
-    executor.shutdown();
+    // Exclude the stop calls triggered when the executor is deleted
+    auto* executor = new PIThreadPoolExecutorMoc(THREAD_COUNT, [](MockThread* thread){
+        testing::Mock::AllowLeak(thread);
+        EXPECT_CALL(*thread, stop())
+            .WillOnce(Return());
+    });
+    testing::Mock::AllowLeak(executor);
+    testing::Mock::AllowLeak(executor->getTaskQueue());
+    EXPECT_CALL(*executor->getTaskQueue(), poll(Ge(0)))
+        .WillRepeatedly(Return(std::function<VoidFunc()>()));
+    executor->shutdown();
+    executor->getThreadPool()->forEach([](MockThread* thread){ thread->runnnable(); });
 }
-*/
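
Aside (not part of the commit): the AllowLeak pattern introduced in the rewritten shutdown test, shown with a self-contained, hypothetical mock; Worker and MockWorker are illustrative names, not types from this repository. testing::Mock::AllowLeak suppresses googlemock's leaked-mock report for objects that are intentionally never destroyed before the test ends.

```cpp
#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Hypothetical interface used only for this illustration.
class Worker {
public:
    virtual ~Worker() = default;
    virtual void stop() = 0;
};

class MockWorker : public Worker {
public:
    MOCK_METHOD(void, stop, (), (override));
};

TEST(AllowLeakSketch, expectation_on_a_mock_that_is_never_deleted) {
    auto* worker = new MockWorker();
    testing::Mock::AllowLeak(worker);              // mock is deliberately leaked
    EXPECT_CALL(*worker, stop()).WillOnce(testing::Return());
    worker->stop();                                // satisfies the expectation
}
```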

View File

@@ -1,16 +1,14 @@
 #include "gtest/gtest.h"
-#include "gmock/gmock.h"
-#include "piconditionvar.h"
 #include "pithread.h"
 #include "testutil.h"
 
-class ConditionLock : public ::testing::Test, public TestUtil {
+class Mutex : public ::testing::Test, public TestUtil {
 public:
     PIMutex* m = new PIMutex();
 };
 
-TEST_F(ConditionLock, lock_is_protect) {
+TEST_F(Mutex, lock_is_protect) {
     m->lock();
     bool* isProtect = new bool(true);
@@ -22,7 +20,7 @@ TEST_F(ConditionLock, lock_is_protect) {
     ASSERT_TRUE(*isProtect);
 }
 
-TEST_F(ConditionLock, unlock_is_release) {
+TEST_F(Mutex, unlock_is_release) {
     m->lock();
     bool* isReleased = new bool(false);
     m->unlock();
@@ -35,7 +33,7 @@ TEST_F(ConditionLock, unlock_is_release) {
     ASSERT_TRUE(*isReleased);
 }
 
-TEST_F(ConditionLock, tryLock_is_false_when_locked) {
+TEST_F(Mutex, tryLock_is_false_when_locked) {
     createThread([&](){
         m->lock();
         piMSleep(WAIT_THREAD_TIME_MS);
@@ -43,11 +41,11 @@ TEST_F(ConditionLock, tryLock_is_false_when_locked) {
     ASSERT_FALSE(m->tryLock());
 }
 
-TEST_F(ConditionLock, tryLock_is_true_when_unlocked) {
+TEST_F(Mutex, tryLock_is_true_when_unlocked) {
     ASSERT_TRUE(m->tryLock());
 }
 
-TEST_F(ConditionLock, tryLock_is_recursive_lock_enable) {
+TEST_F(Mutex, tryLock_is_recursive_lock_enable) {
     m->lock();
     ASSERT_TRUE(m->tryLock());
 }