[C++] Implementing a Thread Pool in C++11

#include <iostream>
#include <thread>
#include <mutex>
#include <queue>
#include <vector>
#include <functional>
#include <condition_variable>
#include <atomic>
#include <chrono>
#include <future>
class ThreadPool
{
private:
    std::queue<std::function<void()>> m_tasks;   // task queue
    std::vector<std::thread> m_threads;          // worker threads
    std::thread m_threadManagement;              // manager thread
    std::condition_variable cvTasksNotEmpty;     // signals queued work, shutdown, or an exit request
    std::mutex mtxTasks;                         // protects m_tasks
    std::mutex mtxThreads;                       // protects m_threads
    std::mutex mtxPoolFields;
    bool m_shutdown;
    int m_numMinThreads;
    int m_numMaxThreads;
    std::atomic<int> m_numCurThreads;            // current number of worker threads
    std::atomic<int> m_numBusyThreads;           // workers currently running a task
    std::atomic<int> m_numExitThreads;           // workers the manager has asked to exit
    const int THREADMANAGENUM = 2;               // threads added or removed per manager pass
public:
    ThreadPool(int min, int max) : m_numMinThreads(min < 1 ? 1 : min), m_numMaxThreads(max)
    {
        m_shutdown = false;
        m_numCurThreads = m_numMinThreads;
        m_numBusyThreads = 0;
        m_numExitThreads = 0;
        {
            std::unique_lock<std::mutex> ulThreads(mtxThreads);
            for (int i = 0; i < m_numMinThreads; i++)
            {
                m_threads.emplace_back([this]() {
                    threadTask();
                });
            }
        }
        // start the manager thread last, after every field has been initialized
        m_threadManagement = std::thread(&ThreadPool::threadManage, this);
    }
    ThreadPool(const ThreadPool& another) = delete;
    ThreadPool& operator=(const ThreadPool& another) = delete;
    ~ThreadPool()
    {
        // destroy the pool and release its resources
        {
            std::unique_lock<std::mutex> ulTasks(mtxTasks);
            m_shutdown = true;
        }
        cvTasksNotEmpty.notify_all(); // the pool is shutting down: wake every waiting thread so it can exit
        for (auto& item : m_threads)
        {
            item.join();              // wait for every worker thread to return
        }
        m_threadManagement.join();    // wait for the manager thread to finish
    }
    // worker thread loop
    void threadTask()
    {
        while (true)
        {
            std::unique_lock<std::mutex> ulTasks(mtxTasks);
            cvTasksNotEmpty.wait(ulTasks, [this]() {
                // also wake up on an exit request, otherwise idle threads can never be reclaimed
                return !m_tasks.empty() || m_shutdown || m_numExitThreads > 0;
            });
            if (m_numExitThreads > 0)
            {
                m_numExitThreads.fetch_sub(1, std::memory_order_relaxed);
                if (m_numCurThreads > m_numMinThreads)
                {
                    m_numCurThreads.fetch_sub(1, std::memory_order_relaxed);
                    // terminate this thread: detach it and remove it from m_threads
                    std::unique_lock<std::mutex> ulThreads(mtxThreads);
                    for (auto iter = m_threads.begin(); iter != m_threads.end();)
                    {
                        if (iter->get_id() == std::this_thread::get_id())
                        {
                            iter->detach();
                            iter = m_threads.erase(iter);
                        }
                        else
                        {
                            ++iter;
                        }
                    }
                    return;
                }
            }
            // if the pool is shutting down, the manager stops adding threads, everything left in
            // m_threads is joined in the destructor, and an idle thread can simply return here
            if (m_shutdown && m_tasks.empty())
            {
                return;
            }
            // the queue may still be empty if this thread was only woken for the exit check above
            if (m_tasks.empty())
            {
                continue;
            }
            // take a task and run it
            std::function<void()> task(std::move(m_tasks.front()));
            m_tasks.pop();
            std::cout << "threadID=" << std::this_thread::get_id() << ",start working..." << std::endl;
            ulTasks.unlock();
            m_numBusyThreads.fetch_add(1, std::memory_order_relaxed);
            task(); // run the task
            m_numBusyThreads.fetch_sub(1, std::memory_order_relaxed);
            std::cout << "threadID=" << std::this_thread::get_id() << ",end working..." << std::endl; // log task completion
        }
    }
    // called from the main thread to add a task (producer side)
    template<typename F, typename... Args>
    void enqueue(F&& f, Args&&... args)
    {
        // bind the callable and its arguments into a void() wrapper so m_tasks does not need to be generic
        auto task = std::bind(std::forward<F>(f), std::forward<Args>(args)...);
        {
            std::unique_lock<std::mutex> ulTasks(mtxTasks);
            m_tasks.emplace(std::move(task));
        }
        cvTasksNotEmpty.notify_one();
    }
    // manager thread routine
    static void threadManage(ThreadPool* poolInstance);
};
void ThreadPool::threadManage(ThreadPool* poolInstance)
{
    while (!poolInstance->m_shutdown)
    {
        std::this_thread::sleep_for(std::chrono::seconds(3));
        std::unique_lock<std::mutex> ulTasks(poolInstance->mtxTasks);
        int numTasks = static_cast<int>(poolInstance->m_tasks.size());
        ulTasks.unlock();
        // grow: add threads while there are more queued tasks than threads
        if (poolInstance->m_numCurThreads < poolInstance->m_numMaxThreads && poolInstance->m_numCurThreads < numTasks)
        {
            for (int i = 0; i < poolInstance->THREADMANAGENUM && poolInstance->m_numCurThreads < poolInstance->m_numMaxThreads; i++)
            {
                std::unique_lock<std::mutex> ulThreads(poolInstance->mtxThreads);
                poolInstance->m_threads.emplace_back([poolInstance]() {
                    poolInstance->threadTask();
                });
                poolInstance->m_numCurThreads.fetch_add(1, std::memory_order_relaxed); // count each thread as it is created
            }
        }
        // shrink: if fewer than half of the threads are busy, ask some idle threads to exit
        if (poolInstance->m_numBusyThreads * 2 < poolInstance->m_numCurThreads && poolInstance->m_numCurThreads > poolInstance->m_numMinThreads)
        {
            poolInstance->m_numExitThreads.store(poolInstance->THREADMANAGENUM);
            // wake THREADMANAGENUM blocked threads; each one sees m_numExitThreads > 0 and exits
            for (int i = 0; i < poolInstance->THREADMANAGENUM; i++)
            {
                poolInstance->cvTasksNotEmpty.notify_one();
            }
        }
    }
}
std::mutex mtxfunc;
int data = 0;
void func()
{
    for (int i = 0; i < 1000000; i++)
    {
        std::unique_lock<std::mutex> ul(mtxfunc);
        data++;
    }
}
int main()
{
#if 0
    auto start1 = std::chrono::steady_clock::now();
    ThreadPool* tp = new ThreadPool(6, 10);
    tp->enqueue(func);
    tp->enqueue(func);
    tp->enqueue(func);
    tp->enqueue(func);
    tp->enqueue(func);
    tp->enqueue(func);
    delete tp; // the destructor's join() is what guarantees all tasks have finished; without it the main thread just runs ahead
    auto end1 = std::chrono::steady_clock::now();
    std::cout << "MainThreadID=" << std::this_thread::get_id() << ", final data= " << data << std::endl;
    std::cout << "pool spend:" << std::chrono::duration_cast<std::chrono::microseconds>(end1 - start1).count() << "us" << std::endl;
#else
    auto start2 = std::chrono::steady_clock::now();
    for (int i = 0; i < 6000000; i++)
    {
        data++;
    }
    auto end2 = std::chrono::steady_clock::now();
    std::cout << "mainthread spend:" << std::chrono::duration_cast<std::chrono::microseconds>(end2 - start2).count() << "us" << std::endl;
    std::cout << "MainThreadID=" << std::this_thread::get_id() << ", final data= " << data << std::endl;
#endif
}

Current problems:

  • Built and tested with MSVC; efficiency is low
  • The caller has no way to know when a worker thread has finished a task
  • The main thread may print data before the tasks have actually completed

Improvement:

  • Have enqueue return a std::future so the main thread can use wait(), wait_for(), and get() to query a task's status and result (a caller-side sketch follows the function below)
// enqueue renamed to addTask
// return_type is needed because std::packaged_task's template parameter is the return type of f(args...)
template<typename F, typename... Args>
auto addTask(F&& f, Args&&... args) -> std::future<decltype(f(args...))>
{
    /*using fReturnType = decltype(f(args...));
    auto task = std::bind(std::forward<F>(f), std::forward<Args>(args)...);
    std::packaged_task<fReturnType()> ptTask(task);
    std::future<fReturnType> res = ptTask.get_future();*/
    using return_type = typename std::result_of<F(Args...)>::type;
    // packaged_task is move-only, but std::function needs a copyable callable,
    // so hold it in a shared_ptr and let the queued lambda capture the pointer
    auto task = std::make_shared<std::packaged_task<return_type()>>(std::bind(std::forward<F>(f), std::forward<Args>(args)...));
    std::future<return_type> res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(mtxTasks);
        if (m_shutdown) {
            throw std::runtime_error("addTask on stopped ThreadPool");
        }
        m_tasks.emplace([task]() { (*task)(); });
    }
    cvTasksNotEmpty.notify_one();
    return res;
}
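With this version the caller keeps the returned std::future and can either poll it with wait_for() or block on get(). Below is a minimal caller-side sketch, assuming addTask above has replaced enqueue in the ThreadPool class; the pool sizes and the squaring lambda are placeholder values for illustration only, not part of the original code.

int main()
{
    ThreadPool pool(2, 4);                       // hypothetical small pool just for this sketch
    // submit a task; the future's value type comes from decltype(f(args...))
    std::future<int> fut = pool.addTask([](int x) { return x * x; }, 6);
    // wait_for() lets the caller check the task's status without blocking indefinitely
    if (fut.wait_for(std::chrono::milliseconds(100)) == std::future_status::ready)
    {
        std::cout << "task already finished" << std::endl;
    }
    // get() blocks until the task completes, then returns its result (36 here)
    std::cout << "result=" << fut.get() << std::endl;
}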

Improvement:

  • The caller already knows the return type of the function it submits, so pass that type in explicitly as well, the way C#'s built-in Func delegate does
template<typename R, typename F, typename... Args>
std::future<R> addTask(F&& f, Args&&... args)
{
    //auto task = std::bind(std::forward<F>(f), std::forward<Args>(args)...);
    //auto taskPtr = std::make_shared<std::packaged_task<R()>>(std::packaged_task<R()>(task));
    auto task = std::make_shared<std::packaged_task<R()>>(std::bind(std::forward<F>(f), std::forward<Args>(args)...));
    {
        std::unique_lock<std::mutex> ulTasks(mtxTasks); // lock before touching the shared queue m_tasks
        m_tasks.emplace([task]() {
            (*task)();
        });
    }
    cvTasksNotEmpty.notify_one();
    return task->get_future();
}
std::mutex mtxfunc;
int data = 0;
int func()
{
    for (int i = 0; i < 1000000; i++)
    {
        std::unique_lock<std::mutex> ul(mtxfunc);
        data++;
    }
    return data;
}
int main()
{
    ThreadPool* tp = new ThreadPool(5, 10);
    // only the return type R has to be spelled out; F and Args are deduced
    std::future<int> res1 = tp->addTask<int>(func);
    std::future<int> res2 = tp->addTask<int>(func);
    std::future<int> res3 = tp->addTask<int>(func);
    std::future<int> res4 = tp->addTask<int>(func);
    std::future<int> res5 = tp->addTask<int>(func);
    std::cout << "MainThreadID=" << std::this_thread::get_id() << ", final data= " << res1.get() << std::endl;
    std::cout << "MainThreadID=" << std::this_thread::get_id() << ", final data= " << res2.get() << std::endl;
    std::cout << "MainThreadID=" << std::this_thread::get_id() << ", final data= " << res3.get() << std::endl;
    std::cout << "MainThreadID=" << std::this_thread::get_id() << ", final data= " << res4.get() << std::endl;
    std::cout << "MainThreadID=" << std::this_thread::get_id() << ", final data= " << res5.get() << std::endl;
    delete tp;
}