Applying User-Mode Threads to AI
I have recently been reworking the server-side AI, with the plan of moving it onto a dedicated server. To avoid disrupting the existing architecture too much, attack resolution and movement resolution stay on the gameserver; the AI server's role is to pick an appropriate decision based on the current state and send the corresponding decision command to the gameserver.
For example, a simple AI routine might look like this:
void onAi()
{
    // pick a target from the field of view
    target = findtarget();
    if(target && target.distance(this) <= 10)
    {
        attack(target);
    }
}
When attack is called, an attack command is sent to the gameserver, which resolves it and returns the result to the AI server. This raises a problem: the AI loop may reach onAi again before the result of the previous attack has come back, and at that point the AI can no longer make a sound decision based on the correct state.
The right approach is to block inside attack until the attack result returns, and only then let attack return. Obviously, if we simply block there, no other AI can keep running, and this is exactly where user-mode threads show their power.
When attack is executed, control can be switched back to the scheduler, which then picks a user-mode thread that is not blocked and runs it. Once the attack result arrives, the blocked user-mode thread is marked runnable again, and the scheduler can later reschedule it to continue. When the scheduler resumes the blocked thread, that thread returns from attack and carries on with its remaining processing. This keeps each AI's processing sequential.
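To make this concrete, here is a minimal sketch (not part of the original code) of what a blocking attack() could look like on the AI server, written against the fiber framework given below. sendAttackCommand() and lastAttackResult() are hypothetical stand-ins for the AI server's networking layer; SKILL_RET and Scheduler::block() come from the framework.

// Hedged sketch of a blocking attack(), assuming a hypothetical networking layer.
int attack(Entity *target)                   // Entity is a hypothetical target type
{
    sendAttackCommand(target);               // hypothetical: ship the attack command to the gameserver
    // Suspend this AI's fiber until the gameserver answers with SKILL_RET,
    // or give up after a 2-second timeout; other AI fibers keep running meanwhile.
    if(0 == Scheduler::block(SKILL_RET, 2))
        return lastAttackResult();           // hypothetical: result cached by the message handler
    return -1;                               // timed out; treat as a failed attack
}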
User-mode threads can be implemented in several ways, including ucontext on Linux and fibers on Windows. Below is a simple user-mode thread scheduling framework built on fibers (a minimal ucontext sketch is appended after the test program).
uthread.h
#ifndef _UTHREAD_H
#define _UTHREAD_H

#include <Windows.h>

// Fiber states
enum
{
    NONE,               // just yield the processor
    WAIT4EVENT = 1,     // waiting for an event
    WAIT4EVENTTIMEOUT,  // waiting for an event, with a timeout
    DEAD,               // the fiber has terminated
    ACTIVED,            // runnable
    SLEEP,
};

// Events a fiber can wait on
enum
{
    MOVE_RET = 1,       // result of a move request
    SKILL_RET,          // result of a skill/attack request
};

typedef int uthread_t;

class uthread;

class runnable
{
public:
    virtual void main_routine() = 0;
    uthread *p_uthread;
};

// A fiber (user-mode thread)
class uthread
{
public:
    static void WINAPI thread_routine(LPVOID pvParam);
    void OnEvent(unsigned short ev);
    uthread_t uthread_id;
    unsigned char status;
    PVOID p_uthreadContext;
    unsigned short waitevent;   // a fiber can wait on only one event at a time
};
#endif
Scheduler.h
#ifndef _SCHEDULER_H
#define _SCHEDULER_H

#include <Windows.h>
#include "uthread.h"
#include <map>
#include <list>
#include <time.h>

#define MAX_FIBER 8192

class Scheduler
{
    friend class uthread;
public:
    // initialize the fiber library: turn the calling thread into a fiber
    static void scheduler_init()
    {
        m_pUthreadContext = ConvertThreadToFiber(NULL);
    }

    static void scheduler_destroy();

    static uthread_t spawn(runnable *param, int stacksize);

    // pick fibers and run them until the scheduler is terminated
    static void schedule();

    static void sleep(time_t timeout)
    {
        if(timeout > 0)
        {
            m_uthreads[m_curuid]->status = SLEEP;
            time_t t = timeout + time(NULL);
            m_sleepList.push_back(std::make_pair(t, m_uthreads[m_curuid]));
        }
        SwitchToFiber(m_pUthreadContext);
    }

    // put a fiber back on the runnable queue
    static void add2Active(uthread *ut)
    {
        ut->status = ACTIVED;
        m_pendingAdd.push_back(ut);
    }

    // hand control back to the scheduler
    static void yield()
    {
        SwitchToFiber(m_pUthreadContext);
    }

    // block on event ev; timeout == 0 means wait forever
    static int block(unsigned short ev, time_t timeout);

private:
    static std::map<PVOID, uthread*> m_activeList;               // runnable fibers
    static std::list<uthread*> m_pendingAdd;                     // fibers waiting to be added to the runnable list
    static std::list<std::pair<time_t, uthread*> > m_sleepList;  // sleeping fibers; could later become a priority queue
    static PVOID m_pUthreadContext;                              // context of the fiber the scheduler runs in
    static uthread *m_uthreads[MAX_FIBER];
    static int m_count;
    static int m_curuid;                                         // uid of the running fiber; -1 means the scheduler itself is running
    static volatile bool m_terminate;
};
#endif
Scheduler.cpp
#include "stdafx.h"
#include "Scheduler.h"

std::map<PVOID, uthread*> Scheduler::m_activeList;   // runnable fibers
std::list<uthread*> Scheduler::m_pendingAdd;
std::list<std::pair<time_t, uthread*> > Scheduler::m_sleepList;
PVOID Scheduler::m_pUthreadContext;                  // context of the scheduler's own fiber
uthread *Scheduler::m_uthreads[MAX_FIBER];
int Scheduler::m_count = 0;
int Scheduler::m_curuid = -1;
volatile bool Scheduler::m_terminate = false;

void WINAPI uthread::thread_routine(LPVOID pvParam)
{
    ((runnable*)pvParam)->main_routine();
    ((runnable*)pvParam)->p_uthread->status = DEAD;
    /* The fiber routine must not simply return here, or the thread
     * running the fibers would exit with it. The correct thing to do
     * is hand control back to the scheduler and let the scheduler
     * delete this fiber.
     */
    Scheduler::yield();
}

// the awaited event has arrived; put the fiber back on the runnable queue
void uthread::OnEvent(unsigned short ev)
{
    if(ev == waitevent)
    {
        status = ACTIVED;
        Scheduler::add2Active(this);
        waitevent = 0;
        // remove the fiber from the sleep list
        std::list<std::pair<time_t, uthread*> >::iterator it = Scheduler::m_sleepList.begin();
        std::list<std::pair<time_t, uthread*> >::iterator end = Scheduler::m_sleepList.end();
        for( ; it != end; ++it)
        {
            if(it->second == this)
            {
                Scheduler::m_sleepList.erase(it);
                break;
            }
        }
    }
}

void Scheduler::schedule()
{
    printf("schedule\n");
    while(!m_terminate)
    {
        std::list<std::map<PVOID, uthread*>::iterator> deletes;
        std::map<PVOID, uthread*>::iterator it = m_activeList.begin();
        std::map<PVOID, uthread*>::iterator end = m_activeList.end();
        for( ; it != end; ++it)
        {
            m_curuid = it->second->uthread_id;
            SwitchToFiber(it->first);
            m_curuid = -1;
            if(it->second->status == DEAD || it->second->status == SLEEP ||
               it->second->status == WAIT4EVENT || it->second->status == WAIT4EVENTTIMEOUT)
            {
                deletes.push_back(it);
            }
            printf("come back\n");
        }
        // remove fibers that are no longer runnable; free the dead ones
        {
            std::list<std::map<PVOID, uthread*>::iterator>::iterator it = deletes.begin();
            std::list<std::map<PVOID, uthread*>::iterator>::iterator end = deletes.end();
            for( ; it != end; ++it)
            {
                if((*it)->second->status == DEAD)
                {
                    DeleteFiber((*it)->first);
                    m_uthreads[(*it)->second->uthread_id] = NULL;
                    delete (*it)->second;
                    --m_count;
                }
                m_activeList.erase(*it);
            }
        }
        // move every fiber waiting in m_pendingAdd into m_activeList
        {
            while(!m_pendingAdd.empty())
            {
                uthread *tmp = m_pendingAdd.back();
                m_pendingAdd.pop_back();
                m_activeList.insert(std::make_pair(tmp->p_uthreadContext, tmp));
            }
        }
        // wake up any fibers whose sleep or wait has timed out
        {
            time_t now = time(NULL);
            std::list<std::pair<time_t, uthread*> >::iterator it = m_sleepList.begin();
            for( ; it != m_sleepList.end(); )
            {
                if(it->first <= now)
                {
                    it->second->status = ACTIVED;
                    m_activeList.insert(std::make_pair(it->second->p_uthreadContext, it->second));
                    it = m_sleepList.erase(it);
                }
                else
                    ++it;
            }
        }
    }
    scheduler_destroy();
    ConvertFiberToThread();
    printf("scheduler end\n");
}

void Scheduler::scheduler_destroy()
{
    for(int i = 0; i < MAX_FIBER; ++i)
    {
        if(m_uthreads[i])
        {
            DeleteFiber(m_uthreads[i]->p_uthreadContext);
            delete m_uthreads[i];
        }
    }
}

// create a new fiber
uthread_t Scheduler::spawn(runnable *param, int stacksize)
{
    if(m_count >= MAX_FIBER)
        return -1;
    // a fiber created by CreateFiber does not run until it is switched to
    PVOID uthreadcontext = CreateFiber(stacksize, uthread::thread_routine, param);
    uthread *nthread = new uthread;
    nthread->p_uthreadContext = uthreadcontext;
    nthread->waitevent = 0;   // not waiting on any event yet
    for(int i = 0; i < MAX_FIBER; ++i)
    {
        if(0 == m_uthreads[i])
        {
            nthread->uthread_id = i;
            m_uthreads[i] = nthread;
            break;
        }
    }
    add2Active(nthread);
    ++m_count;
    param->p_uthread = nthread;
    return nthread->uthread_id;
}

int Scheduler::block(unsigned short ev, time_t timeout)
{
    m_uthreads[m_curuid]->waitevent = ev;
    if(timeout > 0)
    {
        m_uthreads[m_curuid]->status = WAIT4EVENTTIMEOUT;
        time_t t = timeout + time(NULL);
        m_sleepList.push_back(std::make_pair(t, m_uthreads[m_curuid]));
    }
    else
        m_uthreads[m_curuid]->status = WAIT4EVENT;
    SwitchToFiber(m_pUthreadContext);
    if(m_uthreads[m_curuid]->waitevent == 0)
        return 0;   // the awaited event arrived
    else
        return -1;  // timed out
}
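In the test program below, another fiber fires OnEvent by hand. On the real AI server it would be the network message handler that wakes the waiting fiber when the gameserver's reply arrives. A minimal sketch (not part of the original code), assuming a hypothetical AttackResultMsg message and a hypothetical g_entityThreads table mapping entity ids to their fibers:

// Hedged sketch: dispatching a gameserver reply to the fiber waiting on it.
// AttackResultMsg and g_entityThreads are hypothetical; a real server would
// have its own message types and its own entity-to-fiber bookkeeping.
struct AttackResultMsg
{
    int entityId;   // which AI entity issued the attack
    int result;     // outcome computed by the gameserver
};

std::map<int, uthread*> g_entityThreads;   // entity id -> the fiber running that entity's AI

void onAttackResult(const AttackResultMsg &msg)
{
    std::map<int, uthread*>::iterator it = g_entityThreads.find(msg.entityId);
    if(it != g_entityThreads.end() && it->second->waitevent == SKILL_RET)
    {
        // store msg.result somewhere the fiber can read it, then wake the fiber;
        // the scheduler will reschedule it on the next pass of Scheduler::schedule()
        it->second->OnEvent(SKILL_RET);
    }
}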
test.cpp
// AiScheduler.cpp : entry point of the console application.
#include "stdafx.h"
#include "Scheduler.h"

class test22 : public runnable
{
public:
    void main_routine()
    {
        for(int i = 0; i < 20; ++i)
        {
            printf("%d\n", i);
            printf("begin block\n");
            if(0 == Scheduler::block(MOVE_RET, 1))
                printf("test wake me up\n");
            else
                printf("timeout\n");
            //Scheduler::sleep(1);
        }
        printf("die\n");
    }
    uthread_t uid;
};

class test : public runnable
{
public:
    void main_routine()
    {
        for(int i = 0; i < 10; ++i)
        {
            printf("%d\n", i);
            // if the other fiber is waiting on MOVE_RET, wake it up
            if(t22->p_uthread->waitevent == MOVE_RET)
                t22->p_uthread->OnEvent(MOVE_RET);
            Scheduler::yield();
        }
        printf("die\n");
    }
    test22 *t22;
    uthread_t uid;
};

int _tmain(int argc, _TCHAR* argv[])
{
    Scheduler::scheduler_init();
    test22 test2;
    test test1;
    test1.t22 = &test2;
    test2.uid = Scheduler::spawn(&test2, 4096);
    test1.uid = Scheduler::spawn(&test1, 4096);
    //test3.uid = Scheduler::spawn(&test3,4096);
    //test4.uid = Scheduler::spawn(&test4,4096);
    Scheduler::schedule();
    return 0;
}
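As mentioned at the start, ucontext plays the same role on Linux that fibers play on Windows. For reference, here is a minimal sketch (not part of the original code) of the same yield-to-scheduler switch using ucontext; worker_routine and the fixed stack size are placeholders.

// Hedged sketch: one worker context yielding to a scheduler context via ucontext.
#include <ucontext.h>
#include <stdio.h>

static ucontext_t scheduler_ctx;   // plays the role of m_pUthreadContext
static ucontext_t worker_ctx;
static char worker_stack[64 * 1024];

static void worker_routine(void)
{
    printf("worker: doing some AI work\n");
    // equivalent of Scheduler::yield(): hand control back to the scheduler
    swapcontext(&worker_ctx, &scheduler_ctx);
    printf("worker: resumed after the scheduler switched back\n");
}

int main(void)
{
    getcontext(&worker_ctx);
    worker_ctx.uc_stack.ss_sp = worker_stack;
    worker_ctx.uc_stack.ss_size = sizeof(worker_stack);
    worker_ctx.uc_link = &scheduler_ctx;        // return here when the worker finishes
    makecontext(&worker_ctx, worker_routine, 0);

    printf("scheduler: switching to worker\n");
    swapcontext(&scheduler_ctx, &worker_ctx);   // like SwitchToFiber(worker)
    printf("scheduler: worker yielded, switching back\n");
    swapcontext(&scheduler_ctx, &worker_ctx);   // resume the worker where it yielded
    printf("scheduler: worker finished\n");
    return 0;
}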