rtthread: "rt_schedule" scheduling
1 Thread scheduling: rt_schedule
Thread switching in rtthread is carried out by the rt_schedule() scheduler.
rt_schedule() selects the next thread to run by combining rt_thread_ready_priority_group with rt_thread_priority_table.
1.1 rt_thread_ready_priority_group: the thread ready priority group
The thread ready priority group is a 32-bit variable in which each bit corresponds to one priority level; the lowest bit stands for the highest priority (priority 0).
//scheduler.c
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint32_t rt_thread_ready_priority_group;
rt_uint8_t rt_thread_ready_table[32];
#else
/* Maximum priority level, 32 */
rt_uint32_t rt_thread_ready_priority_group; //thread ready priority group
#endif
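For the RT_THREAD_PRIORITY_MAX <= 32 case, a thread's bit in the group comes directly from its priority. Below is a minimal standalone sketch of that idea, assuming number_mask is simply 1 << priority (the actual initialization lives in thread.c and is not quoted in this post):
//sketch (assumption): how a priority maps to its bit in the 32-bit ready group;
#include <stdio.h>
int main(void)
{
    unsigned int ready_group = 0;
    unsigned int priority = 5;                  //e.g. a thread at priority 5
    unsigned int number_mask = 1u << priority;  //bit 5 stands for priority 5
    ready_group |= number_mask;                 //insert: mark priority 5 as ready
    printf("group after insert: 0x%08x\n", ready_group); //prints 0x00000020
    ready_group &= ~number_mask;                //remove: clear the bit once the priority-5 list is empty
    printf("group after remove: 0x%08x\n", ready_group); //prints 0x00000000
    return 0;
}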
1.1.1 Looking up the priority group
rtthread considers scanning the group bit by bit for the highest priority too slow, so it trades memory for speed: the lowest set bit is found by indexing a lookup table, combined with a few range checks.
Why not cover all 32 bits with a single table? A table indexed by the full 32-bit value would need 2^32 entries, which is far too much memory to be worthwhile, so the value is examined one byte at a time against a 256-entry table.
//scheduler.c: how the priority group is used to find the next thread;
highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
to_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next, struct rt_thread, tlist);
//kservice.c
//__rt_ffs() below adds 1 to the bit index before returning, which is why the calling code above subtracts 1;
int __rt_ffs(int value)
{
    if (value == 0)
        return 0; //empty priority group; the caller's -1 then yields -1, an error;
    if (value & 0xff)
        return __lowest_bit_bitmap[value & 0xff] + 1; //bit[7:0]; even priority 0 gets the +1;
    if (value & 0xff00)
        return __lowest_bit_bitmap[(value & 0xff00) >> 8] + 9; //bit[15:8]
    if (value & 0xff0000)
        return __lowest_bit_bitmap[(value & 0xff0000) >> 16] + 17; //bit[23:16]
    return __lowest_bit_bitmap[(value & 0xff000000) >> 24] + 25; //bit[31:24]
}
//At first glance this looks like something sophisticated, but the array is pure grunt work: every possible byte value (0..255) is enumerated, and the position of its lowest set bit (i.e. the highest priority within that byte) is stored at that index;
//Grunt work or not, it goes by the grand name of "bitmap algorithm";
//The values below are priorities within their own byte; the caller adds the byte offset (+1/+9/+17/+25) to get the full result;
const rt_uint8_t __lowest_bit_bitmap[] =
{
/* 00 */ 0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 10 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 20 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 30 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 40 */ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 50 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 60 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 70 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 80 */ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* 90 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* A0 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* B0 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* C0 */ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* D0 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* E0 */ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
/* F0 */ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
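A quick standalone check of the byte-wise search (illustrative only, not RT-Thread code: the 256-entry table is replaced by a small loop, and the result is cross-checked against GCC/Clang's __builtin_ffs, which uses the same 1-based convention):
//standalone sketch: mimic the byte-wise lowest-bit search and cross-check it;
#include <stdio.h>
static int lowest_bit(unsigned char byte) //stand-in for __lowest_bit_bitmap[byte]; byte must be non-zero
{
    int pos = 0;
    while (((byte >> pos) & 1) == 0)
        pos++;
    return pos;
}
static int ffs_like_rt(unsigned int value) //mirrors the structure of __rt_ffs()
{
    if (value == 0)       return 0;
    if (value & 0xff)     return lowest_bit(value & 0xff) + 1;
    if (value & 0xff00)   return lowest_bit((value & 0xff00) >> 8) + 9;
    if (value & 0xff0000) return lowest_bit((value & 0xff0000) >> 16) + 17;
    return lowest_bit((value & 0xff000000) >> 24) + 25;
}
int main(void)
{
    unsigned int group = (1u << 4) | (1u << 21); //priorities 4 and 21 are ready
    printf("ffs_like_rt=%d __builtin_ffs=%d highest ready priority=%d\n",
           ffs_like_rt(group), __builtin_ffs(group), ffs_like_rt(group) - 1);
    //prints: ffs_like_rt=5 __builtin_ffs=5 highest ready priority=4
    return 0;
}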
1.2 rt_thread_priority_table: the thread ready priority table
The thread ready priority table is an array of rt_list_t objects; each element is a list head onto which the &tlist nodes of threads sharing that priority are hooked.
//scheduler.c
rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX]; //priority table;
struct rt_thread *rt_current_thread; //pointer to the current thread;
rt_uint8_t rt_current_priority;
rt_list_t rt_thread_defunct;
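The scheduler recovers the owning struct rt_thread from a tlist node with rt_list_entry, which is the classic container_of trick. A minimal standalone illustration with made-up struct names (not the rtservice.h source):
//sketch of the container_of idea behind rt_list_entry;
#include <stddef.h>
#include <stdio.h>
struct node  { struct node *next, *prev; };        //stand-in for rt_list_t
struct thing { int priority; struct node tlist; }; //stand-in for struct rt_thread
#define entry_of(ptr, type, member) ((type *)((char *)(ptr) - offsetof(type, member)))
int main(void)
{
    struct thing t = { .priority = 7 };
    struct node *n = &t.tlist; //only the node is linked into a list
    struct thing *back = entry_of(n, struct thing, tlist); //subtract the member offset
    printf("recovered priority = %d\n", back->priority);   //prints 7
    return 0;
}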
2 rt_system_scheduler_init( )
rt_thread_priority_table[] and rt_thread_ready_priority_group are initialized here; rt_thread_defunct is initialized here as well.
//component.c: rtthread_startup() calls rt_system_scheduler_init();
//scheduler.c
void rt_system_scheduler_init(void)
{
    register rt_base_t offset;
    rt_scheduler_lock_nest = 0;
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n", RT_THREAD_PRIORITY_MAX));
    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]); //rt_thread_priority_table[]
    }
    rt_current_priority = RT_THREAD_PRIORITY_MAX - 1; //rt_current_priority
    rt_current_thread = RT_NULL; //rt_current_thread
    /* initialize ready priority group */
    rt_thread_ready_priority_group = 0; //rt_thread_ready_priority_group
#if RT_THREAD_PRIORITY_MAX > 32
    /* initialize ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif
    /* initialize thread defunct */
    rt_list_init(&rt_thread_defunct); //rt_thread_defunct
}
3 rt_schedule_insert_thread( )
Why does the scheduler provide only insert and remove operations, with no look-up or modify? Because the ready list queues threads first-in, first-out, look-up and modify operations are simply not needed (the ordering is demonstrated in the sketch at the end of section 7).
//scheduler.c
void rt_schedule_insert_thread(struct rt_thread *thread)
{
    register rt_base_t temp;
    RT_ASSERT(thread != RT_NULL);
    /* disable interrupt */
    temp = rt_hw_interrupt_disable();
    /* change stat */
    thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK);
    /* insert thread to ready list */
    rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                          &(thread->tlist));
    /* set priority mask */
#if RT_THREAD_PRIORITY_MAX <= 32
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name, thread->current_priority));
#else
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                 ("insert thread[%.*s], the priority: %d 0x%x %d\n",
                  RT_NAME_MAX,
                  thread->name,
                  thread->number,
                  thread->number_mask,
                  thread->high_mask));
#endif
#if RT_THREAD_PRIORITY_MAX > 32
    rt_thread_ready_table[thread->number] |= thread->high_mask;
#endif
    rt_thread_ready_priority_group |= thread->number_mask;
    /* enable interrupt */
    rt_hw_interrupt_enable(temp);
}
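The masks used above are prepared when the thread is created. For RT_THREAD_PRIORITY_MAX > 32 the priority is split into a byte index (number) and a bit inside that byte (high_mask); the sketch below shows the presumed encoding (an assumption about thread.c, which is not quoted in this post):
//sketch (assumption): how thread.c presumably derives the masks from a priority
//when RT_THREAD_PRIORITY_MAX > 32;
#include <stdio.h>
int main(void)
{
    unsigned int current_priority = 42; //example priority
    unsigned int number = current_priority >> 3;              //which byte of rt_thread_ready_table: 5
    unsigned int number_mask = 1u << number;                  //bit 5 of rt_thread_ready_priority_group
    unsigned int high_mask = 1u << (current_priority & 0x07); //bit 2 inside that byte
    printf("number=%u number_mask=0x%08x high_mask=0x%02x\n", number, number_mask, high_mask);
    //rebuilding it: (number << 3) + lowest set bit of high_mask = 40 + 2 = 42
    return 0;
}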
4 rt_schedule_remove_thread( )
//scheduler.c
//After the node is removed from the list, if the list becomes empty, clear the corresponding bit of the priority group;
void rt_schedule_remove_thread(struct rt_thread *thread)
{
    register rt_base_t temp;
    RT_ASSERT(thread != RT_NULL);
    /* disable interrupt */
    temp = rt_hw_interrupt_disable();
#if RT_THREAD_PRIORITY_MAX <= 32
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n",
                                      RT_NAME_MAX, thread->name,
                                      thread->current_priority));
#else
    RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                 ("remove thread[%.*s], the priority: %d 0x%x %d\n",
                  RT_NAME_MAX,
                  thread->name,
                  thread->number,
                  thread->number_mask,
                  thread->high_mask));
#endif
    /* remove thread from ready list */
    rt_list_remove(&(thread->tlist));
    if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority])))
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[thread->number] &= ~thread->high_mask;
        if (rt_thread_ready_table[thread->number] == 0)
        {
            rt_thread_ready_priority_group &= ~thread->number_mask;
        }
#else
        rt_thread_ready_priority_group &= ~thread->number_mask;
#endif
    }
    /* enable interrupt */
    rt_hw_interrupt_enable(temp);
}
5 rt_system_scheduler_start( ): the first schedule
//component.c: rt_system_scheduler_start() is called at the end of rtthread_startup();
//scheduler.c
void rt_system_scheduler_start(void)
{
    register struct rt_thread *to_thread;
    register rt_ubase_t highest_ready_priority;
#if RT_THREAD_PRIORITY_MAX > 32
    register rt_ubase_t number;
    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif
    /* get switch to thread */
    to_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                              struct rt_thread,
                              tlist);
    rt_current_thread = to_thread;
    /* switch to new thread */
    rt_hw_context_switch_to((rt_uint32_t)&to_thread->sp);
    /* never come back */
}
6 rt_schedule( ): subsequent scheduling
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *from_thread;
    /* disable interrupt */
    level = rt_hw_interrupt_disable();
    /* check the scheduler is enabled or not */
    if (rt_scheduler_lock_nest == 0)
    {
        register rt_ubase_t highest_ready_priority;
#if RT_THREAD_PRIORITY_MAX <= 32
        highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#else
        register rt_ubase_t number;
        number = __rt_ffs(rt_thread_ready_priority_group) - 1;
        highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#endif
        /* get switch to thread */
        to_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next,
                                  struct rt_thread,
                                  tlist);
        /* if the destination thread is not the same as current thread */
        if (to_thread != rt_current_thread)
        {
            rt_current_priority = (rt_uint8_t)highest_ready_priority;
            from_thread = rt_current_thread;
            rt_current_thread = to_thread;
            RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));
            /* switch to new thread */
            RT_DEBUG_LOG(RT_DEBUG_SCHEDULER,
                         ("[%d]switch to priority#%d "
                          "thread:%.*s(sp:0x%p), "
                          "from thread:%.*s(sp: 0x%p)\n",
                          rt_interrupt_nest, highest_ready_priority,
                          RT_NAME_MAX, to_thread->name, to_thread->sp,
                          RT_NAME_MAX, from_thread->name, from_thread->sp));
#ifdef RT_USING_OVERFLOW_CHECK
            _rt_scheduler_stack_check(to_thread);
#endif
            if (rt_interrupt_nest == 0)
            {
                extern void rt_thread_handle_sig(rt_bool_t clean_state);
                rt_hw_context_switch((rt_uint32_t)&from_thread->sp,
                                     (rt_uint32_t)&to_thread->sp);
                /* enable interrupt */
                rt_hw_interrupt_enable(level);
#ifdef RT_USING_SIGNALS
                /* check signal status */
                rt_thread_handle_sig(RT_TRUE);
#endif
            }
            else
            {
                RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n"));
                rt_hw_context_switch_interrupt((rt_uint32_t)&from_thread->sp,
                                               (rt_uint32_t)&to_thread->sp);
                /* enable interrupt */
                rt_hw_interrupt_enable(level);
            }
        }
        else
        {
            /* enable interrupt */
            rt_hw_interrupt_enable(level);
        }
    }
    else
    {
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
7 Notes on rt_list_t
The list-node source code is collected here: https://www.cnblogs.com/caesura-k/p/12368258.html
//This question bothered me for quite a long time, so I decided to give it the attention it deserves;
//If nodes are always inserted before list_head, the first node inserted sits at the front, i.e. at table[list_head].next;
//If nodes are always inserted after list_head, the first node inserted sits at the back, i.e. at table[list_head].prev;
//The priority table uses insert_before, so entries are taken out via .next;
rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]), &(thread->tlist));
to_thread = rt_list_entry(rt_thread_priority_table[rt_current_priority].next, struct rt_thread, tlist);
//The object container list uses insert_after, so entries would be taken out via .prev; there is no example of taking one out yet;
rt_list_insert_after(&(information->object_list), &(object->list));
//The timer list also uses insert_after, but timers are inserted in ascending order, so .next is used to fetch the first (earliest) one for comparison;
rt_list_insert_after(row_head[RT_TIMER_SKIP_LIST_LEVEL - 1], &(timer->row[RT_TIMER_SKIP_LIST_LEVEL - 1]));
t = rt_list_entry(rt_timer_list[RT_TIMER_SKIP_LIST_LEVEL - 1].next, struct rt_timer, row[RT_TIMER_SKIP_LIST_LEVEL - 1]);
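To make the ordering point above concrete, here is a standalone sketch with a minimal circular doubly-linked list in the style of rt_list_t (illustrative code, not the rtservice.h implementation): inserting before the head appends, so reading .next gives FIFO order; inserting after the head prepends, so the first node inserted ends up at .prev.
//minimal circular doubly-linked list in the style of rt_list_t, to show
//insert_before (append) versus insert_after (prepend);
#include <stdio.h>
struct list { struct list *next, *prev; int id; };
static void list_init(struct list *l) { l->next = l->prev = l; }
static void insert_after(struct list *l, struct list *n)  //n becomes l->next
{
    l->next->prev = n; n->next = l->next;
    l->next = n;       n->prev = l;
}
static void insert_before(struct list *l, struct list *n) //n becomes l->prev
{
    l->prev->next = n; n->prev = l->prev;
    l->prev = n;       n->next = l;
}
int main(void)
{
    struct list head, a = { .id = 1 }, b = { .id = 2 };
    list_init(&head);
    insert_before(&head, &a); //like the priority table: append
    insert_before(&head, &b);
    printf("insert_before: head.next->id = %d (first inserted, FIFO)\n", head.next->id); //1
    list_init(&head);
    insert_after(&head, &a);  //like the object container list: prepend
    insert_after(&head, &b);
    printf("insert_after:  head.prev->id = %d (first inserted, at the back)\n", head.prev->id); //1
    return 0;
}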
8 Summary
The logic of rt_schedule is fairly simple, the kind of code you can understand in a single pass; don't be misled by an unfamiliar surface into thinking it is complicated.
I feel that striving to be systematic keeps making these notes long-winded and muddled, which hurts structural clarity; from now on I will write one post per knowledge point.