Writing an Operating System Kernel Based on mykernel 2.0

Lab Requirements

1. Configure mykernel 2.0 following the instructions at https://github.com/mengning/mykernel and get familiar with compiling the Linux kernel.
2. Write an operating system kernel based on mykernel 2.0, referring to the sample code provided at https://github.com/mengning/mykernel.
3. Briefly analyze the core functionality of the operating system kernel and its working mechanism.

Lab Content

Install a Linux system, download the mykernel 2.0 patch, install the QEMU emulator inside the Linux virtual machine, and download the Linux kernel source. The specific commands are as follows:

wget https://raw.github.com/mengning/mykernel/master/mykernel-2.0_for_linux-5.4.34.patch
sudo apt install axel
axel -n 20 https://mirrors.edge.kernel.org/pub/linux/kernel/v5.x/linux-5.4.34.tar.xz
xz -d linux-5.4.34.tar.xz
tar -xvf linux-5.4.34.tar
cd linux-5.4.34
patch -p1 < ../mykernel-2.0_for_linux-5.4.34.patch
sudo apt install build-essential libncurses-dev bison flex libssl-dev libelf-dev
make defconfig # Default configuration is based on 'x86_64_defconfig'
make -j$(nproc) 
sudo apt install qemu # install QEMU
qemu-system-x86_64 -kernel arch/x86/boot/bzImage
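(Note: on newer Ubuntu releases the qemu metapackage may no longer be available; installing qemu-system-x86 instead also provides the qemu-system-x86_64 binary used above.)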

Running it produces the following result:


From mymain.c and myinterrupt.c we can see that mymain.c runs in an endless loop, printing my_start_kernel here once every 100000 iterations of its counter.
There is also an interrupt-handler context: the clock periodically generates timer interrupt signals, which trigger the code in myinterrupt.c to print my_timer_handler here.
In this way the Linux kernel code simulates a hardware platform with a timer interrupt and a C execution environment.
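For reference, the behavior described above comes from code roughly like the sketch below (paraphrased from the mykernel sample code; the exact messages and counter threshold may differ from the shipped version):

/* mymain.c (sketch): the "process" is just an endless counting loop */
void __init my_start_kernel(void)
{
    int i = 0;
    while (1) {
        i++;
        if (i % 100000 == 0)
            printk(KERN_NOTICE "my_start_kernel here %d\n", i);
    }
}

/* myinterrupt.c (sketch): invoked on every timer interrupt */
void my_timer_handler(void)
{
    printk(KERN_NOTICE ">>>my_timer_handler here<<<\n");
}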
Writing an operating system kernel based on mykernel 2.0
To write an operating system kernel on top of mykernel, we first need to add a header file mypcb.h, with the following code:
#define MAX_TASK_NUM        4
#define KERNEL_STACK_SIZE   1024*2

/* CPU-specific state of this task */
struct Thread {
    unsigned long        ip;
    unsigned long        sp;
};

typedef struct PCB{
    int pid;
    volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
    unsigned long stack[KERNEL_STACK_SIZE];

    /* CPU-specific state of this task */

    struct Thread thread;
    unsigned long    task_entry;
    struct PCB *next;
}tPCB;

void my_schedule(void);
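Note that stack is an array of unsigned long, so KERNEL_STACK_SIZE = 1024*2 entries gives each task a 16 KB kernel stack on x86-64 (2048 entries × 8 bytes), and thread.sp is later initialized to the highest-addressed element because the stack grows downward.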
We also need to modify the my_start_kernel function in mymain.c. This is the kernel initialization function, which initializes a series of modules when the kernel boots. The added code is as follows:
#include "mypcb.h"

tPCB task[MAX_TASK_NUM];
tPCB * my_current_task = NULL;

volatile int my_need_sched = 0;

void my_process(void);

void __init my_start_kernel(void)

{
    int pid = 0;
    int i;

    /* Initialize process 0*/

    task[pid].pid = pid;
    task[pid].state = 0;/* -1 unrunnable, 0 runnable, >0 stopped */
    task[pid].task_entry = task[pid].thread.ip = (unsigned long)my_process;
    task[pid].thread.sp = (unsigned long)&task[pid].stack[KERNEL_STACK_SIZE-1];
    task[pid].next = &task[pid];

    /*fork more process */

    for(i=1;i<MAX_TASK_NUM;i++)
    {
        memcpy(&task[i],&task[0],sizeof(tPCB));
        task[i].pid = i;
        task[i].thread.sp = (unsigned long)(&task[i].stack[KERNEL_STACK_SIZE-1]);
        task[i].next = task[i-1].next;
        task[i-1].next = &task[i];
    }

    /* start process 0 by task[0] */

    pid = 0;
    my_current_task = &task[pid];
    asm volatile(
        "movq %1,%%rsp\n\t"     /* set task[pid].thread.sp to rsp */
        "pushq %1\n\t"          /* push task[pid].thread.sp (placeholder for the saved rbp) */
        "pushq %0\n\t"          /* push task[pid].thread.ip */
        "ret\n\t"               /* pop task[pid].thread.ip into rip */
        :
        : "c" (task[pid].thread.ip),"d" (task[pid].thread.sp) /* constraints "c"/"d" place the inputs in %rcx/%rdx */
    );
} 
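To see what this startup sequence does, here is a sketch of process 0's stack just before the ret executes (illustrative only):

/*
 * after "movq %1,%%rsp":  rsp = &task[0].stack[KERNEL_STACK_SIZE-1]  (empty stack)
 * after "pushq %1":       [ thread.sp value ]   <- dummy saved-rbp slot
 * after "pushq %0":       [ &my_process     ]   <- rsp
 *
 * "ret" then pops &my_process into RIP, so execution enters my_process
 * running on task[0]'s own kernel stack.
 */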

int i = 0;  /* a single global counter, shared by all processes running my_process */
void my_process(void)

{    
    while(1)
    {
        i++;
        if(i%10000000 == 0)
        {
            printk(KERN_NOTICE "this is process %d -\n",my_current_task->pid);
            if(my_need_sched == 1)
            {
                my_need_sched = 0;
                my_schedule();
            }
            printk(KERN_NOTICE "this is process %d +\n",my_current_task->pid);
        }     
    }
}
The my_start_kernel function first initializes process 0, setting the process's entry address to the my_process function.
It then initializes the remaining processes and links them all into a circular list; the processes differ in their pid, their stack-top pointer, and their pointer to the next process.
Finally, a short sequence of assembly code starts process 0. The my_process function then implements voluntary yielding: after running for a time slice, a process
checks the value of my_need_sched to decide whether to call my_schedule() to switch to another process.
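As a sanity check on the linking loop, the following standalone userspace sketch (hypothetical demo code, not part of mykernel) reproduces just the pointer logic and shows that it builds the ring 0 -> 1 -> 2 -> 3 -> 0:

#include <stdio.h>

#define MAX_TASK_NUM 4

struct node { int pid; struct node *next; };

int main(void)
{
    struct node task[MAX_TASK_NUM];
    struct node *p;
    int i;

    task[0].pid = 0;
    task[0].next = &task[0];            /* process 0 initially points to itself */
    for (i = 1; i < MAX_TASK_NUM; i++) {
        task[i].pid = i;
        task[i].next = task[i-1].next;  /* new node inherits the link back to task[0] */
        task[i-1].next = &task[i];      /* previous node now points to the new one */
    }

    /* walk the ring once: prints 0 1 2 3 0 */
    for (i = 0, p = &task[0]; i <= MAX_TASK_NUM; i++, p = p->next)
        printf("%d ", p->pid);
    printf("\n");
    return 0;
}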

Next, modify the code in myinterrupt.c:

#include "mypcb.h"

extern tPCB task[MAX_TASK_NUM];
extern tPCB * my_current_task;
extern volatile int my_need_sched;
volatile int time_count = 0;

/*
 * Called by timer interrupt.
 * It runs in the context of the currently running process,
 * so it uses the kernel stack of the currently running process.
 */
void my_timer_handler(void)
{
    if(time_count%1000 == 0 && my_need_sched != 1)
    {
        printk(KERN_NOTICE ">>>my_timer_handler here<<<\n");
        my_need_sched = 1;
    }
    time_count ++;
    return;
}

void my_schedule(void)
{
    tPCB * next;
    tPCB * prev;

    if(my_current_task == NULL || my_current_task->next == NULL)
    {
        return;
    }
    printk(KERN_NOTICE ">>>my_schedule<<<\n");
    /* schedule */
    next = my_current_task->next;
    prev = my_current_task;
    if(next->state == 0)/* -1 unrunnable, 0 runnable, >0 stopped */
    {
        my_current_task = next;
        printk(KERN_NOTICE ">>>switch %d to %d<<<\n",prev->pid,next->pid);
        /* switch to next process */
        asm volatile(
            "pushq %%rbp\n\t"       /* save rbp of prev */
            "movq %%rsp,%0\n\t"     /* save rsp of prev */
            "movq %2,%%rsp\n\t"     /* restore rsp of next */
            "movq $1f,%1\n\t"       /* save rip of prev */
            "pushq %3\n\t"          /* push rip of next */
            "ret\n\t"               /* restore rip of next */
            "1:\t"                  /* next process starts here */
            "popq %%rbp\n\t"        /* restore rbp of next */
            : "=m" (prev->thread.sp),"=m" (prev->thread.ip)
            : "m" (next->thread.sp),"m" (next->thread.ip)
        );
    }
    return;
}
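Note the division of labor here: my_timer_handler runs in interrupt context and does nothing but set the my_need_sched flag (at most once per 1000 ticks), while the actual context switch in my_schedule happens later, in process context, when my_process notices the flag. Scheduling in this kernel is therefore cooperative: the timer only requests a switch, and each process yields at a point of its own choosing.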
After all of the above modifications, recompile the kernel and start QEMU. The result is as follows:

A brief analysis of the core process-switching assembly code:
asm volatile(
    "pushq %%rbp\n\t"       /* 1 save rbp of prev */
    "movq %%rsp,%0\n\t"     /* 2 save rsp of prev */
    "movq %2,%%rsp\n\t"     /* 3 restore rsp of next */
    "movq $1f,%1\n\t"       /* 4 save rip of prev */
    "pushq %3\n\t"          /* 5 push rip of next */
    "ret\n\t"               /* 6 restore rip of next */
    "1:\t"                  /* 7 next process starts here */
    "popq %%rbp\n\t"        /* 8 restore rbp of next */
    : "=m" (prev->thread.sp),"=m" (prev->thread.ip)
    : "m" (next->thread.sp),"m" (next->thread.ip)
);

  1. pushq %%rbp saves the value of the prev process's RBP register onto the stack;

  2. movq %%rsp,%0 saves the value of the prev process's RSP register into prev->thread.sp. At this point RSP points to the top of the process's stack, so this in effect records prev's stack-top address.

  3. movq %2,%%rsp loads the next process's stack-top address next->thread.sp into RSP, completing the stack switch.

  4. movq $1f,%1 saves prev's resume address into prev->thread.ip; here $1f is the address of the label 1: below.

  5. pushq %3 pushes next->thread.ip, the instruction address of the soon-to-run next process, onto the (now switched) stack.

  6. ret pops the pushed next->thread.ip into RIP. Why not write RIP directly? Because a program cannot access RIP directly; it can only change RIP indirectly through instructions such as call and ret.

  7. 1: is the resume point: when prev is later switched back in, step 6 of that later switch jumps here, so prev continues from this label.

  8. popq %%rbp restores the next process's RBP register from its stack, undoing the pushq from step 1 that ran when next was switched out earlier.
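The pushq-plus-ret trick in steps 5 and 6 can be tried in isolation. The following userspace sketch (a hypothetical demo, not part of mykernel; x86-64, GCC inline assembly) jumps to a function by pushing its address and executing ret:

#include <stdio.h>
#include <stdlib.h>

void target(void)
{
    printf("reached target via pushq + ret\n");
    exit(0);            /* we arrived by ret, not call, so don't return normally */
}

int main(void)
{
    asm volatile(
        "subq $8,%%rsp\n\t" /* keep the ABI's 16-byte stack alignment at entry */
        "pushq %0\n\t"      /* push target's address onto the stack */
        "ret\n\t"           /* pop it into RIP: an indirect jump, like step 6 */
        :
        : "r" ((unsigned long)target)
    );
    return 0;               /* never reached */
}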



 
