字符设备研究_4

  随着对内核代码研究的深入,问题愈发庞杂,这几天闷头研究回过头来才有了一个比较清晰的思路,今天就解决等待队列的问题.等待队列,是应阻塞(Block)这一功能实现的要求产生的.在学单片机的时候,检测一个按键是否按下,最初的设计都是在一个循环里轮询寄存器状态,直到按键按下,但这要求单片机CPU一直检测,对性能有极大的浪费,于是开始学习用按键中断的方式通知CPU.阻塞的应用,也是顺应了这样节约CPU资源的思想,当进程运行的条件不满足时,则挂起进程直到满足可操作的条件后再进行操作。被挂起的进程进入休眠状态,被从调度器的运行队列移走,直到等待的条件被满足。

  而一个进程调用schedule()休眠后(这一过程涉及到了内核的进程调度,留坑)是如何被唤醒的?这就要通过等待队列(wait_queue)了,先看定义:

/* Head of a wait queue: a lock plus the list head chaining all waiters. */
struct __wait_queue_head {
    spinlock_t        lock;//spinlock protecting the waiter list
    struct list_head    task_list;//head of the list of waiting entries
};

/* One waiter (one sleeping task) linked on a wait queue. */
struct __wait_queue {
    unsigned int        flags;//flag bits
#define WQ_FLAG_EXCLUSIVE    0x01//exclusive: a wake_up stops after waking one such waiter
    void            *private;//actually a struct task_struct * of the task that joined the queue
    wait_queue_func_t    func;//wake-up callback invoked by wake_up
    struct list_head    task_list;//link node chaining this entry into the queue
};
/* Generic circular doubly linked list node (also serves as the list head). */
struct list_head {
    struct list_head *next, *prev;//doubly linked list
};

  等待队列以基本数据结构里的双向链表实现,其结构如下图(引自互联网):

  linux中,双向链表的添加操作为:

/*
 * Insert @new between two known consecutive nodes @prev and @next.
 * The caller must already hold whatever lock protects the list.
 */
static inline void __list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
{
    next->prev = new;
    new->next = next;
    new->prev = prev;
    prev->next = new;
}

  可见,每个进程加入队列后,其后继指向原队列第二个成员,先导指向队列头,形成环状双向链表,后加入的成员排在前面.而在测试时发现带有exclusive时加入队列的顺序却相反,可能与不加exclusive时队列顺序无影响有关.(写到这,继续往下读书的时候才发现书上写着有"等待队列成员设置了WQ_FLAG_EXCLUSIVE标志时,会被添加到等待队列尾部,没有这个标志会被添加到头部")

  而唤醒时,wake_up()函数会从队列头开始,依次唤醒每一个队列中的等待成员,直至重回head或者唤醒了一个带有exclusive标志的成员.如下代码中,唤醒后会继续判断condition,以防止其他被唤醒的进程改变了condition.但测试中偶尔也会出现同时唤醒两个线程的情况.

    for (;;) {                            \
        long __int = prepare_to_wait_event(&wq, &__wait, state);\
                                    \
        if (condition)                        \
            break;                        \
                                    \
        if (___wait_is_interruptible(state) && __int) {        \
            __ret = __int;                    \
            if (exclusive) {                \
                abort_exclusive_wait(&wq, &__wait,    \
                             state, NULL);    \
                goto __out;                \
            }                        \
            break;                        \
        }                            \
                                    \
        schedule();                            \
    }

 

  

#include <linux/module.h>
#include <linux/sched.h>//
#include <linux/types.h>//size_t etc.
#include <linux/semaphore.h>//semaphore
#include <linux/wait.h>//wait queue
#include <asm-generic/current.h>//current
#include <linux/cdev.h>//cdev
#include <linux/fs.h>//file
#include <linux/slab.h>//kmalloc
#include <linux/list.h>//list_head操作
#include <linux/device.h>//创建设备节点

/* sysfs class and device used to create the /dev/sleepy node automatically. */
static struct class *sleepy_class;
static struct device *sleepy_device;

/* Per-device state: the char device plus a (currently unused) semaphore. */
struct dev {
    struct cdev cdev;
    struct semaphore sem;
}*devp;
static int sleepy_major = 250;//statically chosen major number (0 would request dynamic allocation)

static DECLARE_WAIT_QUEUE_HEAD(wq);//the wait queue under study
static int flag = 0;//wait condition: readers sleep until a writer sets it

void printk_wq(void)
{
    struct list_head *pos, *head = &wq.task_list;
    int n = 0;
    printk("~~~~~~~~~\n");
    list_for_each(pos, head) {
        n++;
        struct __wait_queue* wq1 = list_entry(pos, struct __wait_queue, task_list);//list_entry == container_of
        struct task_struct* ts = (struct task_struct*)wq1->private;
        printk(KERN_INFO "no.%d:pid--%d\n", n, ts->pid);
    }
    printk("~~~~~~~~~\n");
}

int sleepy_open(struct inode *inode, struct file *filp)
{
    filp->private_data = devp;
    return 0;
}

/*
 * release(): drop the private-data reference set up in open().
 * Nothing to free; always succeeds.
 */
int sleepy_release(struct inode *inode, struct file *filp)
{
    filp->private_data = NULL;
    return 0;
}

/*
 * read(): sleep on the wait queue until a writer sets `flag`, first as a
 * non-exclusive waiter and then as an exclusive one (the experiment this
 * post is about), then consume the condition and return 0 bytes.
 *
 * Fix vs. the original: wait_event_interruptible*() returns -ERESTARTSYS
 * when the sleep is broken by a signal; the original ignored that and
 * cleared `flag` anyway. Propagate the error instead.
 */
ssize_t sleepy_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
    int ret;

    printk(KERN_INFO "Wait queue: \n");
    printk_wq();

    ret = wait_event_interruptible(wq, 0 != flag);
    if (ret)
        return ret;	/* -ERESTARTSYS: interrupted by a signal */
    /* flag is still set here, so the exclusive wait returns immediately
     * unless another woken reader consumed it first. */
    ret = wait_event_interruptible_exclusive(wq, 0 != flag);
    if (ret)
        return ret;

    flag = 0;
    printk(KERN_INFO "Awoken, %i(%s)\n", current->pid , current->comm);
    return 0;
}

/*
 * write(): satisfy the wait condition and wake every interruptible
 * sleeper on the queue. The flag store precedes wake_up_interruptible(),
 * so woken readers see the condition as true. Returns 0 bytes written.
 */
ssize_t sleepy_write(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
    /* set the condition first, then wake */
    flag = 1;
    printk_wq();	/* show who is queued before they get woken */
    wake_up_interruptible(&wq);
    printk(KERN_INFO "%i(%s) awaking\n", current->pid, current->comm);
    return 0;
}

/* File operations for the sleepy device, in conventional member order. */
static const struct file_operations sleepy_fops = {
    .owner   = THIS_MODULE,
    .open    = sleepy_open,
    .release = sleepy_release,
    .read    = sleepy_read,
    .write   = sleepy_write,
};

/*
 * Initialise and register the cdev for device number (sleepy_major, index).
 * A cdev_add() failure is only logged, matching the LDD3 example style.
 */
static void mem_setup_cdev(struct dev *dev, int index)
{
    int err, devno = MKDEV(sleepy_major, index);//major, minor device number

    cdev_init(&dev->cdev, &sleepy_fops);//bind the cdev to its file_operations
    dev->cdev.owner = THIS_MODULE;
    sema_init(&dev->sem, 1);
    err = cdev_add(&dev->cdev, devno, 1);
    if (err)
        printk(KERN_NOTICE "Error %d adding sleepy cdev:%d", err, index);
}

/*
 * Module init: register the char device region, create the sysfs class and
 * /dev/sleepy node, allocate the device structure, and register the cdev.
 *
 * Fixes vs. the original:
 *  - the registration result is checked BEFORE creating the class/device
 *    (the original created them even when registration had failed, and
 *    then leaked them on the error return);
 *  - class_create()/device_create() errors are detected with IS_ERR();
 *  - every failure path unwinds exactly what was set up before it;
 *  - kzalloc() replaces the kmalloc()+memset() pair.
 */
static int sleepy_init(void)
{
    int result;
    dev_t devno = MKDEV(sleepy_major, 0);

    if (sleepy_major)
        result = register_chrdev_region(devno, 1, "sleepy");
    else {
        result = alloc_chrdev_region(&devno, 0, 1, "sleepy");
        sleepy_major = MAJOR(devno);
    }
    if (result < 0)
        return result;

    /* create the sysfs class and the /dev/sleepy device node */
    sleepy_class = class_create(THIS_MODULE, "sleepy");
    if (IS_ERR(sleepy_class)) {
        result = PTR_ERR(sleepy_class);
        goto fail_class;
    }
    sleepy_device = device_create(sleepy_class, NULL, MKDEV(sleepy_major, 0), NULL, "sleepy");
    if (IS_ERR(sleepy_device)) {
        result = PTR_ERR(sleepy_device);
        goto fail_device;
    }

    devp = kzalloc(sizeof(*devp), GFP_KERNEL);
    if (!devp) {
        result = -ENOMEM;
        goto fail_malloc;
    }
    mem_setup_cdev(devp, 0);
    printk(KERN_INFO "init success_________________________________________________________\n");
    return 0;

fail_malloc:
    device_destroy(sleepy_class, MKDEV(sleepy_major, 0));
fail_device:
    class_destroy(sleepy_class);
fail_class:
    unregister_chrdev_region(devno, 1);
    return result;
}

/*
 * Module exit: tear down everything sleepy_init() set up, in reverse order.
 *
 * Fixes vs. the original:
 *  - a module_exit handler must return void, not int (the original's
 *    `static int` triggers an init/exit section type-mismatch warning);
 *  - the /dev node and class are destroyed first (device_destroy pairs
 *    with device_create), then the cdev, the allocation, and finally the
 *    device-number region.
 */
static void sleepy_exit(void)
{
    device_destroy(sleepy_class, MKDEV(sleepy_major, 0));
    class_destroy(sleepy_class);
    cdev_del(&devp->cdev);
    kfree(devp);
    unregister_chrdev_region(MKDEV(sleepy_major, 0), 1);
    printk(KERN_INFO "exit success___________________________________________________________________\n");
}
module_init(sleepy_init);
module_exit(sleepy_exit);
MODULE_LICENSE("GPL");

 

posted @ 2016-11-16 17:43  胡星宇  阅读(123)  评论(0编辑  收藏  举报