binder
1.binder_init
Registers the binder driver. binder_init calls misc_register to register binder as a misc character device; a later open("/dev/binder") from user space then operates on this device.
static struct miscdevice binder_miscdev = {//describes the binder device
.minor = MISC_DYNAMIC_MINOR,//minor device number; MISC_DYNAMIC_MINOR asks the kernel to pick a free one dynamically
.name = "binder",// device name, exposed as /dev/binder
.fops = &binder_fops //file operations the device supports (open, mmap, flush, release, ...), reached from the native layer via syscalls
};
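For reference, the binder_fops table that .fops points to is defined in the same driver file; the exact field set varies a little between kernel versions, but it is essentially:
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl, //all the commands in section 4 arrive here
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};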
static int __init binder_init(void)
{
int ret;
//create a workqueue named "binder" that runs on a single thread, used for deferred work
binder_deferred_workqueue = create_singlethread_workqueue("binder");
if (!binder_deferred_workqueue)
return -ENOMEM;
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
//register the misc device
ret = misc_register(&binder_miscdev);
return ret;
}
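As a usage sketch from the user-space side (not driver code): once binder_init has run, a process starts a binder session simply by opening the device; in Android this happens inside libbinder's ProcessState.
#include <fcntl.h>

int fd = open("/dev/binder", O_RDWR | O_CLOEXEC); /* lands in binder_open below */
if (fd < 0) {
	/* no binder driver in this kernel */
}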
2.binder_open
Allocates and initializes the binder_proc structure: its four red-black trees (zeroed by kzalloc), the todo work list and the wait queue, and adds the new binder_proc to the global binder_procs list. Note: binder_proc is the per-process binder manager; it records the process's binder threads, the binder nodes this process owns, and its references to binder nodes in other processes.
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads; //binder threads of this process
struct rb_root nodes; //binder nodes owned by this process
struct rb_root refs_by_desc;//references to binder nodes in other processes, keyed by handle (desc)
struct rb_root refs_by_node;//the same references, keyed by node address
int pid;
struct vm_area_struct *vma; //user-space mapping of the binder buffer
struct mm_struct *vma_vm_mm;//memory descriptor the mapping belongs to
struct task_struct *tsk;//task that opened the driver
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
void *buffer;//start address of the buffer in kernel space
ptrdiff_t user_buffer_offset;//offset between the user-space and kernel-space addresses of the buffer
struct list_head buffers;
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
struct page **pages;
size_t buffer_size;
uint32_t buffer_free;
struct list_head todo;//todo list: work queued for this process
wait_queue_head_t wait;//wait queue: threads of this process blocked waiting for work
struct binder_stats stats;
struct list_head delivered_death;
int max_threads;//maximum number of binder threads
int requested_threads;
int requested_threads_started;
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
};
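To make the two reference trees concrete: when a command arrives carrying a handle, the driver resolves it against refs_by_desc with an ordinary red-black-tree walk. A simplified sketch, modeled on the driver's binder_get_ref:
static struct binder_ref *binder_get_ref(struct binder_proc *proc, uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;
	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (desc < ref->desc) //the handle is the sort key
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref; //found the reference to the remote binder
	}
	return NULL;
}
refs_by_node supports the reverse lookup (given a node address, find the reference) with the same pattern.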
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;//the per-process binder bookkeeping structure
proc = kzalloc(sizeof(*proc), GFP_KERNEL);//allocate a zeroed, contiguous binder_proc
if (proc == NULL)
return -ENOMEM;
get_task_struct(current);
proc->tsk = current;
INIT_LIST_HEAD(&proc->todo);//initialize the todo list
init_waitqueue_head(&proc->wait);//initialize the wait queue
proc->default_priority = task_nice(current);
//take the global binder mutex
binder_lock(__func__);
binder_stats_created(BINDER_STAT_PROC);
hlist_add_head(&proc->proc_node, &binder_procs);//add this binder_proc to the global binder_procs list
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
filp->private_data = proc;
binder_unlock(__func__);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
}
return 0;
}
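After open() succeeds, the client maps the transaction buffer, which is what triggers binder_mmap in the next step. A user-space sketch; the ~1 MB size below is the value libbinder's ProcessState uses, but anything up to the 4 MB cap enforced in binder_mmap would work:
#include <sys/mman.h>
#include <unistd.h>

/* 1 MB minus two guard pages, mapped read-only: user space only ever
 * reads transaction data; all writes happen through the kernel mapping */
size_t vm_size = (1 * 1024 * 1024) - sysconf(_SC_PAGE_SIZE) * 2;
void *vm_start = mmap(NULL, vm_size, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);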
3.binder_mmap
Reserves a kernel virtual address range the same size as the user-space mapping, then allocates physical memory one page at a time with alloc_page and maps each page into both the kernel range and the user-space range, so both sides see the same physical memory. (kzalloc is only used for the proc->pages pointer array, not for the buffer pages themselves.)
static int binder_mmap(struct file *filp, struct vm_area_struct *vma/*the user-space mapping*/)
{
int ret;
struct vm_struct *area;//kernel virtual address area
struct binder_proc *proc = filp->private_data;
const char *failure_string;
struct binder_buffer *buffer;
if ((vma->vm_end - vma->vm_start) > SZ_4M)//cap the mapping at 4 MB
vma->vm_end = vma->vm_start + SZ_4M;
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
mutex_lock(&binder_mmap_lock);
if (proc->buffer) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
}
area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);//reserve a kernel virtual address range the same size as the user-space mapping
proc->buffer = area->addr;//proc->buffer points at the kernel-space start address
proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;//fixed offset between the user-space and kernel-space addresses of the buffer
mutex_unlock(&binder_mmap_lock);
proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);//pointer array with one slot per page of the mapping
proc->buffer_size = vma->vm_end - vma->vm_start;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
//allocate the first physical page and map it into both address spaces; the rest of the buffer is populated on demand by later calls to binder_update_page_range
if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
buffer = proc->buffer;
INIT_LIST_HEAD(&proc->buffers);
list_add(&buffer->entry, &proc->buffers);
buffer->free = 1;
binder_insert_free_buffer(proc, buffer);
proc->free_async_space = proc->buffer_size / 2;//async transactions may use at most half of the buffer
barrier();
proc->files = get_files_struct(current);
proc->vma = vma;
proc->vma_vm_mm = vma->vm_mm;
/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
err_alloc_small_buf_failed:
kfree(proc->pages);
proc->pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_mmap_lock);
vfree(proc->buffer);
proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
return ret;
}
static int binder_update_page_range(struct binder_proc *proc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
struct vm_struct tmp_area;
struct page **page;
struct mm_struct *mm;
if (vma)
mm = NULL;
else
mm = get_task_mm(proc->tsk);
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
if (vma && mm != proc->vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
}
}
if (allocate == 0)//allocate == 0 means free the given range instead of allocating it
goto free_range;
//allocate and map pages
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
BUG_ON(*page);
//allocate one physical page (4 KB)
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
tmp_area.addr = page_addr;
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
//map the page into the kernel address space
ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
//map the same physical page into the user address space
user_page_addr =
(uintptr_t)page_addr + proc->user_buffer_offset;
ret = vm_insert_page(vma, user_page_addr, page[0]);
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return 0;
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
if (vma)
zap_page_range(vma, (uintptr_t)page_addr +
proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
__free_page(*page);
*page = NULL;
err_alloc_page_failed:
;
}
err_no_vma:
if (mm) {
up_write(&mm->mmap_sem);
mmput(mm);
}
return -ENOMEM;
}
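This mapping is what makes binder a single-copy IPC: a payload is copied once, from the sender's user space into the kernel side of the receiving process's buffer, and the receiver then reads the same physical pages directly through its own user-space mapping. When the driver later delivers a transaction (in binder_thread_read, outside the scope of this section), it translates the kernel buffer address for the receiver using exactly the offset computed in binder_mmap, roughly:
/* kernel-space payload address -> the receiver's user-space address */
tr.data.ptr.buffer = (binder_uintptr_t)(
	(uintptr_t)t->buffer->data + proc->user_buffer_offset);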
4.binder_ioctl
Everything data-related goes through this function, which makes it the driver's key entry point. It dispatches on the caller's command: reading and writing data, setting the context manager, adjusting the thread limit, and so on.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
binder_lock(__func__);
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ://read and/or write data
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS://set the maximum number of binder threads
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
case BINDER_SET_CONTEXT_MGR://become the context manager (servicemanager)
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
goto err;
break;
case BINDER_THREAD_EXIT://a binder thread is exiting
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_free_thread(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {//check the binder protocol version
struct binder_version __user *ver = ubuf;
if (size != sizeof(struct binder_version)) {
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
default:
ret = -EINVAL;
goto err;
}
ret = 0;
return ret;
}
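From the user-space side, the simpler commands look like this (a sketch; the UAPI header is <linux/android/binder.h> on recent kernels, plain <linux/binder.h> on older ones):
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

/* BINDER_VERSION: served by the last case above via put_user */
struct binder_version vers;
if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
	/* user space and driver disagree on the protocol */
}

/* BINDER_SET_MAX_THREADS: read with copy_from_user above;
 * 15 is the default libbinder passes, used here as an example */
uint32_t max_threads = 15;
ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);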
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
if (size != sizeof(struct binder_write_read)) {
ret = -EINVAL;
goto out;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {//copy the binder_write_read header from user space; it describes the buffers that carry commands such as BC_ENTER_LOOPER
ret = -EFAULT;
goto out;
}
if (bwr.write_size > 0) {//the caller has outgoing data: process its commands first
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {//the caller also wants to read: fill its read buffer
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))//if this process still has pending work, wake up its threads waiting on the wait queue
wake_up_interruptible(&proc->wait);
if (ret < 0) {//on error, copy the partially consumed bwr back to user space before bailing out
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {//copy the consumed counts back to user space
ret = -EFAULT;
goto out;
}
out:
return ret;
}
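A user-space sketch of driving this function, modeled on the binder_write helper in servicemanager: a single BC_ENTER_LOOPER command with read_size left at zero, so only the binder_thread_write branch runs (fd is the /dev/binder descriptor from the earlier sketches):
#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

uint32_t cmd = BC_ENTER_LOOPER; /* consumed by binder_thread_write */
struct binder_write_read bwr;
memset(&bwr, 0, sizeof(bwr));
bwr.write_buffer = (binder_uintptr_t)&cmd;
bwr.write_size = sizeof(cmd);
/* read_size == 0: the read branch is skipped entirely */
ioctl(fd, BINDER_WRITE_READ, &bwr);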