Analysis of a SIGBUS Crash Caused by FUSE

【Problem Description】

A SIGBUS crash frequently occurs while AssetManager is scanning an apk:

pid: 870, tid: 16106, name: Thread-6536  >>> com.xxx.guardprovider <<<
signal 7 (SIGBUS), code 0 (SI_USER), fault addr 0x9ed30253
    r0 00009a98  r1 b6206e40  r2 0000000c  r3 00000000
    r4 00000000  r5 9fd3f088  r6 0004f4e8  r7 00000000
    r8 00000000  r9 9f9e3800  sl 9fd3f088  fp 9ed30253
    ip b6206e58  sp 9fd3f000  lr b6bee519  pc b61fd6a8  cpsr 20000010

backtrace:
    #00 pc 0000c6a8  /system/lib/libz.so (inflate+944)
    #01 pc 0001c515  /system/lib/libandroidfw.so (_Z15inflateToBufferI12BufferReaderEbRT_Pvll+164)
    #02 pc 0001c597  /system/lib/libandroidfw.so (_ZN7android8ZipUtils15inflateToBufferEPvS1_ll+14)
    #03 pc 00010701  /system/lib/libandroidfw.so (_ZN7android16_CompressedAsset9getBufferEb+38)
    #04 pc 00011911  /system/lib/libandroidfw.so (_ZN7android12AssetManager9SharedZip21setResourceTableAssetEPNS_5AssetE+36)
    #05 pc 000122f9  /system/lib/libandroidfw.so (_ZN7android12AssetManager6ZipSet24setZipResourceTableAssetERKNS_7String8EPNS_5AssetE+28)
    #06 pc 000132cf  /system/lib/libandroidfw.so (_ZNK7android12AssetManager20appendPathToResTableERKNS0_10asset_pathEPj+194)
    #07 pc 0001375f  /system/lib/libandroidfw.so (_ZNK7android12AssetManager11getResTableEb+98)
    #08 pc 00087009  /system/lib/libandroid_runtime.so

memory near fp:
    9ed30230 b5ae1300 31f68d46 04f4e83c 135bbc00  ....F..1<.....[.
    9ed30240 00000e00 73657200 6372756f 612e7365  .....resources.a
    9ed30250 24637372 55bc65dc e030f6d5 410e8275  rsc$.e.U..0.u..A
    9ed30260 544441ba 151410b0 ba524c15 3bbbba53  .ADT.....LR.S..;
    9ed30270 a1eddd0f bbbbbbbb d0e1bbbb dcffe75d  ............]...
    9ed30280 fcf0fdf7 bd9f65ee cc739cd6 fb9cc631  .....e....s.1...

--->9ed30000-9ed7ffff r--    47d000     50000  /storage/emulated/0/UCDownloads/com.qihoo.appstore_300030395.apk

Strangely, the fault addr points to memory that is actually readable: debuggerd is able to dump its contents, as the "memory near fp" block above shows.

Another characteristic of this problem is that every occurrence happens while reading content from the sdcard.
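For context, the access pattern that triggers the crash can be approximated by the hypothetical user-space sketch below (this is not the original AssetManager code; the path is made up). The apk is mmap'ed read-only and then read through the mapping, so every first touch of a page is served by a FUSE-backed page fault, and a failed fault is delivered to the process as SIGBUS rather than as an error return:

/* Hypothetical reproduction sketch -- not the original AssetManager code.
 * Reads an mmap'ed file on the FUSE-backed sdcard; if the page fault
 * fails, the access below dies with SIGBUS instead of returning an error. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/storage/emulated/0/UCDownloads/test.apk"; /* made-up path */
    struct stat st;
    int fd = open(path, O_RDONLY);
    if (fd < 0 || fstat(fd, &st) < 0)
        return 1;

    unsigned char *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (p == MAP_FAILED)
        return 1;

    /* Touch every page: each first access faults and is served by the
     * sdcard FUSE daemon; a VM_FAULT_SIGBUS here kills the process. */
    unsigned long sum = 0;
    for (off_t off = 0; off < st.st_size; off += 4096)
        sum += p[off];

    printf("sum=%lu\n", sum);
    munmap(p, st.st_size);
    close(fd);
    return 0;
}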

 

【Problem Analysis】

Clearly the problem is related to the sdcard, which means it is related to the FUSE filesystem.

The faulting address lies inside a memory mapping of a file on the sdcard, and for such a mapping the FUSE filesystem only gets involved when a page fault has to be served.

So this looks like a failure in FUSE's page-fault handling. The vm_operations used for a fuse-backed mapping are:

static const struct vm_operations_struct fuse_file_vm_ops = {
    .close        = fuse_vma_close,
    .fault        = filemap_fault,
    .map_pages    = filemap_map_pages,
    .page_mkwrite    = fuse_page_mkwrite,
    .remap_pages    = generic_file_remap_pages,
};
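These operations are installed by fuse_file_mmap() when a fuse-backed file is mmap'ed. The snippet below is abridged from fs/fuse/file.c of this kernel era; the exact body may differ slightly between versions:

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
    if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
        fuse_link_write_file(file);

    file_accessed(file);
    vma->vm_ops = &fuse_file_vm_ops;   /* .fault == filemap_fault */
    return 0;
}

So a read fault on such a mapping goes through the generic filemap_fault(), which reads missing pages through fuse's readpage path.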

Logging is added at every place in filemap_fault() that can return VM_FAULT_SIGBUS:

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
    int error;
    struct file *file = vma->vm_file;
    struct address_space *mapping = file->f_mapping;
    struct file_ra_state *ra = &file->f_ra;
    struct inode *inode = mapping->host;
    pgoff_t offset = vmf->pgoff;
    struct page *page;
    loff_t size;
    int ret = 0;

    size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
    if (offset >= size >> PAGE_CACHE_SHIFT) {
        printk("filemap_fault :pid=%lld, tgid=%lld size=%lld, offset=%lld\n",
                      (long long)current->pid, (long long)current->tgid, (long long)size, (long long)offset);
        return VM_FAULT_SIGBUS;
    }

    /*
     * Do we have something in the page cache already?
     */
    page = find_get_page(mapping, offset);
    if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
        /*
         * We found the page, so try async readahead before
         * waiting for the lock.
         */
        do_async_mmap_readahead(vma, ra, file, page, offset);
    } else if (!page) {
        /* No page in the page cache at all */
        do_sync_mmap_readahead(vma, ra, file, offset);
        count_vm_event(PGMAJFAULT);
        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
        ret = VM_FAULT_MAJOR;
retry_find:
        page = find_get_page(mapping, offset);
        if (!page)
            goto no_cached_page;
    }

    if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
        page_cache_release(page);
        return ret | VM_FAULT_RETRY;
    }

    /* Did it get truncated? */
    if (unlikely(page->mapping != mapping)) {
        unlock_page(page);
        put_page(page);
        goto retry_find;
    }
    VM_BUG_ON_PAGE(page->index != offset, page);

    /*
     * We have a locked page in the page cache, now we need to check
     * that it's up-to-date. If not, it is going to be due to an error.
     */
    if (unlikely(!PageUptodate(page)))
        goto page_not_uptodate;

    /*
     * Found the page and have a reference on it.
     * We must recheck i_size under page lock.
     */
    size = round_up(i_size_read(inode), PAGE_CACHE_SIZE);
    if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) {
        unlock_page(page);
        page_cache_release(page);
        printk("filemap_fault retry_find:pid=%lld, tgid=%lld size=%lld, offset=%lld\n",
                      (long long)current->pid, (long long)current->tgid, (long long)size, (long long)offset);

        return VM_FAULT_SIGBUS;
    }

    vmf->page = page;
    return ret | VM_FAULT_LOCKED;

no_cached_page:
    /*
     * We're only likely to ever get here if MADV_RANDOM is in
     * effect.
     */
    error = page_cache_read(file, offset);

    /*
     * The page we want has now been added to the page cache.
     * In the unlikely event that someone removed it in the
     * meantime, we'll just come back here and read it again.
     */
    if (error >= 0)
        goto retry_find;

    /*
     * An error return from page_cache_read can result if the
     * system is low on memory, or a problem occurs while trying
     * to schedule I/O.
     */
    if (error == -ENOMEM)
        return VM_FAULT_OOM;
    printk("filemap_fault no_cached_page:pid=%lld, tgid=%lld size=%lld, offset=%lld, error=%d\n",
                  (long long)current->pid, (long long)current->tgid, (long long)size, (long long)offset, error);
    return VM_FAULT_SIGBUS;

page_not_uptodate:
    /*
     * Umm, take care of errors if the page isn't up-to-date.
     * Try to re-read it _once_. We do this synchronously,
     * because there really aren't any performance issues here
     * and we need to check for errors.
     */
    ClearPageError(page);
    error = mapping->a_ops->readpage(file, page);
    if (!error) {
        wait_on_page_locked(page);
        if (!PageUptodate(page))
            error = -EIO;
    }
    page_cache_release(page);

    if (!error || error == AOP_TRUNCATED_PAGE)
        goto retry_find;

    /* Things didn't work out. Return zero to tell the mm layer so. */
    shrink_readahead_size_eio(file, ra);
    printk("filemap_fault page_not_uptodate:pid=%lld, tgid=%lld size=%lld, offset=%lld, error=%d\n",
                  (long long)current->pid, (long long)current->tgid, (long long)size, (long long)offset, error);
    return VM_FAULT_SIGBUS;
}

After reproducing the problem, the logs show that it is always mapping->a_ops->readpage() that returns -4, i.e. -EINTR; for a fuse file this readpage callback is fuse_readpage().

#define EINTR 4 /* Interrupted system call */

static int fuse_readpage(struct file *file, struct page *page)
{
    struct inode *inode = page->mapping->host;
    int err;

    err = -EIO;
    if (is_bad_inode(inode))
        goto out;

    err = fuse_do_readpage(file, page);
    fuse_invalidate_atime(inode);
 out:
    unlock_page(page);
    return err;
}

static int fuse_do_readpage(struct file *file, struct page *page)
{
    struct fuse_io_priv io = { .async = 0, .file = file };
    struct inode *inode = page->mapping->host;
    struct fuse_conn *fc = get_fuse_conn(inode);
    struct fuse_req *req;
    size_t num_read;
    loff_t pos = page_offset(page);
    size_t count = PAGE_CACHE_SIZE;
    u64 attr_ver;
    int err;

    /*
     * Page writeback can extend beyond the lifetime of the
     * page-cache page, so make sure we read a properly synced
     * page.
     */
    fuse_wait_on_page_writeback(inode, page->index);

    req = fuse_get_req(fc, 1);
    if (IS_ERR(req))
        return PTR_ERR(req);

    attr_ver = fuse_get_attr_version(fc);

    req->out.page_zeroing = 1;
    req->out.argpages = 1;
    req->num_pages = 1;
    req->pages[0] = page;
    req->page_descs[0].length = count;
    num_read = fuse_send_read(req, &io, pos, count, NULL);
    err = req->out.h.error;

    if (!err) {
        /*
         * Short read means EOF.  If file size is larger, truncate it
         */
        if (num_read < count)
            fuse_short_read(req, inode, attr_ver);

        SetPageUptodate(page);
    }

    fuse_put_request(fc, req);

    return err;
}

static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
                 loff_t pos, size_t count, fl_owner_t owner)
{
    struct file *file = io->file;
    struct fuse_file *ff = file->private_data;
    struct fuse_conn *fc = ff->fc;

    fuse_read_fill(req, file, pos, count, FUSE_READ);
    if (owner != NULL) {
        struct fuse_read_in *inarg = &req->misc.read.in;

        inarg->read_flags |= FUSE_READ_LOCKOWNER;
        inarg->lock_owner = fuse_lock_owner_id(fc, owner);
    }

    if (io->async)
        return fuse_async_req_send(fc, req, count, io);

    fuse_request_send(fc, req);
    return req->out.args[0].size;
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
    req->isreply = 1;
    __fuse_request_send(fc, req);
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
    BUG_ON(req->background);
    spin_lock(&fc->lock);
    if (!fc->connected)
        req->out.h.error = -ENOTCONN;
    else if (fc->conn_error)
        req->out.h.error = -ECONNREFUSED;
    else {
        req->in.h.unique = fuse_get_unique(fc);
        queue_request(fc, req);
        /* acquire extra reference, since request is still needed
           after request_end() */
        __fuse_get_request(req);

        request_wait_answer(fc, req);
    }
    spin_unlock(&fc->lock);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
    if (!fc->no_interrupt) {
        /* Any signal may interrupt this */
        wait_answer_interruptible(fc, req);

        if (req->aborted)
            goto aborted;
        if (req->state == FUSE_REQ_FINISHED)
            return;

        req->interrupted = 1;
        if (req->state == FUSE_REQ_SENT)
            queue_interrupt(fc, req);
    }

    if (!req->force) {
        sigset_t oldset;

        /* Only fatal signals may interrupt this */
        block_sigs(&oldset);
        wait_answer_interruptible(fc, req);
        restore_sigs(&oldset);

        if (req->aborted)
            goto aborted;
        if (req->state == FUSE_REQ_FINISHED)
            return;

        /* Request is not yet in userspace, bail out */
        if (req->state == FUSE_REQ_PENDING) {
            list_del(&req->list);
            __fuse_put_request(req);
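            /* As confirmed by the log analysis below, this -EINTR is what
             * fuse_readpage() ends up returning, and what filemap_fault()
             * then converts into VM_FAULT_SIGBUS. */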
            req->out.h.error = -EINTR;
            return;
        }
    }

    /*
     * Either request is already in userspace, or it was forced.
     * Wait it out.
     */
    spin_unlock(&fc->lock);
    wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
    spin_lock(&fc->lock);

    if (!req->aborted)
        return;

 aborted:
    BUG_ON(req->state != FUSE_REQ_FINISHED);
    if (req->locked) {
        /* This is uninterruptible sleep, because data is
           being copied to/from the buffers of req.  During
           locked state, there mustn't be any filesystem
           operation (e.g. page fault), since that could lead
           to deadlock */
        spin_unlock(&fc->lock);
        wait_event(req->waitq, !req->locked);
        spin_lock(&fc->lock);
    }
}

With more logging, it was finally confirmed that it is the following wait_answer_interruptible() call (the one wrapped in block_sigs()/restore_sigs()) that gets interrupted:

        block_sigs(&oldset);
        wait_answer_interruptible(fc, req);
        restore_sigs(&oldset);

where:

static void block_sigs(sigset_t *oldset)
{
    sigset_t mask;

    siginitsetinv(&mask, sigmask(SIGKILL));
    sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
    sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
                      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
    if (signal_pending(current))
        return;

    spin_unlock(&fc->lock);
    wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
    spin_lock(&fc->lock);
}

The intent of this code is:

1. Block all signals except SIGKILL.

2. If a signal is pending at this point (which should only ever be SIGKILL), return.

3. Otherwise go into an interruptible sleep.

In other words, nothing except SIGKILL should be able to interrupt this wait.

Yet the logs show that when the problem occurs, the pending sigset_t contains no signal at all!

Looking at the code, signal_pending() does not examine the sigset_t at all; it only tests the TIF_SIGPENDING flag in thread_info:

static inline int signal_pending(struct task_struct *p)
{
    return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
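By contrast, fatal_signal_pending() (from include/linux/sched.h) additionally checks whether SIGKILL is really queued in the pending set; this distinction is what the fix below relies on:

static inline int __fatal_signal_pending(struct task_struct *p)
{
    return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
    return signal_pending(p) && __fatal_signal_pending(p);
}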

So some code path probably sets TIF_SIGPENDING without queuing a real signal.

Going through the logs carefully, every occurrence of the problem is accompanied by the following messages:

[21814.557813] PM: suspend entry 2016-11-17 02:07:33.679008730 UTC
[21814.577530] PM: Syncing filesystems ...
[21814.577533] done.
[21814.608656] Freezing user space processes ...

The system is entering suspend and freezing user-space processes. The relevant code:

int freeze_processes(void)
{
    ...
    printk("Freezing user space processes ... ");
    pm_freezing = true;
    oom_kills_saved = oom_kills_count();
    error = try_to_freeze_tasks(true);
    ...
    return error;
}

static int try_to_freeze_tasks(bool user_only)
{
    ...
    while (true) {
        todo = 0;
        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
            if (p == current || !freeze_task(p))
                continue;

            if (!freezer_should_skip(p))
                todo++;
        }
        ...
    }
    ...
    return todo ? -EBUSY : 0;
}

bool freeze_task(struct task_struct *p)
{
    ...

    if (!(p->flags & PF_KTHREAD))
        fake_signal_wake_up(p);
    else
        wake_up_state(p, TASK_INTERRUPTIBLE);

    spin_unlock_irqrestore(&freezer_lock, flags);
    return true;
}

static void fake_signal_wake_up(struct task_struct *p)
{
    unsigned long flags;

    if (lock_task_sighand(p, &flags)) {
        signal_wake_up(p, 0);
        unlock_task_sighand(p, &flags);
    }
}

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
    signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
    set_tsk_thread_flag(t, TIF_SIGPENDING);

    if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
        kick_process(t);
}
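Note that the freezer sets TIF_SIGPENDING without queuing any signal, and merely blocking signals does not make the flag go away: in kernels of this era, recalc_sigpending() (kernel/signal.c) deliberately keeps the flag set while the task is being frozen, so the block_sigs() call in the FUSE path offers no protection against it:

void recalc_sigpending(void)
{
    /* keep TIF_SIGPENDING set while the task is being frozen */
    if (!recalc_sigpending_tsk(current) && !freezing(current))
        clear_thread_flag(TIF_SIGPENDING);
}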

So the wait was interrupted by the freeze path: freeze_task() calls fake_signal_wake_up(), which sets TIF_SIGPENDING and wakes the task without queuing any real signal, which is exactly why the pending sigset_t was empty.

In other words, wait_answer_interruptible() can indeed be interrupted by the freezer, even with every signal except SIGKILL blocked.

 

【Solution】

In request_wait_answer(), the sequence

        block_sigs(&oldset);
        wait_answer_interruptible(fc, req);
        restore_sigs(&oldset);

is changed to

        wait_answer_killable(fc, req);

where:

@fs/fuse/dev.c
static void wait_answer_killable(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        if (fatal_signal_pending(current))
                return;

        spin_unlock(&fc->lock);
        wait_event_killable(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
}

@include/linux/wait.h
#define __wait_event_killable_exclusive(wq, condition, ret)            \
do {                                                                   \
       DEFINE_WAIT(__wait);                                            \
                                                                       \
       for (;;) {                                                      \
               prepare_to_wait_exclusive(&wq, &__wait, TASK_KILLABLE); \
               if (condition)                                          \
                       break;                                          \
               if (!fatal_signal_pending(current)) {                   \
                       schedule();                                     \
                       continue;                                       \
               }                                                       \
               ret = -ERESTARTSYS;                                     \
               break;                                                  \
       }                                                               \
       finish_wait(&wq, &__wait);                                      \
} while (0)


#define wait_event_killable_exclusive(wq, condition)                   \
({                                                                     \
       int __ret = 0;                                                  \
       if (!(condition))                                               \
               __wait_event_killable_exclusive(wq, condition, __ret);  \
       __ret;                                                          \
})
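Why this works: wait_event_killable(), which wait_answer_killable() calls, puts the task into TASK_KILLABLE sleep and only gives up when a fatal signal (SIGKILL) is actually pending. A simplified sketch of its inner loop is shown below (the exact macro differs between kernel versions); it mirrors the exclusive variant above but uses prepare_to_wait() and fatal_signal_pending():

/* Simplified sketch of wait_event_killable()'s inner loop -- exact form
 * varies by kernel version; shown only to illustrate the TASK_KILLABLE wait. */
#define __wait_event_killable(wq, condition, ret)                      \
do {                                                                   \
       DEFINE_WAIT(__wait);                                            \
                                                                       \
       for (;;) {                                                      \
               prepare_to_wait(&wq, &__wait, TASK_KILLABLE);           \
               if (condition)                                          \
                       break;                                          \
               if (!fatal_signal_pending(current)) {                   \
                       schedule();                                     \
                       continue;                                       \
               }                                                       \
               ret = -ERESTARTSYS;                                     \
               break;                                                  \
       }                                                               \
       finish_wait(&wq, &__wait);                                      \
} while (0)

Because the freezer's fake_signal_wake_up() sets TIF_SIGPENDING without queuing SIGKILL, fatal_signal_pending() remains false, so the task keeps waiting for the FUSE reply instead of bailing out with -EINTR, and filemap_fault() no longer turns an interrupted read into VM_FAULT_SIGBUS.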