When a user-space process exits, the kernel unmaps its mmap resources

When a user-space process exits, exit_mmap() is called to release all of its mmaps. The call stack is as follows:

do_exit
exit_mm
mmput
__mmput
exit_mmap
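
This is easy to observe from user space: a process can mmap memory and exit without ever calling munmap(), and the kernel reclaims the mapping through the path above. A minimal sketch (the 16 MiB size is arbitrary):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
    /* Map 16 MiB of anonymous memory and deliberately never munmap() it. */
    size_t len = 16UL << 20;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }

    printf("mapped %zu bytes at %p, exiting without munmap()\n", len, p);
    /* On return, do_exit() -> ... -> exit_mmap() tears the mapping down. */
    return 0;
}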

mm/mmap.c

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
    struct mmu_gather tlb;
    struct vm_area_struct *vma;
    unsigned long nr_accounted = 0;

    /* mm's last user has gone, and its about to be pulled down */
    mmu_notifier_release(mm);

    if (unlikely(mm_is_oom_victim(mm))) {
        /*
         * Manually reap the mm to free as much memory as possible.
         * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
         * this mm from further consideration.  Taking mm->mmap_lock for
         * write after setting MMF_OOM_SKIP will guarantee that the oom
         * reaper will not run on this mm again after mmap_lock is
         * dropped.
         *
         * Nothing can be holding mm->mmap_lock here and the above call
         * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
         * __oom_reap_task_mm() will not block.
         *
         * This needs to be done before calling munlock_vma_pages_all(),
         * which clears VM_LOCKED, otherwise the oom reaper cannot
         * reliably test it.
         */
        (void)__oom_reap_task_mm(mm);

        set_bit(MMF_OOM_SKIP, &mm->flags);
        mmap_write_lock(mm);
        mmap_write_unlock(mm);
    }

    if (mm->locked_vm)
        unlock_range(mm->mmap, ULONG_MAX);

    arch_exit_mmap(mm);

    vma = mm->mmap;
    if (!vma)    /* Can happen if dup_mmap() received an OOM */
        return;

    lru_add_drain();
    flush_cache_mm(mm);
    tlb_gather_mmu_fullmm(&tlb, mm);
    /* update_hiwater_rss(mm) here? but nobody should be looking */
    /* Use -1 here to ensure all VMAs in the mm are unmapped */
    unmap_vmas(&tlb, vma, 0, -1);
    free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
    tlb_finish_mmu(&tlb);

    /*
     * Walk the list again, actually closing and freeing it,
     * with preemption enabled, without holding any MM locks.
     */
    while (vma) {
        if (vma->vm_flags & VM_ACCOUNT)
            nr_accounted += vma_pages(vma);
        vma = remove_vma(vma);
        cond_resched();
    }
    vm_unacct_memory(nr_accounted);
}

exit_mmap() then calls unmap_vmas(). Note that its end_addr parameter is unsigned long, so the -1 passed above becomes 0xffffffffffffffff (ULONG_MAX on a 64-bit kernel) inside unmap_vmas(); a small demonstration follows the function:

void unmap_vmas(struct mmu_gather *tlb,
        struct vm_area_struct *vma, unsigned long start_addr,
        unsigned long end_addr)
{
    struct mmu_notifier_range range;

    mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
                start_addr, end_addr);
    mmu_notifier_invalidate_range_start(&range);
    for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
        unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
    mmu_notifier_invalidate_range_end(&range);
}
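
To see the conversion concretely, here is a minimal user-space sketch (not kernel code) of what happens when -1 is assigned to an unsigned long:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* -1 converted to unsigned long wraps to the maximum value. */
    unsigned long end_addr = -1;

    printf("end_addr = %#lx\n", end_addr);   /* 0xffffffffffffffff on 64-bit */
    printf("equals ULONG_MAX: %d\n", end_addr == ULONG_MAX);
    return 0;
}

Since every VMA satisfies vma->vm_start < ULONG_MAX, the loop condition vma->vm_start < end_addr holds for all of them, so the whole address space is covered.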

For each VMA, unmap_single_vma() is called:

static void unmap_single_vma(struct mmu_gather *tlb,
        struct vm_area_struct *vma, unsigned long start_addr,
        unsigned long end_addr,
        struct zap_details *details)
{
    unsigned long start = max(vma->vm_start, start_addr);
    unsigned long end;

    if (start >= vma->vm_end)
        return;
    end = min(vma->vm_end, end_addr);
    if (end <= vma->vm_start)
        return;

    if (vma->vm_file)
        uprobe_munmap(vma, start, end);

    if (unlikely(vma->vm_flags & VM_PFNMAP))
        untrack_pfn(vma, 0, 0);

    if (start != end) {
        if (unlikely(is_vm_hugetlb_page(vma))) {
            /*
             * It is undesirable to test vma->vm_file as it
             * should be non-null for valid hugetlb area.
             * However, vm_file will be NULL in the error
             * cleanup path of mmap_region. When
             * hugetlbfs ->mmap method fails,
             * mmap_region() nullifies vma->vm_file
             * before calling this function to clean up.
             * Since no pte has actually been setup, it is
             * safe to do nothing in this case.
             */
            if (vma->vm_file) {
                i_mmap_lock_write(vma->vm_file->f_mapping);
                __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
                i_mmap_unlock_write(vma->vm_file->f_mapping);
            }
        } else
            unmap_page_range(tlb, vma, start, end, details);
    }
}
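
The clamping at the top of the function is the key detail: the requested range [start_addr, end_addr) is intersected with the VMA's own [vm_start, vm_end), so the full-address-space request from exit_mmap() only ever touches pages the VMA actually covers. A standalone sketch with hypothetical addresses:

#include <stdio.h>

/*
 * Hypothetical demonstration of the start/end clamping in
 * unmap_single_vma(): intersect [start_addr, end_addr) with
 * [vm_start, vm_end).
 */
static void clamp_range(unsigned long vm_start, unsigned long vm_end,
                        unsigned long start_addr, unsigned long end_addr)
{
    unsigned long start = vm_start > start_addr ? vm_start : start_addr;
    unsigned long end = vm_end < end_addr ? vm_end : end_addr;

    if (start >= vm_end || end <= vm_start)
        printf("no overlap, nothing to unmap\n");
    else
        printf("zap [%#lx, %#lx)\n", start, end);
}

int main(void)
{
    /* The exit_mmap() case: zap [0, ULONG_MAX); the VMA bounds the range. */
    clamp_range(0x400000, 0x600000, 0, -1UL);  /* -> zap [0x400000, 0x600000) */
    return 0;
}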

Finally, unmap_page_range() is called to unmap the pages of a single VMA.
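
For completeness, here is a simplified sketch of unmap_page_range() as it looks in kernels of this era (~v5.13): it walks the top-level page table across the range and descends level by level via zap_p4d_range() -> zap_pud_range() -> zap_pmd_range() -> zap_pte_range(), where the PTEs are finally cleared and the pages handed to the mmu_gather for freeing:

void unmap_page_range(struct mmu_gather *tlb,
            struct vm_area_struct *vma,
            unsigned long addr, unsigned long end,
            struct zap_details *details)
{
    pgd_t *pgd;
    unsigned long next;

    BUG_ON(addr >= end);
    tlb_start_vma(tlb, vma);
    pgd = pgd_offset(vma->vm_mm, addr);
    do {
        next = pgd_addr_end(addr, end);
        if (pgd_none_or_clear_bad(pgd))
            continue;
        /* Descend: p4d -> pud -> pmd -> pte, clearing entries as we go. */
        next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
    } while (pgd++, addr = next, addr != end);
    tlb_end_vma(tlb, vma);
}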
