__virt_to_phys and __phys_to_virt (AArch64)

Start from __get_free_pages(), which allocates pages and returns their lowmem (linear-map) virtual address.

kernel/4.9/mm/page_alloc.c
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
    struct page *page;

    /*
     * __get_free_pages() returns a 32-bit address, which cannot represent
     * a highmem page
     */
    VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

    page = alloc_pages(gfp_mask, order);
    if (!page)
        return 0;
    return (unsigned long) page_address(page);
}
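A minimal kernel-side usage sketch (demo_alloc is a hypothetical name; order 2 means 2^2 = 4 contiguous pages, and free_pages() must be given the same order back):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int demo_alloc(void)
{
    /* must not pass __GFP_HIGHMEM, see the VM_BUG_ON above */
    unsigned long addr = __get_free_pages(GFP_KERNEL, 2);

    if (!addr)
        return -ENOMEM;

    /* addr is a linear-map VA: page_address(virt_to_page(addr)) == addr */
    free_pages(addr, 2);
    return 0;
}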

 

kernel/4.9/include/linux/mm.h
#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

 

static __always_inline void *lowmem_page_address(const struct page *page)
{
    return page_to_virt(page);
}

 

kernel/4.9/include/linux/mm.h
#ifndef page_to_virt
#define page_to_virt(x)    __va(PFN_PHYS(page_to_pfn(x)))
#endif

  

kernel/4.9/include/asm-generic/memory_model.h
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)    (vmemmap + (pfn)) // from these two macros, a pfn is simply the page's index into the vmemmap array
#define __page_to_pfn(page)    (unsigned long)((page) - vmemmap)

 

kernel/4.9/include/linux/pfn.h

#define PFN_PHYS(x)    ((phys_addr_t)(x) << PAGE_SHIFT)

kernel/4.9/arch/arm64/include/asm/memory.h

#ifndef __phys_to_virt
#define __phys_to_virt(x)    ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#endif
#define __va(x)            ((void *)__phys_to_virt((phys_addr_t)(x)))
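Putting the chain together: page_address(page) expands to __va(PFN_PHYS(page_to_pfn(page))). Below is a userspace sketch of that arithmetic, assuming VA_BITS = 39, PAGE_SHIFT = 12 and a hypothetical PHYS_OFFSET of 0x40000000 (the real value is the DRAM base from the boot memory map):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define VA_BITS      39
#define PAGE_SHIFT   12
#define PAGE_OFFSET  (0xffffffffffffffffULL - (1ULL << (VA_BITS - 1)) + 1)
#define PHYS_OFFSET  0x40000000ULL               /* hypothetical DRAM start */

/* userspace emulation of PFN_PHYS and __phys_to_virt */
#define PFN_PHYS(x)        ((uint64_t)(x) << PAGE_SHIFT)
#define __phys_to_virt(x)  (((x) - PHYS_OFFSET) | PAGE_OFFSET)

int main(void)
{
    uint64_t pfn  = 0xfa193;                     /* sample pfn from this post */
    uint64_t phys = PFN_PHYS(pfn);               /* 0xfa193000 */
    uint64_t virt = __phys_to_virt(phys);        /* 0xffffffc0ba193000 */

    printf("pfn=%#" PRIx64 " phys=%#" PRIx64 " virt=%#" PRIx64 "\n",
           pfn, phys, virt);
    return 0;
}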

 

virt_to_page

kernel/4.9/arch/arm64/include/asm/memory.h

#define __virt_to_phys(x) ({                        \
    phys_addr_t __x = (phys_addr_t)(x);                \
    __x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET :    \
                 (__x - kimage_voffset); })

#define __pa(x)            __virt_to_phys((unsigned long)(x))

#define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

 

If VA_BITS = 39:

#define PAGE_OFFSET    (UL(0xffffffffffffffff) - \
    (UL(1) << (VA_BITS - 1)) + 1)
==>
PAGE_OFFSET = 0xFFFFFFC000000000

 

In the __virt_to_phys macro above, BIT(VA_BITS - 1) is BIT(38), i.e. 0x4000000000. Every linear-map address lies at or above PAGE_OFFSET = 0xFFFFFFC000000000, so ANDing it with BIT(38) gives a nonzero result, and the macro takes the (__x & ~PAGE_OFFSET) + PHYS_OFFSET branch, which is exactly the inverse of the operation in __phys_to_virt.
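A quick userspace check of the branch test, with the same assumed PHYS_OFFSET of 0x40000000 as in the sketch above:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define VA_BITS      39
#define PAGE_OFFSET  (0xffffffffffffffffULL - (1ULL << (VA_BITS - 1)) + 1)
#define BIT(n)       (1ULL << (n))
#define PHYS_OFFSET  0x40000000ULL               /* hypothetical DRAM start */

int main(void)
{
    uint64_t va = 0xffffffc0ba193000ULL;         /* a linear-map VA */

    /* BIT(38) = 0x4000000000 is set for every VA at or above PAGE_OFFSET */
    printf("linear: %d\n", (va & BIT(VA_BITS - 1)) != 0);

    /* the linear branch of __virt_to_phys, inverse of __phys_to_virt */
    printf("pa: %#" PRIx64 "\n", (va & ~PAGE_OFFSET) + PHYS_OFFSET);
    return 0;
}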

 

vmemmap = (struct page *)ffffffbdbf000000 // a macro, so its value can't be read from a KE minidump, but it can be dumped to the kernel log

example:

pfn = fa193
=> page = (struct page *)ffffffbdbf000000 + fa193
        = (struct page *)(ffffffbdbf000000 + fa193 * 64)
        = (struct page *)ffffffbdc2e864c0

page = ffffffbdc2e864c0
=> pfn = (unsigned long)((struct page *)ffffffbdc2e864c0 - (struct page *)ffffffbdbf000000)
       = (unsigned long)((ffffffbdc2e864c0 - ffffffbdbf000000) / 64)
       = fa193

(64 is sizeof(struct page) on this build; pointer arithmetic scales by it automatically.)
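The same arithmetic as a compilable userspace sketch (sizeof(struct page) is hard-coded to the 64 bytes observed on this build):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define SIZEOF_PAGE 64ULL      /* sizeof(struct page) on this build */

int main(void)
{
    uint64_t vmemmap = 0xffffffbdbf000000ULL;    /* value from this post */
    uint64_t pfn = 0xfa193;

    /* __pfn_to_page: vmemmap + pfn scales by sizeof(struct page) */
    uint64_t page = vmemmap + pfn * SIZEOF_PAGE;
    printf("page=%#" PRIx64 "\n", page);         /* 0xffffffbdc2e864c0 */

    /* __page_to_pfn: pointer subtraction divides by sizeof(struct page) */
    printf("pfn=%#" PRIx64 "\n", (page - vmemmap) / SIZEOF_PAGE);
    return 0;
}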

 

PTE to PFN / PFN to PTE

kernel/4.9/arch/arm64/include/asm/pgtable.h

#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
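A userspace sketch of the two macros, assuming PAGE_SHIFT = 12 and the arm64 default PHYS_MASK_SHIFT of 48; pte_val/__pte are treated as identities and the prot bits are made up:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define PAGE_SHIFT       12
#define PHYS_MASK_SHIFT  48
#define PHYS_MASK        ((1ULL << PHYS_MASK_SHIFT) - 1)

/* emulation: a real pte also carries attribute bits above PHYS_MASK
 * and below PAGE_SHIFT, which pte_pfn masks and shifts away */
#define pte_pfn(pte)       (((pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) (((uint64_t)(pfn) << PAGE_SHIFT) | (prot))

int main(void)
{
    uint64_t prot = 0x0060000000000713ULL;       /* made-up attribute bits */
    uint64_t pte  = pfn_pte(0xfa193, prot);

    printf("pte=%#" PRIx64 " pfn=%#" PRIx64 "\n", pte, pte_pfn(pte));
    return 0;
}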

 

__pa/__va macros

To go from a PA to a VA, look at the implementation of __va: it subtracts PHYS_OFFSET and then ORs in PAGE_OFFSET (equivalent to adding it, since the remaining offset bits never overlap PAGE_OFFSET). PHYS_OFFSET is the start address of the system DRAM.

#define __va(x)            ((void *)__phys_to_virt((phys_addr_t)(x)))
#define __phys_to_virt(x)    ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)

 

Now look at __pa: it uses __is_lm_address() to check whether the virtual address is a linear-map address. If it is, the address is ANDed with ~PAGE_OFFSET and PHYS_OFFSET is added; otherwise the physical address is derived via __kimg_to_phys. (Later kernels refactor the 4.9 macro shown earlier into the helpers below; the logic is the same.)

The linear-map check tests BIT(VA_BITS - 1) of the virtual address: if the bit is 1, the address is a linear-map address; otherwise it is not. With VA_BITS = 39 this is BIT(38), i.e. bit 2 of the seventh hex digit, the final F in 0xFFFFFFFXXXXXXXXX.

So for a linear-map virtual address, __pa and __va perform exactly inverse transformations.

#define __lm_to_phys(addr)    (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)

 

#define __is_lm_address(addr)    (!!((addr) & BIT(VA_BITS - 1)))

 

#define __pa(x)            __virt_to_phys((unsigned long)(x))
#define __virt_to_phys(x)    __virt_to_phys_nodebug(x)
#define __virt_to_phys_nodebug(x) ({                    \
    phys_addr_t __x = (phys_addr_t)(x);                \
    __is_lm_address(__x) ? __lm_to_phys(__x) :            \
                   __kimg_to_phys(__x);            \
})
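A userspace sketch of the round trip for a linear-map address, again with PHYS_OFFSET assumed to be 0x40000000; the kimage branch (__kimg_to_phys) is omitted:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define VA_BITS      39
#define PAGE_OFFSET  (0xffffffffffffffffULL - (1ULL << (VA_BITS - 1)) + 1)
#define BIT(n)       (1ULL << (n))
#define PHYS_OFFSET  0x40000000ULL               /* hypothetical DRAM start */

/* userspace emulation of the linear-map half of __pa/__va */
#define __is_lm_address(addr)  (!!((addr) & BIT(VA_BITS - 1)))
#define __lm_to_phys(addr)     (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __phys_to_virt(x)      (((x) - PHYS_OFFSET) | PAGE_OFFSET)

int main(void)
{
    uint64_t pa = 0xfa193000ULL;
    uint64_t va = __phys_to_virt(pa);

    /* linear-map VAs round-trip exactly: __pa(__va(x)) == x */
    printf("va=%#" PRIx64 " (linear: %d)\n", va, __is_lm_address(va));
    printf("pa=%#" PRIx64 "\n", __lm_to_phys(va));
    return 0;
}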

 
