MMU

page : a page of the virtual (logical) address space

page frame : a page of physical memory

struct page
{
    unsigned long flags;              /* status flags (locked, dirty, referenced, ...) */
    atomic_t count;                   /* reference count */
    struct list_head list;            /* e.g. the mapping's clean/dirty list */
    struct address_space *mapping;    /* the address_space this page belongs to */
    unsigned long index;              /* offset within the mapping */
    union
    {
        struct pte_chain *chain;      /* reverse-mapping pte chain */
        pte_addr_t direct;            /* the single pte mapping this page */
    } pte;
    unsigned long private;            /* mapping-private opaque data */
#if defined(WANT_PAGE_VIRTUAL)
    void *virtual;                    /* kernel virtual address (NULL if not mapped) */
#endif
};
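A struct page describes one page frame, not the data in it. Below is a minimal sketch of how kernel code might obtain a frame from the buddy allocator and touch it through its struct page; page_demo is a hypothetical helper assumed to run in a kernel-module context.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/string.h>

static void page_demo(void)
{
    /* order-0 allocation: one page frame, described by one struct page */
    struct page *pg = alloc_pages(GFP_KERNEL, 0);

    if (!pg)
        return;

    memset(page_address(pg), 0, PAGE_SIZE);  /* touch the frame via its kernel mapping */
    __free_pages(pg, 0);                     /* return the frame to the buddy system */
}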

Here is the definition of a memory zone:

struct zone
{
    spinlock_t lock;                  /* protects the free lists during alloc/free */
    unsigned long free_pages;         /* number of free pages left in this zone */
    unsigned long pages_min, pages_low, pages_high;  /* watermarks the kernel uses to judge memory pressure */

    spinlock_t lru_lock;
    struct list_head active_list;     /* recently used pages */
    struct list_head inactive_list;   /* candidates for reclaim */
    atomic_t refill_counter;
    unsigned long nr_active, nr_inactive;
    int all_unreclaimable;
    unsigned long pages_scanned;

    int temp_priority;
    int prev_priority;

    struct free_area free_area[MAX_ORDER];  /* buddy-system free lists, one per order */
    wait_queue_head_t *wait_table;
    unsigned long wait_table_size;
    unsigned long wait_table_bits;
};
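free_area[MAX_ORDER] is the heart of the buddy system: each entry holds free blocks of 2^order contiguous page frames. A minimal sketch of how a byte count maps onto an order and is served from those lists (buddy_alloc_demo is a hypothetical helper):

#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/page.h>

static void *buddy_alloc_demo(size_t bytes)
{
    /* The buddy allocator hands out power-of-two blocks of pages:
     * zone->free_area[order] holds free blocks of 2^order pages. */
    unsigned int order = get_order(bytes);   /* e.g. 5000 bytes -> order 1 with 4K pages */
    unsigned long addr = __get_free_pages(GFP_KERNEL, order);

    return (void *)addr;   /* caller releases it with free_pages(addr, order) */
}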

struct kmem_cache_s
{
    struct kmem_list3 lists;          /* lists of full, partial and free slabs */
    unsigned int objsize;             /* size of each object in the cache */
    unsigned int flags;               /* cache flags */
    unsigned int num;                 /* number of objects per slab */

    unsigned int gfporder;            /* slab size: 2^gfporder pages */
    unsigned int gfpflags;            /* GFP flags used when allocating slab pages */

    size_t color;                     /* cache colouring range */
    unsigned int color_off;           /* colour offset */
    unsigned int color_next;          /* next colour to use */
    kmem_cache_t *slabp_cache;        /* cache holding off-slab slab descriptors */
    unsigned int dflags;              /* dynamic flags */

    void (*ctor)(void *, kmem_cache_t *, unsigned long);  /* object constructor */
    void (*dtor)(void *, kmem_cache_t *, unsigned long);  /* object destructor */

    const char *name;                 /* name of the cache */
    struct list_head next;            /* link in the global cache chain */
};
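A sketch of the matching 2.6-era slab API (the six-argument kmem_cache_create with ctor/dtor, as in the struct above; later kernels dropped the dtor). struct my_obj and cache_demo are hypothetical names:

#include <linux/slab.h>
#include <linux/errno.h>

struct my_obj {                       /* hypothetical object type */
    int id;
    char name[32];
};

static kmem_cache_t *my_cache;

static int cache_demo(void)
{
    struct my_obj *o;

    /* 2.6-era signature: name, object size, align, flags, ctor, dtor */
    my_cache = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
                                 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
    if (!my_cache)
        return -ENOMEM;

    o = kmem_cache_alloc(my_cache, GFP_KERNEL);   /* grab one object from a slab */
    if (o)
        kmem_cache_free(my_cache, o);             /* return it to its slab */

    kmem_cache_destroy(my_cache);
    return 0;
}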

struct mm_struct
{
    struct vm_area_struct *mmap;      /* list of VMAs */
    struct rb_root mm_rb;             /* red-black tree of VMAs */
    struct vm_area_struct *mmap_cache;/* last find_vma() result */
    unsigned long free_area_cache;
    pgd_t *pgd;                       /* page global directory */
    atomic_t mm_users;                /* users sharing this address space */
    atomic_t mm_count;                /* references to this mm_struct */
    int map_count;                    /* number of VMAs */
    struct rw_semaphore mmap_sem;
    spinlock_t page_table_lock;       /* protects page tables */
    struct list_head mmlist;
    unsigned long start_code, end_code, start_data, end_data;
    unsigned long start_brk, brk, start_stack;    /* the heap is known as brk in the kernel */
    unsigned long arg_start, arg_end, env_start, env_end;
    unsigned long rss, total_vm, locked_vm;
    unsigned long def_flags;
    cpumask_t cpu_vm_mask;
    unsigned long swap_address;
};
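A short sketch of how kernel code might read these layout fields for a task, taking mmap_sem for a consistent view; dump_mm is a hypothetical helper:

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>

static void dump_mm(struct task_struct *task)
{
    struct mm_struct *mm = task->mm;   /* NULL for kernel threads */

    if (!mm)
        return;

    down_read(&mm->mmap_sem);          /* protects the VMA list and layout fields */
    printk("code %lx-%lx heap %lx-%lx stack %lx, %d VMAs\n",
           mm->start_code, mm->end_code,
           mm->start_brk, mm->brk,
           mm->start_stack, mm->map_count);
    up_read(&mm->mmap_sem);
}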

 

 

struct vm_area_struct
{
    struct mm_struct *vm_mm;          /* the address space this VMA belongs to */
    unsigned long vm_start, vm_end;   /* [vm_start, vm_end) covered by this VMA */

    struct vm_area_struct *vm_next;   /* next VMA in the sorted list */
    unsigned long vm_flags;           /* access permissions and behaviour flags */
    struct rb_node vm_rb;             /* node in mm->mm_rb */
    struct vm_operations_struct *vm_ops;   /* open/close/nopage methods */
};
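The VMAs of one mm_struct can be walked linearly via vm_next or looked up via the red-black tree with find_vma(). A minimal sketch (dump_vmas is a hypothetical helper):

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel.h>

static void dump_vmas(struct mm_struct *mm, unsigned long addr)
{
    struct vm_area_struct *vma;

    down_read(&mm->mmap_sem);

    /* linear walk over the sorted, singly linked VMA list */
    for (vma = mm->mmap; vma; vma = vma->vm_next)
        printk("vma %lx-%lx flags %lx\n",
               vma->vm_start, vma->vm_end, vma->vm_flags);

    /* the red-black tree (mm_rb / vm_rb) backs the O(log n) lookup */
    vma = find_vma(mm, addr);
    if (vma && addr >= vma->vm_start)
        printk("%lx is mapped by vma %lx-%lx\n",
               addr, vma->vm_start, vma->vm_end);

    up_read(&mm->mmap_sem);
}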

 

When an application calls malloc() and the current heap has to grow, the C library asks the kernel through sys_brk() to move the program break (brk) to a new position.
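This can be observed from user space: sbrk(0) reports the current program break, and with glibc's default mmap threshold a small request is typically served by extending the heap. A minimal userspace sketch:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    void *before = sbrk(0);        /* current program break (top of the heap) */
    void *p = malloc(64 * 1024);   /* small allocation, usually served via brk */
    void *after = sbrk(0);

    printf("brk before: %p\n", before);
    printf("brk after : %p (moved %ld bytes)\n",
           after, (long)((char *)after - (char *)before));

    free(p);
    return 0;
}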

 

The 3-level paging mechanism:

PGD : page global directory

PMD : page middle directory

PTE : page table entry
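A sketch of a software page-table walk following that PGD -> PMD -> PTE layout. The macro names match the 2.6-era 3-level scheme; newer kernels insert PUD/P4D levels between PGD and PMD, so pmd_offset() takes a different argument there. walk_page_table is a hypothetical helper:

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>

/* Resolve a user virtual address to its PTE. Caller holds mm->page_table_lock
 * and must pte_unmap() the returned entry. */
static pte_t *walk_page_table(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd = pgd_offset(mm, addr);   /* index into the page global directory */
    pmd_t *pmd;

    if (pgd_none(*pgd) || pgd_bad(*pgd))
        return NULL;

    pmd = pmd_offset(pgd, addr);         /* index into the page middle directory */
    if (pmd_none(*pmd) || pmd_bad(*pmd))
        return NULL;

    return pte_offset_map(pmd, addr);    /* finally, the page table entry */
}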

 
