内存回收

内核主要有4种LRU链表:

/*
 * LRU list types used by the page-reclaim code.
 *
 * NOTE(review): LRU_BASE, LRU_ACTIVE and LRU_FILE are index offsets
 * defined elsewhere in the kernel headers (not visible in this
 * excerpt) — presumably LRU_BASE = 0 with LRU_ACTIVE/LRU_FILE as
 * additive offsets; confirm against include/linux/mmzone.h.
 * Anonymous / swap-cache pages live on the *_ANON lists and
 * file-backed page-cache pages on the *_FILE lists (see the
 * per-list descriptions that follow this enum in the article).
 */
enum lru_list {

LRU_INACTIVE_ANON = LRU_BASE,		/* inactive anonymous / swap-cache pages */

LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,	/* active anonymous / swap-cache pages */

LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,	/* inactive file-backed pages */

LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,	/* active file-backed pages */

LRU_UNEVICTABLE,	/* pages excluded from reclaim scanning */

NR_LRU_LISTS		/* count of LRU lists, not a list itself */

};

inactive_anon - # anonymous and swap cache memory on inactive LRU list.

active_anon - # anonymous and swap cache memory on active LRU list.

inactive_file - # file-backed memory on inactive LRU list.

active_file - # file-backed memory on active LRU list.

内核回收内存时,会在get_scan_count中计算每个链表中回收的page数量:

 

/*

 * Determine how aggressively the anon and file LRU lists should be

 * scanned.  The relative value of each set of LRU lists is determined

 * by looking at the fraction of the pages scanned we did rotate back

 * onto the active list instead of evict.

 *

 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan

 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan

 */

static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,

  unsigned long *nr)

**(1) 如果没有swap space,就不会回收anonymous page:**

/* If we have no swap space, do not bother scanning anon pages. */

if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {

scan_balance = SCAN_FILE;

goto out;

}

**(2) 如果是group回收,且memory.swappiness==0,则回收file cache:**

/*

* Global reclaim will swap to prevent OOM even with no

* swappiness, but memcg users want to use this knob to

* disable swapping for individual groups completely when

* using the memory controller's swap limit feature would be

* too expensive.

*/

if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {

scan_balance = SCAN_FILE; ///mem cgroup swappiness == 0

goto out;

}

**(3) 系统(group)内存已经不足,且swappiness!=0,则anon page和file cache都可以回收:**

/*

* Do not apply any pressure balancing cleverness when the

* system is close to OOM, scan both anon and file equally

* (unless the swappiness setting disagrees with swapping).

*/

if (!sc->priority && vmscan_swappiness(sc)) {

scan_balance = SCAN_EQUAL;

goto out;

}

**(4) 如果空闲页(free)+file cache小于high water mark,则回收anon page(注:仅对全局回收):**

anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +

get_lru_size(lruvec, LRU_INACTIVE_ANON);

file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +

get_lru_size(lruvec, LRU_INACTIVE_FILE);

 

/*

* If it's foreseeable that reclaiming the file cache won't be

* enough to get the zone back into a desirable shape, we have

* to swap.  Better start now and leave the - probably heavily

* thrashing - remaining file pages alone.

*/

if (global_reclaim(sc)) {

free = zone_page_state(zone, NR_FREE_PAGES);

if (unlikely(file + free <= high_wmark_pages(zone))) {

scan_balance = SCAN_ANON;

goto out;

}///file cache isn't enough, add ANON

}

**(5) inactive file cache足够,则回收file cache:**

/*

* There is enough inactive page cache, do not reclaim

* anything from the anonymous working set right now.

*/

if (!inactive_file_is_low(lruvec)) {

scan_balance = SCAN_FILE;

goto out;

}

**(6) 其它情况,则根据swappiness计算回收anon page和file cache page的数量:**

scan_balance = SCAN_FRACT;

/*

* With swappiness at 100, anonymous and file have the same priority.

* This scanning priority is essentially the inverse of IO cost.

*/

anon_prio = vmscan_swappiness(sc);

file_prio = 200 - anon_prio;

posted @ 2016-08-02 09:41  yuyue2014  阅读(667)  评论(0)  编辑  收藏  举报