mm: make shrink_page_list work with pages from multiple zones
shrink_page_list() expects all pages to come from the same zone, which is
too limiting for some callers.
This patch removes that dependency so that the next patch can use
shrink_page_list() with pages from multiple zones.
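
To illustrate the intended use, here is a minimal sketch of the kind of
caller the next patch could add in mm/vmscan.c: it reclaims an
already-isolated page list that may span nodes by passing a NULL pgdat.
The reclaim_pages_from_list() name and the scan_control values are
hypothetical, and the trailing arguments assume a prototype that takes a
(possibly NULL) stat pointer and a force_reclaim flag; adjust to the
signature in this tree:

  /*
   * Hypothetical caller (sketch): reclaim an already-isolated list of
   * pages that may span several nodes. Passing pgdat == NULL skips the
   * per-node flag tests and the same-node debug check added by this patch.
   */
  static unsigned long reclaim_pages_from_list(struct list_head *page_list)
  {
  	struct scan_control sc = {
  		.gfp_mask = GFP_KERNEL,
  		.priority = DEF_PRIORITY,
  		.may_writepage = 1,
  		.may_unmap = 1,
  		.may_swap = 1,
  	};
  	unsigned long nr_reclaimed;

  	/* NULL pgdat: pages on @page_list need not share a node. */
  	nr_reclaimed = shrink_page_list(page_list, NULL, &sc,
  					TTU_IGNORE_ACCESS, NULL, true);

  	/* Put back whatever could not be reclaimed. */
  	while (!list_empty(page_list)) {
  		struct page *page = lru_to_page(page_list);

  		list_del(&page->lru);
  		putback_lru_page(page);
  	}

  	return nr_reclaimed;
  }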
Change-Id: I34469b7f0a79f2b79e30e40033ba8b3e1dd5f2d0
Signed-off-by: Minchan Kim <minchan@kernel.org>
Patch-mainline: linux-mm @ 9 May 2013 16:21:25
[vinmenon@codeaurora.org: changes for node-based LRUs;
shrink_page_list expects all pages to come from the same node]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2628140..1778828 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -977,6 +977,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep;
 
 		VM_BUG_ON_PAGE(PageActive(page), page);
+		if (pgdat)
+			VM_BUG_ON_PAGE(page_pgdat(page) != pgdat, page);
 
 		sc->nr_scanned++;
 
@@ -1055,7 +1057,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			/* Case 1 above */
 			if (current_is_kswapd() &&
 			    PageReclaim(page) &&
-			    test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
+			    (pgdat && test_bit(PGDAT_WRITEBACK, &pgdat->flags))) {
 				nr_immediate++;
 				goto keep_locked;
 
@@ -1151,7 +1153,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 */
 			if (page_is_file_cache(page) &&
 					(!current_is_kswapd() ||
-					 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
+					(pgdat &&
+					 !test_bit(PGDAT_DIRTY, &pgdat->flags)))) {
 				/*
 				 * Immediately reclaim when written back.
 				 * Similar in principal to deactivate_page()