author	Rik van Riel <riel@redhat.com>	2010-08-02 20:42:00 +0200
committer	Andrea Arcangeli <aarcange@redhat.com>	2010-08-02 18:43:23 +0000
commit	5278156dba5d7d3b284f656faf1f6760b315bb54 (patch)
tree	790012e083482a5b3abb03a16b90f0d9d1bc2ead
parent	65ff11dc45394cd1558c3881a9770c5e32109259 (diff)
scale nr_rotated to balance memory pressure (THP-29)
Make sure we scale up nr_rotated when we encounter a referenced
transparent huge page. This ensures pageout scanning balance is not
distorted when there are huge pages on the LRU.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
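For context: hpage_nr_pages() reports how many base pages an LRU page
stands for — 1 for a regular page, HPAGE_PMD_NR (512 with 4K base pages
and 2M huge pages on x86-64) for a transparent huge page. A minimal
sketch of that helper, close to its form in this patch series
(illustrative, not quoted verbatim):

	/*
	 * Sketch of the helper the patch relies on; a THP covers
	 * HPAGE_PMD_NR base pages, everything else covers one.
	 */
	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;
		return 1;
	}

With the accounting scaled this way, one referenced 2M THP weighs the
same as 512 referenced 4K pages in recent_rotated and nr_rotated, so
the reclaim balance heuristics keep operating in units of base pages.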
-rw-r--r--	mm/vmscan.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5fb7bbc40e0e..dcd519687664 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1220,7 +1220,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
+			int numpages = hpage_nr_pages(page);
+			reclaim_stat->recent_rotated[file] += numpages;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1357,7 +1358,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			nr_rotated++;
+			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So