From ef2a5153b4d2c48c05b9280491cb5592a46df385 Mon Sep 17 00:00:00 2001
From: Geert Uytterhoeven
Date: Tue, 14 Apr 2015 15:44:22 -0700
Subject: mm/migrate: mark unmap_and_move() "noinline" to avoid ICE in gcc 4.7.3

With gcc version 4.7.3 (Ubuntu/Linaro 4.7.3-12ubuntu1):

    mm/migrate.c: In function `migrate_pages':
    mm/migrate.c:1148:1: internal compiler error: in push_minipool_fix, at config/arm/arm.c:13500
    Please submit a full bug report, with preprocessed source if appropriate.
    See for instructions.
    Preprocessed source stored into /tmp/ccPoM1tr.out file, please attach this to your bugreport.
    make[1]: *** [mm/migrate.o] Error 1
    make: *** [mm/migrate.o] Error 2

Mark unmap_and_move() (which is used in a single place only) "noinline" to
work around this compiler bug.

[akpm@linux-foundation.org: make it conditional on gcc-4.7.3 and arm]
[khilman@kernel.org: fine-tune compiler versions]
[akpm@linux-foundation.org: fix comment]
Signed-off-by: Geert Uytterhoeven
Reported-by: Kevin Hilman
Cc: Marc Zyngier
Tested-by: Kevin Hilman
Tested-by: Lina Iyer
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index 85e042686031..ec1802d85f05 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -900,13 +900,24 @@ out:
 	return rc;
 }
 
+/*
+ * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move().  Work
+ * around it.
+ */
+#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
+#define ICE_noinline noinline
+#else
+#define ICE_noinline
+#endif
+
 /*
  * Obtain the lock on page, remove all ptes and migrate the page
  * to the newly allocated page in newpage.
  */
-static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
-			unsigned long private, struct page *page, int force,
-			enum migrate_mode mode)
+static ICE_noinline int unmap_and_move(new_page_t get_new_page,
+				   free_page_t put_new_page,
+				   unsigned long private, struct page *page,
+				   int force, enum migrate_mode mode)
 {
 	int rc = 0;
 	int *result = NULL;
--
cgit v1.2.3

From 2a8e70026435ad97570a1e0a0c4c941e0f700a3e Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Tue, 14 Apr 2015 15:48:15 -0700
Subject: mm: numa: remove migrate_ratelimited

This code is dead since commit 9e645ab6d089 ("sched/numa: Continue PTE
scanning even if migrate rate limited") so remove it.
Signed-off-by: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/migrate.h |  5 -----
 mm/migrate.c            | 20 --------------------
 2 files changed, 25 deletions(-)

(limited to 'mm/migrate.c')

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 78baed5f2952..cac1c0904d5f 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -69,7 +69,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 extern bool pmd_trans_migrating(pmd_t pmd);
 extern int migrate_misplaced_page(struct page *page,
 				  struct vm_area_struct *vma, int node);
-extern bool migrate_ratelimited(int node);
 #else
 static inline bool pmd_trans_migrating(pmd_t pmd)
 {
@@ -80,10 +79,6 @@ static inline int migrate_misplaced_page(struct page *page,
 {
 	return -EAGAIN; /* can't migrate now */
 }
-static inline bool migrate_ratelimited(int node)
-{
-	return false;
-}
 #endif /* CONFIG_NUMA_BALANCING */
 
 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
diff --git a/mm/migrate.c b/mm/migrate.c
index ec1802d85f05..a65ff72ab739 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1565,30 +1565,10 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
  * page migration rate limiting control.
  * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
  * window of time. Default here says do not migrate more than 1280M per second.
- * If a node is rate-limited then PTE NUMA updates are also rate-limited. However
- * as it is faults that reset the window, pte updates will happen unconditionally
- * if there has not been a fault since @pteupdate_interval_millisecs after the
- * throttle window closed.
  */
 static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
 static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
 
-/* Returns true if NUMA migration is currently rate limited */
-bool migrate_ratelimited(int node)
-{
-	pg_data_t *pgdat = NODE_DATA(node);
-
-	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
-				msecs_to_jiffies(pteupdate_interval_millisecs)))
-		return false;
-
-	if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
-		return false;
-
-	return true;
-}
-
 /* Returns true if the node is migrate rate-limited after the update */
 static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
 					 unsigned long nr_pages)
--
cgit v1.2.3

From b3b3a99c5371e2e96a2c680e6ac20218bddbd422 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Wed, 15 Apr 2015 16:13:15 -0700
Subject: mm/migrate: check-before-clear PageSwapCache

With the page flag sanitization patchset, an invalid usage of
ClearPageSwapCache() is detected in migrate_page_copy().
migrate_page_copy() is shared by both normal and hugepage (both thp and
hugetlb) code paths, so let's check PageSwapCache() and clear it if it's
set to avoid misuse of the invalid clear operation.

Signed-off-by: Naoya Horiguchi
Acked-by: Kirill A. Shutemov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'mm/migrate.c')

diff --git a/mm/migrate.c b/mm/migrate.c
index a65ff72ab739..f53838fe3dfe 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -537,7 +537,8 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	 * Please do not reorder this without considering how mm/ksm.c's
 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
 	 */
-	ClearPageSwapCache(page);
+	if (PageSwapCache(page))
+		ClearPageSwapCache(page);
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
 
--
cgit v1.2.3
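
The hunk above is the whole fix: the clear becomes conditional, so the shared
copy path never clears a flag that was not set. As a rough illustration of why
that matters, here is a minimal standalone C sketch of the check-before-clear
pattern, assuming a sanitizing clear helper that complains when the flag is
already zero. The struct fake_page, ClearSwapCache() and copy_flags() names
are invented for this sketch and are not the kernel's page-flag API.

/*
 * Standalone sketch (not kernel code) of the check-before-clear pattern
 * adopted by the patch above.  ClearSwapCache() plays the role of a
 * sanitized flag-clearing helper that treats clearing an already-clear
 * flag as a bug.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool swapcache;
};

/* A sanitizing "clear" that complains if the flag was not actually set. */
static void ClearSwapCache(struct fake_page *p)
{
	assert(p->swapcache);	/* invalid clear: flag was already 0 */
	p->swapcache = false;
}

static void copy_flags(struct fake_page *dst, struct fake_page *src)
{
	/*
	 * Guarding the clear keeps this shared path legal for pages that
	 * were never in the swap cache (e.g. hugetlb pages).
	 */
	if (src->swapcache)
		ClearSwapCache(src);
	dst->swapcache = false;
}

int main(void)
{
	struct fake_page anon = { .swapcache = true };
	struct fake_page huge = { .swapcache = false };
	struct fake_page newpage = { .swapcache = false };

	copy_flags(&newpage, &anon);	/* flag set: clear is fine */
	copy_flags(&newpage, &huge);	/* flag clear: guard skips the clear */
	printf("both calls completed without tripping the sanitizer\n");
	return 0;
}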