From 77fcd88de2ae1309c16e4fc35fb90cbc60c3d2ec Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 2 Mar 2024 13:27:50 -0500
Subject: mm/slub: Make __ksize() faster

With slab gone, we now have a free u32 in struct slab. This steals it to
make __ksize() faster; it's now a single dependent load, instead of two.

This is going to be important for tracking the amount of memory stranded
by RCU, which we want to be able to do if we're going to be freeing all
pagecache folios (and perhaps all folios) via RCU.

Cc: linux-mm@kvack.org
Cc: Vlastimil Babka
Cc: Andrew Morton
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Roman Gushchin
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Matthew Wilcox (Oracle)
Signed-off-by: Kent Overstreet
---
 mm/slab.h        | 2 +-
 mm/slab_common.c | 9 ++++-----
 mm/slub.c        | 2 ++
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/mm/slab.h b/mm/slab.h
index 54deeb0428c6..64f06431cc97 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -84,7 +84,7 @@ struct slab {
 		};
 		struct rcu_head rcu_head;
 	};
-	unsigned int __unused;
+	unsigned int object_size;
 
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6ec0f6543f34..f209b8cf4965 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -963,13 +963,12 @@ size_t __ksize(const void *object)
 		if (WARN_ON(object != folio_address(folio)))
 			return 0;
 		return folio_size(folio);
-	}
-
+	} else {
 #ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+		skip_orig_size_check(folio_slab(folio)->slab_cache, object);
 #endif
-
-	return slab_ksize(folio_slab(folio)->slab_cache);
+		return folio_slab(folio)->object_size;
+	}
 }
 
 /**
diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..45e95133653e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2366,6 +2366,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	}
 
 	slab->objects = oo_objects(oo);
+	slab->object_size = slab_ksize(s);
 	slab->inuse = 0;
 	slab->frozen = 0;
@@ -2414,6 +2415,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int order = folio_order(folio);
 	int pages = 1 << order;
 
+	slab->object_size = -1;
 	/* page->_mapcount */
 	__slab_clear_pfmemalloc(slab);
 	folio->mapping = NULL;
 	/* Make the mapping reset visible before clearing the flag */
-- 
cgit v1.2.3