author    Kent Overstreet <kent.overstreet@linux.dev>  2024-03-02 13:27:50 -0500
committer Kent Overstreet <kent.overstreet@linux.dev>  2024-03-07 23:18:26 -0500
commit    77fcd88de2ae1309c16e4fc35fb90cbc60c3d2ec
tree      f8ba44500c7d06eed1f3c4794759b4226d6a7c09
parent    1054c0f498fcb57de4858313d974aee23daf7376
mm/slub: Make __ksize() faster
With SLAB gone, we now have a free u32 in struct slab. Steal it to make
__ksize() faster: the lookup is now a single dependent load instead of
two.

This is going to be important for tracking the amount of memory stranded
by RCU, which we want to be able to do if we're going to be freeing all
pagecache folios (and perhaps all folios) via RCU.

Cc: linux-mm@kvack.org
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
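The win is easiest to see as a pointer chase. A minimal userspace sketch
(toy types and field names, not the kernel's structures): before, __ksize()
had to load slab->slab_cache and then read the size out of the kmem_cache;
after, the size lives in struct slab itself.

#include <stddef.h>

struct cache { size_t size; };

struct slab_before { struct cache *slab_cache; };
struct slab_after  { struct cache *slab_cache; unsigned int object_size; };

/* old path: load slab->slab_cache, then load the size out of it */
static size_t ksize_before(const struct slab_before *slab)
{
	return slab->slab_cache->size;	/* two dependent loads */
}

/* new path: the size is cached in the slab itself */
static size_t ksize_after(const struct slab_after *slab)
{
	return slab->object_size;	/* one dependent load */
}

int main(void)
{
	struct cache kmalloc_512 = { .size = 512 };
	struct slab_before old = { .slab_cache = &kmalloc_512 };
	struct slab_after  new = { .slab_cache = &kmalloc_512, .object_size = 512 };

	return !(ksize_before(&old) == 512 && ksize_after(&new) == 512);
}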
-rw-r--r--  mm/slab.h        | 2 +-
-rw-r--r--  mm/slab_common.c | 9 ++++-----
-rw-r--r--  mm/slub.c        | 2 ++
3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 54deeb0428c6..64f06431cc97 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -84,7 +84,7 @@ struct slab {
 		};
 		struct rcu_head rcu_head;
 	};
-	unsigned int __unused;
+	unsigned int object_size;
 
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
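Why a free u32 exists at this offset at all, and why it has to be put back
carefully (see the __free_slab() hunk below): struct slab is a typed overlay
of struct page, and the reused word sits where page->_mapcount lives. A
userspace sketch of the offset-matching pattern mm/slab.h enforces with its
SLAB_MATCH() static asserts; the toy structs and the _mapcount/object_size
assert are illustrative, not copied from the kernel.

#include <assert.h>
#include <stddef.h>

struct toy_page {			/* stand-in for struct page */
	unsigned long flags;
	void *compound_head;
	int _mapcount;			/* must read -1 once the page is free */
	int _refcount;
};

struct toy_slab {			/* stand-in for struct slab */
	unsigned long __page_flags;
	void *slab_cache;
	unsigned int object_size;	/* reuses the word over _mapcount */
	int __page_refcount;
};

/* same shape as the kernel's SLAB_MATCH() in mm/slab.h */
#define SLAB_MATCH(pg, sl) \
	static_assert(offsetof(struct toy_page, pg) == offsetof(struct toy_slab, sl), #pg)

SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(_mapcount, object_size);	/* hypothetical check for the new field */
SLAB_MATCH(_refcount, __page_refcount);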
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6ec0f6543f34..f209b8cf4965 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -963,13 +963,12 @@ size_t __ksize(const void *object)
 		if (WARN_ON(object != folio_address(folio)))
 			return 0;
 		return folio_size(folio);
-	}
-
+	} else {
 #ifdef CONFIG_SLUB_DEBUG
-	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
+		skip_orig_size_check(folio_slab(folio)->slab_cache, object);
 #endif
-
-	return slab_ksize(folio_slab(folio)->slab_cache);
+		return folio_slab(folio)->object_size;
+	}
 }
 
 /**
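Assembled for readability, here is how __ksize() reads with the hunk
applied. The lines above the hunk's context (the ZERO_SIZE_PTR check and
the large-kmalloc path) are reproduced from mainline mm/slab_common.c of
this era and may not match this tree exactly.

size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		/* large kmalloc: backed by a plain high-order folio */
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	} else {
#ifdef CONFIG_SLUB_DEBUG
		skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif
		/* one dependent load: no detour through the kmem_cache */
		return folio_slab(folio)->object_size;
	}
}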
diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..45e95133653e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2366,6 +2366,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	}
 
 	slab->objects = oo_objects(oo);
+	slab->object_size = slab_ksize(s);
 	slab->inuse = 0;
 	slab->frozen = 0;
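The cached value can be written once per slab because a slab belongs to
exactly one kmem_cache for its whole lifetime, so every object in it
answers ksize() identically. A toy userspace sketch of that invariant
(names are illustrative, not the kernel's):

#include <assert.h>

struct toy_cache { unsigned int ksize; };

struct toy_slab {
	struct toy_cache *slab_cache;
	unsigned int object_size;	/* cached copy of the per-object size */
};

/* toy form of the new line in allocate_slab() */
static void toy_allocate_slab(struct toy_slab *slab, struct toy_cache *s)
{
	slab->slab_cache = s;
	slab->object_size = s->ksize;	/* written once, valid for the slab's lifetime */
}

int main(void)
{
	struct toy_cache kmalloc_192 = { .ksize = 192 };
	struct toy_slab slab;

	toy_allocate_slab(&slab, &kmalloc_192);
	/* ksize() for any object in this slab no longer touches the cache */
	assert(slab.object_size == kmalloc_192.ksize);
	return 0;
}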
@@ -2414,6 +2415,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	int order = folio_order(folio);
 	int pages = 1 << order;
 
+	slab->object_size = -1;	/* page->_mapcount */
 	__slab_clear_pfmemalloc(slab);
 	folio->mapping = NULL;
 	/* Make the mapping reset visible before clearing the flag */
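The reset matters because object_size occupies the word the page allocator
reads as page->_mapcount, and the free-path sanity checks in mm/page_alloc.c
treat anything other than -1 there as a bad page ("nonzero mapcount"). A toy
userspace illustration (hypothetical names, not the kernel's code):

#include <stdio.h>

struct toy_page { int _mapcount; };	/* -1 == no mappings */

static int toy_free_page_is_bad(const struct toy_page *page)
{
	/* mirrors the shape of the "nonzero mapcount" check */
	return page->_mapcount != -1;
}

int main(void)
{
	struct toy_page page = { ._mapcount = 192 };	/* stale object_size */

	printf("before reset: bad=%d\n", toy_free_page_is_bad(&page));
	page._mapcount = -1;				/* what the new hunk does */
	printf("after reset:  bad=%d\n", toy_free_page_is_bad(&page));
	return 0;
}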