summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2024-05-30 21:49:51 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2024-05-30 21:49:51 -0400
commit0d63bd45cc2aa8ed98e6cf3f62ac133599fa8177 (patch)
tree2bf3ca698c5fec1b7fa4ea1c912deea4725758e9
parenta7eab32c3580c6f4c051c5a16cad8877307b853f (diff)
verify_obj_has_alloc_tag()memalloc_prof_debug
-rw-r--r--include/linux/alloc_tag.h4
-rw-r--r--include/linux/slab.h6
-rw-r--r--kernel/rcu/tree.c3
-rw-r--r--lib/alloc_tag.c17
-rw-r--r--mm/slub.c26
5 files changed, 55 insertions, 1 deletion
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index abd24016a900..48d587b0a37c 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -127,7 +127,9 @@ static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag
static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
- WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
+ if (ref && !ref->ct)
+ printk(KERN_WARNING "alloc_tag was not set\n");
+ //WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7247e217e21b..c5df1534ebf3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -835,6 +835,12 @@ unsigned int kmem_cache_size(struct kmem_cache *s);
*/
size_t kmalloc_size_roundup(size_t size);
+/*
+ * Debug hook: check that a slab object still carries its allocation tag
+ * (implemented in mm/slub.c).  Compiles to a no-op when
+ * CONFIG_MEM_ALLOC_PROFILING_DEBUG is not set.
+ */
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+void verify_slab_obj_has_alloc_tag(void *ptr);
+#else
+static inline void verify_slab_obj_has_alloc_tag(void *ptr) {}
+#endif
+
void __init kmem_cache_init_late(void);
#endif /* _LINUX_SLAB_H */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 28c7031711a3..76323d9621ad 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3758,6 +3758,9 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
struct kfree_rcu_cpu *krcp;
bool success;
+ if (!is_vmalloc_addr(ptr))
+ verify_slab_obj_has_alloc_tag(ptr);
+
/*
* Please note there is a limitation for the head-less
* variant, that is why there is a clear rule for such
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 11ed973ac359..1498cf8e2098 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -16,6 +16,23 @@ EXPORT_SYMBOL(_shared_alloc_tag);
DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+/*
+ * Debug check that the object at @ptr has its allocation tag set.
+ *
+ * NOTE(review): currently a stub — only the profiling-enabled gate
+ * executes.  The per-type dispatch below is compiled out with #if 0,
+ * apparently because verify_vmalloc_obj_has_alloc_tag() and
+ * verify_page_obj_has_alloc_tag() do not exist yet (WIP scaffolding on
+ * this branch); only the slab variant is implemented (mm/slub.c).
+ * TODO: enable the dispatch once the missing helpers land.
+ */
+void verify_obj_has_alloc_tag(void *ptr)
+{
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+#if 0
+ if (is_vmalloc_addr(ptr))
+ verify_vmalloc_obj_has_alloc_tag(ptr);
+ else if (folio_test_slab(virt_to_folio(ptr)))
+ verify_slab_obj_has_alloc_tag(ptr);
+ else
+ verify_page_obj_has_alloc_tag(ptr);
+#endif
+}
+#endif
+
struct allocinfo_private {
struct codetag_iterator iter;
bool print_header;
diff --git a/mm/slub.c b/mm/slub.c
index 0809760cf789..3ed15d3cd029 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4494,6 +4494,32 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
return cachep;
}
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
/*
 * Debug check: verify that the slab object at @ptr still has its
 * allocation tag set, i.e. memory allocation profiling accounted for it.
 * Silently returns when profiling is disabled, @ptr is NULL, @ptr is not
 * a slab object, the slab has no obj_exts vector, or the computed index
 * is out of range; otherwise delegates the actual warning to
 * alloc_tag_sub_check().
 */
void verify_slab_obj_has_alloc_tag(void *ptr)
{
	if (!mem_alloc_profiling_enabled())
		return;

	if (!ptr)
		return;

	struct slab *slab = virt_to_slab(ptr);
	if (!slab)
		return;

	/*
	 * Take the cache from the slab itself.  The previous code called
	 * cache_from_obj(s, ptr) with 's' still uninitialized — passing an
	 * indeterminate pointer as the expected-cache argument is undefined
	 * behavior.  slab->slab_cache is the authoritative owning cache.
	 */
	struct kmem_cache *s = slab->slab_cache;

	struct slabobj_ext *obj_exts = slab_obj_exts(slab);
	if (!obj_exts)
		return;

	unsigned int idx = obj_to_index(s, slab, ptr);
	if (idx >= objs_per_slab(s, slab))
		return;

	alloc_tag_sub_check(&obj_exts[idx].ref);
}
#endif
+
/**
* kmem_cache_free - Deallocate an object
* @s: The cache the allocation was from.