diff options
author | Kent Overstreet <kent.overstreet@linux.dev> | 2023-11-22 18:17:06 -0500 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2025-06-30 17:52:47 -0400 |
commit | 3e35ea055e719892902226fdf304d2bb84345c30 (patch) | |
tree | dbaf3d9b7e2e39ec6d4716cba90d0f05f495fe13 | |
parent | 8730399a8dfd9c3f12c962ead9073f775420951c (diff) |
mm: shrinker: Add new stats for .to_text()
Add a few new shrinker stats.
number of objects requested to free, number of objects freed:
Shrinkers won't necessarily free all objects requested for a variety of
reasons, but if the two counts are wildly different something is likely
amiss.
.scan_objects runtime:
If one shrinker is taking an excessive amount of time to free
objects, it will block kswapd from running other shrinkers.
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: linux-mm@kvack.org
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r-- | include/linux/shrinker.h | 6 | ||||
-rw-r--r-- | mm/shrinker.c | 24 |
2 files changed, 29 insertions, 1 deletion
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 6193612617a1..106622ddac77 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -118,6 +118,12 @@ struct shrinker {
 #endif
 	/* objs pending delete, per node */
 	atomic_long_t *nr_deferred;
+
+	atomic_long_t objects_requested_to_free;
+	atomic_long_t objects_freed;
+	unsigned long last_freed;	/* timestamp, in jiffies */
+	unsigned long last_scanned;	/* timestamp, in jiffies */
+	atomic64_t ns_run;
 };
 
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 82d2161d6b4b..c56c1f824f79 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -412,6 +412,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 
 	trace_mm_shrink_slab_start(shrinker, shrinkctl, nr, freeable, delta,
 				   total_scan, priority);
+	u64 start_time = ktime_get_ns();
 
 	/*
 	 * Normally, we should not scan less than batch_size objects in one
@@ -462,6 +463,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	 */
 	new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
 
+	unsigned long now = jiffies;
+	if (freed) {
+		atomic_long_add(freed, &shrinker->objects_freed);
+		shrinker->last_freed = now;
+	}
+	shrinker->last_scanned = now;
+	atomic_long_add(scanned, &shrinker->objects_requested_to_free);
+
+	atomic64_add(ktime_get_ns() - start_time, &shrinker->ns_run);
+
 	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr,
 				 total_scan);
 	return freed;
 }
@@ -814,9 +826,19 @@ EXPORT_SYMBOL_GPL(shrinker_free);
 void shrinker_to_text(struct seq_buf *out, struct shrinker *shrinker)
 {
 	struct shrink_control sc = { .gfp_mask = GFP_KERNEL, };
+	unsigned long nr_freed = atomic_long_read(&shrinker->objects_freed);
 
 	seq_buf_puts(out, shrinker->name);
-	seq_buf_printf(out, " objects: %lu\n", shrinker->count_objects(shrinker, &sc));
+	seq_buf_putc(out, '\n');
+
+	seq_buf_printf(out, "objects: %lu\n", shrinker->count_objects(shrinker, &sc));
+	seq_buf_printf(out, "requested to free: %lu\n", atomic_long_read(&shrinker->objects_requested_to_free));
+	seq_buf_printf(out, "objects freed: %lu\n", nr_freed);
+	seq_buf_printf(out, "last scanned: %li sec ago\n", (jiffies - shrinker->last_scanned) / HZ);
+	seq_buf_printf(out, "last freed: %li sec ago\n", (jiffies - shrinker->last_freed) / HZ);
+	seq_buf_printf(out, "ns per object freed: %llu\n", nr_freed
+		       ? div64_ul(atomic64_read(&shrinker->ns_run), nr_freed)
+		       : 0);
 
 	if (shrinker->to_text) {
 		shrinker->to_text(out, shrinker);