summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--mm/oom_kill.c23
-rw-r--r--mm/show_mem.c14
-rw-r--r--mm/slab.h6
-rw-r--r--mm/slab_common.c53
4 files changed, 63 insertions(+), 33 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 832fb330376e..659c7d637687 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -171,27 +171,6 @@ static bool oom_unkillable_task(struct task_struct *p)
return false;
}
-/*
- * Check whether unreclaimable slab amount is greater than
- * all user memory(LRU pages).
- * dump_unreclaimable_slab() could help in the case that
- * oom due to too much unreclaimable slab used by kernel.
-*/
-static bool should_dump_unreclaim_slab(void)
-{
- unsigned long nr_lru;
-
- nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
- global_node_page_state(NR_INACTIVE_ANON) +
- global_node_page_state(NR_ACTIVE_FILE) +
- global_node_page_state(NR_INACTIVE_FILE) +
- global_node_page_state(NR_ISOLATED_ANON) +
- global_node_page_state(NR_ISOLATED_FILE) +
- global_node_page_state(NR_UNEVICTABLE);
-
- return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
-}
-
/**
* oom_badness - heuristic function to determine which candidate task to kill
* @p: task struct of which task we should calculate
@@ -465,8 +444,6 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
mem_cgroup_print_oom_meminfo(oc->memcg);
else {
show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
- if (should_dump_unreclaim_slab())
- dump_unreclaimable_slab();
}
if (sysctl_oom_dump_tasks)
dump_tasks(oc);
diff --git a/mm/show_mem.c b/mm/show_mem.c
index 1c26c14ffbb9..24b662f64d40 100644
--- a/mm/show_mem.c
+++ b/mm/show_mem.c
@@ -7,11 +7,15 @@
#include <linux/mm.h>
#include <linux/cma.h>
+#include <linux/printbuf.h>
+
+#include "slab.h"
void show_mem(unsigned int filter, nodemask_t *nodemask)
{
pg_data_t *pgdat;
unsigned long total = 0, reserved = 0, highmem = 0;
+ struct printbuf buf = PRINTBUF;
printk("Mem-Info:\n");
show_free_areas(filter, nodemask);
@@ -41,4 +45,14 @@ void show_mem(unsigned int filter, nodemask_t *nodemask)
#ifdef CONFIG_MEMORY_FAILURE
printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
+
+ pr_info("Unreclaimable slab info:\n");
+ dump_unreclaimable_slab(&buf);
+ printk("%s", printbuf_str(&buf));
+ printbuf_reset(&buf);
+
+ printk("Shrinkers:\n");
+ shrinkers_to_text(&buf);
+ printk("%s", printbuf_str(&buf));
+ printbuf_exit(&buf);
}
diff --git a/mm/slab.h b/mm/slab.h
index c7f2abc2b154..abefbf767497 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -788,10 +788,12 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
#endif
+struct printbuf;
+
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
-void dump_unreclaimable_slab(void);
+void dump_unreclaimable_slab(struct printbuf *);
#else
-static inline void dump_unreclaimable_slab(void)
+static inline void dump_unreclaimable_slab(struct printbuf *out)
{
}
#endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 23f2ab0713b7..120948079794 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -24,6 +24,7 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
+#include <linux/printbuf.h>
#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
@@ -1084,10 +1085,15 @@ static int slab_show(struct seq_file *m, void *p)
return 0;
}
-void dump_unreclaimable_slab(void)
+void dump_unreclaimable_slab(struct printbuf *out)
{
struct kmem_cache *s;
struct slabinfo sinfo;
+ struct slab_by_mem {
+ struct kmem_cache *s;
+ size_t total, active;
+ } slabs_by_mem[10], n;
+ int i, nr = 0;
/*
* Here acquiring slab_mutex is risky since we don't prefer to get
@@ -1097,12 +1103,11 @@ void dump_unreclaimable_slab(void)
* without acquiring the mutex.
*/
if (!mutex_trylock(&slab_mutex)) {
- pr_warn("excessive unreclaimable slab but cannot dump stats\n");
+ pr_buf(out, "excessive unreclaimable slab but cannot dump stats\n");
return;
}
- pr_info("Unreclaimable slab info:\n");
- pr_info("Name Used Total\n");
+ printbuf_atomic_inc(out);
list_for_each_entry(s, &slab_caches, list) {
if (s->flags & SLAB_RECLAIM_ACCOUNT)
@@ -1110,11 +1115,43 @@ void dump_unreclaimable_slab(void)
get_slabinfo(s, &sinfo);
- if (sinfo.num_objs > 0)
- pr_info("%-17s %10luKB %10luKB\n", s->name,
- (sinfo.active_objs * s->size) / 1024,
- (sinfo.num_objs * s->size) / 1024);
+ if (!sinfo.num_objs)
+ continue;
+
+ n.s = s;
+ n.total = sinfo.num_objs * s->size;
+ n.active = sinfo.active_objs * s->size;
+
+ for (i = 0; i < nr; i++)
+ if (n.total < slabs_by_mem[i].total)
+ break;
+
+ if (nr < ARRAY_SIZE(slabs_by_mem)) {
+ memmove(&slabs_by_mem[i + 1],
+ &slabs_by_mem[i],
+ sizeof(slabs_by_mem[0]) * (nr - i));
+ nr++;
+ } else if (i) {
+ i--;
+ memmove(&slabs_by_mem[0],
+ &slabs_by_mem[1],
+ sizeof(slabs_by_mem[0]) * i);
+ } else {
+ continue;
+ }
+
+ slabs_by_mem[i] = n;
+ }
+
+ for (i = nr - 1; i >= 0; --i) {
+ pr_buf(out, "%-17s total: ", slabs_by_mem[i].s->name);
+ pr_human_readable_u64(out, slabs_by_mem[i].total);
+ pr_buf(out, " active: ");
+ pr_human_readable_u64(out, slabs_by_mem[i].active);
+ pr_newline(out);
}
+
+ printbuf_atomic_dec(out);
mutex_unlock(&slab_mutex);
}