summary refs log tree commit diff
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--	include/linux/slab.h	63
-rw-r--r--	include/linux/types.h	1
-rw-r--r--	include/linux/vmalloc.h	53
-rw-r--r--	include/trace/events/bcachefs.h	27
4 files changed, 86 insertions, 58 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 557c0411..17fe235e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -7,10 +7,14 @@
#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/overflow.h>
#include <linux/page.h>
#include <linux/shrinker.h>
#include <linux/types.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
#define ARCH_KMALLOC_MINALIGN 16
#define KMALLOC_MAX_SIZE SIZE_MAX
@@ -58,6 +62,16 @@ static inline void *krealloc(void *old, size_t size, gfp_t flags)
return new;
}
+static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return krealloc(p, bytes, flags);
+}
+
#define kzalloc(size, flags) kmalloc(size, flags|__GFP_ZERO)
#define kmalloc_array(n, size, flags) \
((size) != 0 && (n) > SIZE_MAX / (size) \
@@ -174,4 +188,53 @@ static inline struct kmem_cache *kmem_cache_create(size_t obj_size)
#define KMEM_CACHE(_struct, _flags) kmem_cache_create(sizeof(struct _struct))
+#define PAGE_KERNEL 0
+#define PAGE_KERNEL_EXEC 1
+
+#define vfree(p) free(p)
+
+static inline void *__vmalloc(unsigned long size, gfp_t gfp_mask)
+{
+ unsigned i = 0;
+ void *p;
+
+ size = round_up(size, PAGE_SIZE);
+
+ do {
+ run_shrinkers(gfp_mask, i != 0);
+
+ p = aligned_alloc(PAGE_SIZE, size);
+ if (p && gfp_mask & __GFP_ZERO)
+ memset(p, 0, size);
+ } while (!p && i++ < 10);
+
+ return p;
+}
+
+static inline void *vmalloc_exec(unsigned long size, gfp_t gfp_mask)
+{
+ void *p;
+
+ p = __vmalloc(size, gfp_mask);
+ if (!p)
+ return NULL;
+
+ if (mprotect(p, size, PROT_READ|PROT_WRITE|PROT_EXEC)) {
+ vfree(p);
+ return NULL;
+ }
+
+ return p;
+}
+
+static inline void *vmalloc(unsigned long size)
+{
+ return __vmalloc(size, GFP_KERNEL);
+}
+
+static inline void *vzalloc(unsigned long size)
+{
+ return __vmalloc(size, GFP_KERNEL|__GFP_ZERO);
+}
+
#endif /* __TOOLS_LINUX_SLAB_H */
diff --git a/include/linux/types.h b/include/linux/types.h
index 77f96737..7eb2222f 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -6,6 +6,7 @@
#include <stdint.h>
#include <fcntl.h>
+#include <sys/stat.h>
#include <sys/types.h>
#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 965e341d..55fffb59 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -1,59 +1,6 @@
#ifndef __TOOLS_LINUX_VMALLOC_H
#define __TOOLS_LINUX_VMALLOC_H
-#include <stdlib.h>
-#include <sys/mman.h>
-
#include "linux/slab.h"
-#include "tools-util.h"
-
-#define PAGE_KERNEL 0
-#define PAGE_KERNEL_EXEC 1
-
-#define vfree(p) free(p)
-
-static inline void *__vmalloc(unsigned long size, gfp_t gfp_mask)
-{
- unsigned i = 0;
- void *p;
-
- size = round_up(size, PAGE_SIZE);
-
- do {
- run_shrinkers(gfp_mask, i != 0);
-
- p = aligned_alloc(PAGE_SIZE, size);
- if (p && gfp_mask & __GFP_ZERO)
- memset(p, 0, size);
- } while (!p && i++ < 10);
-
- return p;
-}
-
-static inline void *vmalloc_exec(unsigned long size, gfp_t gfp_mask)
-{
- void *p;
-
- p = __vmalloc(size, gfp_mask);
- if (!p)
- return NULL;
-
- if (mprotect(p, size, PROT_READ|PROT_WRITE|PROT_EXEC)) {
- vfree(p);
- return NULL;
- }
-
- return p;
-}
-
-static inline void *vmalloc(unsigned long size)
-{
- return __vmalloc(size, GFP_KERNEL);
-}
-
-static inline void *vzalloc(unsigned long size)
-{
- return __vmalloc(size, GFP_KERNEL|__GFP_ZERO);
-}
#endif /* __TOOLS_LINUX_VMALLOC_H */
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 08de7e61..ac2aecd4 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -493,32 +493,49 @@ DEFINE_EVENT(bucket_alloc, bucket_alloc,
TRACE_EVENT(bucket_alloc_fail,
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
- u64 avail, u64 need_journal_commit,
- bool nonblocking),
- TP_ARGS(ca, alloc_reserve, avail, need_journal_commit, nonblocking),
+ u64 avail,
+ u64 seen,
+ u64 open,
+ u64 need_journal_commit,
+ u64 nouse,
+ bool nonblocking,
+ int ret),
+ TP_ARGS(ca, alloc_reserve, avail, seen, open, need_journal_commit, nouse, nonblocking, ret),
TP_STRUCT__entry(
__field(dev_t, dev )
__array(char, reserve, 16 )
__field(u64, avail )
+ __field(u64, seen )
+ __field(u64, open )
__field(u64, need_journal_commit )
+ __field(u64, nouse )
__field(bool, nonblocking )
+ __field(int, ret )
),
TP_fast_assign(
__entry->dev = ca->dev;
strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
__entry->avail = avail;
+ __entry->seen = seen;
+ __entry->open = open;
__entry->need_journal_commit = need_journal_commit;
+ __entry->nouse = nouse;
__entry->nonblocking = nonblocking;
+ __entry->ret = ret;
),
- TP_printk("%d,%d reserve %s avail %llu need_journal_commit %llu nonblocking %u",
+ TP_printk("%d,%d reserve %s avail %llu seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u ret %i",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->reserve,
__entry->avail,
+ __entry->seen,
+ __entry->open,
__entry->need_journal_commit,
- __entry->nonblocking)
+ __entry->nouse,
+ __entry->nonblocking,
+ __entry->ret)
);
DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,