#ifndef __TOOLS_LINUX_SLAB_H
#define __TOOLS_LINUX_SLAB_H

#include <malloc.h>
#include <stdlib.h>
#include <string.h>

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/overflow.h>
#include <linux/page.h>
#include <linux/shrinker.h>
#include <linux/types.h>

#include <sys/mman.h>

#define alloc_hooks(_do, ...)		_do

#define ARCH_KMALLOC_MINALIGN		16
#define KMALLOC_MAX_SIZE		SIZE_MAX

static inline void *kmalloc_noprof(size_t size, gfp_t flags)
{
	unsigned i;
	void *p;

	/* Bounded retry: run the shrinkers between failed attempts. */
	for (i = 0; i < 10; i++) {
		if (size) {
			/*
			 * Match kmalloc's natural alignment for power-of-two
			 * sizes, capped at one page.
			 */
			size_t alignment = min(rounddown_pow_of_two(size), (size_t)PAGE_SIZE);
			alignment = max(sizeof(void *), alignment);
			if (posix_memalign(&p, alignment, size))
				p = NULL;
		} else {
			p = malloc(0);
		}

		if (p) {
			if (flags & __GFP_ZERO)
				memset(p, 0, size);
			break;
		}

		run_shrinkers(flags, true);
	}

	return p;
}
#define kmalloc		kmalloc_noprof
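
/*
 * Illustrative usage (a sketch; struct foo and the -ENOMEM handling are
 * stand-ins, not part of this header):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL|__GFP_ZERO);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */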

static inline void *krealloc(void *old, size_t size, gfp_t flags)
{
	void *new;

	/* kmalloc() already honours __GFP_ZERO, so no extra memset is needed. */
	new = kmalloc(size, flags);
	if (!new)
		return NULL;

	if (old) {
		/*
		 * The shim doesn't track requested sizes, so copy up to the
		 * smaller of the two allocations' usable sizes.
		 */
		memcpy(new, old,
		       min(malloc_usable_size(old),
			   malloc_usable_size(new)));
		free(old);
	}

	return new;
}

static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}
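
/*
 * Illustrative growth pattern (tbl and new_nr are stand-ins): keep the old
 * pointer until the overflow-checked reallocation succeeds.
 *
 *	u64 *new_tbl = krealloc_array(tbl, new_nr, sizeof(*tbl), GFP_KERNEL);
 *	if (!new_tbl)
 *		return -ENOMEM;	// tbl is still valid here
 *	tbl = new_tbl;
 */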

#define kzalloc(size, flags)		kmalloc(size, flags|__GFP_ZERO)
#define kmalloc_array(n, size, flags)					\
	((size) != 0 && (n) > SIZE_MAX / (size)				\
	 ? NULL : kmalloc((n) * (size), flags))

#define kvmalloc_array(n, size, flags)					\
	((size) != 0 && (n) > SIZE_MAX / (size)				\
	 ? NULL : kmalloc((n) * (size), flags))

#define kcalloc(n, size, flags)		kmalloc_array(n, size, flags|__GFP_ZERO)

#define kfree(p)			free(p)
/* Unlike the kernel's kzfree()/kfree_sensitive(), no zeroing happens here. */
#define kzfree(p)			free(p)

#define kvmalloc(size, flags)		kmalloc(size, flags)
#define kvzalloc(size, flags)		kzalloc(size, flags)
#define kvfree(p)			kfree(p)

/*
 * In this userspace shim a struct page * is simply the address of the page
 * itself; there is no separate page array.
 */
static inline struct page *alloc_pages_noprof(gfp_t flags, unsigned int order)
{
	size_t size = PAGE_SIZE << order;
	unsigned i;
	void *p;

	for (i = 0; i < 10; i++) {
		p = aligned_alloc(PAGE_SIZE, size);

		if (p) {
			if (flags & __GFP_ZERO)
				memset(p, 0, size);
			break;
		}

		run_shrinkers(flags, true);
	}

	return p;
}
#define alloc_pages			alloc_pages_noprof

#define alloc_page(gfp)			alloc_pages(gfp, 0)

#define _get_free_pages(gfp, order)	((unsigned long) alloc_pages(gfp, order))
#define __get_free_pages(gfp, order)	((unsigned long) alloc_pages(gfp, order))
#define get_free_pages_noprof(gfp, order)				\
					((unsigned long) alloc_pages(gfp, order))
#define __get_free_page(gfp)		__get_free_pages(gfp, 0)

#define __free_pages(page, order)			\
do {							\
	(void) order;					\
	free(page);					\
} while (0)

#define free_pages(addr, order)				\
do {							\
	(void) order;					\
	free((void *) (addr));				\
} while (0)

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
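
/*
 * Illustrative pairing (a sketch): an order-2 allocation spans four pages
 * and is released with the matching order.
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL|__GFP_ZERO, 2);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_pages(addr, 2);
 */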

#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040      /* don't add guard page */
#define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */

/* No-op stubs: the shim has no kernel virtual mapping layer to manage. */
static inline void vunmap(const void *addr) {}

static inline void *vmap(struct page **pages, unsigned int count,
			 unsigned long flags, unsigned prot)
{
	return NULL;
}

/* No separate vmalloc address space: identity/constant stubs suffice. */
#define is_vmalloc_addr(page)		0

#define vmalloc_to_page(addr)		((struct page *) (addr))

static inline void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}

struct kmem_cache {
	size_t		    obj_size;
};

static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t gfp)
{
	return kmalloc(c->obj_size, gfp);
}

static inline void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t gfp)
{
	return kzalloc(c->obj_size, gfp);
}

static inline void kmem_cache_free(struct kmem_cache *c, void *p)
{
	kfree(p);
}

static inline void kmem_cache_destroy(struct kmem_cache *p)
{
	kfree(p);
}

static inline struct kmem_cache *kmem_cache_create(size_t obj_size)
{
	struct kmem_cache *p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	p->obj_size = obj_size;
	return p;
}

#define KMEM_CACHE(_struct, _flags)	kmem_cache_create(sizeof(struct _struct))
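
/*
 * Illustrative usage (struct foo is a stand-in): a cache here is only a
 * recorded object size on top of kmalloc().
 *
 *	struct kmem_cache *foo_cache = KMEM_CACHE(foo, 0);
 *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */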

#define PAGE_KERNEL		0
#define PAGE_KERNEL_EXEC	1

#define vfree(p)		free(p)

static inline void *__vmalloc(unsigned long size, gfp_t flags)
{
	unsigned i;
	void *p;

	/* aligned_alloc() requires the size to be a multiple of the alignment. */
	size = round_up(size, PAGE_SIZE);

	for (i = 0; i < 10; i++) {
		p = aligned_alloc(PAGE_SIZE, size);

		if (p) {
			if (flags & __GFP_ZERO)
				memset(p, 0, size);
			break;
		}

		run_shrinkers(flags, true);
	}

	return p;
}

static inline void *vmalloc_exec(unsigned long size, gfp_t gfp_mask)
{
	void *p;

	p = __vmalloc(size, gfp_mask);
	if (!p)
		return NULL;

	/* The allocation is page-aligned, so we can flip it executable. */
	if (mprotect(p, size, PROT_READ|PROT_WRITE|PROT_EXEC)) {
		vfree(p);
		return NULL;
	}

	return p;
}

static inline void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL);
}

static inline void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL|__GFP_ZERO);
}
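
/*
 * Illustrative usage (a sketch): vmalloc() here is just a page-aligned heap
 * allocation, so it pairs with vfree() exactly as in the kernel.
 *
 *	void *buf = vzalloc(1UL << 20);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */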

#endif /* __TOOLS_LINUX_SLAB_H */