author | Kent Overstreet <kent.overstreet@linux.dev> | 2023-05-11 15:46:52 -0400
---|---|---
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-05-11 15:46:52 -0400
commit | a768c157ba14aeb89f49cebed452e2a43b5f512b (patch) |
tree | af6c302812758f1513e58e9db2f08f26b39a0598 |
parent | aa45828c9d760f0f1e7b30977772c429db7c106a (diff) |
alloc_hooks cleanup: use __VA_ARGS__
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
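The pattern being cleaned up: each allocation function has been renamed with a leading underscore, and the original name is re-exposed as a macro that routes the call through alloc_hooks() so every call site can be instrumented. Written with named parameters, each wrapper has to repeat the full parameter list of the function it wraps; written with `__VA_ARGS__`, it forwards whatever arguments it receives, so the wrapper never needs editing when the function's signature changes. A minimal standalone sketch of the idea, assuming a no-op `alloc_hooks()` and a hypothetical `_my_alloc()` in place of the kernel's versions:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's alloc_hooks(): a real
 * implementation would record the call site before evaluating the
 * wrapped allocation expression; here it expands to it unchanged. */
#define alloc_hooks(_do_alloc)	(_do_alloc)

void *_my_alloc(size_t size, int flags)
{
	(void)flags;		/* flags ignored in this sketch */
	return malloc(size);
}

/* Before the cleanup, each wrapper spelled out its arguments:
 *
 *   #define my_alloc(_size, _flags) \
 *           alloc_hooks(_my_alloc(_size, _flags))
 *
 * After: __VA_ARGS__ forwards the argument list verbatim, so the
 * macro never has to track the function's parameter list. */
#define my_alloc(...) \
	alloc_hooks(_my_alloc(__VA_ARGS__))

int main(void)
{
	char *p = my_alloc(64, 0);	/* expands to alloc_hooks(_my_alloc(64, 0)) */
	printf("%p\n", (void *)p);
	free(p);
	return 0;
}
```

The one case this can't simplify is a wrapper that does more than forward its arguments, which is why a handful of macros in the diff below keep their named parameters.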
-rw-r--r-- | include/linux/gfp.h | 70
-rw-r--r-- | include/linux/mempool.h | 8
-rw-r--r-- | include/linux/pagemap.h | 4
-rw-r--r-- | include/linux/slab.h | 62
4 files changed, 71 insertions, 73 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index d1586b90bb90..2306a465f010 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -178,34 +178,32 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 
 struct page *_alloc_pages2(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
-#define __alloc_pages(_gfp, _order, _preferred_nid, _nodemask) \
-	alloc_hooks(_alloc_pages2(_gfp, _order, _preferred_nid, _nodemask))
+#define __alloc_pages(...) \
+	alloc_hooks(_alloc_pages2(__VA_ARGS__))
 
 struct folio *_folio_alloc2(gfp_t gfp, unsigned int order, int preferred_nid,
 		nodemask_t *nodemask);
-#define __folio_alloc(_gfp, _order, _preferred_nid, _nodemask) \
-	alloc_hooks(_folio_alloc2(_gfp, _order, _preferred_nid, _nodemask))
+#define __folio_alloc(...) \
+	alloc_hooks(_folio_alloc2(__VA_ARGS__))
 
 unsigned long _alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 				nodemask_t *nodemask, int nr_pages,
 				struct list_head *page_list,
 				struct page **page_array);
-#define __alloc_pages_bulk(_gfp, _preferred_nid, _nodemask, _nr_pages,	\
-			   _page_list, _page_array)			\
-	alloc_hooks(_alloc_pages_bulk(_gfp, _preferred_nid, _nodemask,	\
-				      _nr_pages, _page_list, _page_array))
+#define __alloc_pages_bulk(...) \
+	alloc_hooks(_alloc_pages_bulk(__VA_ARGS__))
 
 unsigned long _alloc_pages_bulk_array_mempolicy(gfp_t gfp,
 				unsigned long nr_pages,
 				struct page **page_array);
-#define alloc_pages_bulk_array_mempolicy(_gfp, _nr_pages, _page_array) \
-	alloc_hooks(_alloc_pages_bulk_array_mempolicy(_gfp, _nr_pages, _page_array))
+#define alloc_pages_bulk_array_mempolicy(...) \
+	alloc_hooks(_alloc_pages_bulk_array_mempolicy(__VA_ARGS__))
 
 /* Bulk allocate order-0 pages */
-#define alloc_pages_bulk_list(_gfp, _nr_pages, _list)			\
+#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \
 	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL)
 
-#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array)		\
+#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \
 	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array)
 
 static inline unsigned long
@@ -217,8 +215,8 @@ _alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct
 	return _alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
 }
 
-#define alloc_pages_bulk_array_node(_gfp, _nid, _nr_pages, _page_array) \
-	alloc_hooks(_alloc_pages_bulk_array_node(_gfp, _nid, _nr_pages, _page_array))
+#define alloc_pages_bulk_array_node(...) \
+	alloc_hooks(_alloc_pages_bulk_array_node(__VA_ARGS__))
 
 static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
 {
@@ -247,8 +245,8 @@ _alloc_pages_node2(int nid, gfp_t gfp_mask, unsigned int order)
 	return _alloc_pages2(gfp_mask, order, nid, NULL);
 }
 
-#define __alloc_pages_node(_nid, _gfp_mask, _order) \
-	alloc_hooks(_alloc_pages_node2(_nid, _gfp_mask, _order))
+#define __alloc_pages_node(...) \
+	alloc_hooks(_alloc_pages_node2(__VA_ARGS__))
 
 static inline struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order,
 					       int nid)
@@ -273,8 +271,8 @@ static inline struct page *_alloc_pages_node(int nid, gfp_t gfp_mask,
 	return _alloc_pages_node2(nid, gfp_mask, order);
 }
 
-#define alloc_pages_node(_nid, _gfp_mask, _order) \
-	alloc_hooks(_alloc_pages_node(_nid, _gfp_mask, _order))
+#define alloc_pages_node(...) \
+	alloc_hooks(_alloc_pages_node(__VA_ARGS__))
 
 #ifdef CONFIG_NUMA
 struct page *_alloc_pages(gfp_t gfp, unsigned int order);
@@ -294,12 +292,12 @@ static inline struct folio *_folio_alloc(gfp_t gfp, unsigned int order)
 	_folio_alloc(gfp, order)
 #endif
 
-#define alloc_pages(_gfp, _order) \
-	alloc_hooks(_alloc_pages(_gfp, _order))
-#define folio_alloc(_gfp, _order) \
-	alloc_hooks(_folio_alloc(_gfp, _order))
-#define vma_alloc_folio(_gfp, _order, _vma, _addr, _hugepage)		\
-	alloc_hooks(_vma_alloc_folio(_gfp, _order, _vma, _addr, _hugepage))
+#define alloc_pages(...) \
+	alloc_hooks(_alloc_pages(__VA_ARGS__))
+#define folio_alloc(...) \
+	alloc_hooks(_folio_alloc(__VA_ARGS__))
+#define vma_alloc_folio(...) \
+	alloc_hooks(_vma_alloc_folio(__VA_ARGS__))
 
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 static inline struct page *alloc_page_vma(gfp_t gfp,
@@ -311,22 +309,22 @@ static inline struct page *alloc_page_vma(gfp_t gfp,
 }
 
 extern unsigned long _get_free_pages(gfp_t gfp_mask, unsigned int order);
-#define __get_free_pages(_gfp_mask, _order) \
-	alloc_hooks(_get_free_pages(_gfp_mask, _order))
+#define __get_free_pages(...) \
+	alloc_hooks(_get_free_pages(__VA_ARGS__))
 
 extern unsigned long _get_zeroed_page(gfp_t gfp_mask);
-#define get_zeroed_page(_gfp_mask) \
-	alloc_hooks(_get_zeroed_page(_gfp_mask))
+#define get_zeroed_page(...) \
+	alloc_hooks(_get_zeroed_page(__VA_ARGS__))
 
 void *_alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
-#define alloc_pages_exact(_size, _gfp_mask) \
-	alloc_hooks(_alloc_pages_exact(_size, _gfp_mask))
+#define alloc_pages_exact(...) \
+	alloc_hooks(_alloc_pages_exact(__VA_ARGS__))
 void free_pages_exact(void *virt, size_t size);
 
 __meminit void *_alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
-#define alloc_pages_exact_nid(_nid, _size, _gfp_mask) \
-	alloc_hooks(_alloc_pages_exact_nid(_nid, _size, _gfp_mask))
+#define alloc_pages_exact_nid(...) \
+	alloc_hooks(_alloc_pages_exact_nid(__VA_ARGS__))
 
 #define __get_free_page(gfp_mask) \
 	__get_free_pages((gfp_mask), 0)
@@ -391,13 +389,13 @@ static inline bool pm_suspended_storage(void)
 /* The below functions must be run on a range from a single zone. */
 extern int _alloc_contig_range(unsigned long start, unsigned long end,
 			       unsigned migratetype, gfp_t gfp_mask);
-#define alloc_contig_range(_start, _end, _migratetype, _gfp_mask) \
-	alloc_hooks(_alloc_contig_range(_start, _end, _migratetype, _gfp_mask))
+#define alloc_contig_range(...) \
+	alloc_hooks(_alloc_contig_range(__VA_ARGS__))
 
 extern struct page *_alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
 					int nid, nodemask_t *nodemask);
-#define alloc_contig_pages(_nr_pages, _gfp_mask, _nid, _nodemask) \
-	alloc_hooks(_alloc_contig_pages(_nr_pages, _gfp_mask, _nid, _nodemask))
+#define alloc_contig_pages(...) \
+	alloc_hooks(_alloc_contig_pages(__VA_ARGS__))
 #endif
 
 void free_contig_range(unsigned long pfn, unsigned long nr_pages);
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 383910f9f683..5555a41d0fc7 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -44,7 +44,7 @@ int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
 
 int _mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
 		  mempool_free_t *free_fn, void *pool_data);
-#define mempool_init(...)						\
+#define mempool_init(...) \
 	alloc_hooks(_mempool_init(__VA_ARGS__))
 
 extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
@@ -53,7 +53,7 @@ extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 extern mempool_t *_mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data,
 			gfp_t gfp_mask, int nid);
-#define mempool_create_node(...)					\
+#define mempool_create_node(...) \
 	alloc_hooks(_mempool_create_node(__VA_ARGS__))
 
 #define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \
@@ -64,8 +64,8 @@ extern int mempool_resize(mempool_t *pool, int new_min_nr);
 extern void mempool_destroy(mempool_t *pool);
 
 extern void *_mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
-#define mempool_alloc(_pool, _gfp) \
-	alloc_hooks(_mempool_alloc((_pool), (_gfp)))
+#define mempool_alloc(...) \
+	alloc_hooks(_mempool_alloc(__VA_ARGS__))
 
 extern void mempool_free(void *element, mempool_t *pool);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 8d423384062a..f9e0d7578d5e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -475,8 +475,8 @@ static inline struct folio *_filemap_alloc_folio(gfp_t gfp, unsigned int order)
 }
 #endif
 
-#define filemap_alloc_folio(_gfp, _order) \
-	alloc_hooks(_filemap_alloc_folio(_gfp, _order))
+#define filemap_alloc_folio(...) \
+	alloc_hooks(_filemap_alloc_folio(__VA_ARGS__))
 
 static inline struct page *__page_cache_alloc(gfp_t gfp)
 {
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 594e96f8c128..31213110c99a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -214,8 +214,8 @@ int kmem_cache_shrink(struct kmem_cache *s);
  * Common kmalloc functions provided by all allocators
  */
 void * __must_check _krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
-#define krealloc(_p, _size, _flags) \
-	alloc_hooks(_krealloc(_p, _size, _flags))
+#define krealloc(...) \
+	alloc_hooks(_krealloc(__VA_ARGS__))
 
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
@@ -469,13 +469,13 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz
  * Return: pointer to the new object or %NULL in case of error
  */
 void *_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
-#define kmem_cache_alloc(_s, _flags) \
-	alloc_hooks(_kmem_cache_alloc(_s, _flags))
+#define kmem_cache_alloc(...) \
+	alloc_hooks(_kmem_cache_alloc(__VA_ARGS__))
 
 void *_kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			    gfp_t gfpflags) __assume_slab_alignment __malloc;
-#define kmem_cache_alloc_lru(_s, _lru, _flags) \
-	alloc_hooks(_kmem_cache_alloc_lru(_s, _lru, _flags))
+#define kmem_cache_alloc_lru(...) \
+	alloc_hooks(_kmem_cache_alloc_lru(__VA_ARGS__))
 
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
@@ -489,8 +489,8 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
 
 int _kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
-#define kmem_cache_alloc_bulk(_s, _flags, _size, _p) \
-	alloc_hooks(_kmem_cache_alloc_bulk(_s, _flags, _size, _p))
+#define kmem_cache_alloc_bulk(...) \
+	alloc_hooks(_kmem_cache_alloc_bulk(__VA_ARGS__))
 
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
@@ -501,8 +501,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignm
 					 __alloc_size(1);
 void *_kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
 									  __malloc;
-#define kmem_cache_alloc_node(_s, _flags, _node) \
-	alloc_hooks(_kmem_cache_alloc_node(_s, _flags, _node))
+#define kmem_cache_alloc_node(...) \
+	alloc_hooks(_kmem_cache_alloc_node(__VA_ARGS__))
 
 void *_kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 		    __assume_kmalloc_alignment __alloc_size(3);
@@ -510,21 +510,21 @@ void *_kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 void *_kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 		int node, size_t size) __assume_kmalloc_alignment
 					__alloc_size(4);
-#define kmalloc_trace(_s, _flags, _size) \
-	alloc_hooks(_kmalloc_trace(_s, _flags, _size))
+#define kmalloc_trace(...) \
+	alloc_hooks(_kmalloc_trace(__VA_ARGS__))
 
-#define kmalloc_node_trace(_s, _gfpflags, _node, _size) \
-	alloc_hooks(_kmalloc_node_trace(_s, _gfpflags, _node, _size))
+#define kmalloc_node_trace(...) \
+	alloc_hooks(_kmalloc_node_trace(__VA_ARGS__))
 
 void *_kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
 					       __alloc_size(1);
-#define kmalloc_large(_size, _flags) \
-	alloc_hooks(_kmalloc_large(_size, _flags))
+#define kmalloc_large(...) \
+	alloc_hooks(_kmalloc_large(__VA_ARGS__))
 
 void *_kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
 							      __alloc_size(1);
-#define kmalloc_large_node(_size, _flags, _node) \
-	alloc_hooks(_kmalloc_large_node(_size, _flags, _node))
+#define kmalloc_large_node(...) \
+	alloc_hooks(_kmalloc_large_node(__VA_ARGS__))
 
 /**
  * kmalloc - allocate kernel memory
@@ -595,7 +595,7 @@ static __always_inline __alloc_size(1) void *_kmalloc(size_t size, gfp_t flags)
 	}
 	return __kmalloc(size, flags);
 }
-#define kmalloc(_size, _flags) alloc_hooks(_kmalloc(_size, _flags))
+#define kmalloc(...) alloc_hooks(_kmalloc(__VA_ARGS__))
 
 static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -612,8 +612,8 @@ static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t fl
 	}
 	return __kmalloc_node(size, flags, node);
 }
-#define kmalloc_node(_size, _flags, _node) \
-	alloc_hooks(_kmalloc_node(_size, _flags, _node))
+#define kmalloc_node(...) \
+	alloc_hooks(_kmalloc_node(__VA_ARGS__))
 
 /**
  * kmalloc_array - allocate memory for an array.
@@ -631,8 +631,8 @@ static inline __alloc_size(1, 2) void *_kmalloc_array(size_t n, size_t size, gfp
 		return _kmalloc(bytes, flags);
 	return _kmalloc(bytes, flags);
 }
-#define kmalloc_array(_n, _size, _flags) \
-	alloc_hooks(_kmalloc_array(_n, _size, _flags))
+#define kmalloc_array(...) \
+	alloc_hooks(_kmalloc_array(__VA_ARGS__))
 
 /**
  * krealloc_array - reallocate memory for an array.
@@ -653,8 +653,8 @@ static inline __realloc_size(2, 3) void * __must_check _krealloc_array(void *p,
 	return _krealloc(p, bytes, flags);
 }
 
-#define krealloc_array(_p, _n, _size, _flags) \
-	alloc_hooks(_krealloc_array(_p, _n, _size, _flags))
+#define krealloc_array(...) \
+	alloc_hooks(_krealloc_array(__VA_ARGS__))
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -692,8 +692,8 @@ static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size
 		return _kmalloc_node(bytes, flags, node);
 	return __kmalloc_node(bytes, flags, node);
 }
-#define kmalloc_array_node(_n, _size, _flags, _node) \
-	alloc_hooks(_kmalloc_array_node(_n, _size, _flags, _node))
+#define kmalloc_array_node(...) \
+	alloc_hooks(_kmalloc_array_node(__VA_ARGS__))
 
 #define kcalloc_node(_n, _size, _flags, _node) \
 	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
@@ -713,8 +713,8 @@ static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size
 #define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
 extern void *_kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
-#define kvmalloc_node(_size, _flags, _node) \
-	alloc_hooks(_kvmalloc_node(_size, _flags, _node))
+#define kvmalloc_node(...) \
+	alloc_hooks(_kvmalloc_node(__VA_ARGS__))
 
 #define kvmalloc(_size, _flags)		kvmalloc_node(_size, _flags, NUMA_NO_NODE)
 #define kvzalloc(_size, _flags)		kvmalloc(_size, _flags|__GFP_ZERO)
@@ -733,8 +733,8 @@ extern void *_kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
 
 extern void *_kvrealloc(const void *p, size_t oldsize, size_t newsize,
 			gfp_t flags) __realloc_size(3);
-#define kvrealloc(_p, _oldsize, _newsize, _flags) \
-	alloc_hooks(_kvrealloc(_p, _oldsize, _newsize, _flags))
+#define kvrealloc(...) \
+	alloc_hooks(_kvrealloc(__VA_ARGS__))
 
 extern void kvfree(const void *addr);
 extern void kvfree_sensitive(const void *addr, size_t len);
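Note the asymmetry visible throughout the diff: pure forwarders (`alloc_pages`, `kmalloc_node`, `mempool_alloc`, ...) become variadic, while wrappers that insert or transform arguments (`alloc_pages_bulk_list`, `kcalloc_node`, `kzalloc_node`, `kvzalloc`, ...) keep their named parameters, since they must refer to individual arguments by position. A variadic wrapper also gives up arity checking at the macro level, but nothing is lost in practice: the expansion still calls the underscore-prefixed function, and the compiler checks the argument count against its prototype. Continuing the hypothetical `_my_alloc` sketch from above:

```c
#include <stdlib.h>

#define alloc_hooks(_do_alloc)	(_do_alloc)	/* no-op stand-in, as above */

void *_my_alloc(size_t size, int flags)
{
	(void)flags;
	return malloc(size);
}

#define my_alloc(...)	alloc_hooks(_my_alloc(__VA_ARGS__))

int main(void)
{
	void *p = my_alloc(64, 0);	/* fine: expands to _my_alloc(64, 0) */
#ifdef SHOW_ARITY_ERROR
	void *q = my_alloc(64);		/* compile error: too few arguments to '_my_alloc' */
#endif
	free(p);
	return 0;
}
```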