-rw-r--r--   include/linux/idr.h | 239
-rw-r--r--   init/main.c         |   1
-rw-r--r--   lib/idr.c           | 937
3 files changed, 256 insertions, 921 deletions
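The patch below rebuilds the IDR on top of an IDA (backed by a bitmap tree) plus a radix tree for the id -> pointer mapping, and removes the old idr_layer machinery. As a quick orientation before the diff body, here is a minimal, illustrative sketch of the ida_* interface declared in the new include/linux/idr.h; the names example_ida and example_get_id are hypothetical and error handling is kept to the bare minimum:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(void)
	{
		int id;

		/* Allocate the lowest free id >= 0.  Returns -ENOSPC if the id
		 * space is exhausted or -ENOMEM on allocation failure. */
		id = ida_get(&example_ida, GFP_KERNEL);
		if (id < 0)
			return id;

		/* ... use id ... */

		/* Release the id once it is no longer needed. */
		ida_remove(&example_ida, id);
		return 0;
	}
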
diff --git a/include/linux/idr.h b/include/linux/idr.h index ac6d6763d5d6..f2830614d9b7 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -15,43 +15,37 @@ #include <linux/init.h> #include <linux/bitmap-tree.h> #include <linux/bitops.h> +#include <linux/gfp.h> +#include <linux/radix-tree.h> #include <linux/rcupdate.h> #include <linux/types.h> -/* - * We want shallower trees and thus more bits covered at each layer. 8 - * bits gives us large enough first layer for most use cases and maximum - * tree depth of 4. Each idr_layer is slightly larger than 2k on 64bit and - * 1k on 32bit. - */ -#define IDR_BITS 8 -#define IDR_SIZE (1 << IDR_BITS) -#define IDR_MASK ((1 << IDR_BITS)-1) - -struct idr_layer { - int prefix; /* the ID prefix of this idr_layer */ - DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */ - struct idr_layer __rcu *ary[1<<IDR_BITS]; - int count; /* When zero, we can release it */ - int layer; /* distance from leaf */ - struct rcu_head rcu_head; -}; +/* IDA */ -struct idr { - struct idr_layer __rcu *hint; /* the last layer allocated from */ - struct idr_layer __rcu *top; - struct idr_layer *id_free; - int layers; /* only valid w/o concurrent changes */ - int id_free_cnt; - int cur; /* current pos for cyclic allocation */ - spinlock_t lock; +struct ida { + struct bitmap_tree map; }; -#define IDR_INIT(name) \ -{ \ - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ +#define IDA_INIT(name) \ +{ \ + .map = BITMAP_TREE_INIT(name.map), \ } -#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) +#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) + +void ida_remove(struct ida *ida, unsigned id); + +void ida_destroy(struct ida *ida); +void ida_init(struct ida *ida); + +int ida_get_range(struct ida *ida, unsigned int start, + unsigned int end, gfp_t gfp_mask); + +static inline int ida_get(struct ida *ida, gfp_t gfp_mask) +{ + return ida_get_range(ida, 0, 0, gfp_mask); +} + +/* IDR */ /** * DOC: idr sync @@ -70,34 +64,51 @@ struct idr { * period). */ -/* - * This is what we export. - */ +struct idr; -void *idr_find_slowpath(struct idr *idp, int id); -void idr_preload(gfp_t gfp_mask); -int idr_alloc_range(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); -int idr_for_each(struct idr *idp, +void *idr_find_next(struct idr *idr, int *nextid); +int idr_for_each(struct idr *idr, int (*fn)(int id, void *p, void *data), void *data); -void *idr_find_next(struct idr *idp, int *nextid); -void *idr_replace(struct idr *idp, void *ptr, int id); -void idr_remove(struct idr *idp, int id); -void idr_free(struct idr *idp, int id); -void idr_destroy(struct idr *idp); -void idr_init(struct idr *idp); +void *idr_replace(struct idr *idr, void *ptr, unsigned id); +void idr_remove(struct idr *idr, unsigned id); +int idr_alloc_range(struct idr *idr, void *ptr, unsigned start, + unsigned end, gfp_t gfp); +int idr_alloc_cyclic(struct idr *idr, void *ptr, unsigned start, + unsigned end, gfp_t gfp_mask); +void idr_destroy(struct idr *idr); +void idr_init(struct idr *idr); /** - * idr_preload_end - end preload section started with idr_preload() + * idr_for_each_entry - iterate over an idr's elements of a given type + * @idr: idr handle + * @entry: the type * to use as cursor + * @id: id entry's key * - * Each idr_preload() should be matched with an invocation of this - * function. See idr_preload() for details. 
+ * @entry and @id do not need to be initialized before the loop, and + * after normal terminatinon @entry is left with the value NULL. This + * is convenient for a "not found" value. */ -static inline void idr_preload_end(void) +#define idr_for_each_entry(idr, entry, id) \ + for (id = 0; ((entry) = idr_find_next(idr, &(id))) != NULL; ++id) + +static inline int idr_alloc(struct idr *idr, void *ptr, gfp_t gfp) { - preempt_enable(); + return idr_alloc_range(idr, ptr, 0, 0, gfp); } +struct idr { + struct ida ida; + unsigned cur; + struct radix_tree_root ptrs; +}; + +#define IDR_INIT(name) \ +{ \ + .ida = IDA_INIT(name.ida), \ + .ptrs = RADIX_TREE_INIT(GFP_NOWAIT), \ +} +#define DEFINE_IDR(name) struct idr name = IDR_INIT(name) + /** * idr_find - return pointer for given id * @idr: idr handle @@ -105,121 +116,65 @@ static inline void idr_preload_end(void) * * Return the pointer given the id it has been registered with. A %NULL * return indicates that @id is not valid or you passed %NULL in - * idr_get_new(). + * idr_alloc(). * * This function can be called under rcu_read_lock(), given that the leaf * pointers lifetimes are correctly managed. */ -static inline void *idr_find(struct idr *idr, int id) +static inline void *idr_find(struct idr *idr, unsigned id) { - struct idr_layer *hint = rcu_dereference_raw(idr->hint); + void *ret; - if (hint && (id & ~IDR_MASK) == hint->prefix) - return rcu_dereference_raw(hint->ary[id & IDR_MASK]); + rcu_read_lock(); + ret = radix_tree_lookup(&idr->ptrs, id); + rcu_read_unlock(); - return idr_find_slowpath(idr, id); + return ret; } /** - * idr_for_each_entry - iterate over an idr's elements of a given type - * @idp: idr handle - * @entry: the type * to use as cursor - * @id: id entry's key - * - * @entry and @id do not need to be initialized before the loop, and - * after normal terminatinon @entry is left with the value NULL. This - * is convenient for a "not found" value. - */ -#define idr_for_each_entry(idp, entry, id) \ - for (id = 0; ((entry) = idr_find_next(idp, &(id))) != NULL; ++id) - -/* - * Don't use the following functions. These exist only to suppress - * deprecated warnings on EXPORT_SYMBOL()s. - */ -int __idr_pre_get(struct idr *idp, gfp_t gfp_mask); -int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); -void __idr_remove_all(struct idr *idp); - -/** - * idr_pre_get - reserve resources for idr allocation - * @idp: idr handle - * @gfp_mask: memory allocation flags + * idr_preload_end - end preload section started with idr_preload() * - * Part of old alloc interface. This is going away. Use - * idr_preload[_end]() and idr_alloc_range() instead. + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. */ -static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask) +static inline void idr_preload_end(void) { - return __idr_pre_get(idp, gfp_mask); + radix_tree_preload_end(); } /** - * idr_get_new_above - allocate new idr entry above or equal to a start id - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @starting_id: id to start search at - * @id: pointer to the allocated handle + * idr_preload - preload for idr_alloc_range() + * @gfp: allocation mask to use for preloading * - * Part of old alloc interface. This is going away. Use - * idr_preload[_end]() and idr_alloc_range() instead. 
- */ -static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr, - int starting_id, int *id) -{ - return __idr_get_new_above(idp, ptr, starting_id, id); -} - -/** - * idr_get_new - allocate new idr entry - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @id: pointer to the allocated handle + * Preload per-cpu layer buffer for idr_alloc_range(). Can only be used from + * process context and each idr_preload() invocation should be matched with + * idr_preload_end(). Note that preemption is disabled while preloaded. * - * Part of old alloc interface. This is going away. Use - * idr_preload[_end]() and idr_alloc_range() instead. - */ -static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id) -{ - return __idr_get_new_above(idp, ptr, 0, id); -} - -/** - * idr_remove_all - remove all ids from the given idr tree - * @idp: idr handle + * The first idr_alloc_range() in the preloaded section can be treated as if it + * were invoked with @gfp_mask used for preloading. This allows using more + * permissive allocation masks for idrs protected by spinlocks. + * + * For example, if idr_alloc_range() below fails, the failure can be treated as + * if idr_alloc_range() were called with GFP_KERNEL rather than GFP_NOWAIT. + * + * idr_preload(GFP_KERNEL); + * spin_lock(lock); * - * If you're trying to destroy @idp, calling idr_destroy() is enough. - * This is going away. Don't use. + * id = idr_alloc_range(idr, ptr, start, end, GFP_NOWAIT); + * + * spin_unlock(lock); + * idr_preload_end(); + * if (id < 0) + * error; */ -static inline void __deprecated idr_remove_all(struct idr *idp) +static inline void idr_preload(gfp_t gfp) { - __idr_remove_all(idp); -} + might_sleep_if(gfp & __GFP_WAIT); -void __init idr_init_cache(void); - -/* IDA */ - -struct ida { - struct bitmap_tree map; -}; - -#define IDA_INIT(name) \ -{ \ - .map = BITMAP_TREE_INIT(name.map), \ -} -#define DEFINE_IDA(name) struct ida name = IDA_INIT(name) - -void ida_remove(struct ida *ida, unsigned id); -void ida_destroy(struct ida *ida); -void ida_init(struct ida *ida); - -int ida_get_range(struct ida *ida, unsigned int start, - unsigned int end, gfp_t gfp_mask); - -static inline int ida_get(struct ida *ida, gfp_t gfp_mask) -{ - return ida_get_range(ida, 0, 0, gfp_mask); + /* Well this is horrible, but idr_preload doesn't return errors */ + if (radix_tree_preload(gfp)) + preempt_disable(); } #endif /* __IDR_H__ */ diff --git a/init/main.c b/init/main.c index 9484f4ba88d0..87b5a0fe0d87 100644 --- a/init/main.c +++ b/init/main.c @@ -541,7 +541,6 @@ asmlinkage void __init start_kernel(void) preempt_disable(); if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n")) local_irq_disable(); - idr_init_cache(); perf_event_init(); rcu_init(); tick_nohz_init(); diff --git a/lib/idr.c b/lib/idr.c index c3161abbc714..a9a7168b6431 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -35,418 +35,188 @@ #include <linux/string.h> #include <linux/idr.h> #include <linux/spinlock.h> -#include <linux/percpu.h> #include <linux/hardirq.h> -#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) -#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) - -/* Leave the possibility of an incomplete final layer */ -#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) - -/* Number of id_layer structs to leave in free list */ -#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) - -static struct kmem_cache *idr_layer_cache; -static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); -static DEFINE_PER_CPU(int, 
idr_preload_cnt); - -/* the maximum ID which can be allocated given idr->layers */ -static int idr_max(int layers) -{ - int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); - - return (1 << bits) - 1; -} +/* IDA */ -/* - * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is - * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and - * so on. +/** + * DOC: IDA description + * IDA - IDR based ID allocator + * + * This is id allocator without id -> pointer translation. Memory + * usage is much lower than full blown idr because each id only + * occupies a bit. ida uses a custom leaf node which contains + * IDA_BITMAP_BITS slots. + * + * 2007-04-25 written by Tejun Heo <htejun@gmail.com> */ -static int idr_layer_prefix_mask(int layer) -{ - return ~idr_max(layer + 1); -} -static struct idr_layer *get_from_free_list(struct idr *idp) +/** + * ida_remove - remove an allocated id. + * @ida: the (initialized) ida. + * @id: the id returned by ida_get_range. + */ +void ida_remove(struct ida *ida, unsigned int id) { - struct idr_layer *p; - unsigned long flags; - - spin_lock_irqsave(&idp->lock, flags); - if ((p = idp->id_free)) { - idp->id_free = p->ary[0]; - idp->id_free_cnt--; - p->ary[0] = NULL; - } - spin_unlock_irqrestore(&idp->lock, flags); - return(p); + BUG_ON(id > INT_MAX); + bitmap_tree_clear_bit(&ida->map, id); } +EXPORT_SYMBOL(ida_remove); /** - * idr_layer_alloc - allocate a new idr_layer - * @gfp_mask: allocation mask - * @layer_idr: optional idr to allocate from + * ida_get_range - get a new id. + * @ida: the (initialized) ida. + * @start: the minimum id (inclusive, < 0x8000000) + * @end: the maximum id (exclusive, < 0x8000000 or 0) + * @gfp_mask: memory allocation flags * - * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch - * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch - * an idr_layer from @idr->id_free. + * Allocates an id in the range start <= id < end, or returns -ENOSPC. + * On memory allocation failure, returns -ENOMEM. * - * @layer_idr is to maintain backward compatibility with the old alloc - * interface - idr_pre_get() and idr_get_new*() - and will be removed - * together with per-pool preload buffer. + * Use ida_remove() to get rid of an id. */ -static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) +int ida_get_range(struct ida *ida, unsigned int start, + unsigned int end, gfp_t gfp) { - struct idr_layer *new; - - /* this is the old path, bypass to get_from_free_list() */ - if (layer_idr) - return get_from_free_list(layer_idr); - - /* - * Try to allocate directly from kmem_cache. We want to try this - * before preload buffer; otherwise, non-preloading idr_alloc_range() - * users will end up taking advantage of preloading ones. As the - * following is allowed to fail for preloaded cases, suppress - * warning this time. - */ - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); - if (new) - return new; - - /* - * Try to fetch one from the per-cpu preload buffer if in process - * context. See idr_preload() for details. - */ - if (!in_interrupt()) { - preempt_disable(); - new = __this_cpu_read(idr_preload_head); - if (new) { - __this_cpu_write(idr_preload_head, new->ary[0]); - __this_cpu_dec(idr_preload_cnt); - new->ary[0] = NULL; - } - preempt_enable(); - if (new) - return new; - } + unsigned id; + int ret = bitmap_tree_find_set_bits_from(&ida->map, &id, 1, + start, end ?: INT_MAX, gfp); + if (ret < 0) + return ret; - /* - * Both failed. 
Try kmem_cache again w/o adding __GFP_NOWARN so - * that memory allocation failure warning is printed as intended. - */ - return kmem_cache_zalloc(idr_layer_cache, gfp_mask); + return id; } +EXPORT_SYMBOL(ida_get_range); -static void idr_layer_rcu_free(struct rcu_head *head) +/** + * ida_destroy - release all cached layers within an ida tree + * @ida: ida handle + */ +void ida_destroy(struct ida *ida) { - struct idr_layer *layer; - - layer = container_of(head, struct idr_layer, rcu_head); - kmem_cache_free(idr_layer_cache, layer); + bitmap_tree_destroy(&ida->map); } +EXPORT_SYMBOL(ida_destroy); -static inline void free_layer(struct idr *idr, struct idr_layer *p) +/** + * ida_init - initialize ida handle + * @ida: ida handle + * + * This function is use to set up the handle (@ida) that you will pass + * to the rest of the functions. + */ +void ida_init(struct ida *ida) { - if (idr->hint && idr->hint == p) - RCU_INIT_POINTER(idr->hint, NULL); - call_rcu(&p->rcu_head, idr_layer_rcu_free); -} + bitmap_tree_init(&ida->map, 0); -/* only called when idp->lock is held */ -static void __move_to_free_list(struct idr *idp, struct idr_layer *p) -{ - p->ary[0] = idp->id_free; - idp->id_free = p; - idp->id_free_cnt++; } +EXPORT_SYMBOL(ida_init); -static void move_to_free_list(struct idr *idp, struct idr_layer *p) -{ - unsigned long flags; - - /* - * Depends on the return element being zeroed. - */ - spin_lock_irqsave(&idp->lock, flags); - __move_to_free_list(idp, p); - spin_unlock_irqrestore(&idp->lock, flags); -} +/* IDR */ -static void idr_mark_full(struct idr_layer **pa, int id) +/** + * idr_find_next - lookup next object of id to given id. + * @idp: idr handle + * @nextidp: pointer to lookup key + * + * Returns pointer to registered object with id, which is next number to + * given id. After being looked up, *@nextidp will be updated for the next + * iteration. + * + * This function can be called under rcu_read_lock(), given that the leaf + * pointers lifetimes are correctly managed. + */ +void *idr_find_next(struct idr *idr, int *nextidp) { - struct idr_layer *p = pa[0]; - int l = 0; - - __set_bit(id & IDR_MASK, p->bitmap); - /* - * If this layer is full mark the bit in the layer above to - * show that this part of the radix tree is full. This may - * complete the layer above and require walking up the radix - * tree. 
- */ - while (bitmap_full(p->bitmap, IDR_SIZE)) { - if (!(p = pa[++l])) - break; - id = id >> IDR_BITS; - __set_bit((id & IDR_MASK), p->bitmap); - } -} + void **slot; + struct radix_tree_iter iter; -int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) -{ - while (idp->id_free_cnt < MAX_IDR_FREE) { - struct idr_layer *new; - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - if (new == NULL) - return (0); - move_to_free_list(idp, new); + radix_tree_for_each_slot(slot, &idr->ptrs, &iter, *nextidp) { + *nextidp = iter.index; + return radix_tree_deref_slot(slot); } - return 1; + + return NULL; } -EXPORT_SYMBOL(__idr_pre_get); +EXPORT_SYMBOL(idr_find_next); /** - * sub_alloc - try to allocate an id without growing the tree depth + * idr_for_each - iterate through all stored pointers * @idp: idr handle - * @starting_id: id to start search at - * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer - * @gfp_mask: allocation mask for idr_layer_alloc() - * @layer_idr: optional idr passed to idr_layer_alloc() + * @fn: function to be called for each pointer + * @data: data passed back to callback function * - * Allocate an id in range [@starting_id, INT_MAX] from @idp without - * growing its depth. Returns + * Iterate over the pointers registered with the given idr. The + * callback function will be called for each pointer currently + * registered, passing the id, the pointer and the data pointer passed + * to this function. It is not safe to modify the idr tree while in + * the callback, so functions such as idr_remove are not allowed. + * + * We check the return of @fn each time. If it returns anything other + * than %0, we break out and return that value. * - * the allocated id >= 0 if successful, - * -EAGAIN if the tree needs to grow for allocation to succeed, - * -ENOSPC if the id space is exhausted, - * -ENOMEM if more idr_layers need to be allocated. + * The caller must serialize idr_for_each() vs idr_remove(). */ -static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, - gfp_t gfp_mask, struct idr *layer_idr) +int idr_for_each(struct idr *idr, + int (*fn)(int id, void *p, void *data), void *data) { - int n, m, sh; - struct idr_layer *p, *new; - int l, id, oid; - - id = *starting_id; - restart: - p = idp->top; - l = idp->layers; - pa[l--] = NULL; - while (1) { - /* - * We run around this while until we reach the leaf node... - */ - n = (id >> (IDR_BITS*l)) & IDR_MASK; - m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); - if (m == IDR_SIZE) { - /* no space available go back to previous layer. */ - l++; - oid = id; - id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; - - /* if already at the top layer, we need to grow */ - if (id >= 1 << (idp->layers * IDR_BITS)) { - *starting_id = id; - return -EAGAIN; - } - p = pa[l]; - BUG_ON(!p); - - /* If we need to go up one layer, continue the - * loop; otherwise, restart from the top. - */ - sh = IDR_BITS * (l + 1); - if (oid >> sh == id >> sh) - continue; - else - goto restart; - } - if (m != n) { - sh = IDR_BITS*l; - id = ((id >> sh) ^ n ^ m) << sh; - } - if ((id >= MAX_IDR_BIT) || (id < 0)) - return -ENOSPC; - if (l == 0) + void *p; + unsigned id; + int error = 0; + + idr_for_each_entry(idr, p, id) { + error = fn(id, (void *)p, data); + if (error) break; - /* - * Create the layer below if it is missing. 
- */ - if (!p->ary[m]) { - new = idr_layer_alloc(gfp_mask, layer_idr); - if (!new) - return -ENOMEM; - new->layer = l-1; - new->prefix = id & idr_layer_prefix_mask(new->layer); - rcu_assign_pointer(p->ary[m], new); - p->count++; - } - pa[l--] = p; - p = p->ary[m]; } - pa[l] = p; - return id; -} - -static int idr_get_empty_slot(struct idr *idp, int starting_id, - struct idr_layer **pa, gfp_t gfp_mask, - struct idr *layer_idr) -{ - struct idr_layer *p, *new; - int layers, v, id; - unsigned long flags; - - id = starting_id; -build_up: - p = idp->top; - layers = idp->layers; - if (unlikely(!p)) { - if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) - return -ENOMEM; - p->layer = 0; - layers = 1; - } - /* - * Add a new layer to the top of the tree if the requested - * id is larger than the currently allocated space. - */ - while (id > idr_max(layers)) { - layers++; - if (!p->count) { - /* special case: if the tree is currently empty, - * then we grow the tree by moving the top node - * upwards. - */ - p->layer++; - WARN_ON_ONCE(p->prefix); - continue; - } - if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { - /* - * The allocation failed. If we built part of - * the structure tear it down. - */ - spin_lock_irqsave(&idp->lock, flags); - for (new = p; p && p != idp->top; new = p) { - p = p->ary[0]; - new->ary[0] = NULL; - new->count = 0; - bitmap_clear(new->bitmap, 0, IDR_SIZE); - __move_to_free_list(idp, new); - } - spin_unlock_irqrestore(&idp->lock, flags); - return -ENOMEM; - } - new->ary[0] = p; - new->count = 1; - new->layer = layers-1; - new->prefix = id & idr_layer_prefix_mask(new->layer); - if (bitmap_full(p->bitmap, IDR_SIZE)) - __set_bit(0, new->bitmap); - p = new; - } - rcu_assign_pointer(idp->top, p); - idp->layers = layers; - v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); - if (v == -EAGAIN) - goto build_up; - return(v); + return error; } +EXPORT_SYMBOL(idr_for_each); -/* - * @id and @pa are from a successful allocation from idr_get_empty_slot(). - * Install the user pointer @ptr and mark the slot full. +/** + * idr_replace - replace pointer for given id + * @idp: idr handle + * @ptr: pointer you want associated with the id + * @id: lookup key + * + * Replace the pointer registered with an id and return the old value. + * A %-ENOENT return indicates that @id was not found. + * A %-EINVAL return indicates that @id was not within valid constraints. + * + * The caller must serialize with writers. */ -static void idr_fill_slot(struct idr *idr, void *ptr, int id, - struct idr_layer **pa) +void *idr_replace(struct idr *idr, void *ptr, unsigned id) { - /* update hint used for lookup, cleared from free_layer() */ - rcu_assign_pointer(idr->hint, pa[0]); + void **slot, *old = ERR_PTR(-ENOENT); - rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); - pa[0]->count++; - idr_mark_full(pa, id); -} + rcu_read_lock(); -int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) -{ - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - int rv; + slot = radix_tree_lookup_slot(&idr->ptrs, id); + + if (slot) { + old = radix_tree_deref_slot(slot); + if (old) + radix_tree_replace_slot(slot, ptr); + } - rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); - if (rv < 0) - return rv == -ENOMEM ? 
-EAGAIN : rv; + rcu_read_unlock(); - idr_fill_slot(idp, ptr, rv, pa); - *id = rv; - return 0; + return old; } -EXPORT_SYMBOL(__idr_get_new_above); +EXPORT_SYMBOL(idr_replace); /** - * idr_preload - preload for idr_alloc_range() - * @gfp_mask: allocation mask to use for preloading - * - * Preload per-cpu layer buffer for idr_alloc_range(). Can only be used from - * process context and each idr_preload() invocation should be matched with - * idr_preload_end(). Note that preemption is disabled while preloaded. - * - * The first idr_alloc_range() in the preloaded section can be treated as if it - * were invoked with @gfp_mask used for preloading. This allows using more - * permissive allocation masks for idrs protected by spinlocks. - * - * For example, if idr_alloc_range() below fails, the failure can be treated as - * if idr_alloc_range() were called with GFP_KERNEL rather than GFP_NOWAIT. - * - * idr_preload(GFP_KERNEL); - * spin_lock(lock); - * - * id = idr_alloc_range(idr, ptr, start, end, GFP_NOWAIT); - * - * spin_unlock(lock); - * idr_preload_end(); - * if (id < 0) - * error; + * idr_remove - remove the given id and free its slot + * @idp: idr handle + * @id: unique key */ -void idr_preload(gfp_t gfp_mask) +void idr_remove(struct idr *idr, unsigned id) { - /* - * Consuming preload buffer from non-process context breaks preload - * allocation guarantee. Disallow usage from those contexts. - */ - WARN_ON_ONCE(in_interrupt()); - might_sleep_if(gfp_mask & __GFP_WAIT); - - preempt_disable(); - - /* - * idr_alloc_range() is likely to succeed w/o full idr_layer buffer and - * return value from idr_alloc_range() needs to be checked for failure - * anyway. Silently give up if allocation fails. The caller can - * treat failures from idr_alloc_range() as if idr_alloc() were called - * with @gfp_mask which should be enough. - */ - while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { - struct idr_layer *new; - - preempt_enable(); - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - preempt_disable(); - if (!new) - break; - - /* link the new one to per-cpu preload list */ - new->ary[0] = __this_cpu_read(idr_preload_head); - __this_cpu_write(idr_preload_head, new); - __this_cpu_inc(idr_preload_cnt); - } + radix_tree_delete(&idr->ptrs, id); + ida_remove(&idr->ida, id); } -EXPORT_SYMBOL(idr_preload); +EXPORT_SYMBOL(idr_remove); /** * idr_alloc_range - allocate new idr entry @@ -454,7 +224,7 @@ EXPORT_SYMBOL(idr_preload); * @ptr: pointer to be associated with the new id * @start: the minimum id (inclusive) * @end: the maximum id (exclusive, <= 0 for max) - * @gfp_mask: memory allocation flags + * @gfp: memory allocation flags * * Allocate an id in [start, end) and associate it with @ptr. If no ID is * available in the specified range, returns -ENOSPC. On memory allocation @@ -468,29 +238,27 @@ EXPORT_SYMBOL(idr_preload); * or iteration can be performed under RCU read lock provided the user * destroys @ptr in RCU-safe way after removal from idr. */ -int idr_alloc_range(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) +int idr_alloc_range(struct idr *idr, void *ptr, unsigned start, + unsigned end, gfp_t gfp) { - int max = end > 0 ? 
end - 1 : INT_MAX; /* inclusive upper limit */ - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - int id; + int id, ret; - might_sleep_if(gfp_mask & __GFP_WAIT); + might_sleep_if(gfp & __GFP_WAIT); - /* sanity checks */ - if (WARN_ON_ONCE(start < 0)) - return -EINVAL; - if (unlikely(max < start)) - return -ENOSPC; - - /* allocate id */ - id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); + id = ida_get_range(&idr->ida, start, end, gfp); if (unlikely(id < 0)) return id; - if (unlikely(id > max)) - return -ENOSPC; - idr_fill_slot(idr, ptr, id, pa); - return id; + ret = radix_tree_preload(gfp); + if (ret) { + ida_remove(&idr->ida, id); + return ret; + } + + radix_tree_insert(&idr->ptrs, ret, ptr); + radix_tree_preload_end(); + + return ret; } EXPORT_SYMBOL_GPL(idr_alloc_range); @@ -506,8 +274,8 @@ EXPORT_SYMBOL_GPL(idr_alloc_range); * higher ids if it can. If the "cur" counter wraps, then it will start again * at the "start" end of the range and allocate one that has already been used. */ -int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, - gfp_t gfp_mask) +int idr_alloc_cyclic(struct idr *idr, void *ptr, unsigned start, + unsigned end, gfp_t gfp_mask) { int id; @@ -521,128 +289,6 @@ int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, } EXPORT_SYMBOL(idr_alloc_cyclic); -static void idr_remove_warning(int id) -{ - printk(KERN_WARNING - "idr_remove called for id=%d which is not allocated.\n", id); - dump_stack(); -} - -static void sub_remove(struct idr *idp, int shift, int id) -{ - struct idr_layer *p = idp->top; - struct idr_layer **pa[MAX_IDR_LEVEL + 1]; - struct idr_layer ***paa = &pa[0]; - struct idr_layer *to_free; - int n; - - *paa = NULL; - *++paa = &idp->top; - - while ((shift > 0) && p) { - n = (id >> shift) & IDR_MASK; - __clear_bit(n, p->bitmap); - *++paa = &p->ary[n]; - p = p->ary[n]; - shift -= IDR_BITS; - } - n = id & IDR_MASK; - if (likely(p != NULL && test_bit(n, p->bitmap))) { - __clear_bit(n, p->bitmap); - rcu_assign_pointer(p->ary[n], NULL); - to_free = NULL; - while(*paa && ! --((**paa)->count)){ - if (to_free) - free_layer(idp, to_free); - to_free = **paa; - **paa-- = NULL; - } - if (!*paa) - idp->layers = 0; - if (to_free) - free_layer(idp, to_free); - } else - idr_remove_warning(id); -} - -/** - * idr_remove - remove the given id and free its slot - * @idp: idr handle - * @id: unique key - */ -void idr_remove(struct idr *idp, int id) -{ - struct idr_layer *p; - struct idr_layer *to_free; - - if (id < 0) - return; - - sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); - if (idp->top && idp->top->count == 1 && (idp->layers > 1) && - idp->top->ary[0]) { - /* - * Single child at leftmost slot: we can shrink the tree. - * This level is not needed anymore since when layers are - * inserted, they are inserted at the top of the existing - * tree. - */ - to_free = idp->top; - p = idp->top->ary[0]; - rcu_assign_pointer(idp->top, p); - --idp->layers; - to_free->count = 0; - bitmap_clear(to_free->bitmap, 0, IDR_SIZE); - free_layer(idp, to_free); - } - while (idp->id_free_cnt >= MAX_IDR_FREE) { - p = get_from_free_list(idp); - /* - * Note: we don't call the rcu callback here, since the only - * layers that fall into the freelist are those that have been - * preallocated. 
- */ - kmem_cache_free(idr_layer_cache, p); - } - return; -} -EXPORT_SYMBOL(idr_remove); - -void __idr_remove_all(struct idr *idp) -{ - int n, id, max; - int bt_mask; - struct idr_layer *p; - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - - n = idp->layers * IDR_BITS; - p = idp->top; - rcu_assign_pointer(idp->top, NULL); - max = idr_max(idp->layers); - - id = 0; - while (id >= 0 && id <= max) { - while (n > IDR_BITS && p) { - n -= IDR_BITS; - *paa++ = p; - p = p->ary[(id >> n) & IDR_MASK]; - } - - bt_mask = id; - id += 1 << n; - /* Get the highest bit that the above add changed from 0->1. */ - while (n < fls(id ^ bt_mask)) { - if (p) - free_layer(idp, p); - n += IDR_BITS; - p = *--paa; - } - } - idp->layers = 0; -} -EXPORT_SYMBOL(__idr_remove_all); - /** * idr_destroy - release all cached layers within an idr tree * @idp: idr handle @@ -656,293 +302,28 @@ EXPORT_SYMBOL(__idr_remove_all); * idr_for_each() to free all objects, if necessay, then idr_destroy() to * free up the id mappings and cached idr_layers. */ -void idr_destroy(struct idr *idp) -{ - __idr_remove_all(idp); - - while (idp->id_free_cnt) { - struct idr_layer *p = get_from_free_list(idp); - kmem_cache_free(idr_layer_cache, p); - } -} -EXPORT_SYMBOL(idr_destroy); - -void *idr_find_slowpath(struct idr *idp, int id) -{ - int n; - struct idr_layer *p; - - if (id < 0) - return NULL; - - p = rcu_dereference_raw(idp->top); - if (!p) - return NULL; - n = (p->layer+1) * IDR_BITS; - - if (id > idr_max(p->layer + 1)) - return NULL; - BUG_ON(n == 0); - - while (n > 0 && p) { - n -= IDR_BITS; - BUG_ON(n != p->layer*IDR_BITS); - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - } - return((void *)p); -} -EXPORT_SYMBOL(idr_find_slowpath); - -/** - * idr_for_each - iterate through all stored pointers - * @idp: idr handle - * @fn: function to be called for each pointer - * @data: data passed back to callback function - * - * Iterate over the pointers registered with the given idr. The - * callback function will be called for each pointer currently - * registered, passing the id, the pointer and the data pointer passed - * to this function. It is not safe to modify the idr tree while in - * the callback, so functions such as idr_get_new and idr_remove are - * not allowed. - * - * We check the return of @fn each time. If it returns anything other - * than %0, we break out and return that value. - * - * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). - */ -int idr_for_each(struct idr *idp, - int (*fn)(int id, void *p, void *data), void *data) -{ - int n, id, max, error = 0; - struct idr_layer *p; - struct idr_layer *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - - n = idp->layers * IDR_BITS; - p = rcu_dereference_raw(idp->top); - max = idr_max(idp->layers); - - id = 0; - while (id >= 0 && id <= max) { - while (n > 0 && p) { - n -= IDR_BITS; - *paa++ = p; - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - } - - if (p) { - error = fn(id, (void *)p, data); - if (error) - break; - } - - id += 1 << n; - while (n < fls(id)) { - n += IDR_BITS; - p = *--paa; - } - } - - return error; -} -EXPORT_SYMBOL(idr_for_each); - -/** - * idr_find_next - lookup next object of id to given id. - * @idp: idr handle - * @nextidp: pointer to lookup key - * - * Returns pointer to registered object with id, which is next number to - * given id. After being looked up, *@nextidp will be updated for the next - * iteration. 
- * - * This function can be called under rcu_read_lock(), given that the leaf - * pointers lifetimes are correctly managed. - */ -void *idr_find_next(struct idr *idp, int *nextidp) +void idr_destroy(struct idr *idr) { - struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; - struct idr_layer **paa = &pa[0]; - int id = *nextidp; - int n, max; - - /* find first ent */ - p = rcu_dereference_raw(idp->top); - if (!p) - return NULL; - n = (p->layer + 1) * IDR_BITS; - max = idr_max(p->layer + 1); - - while (id >= 0 && id <= max) { - while (n > 0 && p) { - n -= IDR_BITS; - *paa++ = p; - p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); - } - - if (p) { - *nextidp = id; - return p; - } - - /* - * Proceed to the next layer at the current level. Unlike - * idr_for_each(), @id isn't guaranteed to be aligned to - * layer boundary at this point and adding 1 << n may - * incorrectly skip IDs. Make sure we jump to the - * beginning of the next layer using round_up(). - */ - id = round_up(id + 1, 1 << n); - while (n < fls(id)) { - n += IDR_BITS; - p = *--paa; - } - } - return NULL; -} -EXPORT_SYMBOL(idr_find_next); - - -/** - * idr_replace - replace pointer for given id - * @idp: idr handle - * @ptr: pointer you want associated with the id - * @id: lookup key - * - * Replace the pointer registered with an id and return the old value. - * A %-ENOENT return indicates that @id was not found. - * A %-EINVAL return indicates that @id was not within valid constraints. - * - * The caller must serialize with writers. - */ -void *idr_replace(struct idr *idp, void *ptr, int id) -{ - int n; - struct idr_layer *p, *old_p; - - if (id < 0) - return ERR_PTR(-EINVAL); - - p = idp->top; - if (!p) - return ERR_PTR(-EINVAL); - - n = (p->layer+1) * IDR_BITS; - - if (id >= (1 << n)) - return ERR_PTR(-EINVAL); - - n -= IDR_BITS; - while ((n > 0) && p) { - p = p->ary[(id >> n) & IDR_MASK]; - n -= IDR_BITS; - } - - n = id & IDR_MASK; - if (unlikely(p == NULL || !test_bit(n, p->bitmap))) - return ERR_PTR(-ENOENT); - - old_p = p->ary[n]; - rcu_assign_pointer(p->ary[n], ptr); + void *p; + unsigned id; - return old_p; -} -EXPORT_SYMBOL(idr_replace); + idr_for_each_entry(idr, p, id) + idr_remove(idr, id); -void __init idr_init_cache(void) -{ - idr_layer_cache = kmem_cache_create("idr_layer_cache", - sizeof(struct idr_layer), 0, SLAB_PANIC, NULL); + ida_destroy(&idr->ida); } +EXPORT_SYMBOL(idr_destroy); /** - * idr_init - initialize idr handle + * idr_init - initialize sparse idr handle * @idp: idr handle * * This function is use to set up the handle (@idp) that you will pass * to the rest of the functions. */ -void idr_init(struct idr *idp) +void idr_init(struct idr *idr) { - memset(idp, 0, sizeof(struct idr)); - spin_lock_init(&idp->lock); + ida_init(&idr->ida); + INIT_RADIX_TREE(&idr->ptrs, GFP_NOWAIT); } EXPORT_SYMBOL(idr_init); - -/* IDA */ - - - -/** - * DOC: IDA description - * IDA - IDR based ID allocator - * - * This is id allocator without id -> pointer translation. Memory - * usage is much lower than full blown idr because each id only - * occupies a bit. ida uses a custom leaf node which contains - * IDA_BITMAP_BITS slots. - * - * 2007-04-25 written by Tejun Heo <htejun@gmail.com> - */ - -/** - * ida_destroy - release all cached layers within an ida tree - * @ida: ida handle - */ -void ida_destroy(struct ida *ida) -{ - bitmap_tree_destroy(&ida->map); -} -EXPORT_SYMBOL(ida_destroy); - -/** - * ida_get_range - get a new id. - * @ida: the (initialized) ida. 
- * @start: the minimum id (inclusive, < 0x8000000) - * @end: the maximum id (exclusive, < 0x8000000 or 0) - * @gfp_mask: memory allocation flags - * - * Allocates an id in the range start <= id < end, or returns -ENOSPC. - * On memory allocation failure, returns -ENOMEM. - * - * Use ida_remove() to get rid of an id. - */ -int ida_get_range(struct ida *ida, unsigned int start, - unsigned int end, gfp_t gfp) -{ - unsigned id; - int ret = bitmap_tree_find_set_bits_from(&ida->map, &id, 1, - start, end ?: INT_MAX, gfp); - if (ret < 0) - return ret; - - return id; -} -EXPORT_SYMBOL(ida_get_range); - -/** - * ida_remove - remove an allocated id. - * @ida: the (initialized) ida. - * @id: the id returned by ida_get_range. - */ -void ida_remove(struct ida *ida, unsigned int id) -{ - BUG_ON(id > INT_MAX); - bitmap_tree_clear_bit(&ida->map, id); -} -EXPORT_SYMBOL(ida_remove); - -/** - * ida_init - initialize ida handle - * @ida: ida handle - * - * This function is use to set up the handle (@ida) that you will pass - * to the rest of the functions. - */ -void ida_init(struct ida *ida) -{ - bitmap_tree_init(&ida->map, 0); - -} -EXPORT_SYMBOL(ida_init); |
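
For reference, the preload pattern documented in the new idr_preload() comment looks like this in use. This is an illustrative sketch, not code from the patch; example_idr, example_lock and example_store are hypothetical names, and it assumes process context:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(example_idr);
	static DEFINE_SPINLOCK(example_lock);

	static int example_store(void *ptr)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* GFP_KERNEL allocations happen here, outside the lock */
		spin_lock(&example_lock);

		/* start == 0, end == 0 means "any free id"; GFP_NOWAIT because a spinlock is held */
		id = idr_alloc_range(&example_idr, ptr, 0, 0, GFP_NOWAIT);

		spin_unlock(&example_lock);
		idr_preload_end();		/* matches idr_preload(), re-enables preemption */

		/* >= 0 on success; per the idr_preload() documentation, a failure here
		 * can be treated as if idr_alloc_range() had been called with GFP_KERNEL. */
		return id;
	}

Lookup afterwards is idr_find(&example_idr, id), which in this version takes the RCU read lock itself, and teardown is idr_remove(&example_idr, id) followed by idr_destroy() when the whole idr goes away.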