/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/wait.h>

/* IDA */

#define IDA_INLINE_NODES	4

struct ida {
	spinlock_t	lock;

	/*
	 * cur_id and allocated_ids are for ida_alloc_cyclic. For cyclic
	 * allocations we search for new ids to allocate starting from the
	 * last id allocated - cur_id is the next id to try allocating.
	 *
	 * But we also don't want the allocated ids to be arbitrarily sparse -
	 * the memory usage for the bitmap could be arbitrarily bad, and if
	 * they're used as keys in a radix tree the memory overhead of the
	 * radix tree could be quite bad as well. So we use allocated_ids to
	 * decide when to restart cur_id from 0, and bound how sparse the
	 * bitmap can be.
	 */
	unsigned	cur_id;
	unsigned	allocated_ids;

	/* size of ida->tree */
	unsigned	nodes;

	/*
	 * Index of first leaf node in ida->tree; equal to the number of
	 * non-leaf nodes; ida->nodes - ida->first_leaf == number of leaf
	 * nodes.
	 */
	unsigned	first_leaf;

	unsigned long	*tree;
	unsigned long	inline_nodes[IDA_INLINE_NODES];
};

#define IDA_INIT(name)						\
{								\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
	.nodes		= IDA_INLINE_NODES,			\
	.first_leaf	= 1,					\
	.tree		= name.inline_nodes,			\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

void ida_remove(struct ida *ida, unsigned id);
int ida_alloc_range(struct ida *ida, unsigned int start,
		    unsigned int end, gfp_t gfp);
int ida_alloc_cyclic(struct ida *ida, unsigned start, unsigned end, gfp_t gfp);
void ida_destroy(struct ida *ida);
int ida_init_prealloc(struct ida *ida, unsigned prealloc);

/**
 * ida_alloc - allocate a new id.
 * @ida: the (initialized) ida.
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range [0, INT_MAX]. Returns -ENOSPC if no ids are
 * available, or -ENOMEM on memory allocation failure.
 *
 * Returns the smallest available id.
 *
 * Use ida_remove() to get rid of an id.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp_mask)
{
	return ida_alloc_range(ida, 0, 0, gfp_mask);
}

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
static inline void ida_init(struct ida *ida)
{
	ida_init_prealloc(ida, 0);
}
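/*
 * Example (illustrative sketch, not part of this interface): typical use of
 * the ida allocator declared above.  The names example_ida, example_get_id()
 * and example_put_id() are hypothetical.
 *
 *	static DEFINE_IDA(example_ida);
 *
 *	static int example_get_id(void)
 *	{
 *		// Smallest free id >= 0, or -ENOSPC/-ENOMEM on failure:
 *		return ida_alloc(&example_ida, GFP_KERNEL);
 *	}
 *
 *	static void example_put_id(int id)
 *	{
 *		ida_remove(&example_ida, id);
 *	}
 */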
/* Percpu IDA/tag allocator */

struct percpu_ida_cpu;

struct percpu_ida {
	/*
	 * number of tags available to be allocated, as passed to
	 * percpu_ida_init()
	 */
	unsigned			nr_tags;

	struct percpu_ida_cpu __percpu	*tag_cpu;

	/*
	 * Bitmap of cpus that (may) have tags on their percpu freelists:
	 * steal_tags() uses this to decide when to steal tags, and which cpus
	 * to try stealing from.
	 *
	 * It's ok for a freelist to be empty when its bit is set - steal_tags()
	 * will just keep looking - but the bitmap _must_ be set whenever a
	 * percpu freelist does have tags.
	 */
	unsigned long			*cpus_have_tags;

	struct {
		/*
		 * When we go to steal tags from another cpu (see steal_tags()),
		 * we want to pick a cpu at random. Cycling through them every
		 * time we steal is a bit easier and more or less equivalent:
		 */
		unsigned		cpu_last_stolen;

		/* For sleeping on allocation failure */
		wait_queue_head_t	wait;

		/* Global freelist */
		struct ida		ida;
	} ____cacheline_aligned_in_smp;
};

int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
void percpu_ida_free(struct percpu_ida *pool, unsigned tag);

void percpu_ida_destroy(struct percpu_ida *pool);
int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags);

/* IDR */

/*
 * We want shallower trees and thus more bits covered at each layer. 8
 * bits gives us a large enough first layer for most use cases and a
 * maximum tree depth of 4. Each idr_layer is slightly larger than 2k on
 * 64bit and 1k on 32bit.
 */
#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)

struct idr_layer {
	int			prefix;	/* the ID prefix of this idr_layer */
	DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
	struct idr_layer __rcu	*ary[1<<IDR_BITS];
	int			count;	/* When zero, we can release it */
	int			layer;	/* distance from leaf */
	struct rcu_head		rcu_head;
};

struct idr {
	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
	struct idr_layer __rcu	*top;
	struct idr_layer	*id_free;
	int			layers;	/* only valid w/o concurrent changes */
	int			id_free_cnt;
	spinlock_t		lock;
};

#define IDR_INIT(name)						\
{								\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
}
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
		     gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	preempt_enable();
}

/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
	struct idr_layer *hint = rcu_dereference_raw(idr->hint);

	if (hint && (id & ~IDR_MASK) == hint->prefix)
		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

	return idr_find_slowpath(idr, id);
}

/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)

/*
 * Don't use the following functions.  These exist only to suppress
 * deprecated warnings on EXPORT_SYMBOL()s.
 */
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void __idr_remove_all(struct idr *idp);

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * Part of old alloc interface.  This is going away.  Use
 * idr_preload[_end]() and idr_alloc() instead.
 */
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	return __idr_pre_get(idp, gfp_mask);
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * Part of old alloc interface.  This is going away.  Use
 * idr_preload[_end]() and idr_alloc() instead.
 */
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
						 int starting_id, int *id)
{
	return __idr_get_new_above(idp, ptr, starting_id, id);
}

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * Part of old alloc interface.  This is going away.  Use
 * idr_preload[_end]() and idr_alloc() instead.
 */
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
{
	return __idr_get_new_above(idp, ptr, 0, id);
}

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * If you're trying to destroy @idp, calling idr_destroy() is enough.
 * This is going away.  Don't use.
 */
static inline void __deprecated idr_remove_all(struct idr *idp)
{
	__idr_remove_all(idp);
}

void __init idr_init_cache(void);

#endif /* __IDR_H__ */
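/*
 * Example (illustrative sketch, not part of this interface): the
 * idr_preload()/idr_alloc()/idr_preload_end() pattern that the deprecated
 * wrappers above point to.  The names example_idr, example_lock and
 * example_insert() are hypothetical.
 *
 *	static DEFINE_IDR(example_idr);
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	static int example_insert(void *ptr)
 *	{
 *		int id;
 *
 *		idr_preload(GFP_KERNEL);	// may sleep, preallocates nodes
 *		spin_lock(&example_lock);
 *		// Allocate the lowest free id >= 0; GFP_NOWAIT because we
 *		// hold a spinlock and rely on the preloaded nodes.
 *		id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
 *		spin_unlock(&example_lock);
 *		idr_preload_end();
 *
 *		return id;	// >= 0 on success, -ENOMEM/-ENOSPC on failure
 *	}
 */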