/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */
#ifndef __IDR_H__
#define __IDR_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
/*
 * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
 * 8 bits only gave us 3 layers out of every pair of pages, which is less
 * efficient except for trees with a largest element between 192-255 inclusive.
 */
#define IDR_BITS 6
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
struct idr_layer {
|
2013-02-28 05:05:07 +04:00
|
|
|
int prefix; /* the ID prefix of this idr_layer */
|
2014-06-07 01:37:15 +04:00
|
|
|
int layer; /* distance from leaf */
|
2010-02-26 16:53:26 +03:00
|
|
|
struct idr_layer __rcu *ary[1<<IDR_BITS];
|
2013-02-28 05:03:51 +04:00
|
|
|
int count; /* When zero, we can release it */
|
2014-06-07 01:37:15 +04:00
|
|
|
union {
|
|
|
|
/* A zero bit means "space here" */
|
|
|
|
DECLARE_BITMAP(bitmap, IDR_SIZE);
|
|
|
|
struct rcu_head rcu_head;
|
|
|
|
};
|
2005-04-17 02:20:36 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
struct idr {
|
2013-02-28 05:05:08 +04:00
|
|
|
struct idr_layer __rcu *hint; /* the last layer allocated from */
|
2013-02-28 05:03:51 +04:00
|
|
|
struct idr_layer __rcu *top;
|
|
|
|
int layers; /* only valid w/o concurrent changes */
|
2013-04-30 03:21:16 +04:00
|
|
|
int cur; /* current pos for cyclic allocation */
|
2013-02-28 05:03:51 +04:00
|
|
|
spinlock_t lock;
|
2014-06-07 01:37:15 +04:00
|
|
|
int id_free_cnt;
|
|
|
|
struct idr_layer *id_free;
|
2005-04-17 02:20:36 +04:00
|
|
|
};
|
|
|
|
/* Static initializer for a struct idr; all other fields start zeroed. */
#define IDR_INIT(name)							\
{									\
	.lock = __SPIN_LOCK_UNLOCKED(name.lock),			\
}
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
|
2016-12-15 02:09:19 +03:00
|
|
|
/**
|
|
|
|
* idr_get_cursor - Return the current position of the cyclic allocator
|
|
|
|
* @idr: idr handle
|
|
|
|
*
|
|
|
|
* The value returned is the value that will be next returned from
|
|
|
|
* idr_alloc_cyclic() if it is free (otherwise the search will start from
|
|
|
|
* this position).
|
|
|
|
*/
|
|
|
|
static inline unsigned int idr_get_cursor(struct idr *idr)
|
|
|
|
{
|
|
|
|
return READ_ONCE(idr->cur);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* idr_set_cursor - Set the current position of the cyclic allocator
|
|
|
|
* @idr: idr handle
|
|
|
|
* @val: new position
|
|
|
|
*
|
|
|
|
* The next call to idr_alloc_cyclic() will return @val if it is free
|
|
|
|
* (otherwise the search will start from this position).
|
|
|
|
*/
|
|
|
|
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
|
|
|
|
{
|
|
|
|
WRITE_ONCE(idr->cur, val);
|
|
|
|
}
|
|
|
|
|
/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* This is what we export.
|
|
|
|
*/
|
|
|
|
|
2013-02-28 05:05:08 +04:00
|
|
|
void *idr_find_slowpath(struct idr *idp, int id);
|
idr: implement idr_preload[_end]() and idr_alloc()
The current idr interface is very cumbersome.
* For all allocations, two function calls - idr_pre_get() and
idr_get_new*() - should be made.
* idr_pre_get() doesn't guarantee that the following idr_get_new*()
will not fail from memory shortage. If idr_get_new*() returns
-EAGAIN, the caller is expected to retry pre_get and allocation.
* idr_get_new*() can't enforce upper limit. Upper limit can only be
enforced by allocating and then freeing if above limit.
* idr_layer buffer is unnecessarily per-idr. Each idr ends up keeping
around MAX_IDR_FREE idr_layers. The memory consumed per idr is
under two pages but it makes it difficult to make idr_layer larger.
This patch implements the following new set of allocation functions.
* idr_preload[_end]() - Similar to radix preload but doesn't fail.
The first idr_alloc() inside preload section can be treated as if it
were called with @gfp_mask used for idr_preload().
* idr_alloc() - Allocate an ID w/ lower and upper limits. Takes
@gfp_flags and can be used w/o preloading. When used inside
preloaded section, the allocation mask of preloading can be assumed.
If idr_alloc() can be called from a context which allows sufficiently
relaxed @gfp_mask, it can be used by itself. If, for example,
idr_alloc() is called inside spinlock protected region, preloading can
be used like the following.
idr_preload(GFP_KERNEL);
spin_lock(lock);
id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
spin_unlock(lock);
idr_preload_end();
if (id < 0)
error;
which is much simpler and less error-prone than idr_pre_get and
idr_get_new*() loop.
The new interface uses per-pcu idr_layer buffer and thus the number of
idr's in the system doesn't affect the amount of memory used for
preloading.
idr_layer_alloc() is introduced to handle idr_layer allocations for
both old and new ID allocation paths. This is a bit hairy now but the
new interface is expected to replace the old and the internal
implementation eventually will become simpler.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-28 05:03:55 +04:00
|
|
|
void idr_preload(gfp_t gfp_mask);
|
|
|
|
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
|
2013-04-30 03:21:16 +04:00
|
|
|
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
|
2007-07-16 10:37:24 +04:00
|
|
|
int idr_for_each(struct idr *idp,
|
|
|
|
int (*fn)(int id, void *p, void *data), void *data);
|
cgroup: CSS ID support
Patch for Per-CSS(Cgroup Subsys State) ID and private hierarchy code.
This patch attaches unique ID to each css and provides following.
- css_lookup(subsys, id)
returns pointer to struct cgroup_subysys_state of id.
- css_get_next(subsys, id, rootid, depth, foundid)
returns the next css under "root" by scanning
When cgroup_subsys->use_id is set, an id for css is maintained.
The cgroup framework only parepares
- css_id of root css for subsys
- id is automatically attached at creation of css.
- id is *not* freed automatically. Because the cgroup framework
don't know lifetime of cgroup_subsys_state.
free_css_id() function is provided. This must be called by subsys.
There are several reasons to develop this.
- Saving space .... For example, memcg's swap_cgroup is array of
pointers to cgroup. But it is not necessary to be very fast.
By replacing pointers(8bytes per ent) to ID (2byes per ent), we can
reduce much amount of memory usage.
- Scanning without lock.
CSS_ID provides "scan id under this ROOT" function. By this, scanning
css under root can be written without locks.
ex)
do {
rcu_read_lock();
next = cgroup_get_next(subsys, id, root, &found);
/* check sanity of next here */
css_tryget();
rcu_read_unlock();
id = found + 1
} while(...)
Characteristics:
- Each css has unique ID under subsys.
- Lifetime of ID is controlled by subsys.
- css ID contains "ID" and "Depth in hierarchy" and stack of hierarchy
- Allowed ID is 1-65535, ID 0 is UNUSED ID.
Design Choices:
- scan-by-ID v.s. scan-by-tree-walk.
As /proc's pid scan does, scan-by-ID is robust when scanning is done
by following kind of routine.
scan -> rest a while(release a lock) -> conitunue from interrupted
memcg's hierarchical reclaim does this.
- When subsys->use_id is set, # of css in the system is limited to
65535.
[bharata@linux.vnet.ibm.com: remove rcu_read_lock() from css_get_next()]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Paul Menage <menage@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-04-03 03:57:25 +04:00
|
|
|
void *idr_get_next(struct idr *idp, int *nextid);
|
2006-06-26 11:27:19 +04:00
|
|
|
void *idr_replace(struct idr *idp, void *ptr, int id);
|
2005-04-17 02:20:36 +04:00
|
|
|
void idr_remove(struct idr *idp, int id);
|
2005-10-23 23:57:18 +04:00
|
|
|
void idr_destroy(struct idr *idp);
|
2005-04-17 02:20:36 +04:00
|
|
|
void idr_init(struct idr *idp);
|
2011-08-09 01:36:56 +04:00
|
|
|
bool idr_is_empty(struct idr *idp);
|
/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	/* idr_preload() disabled preemption to pin the per-cpu layer cache */
	preempt_enable();
}
|
2013-02-28 05:05:08 +04:00
|
|
|
/**
|
|
|
|
* idr_find - return pointer for given id
|
2013-03-05 02:32:54 +04:00
|
|
|
* @idr: idr handle
|
2013-02-28 05:05:08 +04:00
|
|
|
* @id: lookup key
|
|
|
|
*
|
|
|
|
* Return the pointer given the id it has been registered with. A %NULL
|
|
|
|
* return indicates that @id is not valid or you passed %NULL in
|
|
|
|
* idr_get_new().
|
|
|
|
*
|
|
|
|
* This function can be called under rcu_read_lock(), given that the leaf
|
|
|
|
* pointers lifetimes are correctly managed.
|
|
|
|
*/
|
|
|
|
static inline void *idr_find(struct idr *idr, int id)
|
|
|
|
{
|
|
|
|
struct idr_layer *hint = rcu_dereference_raw(idr->hint);
|
|
|
|
|
|
|
|
if (hint && (id & ~IDR_MASK) == hint->prefix)
|
|
|
|
return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
|
|
|
|
|
|
|
|
return idr_find_slowpath(idr, id);
|
|
|
|
}
|
|
|
|
/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
/**
 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define idr_for_each_entry_continue(idp, entry, id)			\
	for ((entry) = idr_get_next((idp), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idp), &(id)))
/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)
|
|
|
|
struct ida_bitmap {
|
|
|
|
long nr_busy;
|
|
|
|
unsigned long bitmap[IDA_BITMAP_LONGS];
|
|
|
|
};
|
|
|
|
|
|
|
|
struct ida {
|
|
|
|
struct idr idr;
|
|
|
|
struct ida_bitmap *free_bitmap;
|
|
|
|
};
|
|
|
|
#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
|
|
|
|
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
|
|
|
|
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
|
|
|
|
void ida_remove(struct ida *ida, int id);
|
|
|
|
void ida_destroy(struct ida *ida);
|
|
|
|
void ida_init(struct ida *ida);
|
|
|
|
|
2011-08-04 03:21:06 +04:00
|
|
|
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
|
|
|
|
gfp_t gfp_mask);
|
|
|
|
void ida_simple_remove(struct ida *ida, unsigned int id);
|
|
|
|
|
/**
 * ida_get_new - allocate new ID
 * @ida:	ida handle
 * @p_id:	pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
|
2016-12-15 02:09:13 +03:00
|
|
|
static inline bool ida_is_empty(struct ida *ida)
|
|
|
|
{
|
|
|
|
return idr_is_empty(&ida->idr);
|
|
|
|
}
|
|
|
|
|
2013-02-28 05:03:52 +04:00
|
|
|
void __init idr_init_cache(void);
|
2011-07-20 16:59:37 +04:00
|
|
|
|
2005-11-08 19:14:08 +03:00
|
|
|
#endif /* __IDR_H__ */
|