author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-10-14 22:41:27 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-10-14 22:41:27 -0700
commit     c362495586e8a3a6487a318fcd82eaf15ffe2142 (patch)
tree       86f7b195d36ba198f24f86be327f21a8d24ec248 /lib/idr.c
parent     b70936d9ffbf0f45f4fa13a03122f015f13ecdb0 (diff)
parent     ddffeb8c4d0331609ef2581d84de4d763607bd37 (diff)
Merge 3.7-rc1 into tty-linus
This syncs up the tty-linus branch to the latest in Linus's tree to get all of the UAPI stuff needed for the next set of patches to merge.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'lib/idr.c')
-rw-r--r--  lib/idr.c  32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index 4046e29c0a9..648239079dd 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -20,7 +20,7 @@
* that id to this code and it returns your pointer.
* You can release ids at any time. When all ids are released, most of
- * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
+ * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
* don't need to go to the memory "store" during an id allocate, just
* so you don't need to be too concerned about locking and conflicts
* with the slab allocator.
@@ -122,7 +122,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
- while (idp->id_free_cnt < IDR_FREE_MAX) {
+ while (idp->id_free_cnt < MAX_IDR_FREE) {
struct idr_layer *new;
new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
if (new == NULL)
@@ -179,7 +179,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
sh = IDR_BITS*l;
id = ((id >> sh) ^ n ^ m) << sh;
}
- if ((id >= MAX_ID_BIT) || (id < 0))
+ if ((id >= MAX_IDR_BIT) || (id < 0))
return IDR_NOMORE_SPACE;
if (l == 0)
break;
@@ -223,7 +223,7 @@ build_up:
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
- while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
+ while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
@@ -265,7 +265,7 @@ build_up:
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
int id;
id = idr_get_empty_slot(idp, starting_id, pa);
@@ -357,7 +357,7 @@ static void idr_remove_warning(int id)
static void sub_remove(struct idr *idp, int shift, int id)
{
struct idr_layer *p = idp->top;
- struct idr_layer **pa[MAX_LEVEL];
+ struct idr_layer **pa[MAX_IDR_LEVEL];
struct idr_layer ***paa = &pa[0];
struct idr_layer *to_free;
int n;
@@ -402,7 +402,7 @@ void idr_remove(struct idr *idp, int id)
struct idr_layer *to_free;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
@@ -420,7 +420,7 @@ void idr_remove(struct idr *idp, int id)
to_free->bitmap = to_free->count = 0;
free_layer(to_free);
}
- while (idp->id_free_cnt >= IDR_FREE_MAX) {
+ while (idp->id_free_cnt >= MAX_IDR_FREE) {
p = get_from_free_list(idp);
/*
* Note: we don't call the rcu callback here, since the only
@@ -451,7 +451,7 @@ void idr_remove_all(struct idr *idp)
int n, id, max;
int bt_mask;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -517,7 +517,7 @@ void *idr_find(struct idr *idp, int id)
n = (p->layer+1) * IDR_BITS;
/* Mask off upper bits we don't use for the search. */
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return NULL;
@@ -555,7 +555,7 @@ int idr_for_each(struct idr *idp,
{
int n, id, max, error = 0;
struct idr_layer *p;
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(idr_for_each);
*/
void *idr_get_next(struct idr *idp, int *nextidp)
{
- struct idr_layer *p, *pa[MAX_LEVEL];
+ struct idr_layer *p, *pa[MAX_IDR_LEVEL];
struct idr_layer **paa = &pa[0];
int id = *nextidp;
int n, max;
@@ -659,7 +659,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
n = (p->layer+1) * IDR_BITS;
- id &= MAX_ID_MASK;
+ id &= MAX_IDR_MASK;
if (id >= (1 << n))
return ERR_PTR(-EINVAL);
@@ -780,7 +780,7 @@ EXPORT_SYMBOL(ida_pre_get);
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
- struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer *pa[MAX_IDR_LEVEL];
struct ida_bitmap *bitmap;
unsigned long flags;
int idr_id = starting_id / IDA_BITMAP_BITS;
@@ -793,7 +793,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
if (t < 0)
return _idr_rc_to_errno(t);
- if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
+ if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
if (t != idr_id)
@@ -827,7 +827,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
}
id = idr_id * IDA_BITMAP_BITS + t;
- if (id >= MAX_ID_BIT)
+ if (id >= MAX_IDR_BIT)
return -ENOSPC;
__set_bit(t, bitmap->bitmap);
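
The hunks above are a mechanical rename of the IDR limit constants (IDR_FREE_MAX -> MAX_IDR_FREE, MAX_ID_BIT -> MAX_IDR_BIT, MAX_ID_MASK -> MAX_IDR_MASK, MAX_LEVEL -> MAX_IDR_LEVEL) picked up via the 3.7-rc1 merge; no behavior changes. For context, here is a minimal, hypothetical sketch of how a driver would call the classic two-step idr API that this file implements in this kernel era (idr_pre_get() to refill the per-idr free list, then idr_get_new()); "my_idr", register_obj() and unregister_obj() are illustrative names, not part of this commit.

/*
 * Hypothetical caller of the pre-3.9 idr API from lib/idr.c.
 * Names below are illustrative only.
 */
#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_idr);

static int register_obj(void *obj)
{
	int id, ret;

	do {
		/* Refill the per-idr free list (kept up to MAX_IDR_FREE layers). */
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		/* Allocate the lowest free id and bind it to obj. */
		ret = idr_get_new(&my_idr, obj, &id);
	} while (ret == -EAGAIN);

	return ret ? ret : id;
}

static void unregister_obj(int id)
{
	/* While the id is live, idr_find(&my_idr, id) returns the bound pointer. */
	idr_remove(&my_idr, id);
}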