author     Jiri Kosina <jkosina@suse.cz>    2010-12-10 15:19:18 +0100
committer  Jiri Kosina <jkosina@suse.cz>    2010-12-10 15:19:18 +0100
commit     2ade0c1d9d93b7642212657ef76f4a1e30233711 (patch)
tree       63bc720c0ffe5f4760cac4ed617b9870b050175e /lib
parent     504499f22c08a03e2e19dc88d31aa0ecd2ac815e (diff)
parent     6313e3c21743cc88bb5bd8aa72948ee1e83937b6 (diff)
Merge branch 'master' into upstream
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     |  32
-rw-r--r--  lib/bitmap.c          |   3
-rw-r--r--  lib/debug_locks.c     |   2
-rw-r--r--  lib/div64.c           |  52
-rw-r--r--  lib/idr.c             |  65
-rw-r--r--  lib/list_sort.c       | 172
-rw-r--r--  lib/parser.c          |   7
-rw-r--r--  lib/percpu_counter.c  |  55
-rw-r--r--  lib/radix-tree.c      |  83
-rw-r--r--  lib/vsprintf.c        |  19
10 files changed, 360 insertions, 130 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 69a32664c28..28b42b9274d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -317,6 +317,14 @@ config DEBUG_OBJECTS_RCU_HEAD
 	help
 	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
 
+config DEBUG_OBJECTS_PERCPU_COUNTER
+	bool "Debug percpu counter objects"
+	depends on DEBUG_OBJECTS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  percpu counter routines to track the life time of percpu counter
+	  objects and validate the percpu counter operations.
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -366,7 +374,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -740,6 +748,15 @@ config DEBUG_LIST
 
 	  If unsure, say N.
 
+config TEST_LIST_SORT
+	bool "Linked list sorting test"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on 'list_sort()' function test. This test is
+	  executed only once during system boot, so affects only boot time.
+
+	  If unsure, say N.
+
 config DEBUG_SG
 	bool "Debug SG table operations"
 	depends on DEBUG_KERNEL
@@ -1200,6 +1217,19 @@ config ATOMIC64_SELFTEST
 
 	  If unsure, say N.
 
+config ASYNC_RAID6_TEST
+	tristate "Self test for hardware accelerated raid6 recovery"
+	depends on ASYNC_RAID6_RECOV
+	select ASYNC_MEMCPY
+	---help---
+	  This is a one-shot self test that permutes through the
+	  recovery of all the possible two disk failure scenarios for a
+	  N-disk array.  Recovery is performed with the asynchronous
+	  raid6 recovery routines, and will optionally use an offload
+	  engine if one is available.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ffb78c916cc..741fae905ae 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -359,7 +359,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
 
 #define CHUNKSZ				32
 #define nbits_to_hold_value(val)	fls(val)
-#define unhex(c)			(isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
 #define BASEDEC 10		/* fancier cpuset lists input in decimal */
 
 /**
@@ -466,7 +465,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
 			if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
 				return -EOVERFLOW;
 
-			chunk = (chunk << 4) | unhex(c);
+			chunk = (chunk << 4) | hex_to_bin(c);
 			ndigits++; totaldigits++;
 		}
 		if (ndigits == 0)
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 5bf0020b924..b1c17730767 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -8,7 +8,6 @@
  *
  * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
-#include <linux/kernel.h>
 #include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
@@ -39,7 +38,6 @@ int debug_locks_off(void)
 {
 	if (__debug_locks_off()) {
 		if (!debug_locks_silent) {
-			oops_in_progress = 1;
 			console_verbose();
 			return 1;
 		}
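The bitmap.c hunk above swaps the local unhex() macro for the shared hex_to_bin() helper. A rough userspace sketch of the difference (the -1-on-error convention follows the kernel's lib/hexdump.c helper; treat the details here as an assumption, not the kernel code itself):

#include <ctype.h>
#include <stdio.h>

/* Userspace approximation of the kernel's hex_to_bin(): returns the
 * value of a hex digit, or -1 for a non-hex character.  The removed
 * unhex() macro silently returned garbage for invalid input; the
 * helper makes the failure case explicit. */
static int hex_to_bin(char ch)
{
        if (isdigit((unsigned char)ch))
                return ch - '0';
        ch = tolower((unsigned char)ch);
        if (ch >= 'a' && ch <= 'f')
                return ch - 'a' + 10;
        return -1;
}

int main(void)
{
        printf("%d %d %d\n", hex_to_bin('7'), hex_to_bin('f'), hex_to_bin('g'));
        return 0;       /* prints: 7 15 -1 */
}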
diff --git a/lib/div64.c b/lib/div64.c
index a111eb8de9c..5b491919177 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'.  The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
 #ifndef div64_u64
 u64 div64_u64(u64 dividend, u64 divisor)
 {
-	u32 high, d;
+	u32 high = divisor >> 32;
+	u64 quot;
 
-	high = divisor >> 32;
-	if (high) {
-		unsigned int shift = fls(high);
+	if (high == 0) {
+		quot = div_u64(dividend, divisor);
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
 
-		d = divisor >> shift;
-		dividend >>= shift;
-	} else
-		d = divisor;
+		if (quot != 0)
+			quot--;
+		if ((dividend - quot * divisor) >= divisor)
+			quot++;
+	}
 
-	return div_u64(dividend, d);
+	return quot;
 }
 EXPORT_SYMBOL(div64_u64);
 #endif
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+	s64 quot, t;
+
+	quot = div64_u64(abs64(dividend), abs64(divisor));
+	t = (dividend ^ divisor) >> 63;
+
+	return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
 #endif /* BITS_PER_LONG == 32 */
 
 /*
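To see why the shift-and-correct sequence in the new div64_u64() works, here is a standalone userspace port (div_u64() is replaced by native division and fls() is re-created locally; a sketch for experimentation, not the kernel build) that checks the result against the hardware's 64-bit divide:

#include <stdint.h>
#include <stdio.h>

static int fls32(uint32_t x)            /* highest set bit, 1-based */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

/* Userspace port of the Hacker's Delight style div64_u64() above. */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
        uint32_t high = divisor >> 32;
        uint64_t quot;

        if (high == 0) {
                quot = dividend / divisor;      /* div_u64() in the kernel */
        } else {
                /* Scale both operands down until the divisor fits in 32
                 * bits, divide, then correct the estimate, which can be
                 * at most one too large. */
                int n = 1 + fls32(high);

                quot = (dividend >> n) / (divisor >> n);
                if (quot != 0)
                        quot--;
                if ((dividend - quot * divisor) >= divisor)
                        quot++;
        }
        return quot;
}

int main(void)
{
        uint64_t a = 0xDEADBEEFCAFEF00DULL, b = 0x00000001FFFFFFFFULL;

        printf("%llu == %llu\n",
               (unsigned long long)div64_u64(a, b),
               (unsigned long long)(a / b));
        return 0;
}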
diff --git a/lib/idr.c b/lib/idr.c
index 5e0966be0f7..e15502e8b21 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,16 +106,17 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 }
 
 /**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
  * @idp:	idr handle
  * @gfp_mask:	memory allocation flags
  *
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible.  This of course requires that
+ * no spinning locks be held.
  *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
  */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
@@ -290,11 +291,13 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  * This is the allocate id function.  It should be called with any
  * required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
  *
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
+ *
+ * @id returns a value in the range @starting_id ... %0x7fffffff
  */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
@@ -318,14 +321,13 @@ EXPORT_SYMBOL(idr_get_new_above);
  * @ptr: pointer you want associated with the id
  * @id: pointer to the allocated handle
  *
- * This is the allocate id function.  It should be called with any
- * required locks.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
  *
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
  */
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
@@ -388,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 }
 
 /**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
  * @idp: idr handle
  * @id: unique key
  */
@@ -437,7 +439,7 @@ EXPORT_SYMBOL(idr_remove);
  * function will remove all id mappings and leave all idp_layers
  * unused.
  *
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
  * use idr_for_each() to free all objects, if necessay, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
@@ -542,7 +544,7 @@ EXPORT_SYMBOL(idr_find);
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
@@ -637,8 +639,8 @@ EXPORT_SYMBOL(idr_get_next);
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
@@ -696,10 +698,11 @@ void idr_init(struct idr *idp)
 EXPORT_SYMBOL(idr_init);
 
 
-/*
+/**
+ * DOC: IDA description
 * IDA - IDR based ID allocator
 *
- * this is id allocator without id -> pointer translation.  Memory
+ * This is id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
@@ -732,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
 */
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
@@ -765,11 +768,11 @@ EXPORT_SYMBOL(ida_pre_get);
 * Allocate new ID above or equal to @ida.  It should be called with
 * any required locks.
 *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
 *
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
@@ -851,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above);
 *
 * Allocate new ID.  It should be called with any required locks.
 *
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
 *
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @id returns a value in the range %0 ... %0x7fffffff.
 */
 int ida_get_new(struct ida *ida, int *p_id)
 {
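The reworded comments above all describe the same two-step allocation dance. A sketch of what a caller looks like under those rules (my_idr, my_lock and my_alloc_id() are hypothetical names, not part of this patch; the retry structure is what the documentation prescribes):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);              /* hypothetical example state */
static DEFINE_SPINLOCK(my_lock);

static int my_alloc_id(void *obj)
{
        int id, ret;

again:
        /* Refill the IDR's private freelist; may sleep with GFP_KERNEL,
         * so it is done outside the spinlock. */
        if (!idr_pre_get(&my_idr, GFP_KERNEL))
                return -ENOMEM;

        spin_lock(&my_lock);
        ret = idr_get_new(&my_idr, obj, &id);
        spin_unlock(&my_lock);

        /* Another allocator may have drained the preallocation between
         * the two calls -- refill and retry, as the comments suggest. */
        if (ret == -EAGAIN)
                goto again;

        return ret ? ret : id;
}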
diff --git a/lib/list_sort.c b/lib/list_sort.c
index a7616fa3162..d7325c6b103 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -141,77 +141,151 @@ void list_sort(void *priv, struct list_head *head,
 }
 EXPORT_SYMBOL(list_sort);
 
-#ifdef DEBUG_LIST_SORT
+#ifdef CONFIG_TEST_LIST_SORT
+
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
 struct debug_el {
-	struct list_head l_h;
+	unsigned int poison1;
+	struct list_head list;
+	unsigned int poison2;
 	int value;
 	unsigned serial;
 };
 
-static int cmp(void *priv, struct list_head *a, struct list_head *b)
+/* Array, containing pointers to all elements in the test list */
+static struct debug_el **elts __initdata;
+
+static int __init check(struct debug_el *ela, struct debug_el *elb)
 {
-	return container_of(a, struct debug_el, l_h)->value
-	     - container_of(b, struct debug_el, l_h)->value;
+	if (ela->serial >= TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+				ela->serial);
+		return -EINVAL;
+	}
+	if (elb->serial >= TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+				elb->serial);
+		return -EINVAL;
+	}
+	if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
+		printk(KERN_ERR "list_sort_test: error: phantom element\n");
+		return -EINVAL;
+	}
+	if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
+		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+				ela->poison1, ela->poison2);
+		return -EINVAL;
+	}
+	if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
+		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+				elb->poison1, elb->poison2);
+		return -EINVAL;
+	}
+	return 0;
 }
 
-/*
- * The pattern of set bits in the list length determines which cases
- * are hit in list_sort().
- */
-#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
+static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct debug_el *ela, *elb;
+
+	ela = container_of(a, struct debug_el, list);
+	elb = container_of(b, struct debug_el, list);
+
+	check(ela, elb);
+	return ela->value - elb->value;
+}
 
 static int __init list_sort_test(void)
 {
-	int i, r = 1, count;
-	struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
-	struct list_head *cur;
+	int i, count = 1, err = -EINVAL;
+	struct debug_el *el;
+	struct list_head *cur, *tmp;
+	LIST_HEAD(head);
+
+	printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
 
-	printk(KERN_WARNING "testing list_sort()\n");
+	elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+	if (!elts) {
+		printk(KERN_ERR "list_sort_test: error: cannot allocate "
+				"memory\n");
+		goto exit;
+	}
 
-	cur = head;
-	for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
-		struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
-		BUG_ON(!el);
+	for (i = 0; i < TEST_LIST_LEN; i++) {
+		el = kmalloc(sizeof(*el), GFP_KERNEL);
+		if (!el) {
+			printk(KERN_ERR "list_sort_test: error: cannot "
+					"allocate memory\n");
+			goto exit;
+		}
 		 /* force some equivalencies */
-		el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
+		el->value = random32() % (TEST_LIST_LEN/3);
 		el->serial = i;
-
-		el->l_h.prev = cur;
-		cur->next = &el->l_h;
-		cur = cur->next;
+		el->poison1 = TEST_POISON1;
+		el->poison2 = TEST_POISON2;
+		elts[i] = el;
+		list_add_tail(&el->list, &head);
 	}
-	head->prev = cur;
 
-	list_sort(NULL, head, cmp);
+	list_sort(NULL, &head, cmp);
+
+	for (cur = head.next; cur->next != &head; cur = cur->next) {
+		struct debug_el *el1;
+		int cmp_result;
 
-	count = 1;
-	for (cur = head->next; cur->next != head; cur = cur->next) {
-		struct debug_el *el = container_of(cur, struct debug_el, l_h);
-		int cmp_result = cmp(NULL, cur, cur->next);
 		if (cur->next->prev != cur) {
-			printk(KERN_EMERG "list_sort() returned "
-						"a corrupted list!\n");
-			return 1;
-		} else if (cmp_result > 0) {
-			printk(KERN_EMERG "list_sort() failed to sort!\n");
-			return 1;
-		} else if (cmp_result == 0 &&
-				el->serial >= container_of(cur->next,
-					struct debug_el, l_h)->serial) {
-			printk(KERN_EMERG "list_sort() failed to preserve order"
-						" of equivalent elements!\n");
-			return 1;
+			printk(KERN_ERR "list_sort_test: error: list is "
+					"corrupted\n");
+			goto exit;
+		}
+
+		cmp_result = cmp(NULL, cur, cur->next);
+		if (cmp_result > 0) {
+			printk(KERN_ERR "list_sort_test: error: list is not "
+					"sorted\n");
+			goto exit;
+		}
+
+		el = container_of(cur, struct debug_el, list);
+		el1 = container_of(cur->next, struct debug_el, list);
+		if (cmp_result == 0 && el->serial >= el1->serial) {
+			printk(KERN_ERR "list_sort_test: error: order of "
+					"equivalent elements not preserved\n");
+			goto exit;
+		}
+
+		if (check(el, el1)) {
+			printk(KERN_ERR "list_sort_test: error: element check "
+					"failed\n");
+			goto exit;
 		}
-		kfree(cur->prev);
 		count++;
 	}
-	kfree(cur);
-	if (count != LIST_SORT_TEST_LENGTH) {
-		printk(KERN_EMERG "list_sort() returned list of"
-						"different length!\n");
-		return 1;
+
+	if (count != TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: bad list length %d",
+				count);
+		goto exit;
 	}
-	return 0;
+
+	err = 0;
exit:
+	kfree(elts);
+	list_for_each_safe(cur, tmp, &head) {
+		list_del(cur);
+		kfree(container_of(cur, struct debug_el, list));
+	}
+	return err;
 }
 module_init(list_sort_test);
-#endif
+#endif /* CONFIG_TEST_LIST_SORT */
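For comparison with the test's instrumented cmp(), ordinary users of list_sort() only need a comparator of the same shape; a minimal hypothetical sketch (struct item and item_cmp() are invented names):

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
        int key;
        struct list_head node;
};

/* Return <0, 0, >0 like the test's cmp(); list_sort() is a stable
 * merge sort, so items with equal keys keep their original order. */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        return container_of(a, struct item, node)->key -
               container_of(b, struct item, node)->key;
}

/* Usage: list_sort(NULL, &my_list, item_cmp); where my_list is the
 * list_head of a list of struct item (hypothetical). */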
diff --git a/lib/parser.c b/lib/parser.c
index fb34977246b..6e89eca5cca 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -128,12 +128,13 @@ static int match_number(substring_t *s, int *result, int base)
 	char *endp;
 	char *buf;
 	int ret;
+	size_t len = s->to - s->from;
 
-	buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+	buf = kmalloc(len + 1, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
-	memcpy(buf, s->from, s->to - s->from);
-	buf[s->to - s->from] = '\0';
+	memcpy(buf, s->from, len);
+	buf[len] = '\0';
 	*result = simple_strtol(buf, &endp, base);
 	ret = 0;
 	if (endp == buf)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ec9048e74f4..604678d7d06 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -8,10 +8,53 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/debugobjects.h>
 
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
 
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static struct debug_obj_descr percpu_counter_debug_descr;
+
+static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+	struct percpu_counter *fbc = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		percpu_counter_destroy(fbc);
+		debug_object_free(fbc, &percpu_counter_debug_descr);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static struct debug_obj_descr percpu_counter_debug_descr = {
+	.name		= "percpu_counter",
+	.fixup_free	= percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+	debug_object_init(fbc, &percpu_counter_debug_descr);
+	debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+	debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
@@ -30,9 +73,9 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 	s32 *pcount;
-	int cpu = get_cpu();
 
-	pcount = per_cpu_ptr(fbc->counters, cpu);
+	preempt_disable();
+	pcount = this_cpu_ptr(fbc->counters);
 	count = *pcount + amount;
 	if (count >= batch || count <= -batch) {
 		spin_lock(&fbc->lock);
@@ -42,7 +85,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	} else {
 		*pcount = count;
 	}
-	put_cpu();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__percpu_counter_add);
 
@@ -75,7 +118,11 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 	fbc->counters = alloc_percpu(s32);
 	if (!fbc->counters)
 		return -ENOMEM;
+
+	debug_percpu_counter_activate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
+	INIT_LIST_HEAD(&fbc->list);
 	mutex_lock(&percpu_counters_lock);
 	list_add(&fbc->list, &percpu_counters);
 	mutex_unlock(&percpu_counters_lock);
@@ -89,6 +136,8 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
+	debug_percpu_counter_deactivate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
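For context, a sketch of the percpu_counter lifecycle that the new debugobjects hooks track (nr_things and things_init() are invented names; percpu_counter_init() is the usual wrapper around the __percpu_counter_init() patched above):

#include <linux/percpu_counter.h>

static struct percpu_counter nr_things;         /* hypothetical counter */

static int things_init(void)
{
        int err = percpu_counter_init(&nr_things, 0);

        if (err)
                return err;

        percpu_counter_inc(&nr_things);         /* cheap per-CPU path */
        percpu_counter_add(&nr_things, 41);     /* folded into the shared
                                                 * count only once a CPU's
                                                 * delta exceeds the batch */

        /* percpu_counter_sum() walks all CPUs for an exact total */
        pr_info("things: %lld\n", percpu_counter_sum(&nr_things));

        percpu_counter_destroy(&nr_things);     /* what fixup_free() above
                                                 * does for you if you
                                                 * forget under debugobjects */
        return 0;
}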
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f412ab4c24..5086bb962b4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -82,6 +82,16 @@ struct radix_tree_preload {
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
+static inline void *ptr_to_indirect(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
+}
+
+static inline void *indirect_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
+}
+
 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 {
 	return root->gfp_mask & __GFP_BITS_MASK;
@@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 			return -ENOMEM;
 
 		/* Increase the height.  */
-		node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
+		node->slots[0] = indirect_to_ptr(root->rnode);
 
 		/* Propagate the aggregated tag info into the new root */
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 		newheight = root->height+1;
 		node->height = newheight;
 		node->count = 1;
-		node = radix_tree_ptr_to_indirect(node);
+		node = ptr_to_indirect(node);
 		rcu_assign_pointer(root->rnode, node);
 		root->height = newheight;
 	} while (height > root->height);
@@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 			return error;
 	}
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	height = root->height;
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 				rcu_assign_pointer(node->slots[offset], slot);
 				node->count++;
 			} else
-				rcu_assign_pointer(root->rnode,
-					radix_tree_ptr_to_indirect(slot));
+				rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
 		}
 
 		/* Go a level down */
@@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 			return NULL;
 		return is_slot ? (void *)&root->rnode : node;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 		height--;
 	} while (height > 0);
 
-	return is_slot ? (void *)slot:node;
+	return is_slot ? (void *)slot : indirect_to_ptr(node);
 }
 
 /**
@@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 	height = root->height;
 	BUG_ON(index > radix_tree_maxindex(height));
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 
 	while (height > 0) {
@@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	while (height > 0) {
 		int offset;
@@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 
 	if (!radix_tree_is_indirect_ptr(node))
 		return (index == 0);
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 	}
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	/*
 	 * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 		void *newptr;
 
 		BUG_ON(!radix_tree_is_indirect_ptr(to_free));
-		to_free = radix_tree_indirect_to_ptr(to_free);
+		to_free = indirect_to_ptr(to_free);
 
 		/*
 		 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 
 		/*
 		 * We don't need rcu_assign_pointer(), since we are simply
-		 * moving the node from one part of the tree to another.  If
-		 * it was safe to dereference the old pointer to it
+		 * moving the node from one part of the tree to another: if it
+		 * was safe to dereference the old pointer to it
 		 * (to_free->slots[0]), it will be safe to dereference the new
-		 * one (root->rnode).
+		 * one (root->rnode) as far as dependent read barriers go.
 		 */
 		newptr = to_free->slots[0];
 		if (root->height > 1)
-			newptr = radix_tree_ptr_to_indirect(newptr);
+			newptr = ptr_to_indirect(newptr);
 		root->rnode = newptr;
 		root->height--;
+
+		/*
+		 * We have a dilemma here. The node's slot[0] must not be
+		 * NULLed in case there are concurrent lookups expecting to
+		 * find the item. However if this was a bottom-level node,
+		 * then it may be subject to the slot pointer being visible
+		 * to callers dereferencing it. If item corresponding to
+		 * slot[0] is subsequently deleted, these callers would expect
+		 * their slot to become empty sooner or later.
+		 *
+		 * For example, lockless pagecache will look up a slot, deref
+		 * the page pointer, and if the page is 0 refcount it means it
+		 * was concurrently deleted from pagecache so try the deref
+		 * again. Fortunately there is already a requirement for logic
+		 * to retry the entire slot lookup -- the indirect pointer
+		 * problem (replacing direct root node with an indirect pointer
+		 * also results in a stale slot). So tag the slot as indirect
+		 * to force callers to retry.
+		 */
+		if (root->height == 0)
+			*((unsigned long *)&to_free->slots[0]) |=
+						RADIX_TREE_INDIRECT_PTR;
+
 		radix_tree_node_free(to_free);
 	}
 }
@@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 		root->rnode = NULL;
 		goto out;
 	}
-	slot = radix_tree_indirect_to_ptr(slot);
+	slot = indirect_to_ptr(slot);
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
@@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 			radix_tree_node_free(to_free);
 
 	if (pathp->node->count) {
-		if (pathp->node ==
-				radix_tree_indirect_to_ptr(root->rnode))
+		if (pathp->node == indirect_to_ptr(root->rnode))
 			radix_tree_shrink(root);
 		goto out;
 	}
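The ptr_to_indirect()/indirect_to_ptr() pair introduced above is plain low-bit pointer tagging: radix tree nodes are word-aligned, so bit 0 is free to mark "this points to an internal node, retry your lookup". A userspace toy showing the round trip (the tag value and names are stand-ins invented for this sketch):

#include <assert.h>
#include <stdio.h>

#define INDIRECT_PTR 1UL        /* stand-in for RADIX_TREE_INDIRECT_PTR */

static void *ptr_to_indirect(void *ptr)
{
        return (void *)((unsigned long)ptr | INDIRECT_PTR);
}

static void *indirect_to_ptr(void *ptr)
{
        return (void *)((unsigned long)ptr & ~INDIRECT_PTR);
}

static int is_indirect(void *ptr)
{
        return (unsigned long)ptr & INDIRECT_PTR;
}

int main(void)
{
        int value = 42;         /* int is aligned, so bit 0 is unused */
        void *tagged = ptr_to_indirect(&value);

        assert(is_indirect(tagged));
        assert(indirect_to_ptr(tagged) == &value);
        printf("%d\n", *(int *)indirect_to_ptr(tagged));       /* 42 */
        return 0;
}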
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7af9d841c43..c150d3dafff 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -988,8 +988,15 @@ static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	      struct printf_spec spec)
 {
-	if (!ptr)
+	if (!ptr) {
+		/*
+		 * Print (null) with the same width as a pointer so it makes
+		 * tabular output look nice.
+		 */
+		if (spec.field_width == -1)
+			spec.field_width = 2 * sizeof(void *);
 		return string(buf, end, "(null)", spec);
+	}
 
 	switch (*fmt) {
 	case 'F':
@@ -1031,7 +1038,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
-		spec.field_width = 2*sizeof(void *);
+		spec.field_width = 2 * sizeof(void *);
 		spec.flags |= ZEROPAD;
 	}
 	spec.base = 16;
@@ -1497,7 +1504,7 @@ EXPORT_SYMBOL(snprintf);
 * @...: Arguments for the format string
 *
 * The return value is the number of characters written into @buf not including
- * the trailing '\0'. If @size is <= 0 the function returns 0.
+ * the trailing '\0'. If @size is == 0 the function returns 0.
 */
 
 int scnprintf(char *buf, size_t size, const char *fmt, ...)
@@ -1509,7 +1516,11 @@ int scnprintf(char *buf, size_t size, const char *fmt, ...)
 	i = vsnprintf(buf, size, fmt, args);
 	va_end(args);
 
-	return (i >= size) ? (size - 1) : i;
+	if (likely(i < size))
+		return i;
+	if (size != 0)
+		return size - 1;
+	return 0;
 }
 EXPORT_SYMBOL(scnprintf);
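The scnprintf() change above is easiest to see next to snprintf(): snprintf() returns the length the output would have had, while scnprintf() returns what was actually stored, now covering the size == 0 case too. A userspace analog of the new clamping logic (my_scnprintf() is an invented name; vsnprintf() here is the C library's, which unlike the kernel's can return a negative error):

#include <stdarg.h>
#include <stdio.h>

/* Userspace re-creation of the return-value clamping patched above. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (i < 0)
                return 0;               /* libc encoding error */
        if ((size_t)i < size)
                return i;               /* everything fit */
        if (size != 0)
                return size - 1;        /* truncated: chars actually stored */
        return 0;                       /* size == 0: nothing stored */
}

int main(void)
{
        char buf[8];

        /* snprintf() reports the untruncated length (12)... */
        printf("%d\n", snprintf(buf, sizeof(buf), "hello, world"));
        /* ...my_scnprintf() reports what landed in buf (7). */
        printf("%d\n", my_scnprintf(buf, sizeof(buf), "hello, world"));
        return 0;
}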