Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      | 119
-rw-r--r--  lib/bug.c              |   1
-rw-r--r--  lib/cpu_rmap.c         |  54
-rw-r--r--  lib/digsig.c           |  43
-rw-r--r--  lib/mpi/mpi-internal.h |   4
-rw-r--r--  lib/mpi/mpicoder.c     |   8
-rw-r--r--  lib/rbtree.c           |  20
7 files changed, 149 insertions, 100 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3a353091a90..a1714c897e3 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -134,7 +134,7 @@ config DEBUG_SECTION_MISMATCH
 	  any use of code/data previously in these sections would
 	  most likely result in an oops.
 	  In the code, functions and variables are annotated with
-	  __init, __devinit, etc. (see the full list in include/linux/init.h),
+	  __init, __cpuinit, etc. (see the full list in include/linux/init.h),
 	  which results in the code/data being placed in specific sections.
 	  The section mismatch analysis is always performed after a full
 	  kernel build, and enabling this option causes the following
@@ -605,61 +605,6 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/lockdep-design.txt.
 
-config PROVE_RCU
-	bool "RCU debugging: prove RCU correctness"
-	depends on PROVE_LOCKING
-	default n
-	help
-	  This feature enables lockdep extensions that check for correct
-	  use of RCU APIs.  This is currently under development.  Say Y
-	  if you want to debug RCU usage or help work on the PROVE_RCU
-	  feature.
-
-	  Say N if you are unsure.
-
-config PROVE_RCU_REPEATEDLY
-	bool "RCU debugging: don't disable PROVE_RCU on first splat"
-	depends on PROVE_RCU
-	default n
-	help
-	  By itself, PROVE_RCU will disable checking upon issuing the
-	  first warning (or "splat").  This feature prevents such
-	  disabling, allowing multiple RCU-lockdep warnings to be printed
-	  on a single reboot.
-
-	  Say Y to allow multiple RCU-lockdep warnings per boot.
-
-	  Say N if you are unsure.
-
-config PROVE_RCU_DELAY
-	bool "RCU debugging: preemptible RCU race provocation"
-	depends on DEBUG_KERNEL && PREEMPT_RCU
-	default n
-	help
-	  There is a class of races that involve an unlikely preemption
-	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-	  been set to INT_MIN.  This feature inserts a delay at that
-	  point to increase the probability of these races.
-
-	  Say Y to increase probability of preemption of __rcu_read_unlock().
-
-	  Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
-	bool "RCU debugging: sparse-based checks for pointer usage"
-	default n
-	help
-	 This feature enables the __rcu sparse annotation for
-	 RCU-protected pointers.  This annotation will cause sparse
-	 to flag any non-RCU used of annotated pointers.  This can be
-	 helpful when debugging RCU usage.  Please note that this feature
-	 is not intended to enforce code cleanliness; it is instead merely
-	 a debugging aid.
-
-	 Say Y to make sparse flag questionable use of RCU-protected pointers
-
-	 Say N if you are unsure.
-
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -937,6 +882,63 @@ config BOOT_PRINTK_DELAY
 	  BOOT_PRINTK_DELAY also may cause LOCKUP_DETECTOR to detect
 	  what it believes to be lockup conditions.
 
+menu "RCU Debugging"
+
+config PROVE_RCU
+	bool "RCU debugging: prove RCU correctness"
+	depends on PROVE_LOCKING
+	default n
+	help
+	  This feature enables lockdep extensions that check for correct
+	  use of RCU APIs.  This is currently under development.  Say Y
+	  if you want to debug RCU usage or help work on the PROVE_RCU
+	  feature.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_REPEATEDLY
+	bool "RCU debugging: don't disable PROVE_RCU on first splat"
+	depends on PROVE_RCU
+	default n
+	help
+	  By itself, PROVE_RCU will disable checking upon issuing the
+	  first warning (or "splat").  This feature prevents such
+	  disabling, allowing multiple RCU-lockdep warnings to be printed
+	  on a single reboot.
+
+	  Say Y to allow multiple RCU-lockdep warnings per boot.
+
+	  Say N if you are unsure.
+
+config PROVE_RCU_DELAY
+	bool "RCU debugging: preemptible RCU race provocation"
+	depends on DEBUG_KERNEL && PREEMPT_RCU
+	default n
+	help
+	  There is a class of races that involve an unlikely preemption
+	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
+	  been set to INT_MIN.  This feature inserts a delay at that
+	  point to increase the probability of these races.
+
+	  Say Y to increase probability of preemption of __rcu_read_unlock().
+
+	  Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+	bool "RCU debugging: sparse-based checks for pointer usage"
+	default n
+	help
+	 This feature enables the __rcu sparse annotation for
+	 RCU-protected pointers.  This annotation will cause sparse
+	 to flag any non-RCU used of annotated pointers.  This can be
+	 helpful when debugging RCU usage.  Please note that this feature
+	 is not intended to enforce code cleanliness; it is instead merely
+	 a debugging aid.
+
+	 Say Y to make sparse flag questionable use of RCU-protected pointers
+
+	 Say N if you are unsure.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
@@ -970,7 +972,7 @@ config RCU_TORTURE_TEST_RUNNABLE
 
 config RCU_CPU_STALL_TIMEOUT
 	int "RCU CPU stall timeout in seconds"
-	depends on TREE_RCU || TREE_PREEMPT_RCU
+	depends on RCU_STALL_COMMON
 	range 3 300
 	default 21
 	help
@@ -1008,6 +1010,7 @@ config RCU_CPU_STALL_INFO
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
+	select TRACE_CLOCK
 	help
 	  This option provides tracing in RCU which presents stats
 	  in debugfs for debugging RCU implementation.
@@ -1015,6 +1018,8 @@
 	  Say Y here if you want to enable RCU tracing
 	  Say N if you are unsure.
 
+endmenu # "RCU Debugging"
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
diff --git a/lib/bug.c b/lib/bug.c
index a28c1415357..d0cdf14c651 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -55,6 +55,7 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 }
 
 #ifdef CONFIG_MODULES
+/* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
 static const struct bug_entry *module_find_bug(unsigned long bugaddr)
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index 145dec5267c..5fbed5caba6 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -45,6 +45,7 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 	if (!rmap)
 		return NULL;
 
+	kref_init(&rmap->refcount);
 	rmap->obj = (void **)((char *)rmap + obj_offset);
 
 	/* Initially assign CPUs to objects on a rota, since we have
@@ -63,6 +64,35 @@ struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
 }
 EXPORT_SYMBOL(alloc_cpu_rmap);
 
+/**
+ * cpu_rmap_release - internal reclaiming helper called from kref_put
+ * @ref: kref to struct cpu_rmap
+ */
+static void cpu_rmap_release(struct kref *ref)
+{
+	struct cpu_rmap *rmap = container_of(ref, struct cpu_rmap, refcount);
+	kfree(rmap);
+}
+
+/**
+ * cpu_rmap_get - internal helper to get new ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+static inline void cpu_rmap_get(struct cpu_rmap *rmap)
+{
+	kref_get(&rmap->refcount);
+}
+
+/**
+ * cpu_rmap_put - release ref on a cpu_rmap
+ * @rmap: reverse-map allocated with alloc_cpu_rmap()
+ */
+int cpu_rmap_put(struct cpu_rmap *rmap)
+{
+	return kref_put(&rmap->refcount, cpu_rmap_release);
+}
+EXPORT_SYMBOL(cpu_rmap_put);
+
 /* Reevaluate nearest object for given CPU, comparing with the given
  * neighbours at the given distance.
  */
@@ -197,8 +227,7 @@ struct irq_glue {
  * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
  * @rmap: Reverse-map allocated with alloc_irq_cpu_map(), or %NULL
  *
- * Must be called in process context, before freeing the IRQs, and
- * without holding any locks required by global workqueue items.
+ * Must be called in process context, before freeing the IRQs.
  */
 void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 {
@@ -212,12 +241,18 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
 		glue = rmap->obj[index];
 		irq_set_affinity_notifier(glue->notify.irq, NULL);
 	}
-	irq_run_affinity_notifiers();
 
-	kfree(rmap);
+	cpu_rmap_put(rmap);
 }
 EXPORT_SYMBOL(free_irq_cpu_rmap);
 
+/**
+ * irq_cpu_rmap_notify - callback for IRQ subsystem when IRQ affinity updated
+ * @notify: struct irq_affinity_notify passed by irq/manage.c
+ * @mask: cpu mask for new SMP affinity
+ *
+ * This is executed in workqueue context.
+ */
 static void
 irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 {
@@ -230,10 +265,16 @@ irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
 		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
 }
 
+/**
+ * irq_cpu_rmap_release - reclaiming callback for IRQ subsystem
+ * @ref: kref to struct irq_affinity_notify passed by irq/manage.c
+ */
 static void irq_cpu_rmap_release(struct kref *ref)
 {
 	struct irq_glue *glue =
 		container_of(ref, struct irq_glue, notify.kref);
+
+	cpu_rmap_put(glue->rmap);
 	kfree(glue);
 }
 
@@ -258,10 +299,13 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
 	glue->notify.notify = irq_cpu_rmap_notify;
 	glue->notify.release = irq_cpu_rmap_release;
 	glue->rmap = rmap;
+	cpu_rmap_get(rmap);
 	glue->index = cpu_rmap_add(rmap, glue);
 	rc = irq_set_affinity_notifier(irq, &glue->notify);
-	if (rc)
+	if (rc) {
+		cpu_rmap_put(glue->rmap);
 		kfree(glue);
+	}
 	return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);
diff --git a/lib/digsig.c b/lib/digsig.c
index 8c0e62975c8..2f31e6a45f0 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -30,11 +30,10 @@
 
 static struct crypto_shash *shash;
 
-static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
-				   unsigned long  msglen,
-				   unsigned long  modulus_bitlen,
-				   unsigned char *out,
-				   unsigned long *outlen)
+static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
+					   unsigned long  msglen,
+					   unsigned long  modulus_bitlen,
+					   unsigned long *outlen)
 {
 	unsigned long modulus_len, ps_len, i;
 
@@ -42,11 +41,11 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
 
 	/* test message size */
 	if ((msglen > modulus_len) || (modulus_len < 11))
-		return -EINVAL;
+		return NULL;
 
 	/* separate encoded message */
-	if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1))
-		return -EINVAL;
+	if (msg[0] != 0x00 || msg[1] != 0x01)
+		return NULL;
 
 	for (i = 2; i < modulus_len - 1; i++)
 		if (msg[i] != 0xFF)
@@ -56,19 +55,13 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
 	if (msg[i] != 0)
 		/* There was no octet with hexadecimal value 0x00
 		to separate ps from m. */
-		return -EINVAL;
+		return NULL;
 
 	ps_len = i - 2;
 
-	if (*outlen < (msglen - (2 + ps_len + 1))) {
-		*outlen = msglen - (2 + ps_len + 1);
-		return -EOVERFLOW;
-	}
-
 	*outlen = (msglen - (2 + ps_len + 1));
-	memcpy(out, &msg[2 + ps_len + 1], *outlen);
 
-	return 0;
+	return msg + 2 + ps_len + 1;
 }
 
 /*
@@ -83,7 +76,8 @@ static int digsig_verify_rsa(struct key *key,
 	unsigned long mlen, mblen;
 	unsigned nret, l;
 	int head, i;
-	unsigned char *out1 = NULL, *out2 = NULL;
+	unsigned char *out1 = NULL;
+	const char *m;
 	MPI in = NULL, res = NULL, pkey[2];
 	uint8_t *p, *datap, *endp;
 	struct user_key_payload *ukp;
@@ -120,7 +114,7 @@ static int digsig_verify_rsa(struct key *key,
 	}
 
 	mblen = mpi_get_nbits(pkey[0]);
-	mlen = (mblen + 7)/8;
+	mlen = DIV_ROUND_UP(mblen, 8);
 
 	if (mlen == 0)
 		goto err;
@@ -129,10 +123,6 @@ static int digsig_verify_rsa(struct key *key,
 	if (!out1)
 		goto err;
 
-	out2 = kzalloc(mlen, GFP_KERNEL);
-	if (!out2)
-		goto err;
-
 	nret = siglen;
 	in = mpi_read_from_buffer(sig, &nret);
 	if (!in)
@@ -162,18 +152,17 @@ static int digsig_verify_rsa(struct key *key,
 	memset(out1, 0, head);
 	memcpy(out1 + head, p, l);
 
-	err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
-	if (err)
-		goto err;
+	kfree(p);
+
+	m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len);
 
-	if (len != hlen || memcmp(out2, h, hlen))
+	if (!m || len != hlen || memcmp(m, h, hlen))
 		err = -EINVAL;
 
 err:
 	mpi_free(in);
 	mpi_free(res);
 	kfree(out1);
-	kfree(out2);
 	while (--i >= 0)
 		mpi_free(pkey[i]);
 err1:
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h
index 77adcf6bc25..60cf765628e 100644
--- a/lib/mpi/mpi-internal.h
+++ b/lib/mpi/mpi-internal.h
@@ -65,10 +65,6 @@
 typedef mpi_limb_t *mpi_ptr_t;	/* pointer to a limb */
 typedef int mpi_size_t;		/* (must be a signed type) */
 
-#define ABS(x) (x >= 0 ? x : -x)
-#define MIN(l, o) ((l) < (o) ? (l) : (o))
-#define MAX(h, i) ((h) > (i) ? (h) : (i))
-
 static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
 {
 	if (a->alloced < b)
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 3962b7f7fe3..5f9c44cdf1f 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -52,7 +52,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
 	else
 		nbits = 0;
 
-	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
 	if (!val)
 		return NULL;
@@ -96,8 +96,8 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
 	buffer += 2;
 	nread = 2;
 
-	nbytes = (nbits + 7) / 8;
-	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+	nbytes = DIV_ROUND_UP(nbits, 8);
+	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
 	if (!val)
 		return NULL;
@@ -193,7 +193,7 @@ int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
 	int nlimbs;
 	int i;
 
-	nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
+	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
 		return -ENOMEM;
 	a->sign = sign;
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4f56a11d67f..c0e31fe2fab 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -194,8 +194,12 @@ __rb_insert(struct rb_node *node, struct rb_root *root,
 	}
 }
 
-__always_inline void
-__rb_erase_color(struct rb_node *parent, struct rb_root *root,
+/*
+ * Inline version for rb_erase() use - we want to be able to inline
+ * and eliminate the dummy_rotate callback there
+ */
+static __always_inline void
+____rb_erase_color(struct rb_node *parent, struct rb_root *root,
 	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
 	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
@@ -355,6 +359,13 @@ __rb_erase_color(struct rb_node *parent, struct rb_root *root,
 		}
 	}
 }
+
+/* Non-inline version for rb_erase_augmented() use */
+void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
+{
+	____rb_erase_color(parent, root, augment_rotate);
+}
 EXPORT_SYMBOL(__rb_erase_color);
 
 /*
@@ -380,7 +391,10 @@ EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
-	rb_erase_augmented(node, root, &dummy_callbacks);
+	struct rb_node *rebalance;
+	rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+	if (rebalance)
+		____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
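
Note on the lib/cpu_rmap.c hunks: they convert the rmap's lifetime from a single kfree() to kref reference counting, so each registered affinity notifier (irq_glue) pins the rmap until its release callback drops the last reference; that is why irq_run_affinity_notifiers() can be removed from free_irq_cpu_rmap(). Below is a minimal userspace sketch of the same pattern, not part of the commit; the names (rmap_demo, rmap_demo_*) are illustrative, and the kernel uses the atomic kref API rather than a plain counter.

/*
 * Sketch of the kref pattern the cpu_rmap change applies: each
 * outstanding user takes a reference on the shared object, and the
 * last put frees it. Hypothetical names, plain (non-atomic) counter.
 */
#include <stdio.h>
#include <stdlib.h>

struct rmap_demo {
	int refcount;			/* stands in for struct kref */
	/* ... payload ... */
};

static struct rmap_demo *rmap_demo_alloc(void)
{
	struct rmap_demo *r = calloc(1, sizeof(*r));
	if (r)
		r->refcount = 1;	/* like kref_init() */
	return r;
}

static void rmap_demo_get(struct rmap_demo *r)
{
	r->refcount++;			/* like kref_get(); real code is atomic */
}

static void rmap_demo_put(struct rmap_demo *r)
{
	if (--r->refcount == 0) {	/* kref_put() invokes the release fn */
		printf("released\n");
		free(r);
	}
}

int main(void)
{
	struct rmap_demo *r = rmap_demo_alloc();

	if (!r)
		return 1;
	rmap_demo_get(r);	/* a notifier (like irq_glue) takes a ref */
	rmap_demo_put(r);	/* free_irq_cpu_rmap() drops its ref ... */
	rmap_demo_put(r);	/* ... and the notifier release drops the last */
	return 0;
}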
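For context on the lib/digsig.c hunks: pkcs_1_v1_5_decode_emsa() parses an EMSA-PKCS1-v1_5 block laid out as 0x00 0x01 <PS: 0xFF padding> 0x00 <message>, and after the change it returns a pointer into the caller's buffer plus a length instead of copying the message into a second allocation, which is why out2 disappears. A standalone sketch of the same parse follows; demo_decode() and the fixed sizes are hypothetical, not the kernel function.

/*
 * Sketch of decoding an EMSA-PKCS1-v1_5 block by returning a pointer
 * into the input buffer, mirroring the patched helper's approach.
 */
#include <stddef.h>
#include <string.h>
#include <stdio.h>

static const unsigned char *demo_decode(const unsigned char *msg,
					size_t msglen, size_t *outlen)
{
	size_t i;

	if (msglen < 11 || msg[0] != 0x00 || msg[1] != 0x01)
		return NULL;
	for (i = 2; i < msglen - 1; i++)	/* skip the 0xFF padding */
		if (msg[i] != 0xFF)
			break;
	if (msg[i] != 0x00)			/* separator must follow PS */
		return NULL;
	*outlen = msglen - (i + 1);
	return msg + i + 1;			/* points into caller's buffer */
}

int main(void)
{
	unsigned char blk[16] = { 0x00, 0x01,
				  0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
				  0xFF, 0xFF, 0xFF, 0xFF, 0x00,
				  'h', 'a', 's', 'h' };
	size_t outlen;
	const unsigned char *m = demo_decode(blk, sizeof(blk), &outlen);

	if (m && outlen == 4 && !memcmp(m, "hash", 4))
		printf("recovered %zu-byte message\n", outlen);
	return 0;
}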
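The lib/rbtree.c hunks rely on a common compiler trick: keep one static __always_inline implementation that takes a callback, call it from rb_erase() with the no-op dummy_rotate so the compiler can see the callback body and eliminate it, and export a separate non-inline wrapper (__rb_erase_color) for augmented users with real callbacks. A toy sketch of the mechanism, with illustrative names (walk, __walk, dummy_step), not the kernel's code:

/*
 * A known no-op callback passed into an always-inline function can be
 * folded away entirely; external callers with real callbacks go
 * through the non-inline wrapper instead.
 */
#include <stdio.h>

static void dummy_step(int i) { (void)i; }	/* no-op, like dummy_rotate */

/* Inline version: with dummy_step, the call below compiles to nothing */
static inline __attribute__((always_inline))
void __walk(int n, void (*step)(int))
{
	for (int i = 0; i < n; i++)
		step(i);	/* eliminated when step is a known no-op */
}

/* Non-inline version for external users with real callbacks */
void walk(int n, void (*step)(int))
{
	__walk(n, step);
}

static void print_step(int i) { printf("step %d\n", i); }

int main(void)
{
	__walk(1000, dummy_step);	/* optimizes to (almost) nothing */
	walk(3, print_step);		/* real callback via the wrapper */
	return 0;
}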