Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug                 20
-rw-r--r--   lib/cpumask.c                      2
-rw-r--r--   lib/iovec.c                       55
-rw-r--r--   lib/lockref.c                      3
-rw-r--r--   lib/lz4/lz4_decompress.c          12
-rw-r--r--   lib/lzo/lzo1x_decompress_safe.c   62
-rw-r--r--   lib/percpu-refcount.c             86
-rw-r--r--   lib/swiotlb.c                     28
8 files changed, 183 insertions, 85 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7cfcc1b8e10..901096d31c6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -835,7 +835,7 @@ config DEBUG_RT_MUTEXES
 
 config RT_MUTEX_TESTER
 	bool "Built-in scriptable tester for rt-mutexes"
-	depends on DEBUG_KERNEL && RT_MUTEXES
+	depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
 	help
 	  This option enables a rt-mutex tester.
 
@@ -930,7 +930,7 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
 	select KALLSYMS
 	select KALLSYMS_ALL
@@ -1131,20 +1131,6 @@ config PROVE_RCU_REPEATEDLY
 	  Say N if you are unsure.
 
-config PROVE_RCU_DELAY
-	bool "RCU debugging: preemptible RCU race provocation"
-	depends on DEBUG_KERNEL && PREEMPT_RCU
-	default n
-	help
-	  There is a class of races that involve an unlikely preemption
-	  of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-	  been set to INT_MIN. This feature inserts a delay at that
-	  point to increase the probability of these races.
-
-	  Say Y to increase probability of preemption of __rcu_read_unlock().
-
-	  Say N if you are unsure.
-
 config SPARSE_RCU_POINTER
 	bool "RCU debugging: sparse-based checks for pointer usage"
 	default n
@@ -1408,7 +1394,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
 	help
 	  Provide stacktrace filter for fault-injection capabilities
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c101230658e..b6513a9f289 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
 
 	i %= num_online_cpus();
 
-	if (!cpumask_of_node(numa_node)) {
+	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
 		/* Use all online cpu's for non numa aware system */
 		cpumask_copy(mask, cpu_online_mask);
 	} else {
diff --git a/lib/iovec.c b/lib/iovec.c
index 454baa88bf2..7a7c2da4cdd 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -51,3 +51,58 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
 	return 0;
 }
 EXPORT_SYMBOL(memcpy_toiovec);
+
+/*
+ *	Copy kernel to iovec. Returns -EFAULT on error.
+ */
+
+int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
+		      int offset, int len)
+{
+	int copy;
+	for (; len > 0; ++iov) {
+		/* Skip over the finished iovecs */
+		if (unlikely(offset >= iov->iov_len)) {
+			offset -= iov->iov_len;
+			continue;
+		}
+		copy = min_t(unsigned int, iov->iov_len - offset, len);
+		if (copy_to_user(iov->iov_base + offset, kdata, copy))
+			return -EFAULT;
+		offset = 0;
+		kdata += copy;
+		len -= copy;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovecend);
+
+/*
+ *	Copy iovec to kernel. Returns -EFAULT on error.
+ */
+
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+			int offset, int len)
+{
+	/* Skip over the finished iovecs */
+	while (offset >= iov->iov_len) {
+		offset -= iov->iov_len;
+		iov++;
+	}
+
+	while (len > 0) {
+		u8 __user *base = iov->iov_base + offset;
+		int copy = min_t(unsigned int, len, iov->iov_len - offset);
+
+		offset = 0;
+		if (copy_from_user(kdata, base, copy))
+			return -EFAULT;
+		len -= copy;
+		kdata += copy;
+		iov++;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovecend);
diff --git a/lib/lockref.c b/lib/lockref.c
index f07a40d3387..d2233de9a86 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -1,6 +1,5 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
-#include <linux/mutex.h>
 
 #if USE_CMPXCHG_LOCKREF
 
@@ -29,7 +28,7 @@
 		if (likely(old.lock_count == prev.lock_count)) {	\
 			SUCCESS;					\
 		}							\
-		arch_mutex_cpu_relax();					\
+		cpu_relax_lowlatency();					\
 	}								\
 } while (0)
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e3ce0..7a85967060a 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 			len = *ip++;
 			for (; len == 255; length += 255)
 				len = *ip++;
+			if (unlikely(length > (size_t)(length + len)))
+				goto _output_error;
 			length += len;
 		}
 
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 		if (length == ML_MASK) {
 			for (; *ip == 255; length += 255)
 				ip++;
+			if (unlikely(length > (size_t)(length + *ip)))
+				goto _output_error;
 			length += *ip++;
 		}
 
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 
 	/* write overflow error detected */
 _output_error:
-	return (int) (-(((char *)ip) - source));
+	return -1;
 }
 
 static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 			int s = 255;
 			while ((ip < iend) && (s == 255)) {
 				s = *ip++;
+				if (unlikely(length > (size_t)(length + s)))
+					goto _output_error;
 				length += s;
 			}
 		}
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 		if (length == ML_MASK) {
 			while (ip < iend) {
 				int s = *ip++;
+				if (unlikely(length > (size_t)(length + s)))
+					goto _output_error;
 				length += s;
 				if (s == 255)
 					continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 
 	/* write overflow error detected */
 _output_error:
-	return (int) (-(((char *) ip) - source));
+	return -1;
 }
 
 int lz4_decompress(const unsigned char *src, size_t *src_len,
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d522d..8563081e8da 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,11 +19,31 @@
 #include <linux/lzo.h>
 #include "lzodefs.h"
 
-#define HAVE_IP(x)	((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x)	((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x)	if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x)	if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos)	if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x)						\
+	(((size_t)(ip_end - ip) >= (size_t)(t + x)) &&		\
+	 (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x)						\
+	(((size_t)(op_end - op) >= (size_t)(t + x)) &&		\
+	 (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x)						\
+	do {							\
+		if (!HAVE_IP(t, x))				\
+			goto input_overrun;			\
+	} while (0)
+
+#define NEED_OP(t, x)						\
+	do {							\
+		if (!HAVE_OP(t, x))				\
+			goto output_overrun;			\
+	} while (0)
+
+#define TEST_LB(m_pos)						\
+	do {							\
+		if ((m_pos) < out)				\
+			goto lookbehind_overrun;		\
+	} while (0)
 
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 			  unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 			while (unlikely(*ip == 0)) {
 				t += 255;
 				ip++;
-				NEED_IP(1);
+				NEED_IP(1, 0);
 			}
 			t += 15 + *ip++;
 		}
 		t += 3;
 copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-		if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+		if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
 			const unsigned char *ie = ip + t;
 			unsigned char *oe = op + t;
 			do {
@@ -81,8 +101,8 @@ copy_literal_run:
 		} else
 #endif
 		{
-			NEED_OP(t);
-			NEED_IP(t + 3);
+			NEED_OP(t, 0);
+			NEED_IP(t, 3);
 			do {
 				*op++ = *ip++;
 			} while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
 			m_pos -= t >> 2;
 			m_pos -= *ip++ << 2;
 			TEST_LB(m_pos);
-			NEED_OP(2);
+			NEED_OP(2, 0);
 			op[0] = m_pos[0];
 			op[1] = m_pos[1];
 			op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
 				while (unlikely(*ip == 0)) {
 					t += 255;
 					ip++;
-					NEED_IP(1);
+					NEED_IP(1, 0);
 				}
 				t += 31 + *ip++;
-				NEED_IP(2);
+				NEED_IP(2, 0);
 			}
 			m_pos = op - 1;
 			next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
 				while (unlikely(*ip == 0)) {
 					t += 255;
 					ip++;
-					NEED_IP(1);
+					NEED_IP(1, 0);
 				}
 				t += 7 + *ip++;
-				NEED_IP(2);
+				NEED_IP(2, 0);
 			}
 			next = get_unaligned_le16(ip);
 			ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 		if (op - m_pos >= 8) {
 			unsigned char *oe = op + t;
-			if (likely(HAVE_OP(t + 15))) {
+			if (likely(HAVE_OP(t, 15))) {
 				do {
 					COPY8(op, m_pos);
 					op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
 					m_pos += 8;
 				} while (op < oe);
 				op = oe;
-				if (HAVE_IP(6)) {
+				if (HAVE_IP(6, 0)) {
 					state = next;
 					COPY4(op, ip);
 					op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
 					continue;
 				}
 			} else {
-				NEED_OP(t);
+				NEED_OP(t, 0);
 				do {
 					*op++ = *m_pos++;
 				} while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
 #endif
 		{
 			unsigned char *oe = op + t;
-			NEED_OP(t);
+			NEED_OP(t, 0);
 			op[0] = m_pos[0];
 			op[1] = m_pos[1];
 			op += 2;
@@ -194,15 +214,15 @@ match_next:
 		state = next;
 		t = next;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-		if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+		if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
 			COPY4(op, ip);
 			op += t;
 			ip += t;
 		} else
 #endif
 		{
-			NEED_IP(t + 3);
-			NEED_OP(t);
+			NEED_IP(t, 3);
+			NEED_OP(t, 0);
 			while (t > 0) {
 				*op++ = *ip++;
 				t--;
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 963b7034a51..fe5a3342e96 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -31,6 +31,11 @@
 
 #define PCPU_COUNT_BIAS		(1U << 31)
 
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+
 /**
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 {
 	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
-	ref->pcpu_count = alloc_percpu(unsigned);
-	if (!ref->pcpu_count)
+	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+	if (!ref->pcpu_count_ptr)
 		return -ENOMEM;
 
 	ref->release = release;
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: perpcu_ref to re-initialize
  *
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init(). @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
- *
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return. To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
  */
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
 {
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	int cpu;
 
-	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+	BUG_ON(!pcpu_count);
+	WARN_ON(!percpu_ref_is_zero(ref));
+
+	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+	/*
+	 * Restore per-cpu operation. smp_store_release() is paired with
+	 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+	 * that the zeroing is visible to all percpu accesses which can see
+	 * the following PCPU_REF_DEAD clearing.
+	 */
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(pcpu_count, cpu) = 0;
+
+	smp_store_release(&ref->pcpu_count_ptr,
+			  ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref. The caller is responsible for ensuring that
+ * @ref is no longer in active use. The usual places to invoke this
+ * function from are the @ref->release() callback or in init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 
 	if (pcpu_count) {
-		for_each_possible_cpu(cpu)
-			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
-		free_percpu(ref->pcpu_count);
+		free_percpu(pcpu_count);
+		ref->pcpu_count_ptr = PCPU_REF_DEAD;
 	}
 }
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
 	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-	unsigned __percpu *pcpu_count = ref->pcpu_count;
+	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 	unsigned count = 0;
 	int cpu;
 
-	/* Mask out PCPU_REF_DEAD */
-	pcpu_count = (unsigned __percpu *)
-		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
-
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	free_percpu(pcpu_count);
-
 	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 
 	/*
@@ -152,11 +175,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
 {
-	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
 		  "percpu_ref_kill() called more than once!\n");
 
-	ref->pcpu_count = (unsigned __percpu *)
-		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
 
 	ref->confirm_kill = confirm_kill;
 
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 649d097853a..4abda074ea4 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 static phys_addr_t *io_tlb_orig_addr;
 
 /*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_list = memblock_virt_alloc(
 				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
 				PAGE_SIZE);
-	for (i = 0; i < io_tlb_nslabs; i++)
-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	io_tlb_index = 0;
 	io_tlb_orig_addr = memblock_virt_alloc(
 				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
 				PAGE_SIZE);
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
+	io_tlb_index = 0;
 
 	if (verbose)
 		swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_list)
 		goto cleanup3;
 
-	for (i = 0; i < io_tlb_nslabs; i++)
-		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-	io_tlb_index = 0;
-
 	io_tlb_orig_addr = (phys_addr_t *)
 		__get_free_pages(GFP_KERNEL,
 				 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_orig_addr)
 		goto cleanup4;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+	for (i = 0; i < io_tlb_nslabs; i++) {
+		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+	}
+	io_tlb_index = 0;
 
 	swiotlb_print_info();
 
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+	if (orig_addr != INVALID_PHYS_ADDR &&
+	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
 	/*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 		 * Step 1: return the slots to the free list, merging the
 		 * slots with superceeding slots
 		 */
-		for (i = index + nslots - 1; i >= index; i--)
+		for (i = index + nslots - 1; i >= index; i--) {
 			io_tlb_list[i] = ++count;
+			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+		}
 		/*
 		 * Step 2: merge the returned slots with the preceding slots,
 		 * if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
+	if (orig_addr == INVALID_PHYS_ADDR)
+		return;
 	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
 	switch (target) {
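
For context on the bounds hardening above: the lz4 and lzo hunks guard their run-length accumulators with checks of the form `if (length > (size_t)(length + s))`, which catch unsigned wraparound before a bogus length can drive the copy loops. Below is a minimal, self-contained userspace sketch of the same pattern; the `accumulate_length()` helper and the `main()` driver are illustrative only and not part of the patch.

```c
#include <stddef.h>
#include <stdio.h>

/*
 * Accumulate run-length bytes the way the patched decompressors do:
 * if adding the next byte would wrap the size_t accumulator, report
 * an error instead of continuing with a truncated length.
 */
static int accumulate_length(size_t *length, const unsigned char *ip,
			     size_t avail)
{
	size_t i;

	for (i = 0; i < avail; i++) {
		unsigned char s = ip[i];

		/* wraparound check mirroring the lz4/lzo hunks above */
		if (*length > (size_t)(*length + s))
			return -1;
		*length += s;
		if (s != 255)
			break;
	}
	return 0;
}

int main(void)
{
	unsigned char run[] = { 255, 255, 17 };
	size_t len = 0;

	if (accumulate_length(&len, run, sizeof(run)))
		printf("overflow detected\n");
	else
		printf("length = %zu\n", len);	/* prints 527 */

	/* start near SIZE_MAX to force the overflow path */
	len = (size_t)-1 - 100;
	if (accumulate_length(&len, run, sizeof(run)))
		printf("overflow detected\n");
	return 0;
}
```

When the accumulator is already near SIZE_MAX, the wrapped sum compares smaller than the old value, so the check fires before the corrupted length is ever used.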
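Similarly, the swiotlb hunks replace the implicit "orig_addr == 0 means nothing to sync" convention with an explicit INVALID_PHYS_ADDR sentinel (all bits set), since physical address 0 is a legitimate value. A small standalone sketch of that sentinel convention follows; the slot table and helper names are hypothetical stand-ins, not the swiotlb API.

```c
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
#define NSLOTS 4

static phys_addr_t orig_addr[NSLOTS];

/* Mark every slot unused; 0 cannot serve as the marker, it is a valid address. */
static void slots_init(void)
{
	for (int i = 0; i < NSLOTS; i++)
		orig_addr[i] = INVALID_PHYS_ADDR;
}

/* Only sync slots that actually have a saved original address. */
static void slot_sync(int i)
{
	if (orig_addr[i] == INVALID_PHYS_ADDR)
		return;		/* nothing mapped here, skip */
	printf("sync slot %d -> %#llx\n", i, (unsigned long long)orig_addr[i]);
}

int main(void)
{
	slots_init();
	orig_addr[1] = 0x0;	/* physical address 0 is valid and still synced */
	orig_addr[2] = 0x1000;

	for (int i = 0; i < NSLOTS; i++)
		slot_sync(i);
	return 0;
}
```

Only slots still holding the sentinel are skipped, which is the behavior the new checks in swiotlb_tbl_unmap_single() and swiotlb_tbl_sync_single() rely on.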