Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig           |  3
-rw-r--r-- | lib/Kconfig.debug     | 14
-rw-r--r-- | lib/assoc_array.c     |  6
-rw-r--r-- | lib/hweight.c         |  4
-rw-r--r-- | lib/percpu-refcount.c | 16
-rw-r--r-- | lib/rhashtable.c      | 13
-rw-r--r-- | lib/string.c          |  4
-rw-r--r-- | lib/test_bpf.c        | 25
8 files changed, 70 insertions, 15 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index a5ce0c7f6c3..54cf309a92a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
 config ARCH_USE_CMPXCHG_LOCKREF
 	bool
 
+config ARCH_HAS_FAST_MULTIPLIER
+	bool
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 07c28323f88..3ac43f34437 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -892,6 +892,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
 	  the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
 	  will test all possible w/w mutex interface abuse with the
 	  exception of simply not acquiring all the required locks.
+	  Note that this feature can introduce significant overhead, so
+	  it really should not be enabled in a production or distro kernel,
+	  even a debug kernel.  If you are a driver writer, enable it.  If
+	  you are a distro, do not.
 
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
@@ -1032,8 +1036,13 @@ config TRACE_IRQFLAGS
 	  either tracing or lock debugging.
 
 config STACKTRACE
-	bool
+	bool "Stack backtrace support"
 	depends on STACKTRACE_SUPPORT
+	help
+	  This option causes the kernel to create a /proc/pid/stack for
+	  every process, showing its current stack trace.
+	  It is also used by various kernel debugging features that require
+	  stack trace generation.
 
 config DEBUG_KOBJECT
 	bool "kobject debugging"
@@ -1663,7 +1672,8 @@ config TEST_BPF
 	  against the BPF interpreter or BPF JIT compiler depending on the
 	  current setting. This is in particular useful for BPF JIT compiler
 	  development, but also to run regression tests against changes in
-	  the interpreter code.
+	  the interpreter code. It also enables test stubs for eBPF maps and
+	  verifier used by user space verifier testsuite.
 
 	  If unsure, say N.
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c0b1007011e..2404d03e251 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
 		shortcut = assoc_array_ptr_to_shortcut(ptr);
 		slot = shortcut->parent_slot;
 		cursor = shortcut->back_pointer;
+		if (!cursor)
+			goto gc_complete;
 	} else {
 		slot = node->parent_slot;
 		cursor = ptr;
 	}
-	BUG_ON(!ptr);
+	BUG_ON(!cursor);
 	node = assoc_array_ptr_to_node(cursor);
 	slot++;
 	goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
 gc_complete:
 	edit->set[0].to = new_root;
 	assoc_array_apply_edit(edit);
-	edit->array->nr_leaves_on_tree = nr_leaves_on_tree;
+	array->nr_leaves_on_tree = nr_leaves_on_tree;
 	return 0;
 
 enomem:
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d..9a5c1f22155 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
 
 unsigned int __sw_hweight32(unsigned int w)
 {
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
 	w -= (w >> 1) & 0x55555555;
 	w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
 	w =  (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
 	return __sw_hweight32((unsigned int)(w >> 32)) +
 	       __sw_hweight32((unsigned int)w);
 #elif BITS_PER_LONG == 64
-#ifdef ARCH_HAS_FAST_MULTIPLIER
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
 	w -= (w >> 1) & 0x5555555555555555ul;
 	w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
 	w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe5a3342e96..a89cf09a826 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -184,3 +184,19 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/*
+ * XXX: Temporary kludge to work around SCSI blk-mq stall.  Used only by
+ * block/blk-mq.c::blk_mq_freeze_queue().  Will be removed during v3.18
+ * devel cycle.  Do not use anywhere else.
+ */
+void __percpu_ref_kill_expedited(struct percpu_ref *ref)
+{
+	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
+		  "percpu_ref_kill() called more than once on %pf!",
+		  ref->release);
+
+	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
+	synchronize_sched_expedited();
+	percpu_ref_kill_rcu(&ref->rcu);
+}
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a2c78810ebc..25f14c586ad 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -23,7 +23,6 @@
 #include <linux/hash.h>
 #include <linux/random.h>
 #include <linux/rhashtable.h>
-#include <linux/log2.h>
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4UL
@@ -298,7 +297,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 
 	ASSERT_RHT_MUTEX(ht);
 
-	if (tbl->size <= HASH_MIN_SIZE)
+	if (ht->shift <= ht->p.min_shift)
 		return 0;
 
 	ntbl = bucket_table_alloc(tbl->size / 2, flags);
@@ -506,9 +505,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
-static size_t rounded_hashtable_size(unsigned int nelem)
+static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
-	return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+		   1UL << params->min_shift);
 }
 
 /**
@@ -566,8 +566,11 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	    (!params->key_len && !params->obj_hashfn))
 		return -EINVAL;
 
+	params->min_shift = max_t(size_t, params->min_shift,
+				  ilog2(HASH_MIN_SIZE));
+
 	if (params->nelem_hint)
-		size = rounded_hashtable_size(params->nelem_hint);
+		size = rounded_hashtable_size(params);
 
 	tbl = bucket_table_alloc(size, GFP_KERNEL);
 	if (tbl == NULL)
diff --git a/lib/string.c b/lib/string.c
index 992bf30af75..f3c6ff59641 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
 		return check_bytes8(start, value, bytes);
 
 	value64 = value;
-#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
 	value64 *= 0x0101010101010101;
-#elif defined(ARCH_HAS_FAST_MULTIPLIER)
+#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
 	value64 *= 0x01010101;
 	value64 |= value64 << 32;
 #else
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 8c66c6aace0..23e070bcf72 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1735,6 +1735,27 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 1, 0 } },
 	},
+	{
+		"load 64-bit immediate",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x567800001234LL),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_ALU64_IMM(BPF_RSH, R2, 32),
+			BPF_ALU64_IMM(BPF_LSH, R3, 32),
+			BPF_ALU64_IMM(BPF_RSH, R3, 32),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
 };
 
 static struct net_device dev;
@@ -1836,7 +1857,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		break;
 
 	case INTERNAL:
-		fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
+		fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
 		if (fp == NULL) {
 			pr_cont("UNEXPECTED_FAIL no memory left\n");
 			*err = -ENOMEM;
@@ -1873,7 +1894,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
 		     int runs, u64 *duration)
 {
 	u64 start, finish;
-	int ret, i;
+	int ret = 0, i;
 
 	start = ktime_to_us(ktime_get());
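Note on the lib/hweight.c and lib/string.c hunks above: they only respell the guard from ARCH_HAS_FAST_MULTIPLIER to CONFIG_ARCH_HAS_FAST_MULTIPLIER, since the symbol is now a proper Kconfig bool that architectures select (see the lib/Kconfig hunk). As an aside, the following is a minimal userspace sketch, not kernel code, of the two __sw_hweight32 strategies that this guard chooses between; the popcount32_* names and the test harness are made up for illustration.

/*
 * Illustrative only: the same SWAR (SIMD-within-a-register) reduction,
 * finished either with one multiply (the CONFIG_ARCH_HAS_FAST_MULTIPLIER
 * path) or with shifts and adds (the fallback path).
 */
#include <stdio.h>
#include <stdint.h>

/* Multiply finish: after the reduction each byte holds its own popcount;
 * multiplying by 0x01010101 sums the four bytes into the top byte. */
static unsigned int popcount32_mul(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;                       /* 2-bit sums */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);  /* 4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;                 /* 8-bit sums */
	return (w * 0x01010101) >> 24;
}

/* Shift-and-add finish: same reduction, byte sums folded without a multiply. */
static unsigned int popcount32_shift(uint32_t w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);

	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0f0f0f0f;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000ff;
}

int main(void)
{
	uint32_t samples[] = { 0, 1, 0x80000000u, 0xdeadbeefu, 0xffffffffu };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> %u (mul) / %u (shift)\n",
		       (unsigned int)samples[i],
		       popcount32_mul(samples[i]),
		       popcount32_shift(samples[i]));
	return 0;
}

On CPUs with a fast integer multiplier the single multiply is cheaper than the extra shift/add chain, which is why architectures opt in through the new Kconfig bool instead of every build taking the multiply path.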