From d835502f3dacad1638d516ab156d66f0ba377cf5 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Tue, 31 Dec 2013 11:38:27 +0800
Subject: percpu_ida: fix a live lock

steal_tags only happens when the number of free tags is more than half of
the total tags. This is too strict and can cause a live lock: I found that
if one cpu has free tags but other cpus can't steal them (the threads are
bound to specific cpus), threads which want to allocate tags sleep forever.
I hit this while running the next patch, but I think it could happen even
without it.

I also ran a performance test with null_blk. In both cases (each cpu has
enough percpu tags, or the total tags are limited), no performance change
was observed.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 lib/percpu_ida.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

(limited to 'lib')

diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9d054bf91d0..85f43b1718d 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
 /*
  * Try to steal tags from a remote cpu's percpu freelist.
  *
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags
  *
  * Then we iterate through the cpus until we find some tags - we don't attempt
  * to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
 	struct percpu_ida_cpu *remote;
 
 	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
-	     cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
-	     cpus_have_tags--) {
+	     cpus_have_tags; cpus_have_tags--) {
 		cpu = cpumask_next(cpu, &pool->cpus_have_tags);
 
 		if (cpu >= nr_cpu_ids) {
--
cgit v1.2.3-70-g09d2
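To see why the old threshold could live-lock, here is a rough standalone
sketch of the two loop conditions; the pool sizes are illustrative
assumptions, not values taken from the patch. With 128 total tags and a
per-cpu cache limit of 8, the old check only allows stealing once at least
nine cpus hold cached tags, so a single pinned cpu sitting on free tags is
never raided and allocators elsewhere keep sleeping.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned nr_tags = 128;         /* assumed total pool size, for illustration only */
	unsigned percpu_max_size = 8;   /* assumed per-cpu cache limit */
	unsigned cpus_have_tags = 1;    /* one pinned cpu holds all the free tags */

	/* Old condition: steal only if remote cpus could hold more than half the tags. */
	bool old_steals = cpus_have_tags * percpu_max_size > nr_tags / 2;

	/* New condition: steal whenever any remote cpu has free tags. */
	bool new_steals = cpus_have_tags != 0;

	printf("old condition allows stealing: %s\n", old_steals ? "yes" : "no"); /* no: allocators sleep */
	printf("new condition allows stealing: %s\n", new_steals ? "yes" : "no"); /* yes: tags get stolen */
	return 0;
}

With the patch applied, the kernel loop condition drops to a plain
cpus_have_tags, so any non-empty remote freelist is eligible for stealing.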
From 12b13835a0a8bfabea68741e1ab4d4a4cb77d037 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Tue, 4 Feb 2014 12:20:01 -0800
Subject: kbuild: don't enable DEBUG_INFO when building for COMPILE_TEST

It really isn't very interesting to have DEBUG_INFO when doing compile
coverage stuff (you wouldn't want to run the result anyway - that's kind
of the whole point of COMPILE_TEST), and it currently makes the build take
longer and use much more disk space for "all{yes,mod}config".

There's still somewhat active discussion about this, and we might end up
with some new config option for things like this (Andi points out that the
silly X86_DECODER_SELFTEST option also slows down the normal coverage
tests hugely), but I'm starting the ball rolling with this simple
one-liner.

DEBUG_INFO isn't that noticeable if you have tons of memory and a good IO
subsystem, but it hurts you a lot if you don't - for very little upside in
the common use.

Signed-off-by: Linus Torvalds
---
 lib/Kconfig.debug | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dbf94a7d25a..a48abeac753 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
 
 config DEBUG_INFO
 	bool "Compile the kernel with debug info"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !COMPILE_TEST
 	help
 	  If you say Y here the resulting kernel image will include
 	  debugging info resulting in a larger kernel image.
--
cgit v1.2.3-70-g09d2

From 6583327c4dd55acbbf2a6f25e775b28b3abf9a42 Mon Sep 17 00:00:00 2001
From: Peter Oberparleiter
Date: Thu, 6 Feb 2014 15:58:20 +0100
Subject: x86, hweight: Fix BUG when booting with CONFIG_GCOV_PROFILE_ALL=y

Commit d61931d89b, "x86: Add optimized popcnt variants", introduced the
compile flag -fcall-saved-rdi for lib/hweight.c. When combined with the
options -fprofile-arcs and -O2, this flag causes gcc to generate broken
constructor code. As a result, a 64-bit x86 kernel compiled with
CONFIG_GCOV_PROFILE_ALL=y prints the message "gcov: could not create file"
and runs into sporadic BUGs during boot.

The gcc people indicate that these kinds of problems are endemic when
using ad hoc calling conventions. It is therefore best to treat any file
compiled with ad hoc calling conventions as an isolated environment and
avoid things like profiling or coverage analysis, since those subsystems
assume a "normal" calling convention.

This patch avoids the bug by excluding lib/hweight.o from coverage
profiling.

Reported-by: Meelis Roos
Cc: Andrew Morton
Signed-off-by: Peter Oberparleiter
Link: http://lkml.kernel.org/r/52F3A30C.7050205@linux.vnet.ibm.com
Signed-off-by: H. Peter Anvin
Cc:
---
 lib/Makefile | 1 +
 1 file changed, 1 insertion(+)

(limited to 'lib')

diff --git a/lib/Makefile b/lib/Makefile
index a459c31e8c6..04944e9993e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
+GCOV_PROFILE_hweight.o := n
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
--
cgit v1.2.3-70-g09d2