From 297e1b256b1090adbb4357608be3d4301e76c0ce Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 26 Apr 2008 18:59:42 +0200 Subject: uml: fix build error fix: arch/um/os-Linux/helper.c: In function 'run_helper': arch/um/os-Linux/helper.c:73: error: 'PATH_MAX' undeclared (first use in this function) Signed-off-by: Ingo Molnar --- arch/um/os-Linux/helper.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c index f4bd349d441..f25c29a12d0 100644 --- a/arch/um/os-Linux/helper.c +++ b/arch/um/os-Linux/helper.c @@ -14,6 +14,7 @@ #include "os.h" #include "um_malloc.h" #include "user.h" +#include <limits.h> struct helper_data { void (*pre_exec)(void*); -- cgit v1.2.3-70-g09d2 From 18e413f7193ed2f6991d959863f46330813aa242 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 26 Apr 2008 19:10:17 +0200 Subject: uml: Kconfig cleanup pointed out by Linus: arch/um/Kconfig.x86_64 should include arch/x86/Kconfig.cpu instead of defining those symbols itself. Signed-off-by: Ingo Molnar --- arch/um/Kconfig.x86_64 | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64 index 3fbe69e359e..5696e7b374b 100644 --- a/arch/um/Kconfig.x86_64 +++ b/arch/um/Kconfig.x86_64 @@ -1,3 +1,10 @@ + +menu "Host processor type and features" + +source "arch/x86/Kconfig.cpu" + +endmenu + config UML_X86 bool default y -- cgit v1.2.3-70-g09d2 From 6fd92b63d0626a8fe7eb8e2e50d19ecaa18cb412 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sun, 9 Mar 2008 21:01:04 +0100 Subject: x86: change x86 to use generic find_next_bit The versions with inline assembly are in fact slower on the machines I tested them on (in userspace) (Athlon XP 2800+, p4-like Xeon 2.8GHz, AMD Opteron 270). The i386-version needed a fix similar to 06024f21 to avoid crashing the benchmark. Benchmark using: gcc -fomit-frame-pointer -Os. For each bitmap size 1...512, for each possible bitmap with one bit set, for each possible offset: find the position of the first bit starting at offset. If you follow ;). Times include setup of the bitmap and checking of the results. Athlon Xeon Opteron 32/64bit x86-specific: 0m3.692s 0m2.820s 0m3.196s / 0m2.480s generic: 0m2.622s 0m1.662s 0m2.100s / 0m1.572s If the bitmap size is not a multiple of BITS_PER_LONG, and no set (cleared) bit is found, find_next_bit (find_next_zero_bit) returns a value outside of the range [0, size]. The generic version always returns exactly size. The generic version also uses unsigned long everywhere, while the x86 versions use a mishmash of int, unsigned (int), long and unsigned long. Using the generic version does give a slightly bigger kernel, though. 
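[ illustration, not part of the series: a minimal userspace sketch of the benchmark loop described above. The harness itself was not posted; ref_find_next_bit and the checking logic here are assumptions standing in for the routine under test. ]

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define MAX_BITS 512

/* naive reference implementation, standing in for the routine under test */
static unsigned long ref_find_next_bit(const unsigned long *addr,
                                       unsigned long size, unsigned long offset)
{
        unsigned long i;

        for (i = offset; i < size; i++)
                if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        return i;
        return size;
}

int main(void)
{
        unsigned long bitmap[MAX_BITS / BITS_PER_LONG];
        unsigned long size, bit, offset, res;

        for (size = 1; size <= MAX_BITS; size++) {      /* each bitmap size */
                for (bit = 0; bit < size; bit++) {      /* one bit set */
                        memset(bitmap, 0, sizeof(bitmap));
                        bitmap[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
                        for (offset = 0; offset < size; offset++) {
                                res = ref_find_next_bit(bitmap, size, offset);
                                /* at or before the set bit we must find it;
                                 * past it, a correct search finds nothing */
                                if (res != (offset <= bit ? bit : size))
                                        printf("bad: size=%lu bit=%lu offset=%lu res=%lu\n",
                                               size, bit, offset, res);
                        }
                }
        }
        return 0;
}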
defconfig: text data bss dec hex filename x86-specific: 4738555 481232 626688 5846475 5935cb vmlinux (32 bit) generic: 4738621 481232 626688 5846541 59360d vmlinux (32 bit) x86-specific: 5392395 846568 724424 6963387 6a40bb vmlinux (64 bit) generic: 5392458 846568 724424 6963450 6a40fa vmlinux (64 bit) Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 3 ++ arch/x86/lib/Makefile | 2 +- arch/x86/lib/bitops_32.c | 70 --------------------------------------------- arch/x86/lib/bitops_64.c | 68 ------------------------------------------- include/asm-x86/bitops.h | 6 ++++ include/asm-x86/bitops_32.h | 16 ----------- include/asm-x86/bitops_64.h | 2 -- lib/find_next_bit.c | 2 ++ 8 files changed, 12 insertions(+), 157 deletions(-) delete mode 100644 arch/x86/lib/bitops_32.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 2fadf794483..5639de47ed4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -77,6 +77,9 @@ config GENERIC_BUG def_bool y depends on BUG +config GENERIC_FIND_NEXT_BIT + def_bool y + config GENERIC_HWEIGHT def_bool y diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 25df1c1989f..436093299bd 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -11,7 +11,7 @@ lib-y += memcpy_$(BITS).o ifeq ($(CONFIG_X86_32),y) lib-y += checksum_32.o lib-y += strstr_32.o - lib-y += bitops_32.o semaphore_32.o string_32.o + lib-y += semaphore_32.o string_32.o lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o else diff --git a/arch/x86/lib/bitops_32.c b/arch/x86/lib/bitops_32.c deleted file mode 100644 index b6544045985..00000000000 --- a/arch/x86/lib/bitops_32.c +++ /dev/null @@ -1,70 +0,0 @@ -#include -#include - -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -int find_next_bit(const unsigned long *addr, int size, int offset) -{ - const unsigned long *p = addr + (offset >> 5); - int set = 0, bit = offset & 31, res; - - if (bit) { - /* - * Look for nonzero in the first 32 bits: - */ - __asm__("bsfl %1,%0\n\t" - "jne 1f\n\t" - "movl $32, %0\n" - "1:" - : "=r" (set) - : "r" (*p >> bit)); - if (set < (32 - bit)) - return set + offset; - set = 32 - bit; - p++; - } - /* - * No set bit yet, search remaining full words for a bit - */ - res = find_first_bit (p, size - 32 * (p - addr)); - return (offset + set + res); -} -EXPORT_SYMBOL(find_next_bit); - -/** - * find_next_zero_bit - find the first zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -int find_next_zero_bit(const unsigned long *addr, int size, int offset) -{ - const unsigned long *p = addr + (offset >> 5); - int set = 0, bit = offset & 31, res; - - if (bit) { - /* - * Look for zero in the first 32 bits. 
- */ - __asm__("bsfl %1,%0\n\t" - "jne 1f\n\t" - "movl $32, %0\n" - "1:" - : "=r" (set) - : "r" (~(*p >> bit))); - if (set < (32 - bit)) - return set + offset; - set = 32 - bit; - p++; - } - /* - * No zero yet, search remaining full bytes for a zero - */ - res = find_first_zero_bit(p, size - 32 * (p - addr)); - return (offset + set + res); -} -EXPORT_SYMBOL(find_next_zero_bit); diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c index 0e8f491e6cc..0eeb704d251 100644 --- a/arch/x86/lib/bitops_64.c +++ b/arch/x86/lib/bitops_64.c @@ -1,9 +1,7 @@ #include #undef find_first_zero_bit -#undef find_next_zero_bit #undef find_first_bit -#undef find_next_bit static inline long __find_first_zero_bit(const unsigned long * addr, unsigned long size) @@ -57,39 +55,6 @@ long find_first_zero_bit(const unsigned long * addr, unsigned long size) return __find_first_zero_bit (addr, size); } -/** - * find_next_zero_bit - find the next zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -long find_next_zero_bit (const unsigned long * addr, long size, long offset) -{ - const unsigned long * p = addr + (offset >> 6); - unsigned long set = 0; - unsigned long res, bit = offset&63; - - if (bit) { - /* - * Look for zero in first word - */ - asm("bsfq %1,%0\n\t" - "cmoveq %2,%0" - : "=r" (set) - : "r" (~(*p >> bit)), "r"(64L)); - if (set < (64 - bit)) - return set + offset; - set = 64 - bit; - p++; - } - /* - * No zero yet, search remaining full words for a zero - */ - res = __find_first_zero_bit (p, size - 64 * (p - addr)); - - return (offset + set + res); -} - static inline long __find_first_bit(const unsigned long * addr, unsigned long size) { @@ -136,40 +101,7 @@ long find_first_bit(const unsigned long * addr, unsigned long size) return __find_first_bit(addr,size); } -/** - * find_next_bit - find the first set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -long find_next_bit(const unsigned long * addr, long size, long offset) -{ - const unsigned long * p = addr + (offset >> 6); - unsigned long set = 0, bit = offset & 63, res; - - if (bit) { - /* - * Look for nonzero in the first 64 bits: - */ - asm("bsfq %1,%0\n\t" - "cmoveq %2,%0\n\t" - : "=r" (set) - : "r" (*p >> bit), "r" (64L)); - if (set < (64 - bit)) - return set + offset; - set = 64 - bit; - p++; - } - /* - * No set bit yet, search remaining full words for a bit - */ - res = __find_first_bit (p, size - 64 * (p - addr)); - return (offset + set + res); -} - #include -EXPORT_SYMBOL(find_next_bit); EXPORT_SYMBOL(find_first_bit); EXPORT_SYMBOL(find_first_zero_bit); -EXPORT_SYMBOL(find_next_zero_bit); diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 1ae7b270a1e..31e408de90c 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -306,6 +306,12 @@ static int test_bit(int nr, const volatile unsigned long *addr); #undef BIT_ADDR #undef ADDR +unsigned long find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); +unsigned long find_next_zero_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + + #ifdef CONFIG_X86_32 # include "bitops_32.h" #else diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index 2513a81f82a..7c9ed759afb 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h @@ -39,14 +39,6 @@ 
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) return res; } -/** - * find_next_zero_bit - find the first zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bit number to start searching at - * @size: The maximum size to search - */ -int find_next_zero_bit(const unsigned long *addr, int size, int offset); - /** * __ffs - find first bit in word. * @word: The word to search @@ -82,14 +74,6 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) return x; } -/** - * find_next_bit - find the first set bit in a memory region - * @addr: The address to base the search on - * @offset: The bit number to start searching at - * @size: The maximum size to search - */ -int find_next_bit(const unsigned long *addr, int size, int offset); - /** * ffz - find first zero in word. * @word: The word to search diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index 365f8207ea5..65b20fb2ae7 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -6,9 +6,7 @@ */ extern long find_first_zero_bit(const unsigned long *addr, unsigned long size); -extern long find_next_zero_bit(const unsigned long *addr, long size, long offset); extern long find_first_bit(const unsigned long *addr, unsigned long size); -extern long find_next_bit(const unsigned long *addr, long size, long offset); /* return index of first bet set in val or max when no bit is set */ static inline long __scanbit(unsigned long val, unsigned long max) diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 78ccd73a884..5820e072b89 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -15,6 +15,8 @@ #include #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#undef find_next_bit +#undef find_next_zero_bit /** * find_next_bit - find the next set bit in a memory region -- cgit v1.2.3-70-g09d2 From 60b6783a044a55273b637983f52965c2808a6b86 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Thu, 13 Mar 2008 18:53:52 +0100 Subject: x86, uml: fix uml with generic find_next_bit for x86 Update UML to use the generic bits too. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/um/Kconfig.i386 | 4 ++++ arch/um/Kconfig.x86_64 | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386 index e09edfa560d..be755602829 100644 --- a/arch/um/Kconfig.i386 +++ b/arch/um/Kconfig.i386 @@ -39,6 +39,10 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + config GENERIC_HWEIGHT bool default y diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64 index 5696e7b374b..4ab5aa62ff3 100644 --- a/arch/um/Kconfig.x86_64 +++ b/arch/um/Kconfig.x86_64 @@ -34,6 +34,10 @@ config SMP_BROKEN bool default y +config GENERIC_FIND_NEXT_BIT + bool + default y + config GENERIC_HWEIGHT bool default y -- cgit v1.2.3-70-g09d2 From 64970b68d2b3ed32b964b0b30b1b98518fde388e Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 11 Mar 2008 16:17:19 +0100 Subject: x86, generic: optimize find_next_(zero_)bit for small constant-size bitmaps This moves an optimization for searching constant-sized small bitmaps from x86_64-specific to generic code. On an i386 defconfig (the x86#testing one), the size of vmlinux hardly changes with this applied. 
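[ illustration, not part of the patch: a hedged sketch of what the inlined fast path added below buys. MY_POOL_SIZE, my_pool_map and first_free_slot are invented names. ]

#include <linux/bitops.h>

#define MY_POOL_SIZE 16                 /* constant and < BITS_PER_LONG */

static unsigned long my_pool_map;       /* one word covers all 16 slots */

static unsigned long first_free_slot(void)
{
        /*
         * Because the size is a compile-time constant not bigger than
         * BITS_PER_LONG, the __always_inline wrapper added below reduces
         * this call to __ffs(~my_pool_map | (1ul << 16)) -- a single bsf
         * instruction on x86 -- instead of a call into lib/find_next_bit.c.
         */
        return find_next_zero_bit(&my_pool_map, MY_POOL_SIZE, 0);
}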
I have observed only four places where this optimization avoids a call into find_next_bit: In the functions return_unused_surplus_pages, alloc_fresh_huge_page, and adjust_pool_surplus, this patch avoids a call for a 1-bit bitmap. In __next_cpu a call is avoided for a 32-bit bitmap. That's it. On x86_64, 52 locations are optimized with a minimal increase in code size: Current #testing defconfig: 146 x bsf, 27 x find_next_*bit text data bss dec hex filename 5392637 846592 724424 6963653 6a41c5 vmlinux After removing the x86_64 specific optimization for find_next_*bit: 94 x bsf, 79 x find_next_*bit text data bss dec hex filename 5392358 846592 724424 6963374 6a40ae vmlinux After this patch (making the optimization generic): 146 x bsf, 27 x find_next_*bit text data bss dec hex filename 5392396 846592 724424 6963412 6a40d4 vmlinux [ tglx@linutronix.de: build fixes ] Signed-off-by: Ingo Molnar --- include/asm-generic/bitops/find.h | 2 + include/asm-x86/bitops.h | 6 --- include/asm-x86/bitops_64.h | 10 ----- include/linux/bitops.h | 77 +++++++++++++++++++++++++++++++++++++++ lib/find_next_bit.c | 25 +++++-------- 5 files changed, 88 insertions(+), 32 deletions(-) diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 72a51e5a12e..1914e974251 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h @@ -1,11 +1,13 @@ #ifndef _ASM_GENERIC_BITOPS_FIND_H_ #define _ASM_GENERIC_BITOPS_FIND_H_ +#ifndef CONFIG_GENERIC_FIND_NEXT_BIT extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset); +#endif #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 31e408de90c..1ae7b270a1e 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -306,12 +306,6 @@ static int test_bit(int nr, const volatile unsigned long *addr); #undef BIT_ADDR #undef ADDR -unsigned long find_next_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); -unsigned long find_next_zero_bit(const unsigned long *addr, - unsigned long size, unsigned long offset); - - #ifdef CONFIG_X86_32 # include "bitops_32.h" #else diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index 65b20fb2ae7..7118ef2cc4e 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -15,16 +15,6 @@ static inline long __scanbit(unsigned long val, unsigned long max) return val; } -#define find_next_bit(addr,size,off) \ -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ - ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \ - find_next_bit(addr,size,off))) - -#define find_next_zero_bit(addr,size,off) \ -((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \ - ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \ - find_next_zero_bit(addr,size,off))) - #define find_first_bit(addr, size) \ ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ ? 
(__scanbit(*(unsigned long *)(addr), (size))) \ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 40d54731de7..3865f2c93bd 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -112,4 +112,81 @@ static inline unsigned fls_long(unsigned long l) return fls64(l); } +#ifdef __KERNEL__ +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT +extern unsigned long __find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +static __always_inline unsigned long +find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + unsigned long value; + + /* Avoid a function call if the bitmap size is a constant */ + /* and not bigger than BITS_PER_LONG. */ + + /* insert a sentinel so that __ffs returns size if there */ + /* are no set bits in the bitmap */ + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { + value = (*addr) & ((~0ul) << offset); + value |= (1ul << size); + return __ffs(value); + } + + /* the result of __ffs(0) is undefined, so it needs to be */ + /* handled separately */ + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) { + value = (*addr) & ((~0ul) << offset); + return (value == 0) ? BITS_PER_LONG : __ffs(value); + } + + /* size is not constant or too big */ + return __find_next_bit(addr, size, offset); +} + +extern unsigned long __find_next_zero_bit(const unsigned long *addr, + unsigned long size, unsigned long offset); + +/** + * find_next_zero_bit - find the next cleared bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The bitmap size in bits + */ +static __always_inline unsigned long +find_next_zero_bit(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + unsigned long value; + + /* Avoid a function call if the bitmap size is a constant */ + /* and not bigger than BITS_PER_LONG. */ + + /* insert a sentinel so that __ffs returns size if there */ + /* are no set bits in the bitmap */ + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { + value = (~(*addr)) & ((~0ul) << offset); + value |= (1ul << size); + return __ffs(value); + } + + /* the result of __ffs(0) is undefined, so it needs to be */ + /* handled separately */ + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) { + value = (~(*addr)) & ((~0ul) << offset); + return (value == 0) ? BITS_PER_LONG : __ffs(value); + } + + /* size is not constant or too big */ + return __find_next_zero_bit(addr, size, offset); +} +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ +#endif /* __KERNEL__ */ #endif diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index 5820e072b89..ce94c4c92d1 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -15,17 +15,12 @@ #include #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) -#undef find_next_bit -#undef find_next_zero_bit - -/** - * find_next_bit - find the next set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search + +/* + * Find the next set bit in a memory region. 
*/ -unsigned long find_next_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) +unsigned long __find_next_bit(const unsigned long *addr, + unsigned long size, unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); @@ -62,15 +57,14 @@ found_first: found_middle: return result + __ffs(tmp); } - -EXPORT_SYMBOL(find_next_bit); +EXPORT_SYMBOL(__find_next_bit); /* * This implementation of find_{first,next}_zero_bit was stolen from * Linus' asm-alpha/bitops.h. */ -unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, - unsigned long offset) +unsigned long __find_next_zero_bit(const unsigned long *addr, + unsigned long size, unsigned long offset) { const unsigned long *p = addr + BITOP_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG-1); @@ -107,8 +101,7 @@ found_first: found_middle: return result + ffz(tmp); } - -EXPORT_SYMBOL(find_next_zero_bit); +EXPORT_SYMBOL(__find_next_zero_bit); #ifdef __BIG_ENDIAN -- cgit v1.2.3-70-g09d2 From 12d9c8420b9daa1da3d9e090640fb24bcd0deba2 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sat, 15 Mar 2008 13:04:42 +0100 Subject: x86: merge the simple bitops and move them to bitops.h Some of those can be written in such a way that the same inline assembly can be used to generate both 32 bit and 64 bit code. For ffs and fls, x86_64 unconditionally used the cmov instruction and i386 unconditionally used a conditional branch over a mov instruction. In the current patch I chose to select the version based on the availability of the cmov instruction instead. A small detail here is that x86_64 did not previously set CONFIG_X86_CMOV=y. Improved comments for ffs, ffz, fls and variations. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.cpu | 2 +- include/asm-x86/bitops.h | 99 ++++++++++++++++++++++++++++++++++++++++++++- include/asm-x86/bitops_32.h | 64 ----------------------------- include/asm-x86/bitops_64.h | 76 ---------------------------------- 4 files changed, 99 insertions(+), 142 deletions(-) diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 4da3cdb9c1b..cf3ff2c5cef 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -398,7 +398,7 @@ config X86_TSC # generates cmov. config X86_CMOV def_bool y - depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7) + depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || X86_64) config X86_MINIMUM_CPU_FAMILY int diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 1ae7b270a1e..1b6f547cb6b 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -67,7 +67,6 @@ static inline void __set_bit(int nr, volatile void *addr) : "Ir" (nr) : "memory"); } - /** * clear_bit - Clears a bit in memory * @nr: Bit to clear @@ -304,6 +303,104 @@ static int test_bit(int nr, const volatile unsigned long *addr); #undef BASE_ADDR #undef BIT_ADDR +/** + * __ffs - find first set bit in word + * @word: The word to search + * + * Undefined if no bit exists, so code should check against 0 first. + */ +static inline unsigned long __ffs(unsigned long word) +{ + __asm__("bsf %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +/** + * ffz - find first zero bit in word + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. 
+ */ +static inline unsigned long ffz(unsigned long word) +{ + __asm__("bsf %1,%0" + :"=r" (word) + :"r" (~word)); + return word; +} + +/* + * __fls: find last set bit in word + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. + */ +static inline unsigned long __fls(unsigned long word) +{ + __asm__("bsr %1,%0" + :"=r" (word) + :"rm" (word)); + return word; +} + +#ifdef __KERNEL__ +/** + * ffs - find first set bit in word + * @x: the word to search + * + * This is defined the same way as the libc and compiler builtin ffs + * routines, therefore differs in spirit from the other bitops. + * + * ffs(value) returns 0 if value is 0 or the position of the first + * set bit if value is nonzero. The first (least significant) bit + * is at position 1. + */ +static inline int ffs(int x) +{ + int r; +#ifdef CONFIG_X86_CMOV + __asm__("bsfl %1,%0\n\t" + "cmovzl %2,%0" + : "=r" (r) : "rm" (x), "r" (-1)); +#else + __asm__("bsfl %1,%0\n\t" + "jnz 1f\n\t" + "movl $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); +#endif + return r + 1; +} + +/** + * fls - find last set bit in word + * @x: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffs, but returns the position of the most significant set bit. + * + * fls(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 32. + */ +static inline int fls(int x) +{ + int r; +#ifdef CONFIG_X86_CMOV + __asm__("bsrl %1,%0\n\t" + "cmovzl %2,%0" + : "=&r" (r) : "rm" (x), "rm" (-1)); +#else + __asm__("bsrl %1,%0\n\t" + "jnz 1f\n\t" + "movl $-1,%0\n" + "1:" : "=r" (r) : "rm" (x)); +#endif + return r + 1; +} +#endif /* __KERNEL__ */ + #undef ADDR #ifdef CONFIG_X86_32 diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index 7c9ed759afb..3ed64b21b76 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h @@ -39,20 +39,6 @@ static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) return res; } -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - __asm__("bsfl %1,%0" - :"=r" (word) - :"rm" (word)); - return word; -} - /** * find_first_bit - find the first set bit in a memory region * @addr: The address to start the search at @@ -74,60 +60,10 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) return x; } -/** - * ffz - find first zero in word. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. - */ -static inline unsigned long ffz(unsigned long word) -{ - __asm__("bsfl %1,%0" - :"=r" (word) - :"r" (~word)); - return word; -} - #ifdef __KERNEL__ #include -/** - * ffs - find first bit set - * @x: the word to search - * - * This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz() (man ffs). - */ -static inline int ffs(int x) -{ - int r; - - __asm__("bsfl %1,%0\n\t" - "jnz 1f\n\t" - "movl $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return r+1; -} - -/** - * fls - find last bit set - * @x: the word to search - * - * This is defined the same way as ffs(). 
- */ -static inline int fls(int x) -{ - int r; - - __asm__("bsrl %1,%0\n\t" - "jnz 1f\n\t" - "movl $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return r+1; -} - #include #endif /* __KERNEL__ */ diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index 7118ef2cc4e..a5fbe7a02a3 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -35,70 +35,10 @@ static inline void set_bit_string(unsigned long *bitmap, unsigned long i, } } -/** - * ffz - find first zero in word. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. - */ -static inline unsigned long ffz(unsigned long word) -{ - __asm__("bsfq %1,%0" - :"=r" (word) - :"r" (~word)); - return word; -} - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - __asm__("bsfq %1,%0" - :"=r" (word) - :"rm" (word)); - return word; -} - -/* - * __fls: find last bit set. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. - */ -static inline unsigned long __fls(unsigned long word) -{ - __asm__("bsrq %1,%0" - :"=r" (word) - :"rm" (word)); - return word; -} - #ifdef __KERNEL__ #include -/** - * ffs - find first bit set - * @x: the word to search - * - * This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz (man ffs). - */ -static inline int ffs(int x) -{ - int r; - - __asm__("bsfl %1,%0\n\t" - "cmovzl %2,%0" - : "=r" (r) : "rm" (x), "r" (-1)); - return r+1; -} - /** * fls64 - find last bit set in 64 bit word * @x: the word to search @@ -112,22 +52,6 @@ static inline int fls64(__u64 x) return __fls(x) + 1; } -/** - * fls - find last bit set - * @x: the word to search - * - * This is defined the same way as ffs. - */ -static inline int fls(int x) -{ - int r; - - __asm__("bsrl %1,%0\n\t" - "cmovzl %2,%0" - : "=&r" (r) : "rm" (x), "rm" (-1)); - return r+1; -} - #define ARCH_HAS_FAST_MULTIPLIER 1 #include -- cgit v1.2.3-70-g09d2 From 7d9dff22e8ad06ad330968c9e3d3a2fb55a5f9c3 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sat, 15 Mar 2008 18:30:57 +0100 Subject: generic: introduce a generic __fls implementation Add a generic __fls implementation in the same spirit as the generic __ffs one. It finds the last (most significant) set bit in the given long value. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- include/asm-generic/bitops/__fls.h | 43 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 include/asm-generic/bitops/__fls.h diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h new file mode 100644 index 00000000000..be24465403d --- /dev/null +++ b/include/asm-generic/bitops/__fls.h @@ -0,0 +1,43 @@ +#ifndef _ASM_GENERIC_BITOPS___FLS_H_ +#define _ASM_GENERIC_BITOPS___FLS_H_ + +#include + +/** + * __fls - find last (most-significant) set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. 
+ */ +static inline unsigned long __fls(unsigned long word) +{ + int num = BITS_PER_LONG - 1; + +#if BITS_PER_LONG == 64 + if (!(word & (~0ul << 32))) { + num -= 32; + word <<= 32; + } +#endif + if (!(word & (~0ul << (BITS_PER_LONG-16)))) { + num -= 16; + word <<= 16; + } + if (!(word & (~0ul << (BITS_PER_LONG-8)))) { + num -= 8; + word <<= 8; + } + if (!(word & (~0ul << (BITS_PER_LONG-4)))) { + num -= 4; + word <<= 4; + } + if (!(word & (~0ul << (BITS_PER_LONG-2)))) { + num -= 2; + word <<= 2; + } + if (!(word & (~0ul << (BITS_PER_LONG-1)))) + num -= 1; + return num; +} + +#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */ -- cgit v1.2.3-70-g09d2 From 56a6b1eb7bfb5ace0b5cb9c149f502fbd101b8ab Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sat, 15 Mar 2008 18:31:49 +0100 Subject: generic: implement __fls on all 64-bit archs Implement __fls on all 64-bit archs: alpha has an implementation of fls64. Added __fls(x) = fls64(x) - 1. ia64 has fls, but not __fls. Added __fls based on code of fls. mips and powerpc have __ilog2, which is the same as __fls. Added __fls = __ilog2. parisc, s390, sh and sparc64: Include generic __fls. x86_64 already has __fls. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- include/asm-alpha/bitops.h | 5 +++++ include/asm-ia64/bitops.h | 16 ++++++++++++++++ include/asm-mips/bitops.h | 5 +++++ include/asm-parisc/bitops.h | 1 + include/asm-powerpc/bitops.h | 5 +++++ include/asm-s390/bitops.h | 1 + include/asm-sh/bitops.h | 1 + include/asm-sparc64/bitops.h | 1 + 8 files changed, 35 insertions(+) diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h index 9e19a704d48..15f3ae25c51 100644 --- a/include/asm-alpha/bitops.h +++ b/include/asm-alpha/bitops.h @@ -388,6 +388,11 @@ static inline int fls64(unsigned long x) } #endif +static inline unsigned long __fls(unsigned long x) +{ + return fls64(x) - 1; +} + static inline int fls(int x) { return fls64((unsigned int) x); diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index 953d3df9dd2..e2ca8003733 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h @@ -407,6 +407,22 @@ fls (int t) return ia64_popcnt(x); } +/* + * Find the last (most significant) bit set. Undefined for x==0. + * Bits are numbered from 0..63 (e.g., __fls(9) == 3). 
+ */ +static inline unsigned long +__fls (unsigned long x) +{ + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x |= x >> 32; + return ia64_popcnt(x) - 1; +} + #include /* diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h index ec75ce4cdb8..c2bd126c3b4 100644 --- a/include/asm-mips/bitops.h +++ b/include/asm-mips/bitops.h @@ -591,6 +591,11 @@ static inline int __ilog2(unsigned long x) return 63 - lz; } +static inline unsigned long __fls(unsigned long x) +{ + return __ilog2(x); +} + #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) /* diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index f8eebcbad01..7a6ea10bd23 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h @@ -210,6 +210,7 @@ static __inline__ int fls(int x) return ret; } +#include #include #include #include diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index a99a7492947..897eade3afb 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h @@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x) return 32 - lz; } +static __inline__ unsigned long __fls(unsigned long x) +{ + return __ilog2(x); +} + /* * 64-bit can do this using one cntlzd (count leading zeroes doubleword) * instruction; for 32-bit we use the generic version, which does two diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 965394e6945..b4eb24ab5af 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h @@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b) } #include +#include #include #include diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h index b6ba5a60dec..d7d382f63ee 100644 --- a/include/asm-sh/bitops.h +++ b/include/asm-sh/bitops.h @@ -95,6 +95,7 @@ static inline unsigned long ffz(unsigned long word) #include #include #include +#include #include #endif /* __KERNEL__ */ diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 982ce8992b9..11f9d8146cd 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h @@ -34,6 +34,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr); #include #include #include +#include #include #ifdef __KERNEL__ -- cgit v1.2.3-70-g09d2 From d57594c203b1e7b54373080a797f0cbfa4aade68 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sat, 15 Mar 2008 18:32:36 +0100 Subject: bitops: use __fls for fls64 on 64-bit archs Use __fls for fls64 on 64-bit archs. The implementation for 64-bit archs is moved from x86_64 to asm-generic. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- include/asm-generic/bitops/fls64.h | 22 ++++++++++++++++++++++ include/asm-x86/bitops_64.h | 15 ++------------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h index 1b6b17ce242..86d403f8b25 100644 --- a/include/asm-generic/bitops/fls64.h +++ b/include/asm-generic/bitops/fls64.h @@ -3,6 +3,18 @@ #include +/** + * fls64 - find last set bit in a 64-bit word + * @x: the word to search + * + * This is defined in a similar way as the libc and compiler builtin + * ffsll, but returns the position of the most significant set bit. + * + * fls64(value) returns 0 if value is 0 or the position of the last + * set bit if value is nonzero. The last (most significant) bit is + * at position 64. 
+ */ +#if BITS_PER_LONG == 32 static inline int fls64(__u64 x) { __u32 h = x >> 32; @@ -10,5 +22,15 @@ static inline int fls64(__u64 x) return fls(h) + 32; return fls(x); } +#elif BITS_PER_LONG == 64 +static inline int fls64(__u64 x) +{ + if (x == 0) + return 0; + return __fls(x) + 1; +} +#else +#error BITS_PER_LONG not 32 or 64 +#endif #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index a5fbe7a02a3..d1335208719 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -39,25 +39,14 @@ static inline void set_bit_string(unsigned long *bitmap, unsigned long i, #include -/** - * fls64 - find last bit set in 64 bit word - * @x: the word to search - * - * This is defined the same way as fls. - */ -static inline int fls64(__u64 x) -{ - if (x == 0) - return 0; - return __fls(x) + 1; -} - #define ARCH_HAS_FAST_MULTIPLIER 1 #include #endif /* __KERNEL__ */ +#include + #ifdef __KERNEL__ #include -- cgit v1.2.3-70-g09d2 From 77b9bd9c49442407804c37bcc82021a35277f83c Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 1 Apr 2008 11:46:19 +0200 Subject: x86: generic versions of find_first_(zero_)bit, convert i386 Generic versions of __find_first_bit and __find_first_zero_bit are introduced as simplified versions of __find_next_bit and __find_next_zero_bit. Their compilation and use are guarded by a new config variable GENERIC_FIND_FIRST_BIT. The generic versions of find_first_bit and find_first_zero_bit are implemented in terms of the newly introduced __find_first_bit and __find_first_zero_bit. This patch does not remove the i386-specific implementation, but it does switch i386 to use the generic functions by setting GENERIC_FIND_FIRST_BIT=y for X86_32. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 3 +++ include/asm-x86/bitops_32.h | 2 ++ include/linux/bitops.h | 34 ++++++++++++++++++++++++++ lib/Makefile | 1 + lib/find_next_bit.c | 58 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 98 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5639de47ed4..1a69b68ff6c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -77,6 +77,9 @@ config GENERIC_BUG def_bool y depends on BUG +config GENERIC_FIND_FIRST_BIT + def_bool X86_32 + config GENERIC_FIND_NEXT_BIT def_bool y diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index 3ed64b21b76..ba2c0defafa 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h @@ -5,6 +5,7 @@ * Copyright 1992, Linus Torvalds. */ +#ifndef CONFIG_GENERIC_FIND_FIRST_BIT /** * find_first_zero_bit - find the first zero bit in a memory region * @addr: The address to start the search at @@ -59,6 +60,7 @@ static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) } return x; } +#endif #ifdef __KERNEL__ diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 3865f2c93bd..355d67ba3bd 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -113,6 +113,40 @@ static inline unsigned fls_long(unsigned long l) } #ifdef __KERNEL__ +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT +extern unsigned long __find_first_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_bit - find the first set bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first set bit. 
+ */ +static __always_inline unsigned long +find_first_bit(const unsigned long *addr, unsigned long size) +{ + return __find_first_bit(addr, size); +} + +extern unsigned long __find_first_zero_bit(const unsigned long *addr, + unsigned long size); + +/** + * find_first_zero_bit - find the first cleared bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit number of the first cleared bit. + */ +static __always_inline unsigned long +find_first_zero_bit(const unsigned long *addr, unsigned long size) +{ + return __find_first_zero_bit(addr, size); +} +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ + #ifdef CONFIG_GENERIC_FIND_NEXT_BIT extern unsigned long __find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset); diff --git a/lib/Makefile b/lib/Makefile index bf8000fc7d4..2d7001b7f5a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o +lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index ce94c4c92d1..d3f5784807b 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c @@ -16,6 +16,7 @@ #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) +#ifdef CONFIG_GENERIC_FIND_NEXT_BIT /* * Find the next set bit in a memory region. */ @@ -102,6 +103,63 @@ found_middle: return result + ffz(tmp); } EXPORT_SYMBOL(__find_next_zero_bit); +#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ + +#ifdef CONFIG_GENERIC_FIND_FIRST_BIT +/* + * Find the first set bit in a memory region. + */ +unsigned long __find_first_bit(const unsigned long *addr, + unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) & (~0UL >> (BITS_PER_LONG - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(__find_first_bit); + +/* + * Find the first cleared bit in a memory region. + */ +unsigned long __find_first_zero_bit(const unsigned long *addr, + unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG-1)) { + if (~(tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + + tmp = (*p) | (~0UL << size); + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found: + return result + ffz(tmp); +} +EXPORT_SYMBOL(__find_first_zero_bit); +#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ #ifdef __BIG_ENDIAN -- cgit v1.2.3-70-g09d2 From 2aba6925fdb96428d1129a61b1233597a03a387b Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 1 Apr 2008 17:41:26 +0200 Subject: x86: switch 64-bit to generic find_first_bit Switch x86_64 to generic find_first_bit. The x86_64-specific implementation is not removed. 
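[ illustration, not part of the patch: the generic semantics x86_64 inherits with this change. The bitmap contents are made up and 64-bit longs are assumed. ]

#include <linux/bitops.h>

static unsigned long map[2] = { 0UL, 1UL << 6 };        /* only bit 70 set */

static void example(void)
{
        unsigned long a = find_first_bit(map, 128);      /* returns 70 */
        unsigned long b = find_first_bit(map, 64);       /* nothing set in range:
                                                            returns exactly 64 (size) */
        unsigned long c = find_first_zero_bit(map, 128); /* returns 0 */

        (void)a; (void)b; (void)c;
}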
Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- arch/x86/lib/bitops_64.c | 2 ++ include/asm-x86/bitops_64.h | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1a69b68ff6c..700447738e7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -78,7 +78,7 @@ config GENERIC_BUG depends on BUG config GENERIC_FIND_FIRST_BIT - def_bool X86_32 + def_bool y config GENERIC_FIND_NEXT_BIT def_bool y diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c index 0eeb704d251..568467d390c 100644 --- a/arch/x86/lib/bitops_64.c +++ b/arch/x86/lib/bitops_64.c @@ -1,3 +1,4 @@ +#ifndef CONFIG_GENERIC_FIND_FIRST_BIT #include <linux/bitops.h> #undef find_first_zero_bit @@ -105,3 +106,4 @@ long find_first_bit(const unsigned long * addr, unsigned long size) EXPORT_SYMBOL(find_first_bit); EXPORT_SYMBOL(find_first_zero_bit); +#endif diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index d1335208719..4081d7ecc2b 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -5,6 +5,7 @@ * Copyright 1992, Linus Torvalds. */ +#ifndef CONFIG_GENERIC_FIND_FIRST_BIT extern long find_first_zero_bit(const unsigned long *addr, unsigned long size); extern long find_first_bit(const unsigned long *addr, unsigned long size); @@ -24,6 +25,7 @@ static inline long __scanbit(unsigned long val, unsigned long max) ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ ? (__scanbit(~*(unsigned long *)(addr), (size))) \ : find_first_zero_bit((addr), (size)))) +#endif static inline void set_bit_string(unsigned long *bitmap, unsigned long i, int len) -- cgit v1.2.3-70-g09d2 From 3a48305028aa38afba93fc05066c71a6ee668ad8 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 1 Apr 2008 17:42:21 +0200 Subject: x86: optimize find_first_bit for small bitmaps Avoid a call to find_first_bit if the bitmap size is known at compile time and small enough to fit in a single long integer. Modeled after an optimization in the original x86_64-specific code. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- include/linux/bitops.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 355d67ba3bd..48bde600a2d 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -127,6 +127,20 @@ extern unsigned long __find_first_bit(const unsigned long *addr, static __always_inline unsigned long find_first_bit(const unsigned long *addr, unsigned long size) { + /* Avoid a function call if the bitmap size is a constant */ + /* and not bigger than BITS_PER_LONG. */ + + /* insert a sentinel so that __ffs returns size if there */ + /* are no set bits in the bitmap */ + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) + return __ffs((*addr) | (1ul << size)); + + /* the result of __ffs(0) is undefined, so it needs to be */ + /* handled separately */ + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) + return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr); + + /* size is not constant or too big */ return __find_first_bit(addr, size); } @@ -143,6 +157,21 @@ extern unsigned long __find_first_zero_bit(const unsigned long *addr, static __always_inline unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) { + /* Avoid a function call if the bitmap size is a constant */ + /* and not bigger than BITS_PER_LONG. 
*/ + + /* insert a sentinel so that __ffs returns size if there */ + /* are no set bits in the bitmap */ + if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { + return __ffs(~(*addr) | (1ul << size)); + } + + /* the result of __ffs(0) is undefined, so it needs to be */ + /* handled separately */ + if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) + return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr)); + + /* size is not constant or too big */ return __find_first_zero_bit(addr, size); } #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ -- cgit v1.2.3-70-g09d2 From 5245698f665c4b7a533dcc47a5afdf33095d436a Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Tue, 1 Apr 2008 17:47:57 +0200 Subject: x86, UML: remove x86-specific implementations of find_first_bit x86 has been switched to the generic versions of find_first_bit and find_first_zero_bit, but the original versions were retained. This patch just removes the now unused x86-specific versions. also update UML. Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/um/Kconfig.i386 | 4 ++ arch/um/Kconfig.x86_64 | 4 ++ arch/um/sys-i386/Makefile | 2 +- arch/um/sys-x86_64/Makefile | 2 +- arch/x86/lib/Makefile | 1 - arch/x86/lib/bitops_64.c | 109 -------------------------------------------- include/asm-x86/bitops_32.h | 58 ----------------------- include/asm-x86/bitops_64.h | 23 ---------- 8 files changed, 10 insertions(+), 193 deletions(-) delete mode 100644 arch/x86/lib/bitops_64.c diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386 index be755602829..49990ea422e 100644 --- a/arch/um/Kconfig.i386 +++ b/arch/um/Kconfig.i386 @@ -39,6 +39,10 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA bool default y +config GENERIC_FIND_FIRST_BIT + bool + default y + config GENERIC_FIND_NEXT_BIT bool default y diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64 index 4ab5aa62ff3..cc42e59585d 100644 --- a/arch/um/Kconfig.x86_64 +++ b/arch/um/Kconfig.x86_64 @@ -34,6 +34,10 @@ config SMP_BROKEN bool default y +config GENERIC_FIND_FIRST_BIT + bool + default y + config GENERIC_FIND_NEXT_BIT bool default y diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile index 964dc1a04c3..598b5c1903a 100644 --- a/arch/um/sys-i386/Makefile +++ b/arch/um/sys-i386/Makefile @@ -6,7 +6,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \ sys_call_table.o tls.o -subarch-obj-y = lib/bitops_32.o lib/semaphore_32.o lib/string_32.o +subarch-obj-y = lib/semaphore_32.o lib/string_32.o subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile index 3c22de53208..c8b4cce9cfe 100644 --- a/arch/um/sys-x86_64/Makefile +++ b/arch/um/sys-x86_64/Makefile @@ -10,7 +10,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \ obj-$(CONFIG_MODULES) += um_module.o -subarch-obj-y = lib/bitops_64.o lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o +subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o ldt-y = ../sys-i386/ldt.o diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 436093299bd..76f60f52a88 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -21,7 +21,6 @@ else lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o lib-y += thunk_64.o clear_page_64.o copy_page_64.o - lib-y += bitops_64.o lib-y += 
memmove_64.o memset_64.o lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o endif diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c deleted file mode 100644 index 568467d390c..00000000000 --- a/arch/x86/lib/bitops_64.c +++ /dev/null @@ -1,109 +0,0 @@ -#ifndef CONFIG_GENERIC_FIND_FIRST_BIT -#include - -#undef find_first_zero_bit -#undef find_first_bit - -static inline long -__find_first_zero_bit(const unsigned long * addr, unsigned long size) -{ - long d0, d1, d2; - long res; - - /* - * We must test the size in words, not in bits, because - * otherwise incoming sizes in the range -63..-1 will not run - * any scasq instructions, and then the flags used by the je - * instruction will have whatever random value was in place - * before. Nobody should call us like that, but - * find_next_zero_bit() does when offset and size are at the - * same word and it fails to find a zero itself. - */ - size += 63; - size >>= 6; - if (!size) - return 0; - asm volatile( - " repe; scasq\n" - " je 1f\n" - " xorq -8(%%rdi),%%rax\n" - " subq $8,%%rdi\n" - " bsfq %%rax,%%rdx\n" - "1: subq %[addr],%%rdi\n" - " shlq $3,%%rdi\n" - " addq %%rdi,%%rdx" - :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) - :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL), - [addr] "S" (addr) : "memory"); - /* - * Any register would do for [addr] above, but GCC tends to - * prefer rbx over rsi, even though rsi is readily available - * and doesn't have to be saved. - */ - return res; -} - -/** - * find_first_zero_bit - find the first zero bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first zero bit, not the number of the byte - * containing a bit. - */ -long find_first_zero_bit(const unsigned long * addr, unsigned long size) -{ - return __find_first_zero_bit (addr, size); -} - -static inline long -__find_first_bit(const unsigned long * addr, unsigned long size) -{ - long d0, d1; - long res; - - /* - * We must test the size in words, not in bits, because - * otherwise incoming sizes in the range -63..-1 will not run - * any scasq instructions, and then the flags used by the jz - * instruction will have whatever random value was in place - * before. Nobody should call us like that, but - * find_next_bit() does when offset and size are at the same - * word and it fails to find a one itself. - */ - size += 63; - size >>= 6; - if (!size) - return 0; - asm volatile( - " repe; scasq\n" - " jz 1f\n" - " subq $8,%%rdi\n" - " bsfq (%%rdi),%%rax\n" - "1: subq %[addr],%%rdi\n" - " shlq $3,%%rdi\n" - " addq %%rdi,%%rax" - :"=a" (res), "=&c" (d0), "=&D" (d1) - :"0" (0ULL), "1" (size), "2" (addr), - [addr] "r" (addr) : "memory"); - return res; -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -long find_first_bit(const unsigned long * addr, unsigned long size) -{ - return __find_first_bit(addr,size); -} - -#include - -EXPORT_SYMBOL(find_first_bit); -EXPORT_SYMBOL(find_first_zero_bit); -#endif diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h index ba2c0defafa..2e863021bf8 100644 --- a/include/asm-x86/bitops_32.h +++ b/include/asm-x86/bitops_32.h @@ -4,64 +4,6 @@ /* * Copyright 1992, Linus Torvalds. 
*/ - -#ifndef CONFIG_GENERIC_FIND_FIRST_BIT -/** - * find_first_zero_bit - find the first zero bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit number of the first zero bit, not the number of the byte - * containing a bit. - */ -static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) -{ - int d0, d1, d2; - int res; - - if (!size) - return 0; - /* This looks at memory. - * Mark it volatile to tell gcc not to move it around - */ - asm volatile("movl $-1,%%eax\n\t" - "xorl %%edx,%%edx\n\t" - "repe; scasl\n\t" - "je 1f\n\t" - "xorl -4(%%edi),%%eax\n\t" - "subl $4,%%edi\n\t" - "bsfl %%eax,%%edx\n" - "1:\tsubl %%ebx,%%edi\n\t" - "shll $3,%%edi\n\t" - "addl %%edi,%%edx" - : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) - : "1" ((size + 31) >> 5), "2" (addr), - "b" (addr) : "memory"); - return res; -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit number of the first set bit, not the number of the byte - * containing a bit. - */ -static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) -{ - unsigned x = 0; - - while (x < size) { - unsigned long val = *addr++; - if (val) - return __ffs(val) + x; - x += sizeof(*addr) << 3; - } - return x; -} -#endif - #ifdef __KERNEL__ #include diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h index 4081d7ecc2b..cb23122d23f 100644 --- a/include/asm-x86/bitops_64.h +++ b/include/asm-x86/bitops_64.h @@ -4,29 +4,6 @@ /* * Copyright 1992, Linus Torvalds. */ - -#ifndef CONFIG_GENERIC_FIND_FIRST_BIT -extern long find_first_zero_bit(const unsigned long *addr, unsigned long size); -extern long find_first_bit(const unsigned long *addr, unsigned long size); - -/* return index of first bet set in val or max when no bit is set */ -static inline long __scanbit(unsigned long val, unsigned long max) -{ - asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max)); - return val; -} - -#define find_first_bit(addr, size) \ - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ - ? (__scanbit(*(unsigned long *)(addr), (size))) \ - : find_first_bit((addr), (size)))) - -#define find_first_zero_bit(addr, size) \ - ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \ - ? (__scanbit(~*(unsigned long *)(addr), (size))) \ - : find_first_zero_bit((addr), (size)))) -#endif - static inline void set_bit_string(unsigned long *bitmap, unsigned long i, int len) { -- cgit v1.2.3-70-g09d2 From d66462f5314b0e70ddad8032eb76099475ca5571 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Fri, 4 Apr 2008 20:49:30 +0200 Subject: x86: finalize bitops unification include/asm-x86/bitops_32.h and include/asm-x86/bitops_64.h are now almost identical. The 64-bit version sets ARCH_HAS_FAST_MULTIPLIER and has an extra inline function set_bit_string. The define currently has no influence on the generated code, but it can be argued that setting it on i386 is the right thing to do anyhow. The addition of the extra inline function on i386 does not hurt either. 
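[ illustration, not part of the patch: usage of the moved helper is unchanged on both halves. A hedged sketch; io_map and reserve_range are invented names. ]

#include <linux/types.h>
#include <linux/bitops.h>

static DECLARE_BITMAP(io_map, 128);     /* hypothetical allocation map */

static void reserve_range(void)
{
        /*
         * set_bit_string() is just the loop over __set_bit() shown in
         * the diff below: this marks bits 40..47 as allocated.
         */
        set_bit_string(io_map, 40, 8);
}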
Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- include/asm-x86/bitops.h | 38 +++++++++++++++++++++++++++++++++----- include/asm-x86/bitops_32.h | 30 ------------------------------ include/asm-x86/bitops_64.h | 42 ------------------------------------------ 3 files changed, 33 insertions(+), 77 deletions(-) delete mode 100644 include/asm-x86/bitops_32.h delete mode 100644 include/asm-x86/bitops_64.h diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h index 1b6f547cb6b..daf1a72bdc4 100644 --- a/include/asm-x86/bitops.h +++ b/include/asm-x86/bitops.h @@ -403,10 +403,38 @@ static inline int fls(int x) #undef ADDR -#ifdef CONFIG_X86_32 -# include "bitops_32.h" -#else -# include "bitops_64.h" -#endif +static inline void set_bit_string(unsigned long *bitmap, + unsigned long i, int len) +{ + unsigned long end = i + len; + while (i < end) { + __set_bit(i, bitmap); + i++; + } +} + +#ifdef __KERNEL__ + +#include + +#define ARCH_HAS_FAST_MULTIPLIER 1 + +#include +#endif /* __KERNEL__ */ + +#include + +#ifdef __KERNEL__ + +#include + +#define ext2_set_bit_atomic(lock, nr, addr) \ + test_and_set_bit((nr), (unsigned long *)(addr)) +#define ext2_clear_bit_atomic(lock, nr, addr) \ + test_and_clear_bit((nr), (unsigned long *)(addr)) + +#include + +#endif /* __KERNEL__ */ #endif /* _ASM_X86_BITOPS_H */ diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h deleted file mode 100644 index 2e863021bf8..00000000000 --- a/include/asm-x86/bitops_32.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _I386_BITOPS_H -#define _I386_BITOPS_H - -/* - * Copyright 1992, Linus Torvalds. - */ -#ifdef __KERNEL__ - -#include - -#include - -#endif /* __KERNEL__ */ - -#include - -#ifdef __KERNEL__ - -#include - -#define ext2_set_bit_atomic(lock, nr, addr) \ - test_and_set_bit((nr), (unsigned long *)(addr)) -#define ext2_clear_bit_atomic(lock, nr, addr) \ - test_and_clear_bit((nr), (unsigned long *)(addr)) - -#include - -#endif /* __KERNEL__ */ - -#endif /* _I386_BITOPS_H */ diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h deleted file mode 100644 index cb23122d23f..00000000000 --- a/include/asm-x86/bitops_64.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _X86_64_BITOPS_H -#define _X86_64_BITOPS_H - -/* - * Copyright 1992, Linus Torvalds. 
- */
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
-				  int len)
-{
-	unsigned long end = i + len;
-	while (i < end) {
-		__set_bit(i, bitmap);
-		i++;
-	}
-}
-
-#ifdef __KERNEL__
-
-#include
-
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
-#include
-
-#endif /* __KERNEL__ */
-
-#include
-
-#ifdef __KERNEL__
-
-#include
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
-	test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) \
-	test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include
-
-#endif /* __KERNEL__ */
-
-#endif /* _X86_64_BITOPS_H */
-- cgit v1.2.3-70-g09d2
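For illustration, and not part of the series above: the unified set_bit_string() sets len consecutive bits starting at bit i. Below is a self-contained user-space sketch of the same loop; __set_bit() is re-implemented here as a plain C stand-in for the kernel primitive, so this is an approximation, not the kernel code:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Plain C stand-in for the kernel's non-atomic __set_bit(). */
static inline void __set_bit(unsigned long nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* The same loop as the unified set_bit_string() above. */
static inline void set_bit_string(unsigned long *bitmap,
				  unsigned long i, int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}

int main(void)
{
	unsigned long map[3] = { 0, 0, 0 };

	/* Bits 62..65: the run crosses a word boundary on 64-bit. */
	set_bit_string(map, 62, 4);
	printf("%#lx %#lx %#lx\n", map[0], map[1], map[2]);
	/* Prints 0xc000000000000000 0x3 0 on an LP64 host. */
	return 0;
}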
From f19dcf4a61ea4a3d155acb239348d09cb264f6a0 Mon Sep 17 00:00:00 2001
From: Joe Perches
Date: Sun, 23 Mar 2008 01:03:07 -0700
Subject: x86: include/asm-x86/pgalloc.h/bitops.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches
Signed-off-by: Ingo Molnar
---
 include/asm-x86/bitops.h | 62 +++++++++++++++++++++++-------------------------
 1 file changed, 30 insertions(+), 32 deletions(-)

diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index daf1a72bdc4..b81a4d4d333 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -62,9 +62,7 @@ static inline void set_bit(int nr, volatile void *addr)
  */
 static inline void __set_bit(int nr, volatile void *addr)
 {
-	asm volatile("bts %1,%0"
-		     : ADDR
-		     : "Ir" (nr) : "memory");
+	asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
 
 /**
@@ -296,13 +294,11 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
 static int test_bit(int nr, const volatile unsigned long *addr);
 #endif
 
-#define test_bit(nr,addr) \
-	(__builtin_constant_p(nr) ? \
-	 constant_test_bit((nr),(addr)) : \
-	 variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr)			\
+	(__builtin_constant_p((nr))		\
+	 ? constant_test_bit((nr), (addr))	\
+	 : variable_test_bit((nr), (addr)))
 
-#undef BASE_ADDR
-#undef BIT_ADDR
 /**
  * __ffs - find first set bit in word
  * @word: The word to search
@@ -311,9 +307,9 @@ static inline unsigned long __ffs(unsigned long word)
  */
 static inline unsigned long __ffs(unsigned long word)
 {
-	__asm__("bsf %1,%0"
-		:"=r" (word)
-		:"rm" (word));
+	asm("bsf %1,%0"
+	    : "=r" (word)
+	    : "rm" (word));
 	return word;
 }
 
@@ -325,9 +321,9 @@ static inline unsigned long __ffs(unsigned long word)
  */
 static inline unsigned long ffz(unsigned long word)
 {
-	__asm__("bsf %1,%0"
-		:"=r" (word)
-		:"r" (~word));
+	asm("bsf %1,%0"
+	    : "=r" (word)
+	    : "r" (~word));
 	return word;
 }
 
@@ -339,9 +335,9 @@ static inline unsigned long ffz(unsigned long word)
  */
 static inline unsigned long __fls(unsigned long word)
 {
-	__asm__("bsr %1,%0"
-		:"=r" (word)
-		:"rm" (word));
+	asm("bsr %1,%0"
+	    : "=r" (word)
+	    : "rm" (word));
 	return word;
 }
 
@@ -361,14 +357,14 @@ static inline int ffs(int x)
 {
 	int r;
 #ifdef CONFIG_X86_CMOV
-	__asm__("bsfl %1,%0\n\t"
-		"cmovzl %2,%0"
-		: "=r" (r) : "rm" (x), "r" (-1));
+	asm("bsfl %1,%0\n\t"
+	    "cmovzl %2,%0"
+	    : "=r" (r) : "rm" (x), "r" (-1));
 #else
-	__asm__("bsfl %1,%0\n\t"
-		"jnz 1f\n\t"
-		"movl $-1,%0\n"
-		"1:" : "=r" (r) : "rm" (x));
+	asm("bsfl %1,%0\n\t"
+	    "jnz 1f\n\t"
+	    "movl $-1,%0\n"
+	    "1:" : "=r" (r) : "rm" (x));
 #endif
 	return r + 1;
 }
@@ -388,19 +384,21 @@ static inline int fls(int x)
 {
 	int r;
 #ifdef CONFIG_X86_CMOV
-	__asm__("bsrl %1,%0\n\t"
-		"cmovzl %2,%0"
-		: "=&r" (r) : "rm" (x), "rm" (-1));
+	asm("bsrl %1,%0\n\t"
+	    "cmovzl %2,%0"
+	    : "=&r" (r) : "rm" (x), "rm" (-1));
 #else
-	__asm__("bsrl %1,%0\n\t"
-		"jnz 1f\n\t"
-		"movl $-1,%0\n"
-		"1:" : "=r" (r) : "rm" (x));
+	asm("bsrl %1,%0\n\t"
+	    "jnz 1f\n\t"
+	    "movl $-1,%0\n"
+	    "1:" : "=r" (r) : "rm" (x));
 #endif
 	return r + 1;
 }
 #endif /* __KERNEL__ */
 
+#undef BASE_ADDR
+#undef BIT_ADDR
 #undef ADDR
 
 static inline void set_bit_string(unsigned long *bitmap,
-- cgit v1.2.3-70-g09d2
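For illustration, and not part of the series above: the cleanup settles on the asm spelling with one constraint group per line. Below is a user-space sketch of the same extended-asm pattern as the cleaned-up __ffs(); it is x86-only, and the wrapper name my_ffs is made up here:

#include <stdio.h>

/* bsf yields the index of the lowest set bit; the result is
 * undefined when the input is zero. */
static inline unsigned long my_ffs(unsigned long word)
{
	asm("bsf %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}

int main(void)
{
	printf("%lu\n", my_ffs(0x50UL)); /* prints 4 */
	return 0;
}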
From 19870def587554c4055df3e74a21508e3647fb7e Mon Sep 17 00:00:00 2001
From: Alexander van Heukelum
Date: Fri, 25 Apr 2008 13:12:53 +0200
Subject: x86, bitops: select the generic bitmap search functions

Introduce GENERIC_FIND_FIRST_BIT and GENERIC_FIND_NEXT_BIT in
lib/Kconfig, defaulting to off. An arch that wants to use the
generic implementation now only has to use a select statement
to include them.

I added an always-y option (X86_CPU) to arch/x86/Kconfig.cpu and
used that to select the generic search functions. This way ARCH=um
SUBARCH=i386 automatically picks up the change too, and
arch/um/Kconfig.i386 can therefore be simplified a bit. ARCH=um
SUBARCH=x86_64 does things differently, but still compiles fine.
It seems that a "def_bool y" always wins over a "def_bool n"?

Signed-off-by: Alexander van Heukelum
Signed-off-by: Ingo Molnar
---
 arch/um/Kconfig.i386   | 8 --------
 arch/um/Kconfig.x86_64 | 8 --------
 arch/x86/Kconfig       | 6 ------
 arch/x86/Kconfig.cpu   | 5 +++++
 lib/Kconfig            | 6 ++++++
 5 files changed, 11 insertions(+), 22 deletions(-)

diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index 49990ea422e..e09edfa560d 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -39,14 +39,6 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA
 	bool
 	default y
 
-config GENERIC_FIND_FIRST_BIT
-	bool
-	default y
-
-config GENERIC_FIND_NEXT_BIT
-	bool
-	default y
-
 config GENERIC_HWEIGHT
 	bool
 	default y
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index cc42e59585d..5696e7b374b 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -34,14 +34,6 @@ config SMP_BROKEN
 	bool
 	default y
 
-config GENERIC_FIND_FIRST_BIT
-	bool
-	default y
-
-config GENERIC_FIND_NEXT_BIT
-	bool
-	default y
-
 config GENERIC_HWEIGHT
 	bool
 	default y
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 700447738e7..2fadf794483 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -77,12 +77,6 @@ config GENERIC_BUG
 	def_bool y
 	depends on BUG
 
-config GENERIC_FIND_FIRST_BIT
-	def_bool y
-
-config GENERIC_FIND_NEXT_BIT
-	def_bool y
-
 config GENERIC_HWEIGHT
 	def_bool y
 
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index cf3ff2c5cef..7ef18b01f0b 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -278,6 +278,11 @@ config GENERIC_CPU
 
 endchoice
 
+config X86_CPU
+	def_bool y
+	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_FIND_NEXT_BIT
+
 config X86_GENERIC
 	bool "Generic x86 support"
 	depends on X86_32
diff --git a/lib/Kconfig b/lib/Kconfig
index 2d53dc092e8..8cc8e8722a3 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -7,6 +7,12 @@ menu "Library routines"
 config BITREVERSE
 	tristate
 
+config GENERIC_FIND_FIRST_BIT
+	def_bool n
+
+config GENERIC_FIND_NEXT_BIT
+	def_bool n
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
-- cgit v1.2.3-70-g09d2
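For illustration, and not part of the series above: once an architecture selects GENERIC_FIND_FIRST_BIT, find_first_bit() comes from the generic library code, which returns size when no set bit exists. Below is a simplified user-space approximation of those semantics; this is a sketch, not the lib/ implementation itself:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Simplified approximation of the generic find_first_bit():
 * returns the index of the first set bit, or size if none is set. */
static unsigned long find_first_bit(const unsigned long *addr,
				    unsigned long size)
{
	unsigned long i;

	for (i = 0; i * BITS_PER_LONG < size; i++) {
		if (addr[i]) {
			unsigned long bit = i * BITS_PER_LONG +
					(unsigned long)__builtin_ctzl(addr[i]);
			return bit < size ? bit : size;
		}
	}
	return size;
}

int main(void)
{
	unsigned long map[2] = { 0, 0x4 };

	/* The lowest set bit is bit 2 of the second word. */
	printf("%lu\n", find_first_bit(map, 2 * BITS_PER_LONG)); /* 66 on LP64 */
	printf("%lu\n", find_first_bit(map, 1)); /* nothing found: returns 1 (== size) */
	return 0;
}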