author	David Gibson <david@gibson.dropbear.id.au>	2005-11-01 17:28:10 +1100
committer	Paul Mackerras <paulus@samba.org>	2005-11-01 21:49:02 +1100
commit	a0e60b2033b30a6bb8479629001cf98e58e4079a (patch)
tree	6386eeca340a25c4ae1876f2f9663f94628c8cc3 /arch/powerpc
parent	031ef0a72aa8f7ee63ae9f307c1bcff92b3ccc2c (diff)
[PATCH] powerpc: Merge bitops.h
Here's a revised version.  This re-introduces the set_bits() function from ppc64, which I removed because I thought it was unused (it exists on no other arch).  In fact it is used in the powermac interrupt code (but not on pSeries).

- We use LARXL/STCXL macros to generate the right (32 or 64 bit) instructions, similar to LDL/STL from ppc_asm.h, used in fpu.S

- ppc32 previously used a full "sync" barrier at the end of test_and_*_bit(), whereas ppc64 used an "isync".  The merged version uses "isync", since I believe that's sufficient.

- The ppc64 versions of the minix_*() bitmap functions have changed semantics.  Previously on ppc64, these functions were big-endian (that is, bit 0 was the LSB in the first 64-bit, big-endian word).  On ppc32 (and x86, for that matter) they were little-endian.  As far as I can tell, the big-endian usage was simply wrong - I guess no-one ever tried to use minixfs on ppc64.

- On ppc32 find_next_bit() and find_next_zero_bit() are no longer inline (they were already out-of-line on ppc64).

- For ppc64, sched_find_first_bit() has moved from mmu_context.h to the merged bitops.  What it was doing in mmu_context.h in the first place, I have no idea.

- The fls() function is now implemented using the cntlzw instruction on ppc64, instead of generic_fls(), as it already was on ppc32.

- For ARCH=ppc, this patch requires adding arch/powerpc/lib to the arch/ppc/Makefile.  This in turn requires some changes to arch/powerpc/lib/Makefile, which didn't correctly handle ARCH=ppc.

Built and running on G5.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
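Not part of the patch itself: a rough sketch of two details the message mentions - how LARXL/STCXL-style macros can pick the 32- or 64-bit load-reserve/store-conditional instructions, and how fls() maps onto cntlzw.  The function names set_bits_sketch() and fls_sketch() are made up for illustration, and the asm constraints only approximate the merged include/asm-powerpc/bitops.h, which sits outside the arch/powerpc diffstat shown below.

#ifdef CONFIG_PPC64
#define LARXL	"ldarx"		/* 64-bit load and reserve */
#define STCXL	"stdcx."	/* 64-bit store conditional */
#else
#define LARXL	"lwarx"		/* 32-bit load and reserve */
#define STCXL	"stwcx."	/* 32-bit store conditional */
#endif

/* Sketch of an atomic read-modify-write loop built on the macros above. */
static inline void set_bits_sketch(unsigned long mask, volatile unsigned long *p)
{
	unsigned long old;

	__asm__ __volatile__(
"1:	" LARXL "	%0,0,%3\n"	/* old = *p, take the reservation */
"	or	%0,%0,%2\n"		/* set the requested bits */
"	" STCXL "	%0,0,%3\n"	/* store only if still reserved */
"	bne-	1b"			/* reservation lost: retry */
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

/* fls() in terms of "count leading zeros word", as the message describes. */
static inline int fls_sketch(unsigned int x)
{
	int lz;

	asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 32 - lz;		/* fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32 */
}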
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/ppc_ksyms.c	9
-rw-r--r--	arch/powerpc/lib/Makefile	9
-rw-r--r--	arch/powerpc/lib/bitops.c	150
3 files changed, 155 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 8bc540337ba..47d6f7e2ea9 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -81,15 +81,6 @@ EXPORT_SYMBOL(_prep_type);
EXPORT_SYMBOL(ucSystemType);
#endif
-#if !defined(__INLINE_BITOPS)
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(change_bit);
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-#endif /* __INLINE_BITOPS */
-
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index dfb33915ad6..34f5c2e074c 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -3,13 +3,14 @@
#
ifeq ($(CONFIG_PPC_MERGE),y)
-obj-y := string.o
+obj-y := string.o strcase.o
+obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
endif
-obj-y += strcase.o
-obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
+obj-y += bitops.o
obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \
- memcpy_64.o usercopy_64.o mem_64.o string.o
+ memcpy_64.o usercopy_64.o mem_64.o string.o \
+ strcase.o
obj-$(CONFIG_PPC_ISERIES) += e2a.o
obj-$(CONFIG_XMON) += sstep.o
diff --git a/arch/powerpc/lib/bitops.c b/arch/powerpc/lib/bitops.c
new file mode 100644
index 00000000000..b67ce3004eb
--- /dev/null
+++ b/arch/powerpc/lib/bitops.c
@@ -0,0 +1,150 @@
+#include <linux/types.h>
+#include <linux/module.h>
+#include <asm/byteorder.h>
+#include <asm/bitops.h>
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(find_next_bit);
+
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffz(tmp);
+}
+EXPORT_SYMBOL(find_next_zero_bit);
+
+static inline unsigned int ext2_ilog2(unsigned int x)
+{
+ int lz;
+
+ asm("cntlzw %0,%1": "=r"(lz):"r"(x));
+ return 31 - lz;
+}
+
+static inline unsigned int ext2_ffz(unsigned int x)
+{
+ u32 rc;
+ if ((x = ~x) == 0)
+ return 32;
+ rc = ext2_ilog2(x & -x);
+ return rc;
+}
+
+unsigned long find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ const unsigned int *p = ((const unsigned int *)addr) + (offset >> 5);
+ unsigned int result = offset & ~31;
+ unsigned int tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31;
+ if (offset) {
+ tmp = cpu_to_le32p(p++);
+ tmp |= ~0U >> (32 - offset); /* bug or feature ? */
+ if (size < 32)
+ goto found_first;
+ if (tmp != ~0)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size >= 32) {
+ if ((tmp = cpu_to_le32p(p++)) != ~0)
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = cpu_to_le32p(p);
+found_first:
+ tmp |= ~0 << size;
+ if (tmp == ~0) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ext2_ffz(tmp);
+}
+EXPORT_SYMBOL(find_next_zero_le_bit);
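As a usage note, callers typically walk a bitmap by chaining find_next_bit() calls.  The stand-alone, user-space approximation below is an assumption-laden sketch rather than kernel code: BITS_PER_LONG, __ffs() (via __builtin_ctzl) and the single-word find_next_bit() are local stand-ins for the kernel definitions above, kept just small enough to compile and run on their own.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Stand-in for the kernel's __ffs(): index of the least significant set bit. */
static inline unsigned long __ffs(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(x);
}

/* Single-word simplification of the out-of-line find_next_bit() above. */
static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset)
{
	unsigned long tmp;

	if (offset >= size)
		return size;
	tmp = addr[0] & (~0UL << offset);
	if (!tmp)
		return size;		/* no set bit at or after offset */
	tmp = __ffs(tmp);
	return tmp < size ? tmp : size;
}

int main(void)
{
	unsigned long map[1] = { 0 };
	unsigned long bit;

	map[0] |= 1UL << 3;
	map[0] |= 1UL << 17;

	/* The usual caller pattern: iterate every set bit in the map. */
	for (bit = find_next_bit(map, BITS_PER_LONG, 0);
	     bit < BITS_PER_LONG;
	     bit = find_next_bit(map, BITS_PER_LONG, bit + 1))
		printf("bit %lu is set\n", bit);

	return 0;
}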