author     Linus Torvalds <torvalds@g5.osdl.org>  2005-11-04 16:27:50 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-04 16:27:50 -0800
commit     602d4a7e2f4b843d1a67375d4d7104073495b758 (patch)
tree       0b9f184e54fa693c27bd5986c114bdcf6949f788 /include/asm-ppc
parent     0bbacc402e67abca8794a8401c1621dc0c0202e9 (diff)
parent     c51e3a417bb0f295e13a5bad86302b5212eafdf3 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc-merge
Diffstat (limited to 'include/asm-ppc')
-rw-r--r--  include/asm-ppc/bitops.h      | 460 -
-rw-r--r--  include/asm-ppc/commproc.h    |   2 +
-rw-r--r--  include/asm-ppc/futex.h       |  53 -
-rw-r--r--  include/asm-ppc/ipcbuf.h      |  29 -
-rw-r--r--  include/asm-ppc/kexec.h       |  40 -
-rw-r--r--  include/asm-ppc/ptrace.h      | 152 -
-rw-r--r--  include/asm-ppc/sigcontext.h  |  15 -
-rw-r--r--  include/asm-ppc/stat.h        |  69 -
-rw-r--r--  include/asm-ppc/tlb.h         |  57 -
-rw-r--r--  include/asm-ppc/tlbflush.h    | 115 -
-rw-r--r--  include/asm-ppc/uaccess.h     | 393 -
-rw-r--r--  include/asm-ppc/ucontext.h    |  27 -
12 files changed, 2 insertions, 1410 deletions
diff --git a/include/asm-ppc/bitops.h b/include/asm-ppc/bitops.h
deleted file mode 100644
index e30f536fd83..00000000000
--- a/include/asm-ppc/bitops.h
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * bitops.h: Bit string operations on the ppc
- */
-
-#ifdef __KERNEL__
-#ifndef _PPC_BITOPS_H
-#define _PPC_BITOPS_H
-
-#include <linux/config.h>
-#include <linux/compiler.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-
-/*
- * The test_and_*_bit operations are taken to imply a memory barrier
- * on SMP systems.
- */
-#ifdef CONFIG_SMP
-#define SMP_WMB "eieio\n"
-#define SMP_MB "\nsync"
-#else
-#define SMP_WMB
-#define SMP_MB
-#endif /* CONFIG_SMP */
-
-static __inline__ void set_bit(int nr, volatile unsigned long * addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- or %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc" );
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p |= mask;
-}
-
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- andc %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p &= ~mask;
-}
-
-static __inline__ void change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long old;
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- __asm__ __volatile__("\n\
-1: lwarx %0,0,%3 \n\
- xor %0,%0,%2 \n"
- PPC405_ERR77(0,%3)
-" stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc");
-}
-
-/*
- * non-atomic version
- */
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
-
- *p ^= mask;
-}
-
-/*
- * test_and_*_bit do imply a memory barrier (?)
- */
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- or %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- andc %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned int old, t;
- unsigned int mask = 1 << (nr & 0x1f);
- volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
-
- __asm__ __volatile__(SMP_WMB "\n\
-1: lwarx %0,0,%4 \n\
- xor %1,%0,%3 \n"
- PPC405_ERR77(0,%4)
-" stwcx. %1,0,%4 \n\
- bne 1b"
- SMP_MB
- : "=&r" (old), "=&r" (t), "=m" (*p)
- : "r" (mask), "r" (p), "m" (*p)
- : "cc", "memory");
-
- return (old & mask) != 0;
-}
-
-/*
- * non-atomic version
- */
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = 1 << (nr & 0x1f);
- unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-
-static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
-{
- return ((addr[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
-}
-
-/* Return the bit position of the most significant 1 bit in a word */
-static __inline__ int __ilog2(unsigned long x)
-{
- int lz;
-
- asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
- return 31 - lz;
-}
-
-static __inline__ int ffz(unsigned long x)
-{
- if ((x = ~x) == 0)
- return 32;
- return __ilog2(x & -x);
-}
-
-static inline int __ffs(unsigned long x)
-{
- return __ilog2(x & -x);
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
-{
- return __ilog2(x & -x) + 1;
-}
-
-/*
- * fls: find last (most-significant) bit set.
- * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
- */
-static __inline__ int fls(unsigned int x)
-{
- int lz;
-
- asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
- return 32 - lz;
-}
-
-/*
- * hweightN: returns the hamming weight (i.e. the number
- * of bits set) of a N-bit word
- */
-
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
-
-/*
- * Find the first bit set in a 140-bit bitmap.
- * The first 100 bits are unlikely to be set.
- */
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (unlikely(b[0]))
- return __ffs(b[0]);
- if (unlikely(b[1]))
- return __ffs(b[1]) + 32;
- if (unlikely(b[2]))
- return __ffs(b[2]) + 64;
- if (b[3])
- return __ffs(b[3]) + 96;
- return __ffs(b[4]) + 128;
-}
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @size: The maximum size to search
- * @offset: The bit number to start searching at
- */
-static __inline__ unsigned long find_next_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp &= ~0UL << offset;
- if (size < 32)
- goto found_first;
- if (tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != 0)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-
-found_first:
- tmp &= ~0UL >> (32 - size);
- if (tmp == 0UL) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __ffs(tmp);
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-#define find_first_bit(addr, size) \
- find_next_bit((addr), (size), 0)
-
-/*
- * This implementation of find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h.
- */
-#define find_first_zero_bit(addr, size) \
- find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long find_next_zero_bit(const unsigned long *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = *p++;
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (tmp != ~0U)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = *p++) != ~0U)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = *p;
-found_first:
- tmp |= ~0UL << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-
-
-#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
-#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
-#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
-
-static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
-{
- __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
-
- return (ADDR[nr >> 3] >> (nr & 7)) & 1;
-}
-
-/*
- * This implementation of ext2_find_{first,next}_zero_bit was stolen from
- * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
- */
-
-#define ext2_find_first_zero_bit(addr, size) \
- ext2_find_next_zero_bit((addr), (size), 0)
-
-static __inline__ unsigned long ext2_find_next_zero_bit(const void *addr,
- unsigned long size, unsigned long offset)
-{
- unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
- unsigned int result = offset & ~31UL;
- unsigned int tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if (offset) {
- tmp = cpu_to_le32p(p++);
- tmp |= ~0UL >> (32-offset);
- if (size < 32)
- goto found_first;
- if (tmp != ~0U)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while (size >= 32) {
- if ((tmp = cpu_to_le32p(p++)) != ~0U)
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if (!size)
- return result;
- tmp = cpu_to_le32p(p);
-found_first:
- tmp |= ~0U << size;
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + ffz(tmp);
-}
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
-#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
-
-#endif /* _PPC_BITOPS_H */
-#endif /* __KERNEL__ */
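
Every atomic op in the deleted bitops.h has the same shape: bit nr lives in 32-bit word nr >> 5, at position nr & 0x1f, and the update retries a lwarx/stwcx. (load-reserved/store-conditional) loop until it commits. Below is a minimal user-space sketch of that arithmetic, with GCC's __atomic_fetch_or builtin standing in for the assembly loop; the file and function names are illustrative, not part of the patch.

/* sketch_set_bit.c - hypothetical illustration, not kernel code.
 * Same word/mask arithmetic as the deleted set_bit(); __atomic_fetch_or
 * plays the role of the lwarx/or/stwcx. retry loop. */
#include <stdio.h>

static void sketch_set_bit(int nr, unsigned int *addr)
{
        unsigned int mask = 1u << (nr & 0x1f);  /* bit position inside the word */
        unsigned int *p = addr + (nr >> 5);     /* which 32-bit word */

        __atomic_fetch_or(p, mask, __ATOMIC_RELAXED);
}

int main(void)
{
        unsigned int bitmap[2] = { 0, 0 };

        sketch_set_bit(37, bitmap);             /* 37 = word 1, bit 5 */
        printf("bitmap[1] = %#x\n", bitmap[1]); /* prints 0x20 */
        return 0;
}

The ext2_* macros above get their endianness handling from the (nr) ^ 0x18: XOR-ing the bit number with 0x18 swaps the byte index within each 32-bit word, remapping the filesystem's little-endian bit numbering onto the big-endian CPU.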
diff --git a/include/asm-ppc/commproc.h b/include/asm-ppc/commproc.h
index 5bbb8e2c1c6..973e6090823 100644
--- a/include/asm-ppc/commproc.h
+++ b/include/asm-ppc/commproc.h
@@ -83,6 +83,8 @@ extern uint m8xx_cpm_hostalloc(uint size);
 extern int m8xx_cpm_hostfree(uint start);
 extern void m8xx_cpm_hostdump(void);
 
+extern void cpm_load_patch(volatile immap_t *immr);
+
 /* Buffer descriptors used by many of the CPM protocols.
 */
 typedef struct cpm_buf_desc {
diff --git a/include/asm-ppc/futex.h b/include/asm-ppc/futex.h
deleted file mode 100644
index 9feff4ce142..00000000000
--- a/include/asm-ppc/futex.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#ifdef __KERNEL__
-
-#include <linux/futex.h>
-#include <asm/errno.h>
-#include <asm/uaccess.h>
-
-static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
- int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
- inc_preempt_count();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- dec_preempt_count();
-
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
- return ret;
-}
-
-#endif
-#endif
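
The four shifts at the top of the deleted futex_atomic_op_inuser() unpack a single packed word: op in bits 28-31, cmp in bits 24-27, and two signed 12-bit arguments in bits 12-23 and 0-11, each recovered by the shift-left-then-arithmetic-shift-right idiom. A stand-alone sketch of just that decoding, using a made-up encoded value:

/* futex_decode_sketch.c - hypothetical illustration of the operand
 * decoding only; like the kernel, it relies on arithmetic right
 * shift of signed integers. */
#include <stdio.h>

int main(void)
{
        int encoded_op = (1 << 28) | (3 << 24) | (0xFFF << 12) | 0x005;

        int op     = (encoded_op >> 28) & 7;    /* 1 == FUTEX_OP_ADD */
        int cmp    = (encoded_op >> 24) & 15;   /* 3 == FUTEX_OP_CMP_GE */
        int oparg  = (encoded_op << 8) >> 20;   /* 0xFFF sign-extends to -1 */
        int cmparg = (encoded_op << 20) >> 20;  /* 0x005 stays 5 */

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
        /* prints: op=1 cmp=3 oparg=-1 cmparg=5 */
        return 0;
}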
diff --git a/include/asm-ppc/ipcbuf.h b/include/asm-ppc/ipcbuf.h
deleted file mode 100644
index fab6752c748..00000000000
--- a/include/asm-ppc/ipcbuf.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __PPC_IPCBUF_H__
-#define __PPC_IPCBUF_H__
-
-/*
- * The ipc64_perm structure for PPC architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 1 32-bit value to fill up for 8-byte alignment
- * - 2 miscellaneous 64-bit values (so that this structure matches
- * PPC64 ipc64_perm)
- */
-
-struct ipc64_perm
-{
- __kernel_key_t key;
- __kernel_uid_t uid;
- __kernel_gid_t gid;
- __kernel_uid_t cuid;
- __kernel_gid_t cgid;
- __kernel_mode_t mode;
- unsigned long seq;
- unsigned int __pad2;
- unsigned long long __unused1;
- unsigned long long __unused2;
-};
-
-#endif /* __PPC_IPCBUF_H__ */
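
The padding comment above can be checked mechanically. Assuming the 32-bit PPC typedef widths (the stand-in typedefs below are assumptions: key_t is int, the id/mode types and unsigned long are 32 bits on PPC32), the struct comes to 48 bytes, a multiple of 8, so it lines up with the PPC64 ipc64_perm as promised. A hedged compile-time sketch (C11):

/* ipcbuf_layout_sketch.c - hypothetical check of the padding comment;
 * the typedef widths stand in for the PPC32 kernel types. */
#include <assert.h>

typedef int          key_sketch_t;      /* assumed width of __kernel_key_t */
typedef unsigned int id_sketch_t;       /* assumed width of uid/gid/mode types */

struct ipc64_perm_sketch {
        key_sketch_t       key;
        id_sketch_t        uid, gid, cuid, cgid, mode;
        unsigned int       seq;         /* unsigned long is 32-bit on PPC32 */
        unsigned int       __pad2;
        unsigned long long __unused1;
        unsigned long long __unused2;
};

static_assert(sizeof(struct ipc64_perm_sketch) == 48, "48-byte PPC64-compatible layout");
static_assert(sizeof(struct ipc64_perm_sketch) % 8 == 0, "total is an 8-byte multiple");

int main(void) { return 0; }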
diff --git a/include/asm-ppc/kexec.h b/include/asm-ppc/kexec.h
deleted file mode 100644
index 6d2aa0aa464..00000000000
--- a/include/asm-ppc/kexec.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _PPC_KEXEC_H
-#define _PPC_KEXEC_H
-
-#ifdef CONFIG_KEXEC
-
-/*
- * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
- * i.e. the highest page that is mapped directly into kernel memory,
- * so that kmap is not required.
- *
- * Someone correct me if FIXADDR_START - PAGE_OFFSET is not the correct
- * calculation for the amount of memory directly mappable into the
- * kernel memory space.
- */
-
-/* Maximum physical address we can use pages from */
-#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
-/* Maximum address we can reach in physical address mode */
-#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
-/* Maximum address we can use for the control code buffer */
-#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
-
-#define KEXEC_CONTROL_CODE_SIZE 4096
-
-/* The native architecture */
-#define KEXEC_ARCH KEXEC_ARCH_PPC
-
-#ifndef __ASSEMBLY__
-
-extern void *crash_notes;
-
-struct kimage;
-
-extern void machine_kexec_simple(struct kimage *image);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* CONFIG_KEXEC */
-
-#endif /* _PPC_KEXEC_H */
diff --git a/include/asm-ppc/ptrace.h b/include/asm-ppc/ptrace.h
deleted file mode 100644
index c34fb4e37a9..00000000000
--- a/include/asm-ppc/ptrace.h
+++ /dev/null
@@ -1,152 +0,0 @@
-#ifndef _PPC_PTRACE_H
-#define _PPC_PTRACE_H
-
-/*
- * This struct defines the way the registers are stored on the
- * kernel stack during a system call or other kernel entry.
- *
- * this should only contain volatile regs
- * since we can keep non-volatile in the thread_struct
- * should set this up when only volatiles are saved
- * by intr code.
- *
- * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
- * that the overall structure is a multiple of 16 bytes in length.
- *
- * Note that the offsets of the fields in this struct correspond with
- * the PT_* values below. This simplifies arch/ppc/kernel/ptrace.c.
- */
-
-#ifndef __ASSEMBLY__
-struct pt_regs {
- unsigned long gpr[32];
- unsigned long nip;
- unsigned long msr;
- unsigned long orig_gpr3; /* Used for restarting system calls */
- unsigned long ctr;
- unsigned long link;
- unsigned long xer;
- unsigned long ccr;
- unsigned long mq; /* 601 only (not used at present) */
- /* Used on APUS to hold IPL value. */
- unsigned long trap; /* Reason for being here */
- /* N.B. for critical exceptions on 4xx, the dar and dsisr
- fields are overloaded to hold srr0 and srr1. */
- unsigned long dar; /* Fault registers */
- unsigned long dsisr; /* on 4xx/Book-E used for ESR */
- unsigned long result; /* Result of a system call */
-};
-
-#endif /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
-
-/* Size of stack frame allocated when calling signal handler. */
-#define __SIGNAL_FRAMESIZE 64
-
-#ifndef __ASSEMBLY__
-#define instruction_pointer(regs) ((regs)->nip)
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
-
-#define force_successful_syscall_return() \
- do { \
- current_thread_info()->syscall_noerror = 1; \
- } while(0)
-
-/*
- * We use the least-significant bit of the trap field to indicate
- * whether we have saved the full set of registers, or only a
- * partial set. A 1 there means the partial set.
- * On 4xx we use the next bit to indicate whether the exception
- * is a critical exception (1 means it is).
- */
-#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
-#define TRAP(regs) ((regs)->trap & ~0xF)
-
-#define CHECK_FULL_REGS(regs) \
-do { \
- if ((regs)->trap & 1) \
- printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
-} while (0)
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
-/*
- * Offsets used by 'ptrace' system call interface.
- * These can't be changed without breaking binary compatibility
- * with MkLinux, etc.
- */
-#define PT_R0 0
-#define PT_R1 1
-#define PT_R2 2
-#define PT_R3 3
-#define PT_R4 4
-#define PT_R5 5
-#define PT_R6 6
-#define PT_R7 7
-#define PT_R8 8
-#define PT_R9 9
-#define PT_R10 10
-#define PT_R11 11
-#define PT_R12 12
-#define PT_R13 13
-#define PT_R14 14
-#define PT_R15 15
-#define PT_R16 16
-#define PT_R17 17
-#define PT_R18 18
-#define PT_R19 19
-#define PT_R20 20
-#define PT_R21 21
-#define PT_R22 22
-#define PT_R23 23
-#define PT_R24 24
-#define PT_R25 25
-#define PT_R26 26
-#define PT_R27 27
-#define PT_R28 28
-#define PT_R29 29
-#define PT_R30 30
-#define PT_R31 31
-
-#define PT_NIP 32
-#define PT_MSR 33
-#ifdef __KERNEL__
-#define PT_ORIG_R3 34
-#endif
-#define PT_CTR 35
-#define PT_LNK 36
-#define PT_XER 37
-#define PT_CCR 38
-#define PT_MQ 39
-
-#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
-#define PT_FPR31 (PT_FPR0 + 2*31)
-#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
-
-/* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go */
-#define PTRACE_GETVRREGS 18
-#define PTRACE_SETVRREGS 19
-
-/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
- * spefscr, in one go */
-#define PTRACE_GETEVRREGS 20
-#define PTRACE_SETEVRREGS 21
-
-/*
- * Get or set a debug register. The first 16 are DABR registers and the
- * second 16 are IABR registers.
- */
-#define PTRACE_GET_DEBUGREG 25
-#define PTRACE_SET_DEBUGREG 26
-
-#endif
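
The header comment's claim that the PT_* values correspond to the pt_regs layout means, concretely, that each slot index times sizeof(unsigned long) equals the field's byte offset; that is what lets arch/ppc/kernel/ptrace.c index the saved registers as a flat array. A compile-time sketch over a trimmed stand-in struct (the _sketch names are illustrative):

/* pt_offsets_sketch.c - hypothetical check that PT_* slot numbers
 * track field offsets in units of sizeof(unsigned long). */
#include <assert.h>
#include <stddef.h>

struct pt_regs_sketch {
        unsigned long gpr[32];
        unsigned long nip;
        unsigned long msr;
};

static_assert(offsetof(struct pt_regs_sketch, gpr) == 0  * sizeof(unsigned long), "PT_R0");
static_assert(offsetof(struct pt_regs_sketch, nip) == 32 * sizeof(unsigned long), "PT_NIP");
static_assert(offsetof(struct pt_regs_sketch, msr) == 33 * sizeof(unsigned long), "PT_MSR");

int main(void) { return 0; }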
diff --git a/include/asm-ppc/sigcontext.h b/include/asm-ppc/sigcontext.h
deleted file mode 100644
index b7a417e0a92..00000000000
--- a/include/asm-ppc/sigcontext.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_PPC_SIGCONTEXT_H
-#define _ASM_PPC_SIGCONTEXT_H
-
-#include <asm/ptrace.h>
-#include <linux/compiler.h>
-
-struct sigcontext {
- unsigned long _unused[4];
- int signal;
- unsigned long handler;
- unsigned long oldmask;
- struct pt_regs __user *regs;
-};
-
-#endif
diff --git a/include/asm-ppc/stat.h b/include/asm-ppc/stat.h
deleted file mode 100644
index cadb3429849..00000000000
--- a/include/asm-ppc/stat.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#ifndef _PPC_STAT_H
-#define _PPC_STAT_H
-
-#ifdef __KERNEL__
-#include <linux/types.h>
-#endif /* __KERNEL__ */
-
-struct __old_kernel_stat {
- unsigned short st_dev;
- unsigned short st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned long st_size;
- unsigned long st_atime;
- unsigned long st_mtime;
- unsigned long st_ctime;
-};
-
-#define STAT_HAVE_NSEC 1
-
-struct stat {
- unsigned st_dev;
- ino_t st_ino;
- mode_t st_mode;
- nlink_t st_nlink;
- uid_t st_uid;
- gid_t st_gid;
- unsigned st_rdev;
- off_t st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1.
- */
-struct stat64 {
- unsigned long long st_dev; /* Device. */
- unsigned long long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long long st_rdev; /* Device number, if device. */
- unsigned short int __pad2;
- long long st_size; /* Size of file, in bytes. */
- long st_blksize; /* Optimal block size for I/O. */
-
- long long st_blocks; /* Number of 512-byte blocks allocated. */
- long st_atime; /* Time of last access. */
- unsigned long st_atime_nsec;
- long st_mtime; /* Time of last modification. */
- unsigned long int st_mtime_nsec;
- long st_ctime; /* Time of last status change. */
- unsigned long int st_ctime_nsec;
- unsigned long int __unused4;
- unsigned long int __unused5;
-};
-#endif
diff --git a/include/asm-ppc/tlb.h b/include/asm-ppc/tlb.h
deleted file mode 100644
index 2c142c5d858..00000000000
--- a/include/asm-ppc/tlb.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * TLB shootdown specifics for PPC
- *
- * Copyright (C) 2002 Paul Mackerras, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _PPC_TLB_H
-#define _PPC_TLB_H
-
-#include <linux/config.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-
-#ifdef CONFIG_PPC_STD_MMU
-/* Classic PPC with hash-table based MMU... */
-
-struct mmu_gather;
-extern void tlb_flush(struct mmu_gather *tlb);
-
-/* Get the generic bits... */
-#include <asm-generic/tlb.h>
-
-/* Nothing needed here in fact... */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
-extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
- unsigned long address);
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
-{
- if (pte_val(*ptep) & _PAGE_HASHPTE)
- flush_hash_entry(tlb->mm, ptep, address);
-}
-
-#else
-/* Embedded PPC with software-loaded TLB, very simple... */
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* Get the generic bits... */
-#include <asm-generic/tlb.h>
-
-#endif /* CONFIG_PPC_STD_MMU */
-
-#endif /* __PPC_TLB_H */
diff --git a/include/asm-ppc/tlbflush.h b/include/asm-ppc/tlbflush.h
deleted file mode 100644
index 9afee4ffc83..00000000000
--- a/include/asm-ppc/tlbflush.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * include/asm-ppc/tlbflush.h
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-#ifndef _PPC_TLBFLUSH_H
-#define _PPC_TLBFLUSH_H
-
-#include <linux/config.h>
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-#if defined(CONFIG_4xx)
-
-#ifndef CONFIG_44x
-#define __tlbia() asm volatile ("sync; tlbia; isync" : : : "memory")
-#else
-#define __tlbia _tlbia
-#endif
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
- { __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
- { __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
- { __tlbia(); }
-
-#elif defined(CONFIG_FSL_BOOKE)
-
-/* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
- * are best implemented as tlbia vs specific tlbie's */
-
-#define __tlbia() _tlbia()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
- { __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
- { __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
- { __tlbia(); }
-
-#elif defined(CONFIG_8xx)
-#define __tlbia() asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
- { __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
- { _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
- { __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
- { __tlbia(); }
-
-#else /* 6xx, 7xx, 7xxx cpus */
-struct mm_struct;
-struct vm_area_struct;
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-#endif
-
-/*
- * This is called in munmap when we have freed up some page-table
- * pages. We don't need to do anything here, there's nothing special
- * about our page-table pages. -- paulus
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* _PPC_TLBFLUSH_H */
-#endif /*__KERNEL__ */
diff --git a/include/asm-ppc/uaccess.h b/include/asm-ppc/uaccess.h
deleted file mode 100644
index 63f56224da8..00000000000
--- a/include/asm-ppc/uaccess.h
+++ /dev/null
@@ -1,393 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _PPC_UACCESS_H
-#define _PPC_UACCESS_H
-
-#ifndef __ASSEMBLY__
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <asm/processor.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * The fs/ds values are now the highest legal address in the "segment".
- * This simplifies the checking in the routines below.
- */
-
-#define KERNEL_DS ((mm_segment_t) { ~0UL })
-#define USER_DS ((mm_segment_t) { TASK_SIZE - 1 })
-
-#define get_ds() (KERNEL_DS)
-#define get_fs() (current->thread.fs)
-#define set_fs(val) (current->thread.fs = (val))
-
-#define segment_eq(a,b) ((a).seg == (b).seg)
-
-#define __access_ok(addr,size) \
- ((addr) <= current->thread.fs.seg \
- && ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr)))
-
-#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- *
- * As we use the same address space for kernel and user data on the
- * PowerPC, we can just do these as direct assignments. (Of course, the
- * exception handling means that it's no longer "just"...)
- *
- * The "user64" versions of the user access functions are versions that
- * allow access to 64-bit data. The "get_user" functions do not
- * properly handle 64-bit data because the value gets downcast to a long.
- * The "put_user" functions already handle 64-bit data properly, but we
- * add "user64" versions for completeness.
- */
-#define get_user(x,ptr) \
- __get_user_check((x),(ptr),sizeof(*(ptr)))
-#define get_user64(x,ptr) \
- __get_user64_check((x),(ptr),sizeof(*(ptr)))
-#define put_user(x,ptr) \
- __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define put_user64(x,ptr) put_user(x,ptr)
-
-#define __get_user(x,ptr) \
- __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __get_user64(x,ptr) \
- __get_user64_nocheck((x),(ptr),sizeof(*(ptr)))
-#define __put_user(x,ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-#define __put_user64(x,ptr) __put_user(x,ptr)
-
-extern long __put_user_bad(void);
-
-#define __put_user_nocheck(x,ptr,size) \
-({ \
- long __pu_err; \
- __chk_user_ptr(ptr); \
- __put_user_size((x),(ptr),(size),__pu_err); \
- __pu_err; \
-})
-
-#define __put_user_check(x,ptr,size) \
-({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
- __put_user_size((x),__pu_addr,(size),__pu_err); \
- __pu_err; \
-})
-
-#define __put_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- __put_user_asm(x, ptr, retval, "stb"); \
- break; \
- case 2: \
- __put_user_asm(x, ptr, retval, "sth"); \
- break; \
- case 4: \
- __put_user_asm(x, ptr, retval, "stw"); \
- break; \
- case 8: \
- __put_user_asm2(x, ptr, retval); \
- break; \
- default: \
- __put_user_bad(); \
- } \
-} while (0)
-
-/*
- * We don't tell gcc that we are accessing memory, but this is OK
- * because we do not write to any memory gcc knows about, so there
- * are no aliasing issues.
- */
-#define __put_user_asm(x, addr, err, op) \
- __asm__ __volatile__( \
- "1: "op" %1,0(%2)\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: li %0,%3\n" \
- " b 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-
-#define __put_user_asm2(x, addr, err) \
- __asm__ __volatile__( \
- "1: stw %1,0(%2)\n" \
- "2: stw %1+1,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: li %0,%3\n" \
- " b 3b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,4b\n" \
- " .long 2b,4b\n" \
- ".previous" \
- : "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-
-#define __get_user_nocheck(x, ptr, size) \
-({ \
- long __gu_err; \
- unsigned long __gu_val; \
- __chk_user_ptr(ptr); \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user64_nocheck(x, ptr, size) \
-({ \
- long __gu_err; \
- long long __gu_val; \
- __chk_user_ptr(ptr); \
- __get_user_size64(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user_check(x, ptr, size) \
-({ \
- long __gu_err = -EFAULT; \
- unsigned long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(VERIFY_READ, __gu_addr, (size))) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user64_check(x, ptr, size) \
-({ \
- long __gu_err = -EFAULT; \
- long long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(VERIFY_READ, __gu_addr, (size))) \
- __get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-extern long __get_user_bad(void);
-
-#define __get_user_size(x, ptr, size, retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- __get_user_asm(x, ptr, retval, "lbz"); \
- break; \
- case 2: \
- __get_user_asm(x, ptr, retval, "lhz"); \
- break; \
- case 4: \
- __get_user_asm(x, ptr, retval, "lwz"); \
- break; \
- default: \
- x = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_size64(x, ptr, size, retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- __get_user_asm(x, ptr, retval, "lbz"); \
- break; \
- case 2: \
- __get_user_asm(x, ptr, retval, "lhz"); \
- break; \
- case 4: \
- __get_user_asm(x, ptr, retval, "lwz"); \
- break; \
- case 8: \
- __get_user_asm2(x, ptr, retval); \
- break; \
- default: \
- x = __get_user_bad(); \
- } \
-} while (0)
-
-#define __get_user_asm(x, addr, err, op) \
- __asm__ __volatile__( \
- "1: "op" %1,0(%2)\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: li %0,%3\n" \
- " li %1,0\n" \
- " b 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : "=r"(err), "=r"(x) \
- : "b"(addr), "i"(-EFAULT), "0"(err))
-
-#define __get_user_asm2(x, addr, err) \
- __asm__ __volatile__( \
- "1: lwz %1,0(%2)\n" \
- "2: lwz %1+1,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: li %0,%3\n" \
- " li %1,0\n" \
- " li %1+1,0\n" \
- " b 3b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,4b\n" \
- " .long 2b,4b\n" \
- ".previous" \
- : "=r"(err), "=&r"(x) \
- : "b"(addr), "i"(-EFAULT), "0"(err))
-
-/* more complex routines */
-
-extern int __copy_tofrom_user(void __user *to, const void __user *from,
- unsigned long size);
-
-extern inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned long over;
-
- if (access_ok(VERIFY_READ, from, n))
- return __copy_tofrom_user((__force void __user *)to, from, n);
- if ((unsigned long)from < TASK_SIZE) {
- over = (unsigned long)from + n - TASK_SIZE;
- return __copy_tofrom_user((__force void __user *)to, from, n - over) + over;
- }
- return n;
-}
-
-extern inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- unsigned long over;
-
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_tofrom_user(to, (__force void __user *) from, n);
- if ((unsigned long)to < TASK_SIZE) {
- over = (unsigned long)to + n - TASK_SIZE;
- return __copy_tofrom_user(to, (__force void __user *) from, n - over) + over;
- }
- return n;
-}
-
-static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long size)
-{
- return __copy_tofrom_user((__force void __user *)to, from, size);
-}
-
-static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long size)
-{
- return __copy_tofrom_user(to, (__force void __user *)from, size);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
-
-extern inline unsigned long
-clear_user(void __user *addr, unsigned long size)
-{
- if (access_ok(VERIFY_WRITE, addr, size))
- return __clear_user(addr, size);
- if ((unsigned long)addr < TASK_SIZE) {
- unsigned long over = (unsigned long)addr + size - TASK_SIZE;
- return __clear_user(addr, size - over) + over;
- }
- return size;
-}
-
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-extern inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- if (access_ok(VERIFY_READ, src, 1))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-
-extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-extern __inline__ int strnlen_user(const char __user *str, long len)
-{
- unsigned long top = current->thread.fs.seg;
-
- if ((unsigned long)str > top)
- return 0;
- return __strnlen_user(str, len, top);
-}
-
-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _PPC_UACCESS_H */
-#endif /* __KERNEL__ */
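
One detail worth noting from the deleted header: __access_ok() tests size - 1 <= seg - addr rather than the obvious addr + size <= seg, because addr + size can wrap past zero and wrongly pass. A stand-alone sketch with 32-bit arithmetic and a made-up segment limit:

/* access_ok_sketch.c - hypothetical illustration of the overflow-safe
 * range check; SEG stands in for current->thread.fs.seg (TASK_SIZE - 1). */
#include <stdint.h>
#include <stdio.h>

#define SEG UINT32_C(0x7FFFFFFF)

static int access_ok_sketch(uint32_t addr, uint32_t size)
{
        return addr <= SEG && (size == 0 || size - 1 <= SEG - addr);
}

int main(void)
{
        uint32_t addr = UINT32_C(0x7FFF0000);
        uint32_t size = UINT32_C(0xFFFFFFFF);   /* addr + size wraps to 0x7FFEFFFF */

        printf("safe check : %d\n", access_ok_sketch(addr, size));      /* 0: rejected */
        printf("naive check: %d\n", (uint32_t)(addr + size) <= SEG);    /* 1: bogus pass */
        return 0;
}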
diff --git a/include/asm-ppc/ucontext.h b/include/asm-ppc/ucontext.h
deleted file mode 100644
index 664bc984d51..00000000000
--- a/include/asm-ppc/ucontext.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _ASMPPC_UCONTEXT_H
-#define _ASMPPC_UCONTEXT_H
-
-#include <asm/elf.h>
-#include <asm/signal.h>
-
-struct mcontext {
- elf_gregset_t mc_gregs;
- elf_fpregset_t mc_fregs;
- unsigned long mc_pad[2];
- elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
-};
-
-struct ucontext {
- unsigned long uc_flags;
- struct ucontext __user *uc_link;
- stack_t uc_stack;
- int uc_pad[7];
- struct mcontext __user *uc_regs;/* points to uc_mcontext field */
- sigset_t uc_sigmask;
- /* glibc has 1024-bit signal masks, ours are 64-bit */
- int uc_maskext[30];
- int uc_pad2[3];
- struct mcontext uc_mcontext;
-};
-
-#endif /* !_ASMPPC_UCONTEXT_H */