Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/a.out.h  36
-rw-r--r--  include/asm-powerpc/atomic.h  209
-rw-r--r--  include/asm-powerpc/auxvec.h  21
-rw-r--r--  include/asm-powerpc/backlight.h  31
-rw-r--r--  include/asm-powerpc/bitops.h  437
-rw-r--r--  include/asm-powerpc/bug.h  77
-rw-r--r--  include/asm-powerpc/byteorder.h  89
-rw-r--r--  include/asm-powerpc/checksum.h  132
-rw-r--r--  include/asm-powerpc/cputable.h  427
-rw-r--r--  include/asm-powerpc/dbdma.h  102
-rw-r--r--  include/asm-powerpc/dma.h  390
-rw-r--r--  include/asm-powerpc/elf.h  415
-rw-r--r--  include/asm-powerpc/firmware.h  97
-rw-r--r--  include/asm-powerpc/futex.h  84
-rw-r--r--  include/asm-powerpc/grackle.h  7
-rw-r--r--  include/asm-powerpc/hardirq.h  27
-rw-r--r--  include/asm-powerpc/heathrow.h  62
-rw-r--r--  include/asm-powerpc/hw_irq.h  114
-rw-r--r--  include/asm-powerpc/i8259.h  12
-rw-r--r--  include/asm-powerpc/ioctls.h  3
-rw-r--r--  include/asm-powerpc/iommu.h  113
-rw-r--r--  include/asm-powerpc/ipcbuf.h  34
-rw-r--r--  include/asm-powerpc/irq.h  504
-rw-r--r--  include/asm-powerpc/iseries/hv_call.h  113
-rw-r--r--  include/asm-powerpc/iseries/hv_call_event.h  253
-rw-r--r--  include/asm-powerpc/iseries/hv_call_sc.h  51
-rw-r--r--  include/asm-powerpc/iseries/hv_call_xm.h  78
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_config.h  138
-rw-r--r--  include/asm-powerpc/iseries/hv_lp_event.h  142
-rw-r--r--  include/asm-powerpc/iseries/hv_types.h  113
-rw-r--r--  include/asm-powerpc/iseries/iseries_io.h  49
-rw-r--r--  include/asm-powerpc/iseries/it_exp_vpd_panel.h  52
-rw-r--r--  include/asm-powerpc/iseries/it_lp_naca.h  80
-rw-r--r--  include/asm-powerpc/iseries/it_lp_queue.h  81
-rw-r--r--  include/asm-powerpc/iseries/it_lp_reg_save.h  84
-rw-r--r--  include/asm-powerpc/iseries/lpar_map.h  83
-rw-r--r--  include/asm-powerpc/iseries/mf.h  57
-rw-r--r--  include/asm-powerpc/iseries/vio.h  130
-rw-r--r--  include/asm-powerpc/kdebug.h  42
-rw-r--r--  include/asm-powerpc/kexec.h  49
-rw-r--r--  include/asm-powerpc/keylargo.h  248
-rw-r--r--  include/asm-powerpc/kmap_types.h  33
-rw-r--r--  include/asm-powerpc/kprobes.h  66
-rw-r--r--  include/asm-powerpc/lmb.h  81
-rw-r--r--  include/asm-powerpc/machdep.h  285
-rw-r--r--  include/asm-powerpc/macio.h  140
-rw-r--r--  include/asm-powerpc/mediabay.h  31
-rw-r--r--  include/asm-powerpc/mpic.h  287
-rw-r--r--  include/asm-powerpc/numnodes.h  7
-rw-r--r--  include/asm-powerpc/of_device.h  64
-rw-r--r--  include/asm-powerpc/ohare.h  48
-rw-r--r--  include/asm-powerpc/oprofile_impl.h  123
-rw-r--r--  include/asm-powerpc/pSeries_reconfig.h  25
-rw-r--r--  include/asm-powerpc/parport.h  18
-rw-r--r--  include/asm-powerpc/pmac_feature.h  380
-rw-r--r--  include/asm-powerpc/pmac_low_i2c.h  43
-rw-r--r--  include/asm-powerpc/pmc.h  46
-rw-r--r--  include/asm-powerpc/posix_types.h  129
-rw-r--r--  include/asm-powerpc/ppc-pci.h  54
-rw-r--r--  include/asm-powerpc/ppc_asm.h  518
-rw-r--r--  include/asm-powerpc/processor.h  281
-rw-r--r--  include/asm-powerpc/prom.h  219
-rw-r--r--  include/asm-powerpc/ptrace.h  248
-rw-r--r--  include/asm-powerpc/reg.h  613
-rw-r--r--  include/asm-powerpc/rtas.h  232
-rw-r--r--  include/asm-powerpc/rtc.h  78
-rw-r--r--  include/asm-powerpc/rwsem.h  168
-rw-r--r--  include/asm-powerpc/scatterlist.h  45
-rw-r--r--  include/asm-powerpc/seccomp.h  16
-rw-r--r--  include/asm-powerpc/sections.h  20
-rw-r--r--  include/asm-powerpc/semaphore.h  95
-rw-r--r--  include/asm-powerpc/sigcontext.h  52
-rw-r--r--  include/asm-powerpc/smp.h  119
-rw-r--r--  include/asm-powerpc/smu.h  379
-rw-r--r--  include/asm-powerpc/sparsemem.h  16
-rw-r--r--  include/asm-powerpc/spinlock_types.h  20
-rw-r--r--  include/asm-powerpc/sstep.h  26
-rw-r--r--  include/asm-powerpc/stat.h  81
-rw-r--r--  include/asm-powerpc/statfs.h  60
-rw-r--r--  include/asm-powerpc/synch.h  51
-rw-r--r--  include/asm-powerpc/system.h  411
-rw-r--r--  include/asm-powerpc/termios.h  135
-rw-r--r--  include/asm-powerpc/thread_info.h  142
-rw-r--r--  include/asm-powerpc/time.h  226
-rw-r--r--  include/asm-powerpc/tlb.h  70
-rw-r--r--  include/asm-powerpc/tlbflush.h  146
-rw-r--r--  include/asm-powerpc/types.h  110
-rw-r--r--  include/asm-powerpc/uaccess.h  468
-rw-r--r--  include/asm-powerpc/ucontext.h  40
-rw-r--r--  include/asm-powerpc/uninorth.h  229
-rw-r--r--  include/asm-powerpc/unistd.h  509
-rw-r--r--  include/asm-powerpc/vga.h  54
-rw-r--r--  include/asm-powerpc/vio.h  106
-rw-r--r--  include/asm-powerpc/xmon.h  12
94 files changed, 12987 insertions, 134 deletions
diff --git a/include/asm-powerpc/a.out.h b/include/asm-powerpc/a.out.h
new file mode 100644
index 00000000000..c7393a97736
--- /dev/null
+++ b/include/asm-powerpc/a.out.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_POWERPC_A_OUT_H
+#define _ASM_POWERPC_A_OUT_H
+
+struct exec
+{
+ unsigned long a_info; /* Use macros N_MAGIC, etc for access */
+ unsigned a_text; /* length of text, in bytes */
+ unsigned a_data; /* length of data, in bytes */
+ unsigned a_bss; /* length of uninitialized data area for file, in bytes */
+ unsigned a_syms; /* length of symbol table data in file, in bytes */
+ unsigned a_entry; /* start address */
+ unsigned a_trsize; /* length of relocation info for text, in bytes */
+ unsigned a_drsize; /* length of relocation info for data, in bytes */
+};
+
+#define N_TRSIZE(a) ((a).a_trsize)
+#define N_DRSIZE(a) ((a).a_drsize)
+#define N_SYMSIZE(a) ((a).a_syms)
+
+#ifdef __KERNEL__
+#ifdef __powerpc64__
+
+#define STACK_TOP_USER64 TASK_SIZE_USER64
+#define STACK_TOP_USER32 TASK_SIZE_USER32
+
+#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
+ STACK_TOP_USER32 : STACK_TOP_USER64)
+
+#else /* __powerpc64__ */
+
+#define STACK_TOP TASK_SIZE
+
+#endif /* __powerpc64__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_A_OUT_H */
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
new file mode 100644
index 00000000000..ed4b345ed75
--- /dev/null
+++ b/include/asm-powerpc/atomic.h
@@ -0,0 +1,209 @@
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
+
+/*
+ * PowerPC atomic operations
+ */
+
+typedef struct { volatile int counter; } atomic_t;
+
+#ifdef __KERNEL__
+#include <asm/synch.h>
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
+ * The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";"
+#else
+#define PPC405_ERR77(ra,rb)
+#endif
+
+static __inline__ void atomic_add(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_add\n\
+ add %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_add_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # atomic_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+static __inline__ void atomic_sub(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%3 # atomic_sub\n\
+ subf %0,%2,%0\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_sub_return(int a, atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # atomic_sub_return\n\
+ subf %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic_inc(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_inc\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_inc_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_inc_return\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+static __inline__ void atomic_dec(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 # atomic_dec\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ int atomic_dec_return(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_return\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ int atomic_dec_if_positive(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
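As a usage illustration (not part of the header above), here is a minimal reference-count sketch built on these primitives; kobj_t and the helper names are hypothetical:

    /* Illustrative sketch only -- kobj_t and the helpers are made up. */
    #include <asm/atomic.h>

    typedef struct { atomic_t refs; } kobj_t;

    static void kobj_get(kobj_t *k)
    {
            atomic_inc(&k->refs);            /* no return value, no barrier implied */
    }

    static int kobj_put(kobj_t *k)
    {
            /* atomic_dec_return() is ordered (EIEIO/ISYNC on SMP above) */
            return atomic_dec_return(&k->refs) == 0;   /* 1 if last reference dropped */
    }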
diff --git a/include/asm-powerpc/auxvec.h b/include/asm-powerpc/auxvec.h
new file mode 100644
index 00000000000..79d8c473230
--- /dev/null
+++ b/include/asm-powerpc/auxvec.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_POWERPC_AUXVEC_H
+#define _ASM_POWERPC_AUXVEC_H
+
+/*
+ * We need to put in some extra aux table entries to tell glibc what
+ * the cache block size is, so it can use the dcbz instruction safely.
+ */
+#define AT_DCACHEBSIZE 19
+#define AT_ICACHEBSIZE 20
+#define AT_UCACHEBSIZE 21
+/* A special ignored type value for PPC, for glibc compatibility. */
+#define AT_IGNOREPPC 22
+
+/* The vDSO location. We have to use the same value as x86 for glibc's
+ * sake :-)
+ */
+#ifdef __powerpc64__
+#define AT_SYSINFO_EHDR 33
+#endif
+
+#endif
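For illustration, user space reads these entries from the ELF auxiliary vector. A minimal sketch, assuming a glibc user space where getauxval() is available (glibc 2.16 and later; glibc's <elf.h> defines the same AT_DCACHEBSIZE value):

    /* Illustrative user-space sketch, not part of the kernel header. */
    #include <stdio.h>
    #include <sys/auxv.h>           /* getauxval(), pulls in <elf.h> */

    int main(void)
    {
            /* the cache block size glibc uses for its dcbz-based routines */
            unsigned long dcache_block = getauxval(AT_DCACHEBSIZE);

            printf("D-cache block size: %lu bytes\n", dcache_block);
            return 0;
    }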
diff --git a/include/asm-powerpc/backlight.h b/include/asm-powerpc/backlight.h
new file mode 100644
index 00000000000..1ba1f27a0b6
--- /dev/null
+++ b/include/asm-powerpc/backlight.h
@@ -0,0 +1,31 @@
+/*
+ * Routines for handling backlight control on PowerBooks
+ *
+ * For now, implementation resides in
+ * arch/powerpc/platforms/powermac/pmac_support.c
+ *
+ */
+#ifndef __ASM_POWERPC_BACKLIGHT_H
+#define __ASM_POWERPC_BACKLIGHT_H
+#ifdef __KERNEL__
+
+/* Abstract values */
+#define BACKLIGHT_OFF 0
+#define BACKLIGHT_MIN 1
+#define BACKLIGHT_MAX 0xf
+
+struct backlight_controller {
+ int (*set_enable)(int enable, int level, void *data);
+ int (*set_level)(int level, void *data);
+};
+
+extern void register_backlight_controller(struct backlight_controller *ctrler, void *data, char *type);
+extern void unregister_backlight_controller(struct backlight_controller *ctrler, void *data);
+
+extern int set_backlight_enable(int enable);
+extern int get_backlight_enable(void);
+extern int set_backlight_level(int level);
+extern int get_backlight_level(void);
+
+#endif /* __KERNEL__ */
+#endif
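For illustration, a platform driver would plug its hardware into this interface roughly as below; the foo_* names, the private-data pointer and the "pwm" type string are hypothetical, and the callbacks' return convention is not specified by the header, so 0 is assumed here:

    /* Illustrative sketch only. */
    static int foo_set_enable(int enable, int level, void *data)
    {
            /* program the panel on/off state here */
            return 0;
    }

    static int foo_set_level(int level, void *data)
    {
            /* clamp to BACKLIGHT_MIN..BACKLIGHT_MAX and program the hardware */
            return 0;
    }

    static struct backlight_controller foo_backlight = {
            .set_enable = foo_set_enable,
            .set_level  = foo_set_level,
    };

    /* in the driver's init path (arguments are hypothetical): */
    /*   register_backlight_controller(&foo_backlight, foo_private, "pwm"); */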
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
new file mode 100644
index 00000000000..dc25c53704d
--- /dev/null
+++ b/include/asm-powerpc/bitops.h
@@ -0,0 +1,437 @@
+/*
+ * PowerPC atomic bit operations.
+ *
+ * Merged version by David Gibson <david@gibson.dropbear.id.au>.
+ * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
+ * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
+ * originally took it from the ppc32 code.
+ *
+ * Within a word, bits are numbered LSB first. Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for a
+ * ppc64 system the bits end up numbered:
+ * |63..............0|127............64|191...........128|255...........192|
+ * and on ppc32:
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps; these work on a similar bit array layout, but
+ * byte-oriented:
+ * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
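To make the swizzle concrete, a short worked example (illustrative only) of how a little-endian bit number maps onto the big-endian unsigned-long layout on a 64-bit kernel:

    /* Illustrative worked example: LE bit 9 on a 64-bit kernel.
     * BITOP_LE_SWIZZLE = (64 - 1) & ~0x7 = 0x38, and 9 ^ 0x38 = 49,
     * so test_and_set_le_bit(9, p) below becomes test_and_set_bit(49, p).
     * In the big-endian unsigned long at p, bit 49 is stored in byte 1
     * (which holds bits 55..48), as bit 1 of that byte -- exactly where
     * a byte-oriented little-endian bitmap keeps bit number 9.
     */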
+
+#ifndef _ASM_POWERPC_BITOPS_H
+#define _ASM_POWERPC_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+#include <asm/synch.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+
+#define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+
+#ifdef CONFIG_PPC64
+#define LARXL "ldarx"
+#define STCXL "stdcx."
+#define CNTLZL "cntlzd"
+#else
+#define LARXL "lwarx"
+#define STCXL "stwcx."
+#define CNTLZL "cntlzw"
+#endif
+
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "or %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r"(old), "=m"(*p)
+ : "r"(mask), "r"(p), "m"(*p)
+ : "cc" );
+}
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "andc %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r"(old), "=m"(*p)
+ : "r"(mask), "r"(p), "m"(*p)
+ : "cc" );
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "xor %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r"(old), "=m"(*p)
+ : "r"(mask), "r"(p), "m"(*p)
+ : "cc" );
+}
+
+static __inline__ int test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1:" LARXL " %0,0,%3 # test_and_set_bit\n"
+ "or %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ STCXL " %1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1:" LARXL " %0,0,%3 # test_and_clear_bit\n"
+ "andc %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ STCXL " %1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1:" LARXL " %0,0,%3 # test_and_change_bit\n"
+ "xor %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ STCXL " %1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
+static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
+{
+ unsigned long old;
+
+ __asm__ __volatile__(
+"1:" LARXL " %0,0,%3 # set_bit\n"
+ "or %0,%0,%2\n"
+ STCXL " %0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "=m" (*addr)
+ : "r" (mask), "r" (addr), "m" (*addr)
+ : "cc");
+}
+
+/* Non-atomic versions */
+static __inline__ int test_bit(unsigned long nr,
+ __const__ volatile unsigned long *addr)
+{
+ return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+static __inline__ void __set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p |= mask;
+}
+
+static __inline__ void __clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p &= ~mask;
+}
+
+static __inline__ void __change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ *p ^= mask;
+}
+
+static __inline__ int __test_and_set_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_change_bit(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/*
+ * Return the zero-based bit position (LE, not IBM bit numbering) of
+ * the most significant 1-bit in a double word.
+ */
+static __inline__ int __ilog2(unsigned long x)
+{
+ int lz;
+
+ asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
+ return BITS_PER_LONG - 1 - lz;
+}
+
+/*
+ * Determines the bit position of the least significant 0 bit in the
+ * specified double word. The returned bit position will be
+ * zero-based, starting from the right side (63/31 - 0).
+ */
+static __inline__ unsigned long ffz(unsigned long x)
+{
+ /* no zero exists anywhere in the 8 byte area. */
+ if ((x = ~x) == 0)
+ return BITS_PER_LONG;
+
+ /*
+ * Calculate the bit position of the least significant '1' bit in x
+ * (since x has been changed this will actually be the least significant
+ * '0' bit in the original x). Note: (x & -x) gives us a mask that
+ * is the least significant (RIGHT-most) 1-bit of the value in x.
+ */
+ return __ilog2(x & -x);
+}
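A short worked example of the (x & -x) isolation used here, purely illustrative:

    /* Illustrative: x = 0xb7 = ...10110111, whose lowest 0 bit is bit 3.
     *   ~x       = ...01001000
     *   ~x & -~x = ...00001000     (mask of the lowest set bit of ~x)
     *   __ilog2(0x08) = 3, so ffz(0xb7) == 3.
     */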
+
+static __inline__ int __ffs(unsigned long x)
+{
+ return __ilog2(x & -x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+ unsigned long i = (unsigned long)x;
+ return __ilog2(i & -i) + 1;
+}
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+static __inline__ int fls(unsigned int x)
+{
+ int lz;
+
+ asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
+}
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+/* Little-endian versions */
+
+static __inline__ int test_le_bit(unsigned long nr,
+ __const__ unsigned long *addr)
+{
+ __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
+ return (tmp[nr >> 3] >> (nr & 7)) & 1;
+}
+
+#define __set_le_bit(nr, addr) \
+ __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __clear_le_bit(nr, addr) \
+ __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define test_and_set_le_bit(nr, addr) \
+ test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define test_and_clear_le_bit(nr, addr) \
+ test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define __test_and_set_le_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define __test_and_clear_le_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define find_first_zero_le_bit(addr, size) find_next_zero_le_bit((addr), (size), 0)
+unsigned long find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+/* Bitmap functions for the ext2 filesystem */
+
+#define ext2_set_bit(nr,addr) \
+ __test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit(nr, addr) \
+ __test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
+
+#define ext2_find_first_zero_bit(addr, size) \
+ find_first_zero_le_bit((unsigned long*)addr, size)
+#define ext2_find_next_zero_bit(addr, size, off) \
+ find_next_zero_le_bit((unsigned long*)addr, size, off)
+
+/* Bitmap functions for the minix filesystem. */
+
+#define minix_test_and_set_bit(nr,addr) \
+ __test_and_set_le_bit(nr, (unsigned long *)addr)
+#define minix_set_bit(nr,addr) \
+ __set_le_bit(nr, (unsigned long *)addr)
+#define minix_test_and_clear_bit(nr,addr) \
+ __test_and_clear_le_bit(nr, (unsigned long *)addr)
+#define minix_test_bit(nr,addr) \
+ test_le_bit(nr, (unsigned long *)addr)
+
+#define minix_find_first_zero_bit(addr,size) \
+ find_first_zero_le_bit((unsigned long *)addr, size)
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is set.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#ifdef CONFIG_PPC64
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 64;
+ return __ffs(b[2]) + 128;
+#else
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 32;
+ if (unlikely(b[2]))
+ return __ffs(b[2]) + 64;
+ if (b[3])
+ return __ffs(b[3]) + 96;
+ return __ffs(b[4]) + 128;
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_BITOPS_H */
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
new file mode 100644
index 00000000000..d625ee55f95
--- /dev/null
+++ b/include/asm-powerpc/bug.h
@@ -0,0 +1,77 @@
+#ifndef _ASM_POWERPC_BUG_H
+#define _ASM_POWERPC_BUG_H
+
+/*
+ * Define an illegal instr to trap on the bug.
+ * We don't use 0 because that marks the end of a function
+ * in the ELF ABI. That's "Boo Boo" in case you wonder...
+ */
+#define BUG_OPCODE .long 0x00b00b00 /* For asm */
+#define BUG_ILLEGAL_INSTR "0x00b00b00" /* For BUG macro */
+
+#ifndef __ASSEMBLY__
+
+#ifdef __powerpc64__
+#define BUG_TABLE_ENTRY ".llong"
+#define BUG_TRAP_OP "tdnei"
+#else
+#define BUG_TABLE_ENTRY ".long"
+#define BUG_TRAP_OP "twnei"
+#endif /* __powerpc64__ */
+
+struct bug_entry {
+ unsigned long bug_addr;
+ long line;
+ const char *file;
+ const char *function;
+};
+
+struct bug_entry *find_bug(unsigned long bugaddr);
+
+/*
+ * If this bit is set in the line number it means that the trap
+ * is for WARN_ON rather than BUG or BUG_ON.
+ */
+#define BUG_WARNING_TRAP 0x1000000
+
+#ifdef CONFIG_BUG
+
+#define BUG() do { \
+ __asm__ __volatile__( \
+ "1: twi 31,0,0\n" \
+ ".section __bug_table,\"a\"\n" \
+ "\t"BUG_TABLE_ENTRY" 1b,%0,%1,%2\n" \
+ ".previous" \
+ : : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
+} while (0)
+
+#define BUG_ON(x) do { \
+ __asm__ __volatile__( \
+ "1: "BUG_TRAP_OP" %0,0\n" \
+ ".section __bug_table,\"a\"\n" \
+ "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
+ ".previous" \
+ : : "r" ((long)(x)), "i" (__LINE__), \
+ "i" (__FILE__), "i" (__FUNCTION__)); \
+} while (0)
+
+#define WARN_ON(x) do { \
+ __asm__ __volatile__( \
+ "1: "BUG_TRAP_OP" %0,0\n" \
+ ".section __bug_table,\"a\"\n" \
+ "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
+ ".previous" \
+ : : "r" ((long)(x)), \
+ "i" (__LINE__ + BUG_WARNING_TRAP), \
+ "i" (__FILE__), "i" (__FUNCTION__)); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_BUG_ON
+#define HAVE_ARCH_WARN_ON
+#endif /* CONFIG_BUG */
+#endif /* __ASSEMBLY__ */
+
+#include <asm-generic/bug.h>
+
+#endif /* _ASM_POWERPC_BUG_H */
diff --git a/include/asm-powerpc/byteorder.h b/include/asm-powerpc/byteorder.h
new file mode 100644
index 00000000000..b37752214a1
--- /dev/null
+++ b/include/asm-powerpc/byteorder.h
@@ -0,0 +1,89 @@
+#ifndef _ASM_POWERPC_BYTEORDER_H
+#define _ASM_POWERPC_BYTEORDER_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#ifdef __GNUC__
+#ifdef __KERNEL__
+
+static __inline__ __u16 ld_le16(const volatile __u16 *addr)
+{
+ __u16 val;
+
+ __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
+{
+ __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static __inline__ __u32 ld_le32(const volatile __u32 *addr)
+{
+ __u32 val;
+
+ __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+
+static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
+{
+ __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
+{
+ __u16 result;
+
+ __asm__("rlwimi %0,%1,8,16,23"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 8));
+ return result;
+}
+
+static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
+{
+ __u32 result;
+
+ __asm__("rlwimi %0,%1,24,16,23\n\t"
+ "rlwimi %0,%1,8,8,15\n\t"
+ "rlwimi %0,%1,24,0,7"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 24));
+ return result;
+}
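The three rlwimi (rotate-left-then-insert) instructions above assemble the byte-swapped word piecewise; a plain-C equivalent of the same transformation, shown only for illustration:

    /* Illustrative C equivalent of ___arch__swab32(): 0xaabbccdd -> 0xddccbbaa */
    static inline unsigned int c_swab32(unsigned int v)
    {
            return ((v & 0x000000ff) << 24) |
                   ((v & 0x0000ff00) <<  8) |
                   ((v & 0x00ff0000) >>  8) |
                   ((v & 0xff000000) >> 24);
    }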
+
+#define __arch__swab16(x) ___arch__swab16(x)
+#define __arch__swab32(x) ___arch__swab32(x)
+
+/* The same, but returns the converted value from the location pointed to by addr. */
+#define __arch__swab16p(addr) ld_le16(addr)
+#define __arch__swab32p(addr) ld_le32(addr)
+
+/* The same, but do the conversion in situ, ie. put the value back to addr. */
+#define __arch__swab16s(addr) st_le16(addr,*addr)
+#define __arch__swab32s(addr) st_le32(addr,*addr)
+
+#endif /* __KERNEL__ */
+
+#ifndef __STRICT_ANSI__
+#define __BYTEORDER_HAS_U64__
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+#endif /* __STRICT_ANSI__ */
+
+#endif /* __GNUC__ */
+
+#include <linux/byteorder/big_endian.h>
+
+#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/include/asm-powerpc/checksum.h b/include/asm-powerpc/checksum.h
new file mode 100644
index 00000000000..d8354d8a49c
--- /dev/null
+++ b/include/asm-powerpc/checksum.h
@@ -0,0 +1,132 @@
+#ifndef _ASM_POWERPC_CHECKSUM_H
+#define _ASM_POWERPC_CHECKSUM_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which are always checksummed on 4-octet boundaries. ihl is the number
+ * of 32-bit words and is always >= 5.
+ */
+extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern unsigned short csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum);
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern unsigned int csum_partial(const unsigned char * buff, int len,
+ unsigned int sum);
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit), while copying the block to dst.
+ * If an access exception occurs on src or dst, it stores -EFAULT
+ * to *src_err or *dst_err respectively (if that pointer is not
+ * NULL), and, for an error on src, zeroes the rest of dst.
+ *
+ * Like csum_partial, this must be called with even lengths,
+ * except for the last fragment.
+ */
+extern unsigned int csum_partial_copy_generic(const char *src, char *dst,
+ int len, unsigned int sum,
+ int *src_err, int *dst_err);
+/*
+ * the same as csum_partial, but copies from src to dst while it
+ * checksums.
+ */
+unsigned int csum_partial_copy_nocheck(const char *src,
+ char *dst,
+ int len,
+ unsigned int sum);
+
+#define csum_partial_copy_from_user(src, dst, len, sum, errp) \
+ csum_partial_copy_generic((src), (dst), (len), (sum), (errp), NULL)
+
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+
+
+/*
+ * turns a 32-bit partial checksum (e.g. from csum_partial) into a
+ * 1's complement 16-bit checksum.
+ */
+static inline unsigned int csum_fold(unsigned int sum)
+{
+ unsigned int tmp;
+
+ /* swap the two 16-bit halves of sum */
+ __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
+ /* if there is a carry from adding the two 16-bit halves,
+ it will carry from the lower half into the upper half,
+ giving us the correct sum in the upper half. */
+ sum = ~(sum + tmp) >> 16;
+ return sum;
+}
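A worked example of the fold (illustrative), showing how the end-around carry lands in the upper half:

    /* sum          = 0xffff0003
     * tmp          = 0x0003ffff     (16-bit halves swapped)
     * sum + tmp    = 0x00030002     (32-bit wrap; the carry out of the low
     *                                half has been added into the high half)
     * ~(sum + tmp) = 0xfffcfffd
     * result >> 16 = 0xfffc         (the complemented 1's-complement sum
     *                                of 0xffff and 0x0003, i.e. ~0x0003)
     */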
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
+#ifdef __powerpc64__
+static inline u32 csum_tcpudp_nofold(u32 saddr,
+ u32 daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ unsigned long s = sum;
+
+ s += saddr;
+ s += daddr;
+ s += (proto << 16) + len;
+ s += (s >> 32);
+ return (u32) s;
+}
+#else
+static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ __asm__("\n\
+ addc %0,%0,%1 \n\
+ adde %0,%0,%2 \n\
+ adde %0,%0,%3 \n\
+ addze %0,%0 \n\
+ "
+ : "=r" (sum)
+ : "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
+ return sum;
+}
+
+#endif
+#endif
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
new file mode 100644
index 00000000000..c019501dace
--- /dev/null
+++ b/include/asm-powerpc/cputable.h
@@ -0,0 +1,427 @@
+#ifndef __ASM_POWERPC_CPUTABLE_H
+#define __ASM_POWERPC_CPUTABLE_H
+
+#include <linux/config.h>
+#include <asm/ppc_asm.h> /* for ASM_CONST */
+
+#define PPC_FEATURE_32 0x80000000
+#define PPC_FEATURE_64 0x40000000
+#define PPC_FEATURE_601_INSTR 0x20000000
+#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
+#define PPC_FEATURE_HAS_FPU 0x08000000
+#define PPC_FEATURE_HAS_MMU 0x04000000
+#define PPC_FEATURE_HAS_4xxMAC 0x02000000
+#define PPC_FEATURE_UNIFIED_CACHE 0x01000000
+#define PPC_FEATURE_HAS_SPE 0x00800000
+#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
+#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
+#define PPC_FEATURE_NO_TB 0x00100000
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+/* This structure can grow; its real size is used by head.S code
+ * via the mkdefs mechanism.
+ */
+struct cpu_spec;
+struct op_powerpc_model;
+
+typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+
+struct cpu_spec {
+ /* CPU is matched via (PVR & pvr_mask) == pvr_value */
+ unsigned int pvr_mask;
+ unsigned int pvr_value;
+
+ char *cpu_name;
+ unsigned long cpu_features; /* Kernel features */
+ unsigned int cpu_user_features; /* Userland features */
+
+ /* cache line sizes */
+ unsigned int icache_bsize;
+ unsigned int dcache_bsize;
+
+ /* number of performance monitor counters */
+ unsigned int num_pmcs;
+
+ /* this is called to initialize various CPU bits like L1 cache,
+ * BHT, SPD, etc... from head.S before branching to identify_machine
+ */
+ cpu_setup_t cpu_setup;
+
+ /* Used by oprofile userspace to select the right counters */
+ char *oprofile_cpu_type;
+
+ /* Processor specific oprofile operations */
+ struct op_powerpc_model *oprofile_model;
+};
+
+extern struct cpu_spec *cur_cpu_spec;
+
+extern void identify_cpu(unsigned long offset, unsigned long cpu);
+extern void do_cpu_ftr_fixups(unsigned long offset);
+
+#endif /* __ASSEMBLY__ */
+
+/* CPU kernel features */
+
+/* Retain the 32b definitions all use bottom half of word */
+#define CPU_FTR_SPLIT_ID_CACHE ASM_CONST(0x0000000000000001)
+#define CPU_FTR_L2CR ASM_CONST(0x0000000000000002)
+#define CPU_FTR_SPEC7450 ASM_CONST(0x0000000000000004)
+#define CPU_FTR_ALTIVEC ASM_CONST(0x0000000000000008)
+#define CPU_FTR_TAU ASM_CONST(0x0000000000000010)
+#define CPU_FTR_CAN_DOZE ASM_CONST(0x0000000000000020)
+#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
+#define CPU_FTR_604_PERF_MON ASM_CONST(0x0000000000000080)
+#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
+#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
+#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
+#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
+#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
+#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
+#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
+#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
+#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000)
+#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
+#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
+#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
+
+#ifdef __powerpc64__
+/* Add the 64b processor unique features in the top half of the word */
+#define CPU_FTR_SLB ASM_CONST(0x0000000100000000)
+#define CPU_FTR_16M_PAGE ASM_CONST(0x0000000200000000)
+#define CPU_FTR_TLBIEL ASM_CONST(0x0000000400000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000800000000)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000001000000000)
+#define CPU_FTR_IABR ASM_CONST(0x0000002000000000)
+#define CPU_FTR_MMCRA ASM_CONST(0x0000004000000000)
+#define CPU_FTR_CTRL ASM_CONST(0x0000008000000000)
+#define CPU_FTR_SMT ASM_CONST(0x0000010000000000)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0000020000000000)
+#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0000040000000000)
+#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0000080000000000)
+#else
+/* ensure the flags are available when compiling for 32b processors,
+ * but do nothing there */
+#define CPU_FTR_SLB ASM_CONST(0x0)
+#define CPU_FTR_16M_PAGE ASM_CONST(0x0)
+#define CPU_FTR_TLBIEL ASM_CONST(0x0)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0)
+#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0)
+#define CPU_FTR_IABR ASM_CONST(0x0)
+#define CPU_FTR_MMCRA ASM_CONST(0x0)
+#define CPU_FTR_CTRL ASM_CONST(0x0)
+#define CPU_FTR_SMT ASM_CONST(0x0)
+#define CPU_FTR_COHERENT_ICACHE ASM_CONST(0x0)
+#define CPU_FTR_LOCKLESS_TLBIE ASM_CONST(0x0)
+#define CPU_FTR_MMCRA_SIHV ASM_CONST(0x0)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
+ CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
+
+/* iSeries doesn't support large pages */
+#ifdef CONFIG_PPC_ISERIES
+#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE)
+#else
+#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_PPCAS_ARCH_V2_BASE | CPU_FTR_16M_PAGE)
+#endif /* CONFIG_PPC_ISERIES */
+
+/* We only set the altivec features if the kernel was compiled with altivec
+ * support
+ */
+#ifdef CONFIG_ALTIVEC
+#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
+#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
+#else
+#define CPU_FTR_ALTIVEC_COMP 0
+#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
+#endif
+
+/* We need to mark all pages as being coherent if we're SMP or we
+ * have a 74[45]x and an MPC107 host bridge.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE)
+#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
+#else
+#define CPU_FTR_COMMON 0
+#endif
+
+/* The powersave features NAP & DOZE seem to confuse the BDI when
+ * debugging. So if a BDI is used, disable these
+ */
+#ifndef CONFIG_BDI_SWITCH
+#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE
+#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP
+#else
+#define CPU_FTR_MAYBE_CAN_DOZE 0
+#define CPU_FTR_MAYBE_CAN_NAP 0
+#endif
+
+#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
+ !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
+ !defined(CONFIG_BOOKE))
+
+enum {
+ CPU_FTRS_PPC601 = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE,
+ CPU_FTRS_603 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTRS_604 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
+ CPU_FTRS_740_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTRS_740 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTRS_750 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTRS_750FX1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
+ CPU_FTRS_750FX2 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+ CPU_FTR_NO_DPM,
+ CPU_FTRS_750FX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTRS_750GX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+ CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
+ CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTRS_7400_NOTAU = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTRS_7400 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR |
+ CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
+ CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTRS_7450_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NEED_COHERENT,
+ CPU_FTRS_7450_21 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
+ CPU_FTR_NEED_COHERENT,
+ CPU_FTRS_7450_23 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
+ CPU_FTRS_7455_1 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS |
+ CPU_FTR_NEED_COHERENT,
+ CPU_FTRS_7455_20 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
+ CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTRS_7455 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+ CPU_FTR_NEED_COHERENT,
+ CPU_FTRS_7447_10 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+ CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
+ CPU_FTRS_7447 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+ CPU_FTR_NEED_COHERENT,
+ CPU_FTRS_7447A = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB |
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
+ CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
+ CPU_FTR_NEED_COHERENT,
+ CPU_FTRS_82XX = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB,
+ CPU_FTRS_G2_LE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTRS_E300 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE |
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
+ CPU_FTRS_CLASSIC32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+ CPU_FTRS_POWER3_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+ CPU_FTRS_POWER4_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
+ CPU_FTRS_970_32 = CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_ALTIVEC_COMP |
+ CPU_FTR_MAYBE_CAN_NAP,
+ CPU_FTRS_8XX = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+ CPU_FTRS_40X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+ CPU_FTRS_44X = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+ CPU_FTRS_E200 = CPU_FTR_USE_TB,
+ CPU_FTRS_E500 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB,
+ CPU_FTRS_E500_2 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_BIG_PHYS,
+ CPU_FTRS_GENERIC_32 = CPU_FTR_COMMON,
+#ifdef __powerpc64__
+ CPU_FTRS_POWER3 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
+ CPU_FTRS_RS64 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
+ CPU_FTR_MMCRA | CPU_FTR_CTRL,
+ CPU_FTRS_POWER4 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
+ CPU_FTRS_PPC970 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
+ CPU_FTRS_POWER5 = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+ CPU_FTR_MMCRA | CPU_FTR_SMT |
+ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
+ CPU_FTR_MMCRA_SIHV,
+ CPU_FTRS_CELL = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 |
+ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT,
+ CPU_FTRS_COMPATIBLE = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
+ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2,
+#endif
+
+ CPU_FTRS_POSSIBLE =
+#if CLASSIC_PPC
+ CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
+ CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
+ CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
+ CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
+ CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
+ CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
+ CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
+ CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
+#else
+ CPU_FTRS_GENERIC_32 |
+#endif
+#ifdef CONFIG_PPC64BRIDGE
+ CPU_FTRS_POWER3_32 |
+#endif
+#ifdef CONFIG_POWER4
+ CPU_FTRS_POWER4_32 | CPU_FTRS_970_32 |
+#endif
+#ifdef CONFIG_8xx
+ CPU_FTRS_8XX |
+#endif
+#ifdef CONFIG_40x
+ CPU_FTRS_40X |
+#endif
+#ifdef CONFIG_44x
+ CPU_FTRS_44X |
+#endif
+#ifdef CONFIG_E200
+ CPU_FTRS_E200 |
+#endif
+#ifdef CONFIG_E500
+ CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef __powerpc64__
+ CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 |
+ CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_CELL |
+#endif
+ 0,
+
+ CPU_FTRS_ALWAYS =
+#if CLASSIC_PPC
+ CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+ CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
+ CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
+ CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
+ CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
+ CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
+ CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
+ CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
+#else
+ CPU_FTRS_GENERIC_32 &
+#endif
+#ifdef CONFIG_PPC64BRIDGE
+ CPU_FTRS_POWER3_32 &
+#endif
+#ifdef CONFIG_POWER4
+ CPU_FTRS_POWER4_32 & CPU_FTRS_970_32 &
+#endif
+#ifdef CONFIG_8xx
+ CPU_FTRS_8XX &
+#endif
+#ifdef CONFIG_40x
+ CPU_FTRS_40X &
+#endif
+#ifdef CONFIG_44x
+ CPU_FTRS_44X &
+#endif
+#ifdef CONFIG_E200
+ CPU_FTRS_E200 &
+#endif
+#ifdef CONFIG_E500
+ CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef __powerpc64__
+ CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 &
+ CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_CELL &
+#endif
+ CPU_FTRS_POSSIBLE,
+};
+
+static inline int cpu_has_feature(unsigned long feature)
+{
+ return (CPU_FTRS_ALWAYS & feature) ||
+ (CPU_FTRS_POSSIBLE
+ & cur_cpu_spec->cpu_features
+ & feature);
+}
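Because both masks are compile-time constants, the compiler can often decide the whole test statically. A minimal sketch; setup_slb() is a hypothetical helper and <asm/cputable.h> is assumed to be included:

    /* Illustrative sketch only. */
    static void example_mmu_init(void)
    {
            /* On a 32-bit build CPU_FTR_SLB is ASM_CONST(0x0), so
             * cpu_has_feature() folds to 0 and this branch (and the call)
             * is dropped at compile time; on 64-bit it becomes a runtime
             * test of cur_cpu_spec->cpu_features.
             */
            if (cpu_has_feature(CPU_FTR_SLB))
                    setup_slb();
    }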
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef __ASSEMBLY__
+
+#define BEGIN_FTR_SECTION 98:
+
+#ifndef __powerpc64__
+#define END_FTR_SECTION(msk, val) \
+99: \
+ .section __ftr_fixup,"a"; \
+ .align 2; \
+ .long msk; \
+ .long val; \
+ .long 98b; \
+ .long 99b; \
+ .previous
+#else /* __powerpc64__ */
+#define END_FTR_SECTION(msk, val) \
+99: \
+ .section __ftr_fixup,"a"; \
+ .align 3; \
+ .llong msk; \
+ .llong val; \
+ .llong 98b; \
+ .llong 99b; \
+ .previous
+#endif /* __powerpc64__ */
+
+#define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk))
+#define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_CPUTABLE_H */
diff --git a/include/asm-powerpc/dbdma.h b/include/asm-powerpc/dbdma.h
new file mode 100644
index 00000000000..8973565f95d
--- /dev/null
+++ b/include/asm-powerpc/dbdma.h
@@ -0,0 +1,102 @@
+/*
+ * Definitions for using the Apple Descriptor-Based DMA controller
+ * in Power Macintosh computers.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+
+#ifdef __KERNEL__
+#ifndef _ASM_DBDMA_H_
+#define _ASM_DBDMA_H_
+/*
+ * DBDMA control/status registers. All little-endian.
+ */
+struct dbdma_regs {
+ unsigned int control; /* lets you change bits in status */
+ unsigned int status; /* DMA and device status bits (see below) */
+ unsigned int cmdptr_hi; /* upper 32 bits of command address */
+ unsigned int cmdptr; /* (lower 32 bits of) command address (phys) */
+ unsigned int intr_sel; /* select interrupt condition bit */
+ unsigned int br_sel; /* select branch condition bit */
+ unsigned int wait_sel; /* select wait condition bit */
+ unsigned int xfer_mode;
+ unsigned int data2ptr_hi;
+ unsigned int data2ptr;
+ unsigned int res1;
+ unsigned int address_hi;
+ unsigned int br_addr_hi;
+ unsigned int res2[3];
+};
+
+/* Bits in control and status registers */
+#define RUN 0x8000
+#define PAUSE 0x4000
+#define FLUSH 0x2000
+#define WAKE 0x1000
+#define DEAD 0x0800
+#define ACTIVE 0x0400
+#define BT 0x0100
+#define DEVSTAT 0x00ff
+
+/*
+ * DBDMA command structure. These fields are all little-endian!
+ */
+struct dbdma_cmd {
+ unsigned short req_count; /* requested byte transfer count */
+ unsigned short command; /* command word (has bit-fields) */
+ unsigned int phy_addr; /* physical data address */
+ unsigned int cmd_dep; /* command-dependent field */
+ unsigned short res_count; /* residual count after completion */
+ unsigned short xfer_status; /* transfer status */
+};
+
+/* DBDMA command values in command field */
+#define OUTPUT_MORE 0 /* transfer memory data to stream */
+#define OUTPUT_LAST 0x1000 /* ditto followed by end marker */
+#define INPUT_MORE 0x2000 /* transfer stream data to memory */
+#define INPUT_LAST 0x3000 /* ditto, expect end marker */
+#define STORE_WORD 0x4000 /* write word (4 bytes) to device reg */
+#define LOAD_WORD 0x5000 /* read word (4 bytes) from device reg */
+#define DBDMA_NOP 0x6000 /* do nothing */
+#define DBDMA_STOP 0x7000 /* suspend processing */
+
+/* Key values in command field */
+#define KEY_STREAM0 0 /* usual data stream */
+#define KEY_STREAM1 0x100 /* control/status stream */
+#define KEY_STREAM2 0x200 /* device-dependent stream */
+#define KEY_STREAM3 0x300 /* device-dependent stream */
+#define KEY_REGS 0x500 /* device register space */
+#define KEY_SYSTEM 0x600 /* system memory-mapped space */
+#define KEY_DEVICE 0x700 /* device memory-mapped space */
+
+/* Interrupt control values in command field */
+#define INTR_NEVER 0 /* don't interrupt */
+#define INTR_IFSET 0x10 /* intr if condition bit is 1 */
+#define INTR_IFCLR 0x20 /* intr if condition bit is 0 */
+#define INTR_ALWAYS 0x30 /* always interrupt */
+
+/* Branch control values in command field */
+#define BR_NEVER 0 /* don't branch */
+#define BR_IFSET 0x4 /* branch if condition bit is 1 */
+#define BR_IFCLR 0x8 /* branch if condition bit is 0 */
+#define BR_ALWAYS 0xc /* always branch */
+
+/* Wait control values in command field */
+#define WAIT_NEVER 0 /* don't wait */
+#define WAIT_IFSET 1 /* wait if condition bit is 1 */
+#define WAIT_IFCLR 2 /* wait if condition bit is 0 */
+#define WAIT_ALWAYS 3 /* always wait */
+
+/* Align an address for a DBDMA command structure */
+#define DBDMA_ALIGN(x) (((unsigned long)(x) + sizeof(struct dbdma_cmd) - 1) \
+ & -sizeof(struct dbdma_cmd))
+
+/* Useful macros */
+#define DBDMA_DO_STOP(regs) do { \
+ out_le32(&((regs)->control), (RUN|FLUSH)<<16); \
+ while(in_le32(&((regs)->status)) & (ACTIVE|FLUSH)) \
+ ; \
+} while(0)
+
+#endif /* _ASM_DBDMA_H_ */
+#endif /* __KERNEL__ */
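For illustration, a driver builds a little-endian command list, aligns it with DBDMA_ALIGN(), and terminates it with DBDMA_STOP. A rough sketch using the st_le16()/st_le32() accessors from byteorder.h above; allocation, DMA mapping and register programming are omitted, the function name is hypothetical, and "space" is assumed to hold at least two commands plus alignment slack:

    /* Illustrative sketch only. */
    static struct dbdma_cmd *init_dbdma_ring(void *space, u32 buf_phys, int len)
    {
            struct dbdma_cmd *cp = (struct dbdma_cmd *) DBDMA_ALIGN(space);

            memset(cp, 0, 2 * sizeof(struct dbdma_cmd));
            st_le16(&cp[0].req_count, len);                  /* bytes to read */
            st_le16(&cp[0].command, INPUT_LAST | INTR_ALWAYS);
            st_le32(&cp[0].phy_addr, buf_phys);              /* data buffer (physical) */
            st_le16(&cp[1].command, DBDMA_STOP);             /* end of program */
            return cp;
    }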
diff --git a/include/asm-powerpc/dma.h b/include/asm-powerpc/dma.h
new file mode 100644
index 00000000000..926378d2cd9
--- /dev/null
+++ b/include/asm-powerpc/dma.h
@@ -0,0 +1,390 @@
+#ifndef _ASM_POWERPC_DMA_H
+#define _ASM_POWERPC_DMA_H
+
+/*
+ * Defines for using and allocating dma channels.
+ * Written by Hennus Bergman, 1992.
+ * High DMA channel support & info by Hannu Savolainen
+ * and John Boyd, Nov. 1992.
+ * Changes for ppc sound by Christoph Nadig
+ */
+
+/*
+ * Note: Adapted for PowerPC by Gary Thomas
+ * Modified by Cort Dougan <cort@cs.nmt.edu>
+ *
+ * None of this really applies for Power Macintoshes. There is
+ * basically just enough here to get kernel/dma.c to compile.
+ *
+ * There may be some comments or restrictions made here which are
+ * not valid for the PReP platform. Take what you read
+ * with a grain of salt.
+ */
+
+#include <linux/config.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+
+#ifndef MAX_DMA_CHANNELS
+#define MAX_DMA_CHANNELS 8
+#endif
+
+/* The maximum address that we can perform a DMA transfer to on this platform */
+/* Doesn't really apply... */
+#define MAX_DMA_ADDRESS (~0UL)
+
+#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
+
+#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
+#define dma_outb outb_p
+#else
+#define dma_outb outb
+#endif
+
+#define dma_inb inb
+
+/*
+ * NOTES about DMA transfers:
+ *
+ * controller 1: channels 0-3, byte operations, ports 00-1F
+ * controller 2: channels 4-7, word operations, ports C0-DF
+ *
+ * - ALL registers are 8 bits only, regardless of transfer size
+ * - channel 4 is not used - cascades 1 into 2.
+ * - channels 0-3 are byte - addresses/counts are for physical bytes
+ * - channels 5-7 are word - addresses/counts are for physical words
+ * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
+ * - transfer count loaded to registers is 1 less than actual count
+ * - controller 2 offsets are all even (2x offsets for controller 1)
+ * - page registers for 5-7 don't use data bit 0, represent 128K pages
+ * - page registers for 0-3 use bit 0, represent 64K pages
+ *
+ * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
+ * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
+ * Note that addresses loaded into registers must be _physical_ addresses,
+ * not logical addresses (which may differ if paging is active).
+ *
+ * Address mapping for channels 0-3:
+ *
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Address mapping for channels 5-7:
+ *
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
+ *
+ * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
+ * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
+ * the hardware level, so odd-byte transfers aren't possible).
+ *
+ * Transfer count (_not # bytes_) is limited to 64K, represented as actual
+ * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
+ * and up to 128K bytes may be transferred on channels 5-7 in one operation.
+ *
+ */
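A worked example of the word-channel (5-7) mapping described above, purely illustrative:

    /* Physical buffer at 0x00123456 on a word channel:
     *   word address  = 0x00123456 >> 1            = 0x00091a2b  (A0 ignored)
     *   page register = (0x00123456 >> 16) & 0xfe  = 0x12        (A23..A17)
     *   addr MSB byte = (0x00091a2b >> 8) & 0xff   = 0x1a        (A16..A9)
     *   addr LSB byte =  0x00091a2b & 0xff         = 0x2b        (A8..A1)
     * Counts are in words too, so a 4 KB buffer is programmed as
     * (4096 / 2) - 1 = 0x07ff.
     */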
+
+/* see prep_setup_arch() for detailed information */
+#if defined(CONFIG_SOUND_CS4232) && defined(CONFIG_PPC_PREP)
+extern long ppc_cs4232_dma, ppc_cs4232_dma2;
+#define SND_DMA1 ppc_cs4232_dma
+#define SND_DMA2 ppc_cs4232_dma2
+#else
+#define SND_DMA1 -1
+#define SND_DMA2 -1
+#endif
+
+/* 8237 DMA controllers */
+#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
+#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
+
+/* DMA controller registers */
+#define DMA1_CMD_REG 0x08 /* command register (w) */
+#define DMA1_STAT_REG 0x08 /* status register (r) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
+#define DMA1_MODE_REG 0x0B /* mode register (w) */
+#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+
+#define DMA2_CMD_REG 0xD0 /* command register (w) */
+#define DMA2_STAT_REG 0xD0 /* status register (r) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
+#define DMA2_MODE_REG 0xD6 /* mode register (w) */
+#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_LO_PAGE_0 0x87 /* DMA page registers */
+#define DMA_LO_PAGE_1 0x83
+#define DMA_LO_PAGE_2 0x81
+#define DMA_LO_PAGE_3 0x82
+#define DMA_LO_PAGE_5 0x8B
+#define DMA_LO_PAGE_6 0x89
+#define DMA_LO_PAGE_7 0x8A
+
+#define DMA_HI_PAGE_0 0x487 /* DMA page registers */
+#define DMA_HI_PAGE_1 0x483
+#define DMA_HI_PAGE_2 0x481
+#define DMA_HI_PAGE_3 0x482
+#define DMA_HI_PAGE_5 0x48B
+#define DMA_HI_PAGE_6 0x489
+#define DMA_HI_PAGE_7 0x48A
+
+#define DMA1_EXT_REG 0x40B
+#define DMA2_EXT_REG 0x4D6
+
+#ifndef __powerpc64__
+ /* in arch/ppc/kernel/setup.c -- Cort */
+ extern unsigned int DMA_MODE_WRITE;
+ extern unsigned int DMA_MODE_READ;
+ extern unsigned long ISA_DMA_THRESHOLD;
+#else
+ #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
+ #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
+#endif
+
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+
+#define DMA_AUTOINIT 0x10
+
+extern spinlock_t dma_spin_lock;
+
+static __inline__ unsigned long claim_dma_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dma_spin_lock, flags);
+ return flags;
+}
+
+static __inline__ void release_dma_lock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&dma_spin_lock, flags);
+}
+
+/* enable/disable a specific DMA channel */
+static __inline__ void enable_dma(unsigned int dmanr)
+{
+ unsigned char ucDmaCmd = 0x00;
+
+ if (dmanr != 4) {
+ dma_outb(0, DMA2_MASK_REG); /* This may not be enabled */
+ dma_outb(ucDmaCmd, DMA2_CMD_REG); /* Enable group */
+ }
+ if (dmanr <= 3) {
+ dma_outb(dmanr, DMA1_MASK_REG);
+ dma_outb(ucDmaCmd, DMA1_CMD_REG); /* Enable group */
+ } else {
+ dma_outb(dmanr & 3, DMA2_MASK_REG);
+ }
+}
+
+static __inline__ void disable_dma(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(dmanr | 4, DMA1_MASK_REG);
+ else
+ dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
+}
+
+/* Clear the 'DMA Pointer Flip Flop'.
+ * Write 0 for LSB/MSB, 1 for MSB/LSB access.
+ * Use this once to initialize the FF to a known state.
+ * After that, keep track of it. :-)
+ * --- In order to do that, the DMA routines below should ---
+ * --- only be used while interrupts are disabled! ---
+ */
+static __inline__ void clear_dma_ff(unsigned int dmanr)
+{
+ if (dmanr <= 3)
+ dma_outb(0, DMA1_CLEAR_FF_REG);
+ else
+ dma_outb(0, DMA2_CLEAR_FF_REG);
+}
+
+/* set mode (above) for a specific DMA channel */
+static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
+{
+ if (dmanr <= 3)
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
+ else
+ dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
+}
+
+/* Set only the page register bits of the transfer address.
+ * This is used for successive transfers when we know the contents of
+ * the lower 16 bits of the DMA current address register, but a 64k boundary
+ * may have been crossed.
+ */
+static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
+{
+ switch (dmanr) {
+ case 0:
+ dma_outb(pagenr, DMA_LO_PAGE_0);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_0);
+ break;
+ case 1:
+ dma_outb(pagenr, DMA_LO_PAGE_1);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_1);
+ break;
+ case 2:
+ dma_outb(pagenr, DMA_LO_PAGE_2);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_2);
+ break;
+ case 3:
+ dma_outb(pagenr, DMA_LO_PAGE_3);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_3);
+ break;
+ case 5:
+ if (SND_DMA1 == 5 || SND_DMA2 == 5)
+ dma_outb(pagenr, DMA_LO_PAGE_5);
+ else
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_5);
+ break;
+ case 6:
+ if (SND_DMA1 == 6 || SND_DMA2 == 6)
+ dma_outb(pagenr, DMA_LO_PAGE_6);
+ else
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_6);
+ break;
+ case 7:
+ if (SND_DMA1 == 7 || SND_DMA2 == 7)
+ dma_outb(pagenr, DMA_LO_PAGE_7);
+ else
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
+ dma_outb(pagenr >> 8, DMA_HI_PAGE_7);
+ break;
+ }
+}
+
+/* Set transfer address & page bits for specific DMA channel.
+ * Assumes dma flipflop is clear.
+ */
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
+{
+ if (dmanr <= 3) {
+ dma_outb(phys & 0xff,
+ ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ dma_outb((phys >> 8) & 0xff,
+ ((dmanr & 3) << 1) + IO_DMA1_BASE);
+ } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
+ dma_outb(phys & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((phys >> 8) & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((dmanr & 3), DMA2_EXT_REG);
+ } else {
+ dma_outb((phys >> 1) & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ dma_outb((phys >> 9) & 0xff,
+ ((dmanr & 3) << 2) + IO_DMA2_BASE);
+ }
+ set_dma_page(dmanr, phys >> 16);
+}
+
+
+/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
+ * a specific DMA channel.
+ * You must ensure the parameters are valid.
+ * NOTE: from a manual: "the number of transfers is one more
+ * than the initial word count"! This is taken into account.
+ * Assumes dma flip-flop is clear.
+ * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
+ */
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ count--;
+ if (dmanr <= 3) {
+ dma_outb(count & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ dma_outb((count >> 8) & 0xff,
+ ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
+ } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
+ dma_outb(count & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 8) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ } else {
+ dma_outb((count >> 1) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ dma_outb((count >> 9) & 0xff,
+ ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
+ }
+}
+
+
+/* Get DMA residue count. After a DMA transfer, this
+ * should return zero. Reading this while a DMA transfer is
+ * still in progress will return unpredictable results.
+ * If called before the channel has been used, it may return 1.
+ * Otherwise, it returns the number of _bytes_ left to transfer.
+ *
+ * Assumes DMA flip-flop is clear.
+ */
+static __inline__ int get_dma_residue(unsigned int dmanr)
+{
+ unsigned int io_port = (dmanr <= 3)
+ ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
+ : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
+
+ /* using short to get 16-bit wrap around */
+ unsigned short count;
+
+ count = 1 + dma_inb(io_port);
+ count += dma_inb(io_port) << 8;
+
+ return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2)
+ ? count : (count << 1);
+}
+
+/* These are in kernel/dma.c: */
+
+/* reserve a DMA channel */
+extern int request_dma(unsigned int dmanr, const char *device_id);
+/* release it again */
+extern void free_dma(unsigned int dmanr);
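+
+/*
+ * Typical single-transfer programming sequence using the helpers above
+ * (an illustrative sketch only; "chan", "buf_phys" and "len" stand in for
+ * a channel obtained from request_dma(), a suitable physical buffer
+ * address and a byte count):
+ *
+ *	unsigned long flags = claim_dma_lock();
+ *	disable_dma(chan);
+ *	clear_dma_ff(chan);
+ *	set_dma_mode(chan, DMA_MODE_READ);
+ *	set_dma_addr(chan, buf_phys);
+ *	set_dma_count(chan, len);
+ *	enable_dma(chan);
+ *	release_dma_lock(flags);
+ */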
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
+
+#endif /* _ASM_POWERPC_DMA_H */
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
new file mode 100644
index 00000000000..d140577d0a0
--- /dev/null
+++ b/include/asm-powerpc/elf.h
@@ -0,0 +1,415 @@
+#ifndef _ASM_POWERPC_ELF_H
+#define _ASM_POWERPC_ELF_H
+
+#include <asm/types.h>
+#include <asm/ptrace.h>
+#include <asm/cputable.h>
+#include <asm/auxvec.h>
+#include <asm/page.h>
+
+/* PowerPC relocations defined by the ABIs */
+#define R_PPC_NONE 0
+#define R_PPC_ADDR32 1 /* 32bit absolute address */
+#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
+#define R_PPC_ADDR16 3 /* 16bit absolute address */
+#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
+#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
+#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
+#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
+#define R_PPC_ADDR14_BRTAKEN 8
+#define R_PPC_ADDR14_BRNTAKEN 9
+#define R_PPC_REL24 10 /* PC relative 26 bit */
+#define R_PPC_REL14 11 /* PC relative 16 bit */
+#define R_PPC_REL14_BRTAKEN 12
+#define R_PPC_REL14_BRNTAKEN 13
+#define R_PPC_GOT16 14
+#define R_PPC_GOT16_LO 15
+#define R_PPC_GOT16_HI 16
+#define R_PPC_GOT16_HA 17
+#define R_PPC_PLTREL24 18
+#define R_PPC_COPY 19
+#define R_PPC_GLOB_DAT 20
+#define R_PPC_JMP_SLOT 21
+#define R_PPC_RELATIVE 22
+#define R_PPC_LOCAL24PC 23
+#define R_PPC_UADDR32 24
+#define R_PPC_UADDR16 25
+#define R_PPC_REL32 26
+#define R_PPC_PLT32 27
+#define R_PPC_PLTREL32 28
+#define R_PPC_PLT16_LO 29
+#define R_PPC_PLT16_HI 30
+#define R_PPC_PLT16_HA 31
+#define R_PPC_SDAREL16 32
+#define R_PPC_SECTOFF 33
+#define R_PPC_SECTOFF_LO 34
+#define R_PPC_SECTOFF_HI 35
+#define R_PPC_SECTOFF_HA 36
+
+/* PowerPC relocations defined for the TLS access ABI. */
+#define R_PPC_TLS 67 /* none (sym+add)@tls */
+#define R_PPC_DTPMOD32 68 /* word32 (sym+add)@dtpmod */
+#define R_PPC_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC_TPREL32 73 /* word32 (sym+add)@tprel */
+#define R_PPC_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC_DTPREL32 78 /* word32 (sym+add)@dtprel */
+#define R_PPC_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC_GOT_TPREL16 87 /* half16* (sym+add)@got@tprel */
+#define R_PPC_GOT_TPREL16_LO 88 /* half16 (sym+add)@got@tprel@l */
+#define R_PPC_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC_GOT_DTPREL16 91 /* half16* (sym+add)@got@dtprel */
+#define R_PPC_GOT_DTPREL16_LO 92 /* half16* (sym+add)@got@dtprel@l */
+#define R_PPC_GOT_DTPREL16_HI 93 /* half16* (sym+add)@got@dtprel@h */
+#define R_PPC_GOT_DTPREL16_HA 94 /* half16* (sym+add)@got@dtprel@ha */
+
+/* Keep this the last entry. */
+#define R_PPC_NUM 95
+
+/*
+ * ELF register definitions..
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/ptrace.h>
+
+#define ELF_NGREG 48 /* includes nip, msr, lr, etc. */
+#define ELF_NFPREG 33 /* includes fpscr */
+
+typedef unsigned long elf_greg_t64;
+typedef elf_greg_t64 elf_gregset_t64[ELF_NGREG];
+
+typedef unsigned int elf_greg_t32;
+typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
+
+/*
+ * ELF_ARCH, CLASS, and DATA are used to set parameters in the core dumps.
+ */
+#ifdef __powerpc64__
+# define ELF_NVRREG32 33 /* includes vscr & vrsave stuffed together */
+# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
+# define ELF_GREG_TYPE elf_greg_t64
+#else
+# define ELF_NEVRREG 34 /* includes acc (as 2) */
+# define ELF_NVRREG 33 /* includes vscr */
+# define ELF_GREG_TYPE elf_greg_t32
+# define ELF_ARCH EM_PPC
+# define ELF_CLASS ELFCLASS32
+# define ELF_DATA ELFDATA2MSB
+#endif /* __powerpc64__ */
+
+#ifndef ELF_ARCH
+# define ELF_ARCH EM_PPC64
+# define ELF_CLASS ELFCLASS64
+# define ELF_DATA ELFDATA2MSB
+ typedef elf_greg_t64 elf_greg_t;
+ typedef elf_gregset_t64 elf_gregset_t;
+# define elf_addr_t unsigned long
+#else
+ /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
+ typedef elf_greg_t32 elf_greg_t;
+ typedef elf_gregset_t32 elf_gregset_t;
+# define elf_addr_t u32
+#endif /* ELF_ARCH */
+
+/* Floating point registers */
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+/* Altivec registers */
+/*
+ * The entries with indexes 0-31 contain the corresponding vector registers.
+ * The entry with index 32 contains the vscr as the last word (offset 12)
+ * within the quadword. This allows the vscr to be stored as either a
+ * quadword (since it must be copied via a vector register to/from storage)
+ * or as a word.
+ *
+ * 64-bit kernel notes: The entry at index 33 contains the vrsave as the first
+ * word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ *
+ * Note that it's _not_ compatible with the 32-bit ucontext, which stuffs
+ * vrsave in along with vscr and so uses only 33 vectors for the register set
+ */
+typedef __vector128 elf_vrreg_t;
+typedef elf_vrreg_t elf_vrregset_t[ELF_NVRREG];
+#ifdef __powerpc64__
+typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
+#endif
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (0x08000000)
+
+#ifdef __KERNEL__
+
+/* Common routine for both 32-bit and 64-bit processes */
+static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
+ struct pt_regs *regs)
+{
+ int i, nregs;
+
+ memset((void *)elf_regs, 0, sizeof(elf_gregset_t));
+
+ /* Our registers are always unsigned longs, whether we're a 32 bit
+ * process or 64 bit, on either a 64 bit or 32 bit kernel.
+ * Don't use ELF_GREG_TYPE here. */
+ nregs = sizeof(struct pt_regs) / sizeof(unsigned long);
+ if (nregs > ELF_NGREG)
+ nregs = ELF_NGREG;
+
+ for (i = 0; i < nregs; i++) {
+ /* This will correctly truncate 64 bit registers to 32 bits
+ * for a 32 bit process on a 64 bit kernel. */
+ elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
+ }
+}
+#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
+
+static inline int dump_task_regs(struct task_struct *tsk,
+ elf_gregset_t *elf_regs)
+{
+ struct pt_regs *regs = tsk->thread.regs;
+ if (regs)
+ ppc_elf_core_copy_regs(*elf_regs, regs);
+
+ return 1;
+}
+#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
+
+extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
+#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+
+#endif /* __KERNEL__ */
+
+/* ELF_HWCAP yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This could be done in userspace,
+ but it's not easy, and we've already done it here. */
+# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->gpr[2] = load_addr; \
+} while (0)
+#endif /* __powerpc64__ */
+
+/* This yields a string that ld.so will use to load implementation
+ specific libraries for optimization. This is more specific in
+ intent than poking at uname or /proc/cpuinfo.
+
+   For the moment we don't provide a platform string (ELF_PLATFORM is
+   NULL), but that could change... */
+
+#define ELF_PLATFORM (NULL)
+
+#ifdef __KERNEL__
+
+#ifdef __powerpc64__
+# define SET_PERSONALITY(ex, ibcs2) \
+do { \
+ unsigned long new_flags = 0; \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ new_flags = _TIF_32BIT; \
+ if ((current_thread_info()->flags & _TIF_32BIT) \
+ != new_flags) \
+ set_thread_flag(TIF_ABI_PENDING); \
+ else \
+ clear_thread_flag(TIF_ABI_PENDING); \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX); \
+} while (0)
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically. This
+ * is only required to work around bugs in old 32-bit toolchains. Since
+ * the 64-bit ABI has never had these issues, don't enable the workaround
+ * even if we have an executable stack.
+ */
+# define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \
+ (exec_stk != EXSTACK_DISABLE_X) : 0)
+#else
+# define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+#endif /* __powerpc64__ */
+
+#endif /* __KERNEL__ */
+
+extern int dcache_bsize;
+extern int icache_bsize;
+extern int ucache_bsize;
+
+#ifdef __powerpc64__
+struct linux_binprm;
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES /* vDSO has arch_setup_additional_pages */
+extern int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+#else
+#define VDSO_AUX_ENT(a,b)
+#endif /* __powerpc64__ */
+
+/*
+ * The requirements here are:
+ * - keep the final alignment of sp (sp & 0xf)
+ * - make sure the 32-bit value at the first 16 byte aligned position of
+ * AUXV is greater than 16 for glibc compatibility.
+ * AT_IGNOREPPC is used for that.
+ * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
+ * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
+ */
+#define ARCH_DLINFO \
+do { \
+ /* Handle glibc compatibility. */ \
+ NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
+ NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
+ /* Cache size items */ \
+ NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
+ NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
+ NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->thread.vdso_base) \
+} while (0)
+
+/* PowerPC64 relocations defined by the ABIs */
+#define R_PPC64_NONE R_PPC_NONE
+#define R_PPC64_ADDR32 R_PPC_ADDR32 /* 32bit absolute address. */
+#define R_PPC64_ADDR24 R_PPC_ADDR24 /* 26bit address, word aligned. */
+#define R_PPC64_ADDR16 R_PPC_ADDR16 /* 16bit absolute address. */
+#define R_PPC64_ADDR16_LO R_PPC_ADDR16_LO /* lower 16bits of abs. address. */
+#define R_PPC64_ADDR16_HI R_PPC_ADDR16_HI /* high 16bits of abs. address. */
+#define R_PPC64_ADDR16_HA R_PPC_ADDR16_HA /* adjusted high 16bits. */
+#define R_PPC64_ADDR14 R_PPC_ADDR14 /* 16bit address, word aligned. */
+#define R_PPC64_ADDR14_BRTAKEN R_PPC_ADDR14_BRTAKEN
+#define R_PPC64_ADDR14_BRNTAKEN R_PPC_ADDR14_BRNTAKEN
+#define R_PPC64_REL24 R_PPC_REL24 /* PC relative 26 bit, word aligned. */
+#define R_PPC64_REL14 R_PPC_REL14 /* PC relative 16 bit. */
+#define R_PPC64_REL14_BRTAKEN R_PPC_REL14_BRTAKEN
+#define R_PPC64_REL14_BRNTAKEN R_PPC_REL14_BRNTAKEN
+#define R_PPC64_GOT16 R_PPC_GOT16
+#define R_PPC64_GOT16_LO R_PPC_GOT16_LO
+#define R_PPC64_GOT16_HI R_PPC_GOT16_HI
+#define R_PPC64_GOT16_HA R_PPC_GOT16_HA
+
+#define R_PPC64_COPY R_PPC_COPY
+#define R_PPC64_GLOB_DAT R_PPC_GLOB_DAT
+#define R_PPC64_JMP_SLOT R_PPC_JMP_SLOT
+#define R_PPC64_RELATIVE R_PPC_RELATIVE
+
+#define R_PPC64_UADDR32 R_PPC_UADDR32
+#define R_PPC64_UADDR16 R_PPC_UADDR16
+#define R_PPC64_REL32 R_PPC_REL32
+#define R_PPC64_PLT32 R_PPC_PLT32
+#define R_PPC64_PLTREL32 R_PPC_PLTREL32
+#define R_PPC64_PLT16_LO R_PPC_PLT16_LO
+#define R_PPC64_PLT16_HI R_PPC_PLT16_HI
+#define R_PPC64_PLT16_HA R_PPC_PLT16_HA
+
+#define R_PPC64_SECTOFF R_PPC_SECTOFF
+#define R_PPC64_SECTOFF_LO R_PPC_SECTOFF_LO
+#define R_PPC64_SECTOFF_HI R_PPC_SECTOFF_HI
+#define R_PPC64_SECTOFF_HA R_PPC_SECTOFF_HA
+#define R_PPC64_ADDR30 37 /* word30 (S + A - P) >> 2. */
+#define R_PPC64_ADDR64 38 /* doubleword64 S + A. */
+#define R_PPC64_ADDR16_HIGHER 39 /* half16 #higher(S + A). */
+#define R_PPC64_ADDR16_HIGHERA 40 /* half16 #highera(S + A). */
+#define R_PPC64_ADDR16_HIGHEST 41 /* half16 #highest(S + A). */
+#define R_PPC64_ADDR16_HIGHESTA 42 /* half16 #highesta(S + A). */
+#define R_PPC64_UADDR64 43 /* doubleword64 S + A. */
+#define R_PPC64_REL64 44 /* doubleword64 S + A - P. */
+#define R_PPC64_PLT64 45 /* doubleword64 L + A. */
+#define R_PPC64_PLTREL64 46 /* doubleword64 L + A - P. */
+#define R_PPC64_TOC16 47 /* half16* S + A - .TOC. */
+#define R_PPC64_TOC16_LO 48 /* half16 #lo(S + A - .TOC.). */
+#define R_PPC64_TOC16_HI 49 /* half16 #hi(S + A - .TOC.). */
+#define R_PPC64_TOC16_HA 50 /* half16 #ha(S + A - .TOC.). */
+#define R_PPC64_TOC 51 /* doubleword64 .TOC. */
+#define R_PPC64_PLTGOT16 52 /* half16* M + A. */
+#define R_PPC64_PLTGOT16_LO 53 /* half16 #lo(M + A). */
+#define R_PPC64_PLTGOT16_HI 54 /* half16 #hi(M + A). */
+#define R_PPC64_PLTGOT16_HA 55 /* half16 #ha(M + A). */
+
+#define R_PPC64_ADDR16_DS 56 /* half16ds* (S + A) >> 2. */
+#define R_PPC64_ADDR16_LO_DS 57 /* half16ds #lo(S + A) >> 2. */
+#define R_PPC64_GOT16_DS 58 /* half16ds* (G + A) >> 2. */
+#define R_PPC64_GOT16_LO_DS 59 /* half16ds #lo(G + A) >> 2. */
+#define R_PPC64_PLT16_LO_DS 60 /* half16ds #lo(L + A) >> 2. */
+#define R_PPC64_SECTOFF_DS 61 /* half16ds* (R + A) >> 2. */
+#define R_PPC64_SECTOFF_LO_DS 62 /* half16ds #lo(R + A) >> 2. */
+#define R_PPC64_TOC16_DS 63 /* half16ds* (S + A - .TOC.) >> 2. */
+#define R_PPC64_TOC16_LO_DS 64 /* half16ds #lo(S + A - .TOC.) >> 2. */
+#define R_PPC64_PLTGOT16_DS 65 /* half16ds* (M + A) >> 2. */
+#define R_PPC64_PLTGOT16_LO_DS 66 /* half16ds #lo(M + A) >> 2. */
+
+/* PowerPC64 relocations defined for the TLS access ABI. */
+#define R_PPC64_TLS 67 /* none (sym+add)@tls */
+#define R_PPC64_DTPMOD64 68 /* doubleword64 (sym+add)@dtpmod */
+#define R_PPC64_TPREL16 69 /* half16* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO 70 /* half16 (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HI 71 /* half16 (sym+add)@tprel@h */
+#define R_PPC64_TPREL16_HA 72 /* half16 (sym+add)@tprel@ha */
+#define R_PPC64_TPREL64 73 /* doubleword64 (sym+add)@tprel */
+#define R_PPC64_DTPREL16 74 /* half16* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO 75 /* half16 (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HI 76 /* half16 (sym+add)@dtprel@h */
+#define R_PPC64_DTPREL16_HA 77 /* half16 (sym+add)@dtprel@ha */
+#define R_PPC64_DTPREL64 78 /* doubleword64 (sym+add)@dtprel */
+#define R_PPC64_GOT_TLSGD16 79 /* half16* (sym+add)@got@tlsgd */
+#define R_PPC64_GOT_TLSGD16_LO 80 /* half16 (sym+add)@got@tlsgd@l */
+#define R_PPC64_GOT_TLSGD16_HI 81 /* half16 (sym+add)@got@tlsgd@h */
+#define R_PPC64_GOT_TLSGD16_HA 82 /* half16 (sym+add)@got@tlsgd@ha */
+#define R_PPC64_GOT_TLSLD16 83 /* half16* (sym+add)@got@tlsld */
+#define R_PPC64_GOT_TLSLD16_LO 84 /* half16 (sym+add)@got@tlsld@l */
+#define R_PPC64_GOT_TLSLD16_HI 85 /* half16 (sym+add)@got@tlsld@h */
+#define R_PPC64_GOT_TLSLD16_HA 86 /* half16 (sym+add)@got@tlsld@ha */
+#define R_PPC64_GOT_TPREL16_DS 87 /* half16ds* (sym+add)@got@tprel */
+#define R_PPC64_GOT_TPREL16_LO_DS 88 /* half16ds (sym+add)@got@tprel@l */
+#define R_PPC64_GOT_TPREL16_HI 89 /* half16 (sym+add)@got@tprel@h */
+#define R_PPC64_GOT_TPREL16_HA 90 /* half16 (sym+add)@got@tprel@ha */
+#define R_PPC64_GOT_DTPREL16_DS 91 /* half16ds* (sym+add)@got@dtprel */
+#define R_PPC64_GOT_DTPREL16_LO_DS 92 /* half16ds (sym+add)@got@dtprel@l */
+#define R_PPC64_GOT_DTPREL16_HI 93 /* half16 (sym+add)@got@dtprel@h */
+#define R_PPC64_GOT_DTPREL16_HA 94 /* half16 (sym+add)@got@dtprel@ha */
+#define R_PPC64_TPREL16_DS 95 /* half16ds* (sym+add)@tprel */
+#define R_PPC64_TPREL16_LO_DS 96 /* half16ds (sym+add)@tprel@l */
+#define R_PPC64_TPREL16_HIGHER 97 /* half16 (sym+add)@tprel@higher */
+#define R_PPC64_TPREL16_HIGHERA 98 /* half16 (sym+add)@tprel@highera */
+#define R_PPC64_TPREL16_HIGHEST 99 /* half16 (sym+add)@tprel@highest */
+#define R_PPC64_TPREL16_HIGHESTA 100 /* half16 (sym+add)@tprel@highesta */
+#define R_PPC64_DTPREL16_DS 101 /* half16ds* (sym+add)@dtprel */
+#define R_PPC64_DTPREL16_LO_DS 102 /* half16ds (sym+add)@dtprel@l */
+#define R_PPC64_DTPREL16_HIGHER 103 /* half16 (sym+add)@dtprel@higher */
+#define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */
+#define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */
+#define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */
+
+/* Keep this the last entry. */
+#define R_PPC64_NUM 107
+
+#endif /* _ASM_POWERPC_ELF_H */
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
new file mode 100644
index 00000000000..806c142ae9e
--- /dev/null
+++ b/include/asm-powerpc/firmware.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_POWERPC_FIRMWARE_H
+#define __ASM_POWERPC_FIRMWARE_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+/* firmware feature bitmask values */
+#define FIRMWARE_MAX_FEATURES 63
+
+#define FW_FEATURE_PFT (1UL<<0)
+#define FW_FEATURE_TCE (1UL<<1)
+#define FW_FEATURE_SPRG0 (1UL<<2)
+#define FW_FEATURE_DABR (1UL<<3)
+#define FW_FEATURE_COPY (1UL<<4)
+#define FW_FEATURE_ASR (1UL<<5)
+#define FW_FEATURE_DEBUG (1UL<<6)
+#define FW_FEATURE_TERM (1UL<<7)
+#define FW_FEATURE_PERF (1UL<<8)
+#define FW_FEATURE_DUMP (1UL<<9)
+#define FW_FEATURE_INTERRUPT (1UL<<10)
+#define FW_FEATURE_MIGRATE (1UL<<11)
+#define FW_FEATURE_PERFMON (1UL<<12)
+#define FW_FEATURE_CRQ (1UL<<13)
+#define FW_FEATURE_VIO (1UL<<14)
+#define FW_FEATURE_RDMA (1UL<<15)
+#define FW_FEATURE_LLAN (1UL<<16)
+#define FW_FEATURE_BULK (1UL<<17)
+#define FW_FEATURE_XDABR (1UL<<18)
+#define FW_FEATURE_MULTITCE (1UL<<19)
+#define FW_FEATURE_SPLPAR (1UL<<20)
+#define FW_FEATURE_ISERIES (1UL<<21)
+
+enum {
+ FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
+ FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
+ FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
+ FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
+ FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
+ FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
+ FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
+ FW_FEATURE_SPLPAR,
+ FW_FEATURE_PSERIES_ALWAYS = 0,
+ FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES,
+ FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES,
+ FW_FEATURE_POSSIBLE =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ FW_FEATURE_ISERIES_POSSIBLE |
+#endif
+ 0,
+ FW_FEATURE_ALWAYS =
+#ifdef CONFIG_PPC_PSERIES
+ FW_FEATURE_PSERIES_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_ISERIES
+ FW_FEATURE_ISERIES_ALWAYS &
+#endif
+ FW_FEATURE_POSSIBLE,
+};
+
+/* This is used to identify firmware features which are available
+ * to the kernel.
+ */
+extern unsigned long ppc64_firmware_features;
+
+static inline unsigned long firmware_has_feature(unsigned long feature)
+{
+ return (FW_FEATURE_ALWAYS & feature) ||
+ (FW_FEATURE_POSSIBLE & ppc64_firmware_features & feature);
+}
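+
+/*
+ * Example use (illustrative only; setup_splpar() is just a made-up
+ * placeholder for platform code):
+ *
+ *	if (firmware_has_feature(FW_FEATURE_SPLPAR))
+ *		setup_splpar();
+ */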
+
+#ifdef CONFIG_PPC_PSERIES
+typedef struct {
+ unsigned long val;
+ char * name;
+} firmware_feature_t;
+
+extern firmware_feature_t firmware_features_table[];
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
new file mode 100644
index 00000000000..37c94e52ab6
--- /dev/null
+++ b/include/asm-powerpc/futex.h
@@ -0,0 +1,84 @@
+#ifndef _ASM_POWERPC_FUTEX_H
+#define _ASM_POWERPC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/synch.h>
+#include <asm/uaccess.h>
+#include <asm/ppc_asm.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile ( \
+ SYNC_ON_SMP \
+"1: lwarx %0,0,%2\n" \
+ insn \
+"2: stwcx. %1,0,%2\n" \
+ "bne- 1b\n" \
+ "li %1,0\n" \
+"3: .section .fixup,\"ax\"\n" \
+"4: li %1,%3\n" \
+ "b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".align 3\n" \
+ DATAL " 1b,4b,2b,4b\n" \
+ ".previous" \
+ : "=&r" (oldval), "=&r" (ret) \
+ : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+ : "cr0", "memory")
+
+static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ dec_preempt_count();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
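+
+/*
+ * Worked example of the decoding above (illustrative only): an encoded_op
+ * of 0x10002000 unpacks to op = FUTEX_OP_ADD, cmp = FUTEX_OP_CMP_EQ,
+ * oparg = 2 and cmparg = 0, i.e. "atomically add 2 to *uaddr and report
+ * whether the old value was equal to 0".
+ */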
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_FUTEX_H */
diff --git a/include/asm-powerpc/grackle.h b/include/asm-powerpc/grackle.h
new file mode 100644
index 00000000000..563c7a5e64c
--- /dev/null
+++ b/include/asm-powerpc/grackle.h
@@ -0,0 +1,7 @@
+/*
+ * Functions for setting up and using an MPC106 northbridge
+ */
+
+#include <asm/pci-bridge.h>
+
+extern void setup_grackle(struct pci_controller *hose);
diff --git a/include/asm-powerpc/hardirq.h b/include/asm-powerpc/hardirq.h
new file mode 100644
index 00000000000..3b3e3b49ec1
--- /dev/null
+++ b/include/asm-powerpc/hardirq.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_POWERPC_HARDIRQ_H
+#define _ASM_POWERPC_HARDIRQ_H
+
+#include <asm/irq.h>
+#include <asm/bug.h>
+
+/* The __last_jiffy_stamp field is needed to ensure that no decrementer
+ * interrupt is lost on SMP machines. Since on most CPUs it is in the same
+ * cache line as local_irq_count, it is cheap to access and is also used on UP
+ * for uniformity.
+ */
+typedef struct {
+ unsigned int __softirq_pending; /* set_bit is used on this */
+ unsigned int __last_jiffy_stamp;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
+
+static inline void ack_bad_irq(int irq)
+{
+ printk(KERN_CRIT "illegal vector %d received!\n", irq);
+ BUG();
+}
+
+#endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/include/asm-powerpc/heathrow.h b/include/asm-powerpc/heathrow.h
new file mode 100644
index 00000000000..22ac179856b
--- /dev/null
+++ b/include/asm-powerpc/heathrow.h
@@ -0,0 +1,62 @@
+/*
+ * heathrow.h: definitions for using the "Heathrow" I/O controller chip.
+ *
+ * Grabbed from Open Firmware definitions on a PowerBook G3 Series
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+
+/* Front light color on Yikes/B&W G3. 32 bits */
+#define HEATHROW_FRONT_LIGHT 0x32 /* (set to 0 or 0xffffffff) */
+
+/* Brightness/contrast (gossamer iMac ?). 8 bits */
+#define HEATHROW_BRIGHTNESS_CNTL 0x32
+#define HEATHROW_CONTRAST_CNTL 0x33
+
+/* offset from ohare base for feature control register */
+#define HEATHROW_MBCR 0x34 /* Media bay control */
+#define HEATHROW_FCR 0x38 /* Feature control */
+#define HEATHROW_AUX_CNTL_REG 0x3c /* Aux control */
+
+/*
+ * Bits in feature control register.
+ * Bits suffixed with _N use inverted logic
+ */
+#define HRW_SCC_TRANS_EN_N 0x00000001 /* Also controls modem power */
+#define HRW_BAY_POWER_N 0x00000002
+#define HRW_BAY_PCI_ENABLE 0x00000004
+#define HRW_BAY_IDE_ENABLE 0x00000008
+#define HRW_BAY_FLOPPY_ENABLE 0x00000010
+#define HRW_IDE0_ENABLE 0x00000020
+#define HRW_IDE0_RESET_N 0x00000040
+#define HRW_BAY_DEV_MASK 0x0000001c
+#define HRW_BAY_RESET_N 0x00000080
+#define HRW_IOBUS_ENABLE 0x00000100 /* Internal IDE ? */
+#define HRW_SCC_ENABLE 0x00000200
+#define HRW_MESH_ENABLE 0x00000400
+#define HRW_SWIM_ENABLE 0x00000800
+#define HRW_SOUND_POWER_N 0x00001000
+#define HRW_SOUND_CLK_ENABLE 0x00002000
+#define HRW_SCCA_IO 0x00004000
+#define HRW_SCCB_IO 0x00008000
+#define HRW_PORT_OR_DESK_VIA_N 0x00010000 /* This one is 0 on PowerBook */
+#define HRW_PWM_MON_ID_N 0x00020000 /* ??? (0) */
+#define HRW_HOOK_MB_CNT_N 0x00040000 /* ??? (0) */
+#define HRW_SWIM_CLONE_FLOPPY 0x00080000 /* ??? (0) */
+#define HRW_AUD_RUN22 0x00100000 /* ??? (1) */
+#define HRW_SCSI_LINK_MODE 0x00200000 /* Read ??? (1) */
+#define HRW_ARB_BYPASS 0x00400000 /* Disable internal PCI arbiter */
+#define HRW_IDE1_RESET_N 0x00800000 /* Media bay */
+#define HRW_SLOW_SCC_PCLK 0x01000000 /* ??? (0) */
+#define HRW_RESET_SCC 0x02000000
+#define HRW_MFDC_CELL_ENABLE 0x04000000 /* ??? (0) */
+#define HRW_USE_MFDC 0x08000000 /* ??? (0) */
+#define HRW_BMAC_IO_ENABLE 0x60000000 /* two bits, not documented in OF */
+#define HRW_BMAC_RESET 0x80000000 /* not documented in OF */
+
+/* We OR those features at boot on desktop G3s */
+#define HRW_DEFAULTS (HRW_SCCA_IO | HRW_SCCB_IO | HRW_SCC_ENABLE)
+
+/* Looks like Heathrow has some sort of GPIOs as well... */
+#define HRW_GPIO_MODEM_RESET 0x6d
+
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
new file mode 100644
index 00000000000..c37b31b9633
--- /dev/null
+++ b/include/asm-powerpc/hw_irq.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_HW_IRQ_H
+#define _ASM_POWERPC_HW_IRQ_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+extern void timer_interrupt(struct pt_regs *);
+extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
+
+#ifdef CONFIG_PPC_ISERIES
+
+extern unsigned long local_get_flags(void);
+extern unsigned long local_irq_disable(void);
+extern void local_irq_restore(unsigned long);
+
+#define local_irq_enable() local_irq_restore(1)
+#define local_save_flags(flags) ((flags) = local_get_flags())
+#define local_irq_save(flags) ((flags) = local_irq_disable())
+
+#define irqs_disabled() (local_get_flags() == 0)
+
+#else
+
+#if defined(CONFIG_BOOKE)
+#define SET_MSR_EE(x) mtmsr(x)
+#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#elif defined(__powerpc64__)
+#define SET_MSR_EE(x) __mtmsrd(x, 1)
+#define local_irq_restore(flags) do { \
+ __asm__ __volatile__("": : :"memory"); \
+ __mtmsrd((flags), 1); \
+} while(0)
+#else
+#define SET_MSR_EE(x) mtmsr(x)
+#define local_irq_restore(flags) mtmsr(flags)
+#endif
+
+static inline void local_irq_disable(void)
+{
+#ifdef CONFIG_BOOKE
+ __asm__ __volatile__("wrteei 0": : :"memory");
+#else
+ unsigned long msr;
+ __asm__ __volatile__("": : :"memory");
+ msr = mfmsr();
+ SET_MSR_EE(msr & ~MSR_EE);
+#endif
+}
+
+static inline void local_irq_enable(void)
+{
+#ifdef CONFIG_BOOKE
+ __asm__ __volatile__("wrteei 1": : :"memory");
+#else
+ unsigned long msr;
+ __asm__ __volatile__("": : :"memory");
+ msr = mfmsr();
+ SET_MSR_EE(msr | MSR_EE);
+#endif
+}
+
+static inline void local_irq_save_ptr(unsigned long *flags)
+{
+ unsigned long msr;
+ msr = mfmsr();
+ *flags = msr;
+#ifdef CONFIG_BOOKE
+ __asm__ __volatile__("wrteei 0": : :"memory");
+#else
+ SET_MSR_EE(msr & ~MSR_EE);
+#endif
+ __asm__ __volatile__("": : :"memory");
+}
+
+#define local_save_flags(flags) ((flags) = mfmsr())
+#define local_irq_save(flags) local_irq_save_ptr(&flags)
+#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
+
+#endif /* CONFIG_PPC_ISERIES */
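+
+/*
+ * Example critical section using the accessors above (illustrative only;
+ * the body is a placeholder):
+ *
+ *	unsigned long flags;
+ *
+ *	local_irq_save(flags);
+ *	... touch interrupt-shared state ...
+ *	local_irq_restore(flags);
+ */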
+
+#define mask_irq(irq) \
+ ({ \
+ irq_desc_t *desc = get_irq_desc(irq); \
+ if (desc->handler && desc->handler->disable) \
+ desc->handler->disable(irq); \
+ })
+#define unmask_irq(irq) \
+ ({ \
+ irq_desc_t *desc = get_irq_desc(irq); \
+ if (desc->handler && desc->handler->enable) \
+ desc->handler->enable(irq); \
+ })
+#define ack_irq(irq) \
+ ({ \
+ irq_desc_t *desc = get_irq_desc(irq); \
+ if (desc->handler && desc->handler->ack) \
+ desc->handler->ack(irq); \
+ })
+
+/* Should we handle this via lost interrupts and IPIs, or should we not care,
+ * as we do now? --BenH.
+ */
+struct hw_interrupt_type;
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/include/asm-powerpc/i8259.h b/include/asm-powerpc/i8259.h
new file mode 100644
index 00000000000..fc4bfee124d
--- /dev/null
+++ b/include/asm-powerpc/i8259.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_POWERPC_I8259_H
+#define _ASM_POWERPC_I8259_H
+
+#include <linux/irq.h>
+
+extern struct hw_interrupt_type i8259_pic;
+
+extern void i8259_init(unsigned long intack_addr, int offset);
+extern int i8259_irq(struct pt_regs *regs);
+extern int i8259_irq_cascade(struct pt_regs *regs, void *unused);
+
+#endif /* _ASM_POWERPC_I8259_H */
diff --git a/include/asm-powerpc/ioctls.h b/include/asm-powerpc/ioctls.h
index 5b94ff489b8..279a6229584 100644
--- a/include/asm-powerpc/ioctls.h
+++ b/include/asm-powerpc/ioctls.h
@@ -62,6 +62,9 @@
# define TIOCM_DSR 0x100
# define TIOCM_CD TIOCM_CAR
# define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
new file mode 100644
index 00000000000..9d91bdd667a
--- /dev/null
+++ b/include/asm-powerpc/iommu.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
+ * Rewrite, cleanup:
+ * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_IOMMU_H
+#define _ASM_IOMMU_H
+
+#include <linux/config.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+
+/*
+ * IOMAP_MAX_ORDER defines the largest contiguous block
+ * of dma space we can get. IOMAP_MAX_ORDER = 13
+ * allows up to 2**12 pages (4096 * 4096) = 16 MB
+ */
+#define IOMAP_MAX_ORDER 13
+
+struct iommu_table {
+ unsigned long it_busno; /* Bus number this table belongs to */
+ unsigned long it_size; /* Size of iommu table in entries */
+ unsigned long it_offset; /* Offset into global table */
+ unsigned long it_base; /* mapped address of tce table */
+ unsigned long it_index; /* which iommu table this is */
+ unsigned long it_type; /* type: PCI or Virtual Bus */
+ unsigned long it_blocksize; /* Entries in each block (cacheline) */
+ unsigned long it_hint; /* Hint for next alloc */
+ unsigned long it_largehint; /* Hint for large allocs */
+ unsigned long it_halfpoint; /* Breaking point for small/large allocs */
+ spinlock_t it_lock; /* Protects it_map */
+ unsigned long *it_map; /* A simple allocation bitmap for now */
+};
+
+struct scatterlist;
+struct device_node;
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+
+/* Walks all buses and creates iommu tables */
+extern void iommu_setup_pSeries(void);
+extern void iommu_setup_u3(void);
+
+/* Frees table for an individual device node */
+extern void iommu_free_table(struct device_node *dn);
+
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+
+#ifdef CONFIG_PPC_PSERIES
+
+/* Creates table for an individual device node */
+extern void iommu_devnode_init_pSeries(struct device_node *dn);
+
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef CONFIG_PPC_ISERIES
+
+/* Creates table for an individual device node */
+extern void iommu_devnode_init_iSeries(struct device_node *dn);
+
+#endif /* CONFIG_PPC_ISERIES */
+
+/* Initializes an iommu_table based on values set in the passed-in
+ * structure
+ */
+extern struct iommu_table *iommu_init_table(struct iommu_table * tbl);
+
+extern int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction);
+extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+ int nelems, enum dma_data_direction direction);
+
+extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
+ size_t size, enum dma_data_direction direction);
+extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction);
+
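+/*
+ * Typical streaming-map usage of the single-buffer helpers above (an
+ * illustrative sketch; "tbl", "buf" and "len" are placeholders):
+ *
+ *	dma_addr_t handle = iommu_map_single(tbl, buf, len, DMA_TO_DEVICE);
+ *	... hand "handle" to the device and wait for the transfer ...
+ *	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
+ */
+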
+extern void iommu_init_early_pSeries(void);
+extern void iommu_init_early_iSeries(void);
+extern void iommu_init_early_u3(void);
+
+#ifdef CONFIG_PCI
+extern void pci_iommu_init(void);
+extern void pci_direct_iommu_init(void);
+#else
+static inline void pci_iommu_init(void) { }
+#endif
+
+extern void alloc_u3_dart_table(void);
+
+#endif /* _ASM_IOMMU_H */
diff --git a/include/asm-powerpc/ipcbuf.h b/include/asm-powerpc/ipcbuf.h
new file mode 100644
index 00000000000..2c3e1d94db1
--- /dev/null
+++ b/include/asm-powerpc/ipcbuf.h
@@ -0,0 +1,34 @@
+#ifndef _ASM_POWERPC_IPCBUF_H
+#define _ASM_POWERPC_IPCBUF_H
+
+/*
+ * The ipc64_perm structure for the powerpc is identical to
+ * kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the
+ * kernel. Note extra padding because this structure is passed back
+ * and forth between kernel and user space. Pad space is left for:
+ * - 1 32-bit value to fill up for 8-byte alignment
+ * - 2 miscellaneous 64-bit values
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid_t uid;
+ __kernel_gid_t gid;
+ __kernel_uid_t cuid;
+ __kernel_gid_t cgid;
+ __kernel_mode_t mode;
+ unsigned int seq;
+ unsigned int __pad1;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
+};
+
+#endif /* _ASM_POWERPC_IPCBUF_H */
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
new file mode 100644
index 00000000000..b3935ea28ff
--- /dev/null
+++ b/include/asm-powerpc/irq.h
@@ -0,0 +1,504 @@
+#ifdef __KERNEL__
+#ifndef _ASM_POWERPC_IRQ_H
+#define _ASM_POWERPC_IRQ_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/threads.h>
+
+#include <asm/types.h>
+#include <asm/atomic.h>
+
+/* this number is used when no interrupt has been assigned */
+#define NO_IRQ (-1)
+
+/*
+ * These constants are used for passing information about interrupt
+ * signal polarity and level/edge sensing to the low-level PIC chip
+ * drivers.
+ */
+#define IRQ_SENSE_MASK 0x1
+#define IRQ_SENSE_LEVEL 0x1 /* interrupt on active level */
+#define IRQ_SENSE_EDGE 0x0 /* interrupt triggered by edge */
+
+#define IRQ_POLARITY_MASK 0x2
+#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
+#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
+
+/*
+ * The IRQ_PER_CPU IRQ line status flag is supported on this architecture
+ */
+#define ARCH_HAS_IRQ_PER_CPU
+
+#define get_irq_desc(irq) (&irq_desc[(irq)])
+
+/* Define a way to iterate across irqs. */
+#define for_each_irq(i) \
+ for ((i) = 0; (i) < NR_IRQS; ++(i))
+
+#ifdef CONFIG_PPC64
+
+/*
+ * Maximum number of interrupt sources that we can handle.
+ */
+#define NR_IRQS 512
+
+/* Interrupt numbers are virtual in case they are sparsely
+ * distributed by the hardware.
+ */
+extern unsigned int virt_irq_to_real_map[NR_IRQS];
+
+/* Create a mapping for a real_irq if it doesn't already exist.
+ * Return the virtual irq as a convenience.
+ */
+int virt_irq_create_mapping(unsigned int real_irq);
+void virt_irq_init(void);
+
+static inline unsigned int virt_irq_to_real(unsigned int virt_irq)
+{
+ return virt_irq_to_real_map[virt_irq];
+}
+
+extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
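+
+/*
+ * Example (illustrative only): a PIC driver seeing hardware source 0x123
+ * registers it once and then works with the returned virtual number:
+ *
+ *	unsigned int virq = virt_irq_create_mapping(0x123);
+ *
+ * request_irq() and friends then take virq, while virt_irq_to_real(virq)
+ * recovers the hardware interrupt number when talking to the controller.
+ */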
+
+/*
+ * List of interrupt controllers.
+ */
+#define IC_INVALID 0
+#define IC_OPEN_PIC 1
+#define IC_PPC_XIC 2
+#define IC_CELL_PIC 3
+#define IC_ISERIES 4
+
+extern u64 ppc64_interrupt_controller;
+
+#else /* 32-bit */
+
+#if defined(CONFIG_40x)
+#include <asm/ibm4xx.h>
+
+#ifndef NR_BOARD_IRQS
+#define NR_BOARD_IRQS 0
+#endif
+
+#ifndef UIC_WIDTH /* Number of interrupts per device */
+#define UIC_WIDTH 32
+#endif
+
+#ifndef NR_UICS /* number of UIC devices */
+#define NR_UICS 1
+#endif
+
+#if defined (CONFIG_403)
+/*
+ * The PowerPC 403 cores' Asynchronous Interrupt Controller (AIC) has
+ * 32 possible interrupts, a majority of which are not implemented on
+ * all cores. There are six configurable, external interrupt pins and
+ * there are eight internal interrupts for the on-chip serial port
+ * (SPU), DMA controller, and JTAG controller.
+ *
+ */
+
+#define NR_AIC_IRQS 32
+#define NR_IRQS (NR_AIC_IRQS + NR_BOARD_IRQS)
+
+#elif !defined (CONFIG_403)
+
+/*
+ * The PowerPC 405 cores' Universal Interrupt Controller (UIC) has 32
+ * possible interrupts as well. There are seven configurable external
+ * interrupt pins and there are 17 internal interrupts for the on-chip
+ * serial port, DMA controller, on-chip Ethernet controller, PCI, etc.
+ *
+ */
+
+
+#define NR_UIC_IRQS UIC_WIDTH
+#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
+#endif
+
+#elif defined(CONFIG_44x)
+#include <asm/ibm44x.h>
+
+#define NR_UIC_IRQS 32
+#define NR_IRQS ((NR_UIC_IRQS * NR_UICS) + NR_BOARD_IRQS)
+
+#elif defined(CONFIG_8xx)
+
+/* Now include the board configuration specific associations.
+*/
+#include <asm/mpc8xx.h>
+
+/* The MPC8xx cores have 16 possible interrupts. There are eight
+ * possible level sensitive interrupts assigned and generated internally
+ * from such devices as CPM, PCMCIA, RTC, PIT, TimeBase and Decrementer.
+ * There are eight external interrupts (IRQs) that can be configured
+ * as either level or edge sensitive.
+ *
+ * On some implementations, there is also the possibility of an 8259
+ * through the PCI and PCI-ISA bridges.
+ *
+ * We are "flattening" the interrupt vectors of the cascaded CPM
+ * and 8259 interrupt controllers so that we can uniquely identify
+ * any interrupt source with a single integer.
+ */
+#define NR_SIU_INTS 16
+#define NR_CPM_INTS 32
+#ifndef NR_8259_INTS
+#define NR_8259_INTS 0
+#endif
+
+#define SIU_IRQ_OFFSET 0
+#define CPM_IRQ_OFFSET (SIU_IRQ_OFFSET + NR_SIU_INTS)
+#define I8259_IRQ_OFFSET (CPM_IRQ_OFFSET + NR_CPM_INTS)
+
+#define NR_IRQS (NR_SIU_INTS + NR_CPM_INTS + NR_8259_INTS)
+
+/* These values must be zero-based and map 1:1 with the SIU configuration.
+ * They are used throughout the 8xx I/O subsystem to generate
+ * interrupt masks, flags, and other control patterns. This is why the
+ * current kernel assumption of the 8259 as the base controller is such
+ * a pain in the butt.
+ */
+#define SIU_IRQ0 (0) /* Highest priority */
+#define SIU_LEVEL0 (1)
+#define SIU_IRQ1 (2)
+#define SIU_LEVEL1 (3)
+#define SIU_IRQ2 (4)
+#define SIU_LEVEL2 (5)
+#define SIU_IRQ3 (6)
+#define SIU_LEVEL3 (7)
+#define SIU_IRQ4 (8)
+#define SIU_LEVEL4 (9)
+#define SIU_IRQ5 (10)
+#define SIU_LEVEL5 (11)
+#define SIU_IRQ6 (12)
+#define SIU_LEVEL6 (13)
+#define SIU_IRQ7 (14)
+#define SIU_LEVEL7 (15)
+
+#define MPC8xx_INT_FEC1 SIU_LEVEL1
+#define MPC8xx_INT_FEC2 SIU_LEVEL3
+
+#define MPC8xx_INT_SCC1 (CPM_IRQ_OFFSET + CPMVEC_SCC1)
+#define MPC8xx_INT_SCC2 (CPM_IRQ_OFFSET + CPMVEC_SCC2)
+#define MPC8xx_INT_SCC3 (CPM_IRQ_OFFSET + CPMVEC_SCC3)
+#define MPC8xx_INT_SCC4 (CPM_IRQ_OFFSET + CPMVEC_SCC4)
+#define MPC8xx_INT_SMC1 (CPM_IRQ_OFFSET + CPMVEC_SMC1)
+#define MPC8xx_INT_SMC2 (CPM_IRQ_OFFSET + CPMVEC_SMC2)
+
+/* The internal interrupts we can configure as we see fit.
+ * My personal preference is CPM at level 2, which puts it above the
+ * MBX PCI/ISA/IDE interrupts.
+ */
+#ifndef PIT_INTERRUPT
+#define PIT_INTERRUPT SIU_LEVEL0
+#endif
+#ifndef CPM_INTERRUPT
+#define CPM_INTERRUPT SIU_LEVEL2
+#endif
+#ifndef PCMCIA_INTERRUPT
+#define PCMCIA_INTERRUPT SIU_LEVEL6
+#endif
+#ifndef DEC_INTERRUPT
+#define DEC_INTERRUPT SIU_LEVEL7
+#endif
+
+/* Some internal interrupt registers use an 8-bit mask for the interrupt
+ * level instead of a number.
+ */
+#define mk_int_int_mask(IL) (1 << (7 - (IL/2)))
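+
+/*
+ * Worked example (illustrative only): mk_int_int_mask(SIU_LEVEL2) expands
+ * to 1 << (7 - (5 / 2)) == 0x20.
+ */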
+
+#elif defined(CONFIG_83xx)
+#include <asm/mpc83xx.h>
+
+#define NR_IRQS (NR_IPIC_INTS)
+
+#elif defined(CONFIG_85xx)
+/* Now include the board configuration specific associations.
+*/
+#include <asm/mpc85xx.h>
+
+/* The MPC8548 openpic has 48 internal interrupts and 12 external
+ * interrupts.
+ *
+ * We are "flattening" the interrupt vectors of the cascaded CPM
+ * so that we can uniquely identify any interrupt source with a
+ * single integer.
+ */
+#define NR_CPM_INTS 64
+#define NR_EPIC_INTS 60
+#ifndef NR_8259_INTS
+#define NR_8259_INTS 0
+#endif
+#define NUM_8259_INTERRUPTS NR_8259_INTS
+
+#ifndef CPM_IRQ_OFFSET
+#define CPM_IRQ_OFFSET 0
+#endif
+
+#define NR_IRQS (NR_EPIC_INTS + NR_CPM_INTS + NR_8259_INTS)
+
+/* Internal IRQs on MPC85xx OpenPIC */
+
+#ifndef MPC85xx_OPENPIC_IRQ_OFFSET
+#ifdef CONFIG_CPM2
+#define MPC85xx_OPENPIC_IRQ_OFFSET (CPM_IRQ_OFFSET + NR_CPM_INTS)
+#else
+#define MPC85xx_OPENPIC_IRQ_OFFSET 0
+#endif
+#endif
+
+/* Not all of these exist on all MPC85xx implementations */
+#define MPC85xx_IRQ_L2CACHE ( 0 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_ECM ( 1 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_DDR ( 2 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_LBIU ( 3 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_DMA0 ( 4 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_DMA1 ( 5 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_DMA2 ( 6 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_DMA3 ( 7 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_PCI1 ( 8 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_PCI2 ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_RIO_ERROR ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_RIO_BELL (10 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_RIO_TX (11 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_RIO_RX (12 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC1_TX (13 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC1_RX (14 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC3_TX (15 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC3_RX (16 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC3_ERROR (17 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC1_ERROR (18 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC2_TX (19 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC2_RX (20 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC4_TX (21 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC4_RX (22 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC4_ERROR (23 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_TSEC2_ERROR (24 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_FEC (25 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_DUART (26 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_IIC1 (27 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_PERFMON (28 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_SEC2 (29 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_CPM (30 + MPC85xx_OPENPIC_IRQ_OFFSET)
+
+/* The 12 external interrupt lines */
+#define MPC85xx_IRQ_EXT0 (48 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT1 (49 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT2 (50 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT3 (51 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT4 (52 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT5 (53 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT6 (54 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT7 (55 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT8 (56 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT9 (57 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT10 (58 + MPC85xx_OPENPIC_IRQ_OFFSET)
+#define MPC85xx_IRQ_EXT11 (59 + MPC85xx_OPENPIC_IRQ_OFFSET)
+
+/* CPM related interrupts */
+#define SIU_INT_ERROR ((uint)0x00+CPM_IRQ_OFFSET)
+#define SIU_INT_I2C ((uint)0x01+CPM_IRQ_OFFSET)
+#define SIU_INT_SPI ((uint)0x02+CPM_IRQ_OFFSET)
+#define SIU_INT_RISC ((uint)0x03+CPM_IRQ_OFFSET)
+#define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET)
+#define SIU_INT_SMC2 ((uint)0x05+CPM_IRQ_OFFSET)
+#define SIU_INT_USB ((uint)0x0b+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER1 ((uint)0x0c+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER2 ((uint)0x0d+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER3 ((uint)0x0e+CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER4 ((uint)0x0f+CPM_IRQ_OFFSET)
+#define SIU_INT_FCC1 ((uint)0x20+CPM_IRQ_OFFSET)
+#define SIU_INT_FCC2 ((uint)0x21+CPM_IRQ_OFFSET)
+#define SIU_INT_FCC3 ((uint)0x22+CPM_IRQ_OFFSET)
+#define SIU_INT_MCC1 ((uint)0x24+CPM_IRQ_OFFSET)
+#define SIU_INT_MCC2 ((uint)0x25+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET)
+#define SIU_INT_PC15 ((uint)0x30+CPM_IRQ_OFFSET)
+#define SIU_INT_PC14 ((uint)0x31+CPM_IRQ_OFFSET)
+#define SIU_INT_PC13 ((uint)0x32+CPM_IRQ_OFFSET)
+#define SIU_INT_PC12 ((uint)0x33+CPM_IRQ_OFFSET)
+#define SIU_INT_PC11 ((uint)0x34+CPM_IRQ_OFFSET)
+#define SIU_INT_PC10 ((uint)0x35+CPM_IRQ_OFFSET)
+#define SIU_INT_PC9 ((uint)0x36+CPM_IRQ_OFFSET)
+#define SIU_INT_PC8 ((uint)0x37+CPM_IRQ_OFFSET)
+#define SIU_INT_PC7 ((uint)0x38+CPM_IRQ_OFFSET)
+#define SIU_INT_PC6 ((uint)0x39+CPM_IRQ_OFFSET)
+#define SIU_INT_PC5 ((uint)0x3a+CPM_IRQ_OFFSET)
+#define SIU_INT_PC4 ((uint)0x3b+CPM_IRQ_OFFSET)
+#define SIU_INT_PC3 ((uint)0x3c+CPM_IRQ_OFFSET)
+#define SIU_INT_PC2 ((uint)0x3d+CPM_IRQ_OFFSET)
+#define SIU_INT_PC1 ((uint)0x3e+CPM_IRQ_OFFSET)
+#define SIU_INT_PC0 ((uint)0x3f+CPM_IRQ_OFFSET)
+
+#else /* CONFIG_40x + CONFIG_8xx */
+/*
+ * This is the number of IRQs for all the remaining ppc platforms
+ * (pmac/chrp/prep), so it is the max of them all
+ */
+#define NR_IRQS 256
+#define __DO_IRQ_CANON 1
+
+#ifndef CONFIG_8260
+
+#define NUM_8259_INTERRUPTS 16
+
+#else /* CONFIG_8260 */
+
+/* The 8260 has an internal interrupt controller with a maximum of
+ * 64 IRQs. We will use NR_IRQS from above since it is large enough.
+ * Don't be confused by the 8260 documentation where they list an
+ * "interrupt number" and "interrupt vector". We are only interested
+ * in the interrupt vector. There are "reserved" holes where the
+ * vector number increases, but the interrupt number in the table does not.
+ * (Document errata updates have fixed this...make sure you have up to
+ * date processor documentation -- Dan).
+ */
+
+#ifndef CPM_IRQ_OFFSET
+#define CPM_IRQ_OFFSET 0
+#endif
+
+#define NR_CPM_INTS 64
+
+#define SIU_INT_ERROR ((uint)0x00 + CPM_IRQ_OFFSET)
+#define SIU_INT_I2C ((uint)0x01 + CPM_IRQ_OFFSET)
+#define SIU_INT_SPI ((uint)0x02 + CPM_IRQ_OFFSET)
+#define SIU_INT_RISC ((uint)0x03 + CPM_IRQ_OFFSET)
+#define SIU_INT_SMC1 ((uint)0x04 + CPM_IRQ_OFFSET)
+#define SIU_INT_SMC2 ((uint)0x05 + CPM_IRQ_OFFSET)
+#define SIU_INT_IDMA1 ((uint)0x06 + CPM_IRQ_OFFSET)
+#define SIU_INT_IDMA2 ((uint)0x07 + CPM_IRQ_OFFSET)
+#define SIU_INT_IDMA3 ((uint)0x08 + CPM_IRQ_OFFSET)
+#define SIU_INT_IDMA4 ((uint)0x09 + CPM_IRQ_OFFSET)
+#define SIU_INT_SDMA ((uint)0x0a + CPM_IRQ_OFFSET)
+#define SIU_INT_USB ((uint)0x0b + CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER1 ((uint)0x0c + CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER2 ((uint)0x0d + CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER3 ((uint)0x0e + CPM_IRQ_OFFSET)
+#define SIU_INT_TIMER4 ((uint)0x0f + CPM_IRQ_OFFSET)
+#define SIU_INT_TMCNT ((uint)0x10 + CPM_IRQ_OFFSET)
+#define SIU_INT_PIT ((uint)0x11 + CPM_IRQ_OFFSET)
+#define SIU_INT_IRQ1 ((uint)0x13 + CPM_IRQ_OFFSET)
+#define SIU_INT_IRQ2 ((uint)0x14 + CPM_IRQ_OFFSET)
+#define SIU_INT_IRQ3 ((uint)0x15 + CPM_IRQ_OFFSET)
+#define SIU_INT_IRQ4 ((uint)0x16 + CPM_IRQ_OFFSET)
+#define SIU_INT_IRQ5 ((uint)0x17 + CPM_IRQ_OFFSET)
+#define SIU_INT_IRQ6 ((uint)0x18 + CPM_IRQ_OFFSET)
+#define SIU_INT_IRQ7 ((uint)0x19 + CPM_IRQ_OFFSET)
+#define SIU_INT_FCC1 ((uint)0x20 + CPM_IRQ_OFFSET)
+#define SIU_INT_FCC2 ((uint)0x21 + CPM_IRQ_OFFSET)
+#define SIU_INT_FCC3 ((uint)0x22 + CPM_IRQ_OFFSET)
+#define SIU_INT_MCC1 ((uint)0x24 + CPM_IRQ_OFFSET)
+#define SIU_INT_MCC2 ((uint)0x25 + CPM_IRQ_OFFSET)
+#define SIU_INT_SCC1 ((uint)0x28 + CPM_IRQ_OFFSET)
+#define SIU_INT_SCC2 ((uint)0x29 + CPM_IRQ_OFFSET)
+#define SIU_INT_SCC3 ((uint)0x2a + CPM_IRQ_OFFSET)
+#define SIU_INT_SCC4 ((uint)0x2b + CPM_IRQ_OFFSET)
+#define SIU_INT_PC15 ((uint)0x30 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC14 ((uint)0x31 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC13 ((uint)0x32 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC12 ((uint)0x33 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC11 ((uint)0x34 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC10 ((uint)0x35 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC9 ((uint)0x36 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC8 ((uint)0x37 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC7 ((uint)0x38 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC6 ((uint)0x39 + CPM_IRQ_OFFSET)
+#define SIU_INT_PC5 ((uint)0x3a + CPM_IRQ_OFFSET)
+#define SIU_INT_PC4 ((uint)0x3b + CPM_IRQ_OFFSET)
+#define SIU_INT_PC3 ((uint)0x3c + CPM_IRQ_OFFSET)
+#define SIU_INT_PC2 ((uint)0x3d + CPM_IRQ_OFFSET)
+#define SIU_INT_PC1 ((uint)0x3e + CPM_IRQ_OFFSET)
+#define SIU_INT_PC0 ((uint)0x3f + CPM_IRQ_OFFSET)
+
+#endif /* CONFIG_8260 */
+
+#endif
+
+#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
+/* pedantic: these are long because they are used with set_bit --RR */
+extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+extern atomic_t ppc_n_lost_interrupts;
+
+#define virt_irq_create_mapping(x) (x)
+
+#endif
+
+/*
+ * Because many systems have two overlapping name spaces for
+ * interrupts (ISA and XICS for example), and the ISA interrupts
+ * have historically not been easy to renumber, we allow ISA
+ * interrupts to take values 0 - 15, and shift up the remaining
+ * interrupts by 0x10.
+ */
+#define NUM_ISA_INTERRUPTS 0x10
+extern int __irq_offset_value;
+
+static inline int irq_offset_up(int irq)
+{
+ return irq + __irq_offset_value;
+}
+
+static inline int irq_offset_down(int irq)
+{
+ return irq - __irq_offset_value;
+}
+
+static inline int irq_offset_value(void)
+{
+ return __irq_offset_value;
+}
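+
+/*
+ * Usage sketch (illustrative, not part of the original interface): a
+ * platform that keeps ISA sources at Linux IRQs 0-15 and sets
+ * __irq_offset_value to NUM_ISA_INTERRUPTS at boot would translate its
+ * native interrupt numbers like this:
+ *
+ *	int linux_irq = irq_offset_up(hw_irq);      hw_irq 0 maps to IRQ 0x10
+ *	int hw_irq2   = irq_offset_down(linux_irq); and back again
+ */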
+
+#ifdef __DO_IRQ_CANON
+extern int ppc_do_canonicalize_irqs;
+#else
+#define ppc_do_canonicalize_irqs 0
+#endif
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ if (ppc_do_canonicalize_irqs && irq == 2)
+ irq = 9;
+ return irq;
+}
+
+extern int distribute_irqs;
+
+struct irqaction;
+struct pt_regs;
+
+#ifdef CONFIG_IRQSTACKS
+/*
+ * Per-cpu stacks for handling hard and soft interrupts.
+ */
+extern struct thread_info *hardirq_ctx[NR_CPUS];
+extern struct thread_info *softirq_ctx[NR_CPUS];
+
+extern void irq_ctx_init(void);
+extern void call_do_softirq(struct thread_info *tp);
+extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
+ struct irqaction *action, struct thread_info *tp);
+
+#define __ARCH_HAS_DO_SOFTIRQ
+
+#else
+#define irq_ctx_init()
+
+#endif /* CONFIG_IRQSTACKS */
+
+extern void do_IRQ(struct pt_regs *regs);
+
+#endif /* _ASM_IRQ_H */
+#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/iseries/hv_call.h b/include/asm-powerpc/iseries/hv_call.h
new file mode 100644
index 00000000000..e9f831c9a5e
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call.h
@@ -0,0 +1,113 @@
+/*
+ * HvCall.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * This file contains the "hypervisor call" interface which is used to
+ * drive the hypervisor from the OS.
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_H
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/paca.h>
+
+/* Type of yield for HvCallBaseYieldProcessor */
+#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */
+#define HvCall_YieldToActive 1 /* Yield until all active procs have run */
+#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */
+
+/* interrupt masks for setEnabledInterrupts */
+#define HvCall_MaskIPI 0x00000001
+#define HvCall_MaskLpEvent 0x00000002
+#define HvCall_MaskLpProd 0x00000004
+#define HvCall_MaskTimeout 0x00000008
+
+/* Log buffer formats */
+#define HvCall_LogBuffer_ASCII 0
+#define HvCall_LogBuffer_EBCDIC 1
+
+#define HvCallBaseAckDeferredInts HvCallBase + 0
+#define HvCallBaseCpmPowerOff HvCallBase + 1
+#define HvCallBaseGetHwPatch HvCallBase + 2
+#define HvCallBaseReIplSpAttn HvCallBase + 3
+#define HvCallBaseSetASR HvCallBase + 4
+#define HvCallBaseSetASRAndRfi HvCallBase + 5
+#define HvCallBaseSetIMR HvCallBase + 6
+#define HvCallBaseSendIPI HvCallBase + 7
+#define HvCallBaseTerminateMachine HvCallBase + 8
+#define HvCallBaseTerminateMachineSrc HvCallBase + 9
+#define HvCallBaseProcessPlicInterrupts HvCallBase + 10
+#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11
+#define HvCallBaseSetVirtualSIT HvCallBase + 12
+#define HvCallBaseVaryOffThisProcessor HvCallBase + 13
+#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14
+#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15
+#define HvCallBaseSendLpProd HvCallBase + 16
+#define HvCallBaseSetEnabledInterrupts HvCallBase + 17
+#define HvCallBaseYieldProcessor HvCallBase + 18
+#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19
+#define HvCallBaseSetVirtualDecr HvCallBase + 20
+#define HvCallBaseClearLogBuffer HvCallBase + 21
+#define HvCallBaseGetLogBufferCodePage HvCallBase + 22
+#define HvCallBaseGetLogBufferFormat HvCallBase + 23
+#define HvCallBaseGetLogBufferLength HvCallBase + 24
+#define HvCallBaseReadLogBuffer HvCallBase + 25
+#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26
+#define HvCallBaseWriteLogBuffer HvCallBase + 27
+#define HvCallBaseRouter28 HvCallBase + 28
+#define HvCallBaseRouter29 HvCallBase + 29
+#define HvCallBaseRouter30 HvCallBase + 30
+#define HvCallBaseSetDebugBus HvCallBase + 31
+
+#define HvCallCcSetDABR HvCallCc + 7
+
+static inline void HvCall_setVirtualDecr(void)
+{
+ /*
+ * Ignore any error return codes - most likely means that the
+ * target value for the LP has been increased and this vary off
+ * would bring us below the new target.
+ */
+ HvCall0(HvCallBaseSetVirtualDecr);
+}
+
+static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm)
+{
+ HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm);
+}
+
+static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts)
+{
+ HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts);
+}
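+
+/*
+ * Illustrative use of the mask bits defined above (a sketch, not taken
+ * from the original code): enable only IPI and LP event interrupts:
+ *
+ *	HvCall_setEnabledInterrupts(HvCall_MaskIPI | HvCall_MaskLpEvent);
+ */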
+
+static inline void HvCall_setLogBufferFormatAndCodepage(int format,
+ u32 codePage)
+{
+ HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage);
+}
+
+extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen);
+
+static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
+{
+ HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
diff --git a/include/asm-powerpc/iseries/hv_call_event.h b/include/asm-powerpc/iseries/hv_call_event.h
new file mode 100644
index 00000000000..46763a30590
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call_event.h
@@ -0,0 +1,253 @@
+/*
+ * HvCallEvent.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*
+ * This file contains the "hypervisor call" interface which is used to
+ * drive the hypervisor from the OS.
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/abs_addr.h>
+
+struct HvLpEvent;
+
+typedef u8 HvLpEvent_Type;
+typedef u8 HvLpEvent_AckInd;
+typedef u8 HvLpEvent_AckType;
+
+struct HvCallEvent_PackedParms {
+ u8 xAckType:1;
+ u8 xAckInd:1;
+ u8 xRsvd:1;
+ u8 xTargetLp:5;
+ u8 xType;
+ u16 xSubtype;
+ HvLpInstanceId xSourceInstId;
+ HvLpInstanceId xTargetInstId;
+};
+
+typedef u8 HvLpDma_Direction;
+typedef u8 HvLpDma_AddressType;
+
+struct HvCallEvent_PackedDmaParms {
+ u8 xDirection:1;
+ u8 xLocalAddrType:1;
+ u8 xRemoteAddrType:1;
+ u8 xRsvd1:5;
+ HvLpIndex xRemoteLp;
+ u8 xType;
+ u8 xRsvd2;
+ HvLpInstanceId xLocalInstId;
+ HvLpInstanceId xRemoteInstId;
+};
+
+typedef u64 HvLpEvent_Rc;
+typedef u64 HvLpDma_Rc;
+
+#define HvCallEventAckLpEvent HvCallEvent + 0
+#define HvCallEventCancelLpEvent HvCallEvent + 1
+#define HvCallEventCloseLpEventPath HvCallEvent + 2
+#define HvCallEventDmaBufList HvCallEvent + 3
+#define HvCallEventDmaSingle HvCallEvent + 4
+#define HvCallEventDmaToSp HvCallEvent + 5
+#define HvCallEventGetOverflowLpEvents HvCallEvent + 6
+#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7
+#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8
+#define HvCallEventOpenLpEventPath HvCallEvent + 9
+#define HvCallEventSetLpEventStack HvCallEvent + 10
+#define HvCallEventSignalLpEvent HvCallEvent + 11
+#define HvCallEventSignalLpEventParms HvCallEvent + 12
+#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13
+#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
+#define HvCallEventRouter15 HvCallEvent + 15
+
+static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
+{
+ HvCall1(HvCallEventGetOverflowLpEvents, queueIndex);
+}
+
+static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
+{
+ HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex);
+}
+
+static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
+ char *eventStackAddr, u32 eventStackSize)
+{
+ u64 abs_addr;
+
+ abs_addr = virt_to_abs(eventStackAddr);
+ HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr,
+ eventStackSize);
+}
+
+static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
+ u16 lpLogicalProcIndex)
+{
+ HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
+ lpLogicalProcIndex);
+}
+
+static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
+{
+ u64 abs_addr;
+
+#ifdef DEBUG_SENDEVENT
+ printk("HvCallEvent_signalLpEvent: *event = %016lx\n ",
+ (unsigned long)event);
+#endif
+ abs_addr = virt_to_abs(event);
+ return HvCall1(HvCallEventSignalLpEvent, abs_addr);
+}
+
+static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
+ HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
+ HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
+ HvLpInstanceId targetInstanceId, u64 correlationToken,
+ u64 eventData1, u64 eventData2, u64 eventData3,
+ u64 eventData4, u64 eventData5)
+{
+ /* Pack the misc bits into a single Dword to pass to PLIC */
+ union {
+ struct HvCallEvent_PackedParms parms;
+ u64 dword;
+ } packed;
+ packed.parms.xAckType = ackType;
+ packed.parms.xAckInd = ackInd;
+ packed.parms.xRsvd = 0;
+ packed.parms.xTargetLp = targetLp;
+ packed.parms.xType = type;
+ packed.parms.xSubtype = subtype;
+ packed.parms.xSourceInstId = sourceInstanceId;
+ packed.parms.xTargetInstId = targetInstanceId;
+
+ return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
+ correlationToken, eventData1, eventData2,
+ eventData3, eventData4, eventData5);
+}
+
+static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
+{
+ u64 abs_addr;
+
+ abs_addr = virt_to_abs(event);
+ return HvCall1(HvCallEventAckLpEvent, abs_addr);
+}
+
+static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
+{
+ u64 abs_addr;
+
+ abs_addr = virt_to_abs(event);
+ return HvCall1(HvCallEventCancelLpEvent, abs_addr);
+}
+
+static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
+ HvLpIndex targetLp, HvLpEvent_Type type)
+{
+ return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
+}
+
+static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
+ HvLpIndex targetLp, HvLpEvent_Type type)
+{
+ return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
+}
+
+static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
+ HvLpEvent_Type type)
+{
+ HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
+}
+
+static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
+ HvLpEvent_Type type)
+{
+ HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
+}
+
+static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
+ HvLpIndex remoteLp, HvLpDma_Direction direction,
+ HvLpInstanceId localInstanceId,
+ HvLpInstanceId remoteInstanceId,
+ HvLpDma_AddressType localAddressType,
+ HvLpDma_AddressType remoteAddressType,
+ /* Do these need to be converted to absolute addresses? */
+ u64 localBufList, u64 remoteBufList, u32 transferLength)
+{
+ /* Pack the misc bits into a single Dword to pass to PLIC */
+ union {
+ struct HvCallEvent_PackedDmaParms parms;
+ u64 dword;
+ } packed;
+
+ packed.parms.xDirection = direction;
+ packed.parms.xLocalAddrType = localAddressType;
+ packed.parms.xRemoteAddrType = remoteAddressType;
+ packed.parms.xRsvd1 = 0;
+ packed.parms.xRemoteLp = remoteLp;
+ packed.parms.xType = type;
+ packed.parms.xRsvd2 = 0;
+ packed.parms.xLocalInstId = localInstanceId;
+ packed.parms.xRemoteInstId = remoteInstanceId;
+
+ return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
+ remoteBufList, transferLength);
+}
+
+static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
+ HvLpIndex remoteLp, HvLpDma_Direction direction,
+ HvLpInstanceId localInstanceId,
+ HvLpInstanceId remoteInstanceId,
+ HvLpDma_AddressType localAddressType,
+ HvLpDma_AddressType remoteAddressType,
+ u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength)
+{
+ /* Pack the misc bits into a single Dword to pass to PLIC */
+ union {
+ struct HvCallEvent_PackedDmaParms parms;
+ u64 dword;
+ } packed;
+
+ packed.parms.xDirection = direction;
+ packed.parms.xLocalAddrType = localAddressType;
+ packed.parms.xRemoteAddrType = remoteAddressType;
+ packed.parms.xRsvd1 = 0;
+ packed.parms.xRemoteLp = remoteLp;
+ packed.parms.xType = type;
+ packed.parms.xRsvd2 = 0;
+ packed.parms.xLocalInstId = localInstanceId;
+ packed.parms.xRemoteInstId = remoteInstanceId;
+
+ return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword,
+ localAddrOrTce, remoteAddrOrTce, transferLength);
+}
+
+static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
+ u32 length, HvLpDma_Direction dir)
+{
+ u64 abs_addr;
+
+ abs_addr = virt_to_abs(local);
+ return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
diff --git a/include/asm-powerpc/iseries/hv_call_sc.h b/include/asm-powerpc/iseries/hv_call_sc.h
new file mode 100644
index 00000000000..dec7e9d9ab7
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call_sc.h
@@ -0,0 +1,51 @@
+/*
+ * HvCallSc.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
+
+#include <linux/types.h>
+
+#define HvCallBase 0x8000000000000000ul
+#define HvCallCc 0x8001000000000000ul
+#define HvCallCfg 0x8002000000000000ul
+#define HvCallEvent 0x8003000000000000ul
+#define HvCallHpt 0x8004000000000000ul
+#define HvCallPci 0x8005000000000000ul
+#define HvCallSm 0x8007000000000000ul
+#define HvCallXm 0x8009000000000000ul
+
+extern u64 HvCall0(u64);
+extern u64 HvCall1(u64, u64);
+extern u64 HvCall2(u64, u64, u64);
+extern u64 HvCall3(u64, u64, u64, u64);
+extern u64 HvCall4(u64, u64, u64, u64, u64);
+extern u64 HvCall5(u64, u64, u64, u64, u64, u64);
+extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64);
+extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64);
+
+extern u64 HvCall0Ret16(u64, void *);
+extern u64 HvCall1Ret16(u64, void *, u64);
+extern u64 HvCall2Ret16(u64, void *, u64, u64);
+extern u64 HvCall3Ret16(u64, void *, u64, u64, u64);
+extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64);
+extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
+extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
+extern u64 HvCall7Ret16(u64, void *, u64, u64, u64, u64, u64, u64, u64);
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
diff --git a/include/asm-powerpc/iseries/hv_call_xm.h b/include/asm-powerpc/iseries/hv_call_xm.h
new file mode 100644
index 00000000000..ca9202cb01e
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_call_xm.h
@@ -0,0 +1,78 @@
+/*
+ * This file contains the "hypervisor call" interface which is used to
+ * drive the hypervisor from SLIC.
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
+#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+
+#define HvCallXmGetTceTableParms HvCallXm + 0
+#define HvCallXmTestBus HvCallXm + 1
+#define HvCallXmConnectBusUnit HvCallXm + 2
+#define HvCallXmLoadTod HvCallXm + 8
+#define HvCallXmTestBusUnit HvCallXm + 9
+#define HvCallXmSetTce HvCallXm + 11
+#define HvCallXmSetTces HvCallXm + 13
+
+/*
+ * Structure passed to HvCallXm_getTceTableParms
+ */
+struct iommu_table_cb {
+ unsigned long itc_busno; /* Bus number for this tce table */
+ unsigned long itc_start; /* Will be NULL for secondary */
+ unsigned long itc_totalsize; /* Size (in pages) of whole table */
+ unsigned long itc_offset; /* Index into real tce table of the
+ start of our section */
+ unsigned long itc_size; /* Size (in pages) of our section */
+ unsigned long itc_index; /* Index of this tce table */
+ unsigned short itc_maxtables; /* Max num of tables for partition */
+ unsigned char itc_virtbus; /* Flag to indicate virtual bus */
+ unsigned char itc_slotno; /* IOA Tce Slot Index */
+ unsigned char itc_rsvd[4];
+};
+
+static inline void HvCallXm_getTceTableParms(u64 cb)
+{
+ HvCall1(HvCallXmGetTceTableParms, cb);
+}
+
+static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce)
+{
+ return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce);
+}
+
+static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset,
+ u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4)
+{
+ return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces,
+ tce1, tce2, tce3, tce4);
+}
+
+static inline u64 HvCallXm_testBus(u16 busNumber)
+{
+ return HvCall1(HvCallXmTestBus, busNumber);
+}
+
+static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber,
+ u8 deviceId)
+{
+ return HvCall2(HvCallXmTestBusUnit, busNumber,
+ (subBusNumber << 8) | deviceId);
+}
+
+static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber,
+ u8 deviceId, u64 interruptToken)
+{
+ return HvCall5(HvCallXmConnectBusUnit, busNumber,
+ (subBusNumber << 8) | deviceId, interruptToken, 0,
+ 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */);
+}
+
+static inline u64 HvCallXm_loadTod(void)
+{
+ return HvCall0(HvCallXmLoadTod);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/include/asm-powerpc/iseries/hv_lp_config.h b/include/asm-powerpc/iseries/hv_lp_config.h
new file mode 100644
index 00000000000..bc00f036bca
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_lp_config.h
@@ -0,0 +1,138 @@
+/*
+ * HvLpConfig.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
+#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
+
+/*
+ * This file contains the interface to the LPAR configuration data
+ * to determine which resources should be allocated to each partition.
+ */
+
+#include <asm/iseries/hv_call_sc.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/it_lp_naca.h>
+
+enum {
+ HvCallCfg_Cur = 0,
+ HvCallCfg_Init = 1,
+ HvCallCfg_Max = 2,
+ HvCallCfg_Min = 3
+};
+
+#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6
+#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7
+#define HvCallCfgGetMsChunks HvCallCfg + 9
+#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20
+#define HvCallCfgGetSharedProcUnits HvCallCfg + 21
+#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22
+#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30
+#define HvCallCfgGetHostingLpIndex HvCallCfg + 32
+
+extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
+
+static inline HvLpIndex HvLpConfig_getLpIndex(void)
+{
+ return itLpNaca.xLpIndex;
+}
+
+static inline HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
+{
+ return itLpNaca.xPrimaryLpIndex;
+}
+
+static inline u64 HvLpConfig_getMsChunks(void)
+{
+ return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(),
+ HvCallCfg_Cur);
+}
+
+static inline u64 HvLpConfig_getSystemPhysicalProcessors(void)
+{
+ return HvCall0(HvCallCfgGetSystemPhysicalProcessors);
+}
+
+static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
+{
+ return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI);
+}
+
+static inline u64 HvLpConfig_getPhysicalProcessors(void)
+{
+ return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
+ HvCallCfg_Cur);
+}
+
+static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void)
+{
+ return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex());
+}
+
+static inline u64 HvLpConfig_getSharedProcUnits(void)
+{
+ return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
+ HvCallCfg_Cur);
+}
+
+static inline u64 HvLpConfig_getMaxSharedProcUnits(void)
+{
+ return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
+ HvCallCfg_Max);
+}
+
+static inline u64 HvLpConfig_getMaxPhysicalProcessors(void)
+{
+ return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
+ HvCallCfg_Max);
+}
+
+static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(
+ HvLpIndex lp)
+{
+ /*
+ * This is a new function in V5R1 so calls to this on older
+ * hypervisors will return -1
+ */
+ u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp);
+ if (retVal == -1)
+ retVal = 0;
+ return retVal;
+}
+
+static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void)
+{
+ return HvLpConfig_getVirtualLanIndexMapForLp(
+ HvLpConfig_getLpIndex_outline());
+}
+
+static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1,
+ HvLpIndex lp2)
+{
+ HvLpVirtualLanIndexMap virtualLanIndexMap1 =
+ HvLpConfig_getVirtualLanIndexMapForLp(lp1);
+ HvLpVirtualLanIndexMap virtualLanIndexMap2 =
+ HvLpConfig_getVirtualLanIndexMapForLp(lp2);
+ return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0);
+}
+
+static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
+{
+ return HvCall1(HvCallCfgGetHostingLpIndex, lp);
+}
+
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
diff --git a/include/asm-powerpc/iseries/hv_lp_event.h b/include/asm-powerpc/iseries/hv_lp_event.h
new file mode 100644
index 00000000000..499ab1ad018
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_lp_event.h
@@ -0,0 +1,142 @@
+/*
+ * HvLpEvent.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* This file contains the class for HV events in the system. */
+
+#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
+#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
+
+#include <asm/types.h>
+#include <asm/ptrace.h>
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
+
+/*
+ * HvLpEvent is the structure for Lp Event messages passed between
+ * partitions through PLIC.
+ */
+
+struct HvEventFlags {
+ u8 xValid:1; /* Indicates a valid request x00-x00 */
+ u8 xRsvd1:4; /* Reserved ... */
+ u8 xAckType:1; /* Immediate or deferred ... */
+ u8 xAckInd:1; /* Indicates if ACK required ... */
+ u8 xFunction:1; /* Interrupt or Acknowledge ... */
+};
+
+
+struct HvLpEvent {
+ struct HvEventFlags xFlags; /* Event flags x00-x00 */
+ u8 xType; /* Type of message x01-x01 */
+ u16 xSubtype; /* Subtype for event x02-x03 */
+ u8 xSourceLp; /* Source LP x04-x04 */
+ u8 xTargetLp; /* Target LP x05-x05 */
+ u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */
+ u8 xRc; /* RC for Ack flows x07-x07 */
+ u16 xSourceInstanceId; /* Source side's instance id x08-x09 */
+ u16 xTargetInstanceId; /* Target side's instance id x0A-x0B */
+ union {
+ u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */
+ u16 xSubtypeDataShort[2]; /* Data as 2 shorts */
+ u8 xSubtypeDataChar[4]; /* Data as 4 chars */
+ } x;
+
+ u64 xCorrelationToken; /* Unique value for source/type x10-x17 */
+};
+
+typedef void (*LpEventHandler)(struct HvLpEvent *, struct pt_regs *);
+
+/* Register a handler for an event type - returns 0 on success */
+extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType,
+ LpEventHandler hdlr);
+
+/*
+ * Unregister a handler for an event type
+ *
+ * This call will sleep until the handler being removed is guaranteed to
+ * be no longer executing on any CPU. Do not call with locks held.
+ *
+ * returns 0 on success
+ * Unregister will fail if there are any paths open for the type
+ */
+extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType);
+
+/*
+ * Open an Lp Event Path for an event type
+ * returns 0 on success
+ * openPath will fail if there is no handler registered for the event type.
+ * The lpIndex specified is the partition index for the target partition
+ * (for VirtualIo, VirtualLan and SessionMgr); other types specify zero
+ */
+extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
+
+/*
+ * Close an Lp Event Path for a type and partition
+ * returns 0 on success
+ */
+extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
+
+#define HvLpEvent_Type_Hypervisor 0
+#define HvLpEvent_Type_MachineFac 1
+#define HvLpEvent_Type_SessionMgr 2
+#define HvLpEvent_Type_SpdIo 3
+#define HvLpEvent_Type_VirtualBus 4
+#define HvLpEvent_Type_PciIo 5
+#define HvLpEvent_Type_RioIo 6
+#define HvLpEvent_Type_VirtualLan 7
+#define HvLpEvent_Type_VirtualIo 8
+#define HvLpEvent_Type_NumTypes 9
+
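+/*
+ * Typical flow sketched from the declarations above (illustrative only;
+ * my_handler, targetLpIndex and the choice of VirtualLan are hypothetical):
+ *
+ *	if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualLan, my_handler) == 0)
+ *		HvLpEvent_openPath(HvLpEvent_Type_VirtualLan, targetLpIndex);
+ */
+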
+#define HvLpEvent_Rc_Good 0
+#define HvLpEvent_Rc_BufferNotAvailable 1
+#define HvLpEvent_Rc_Cancelled 2
+#define HvLpEvent_Rc_GenericError 3
+#define HvLpEvent_Rc_InvalidAddress 4
+#define HvLpEvent_Rc_InvalidPartition 5
+#define HvLpEvent_Rc_InvalidSize 6
+#define HvLpEvent_Rc_InvalidSubtype 7
+#define HvLpEvent_Rc_InvalidSubtypeData 8
+#define HvLpEvent_Rc_InvalidType 9
+#define HvLpEvent_Rc_PartitionDead 10
+#define HvLpEvent_Rc_PathClosed 11
+#define HvLpEvent_Rc_SubtypeError 12
+
+#define HvLpEvent_Function_Ack 0
+#define HvLpEvent_Function_Int 1
+
+#define HvLpEvent_AckInd_NoAck 0
+#define HvLpEvent_AckInd_DoAck 1
+
+#define HvLpEvent_AckType_ImmediateAck 0
+#define HvLpEvent_AckType_DeferredAck 1
+
+#define HvLpDma_Direction_LocalToRemote 0
+#define HvLpDma_Direction_RemoteToLocal 1
+
+#define HvLpDma_AddressType_TceIndex 0
+#define HvLpDma_AddressType_RealAddress 1
+
+#define HvLpDma_Rc_Good 0
+#define HvLpDma_Rc_Error 1
+#define HvLpDma_Rc_PartitionDead 2
+#define HvLpDma_Rc_PathClosed 3
+#define HvLpDma_Rc_InvalidAddress 4
+#define HvLpDma_Rc_InvalidLength 5
+
+#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
diff --git a/include/asm-powerpc/iseries/hv_types.h b/include/asm-powerpc/iseries/hv_types.h
new file mode 100644
index 00000000000..c38f7e3d01d
--- /dev/null
+++ b/include/asm-powerpc/iseries/hv_types.h
@@ -0,0 +1,113 @@
+/*
+ * HvTypes.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
+#define _ASM_POWERPC_ISERIES_HV_TYPES_H
+
+/*
+ * General typedefs for the hypervisor.
+ */
+
+#include <asm/types.h>
+
+typedef u8 HvLpIndex;
+typedef u16 HvLpInstanceId;
+typedef u64 HvLpTOD;
+typedef u64 HvLpSystemSerialNum;
+typedef u8 HvLpDeviceSerialNum[12];
+typedef u16 HvLpSanHwSet;
+typedef u16 HvLpBus;
+typedef u16 HvLpBoard;
+typedef u16 HvLpCard;
+typedef u8 HvLpDeviceType[4];
+typedef u8 HvLpDeviceModel[3];
+typedef u64 HvIoToken;
+typedef u8 HvLpName[8];
+typedef u32 HvIoId;
+typedef u64 HvRealMemoryIndex;
+typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */
+typedef u16 HvLpVrmIndex;
+typedef u32 HvXmGenerationId;
+typedef u8 HvLpBusPool;
+typedef u8 HvLpSharedPoolIndex;
+typedef u16 HvLpSharedProcUnitsX100;
+typedef u8 HvLpVirtualLanIndex;
+typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */
+typedef u16 HvBusNumber; /* Hypervisor Bus Number */
+typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */
+typedef u8 HvAgentId; /* Hypervisor DevFn */
+
+
+#define HVMAXARCHITECTEDLPS 32
+#define HVMAXARCHITECTEDVIRTUALLANS 16
+#define HVMAXARCHITECTEDVIRTUALDISKS 32
+#define HVMAXARCHITECTEDVIRTUALCDROMS 8
+#define HVMAXARCHITECTEDVIRTUALTAPES 8
+#define HVCHUNKSIZE (256 * 1024)
+#define HVPAGESIZE (4 * 1024)
+#define HVLPMINMEGSPRIMARY 256
+#define HVLPMINMEGSSECONDARY 64
+#define HVCHUNKSPERMEG 4
+#define HVPAGESPERMEG 256
+#define HVPAGESPERCHUNK 64
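+/* Consistency note: HVCHUNKSIZE / HVPAGESIZE = 64 = HVPAGESPERCHUNK,
+ * and HVCHUNKSPERMEG * HVCHUNKSIZE = HVPAGESPERMEG * HVPAGESIZE = 1 MB. */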
+
+#define HvLpIndexInvalid ((HvLpIndex)0xff)
+
+/*
+ * Enums for the sub-components under PLIC
+ * Used in HvCall and HvPrimaryCall
+ */
+enum {
+ HvCallCompId = 0,
+ HvCallCpuCtlsCompId = 1,
+ HvCallCfgCompId = 2,
+ HvCallEventCompId = 3,
+ HvCallHptCompId = 4,
+ HvCallPciCompId = 5,
+ HvCallSlmCompId = 6,
+ HvCallSmCompId = 7,
+ HvCallSpdCompId = 8,
+ HvCallXmCompId = 9,
+ HvCallRioCompId = 10,
+ HvCallRsvd3CompId = 11,
+ HvCallRsvd2CompId = 12,
+ HvCallRsvd1CompId = 13,
+ HvCallMaxCompId = 14,
+ HvPrimaryCallCompId = 0,
+ HvPrimaryCallCfgCompId = 1,
+ HvPrimaryCallPciCompId = 2,
+ HvPrimaryCallSmCompId = 3,
+ HvPrimaryCallSpdCompId = 4,
+ HvPrimaryCallXmCompId = 5,
+ HvPrimaryCallRioCompId = 6,
+ HvPrimaryCallRsvd7CompId = 7,
+ HvPrimaryCallRsvd6CompId = 8,
+ HvPrimaryCallRsvd5CompId = 9,
+ HvPrimaryCallRsvd4CompId = 10,
+ HvPrimaryCallRsvd3CompId = 11,
+ HvPrimaryCallRsvd2CompId = 12,
+ HvPrimaryCallRsvd1CompId = 13,
+ HvPrimaryCallMaxCompId = HvCallMaxCompId
+};
+
+struct HvLpBufferList {
+ u64 addr;
+ u64 len;
+};
+
+#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
diff --git a/include/asm-powerpc/iseries/iseries_io.h b/include/asm-powerpc/iseries/iseries_io.h
new file mode 100644
index 00000000000..56b2113ff0f
--- /dev/null
+++ b/include/asm-powerpc/iseries/iseries_io.h
@@ -0,0 +1,49 @@
+#ifndef _ASM_POWERPC_ISERIES_ISERIES_IO_H
+#define _ASM_POWERPC_ISERIES_ISERIES_IO_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_PPC_ISERIES
+#include <linux/types.h>
+/*
+ * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000.
+ *
+ * Remaps io.h for iSeries I/O
+ * Copyright (C) 2000 Allan H Trautman, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the:
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
+ *
+ * Change Activity:
+ * Created December 28, 2000
+ * End Change Activity
+ */
+
+extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress);
+extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress);
+extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress);
+extern void iSeries_Write_Byte(u8 IoData, volatile void __iomem * IoAddress);
+extern void iSeries_Write_Word(u16 IoData, volatile void __iomem * IoAddress);
+extern void iSeries_Write_Long(u32 IoData, volatile void __iomem * IoAddress);
+
+extern void iSeries_memset_io(volatile void __iomem *dest, char x, size_t n);
+extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source,
+ size_t n);
+extern void iSeries_memcpy_fromio(void *dest,
+ const volatile void __iomem *source, size_t n);
+
+#endif /* CONFIG_PPC_ISERIES */
+#endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
diff --git a/include/asm-powerpc/iseries/it_exp_vpd_panel.h b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
new file mode 100644
index 00000000000..66a17a230c5
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_exp_vpd_panel.h
@@ -0,0 +1,52 @@
+/*
+ * ItExtVpdPanel.h
+ * Copyright (C) 2002 Dave Boutcher IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
+#define _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H
+
+/*
+ * This struct maps the panel information
+ *
+ * Warning:
+ * This data must match the architecture for the panel information
+ */
+
+#include <asm/types.h>
+
+struct ItExtVpdPanel {
+ /* Definition of the Extended Vpd On Panel Data Area */
+ char systemSerial[8];
+ char mfgID[4];
+ char reserved1[24];
+ char machineType[4];
+ char systemID[6];
+ char somUniqueCnt[4];
+ char serialNumberCount;
+ char reserved2[7];
+ u16 bbu3;
+ u16 bbu2;
+ u16 bbu1;
+ char xLocationLabel[8];
+ u8 xRsvd1[6];
+ u16 xFrameId;
+ u8 xRsvd2[48];
+};
+
+extern struct ItExtVpdPanel xItExtVpdPanel;
+
+#endif /* _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H */
diff --git a/include/asm-powerpc/iseries/it_lp_naca.h b/include/asm-powerpc/iseries/it_lp_naca.h
new file mode 100644
index 00000000000..c3ef1de45d8
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_lp_naca.h
@@ -0,0 +1,80 @@
+/*
+ * ItLpNaca.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_NACA_H
+#define _ASM_POWERPC_ISERIES_IT_LP_NACA_H
+
+#include <linux/types.h>
+
+/*
+ * This control block contains the data that is shared between the
+ * hypervisor (PLIC) and the OS.
+ */
+
+struct ItLpNaca {
+// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
+ u32 xDesc; // Eye catcher x00-x03
+ u16 xSize; // Size of this class x04-x05
+ u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
+ u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
+ u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
+ u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A
+ u8 xLpIndex; // LP Index x0B-x0B
+ u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
+ u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
+ u8 xPirEnvironMode:8; // Piranha or hardware x10-x10
+ u8 xPirConsoleMode:8; // Piranha console indicator x11-x11
+ u8 xPirDasdMode:8; // Piranha dasd indicator x12-x12
+ u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
+ u8 xLparInstalled:1; // Is LPAR installed on system x18-x1F
+ u8 xSysPartitioned:1; // Is the system partitioned ...
+ u8 xHwSyncedTBs:1; // Hardware synced TBs ...
+ u8 xIntProcUtilHmt:1; // Utilize HMT for interrupts ...
+ u8 xRsvd1_1:4; // Reserved ...
+ u8 xSpVpdFormat:8; // VPD areas are in CSP format ...
+ u8 xIntProcRatio:8; // Ratio of int procs to procs ...
+ u8 xRsvd1_2[5]; // Reserved ...
+ u16 xRsvd1_3; // Reserved x20-x21
+ u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
+ u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
+ u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
+ u64 xLoadAreaAddr; // ER address of load area x28-x2F
+ u32 xLoadAreaChunks; // Chunks for the load area x30-x33
+ u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
+ // doing an ASR switch on PASE
+ // system call.
+ u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
+ u8 xRsvd1_4[64]; // x40-x7F
+
+// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
+ u8 xRsvd2_0[128]; // Reserved x00-x7F
+
+// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
+// NB: Padding required to keep xInterruptHdlr at x300 which is required
+// for v4r4 PLIC.
+ u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
+ u8 xRsvd3_0[384]; // Reserved 180-2FF
+
+// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
+// handlers
+ u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
+};
+
+extern struct ItLpNaca itLpNaca;
+
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */
diff --git a/include/asm-powerpc/iseries/it_lp_queue.h b/include/asm-powerpc/iseries/it_lp_queue.h
new file mode 100644
index 00000000000..a60d03afbf9
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_lp_queue.h
@@ -0,0 +1,81 @@
+/*
+ * ItLpQueue.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
+#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
+
+/*
+ * This control block defines the simple LP queue structure that is
+ * shared between the hypervisor (PLIC) and the OS in order to send
+ * events to an LP.
+ */
+
+#include <asm/types.h>
+#include <asm/ptrace.h>
+
+struct HvLpEvent;
+
+#define ITMaxLpQueues 8
+
+#define NotUsed 0 // Queue will not be used by PLIC
+#define DedicatedIo 1 // Queue dedicated to IO processor specified
+#define DedicatedLp 2 // Queue dedicated to LP specified
+#define Shared 3 // Queue shared for both IO and LP
+
+#define LpEventStackSize 4096
+#define LpEventMaxSize 256
+#define LpEventAlign 64
+
+struct hvlpevent_queue {
+/*
+ * The xSlicCurEventPtr is the pointer to the next event stack entry
+ * that will become valid. The OS must peek at this entry to determine
+ * if it is valid. PLIC will set the valid indicator as the very last
+ * store into that entry.
+ *
+ * When the OS has completed processing of the event then it will mark
+ * the event as invalid so that PLIC knows it can store into that event
+ * location again.
+ *
+ * If the event stack fills and there are overflow events, then PLIC
+ * will set the xPlicOverflowIntPending flag in which case the OS will
+ * have to fetch the additional LP events once they have drained the
+ * event stack.
+ *
+ * The first 16 bytes are known by both the OS and PLIC. The remainder
+ * of the cache line is for use by the OS.
+ */
+ u8 xPlicOverflowIntPending;// 0x00 Overflow events are pending
+ u8 xPlicStatus; // 0x01 DedicatedIo or DedicatedLp or NotUsed
+ u16 xSlicLogicalProcIndex; // 0x02 Logical Proc Index for correlation
+ u8 xPlicRsvd[12]; // 0x04
+ char *xSlicCurEventPtr; // 0x10
+ char *xSlicLastValidEventPtr; // 0x18
+ char *xSlicEventStackPtr; // 0x20
+ u8 xIndex; // 0x28 unique sequential index.
+ u8 xSlicRsvd[3]; // 0x29-2b
+ spinlock_t lock;
+};
+
+extern struct hvlpevent_queue hvlpevent_queue;
+
+extern int hvlpevent_is_pending(void);
+extern void process_hvlpevents(struct pt_regs *);
+extern void setup_hvlpevent_queue(void);
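+
+/*
+ * Usage sketch (illustrative): the event dispatch path described in the
+ * comment above typically reduces to the following, where regs is the
+ * caller's struct pt_regs pointer:
+ *
+ *	if (hvlpevent_is_pending())
+ *		process_hvlpevents(regs);
+ */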
+
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
diff --git a/include/asm-powerpc/iseries/it_lp_reg_save.h b/include/asm-powerpc/iseries/it_lp_reg_save.h
new file mode 100644
index 00000000000..288044b702d
--- /dev/null
+++ b/include/asm-powerpc/iseries/it_lp_reg_save.h
@@ -0,0 +1,84 @@
+/*
+ * ItLpRegSave.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
+#define _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H
+
+/*
+ * This control block contains the data that is shared between PLIC
+ * and the OS
+ */
+
+struct ItLpRegSave {
+ u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
+ u16 xSize; // Size of this class 004-005
+ u8 xInUse; // Area is live 006-006
+ u8 xRsvd1[9]; // Reserved 007-00F
+
+ u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F
+ u32 xCTRL; // Control Register 170-173
+ u32 xDEC; // Decrementer 174-177
+ u32 xFPSCR; // FP Status and Control Reg 178-17B
+ u32 xPVR; // Processor Version Number 17C-17F
+
+ u64 xMMCR0; // Monitor Mode Control Reg 0 180-187
+ u32 xPMC1; // Perf Monitor Counter 1 188-18B
+ u32 xPMC2; // Perf Monitor Counter 2 18C-18F
+ u32 xPMC3; // Perf Monitor Counter 3 190-193
+ u32 xPMC4; // Perf Monitor Counter 4 194-197
+ u32 xPIR; // Processor ID Reg 198-19B
+
+ u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F
+ u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3
+ u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7
+ u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB
+ u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF
+ u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3
+ u32 xTSC; // Thread Switch Control 1B4-1B7
+ u32 xTST; // Thread Switch Timeout 1B8-1BB
+ u32 xRsvd; // Reserved 1BC-1BF
+
+ u64 xACCR; // Address Compare Control Reg 1C0-1C7
+ u64 xIMR; // Instruction Match Register 1C8-1CF
+ u64 xSDR1; // Storage Description Reg 1 1D0-1D7
+ u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF
+ u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7
+ u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF
+ u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7
+ u64 xTB; // Time Base Register 1F8-1FF
+
+ u64 xFPR[32]; // Floating Point Registers 200-2FF
+
+ u64 xMSR; // Machine State Register 300-307
+ u64 xNIA; // Next Instruction Address 308-30F
+
+ u64 xDABR; // Data Address Breakpoint Reg 310-317
+ u64 xIABR; // Inst Address Breakpoint Reg 318-31F
+
+ u64 xHID0; // HW Implementation Dependent0 320-327
+
+ u64 xHID4; // HW Implementation Dependent4 328-32F
+ u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337
+ u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F
+ u64 xSDAR; // Sample Data Address Register 340-347
+ u64 xSIAR; // Sample Inst Address Register 348-34F
+
+ u8 xRsvd3[176]; // Reserved 350-3FF
+};
+
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H */
diff --git a/include/asm-powerpc/iseries/lpar_map.h b/include/asm-powerpc/iseries/lpar_map.h
new file mode 100644
index 00000000000..84fc321615b
--- /dev/null
+++ b/include/asm-powerpc/iseries/lpar_map.h
@@ -0,0 +1,83 @@
+/*
+ * LparMap.h
+ * Copyright (C) 2001 Mike Corrigan IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
+#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+
+/*
+ * The iSeries hypervisor will set up mapping for one or more
+ * ESID/VSID pairs (in SLB/segment registers) and will set up
+ * mappings of one or more ranges of pages to VAs.
+ * We will have the hypervisor set up the ESID->VSID mapping
+ * for the four kernel segments (C-F). With shared processors,
+ * the hypervisor will clear all segment registers and reload
+ * these four whenever the processor is switched from one
+ * partition to another.
+ */
+
+/* The Vsid and Esid identified below will be used by the hypervisor
+ * to set up a memory mapping for part of the load area before giving
+ * control to the Linux kernel. The load area is 64 MB, but this must
+ * not attempt to map the whole load area. The Hashed Page Table may
+ * need to be located within the load area (if the total partition size
+ * is 64 MB), but cannot be mapped. Typically, this should specify
+ * to map half (32 MB) of the load area.
+ *
+ * The hypervisor will set up page table entries for the number of
+ * pages specified.
+ *
+ * In 32-bit mode, the hypervisor will load all four of the
+ * segment registers (identified by the low-order four bits of the
+ * Esid field). In 64-bit mode, the hypervisor will load one SLB
+ * entry to map the Esid to the Vsid.
+ */
+
+#define HvEsidsToMap 2
+#define HvRangesToMap 1
+
+/* Hypervisor initially maps 32MB of the load area */
+#define HvPagesToMap 8192
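+/* 8192 pages * 4 KB/page = 32 MB, i.e. half of the 64 MB load area */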
+
+struct LparMap {
+ u64 xNumberEsids; // Number of ESID/VSID pairs
+ u64 xNumberRanges; // Number of VA ranges to map
+ u64 xSegmentTableOffs; // Page number within load area of seg table
+ u64 xRsvd[5];
+ struct {
+ u64 xKernelEsid; // Esid used to map kernel load
+ u64 xKernelVsid; // Vsid used to map kernel load
+ } xEsids[HvEsidsToMap];
+ struct {
+ u64 xPages; // Number of pages to be mapped
+ u64 xOffset; // Offset from start of load area
+ u64 xVPN; // Virtual Page Number
+ } xRanges[HvRangesToMap];
+};
+
+extern const struct LparMap xLparMap;
+
+#endif /* __ASSEMBLY__ */
+
+/* the fixed address where the LparMap exists */
+#define LPARMAP_PHYS 0x7000
+
+#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
diff --git a/include/asm-powerpc/iseries/mf.h b/include/asm-powerpc/iseries/mf.h
new file mode 100644
index 00000000000..e7bd57a03fb
--- /dev/null
+++ b/include/asm-powerpc/iseries/mf.h
@@ -0,0 +1,57 @@
+/*
+ * mf.h
+ * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
+ * Copyright (C) 2004 Stephen Rothwell IBM Corporation
+ *
+ * This module exists as an interface between a Linux secondary partition
+ * running on an iSeries and the primary partition's Virtual Service
+ * Processor (VSP) object. The VSP has final authority over powering on/off
+ * all partitions in the iSeries. It also provides miscellaneous low-level
+ * machine facility type operations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _ASM_POWERPC_ISERIES_MF_H
+#define _ASM_POWERPC_ISERIES_MF_H
+
+#include <linux/types.h>
+
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_call_event.h>
+
+struct rtc_time;
+
+typedef void (*MFCompleteHandler)(void *clientToken, int returnCode);
+
+extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
+ unsigned size, unsigned amount, MFCompleteHandler hdlr,
+ void *userToken);
+extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
+ unsigned count, MFCompleteHandler hdlr, void *userToken);
+
+extern void mf_power_off(void);
+extern void mf_reboot(void);
+
+extern void mf_display_src(u32 word);
+extern void mf_display_progress(u16 value);
+extern void mf_clear_src(void);
+
+extern void mf_init(void);
+
+extern int mf_get_rtc(struct rtc_time *tm);
+extern int mf_get_boot_rtc(struct rtc_time *tm);
+extern int mf_set_rtc(struct rtc_time *tm);
+
+#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/include/asm-powerpc/iseries/vio.h b/include/asm-powerpc/iseries/vio.h
new file mode 100644
index 00000000000..7e3a469420d
--- /dev/null
+++ b/include/asm-powerpc/iseries/vio.h
@@ -0,0 +1,130 @@
+/* -*- linux-c -*-
+ * drivers/char/vio.h
+ *
+ * iSeries Virtual I/O Message Path header
+ *
+ * Authors: Dave Boutcher <boutcher@us.ibm.com>
+ * Ryan Arnold <ryanarn@us.ibm.com>
+ * Colin Devilbiss <devilbis@us.ibm.com>
+ *
+ * (C) Copyright 2000 IBM Corporation
+ *
+ * This header file is used by the iSeries virtual I/O device
+ * drivers. It defines the interfaces to the common functions
+ * (implemented in drivers/char/viopath.h) as well as defining
+ * common functions and structures. Currently (at the time I
+ * wrote this comment) the iSeries virtual I/O device drivers
+ * that use this are
+ * drivers/block/viodasd.c
+ * drivers/char/viocons.c
+ * drivers/char/viotape.c
+ * drivers/cdrom/viocd.c
+ *
+ * The iSeries virtual ethernet support (veth.c) uses a whole
+ * different set of functions.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _ASM_POWERPC_ISERIES_VIO_H
+#define _ASM_POWERPC_ISERIES_VIO_H
+
+#include <asm/iseries/hv_types.h>
+#include <asm/iseries/hv_lp_event.h>
+
+/*
+ * iSeries virtual I/O events use the subtype field in
+ * HvLpEvent to figure out what kind of vio event is coming
+ * in. We use a table to route these, and this defines
+ * the maximum number of distinct subtypes
+ */
+#define VIO_MAX_SUBTYPES 8
+
+/*
+ * Each subtype can register a handler to process its events.
+ * The handler must have this interface.
+ */
+typedef void (vio_event_handler_t) (struct HvLpEvent * event);
+
+extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
+extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
+extern int vio_setHandler(int subtype, vio_event_handler_t * beh);
+extern int vio_clearHandler(int subtype);
+extern int viopath_isactive(HvLpIndex lp);
+extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
+extern HvLpInstanceId viopath_targetinst(HvLpIndex lp);
+extern void vio_set_hostlp(void);
+extern void *vio_get_event_buffer(int subtype);
+extern void vio_free_event_buffer(int subtype, void *buffer);
+
+extern HvLpIndex viopath_hostLp;
+extern HvLpIndex viopath_ourLp;
+
+#define VIOCHAR_MAX_DATA 200
+
+#define VIOMAJOR_SUBTYPE_MASK 0xff00
+#define VIOMINOR_SUBTYPE_MASK 0x00ff
+#define VIOMAJOR_SUBTYPE_SHIFT 8
+
+#define VIOVERSION 0x0101
+
+/*
+ * This is the general structure for VIO errors; each module should have
+ * a table of them, and each table should be terminated by an entry of
+ * { 0, 0, NULL }. Then, to find a specific error message, a module
+ * should pass its local table and the return code.
+ */
+struct vio_error_entry {
+ u16 rc;
+ int errno;
+ const char *msg;
+};
+extern const struct vio_error_entry *vio_lookup_rc(
+ const struct vio_error_entry *local_table, u16 rc);
+
+enum viosubtypes {
+ viomajorsubtype_monitor = 0x0100,
+ viomajorsubtype_blockio = 0x0200,
+ viomajorsubtype_chario = 0x0300,
+ viomajorsubtype_config = 0x0400,
+ viomajorsubtype_cdio = 0x0500,
+ viomajorsubtype_tape = 0x0600,
+ viomajorsubtype_scsi = 0x0700
+};
+
+enum vioconfigsubtype {
+ vioconfigget = 0x0001,
+};
+
+enum viorc {
+ viorc_good = 0x0000,
+ viorc_noConnection = 0x0001,
+ viorc_noReceiver = 0x0002,
+ viorc_noBufferAvailable = 0x0003,
+ viorc_invalidMessageType = 0x0004,
+ viorc_invalidRange = 0x0201,
+ viorc_invalidToken = 0x0202,
+ viorc_DMAError = 0x0203,
+ viorc_useError = 0x0204,
+ viorc_releaseError = 0x0205,
+ viorc_invalidDisk = 0x0206,
+ viorc_openRejected = 0x0301
+};
+
+struct device;
+
+extern struct device *iSeries_vio_dev;
+
+#endif /* _ASM_POWERPC_ISERIES_VIO_H */
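A sketch of the error-table convention described above; the table contents and the my_* names are illustrative only, not taken from any real driver:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <asm/iseries/vio.h>

static const struct vio_error_entry my_vio_errors[] = {
	{ viorc_noConnection, EIO,    "no connection to server partition" },
	{ viorc_invalidDisk,  ENODEV, "invalid disk" },
	{ 0, 0, NULL },			/* required terminator */
};

static void my_report(u16 rc)
{
	const struct vio_error_entry *e = vio_lookup_rc(my_vio_errors, rc);

	if (e && e->msg)
		printk("vio error 0x%04x: %s (errno %d)\n",
		       rc, e->msg, e->errno);
}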
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
new file mode 100644
index 00000000000..9dcbac67481
--- /dev/null
+++ b/include/asm-powerpc/kdebug.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_POWERPC_KDEBUG_H
+#define _ASM_POWERPC_KDEBUG_H
+
+/* nearly identical to x86_64/i386 code */
+
+#include <linux/notifier.h>
+
+struct pt_regs;
+
+struct die_args {
+ struct pt_regs *regs;
+ const char *str;
+ long err;
+ int trapnr;
+ int signr;
+};
+
+/*
+ Note - you should never unregister because that can race with NMIs.
+ If you really want to do it first unregister - then synchronize_sched -
+ then free.
+ */
+int register_die_notifier(struct notifier_block *nb);
+extern struct notifier_block *powerpc_die_chain;
+
+/* Grossly misnamed. */
+enum die_val {
+ DIE_OOPS = 1,
+ DIE_IABR_MATCH,
+ DIE_DABR_MATCH,
+ DIE_BPT,
+ DIE_SSTEP,
+ DIE_PAGE_FAULT,
+};
+
+static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, long err, int trap, int sig)
+{
+	struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig };
+ return notifier_call_chain(&powerpc_die_chain, val, &args);
+}
+
+#endif /* _ASM_POWERPC_KDEBUG_H */
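A sketch of a die-chain client using the interface above; my_die_handler and my_die_nb are hypothetical names:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>

static int my_die_handler(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct die_args *args = data;

	if (val == DIE_BPT)
		printk("breakpoint trap %d, nip 0x%lx\n",
		       args->trapnr, args->regs->nip);
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = { .notifier_call = my_die_handler };

static int __init my_kdebug_init(void)
{
	/* Per the comment above, treat this as permanent: unregistering
	 * safely would require synchronize_sched() before freeing. */
	return register_die_notifier(&my_die_nb);
}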
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
new file mode 100644
index 00000000000..062ab9ba68e
--- /dev/null
+++ b/include/asm-powerpc/kexec.h
@@ -0,0 +1,49 @@
+#ifndef _ASM_POWERPC_KEXEC_H
+#define _ASM_POWERPC_KEXEC_H
+
+/*
+ * Maximum page that is mapped directly into kernel memory.
+ * XXX: Since we copy virt we can use any page we allocate
+ */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/*
+ * Maximum address we can reach in physical address mode.
+ * XXX: I want to allow initrd in highmem. Otherwise set to rmo on LPAR.
+ */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#ifdef __powerpc64__
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+#else
+/* TASK_SIZE, probably left over from use_mm ?? */
+#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#endif
+
+#define KEXEC_CONTROL_CODE_SIZE 4096
+
+/* The native architecture */
+#ifdef __powerpc64__
+#define KEXEC_ARCH KEXEC_ARCH_PPC64
+#else
+#define KEXEC_ARCH KEXEC_ARCH_PPC
+#endif
+
+#ifndef __ASSEMBLY__
+
+#define MAX_NOTE_BYTES 1024
+typedef u32 note_buf_t[MAX_NOTE_BYTES / sizeof(u32)];
+
+extern note_buf_t crash_notes[];
+
+#ifdef __powerpc64__
+extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
+ master to copy new code to 0 */
+#else
+struct kimage;
+extern void machine_kexec_simple(struct kimage *image);
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/include/asm-powerpc/keylargo.h b/include/asm-powerpc/keylargo.h
new file mode 100644
index 00000000000..a669a3f0f5a
--- /dev/null
+++ b/include/asm-powerpc/keylargo.h
@@ -0,0 +1,248 @@
+/*
+ * keylargo.h: definitions for using the "KeyLargo" I/O controller chip.
+ *
+ */
+
+/* "Pangea" chipset has keylargo device-id 0x25 while core99
+ * has device-id 0x22. The rev. of the pangea one is 0, so we
+ * fake an artificial rev. in keylargo_rev by ORing in 0x100
+ */
+#define KL_PANGEA_REV 0x100
+
+/* offset from base for feature control registers */
+#define KEYLARGO_MBCR 0x34 /* KL Only, Media bay control/status */
+#define KEYLARGO_FCR0 0x38
+#define KEYLARGO_FCR1 0x3c
+#define KEYLARGO_FCR2 0x40
+#define KEYLARGO_FCR3 0x44
+#define KEYLARGO_FCR4 0x48
+#define KEYLARGO_FCR5 0x4c /* Pangea only */
+
+/* K2 additional FCRs */
+#define K2_FCR6 0x34
+#define K2_FCR7 0x30
+#define K2_FCR8 0x2c
+#define K2_FCR9 0x28
+#define K2_FCR10 0x24
+
+/* GPIO registers */
+#define KEYLARGO_GPIO_LEVELS0 0x50
+#define KEYLARGO_GPIO_LEVELS1 0x54
+#define KEYLARGO_GPIO_EXTINT_0 0x58
+#define KEYLARGO_GPIO_EXTINT_CNT 18
+#define KEYLARGO_GPIO_0 0x6A
+#define KEYLARGO_GPIO_CNT 17
+#define KEYLARGO_GPIO_EXTINT_DUAL_EDGE 0x80
+#define KEYLARGO_GPIO_OUTPUT_ENABLE 0x04
+#define KEYLARGO_GPIO_OUTOUT_DATA 0x01
+#define KEYLARGO_GPIO_INPUT_DATA 0x02
+
+/* K2 has only extint GPIOs, and it has 51 of them */
+#define K2_GPIO_EXTINT_0 0x58
+#define K2_GPIO_EXTINT_CNT 51
+
+/* Specific GPIO regs */
+
+#define KL_GPIO_MODEM_RESET (KEYLARGO_GPIO_0+0x03)
+#define KL_GPIO_MODEM_POWER (KEYLARGO_GPIO_0+0x02) /* Pangea */
+
+#define KL_GPIO_SOUND_POWER (KEYLARGO_GPIO_0+0x05)
+
+/* Hrm... this one is only to be used on Pismo. It seems to also
+ * control the timebase enable on other machines. Still to be
+ * experimented... --BenH.
+ */
+#define KL_GPIO_FW_CABLE_POWER (KEYLARGO_GPIO_0+0x09)
+#define KL_GPIO_TB_ENABLE (KEYLARGO_GPIO_0+0x09)
+
+#define KL_GPIO_ETH_PHY_RESET (KEYLARGO_GPIO_0+0x10)
+
+#define KL_GPIO_EXTINT_CPU1 (KEYLARGO_GPIO_0+0x0a)
+#define KL_GPIO_EXTINT_CPU1_ASSERT 0x04
+#define KL_GPIO_EXTINT_CPU1_RELEASE 0x38
+
+#define KL_GPIO_RESET_CPU0 (KEYLARGO_GPIO_EXTINT_0+0x03)
+#define KL_GPIO_RESET_CPU1 (KEYLARGO_GPIO_EXTINT_0+0x04)
+#define KL_GPIO_RESET_CPU2 (KEYLARGO_GPIO_EXTINT_0+0x0f)
+#define KL_GPIO_RESET_CPU3 (KEYLARGO_GPIO_EXTINT_0+0x10)
+
+#define KL_GPIO_PMU_MESSAGE_IRQ (KEYLARGO_GPIO_EXTINT_0+0x09)
+#define KL_GPIO_PMU_MESSAGE_BIT KEYLARGO_GPIO_INPUT_DATA
+
+#define KL_GPIO_MEDIABAY_IRQ (KEYLARGO_GPIO_EXTINT_0+0x0e)
+
+#define KL_GPIO_AIRPORT_0 (KEYLARGO_GPIO_EXTINT_0+0x0a)
+#define KL_GPIO_AIRPORT_1 (KEYLARGO_GPIO_EXTINT_0+0x0d)
+#define KL_GPIO_AIRPORT_2 (KEYLARGO_GPIO_0+0x0d)
+#define KL_GPIO_AIRPORT_3 (KEYLARGO_GPIO_0+0x0e)
+#define KL_GPIO_AIRPORT_4 (KEYLARGO_GPIO_0+0x0f)
+
+/*
+ * Bits in feature control register. Those bits different for K2 are
+ * listed separately
+ */
+#define KL_MBCR_MB0_PCI_ENABLE 0x00000800 /* exist ? */
+#define KL_MBCR_MB0_IDE_ENABLE 0x00001000
+#define KL_MBCR_MB0_FLOPPY_ENABLE 0x00002000 /* exist ? */
+#define KL_MBCR_MB0_SOUND_ENABLE 0x00004000 /* hrm... */
+#define KL_MBCR_MB0_DEV_MASK 0x00007800
+#define KL_MBCR_MB0_DEV_POWER 0x00000400
+#define KL_MBCR_MB0_DEV_RESET 0x00000200
+#define KL_MBCR_MB0_ENABLE 0x00000100
+#define KL_MBCR_MB1_PCI_ENABLE 0x08000000 /* exist ? */
+#define KL_MBCR_MB1_IDE_ENABLE 0x10000000
+#define KL_MBCR_MB1_FLOPPY_ENABLE 0x20000000 /* exist ? */
+#define KL_MBCR_MB1_SOUND_ENABLE 0x40000000 /* hrm... */
+#define KL_MBCR_MB1_DEV_MASK 0x78000000
+#define KL_MBCR_MB1_DEV_POWER 0x04000000
+#define KL_MBCR_MB1_DEV_RESET 0x02000000
+#define KL_MBCR_MB1_ENABLE 0x01000000
+
+#define KL0_SCC_B_INTF_ENABLE 0x00000001 /* (KL Only) */
+#define KL0_SCC_A_INTF_ENABLE 0x00000002
+#define KL0_SCC_SLOWPCLK 0x00000004
+#define KL0_SCC_RESET 0x00000008
+#define KL0_SCCA_ENABLE 0x00000010
+#define KL0_SCCB_ENABLE 0x00000020
+#define KL0_SCC_CELL_ENABLE 0x00000040
+#define KL0_IRDA_HIGH_BAND 0x00000100 /* (KL Only) */
+#define KL0_IRDA_SOURCE2_SEL 0x00000200 /* (KL Only) */
+#define KL0_IRDA_SOURCE1_SEL 0x00000400 /* (KL Only) */
+#define KL0_PG_USB0_PMI_ENABLE 0x00000400 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_RESET 0x00000800 /* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND_SEL 0x00000800 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT1 0x00001000 /* (KL Only) */
+#define KL0_PG_USB0_REF_SUSPEND 0x00001000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_DEFAULT0 0x00002000 /* (KL Only) */
+#define KL0_PG_USB0_PAD_SUSPEND 0x00002000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_FAST_CONNECT 0x00004000 /* (KL Only) */
+#define KL0_PG_USB1_PMI_ENABLE 0x00004000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_ENABLE 0x00008000 /* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND_SEL 0x00008000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK32_ENABLE 0x00010000 /* (KL Only) */
+#define KL0_PG_USB1_REF_SUSPEND 0x00010000 /* (Pangea/Intrepid Only) */
+#define KL0_IRDA_CLK19_ENABLE 0x00020000 /* (KL Only) */
+#define KL0_PG_USB1_PAD_SUSPEND 0x00020000 /* (Pangea/Intrepid Only) */
+#define KL0_USB0_PAD_SUSPEND0 0x00040000
+#define KL0_USB0_PAD_SUSPEND1 0x00080000
+#define KL0_USB0_CELL_ENABLE 0x00100000
+#define KL0_USB1_PAD_SUSPEND0 0x00400000
+#define KL0_USB1_PAD_SUSPEND1 0x00800000
+#define KL0_USB1_CELL_ENABLE 0x01000000
+#define KL0_USB_REF_SUSPEND 0x10000000 /* (KL Only) */
+
+#define KL0_SERIAL_ENABLE (KL0_SCC_B_INTF_ENABLE | \
+ KL0_SCC_SLOWPCLK | \
+ KL0_SCC_CELL_ENABLE | KL0_SCCA_ENABLE)
+
+#define KL1_USB2_PMI_ENABLE 0x00000001 /* Intrepid only */
+#define KL1_AUDIO_SEL_22MCLK 0x00000002 /* KL/Pangea only */
+#define KL1_USB2_REF_SUSPEND_SEL 0x00000002 /* Intrepid only */
+#define KL1_USB2_REF_SUSPEND 0x00000004 /* Intrepid only */
+#define KL1_AUDIO_CLK_ENABLE_BIT 0x00000008 /* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND_SEL 0x00000008 /* Intrepid only */
+#define KL1_USB2_PAD_SUSPEND0 0x00000010 /* Intrepid only */
+#define KL1_AUDIO_CLK_OUT_ENABLE 0x00000020 /* KL/Pangea only */
+#define KL1_USB2_PAD_SUSPEND1 0x00000020 /* Intrepid only */
+#define KL1_AUDIO_CELL_ENABLE 0x00000040 /* KL/Pangea only */
+#define KL1_USB2_CELL_ENABLE 0x00000040 /* Intrepid only */
+#define KL1_AUDIO_CHOOSE 0x00000080 /* KL/Pangea only */
+#define KL1_I2S0_CHOOSE 0x00000200 /* KL Only */
+#define KL1_I2S0_CELL_ENABLE 0x00000400
+#define KL1_I2S0_CLK_ENABLE_BIT 0x00001000
+#define KL1_I2S0_ENABLE 0x00002000
+#define KL1_I2S1_CELL_ENABLE 0x00020000
+#define KL1_I2S1_CLK_ENABLE_BIT 0x00080000
+#define KL1_I2S1_ENABLE 0x00100000
+#define KL1_EIDE0_ENABLE 0x00800000 /* KL/Intrepid Only */
+#define KL1_EIDE0_RESET_N 0x01000000 /* KL/Intrepid Only */
+#define KL1_EIDE1_ENABLE 0x04000000 /* KL Only */
+#define KL1_EIDE1_RESET_N 0x08000000 /* KL Only */
+#define KL1_UIDE_ENABLE 0x20000000 /* KL/Pangea Only */
+#define KL1_UIDE_RESET_N 0x40000000 /* KL/Pangea Only */
+
+#define KL2_IOBUS_ENABLE 0x00000002
+#define KL2_SLEEP_STATE_BIT 0x00000100 /* KL Only */
+#define KL2_PG_STOP_ALL_CLOCKS 0x00000100 /* Pangea Only */
+#define KL2_MPIC_ENABLE 0x00020000
+#define KL2_CARDSLOT_RESET 0x00040000 /* Pangea/Intrepid Only */
+#define KL2_ALT_DATA_OUT 0x02000000 /* KL Only ??? */
+#define KL2_MEM_IS_BIG 0x04000000
+#define KL2_CARDSEL_16 0x08000000
+
+#define KL3_SHUTDOWN_PLL_TOTAL 0x00000001 /* KL/Pangea only */
+#define KL3_SHUTDOWN_PLLKW6 0x00000002 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL3 0x00000002 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW4 0x00000004 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL2 0x00000004 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW35 0x00000008 /* KL/Pangea only */
+#define KL3_IT_SHUTDOWN_PLL1 0x00000008 /* Intrepid only */
+#define KL3_SHUTDOWN_PLLKW12 0x00000010 /* KL Only */
+#define KL3_IT_ENABLE_PLL3_SHUTDOWN 0x00000010 /* Intrepid only */
+#define KL3_PLL_RESET 0x00000020 /* KL/Pangea only */
+#define KL3_IT_ENABLE_PLL2_SHUTDOWN 0x00000020 /* Intrepid only */
+#define KL3_IT_ENABLE_PLL1_SHUTDOWN 0x00000010 /* Intrepid only */
+#define KL3_SHUTDOWN_PLL2X 0x00000080 /* KL Only */
+#define KL3_CLK66_ENABLE 0x00000100 /* KL Only */
+#define KL3_CLK49_ENABLE 0x00000200
+#define KL3_CLK45_ENABLE 0x00000400
+#define KL3_CLK31_ENABLE 0x00000800 /* KL/Pangea only */
+#define KL3_TIMER_CLK18_ENABLE 0x00001000
+#define KL3_I2S1_CLK18_ENABLE 0x00002000
+#define KL3_I2S0_CLK18_ENABLE 0x00004000
+#define KL3_VIA_CLK16_ENABLE 0x00008000 /* KL/Pangea only */
+#define KL3_IT_VIA_CLK32_ENABLE 0x00008000 /* Intrepid only */
+#define KL3_STOPPING33_ENABLED 0x00080000 /* KL Only */
+#define KL3_PG_PLL_ENABLE_TEST 0x00080000 /* Pangea Only */
+
+/* Intrepid USB bus 2, port 0,1 */
+#define KL3_IT_PORT_WAKEUP_ENABLE(p) (0x00080000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_WAKE_EN(p) (0x00040000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_WAKE_EN(p) (0x00020000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_WAKE_EN(p) (0x00010000 << ((p)<<3))
+#define KL3_IT_PORT_RESUME_STAT(p) (0x00300000 << ((p)<<3))
+#define KL3_IT_PORT_CONNECT_STAT(p) (0x00200000 << ((p)<<3))
+#define KL3_IT_PORT_DISCONNECT_STAT(p) (0x00100000 << ((p)<<3))
+
+/* Port 0,1 : bus 0, port 2,3 : bus 1 */
+#define KL4_PORT_WAKEUP_ENABLE(p) (0x00000008 << ((p)<<3))
+#define KL4_PORT_RESUME_WAKE_EN(p) (0x00000004 << ((p)<<3))
+#define KL4_PORT_CONNECT_WAKE_EN(p) (0x00000002 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_WAKE_EN(p) (0x00000001 << ((p)<<3))
+#define KL4_PORT_RESUME_STAT(p) (0x00000040 << ((p)<<3))
+#define KL4_PORT_CONNECT_STAT(p) (0x00000020 << ((p)<<3))
+#define KL4_PORT_DISCONNECT_STAT(p) (0x00000010 << ((p)<<3))
+
+/* Pangea and Intrepid only */
+#define KL5_VIA_USE_CLK31		0x00000001	/* Pangea Only */
+#define KL5_SCC_USE_CLK31 0x00000002 /* Pangea Only */
+#define KL5_PWM_CLK32_EN 0x00000004
+#define KL5_CLK3_68_EN 0x00000010
+#define KL5_CLK32_EN 0x00000020
+
+
+/* K2 definitions */
+#define K2_FCR0_USB0_SWRESET 0x00200000
+#define K2_FCR0_USB1_SWRESET 0x02000000
+#define K2_FCR0_RING_PME_DISABLE 0x08000000
+
+#define K2_FCR1_PCI1_BUS_RESET_N 0x00000010
+#define K2_FCR1_PCI1_SLEEP_RESET_EN 0x00000020
+#define K2_FCR1_I2S0_CELL_ENABLE 0x00000400
+#define K2_FCR1_I2S0_RESET 0x00000800
+#define K2_FCR1_I2S0_CLK_ENABLE_BIT 0x00001000
+#define K2_FCR1_I2S0_ENABLE 0x00002000
+
+#define K2_FCR1_PCI1_CLK_ENABLE 0x00004000
+#define K2_FCR1_FW_CLK_ENABLE 0x00008000
+#define K2_FCR1_FW_RESET_N 0x00010000
+#define K2_FCR1_GMAC_CLK_ENABLE 0x00400000
+#define K2_FCR1_GMAC_POWER_DOWN 0x00800000
+#define K2_FCR1_GMAC_RESET_N 0x01000000
+#define K2_FCR1_SATA_CLK_ENABLE 0x02000000
+#define K2_FCR1_SATA_POWER_DOWN 0x04000000
+#define K2_FCR1_SATA_RESET_N 0x08000000
+#define K2_FCR1_UATA_CLK_ENABLE 0x10000000
+#define K2_FCR1_UATA_RESET_N 0x40000000
+#define K2_FCR1_UATA_CHOOSE_CLK66 0x80000000
+
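A sketch of the read-modify-write pattern these FCR bit masks are meant for; fcr_base is a hypothetical ioremap'ed pointer to the mac-io registers, and real code would normally go through the pmac_feature layer rather than poke FCRs directly:

#include <linux/types.h>
#include <asm/io.h>
#include <asm/keylargo.h>

static void my_enable_scc(void __iomem *fcr_base)
{
	u32 fcr0 = in_le32(fcr_base + KEYLARGO_FCR0);

	fcr0 |= KL0_SERIAL_ENABLE;	/* SCC cell, clock and interface A */
	fcr0 &= ~KL0_SCC_RESET;		/* take the SCC cell out of reset */
	out_le32(fcr_base + KEYLARGO_FCR0, fcr0);
}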
diff --git a/include/asm-powerpc/kmap_types.h b/include/asm-powerpc/kmap_types.h
new file mode 100644
index 00000000000..b6bac6f61c1
--- /dev/null
+++ b/include/asm-powerpc/kmap_types.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_POWERPC_KMAP_TYPES_H
+#define _ASM_POWERPC_KMAP_TYPES_H
+
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
+ KM_TYPE_NR
+};
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
new file mode 100644
index 00000000000..b2f09f17fbe
--- /dev/null
+++ b/include/asm-powerpc/kprobes.h
@@ -0,0 +1,66 @@
+#ifndef _ASM_POWERPC_KPROBES_H
+#define _ASM_POWERPC_KPROBES_H
+/*
+ * Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ * Probes initial implementation ( includes suggestions from
+ * Rusty Russell).
+ * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli
+ * <ananth@in.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/ptrace.h>
+
+struct pt_regs;
+
+typedef unsigned int kprobe_opcode_t;
+#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+#define MAX_INSN_SIZE 1
+
+#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
+#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
+#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
+#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
+
+#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry)
+
+#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
+ IS_TWI(instr) || IS_TDI(instr))
+
+#define ARCH_SUPPORTS_KRETPROBES
+void kretprobe_trampoline(void);
+
+/* Architecture specific copy of original instruction */
+struct arch_specific_insn {
+ /* copy of original instruction */
+ kprobe_opcode_t *insn;
+};
+
+#ifdef CONFIG_KPROBES
+extern int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+#else /* !CONFIG_KPROBES */
+static inline int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return 0;
+}
+#endif
+#endif /* _ASM_POWERPC_KPROBES_H */
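A small sketch using the trap-classification helpers above; my_is_breakpoint is a hypothetical caller:

#include <asm/kprobes.h>

static int my_is_breakpoint(kprobe_opcode_t insn)
{
	/* is_trap() matches tw/td/twi/tdi; the kprobes breakpoint is the
	 * unconditional trap encoding 0x7fe00008. */
	if (!is_trap(insn))
		return 0;
	return insn == BREAKPOINT_INSTRUCTION;
}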
diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h
new file mode 100644
index 00000000000..ea0afe34354
--- /dev/null
+++ b/include/asm-powerpc/lmb.h
@@ -0,0 +1,81 @@
+#ifndef _PPC64_LMB_H
+#define _PPC64_LMB_H
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 2001 Peter Bergner, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <asm/prom.h>
+
+#define MAX_LMB_REGIONS 128
+
+#define LMB_ALLOC_ANYWHERE 0
+
+struct lmb_property {
+ unsigned long base;
+ unsigned long size;
+};
+
+struct lmb_region {
+ unsigned long cnt;
+ unsigned long size;
+ struct lmb_property region[MAX_LMB_REGIONS+1];
+};
+
+struct lmb {
+ unsigned long debug;
+ unsigned long rmo_size;
+ struct lmb_region memory;
+ struct lmb_region reserved;
+};
+
+extern struct lmb lmb;
+
+extern void __init lmb_init(void);
+extern void __init lmb_analyze(void);
+extern long __init lmb_add(unsigned long, unsigned long);
+extern long __init lmb_reserve(unsigned long, unsigned long);
+extern unsigned long __init lmb_alloc(unsigned long, unsigned long);
+extern unsigned long __init lmb_alloc_base(unsigned long, unsigned long,
+ unsigned long);
+extern unsigned long __init lmb_phys_mem_size(void);
+extern unsigned long __init lmb_end_of_DRAM(void);
+extern unsigned long __init lmb_abs_to_phys(unsigned long);
+extern void __init lmb_enforce_memory_limit(unsigned long);
+
+extern void lmb_dump_all(void);
+
+extern unsigned long io_hole_start;
+
+static inline unsigned long
+lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
+{
+ return type->region[region_nr].size;
+}
+static inline unsigned long
+lmb_size_pages(struct lmb_region *type, unsigned long region_nr)
+{
+ return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT;
+}
+static inline unsigned long
+lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
+{
+ return type->region[region_nr].base >> PAGE_SHIFT;
+}
+static inline unsigned long
+lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
+{
+ return lmb_start_pfn(type, region_nr) +
+ lmb_size_pages(type, region_nr);
+}
+
+#endif /* _PPC64_LMB_H */
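A sketch that walks the global memory map with the inline helpers above; my_dump_lmb is hypothetical:

#include <linux/kernel.h>
#include <asm/lmb.h>

static void my_dump_lmb(void)
{
	unsigned long i;

	for (i = 0; i < lmb.memory.cnt; i++)
		printk("region %lu: pfn 0x%lx-0x%lx, %lu bytes\n", i,
		       lmb_start_pfn(&lmb.memory, i),
		       lmb_end_pfn(&lmb.memory, i),
		       lmb_size_bytes(&lmb.memory, i));
}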
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
new file mode 100644
index 00000000000..629ca964b97
--- /dev/null
+++ b/include/asm-powerpc/machdep.h
@@ -0,0 +1,285 @@
+#ifndef _ASM_POWERPC_MACHDEP_H
+#define _ASM_POWERPC_MACHDEP_H
+#ifdef __KERNEL__
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/setup.h>
+
+/* We export this macro for external modules like Alsa to know if
+ * ppc_md.feature_call is implemented or not
+ */
+#define CONFIG_PPC_HAS_FEATURE_CALLS
+
+struct pt_regs;
+struct pci_bus;
+struct device_node;
+struct iommu_table;
+struct rtc_time;
+struct file;
+
+#ifdef CONFIG_SMP
+struct smp_ops_t {
+ void (*message_pass)(int target, int msg);
+ int (*probe)(void);
+ void (*kick_cpu)(int nr);
+ void (*setup_cpu)(int nr);
+ void (*take_timebase)(void);
+ void (*give_timebase)(void);
+ int (*cpu_enable)(unsigned int nr);
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+};
+#endif
+
+struct machdep_calls {
+#ifdef CONFIG_PPC64
+ void (*hpte_invalidate)(unsigned long slot,
+ unsigned long va,
+ int large,
+ int local);
+ long (*hpte_updatepp)(unsigned long slot,
+ unsigned long newpp,
+ unsigned long va,
+ int large,
+ int local);
+ void (*hpte_updateboltedpp)(unsigned long newpp,
+ unsigned long ea);
+ long (*hpte_insert)(unsigned long hpte_group,
+ unsigned long va,
+ unsigned long prpn,
+ unsigned long vflags,
+ unsigned long rflags);
+ long (*hpte_remove)(unsigned long hpte_group);
+ void (*flush_hash_range)(unsigned long number, int local);
+
+	/* special for kexec, to be called in real mode; the linear mapping is
+	 * destroyed as well */
+ void (*hpte_clear_all)(void);
+
+ void (*tce_build)(struct iommu_table * tbl,
+ long index,
+ long npages,
+ unsigned long uaddr,
+ enum dma_data_direction direction);
+ void (*tce_free)(struct iommu_table *tbl,
+ long index,
+ long npages);
+ void (*tce_flush)(struct iommu_table *tbl);
+ void (*iommu_dev_setup)(struct pci_dev *dev);
+ void (*iommu_bus_setup)(struct pci_bus *bus);
+ void (*irq_bus_setup)(struct pci_bus *bus);
+ int (*set_dabr)(unsigned long dabr);
+#endif
+
+ int (*probe)(int platform);
+ void (*setup_arch)(void);
+ void (*init_early)(void);
+ /* Optional, may be NULL. */
+ void (*show_cpuinfo)(struct seq_file *m);
+ void (*show_percpuinfo)(struct seq_file *m, int i);
+
+ void (*init_IRQ)(void);
+ int (*get_irq)(struct pt_regs *);
+ void (*cpu_irq_down)(int secondary);
+
+ /* PCI stuff */
+ /* Called after scanning the bus, before allocating resources */
+ void (*pcibios_fixup)(void);
+ int (*pci_probe_mode)(struct pci_bus *);
+
+ void (*restart)(char *cmd);
+ void (*power_off)(void);
+ void (*halt)(void);
+ void (*panic)(char *str);
+ void (*cpu_die)(void);
+
+ long (*time_init)(void); /* Optional, may be NULL */
+
+ int (*set_rtc_time)(struct rtc_time *);
+ void (*get_rtc_time)(struct rtc_time *);
+ unsigned long (*get_boot_time)(void);
+ unsigned char (*rtc_read_val)(int addr);
+ void (*rtc_write_val)(int addr, unsigned char val);
+
+ void (*calibrate_decr)(void);
+
+ void (*progress)(char *, unsigned short);
+
+ /* Interface for platform error logging */
+ void (*log_error)(char *buf, unsigned int err_type, int fatal);
+
+ unsigned char (*nvram_read_val)(int addr);
+ void (*nvram_write_val)(int addr, unsigned char val);
+ ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
+ ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
+ ssize_t (*nvram_size)(void);
+ void (*nvram_sync)(void);
+
+ /* Exception handlers */
+ void (*system_reset_exception)(struct pt_regs *regs);
+ int (*machine_check_exception)(struct pt_regs *regs);
+
+ /* Motherboard/chipset features. This is a kind of general purpose
+ * hook used to control some machine specific features (like reset
+ * lines, chip power control, etc...).
+ */
+ long (*feature_call)(unsigned int feature, ...);
+
+ /* Check availability of legacy devices like i8042 */
+ int (*check_legacy_ioport)(unsigned int baseport);
+
+ /* Get legacy PCI/IDE interrupt mapping */
+ int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
+
+ /* Get access protection for /dev/mem */
+ pgprot_t (*phys_mem_access_prot)(struct file *file,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t vma_prot);
+
+ /* Idle loop for this platform, leave empty for default idle loop */
+ void (*idle_loop)(void);
+
+ /* Function to enable performance monitor counters for this
+ platform, called once per cpu. */
+ void (*enable_pmcs)(void);
+
+#ifdef CONFIG_PPC32 /* XXX for now */
+ /* A general init function, called by ppc_init in init/main.c.
+ May be NULL. */
+ void (*init)(void);
+
+ void (*idle)(void);
+ void (*power_save)(void);
+
+ void (*heartbeat)(void);
+ unsigned long heartbeat_reset;
+ unsigned long heartbeat_count;
+
+ void (*setup_io_mappings)(void);
+
+ void (*early_serial_map)(void);
+ void (*kgdb_map_scc)(void);
+
+ /*
+ * optional PCI "hooks"
+ */
+
+ /* Called after PPC generic resource fixup to perform
+ machine specific fixups */
+ void (*pcibios_fixup_resources)(struct pci_dev *);
+
+ /* Called for each PCI bus in the system when it's probed */
+ void (*pcibios_fixup_bus)(struct pci_bus *);
+
+ /* Called when pci_enable_device() is called (initial=0) or
+ * when a device with no assigned resource is found (initial=1).
+ * Returns 0 to allow assignment/enabling of the device. */
+ int (*pcibios_enable_device_hook)(struct pci_dev *, int initial);
+
+ /* For interrupt routing */
+ unsigned char (*pci_swizzle)(struct pci_dev *, unsigned char *);
+ int (*pci_map_irq)(struct pci_dev *, unsigned char, unsigned char);
+
+ /* Called in indirect_* to avoid touching devices */
+ int (*pci_exclude_device)(unsigned char, unsigned char);
+
+	/* Called at the very end of pcibios_init() */
+ void (*pcibios_after_init)(void);
+
+ /* this is for modules, since _machine can be a define -- Cort */
+ int ppc_machine;
+
+#ifdef CONFIG_KEXEC
+ /* Called to shutdown machine specific hardware not already controlled
+ * by other drivers.
+ * XXX Should we move this one out of kexec scope?
+ */
+ void (*machine_shutdown)(void);
+
+	/* Called to do the minimal shutdown needed for a kexec'd kernel
+	 * to run successfully.
+ * XXX Should we move this one out of kexec scope?
+ */
+ void (*machine_crash_shutdown)(void);
+
+	/* Called to do whatever setup is needed on the image and the
+	 * reboot code buffer. Returns 0 on success.
+ * Provide your own (maybe dummy) implementation if your platform
+ * claims to support kexec.
+ */
+ int (*machine_kexec_prepare)(struct kimage *image);
+
+ /* Called to handle any machine specific cleanup on image */
+ void (*machine_kexec_cleanup)(struct kimage *image);
+
+ /* Called to perform the _real_ kexec.
+ * Do NOT allocate memory or fail here. We are past the point of
+ * no return.
+ */
+ void (*machine_kexec)(struct kimage *image);
+#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_PPC32 */
+};
+
+extern void default_idle(void);
+extern void native_idle(void);
+
+extern struct machdep_calls ppc_md;
+extern char cmd_line[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_PPC_PMAC
+/*
+ * Power macintoshes have either a CUDA, PMU or SMU controlling
+ * system reset, power, NVRAM, RTC.
+ */
+typedef enum sys_ctrler_kind {
+ SYS_CTRLER_UNKNOWN = 0,
+ SYS_CTRLER_CUDA = 1,
+ SYS_CTRLER_PMU = 2,
+ SYS_CTRLER_SMU = 3,
+} sys_ctrler_t;
+extern sys_ctrler_t sys_ctrler;
+
+#endif /* CONFIG_PPC_PMAC */
+
+extern void setup_pci_ptrs(void);
+
+#ifdef CONFIG_SMP
+/* Poor default implementations */
+extern void __devinit smp_generic_give_timebase(void);
+extern void __devinit smp_generic_take_timebase(void);
+#endif /* CONFIG_SMP */
+
+
+/* Functions to produce codes on the leds.
+ * The SRC code should be unique for the message category and should
+ * be limited to the lower 24 bits (the upper 8 are set by these funcs),
+ * and (for boot & dump) should be sorted numerically in the order
+ * the events occur.
+ */
+/* Print a boot progress message. */
+void ppc64_boot_msg(unsigned int src, const char *msg);
+/* Print a termination message (print only -- does not stop the kernel) */
+void ppc64_terminate_msg(unsigned int src, const char *msg);
+
+static inline void log_error(char *buf, unsigned int err_type, int fatal)
+{
+ if (ppc_md.log_error)
+ ppc_md.log_error(buf, err_type, fatal);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_MACHDEP_H */
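A sketch of how a 32-bit board port might wire its hooks into ppc_md; every myplat_* symbol is a hypothetical placeholder, and hooks left NULL fall back to the generic behaviour where the header marks them optional:

#include <linux/init.h>
#include <asm/machdep.h>

extern void myplat_setup_arch(void);
extern void myplat_init_IRQ(void);
extern int myplat_get_irq(struct pt_regs *regs);
extern void myplat_restart(char *cmd);

static void __init myplat_init(void)
{
	ppc_md.setup_arch = myplat_setup_arch;
	ppc_md.init_IRQ   = myplat_init_IRQ;
	ppc_md.get_irq    = myplat_get_irq;
	ppc_md.restart    = myplat_restart;
}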
diff --git a/include/asm-powerpc/macio.h b/include/asm-powerpc/macio.h
new file mode 100644
index 00000000000..b553dd4b139
--- /dev/null
+++ b/include/asm-powerpc/macio.h
@@ -0,0 +1,140 @@
+#ifndef __MACIO_ASIC_H__
+#define __MACIO_ASIC_H__
+
+#include <asm/of_device.h>
+
+extern struct bus_type macio_bus_type;
+
+/* MacIO device driver is defined later */
+struct macio_driver;
+struct macio_chip;
+
+#define MACIO_DEV_COUNT_RESOURCES 8
+#define MACIO_DEV_COUNT_IRQS 8
+
+/*
+ * the macio_bus structure is used to describe a "virtual" bus
+ * within a MacIO ASIC. It's typically provided by a macio_pci_asic
+ * PCI device, but could be provided differently as well (nubus
+ * machines using a fake OF tree).
+ *
+ * The pdev field can be NULL on non-PCI machines
+ */
+struct macio_bus
+{
+ struct macio_chip *chip; /* macio_chip (private use) */
+ int index; /* macio chip index in system */
+#ifdef CONFIG_PCI
+ struct pci_dev *pdev; /* PCI device hosting this bus */
+#endif
+};
+
+/*
+ * the macio_dev structure is used to describe a device
+ * within an Apple MacIO ASIC.
+ */
+struct macio_dev
+{
+ struct macio_bus *bus; /* macio bus this device is on */
+ struct macio_dev *media_bay; /* Device is part of a media bay */
+ struct of_device ofdev;
+ int n_resources;
+ struct resource resource[MACIO_DEV_COUNT_RESOURCES];
+ int n_interrupts;
+ struct resource interrupt[MACIO_DEV_COUNT_IRQS];
+};
+#define to_macio_device(d) container_of(d, struct macio_dev, ofdev.dev)
+#define of_to_macio_device(d) container_of(d, struct macio_dev, ofdev)
+
+extern struct macio_dev *macio_dev_get(struct macio_dev *dev);
+extern void macio_dev_put(struct macio_dev *dev);
+
+/*
+ * Accessors to resources & interrupts and other device
+ * fields
+ */
+
+static inline int macio_resource_count(struct macio_dev *dev)
+{
+ return dev->n_resources;
+}
+
+static inline unsigned long macio_resource_start(struct macio_dev *dev, int resource_no)
+{
+ return dev->resource[resource_no].start;
+}
+
+static inline unsigned long macio_resource_end(struct macio_dev *dev, int resource_no)
+{
+ return dev->resource[resource_no].end;
+}
+
+static inline unsigned long macio_resource_len(struct macio_dev *dev, int resource_no)
+{
+ struct resource *res = &dev->resource[resource_no];
+ if (res->start == 0 || res->end == 0 || res->end < res->start)
+ return 0;
+ return res->end - res->start + 1;
+}
+
+extern int macio_request_resource(struct macio_dev *dev, int resource_no, const char *name);
+extern void macio_release_resource(struct macio_dev *dev, int resource_no);
+extern int macio_request_resources(struct macio_dev *dev, const char *name);
+extern void macio_release_resources(struct macio_dev *dev);
+
+static inline int macio_irq_count(struct macio_dev *dev)
+{
+ return dev->n_interrupts;
+}
+
+static inline int macio_irq(struct macio_dev *dev, int irq_no)
+{
+ return dev->interrupt[irq_no].start;
+}
+
+static inline void macio_set_drvdata(struct macio_dev *dev, void *data)
+{
+ dev_set_drvdata(&dev->ofdev.dev, data);
+}
+
+static inline void* macio_get_drvdata(struct macio_dev *dev)
+{
+ return dev_get_drvdata(&dev->ofdev.dev);
+}
+
+static inline struct device_node *macio_get_of_node(struct macio_dev *mdev)
+{
+ return mdev->ofdev.node;
+}
+
+#ifdef CONFIG_PCI
+static inline struct pci_dev *macio_get_pci_dev(struct macio_dev *mdev)
+{
+ return mdev->bus->pdev;
+}
+#endif
+
+/*
+ * A driver for a mac-io chip based device
+ */
+struct macio_driver
+{
+ char *name;
+ struct of_device_id *match_table;
+ struct module *owner;
+
+ int (*probe)(struct macio_dev* dev, const struct of_device_id *match);
+ int (*remove)(struct macio_dev* dev);
+
+ int (*suspend)(struct macio_dev* dev, pm_message_t state);
+ int (*resume)(struct macio_dev* dev);
+ int (*shutdown)(struct macio_dev* dev);
+
+ struct device_driver driver;
+};
+#define to_macio_driver(drv) container_of(drv,struct macio_driver, driver)
+
+extern int macio_register_driver(struct macio_driver *);
+extern void macio_unregister_driver(struct macio_driver *);
+
+#endif /* __MACIO_ASIC_H__ */
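A skeleton mac-io client driver built on the structures above; the match string and all mydrv_* names are hypothetical:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/macio.h>

static struct of_device_id mydrv_match[] = {
	{ .name = "mydev" },
	{},
};

static int mydrv_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1)
		return -ENODEV;
	printk("mydev at %#lx, irq %d\n",
	       macio_resource_start(mdev, 0), macio_irq(mdev, 0));
	return 0;
}

static struct macio_driver mydrv_driver = {
	.name		= "mydrv",
	.match_table	= mydrv_match,
	.probe		= mydrv_probe,
};

static int __init mydrv_init(void)
{
	return macio_register_driver(&mydrv_driver);
}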
diff --git a/include/asm-powerpc/mediabay.h b/include/asm-powerpc/mediabay.h
new file mode 100644
index 00000000000..9daa3252d7b
--- /dev/null
+++ b/include/asm-powerpc/mediabay.h
@@ -0,0 +1,31 @@
+/*
+ * mediabay.h: definitions for using the media bay
+ * on PowerBook 3400 and similar computers.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ */
+#ifndef _PPC_MEDIABAY_H
+#define _PPC_MEDIABAY_H
+
+#ifdef __KERNEL__
+
+#define MB_FD 0 /* media bay contains floppy drive (automatic eject ?) */
+#define MB_FD1 1 /* media bay contains floppy drive (manual eject ?) */
+#define MB_SOUND 2 /* sound device ? */
+#define MB_CD 3 /* media bay contains ATA drive such as CD or ZIP */
+#define MB_PCI 5 /* media bay contains a PCI device */
+#define MB_POWER 6 /* media bay contains a Power device (???) */
+#define MB_NO 7 /* media bay contains nothing */
+
+int check_media_bay(struct device_node *which_bay, int what);
+int check_media_bay_by_base(unsigned long base, int what);
+
+/* Number of bays in the machine or 0 */
+extern int media_bay_count;
+
+/* called by pmac-ide.c to register IDE controller for media bay */
+extern int media_bay_set_ide_infos(struct device_node* which_bay,
+ unsigned long base, int irq, int index);
+
+#endif /* __KERNEL__ */
+#endif /* _PPC_MEDIABAY_H */
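A one-line sketch of check_media_bay(); it assumes the function returns 0 when the bay content matches the requested type, and bay_node is a device_node obtained elsewhere:

#include <asm/mediabay.h>

static int my_bay_has_cd(struct device_node *bay_node)
{
	return check_media_bay(bay_node, MB_CD) == 0;
}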
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
new file mode 100644
index 00000000000..7083d1f7426
--- /dev/null
+++ b/include/asm-powerpc/mpic.h
@@ -0,0 +1,287 @@
+#ifndef _ASM_POWERPC_MPIC_H
+#define _ASM_POWERPC_MPIC_H
+
+#include <linux/irq.h>
+
+/*
+ * Global registers
+ */
+
+#define MPIC_GREG_BASE 0x01000
+
+#define MPIC_GREG_FEATURE_0 0x00000
+#define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000
+#define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16
+#define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00
+#define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8
+#define MPIC_GREG_FEATURE_VERSION_MASK 0xff
+#define MPIC_GREG_FEATURE_1 0x00010
+#define MPIC_GREG_GLOBAL_CONF_0 0x00020
+#define MPIC_GREG_GCONF_RESET 0x80000000
+#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
+#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
+#define MPIC_GREG_GLOBAL_CONF_1 0x00030
+#define MPIC_GREG_VENDOR_0 0x00040
+#define MPIC_GREG_VENDOR_1 0x00050
+#define MPIC_GREG_VENDOR_2 0x00060
+#define MPIC_GREG_VENDOR_3 0x00070
+#define MPIC_GREG_VENDOR_ID 0x00080
+#define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000
+#define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
+#define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8
+#define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
+#define MPIC_GREG_PROCESSOR_INIT 0x00090
+#define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0
+#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
+#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
+#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
+#define MPIC_GREG_SPURIOUS 0x000e0
+#define MPIC_GREG_TIMER_FREQ 0x000f0
+
+/*
+ *
+ * Timer registers
+ */
+#define MPIC_TIMER_BASE 0x01100
+#define MPIC_TIMER_STRIDE 0x40
+
+#define MPIC_TIMER_CURRENT_CNT 0x00000
+#define MPIC_TIMER_BASE_CNT 0x00010
+#define MPIC_TIMER_VECTOR_PRI 0x00020
+#define MPIC_TIMER_DESTINATION 0x00030
+
+/*
+ * Per-Processor registers
+ */
+
+#define MPIC_CPU_THISBASE 0x00000
+#define MPIC_CPU_BASE 0x20000
+#define MPIC_CPU_STRIDE 0x01000
+
+#define MPIC_CPU_IPI_DISPATCH_0 0x00040
+#define MPIC_CPU_IPI_DISPATCH_1 0x00050
+#define MPIC_CPU_IPI_DISPATCH_2 0x00060
+#define MPIC_CPU_IPI_DISPATCH_3 0x00070
+#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
+#define MPIC_CPU_TASKPRI_MASK 0x0000000f
+#define MPIC_CPU_WHOAMI 0x00090
+#define MPIC_CPU_WHOAMI_MASK 0x0000001f
+#define MPIC_CPU_INTACK 0x000a0
+#define MPIC_CPU_EOI 0x000b0
+
+/*
+ * Per-source registers
+ */
+
+#define MPIC_IRQ_BASE 0x10000
+#define MPIC_IRQ_STRIDE 0x00020
+#define MPIC_IRQ_VECTOR_PRI 0x00000
+#define MPIC_VECPRI_MASK 0x80000000
+#define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */
+#define MPIC_VECPRI_PRIORITY_MASK 0x000f0000
+#define MPIC_VECPRI_PRIORITY_SHIFT 16
+#define MPIC_VECPRI_VECTOR_MASK 0x000007ff
+#define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000
+#define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000
+#define MPIC_VECPRI_POLARITY_MASK 0x00800000
+#define MPIC_VECPRI_SENSE_LEVEL 0x00400000
+#define MPIC_VECPRI_SENSE_EDGE 0x00000000
+#define MPIC_VECPRI_SENSE_MASK 0x00400000
+#define MPIC_IRQ_DESTINATION 0x00010
+
+#define MPIC_MAX_IRQ_SOURCES 2048
+#define MPIC_MAX_CPUS 32
+#define MPIC_MAX_ISU 32
+
+/*
+ * Special vector numbers (internal use only)
+ */
+#define MPIC_VEC_SPURRIOUS 255
+#define MPIC_VEC_IPI_3 254
+#define MPIC_VEC_IPI_2 253
+#define MPIC_VEC_IPI_1 252
+#define MPIC_VEC_IPI_0 251
+
+/* unused */
+#define MPIC_VEC_TIMER_3 250
+#define MPIC_VEC_TIMER_2 249
+#define MPIC_VEC_TIMER_1 248
+#define MPIC_VEC_TIMER_0 247
+
+/* Type definition of the cascade handler */
+typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+/* Fixup table entry */
+struct mpic_irq_fixup
+{
+ u8 __iomem *base;
+ unsigned int irq;
+};
+#endif /* CONFIG_MPIC_BROKEN_U3 */
+
+
+/* The instance data of a given MPIC */
+struct mpic
+{
+ /* The "linux" controller struct */
+ hw_irq_controller hc_irq;
+#ifdef CONFIG_SMP
+ hw_irq_controller hc_ipi;
+#endif
+ const char *name;
+ /* Flags */
+ unsigned int flags;
+ /* How many irq sources in a given ISU */
+ unsigned int isu_size;
+ unsigned int isu_shift;
+ unsigned int isu_mask;
+ /* Offset of irq vector numbers */
+ unsigned int irq_offset;
+ unsigned int irq_count;
+ /* Offset of ipi vector numbers */
+ unsigned int ipi_offset;
+ /* Number of sources */
+ unsigned int num_sources;
+ /* Number of CPUs */
+ unsigned int num_cpus;
+ /* cascade handler */
+ mpic_cascade_t cascade;
+ void *cascade_data;
+ unsigned int cascade_vec;
+ /* senses array */
+ unsigned char *senses;
+ unsigned int senses_count;
+
+#ifdef CONFIG_MPIC_BROKEN_U3
+ /* The fixup table */
+ struct mpic_irq_fixup *fixups;
+ spinlock_t fixup_lock;
+#endif
+
+ /* The various ioremap'ed bases */
+ volatile u32 __iomem *gregs;
+ volatile u32 __iomem *tmregs;
+ volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS];
+ volatile u32 __iomem *isus[MPIC_MAX_ISU];
+
+ /* link */
+ struct mpic *next;
+};
+
+/* This is the primary controller; only that one has IPIs and
+ * has affinity control. A non-primary MPIC always uses CPU0
+ * registers only
+ */
+#define MPIC_PRIMARY 0x00000001
+/* Set this for a big-endian MPIC */
+#define MPIC_BIG_ENDIAN 0x00000002
+/* Broken U3 MPIC */
+#define MPIC_BROKEN_U3 0x00000004
+/* Broken IPI registers (autodetected) */
+#define MPIC_BROKEN_IPI 0x00000008
+/* MPIC wants a reset */
+#define MPIC_WANTS_RESET 0x00000010
+
+/* Allocate the controller structure and set up the linux irq descs
+ * for the range of interrupts passed in. No HW initialization is
+ * actually performed.
+ *
+ * @phys_addr: physical base address of the MPIC
+ * @flags: flags, see constants above
+ * @isu_size: number of interrupts in an ISU. Use 0 to use a
+ * standard ISU-less setup (aka powermac)
+ * @irq_offset: first irq number to assign to this mpic
+ * @irq_count: number of irqs to use for this mpic's IRQ sources. Pass 0
+ * to match the number of sources
+ * @ipi_offset: first irq number to assign to this mpic IPI sources,
+ * used only on primary mpic
+ * @senses: array of sense values
+ * @senses_num: number of entries in the array
+ *
+ * Note about the sense array. If none is passed, all interrupts are
+ * set up to be level negative unless MPIC_BROKEN_U3 is set, in which
+ * case they are edge positive (and the array is ignored anyway).
+ * The values in the array start at the first source of the MPIC,
+ * that is, senses[0] corresponds to linux irq "irq_offset".
+ */
+extern struct mpic *mpic_alloc(unsigned long phys_addr,
+ unsigned int flags,
+ unsigned int isu_size,
+ unsigned int irq_offset,
+ unsigned int irq_count,
+ unsigned int ipi_offset,
+ unsigned char *senses,
+ unsigned int senses_num,
+ const char *name);
+
+/* Assign ISUs, to call before mpic_init()
+ *
+ * @mpic: controller structure as returned by mpic_alloc()
+ * @isu_num: ISU number
+ * @phys_addr: physical address of the ISU
+ */
+extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
+ unsigned long phys_addr);
+
+/* Initialize the controller. After this has been called, none of the above
+ * should be called again for this mpic
+ */
+extern void mpic_init(struct mpic *mpic);
+
+/* Set up a cascade. Currently, only one cascade is supported through
+ * this interface, though you can always do a normal request_irq() and
+ * hook up further cascades yourself. You should call this _after_ having
+ * added all the ISUs
+ *
+ * @irq_no: "linux" irq number of the cascade (that is offset'ed vector)
+ * @handler: cascade handler function
+ */
+extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t handler,
+ void *data);
+
+/*
+ * All of the following functions must only be used after the
+ * ISUs have been assigned and the controller fully initialized
+ * with mpic_init()
+ */
+
+
+/* Change/Read the priority of an interrupt. Default is 8 for irqs and
+ * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but for
+ * an IPI you must pass the offset'ed number (the linux irq number mapped to the IPI)
+ */
+extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
+extern unsigned int mpic_irq_get_priority(unsigned int irq);
+
+/* Setup a non-boot CPU */
+extern void mpic_setup_this_cpu(void);
+
+/* Clean up for kexec (or cpu offline or ...) */
+extern void mpic_teardown_this_cpu(int secondary);
+
+/* Get the current cpu priority for this cpu (0..15) */
+extern int mpic_cpu_get_priority(void);
+
+/* Set the current cpu priority for this cpu */
+extern void mpic_cpu_set_priority(int prio);
+
+/* Request IPIs on primary mpic */
+extern void mpic_request_ipis(void);
+
+/* Send an IPI (non-offset'ed number 0..3) */
+extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
+
+/* Send a message (IPI) to a given target (cpu number or MSG_*) */
+void smp_mpic_message_pass(int target, int msg);
+
+/* Fetch interrupt from a given mpic */
+extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
+/* This one gets to the primary mpic */
+extern int mpic_get_irq(struct pt_regs *regs);
+
+/* global mpic for pSeries */
+extern struct mpic *pSeries_mpic;
+
+#endif /* _ASM_POWERPC_MPIC_H */
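A sketch of the bring-up sequence documented above for a primary, ISU-less (powermac-style) MPIC; the physical address and IRQ layout are made-up example values:

#include <linux/init.h>
#include <asm/mpic.h>

static void __init my_init_IRQ(void)
{
	struct mpic *mpic;

	mpic = mpic_alloc(0x80040000,			/* example phys addr */
			  MPIC_PRIMARY | MPIC_WANTS_RESET,
			  0,				/* ISU-less setup */
			  0,				/* irq_offset */
			  128,				/* irq_count */
			  128,				/* ipi_offset */
			  NULL, 0,			/* default senses */
			  "My MPIC");
	mpic_init(mpic);
#ifdef CONFIG_SMP
	mpic_request_ipis();
#endif
}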
diff --git a/include/asm-powerpc/numnodes.h b/include/asm-powerpc/numnodes.h
new file mode 100644
index 00000000000..795533aca09
--- /dev/null
+++ b/include/asm-powerpc/numnodes.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_POWERPC_MAX_NUMNODES_H
+#define _ASM_POWERPC_MAX_NUMNODES_H
+
+/* Max 16 Nodes */
+#define NODES_SHIFT 4
+
+#endif /* _ASM_POWERPC_MAX_NUMNODES_H */
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h
new file mode 100644
index 00000000000..ddb16aae0bd
--- /dev/null
+++ b/include/asm-powerpc/of_device.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_POWERPC_OF_DEVICE_H
+#define _ASM_POWERPC_OF_DEVICE_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <asm/prom.h>
+
+/*
+ * The of_platform_bus_type is a bus type used by drivers that do not
+ * attach to a macio or similar bus but still use the OF probing
+ * mechanism
+ */
+extern struct bus_type of_platform_bus_type;
+
+/*
+ * The of_device is a kind of "base class" that is a superset of
+ * struct device for use by devices attached to an OF node and
+ * probed using OF properties
+ */
+struct of_device
+{
+ struct device_node *node; /* OF device node */
+ u64 dma_mask; /* DMA mask */
+ struct device dev; /* Generic device interface */
+};
+#define to_of_device(d) container_of(d, struct of_device, dev)
+
+extern const struct of_device_id *of_match_device(
+ const struct of_device_id *matches, const struct of_device *dev);
+
+extern struct of_device *of_dev_get(struct of_device *dev);
+extern void of_dev_put(struct of_device *dev);
+
+/*
+ * An of_platform_driver driver is attached to a basic of_device on
+ * the "platform bus" (of_platform_bus_type)
+ */
+struct of_platform_driver
+{
+ char *name;
+ struct of_device_id *match_table;
+ struct module *owner;
+
+ int (*probe)(struct of_device* dev, const struct of_device_id *match);
+ int (*remove)(struct of_device* dev);
+
+ int (*suspend)(struct of_device* dev, pm_message_t state);
+ int (*resume)(struct of_device* dev);
+ int (*shutdown)(struct of_device* dev);
+
+ struct device_driver driver;
+};
+#define to_of_platform_driver(drv) container_of(drv,struct of_platform_driver, driver)
+
+extern int of_register_driver(struct of_platform_driver *drv);
+extern void of_unregister_driver(struct of_platform_driver *drv);
+extern int of_device_register(struct of_device *ofdev);
+extern void of_device_unregister(struct of_device *ofdev);
+extern struct of_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+extern void of_release_dev(struct device *dev);
+
+#endif /* _ASM_POWERPC_OF_DEVICE_H */
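A skeleton driver on the OF platform bus using the structures above; the compatible string and all myof_* names are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/of_device.h>

static struct of_device_id myof_match[] = {
	{ .compatible = "myvendor,mydev" },
	{},
};

static int myof_probe(struct of_device *ofdev, const struct of_device_id *match)
{
	printk("probed OF node %s\n", ofdev->node->full_name);
	return 0;
}

static struct of_platform_driver myof_driver = {
	.name		= "myofdrv",
	.match_table	= myof_match,
	.probe		= myof_probe,
};

static int __init myof_init(void)
{
	return of_register_driver(&myof_driver);
}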
diff --git a/include/asm-powerpc/ohare.h b/include/asm-powerpc/ohare.h
new file mode 100644
index 00000000000..023b5977223
--- /dev/null
+++ b/include/asm-powerpc/ohare.h
@@ -0,0 +1,48 @@
+/*
+ * ohare.h: definitions for using the "O'Hare" I/O controller chip.
+ *
+ * Copyright (C) 1997 Paul Mackerras.
+ *
+ * BenH: Changed to match those of heathrow (but not all of them). Please
+ * check if I didn't break anything (especially the media bay).
+ */
+
+/* offset from ohare base for feature control register */
+#define OHARE_MBCR 0x34
+#define OHARE_FCR 0x38
+
+/*
+ * Bits in feature control register.
+ * These were mostly derived by experiment on a powerbook 3400
+ * and may differ for other machines.
+ */
+#define OH_SCC_RESET 1
+#define OH_BAY_POWER_N 2 /* a guess */
+#define OH_BAY_PCI_ENABLE 4 /* a guess */
+#define OH_BAY_IDE_ENABLE 8
+#define OH_BAY_FLOPPY_ENABLE 0x10
+#define OH_IDE0_ENABLE 0x20
+#define OH_IDE0_RESET_N 0x40 /* a guess */
+#define OH_BAY_DEV_MASK 0x1c
+#define OH_BAY_RESET_N 0x80
+#define OH_IOBUS_ENABLE 0x100 /* IOBUS seems to be IDE */
+#define OH_SCC_ENABLE 0x200
+#define OH_MESH_ENABLE 0x400
+#define OH_FLOPPY_ENABLE 0x800
+#define OH_SCCA_IO 0x4000
+#define OH_SCCB_IO 0x8000
+#define OH_VIA_ENABLE 0x10000 /* Is apparently wrong, to be verified */
+#define OH_IDE1_RESET_N 0x800000
+
+/*
+ * Bits to set in the feature control register on PowerBooks.
+ */
+#define PBOOK_FEATURES (OH_IDE_ENABLE | OH_SCC_ENABLE | \
+ OH_MESH_ENABLE | OH_SCCA_IO | OH_SCCB_IO)
+
+/*
+ * A magic value to put into the feature control register of the
+ * "ohare" I/O controller on Starmaxes to enable the IDE CD interface.
+ * Contributed by Harry Eaton.
+ */
+#define STARMAX_FEATURES 0xbeff7a
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
new file mode 100644
index 00000000000..8013cd273ce
--- /dev/null
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * Based on alpha version.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
+#define _ASM_POWERPC_OPROFILE_IMPL_H
+
+#define OP_MAX_COUNTER 8
+
+/* Per-counter configuration as set via oprofilefs. */
+struct op_counter_config {
+#ifdef __powerpc64__
+ unsigned long valid;
+#endif
+ unsigned long enabled;
+ unsigned long event;
+ unsigned long count;
+ unsigned long kernel;
+#ifdef __powerpc64__
+	/* We don't support per-counter user/kernel selection */
+#endif
+ unsigned long user;
+ unsigned long unit_mask;
+};
+
+/* System-wide configuration as set via oprofilefs. */
+struct op_system_config {
+#ifdef __powerpc64__
+ unsigned long mmcr0;
+ unsigned long mmcr1;
+ unsigned long mmcra;
+#endif
+ unsigned long enable_kernel;
+ unsigned long enable_user;
+#ifdef __powerpc64__
+ unsigned long backtrace_spinlocks;
+#endif
+};
+
+/* Per-arch configuration */
+struct op_powerpc_model {
+ void (*reg_setup) (struct op_counter_config *,
+ struct op_system_config *,
+ int num_counters);
+#ifdef __powerpc64__
+ void (*cpu_setup) (void *);
+#endif
+ void (*start) (struct op_counter_config *);
+ void (*stop) (void);
+ void (*handle_interrupt) (struct pt_regs *,
+ struct op_counter_config *);
+ int num_counters;
+};
+
+#ifdef __powerpc64__
+extern struct op_powerpc_model op_model_rs64;
+extern struct op_powerpc_model op_model_power4;
+
+static inline unsigned int ctr_read(unsigned int i)
+{
+ switch(i) {
+ case 0:
+ return mfspr(SPRN_PMC1);
+ case 1:
+ return mfspr(SPRN_PMC2);
+ case 2:
+ return mfspr(SPRN_PMC3);
+ case 3:
+ return mfspr(SPRN_PMC4);
+ case 4:
+ return mfspr(SPRN_PMC5);
+ case 5:
+ return mfspr(SPRN_PMC6);
+ case 6:
+ return mfspr(SPRN_PMC7);
+ case 7:
+ return mfspr(SPRN_PMC8);
+ default:
+ return 0;
+ }
+}
+
+static inline void ctr_write(unsigned int i, unsigned int val)
+{
+ switch(i) {
+ case 0:
+ mtspr(SPRN_PMC1, val);
+ break;
+ case 1:
+ mtspr(SPRN_PMC2, val);
+ break;
+ case 2:
+ mtspr(SPRN_PMC3, val);
+ break;
+ case 3:
+ mtspr(SPRN_PMC4, val);
+ break;
+ case 4:
+ mtspr(SPRN_PMC5, val);
+ break;
+ case 5:
+ mtspr(SPRN_PMC6, val);
+ break;
+ case 6:
+ mtspr(SPRN_PMC7, val);
+ break;
+ case 7:
+ mtspr(SPRN_PMC8, val);
+ break;
+ default:
+ break;
+ }
+}
+#endif /* __powerpc64__ */
+
+#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/include/asm-powerpc/pSeries_reconfig.h b/include/asm-powerpc/pSeries_reconfig.h
new file mode 100644
index 00000000000..c0db1ea7f7d
--- /dev/null
+++ b/include/asm-powerpc/pSeries_reconfig.h
@@ -0,0 +1,25 @@
+#ifndef _PPC64_PSERIES_RECONFIG_H
+#define _PPC64_PSERIES_RECONFIG_H
+
+#include <linux/notifier.h>
+
+/*
+ * Use this API if your code needs to know about OF device nodes being
+ * added or removed on pSeries systems.
+ */
+
+#define PSERIES_RECONFIG_ADD 0x0001
+#define PSERIES_RECONFIG_REMOVE 0x0002
+
+#ifdef CONFIG_PPC_PSERIES
+extern int pSeries_reconfig_notifier_register(struct notifier_block *);
+extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
+#else /* !CONFIG_PPC_PSERIES */
+static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { }
+#endif /* CONFIG_PPC_PSERIES */
+
+#endif /* _PPC64_PSERIES_RECONFIG_H */
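A sketch of a reconfig notifier client; my_reconfig is hypothetical, and the notifier data is assumed to be the affected device node:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/pSeries_reconfig.h>

static int my_reconfig(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	if (action == PSERIES_RECONFIG_ADD)
		printk("device node added\n");
	else if (action == PSERIES_RECONFIG_REMOVE)
		printk("device node removed\n");
	return NOTIFY_OK;
}

static struct notifier_block my_reconfig_nb = {
	.notifier_call = my_reconfig,
};

static int __init my_reconfig_init(void)
{
	return pSeries_reconfig_notifier_register(&my_reconfig_nb);
}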
diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h
new file mode 100644
index 00000000000..d86b410a6f8
--- /dev/null
+++ b/include/asm-powerpc/parport.h
@@ -0,0 +1,18 @@
+/*
+ * parport.h: platform-specific PC-style parport initialisation
+ *
+ * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
+ *
+ * This file should only be included by drivers/parport/parport_pc.c.
+ */
+
+#ifndef _ASM_POWERPC_PARPORT_H
+#define _ASM_POWERPC_PARPORT_H
+
+static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
+{
+ return parport_pc_find_isa_ports (autoirq, autodma);
+}
+
+#endif /* !(_ASM_POWERPC_PARPORT_H) */
diff --git a/include/asm-powerpc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
new file mode 100644
index 00000000000..e9683bcff19
--- /dev/null
+++ b/include/asm-powerpc/pmac_feature.h
@@ -0,0 +1,380 @@
+/*
+ * Definition of platform feature hooks for PowerMacs
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Paul Mackerras &
+ * Ben. Herrenschmidt.
+ *
+ *
+ * Note: I removed media-bay details from the feature stuff; I believe it's
+ * not worth it, since the media-bay driver can directly use the mac-io
+ * ASIC registers.
+ *
+ * Implementation note: Currently, none of these functions will block.
+ * However, they may internally protect themselves with a spinlock
+ * for way too long. Be prepared for at least some of these to block
+ * in the future.
+ *
+ * Unless specifically defined, the result code is assumed to be an
+ * error when negative; 0 is the default success result. Some functions
+ * may return additional positive result values.
+ *
+ * To keep implementation simple, all feature calls are assumed to have
+ * the prototype parameters (struct device_node* node, int value).
+ * When either is not used, pass 0.
+ */
+
+#ifdef __KERNEL__
+#ifndef __PPC_ASM_PMAC_FEATURE_H
+#define __PPC_ASM_PMAC_FEATURE_H
+
+#include <asm/macio.h>
+#include <asm/machdep.h>
+
+/*
+ * Known Mac motherboard models
+ *
+ * Please, report any error here to benh@kernel.crashing.org, thanks !
+ *
+ * Note that I don't fully maintain this list for Core99 & MacRISC2
+ * and I'm considering removing all NewWorld entries from it to
+ * rely entirely on the model string instead.
+ */
+
+/* PowerSurge machines are the first generation of PCI Pmacs. This includes
+ * all of the Grand-Central based machines. We currently don't
+ * differentiate most of them.
+ */
+#define PMAC_TYPE_PSURGE 0x10 /* PowerSurge */
+#define PMAC_TYPE_ANS 0x11 /* Apple Network Server */
+
+/* Here is the infamous series of OHare based machines
+ */
+#define PMAC_TYPE_COMET			0x20	/* Believed to be PowerBook 2400 */
+#define PMAC_TYPE_HOOPER		0x21	/* Believed to be PowerBook 3400 */
+#define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */
+#define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */
+#define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */
+#define PMAC_TYPE_UNKNOWN_OHARE 0x2f /* Unknown, but OHare based */
+
+/* Here are the Heathrow based machines
+ * FIXME: Differentiate wallstreet, mainstreet, wallstreetII
+ */
+#define PMAC_TYPE_GOSSAMER 0x30 /* Gossamer motherboard */
+#define PMAC_TYPE_SILK 0x31 /* Desktop PowerMac G3 */
+#define PMAC_TYPE_WALLSTREET 0x32 /* Wallstreet/Mainstreet PowerBook*/
+#define PMAC_TYPE_UNKNOWN_HEATHROW 0x3f /* Unknown but heathrow based */
+
+/* Here are newworld machines based on Paddington (heathrow derivative)
+ */
+#define PMAC_TYPE_101_PBOOK 0x40 /* 101 PowerBook (aka Lombard) */
+#define PMAC_TYPE_ORIG_IMAC 0x41 /* First generation iMac */
+#define PMAC_TYPE_YOSEMITE 0x42 /* B&W G3 */
+#define PMAC_TYPE_YIKES 0x43 /* Yikes G4 (PCI graphics) */
+#define PMAC_TYPE_UNKNOWN_PADDINGTON 0x4f /* Unknown but paddington based */
+
+/* Core99 machines based on UniNorth 1.0 and 1.5
+ *
+ * Note: A single entry here may cover several actual models according
+ * to the device-tree. (Sawtooth is most tower G4s, FW_IMAC is most
+ * FireWire based iMacs, etc...). Those machines are too similar to be
+ * distinguished here; when they need to be differentiated, use the
+ * device-tree "model" or "compatible" property.
+ */
+#define PMAC_TYPE_ORIG_IBOOK 0x40 /* First iBook model (no firewire) */
+#define PMAC_TYPE_SAWTOOTH 0x41 /* Desktop G4s */
+#define PMAC_TYPE_FW_IMAC 0x42 /* FireWire iMacs (except Pangea based) */
+#define PMAC_TYPE_FW_IBOOK 0x43 /* FireWire iBooks (except iBook2) */
+#define PMAC_TYPE_CUBE 0x44 /* Cube PowerMac */
+#define PMAC_TYPE_QUICKSILVER 0x45 /* QuickSilver G4s */
+#define PMAC_TYPE_PISMO 0x46 /* Pismo PowerBook */
+#define PMAC_TYPE_TITANIUM 0x47 /* Titanium PowerBook */
+#define PMAC_TYPE_TITANIUM2 0x48 /* Titanium II PowerBook (no L3, M6) */
+#define PMAC_TYPE_TITANIUM3 0x49 /* Titanium III PowerBook (with L3 & M7) */
+#define PMAC_TYPE_TITANIUM4 0x50 /* Titanium IV PowerBook (with L3 & M9) */
+#define PMAC_TYPE_EMAC 0x50 /* eMac */
+#define PMAC_TYPE_UNKNOWN_CORE99 0x5f
+
+/* MacRisc2 with UniNorth 2.0 */
+#define PMAC_TYPE_RACKMAC 0x80 /* XServe */
+#define PMAC_TYPE_WINDTUNNEL 0x81
+
+/* MacRISC2 machines based on the Pangea chipset
+ */
+#define PMAC_TYPE_PANGEA_IMAC 0x100 /* Flower Power iMac */
+#define PMAC_TYPE_IBOOK2 0x101 /* iBook2 (polycarbonate) */
+#define PMAC_TYPE_FLAT_PANEL_IMAC 0x102 /* Flat panel iMac */
+#define PMAC_TYPE_UNKNOWN_PANGEA 0x10f
+
+/* MacRISC2 machines based on the Intrepid chipset
+ */
+#define PMAC_TYPE_UNKNOWN_INTREPID 0x11f /* Generic */
+
+/* MacRISC4 / G5 machines. We don't have per-machine selection here anymore,
+ * but rather machine families
+ */
+#define PMAC_TYPE_POWERMAC_G5 0x150 /* U3 & U3H based */
+#define PMAC_TYPE_POWERMAC_G5_U3L 0x151 /* U3L based desktop */
+#define PMAC_TYPE_IMAC_G5 0x152 /* iMac G5 */
+#define PMAC_TYPE_XSERVE_G5 0x153 /* Xserve G5 */
+#define PMAC_TYPE_UNKNOWN_K2 0x19f /* Any other K2 based */
+
+/*
+ * Motherboard flags
+ */
+
+#define PMAC_MB_CAN_SLEEP 0x00000001
+#define PMAC_MB_HAS_FW_POWER 0x00000002
+#define PMAC_MB_OLD_CORE99 0x00000004
+#define PMAC_MB_MOBILE 0x00000008
+#define PMAC_MB_MAY_SLEEP 0x00000010
+
+/*
+ * Feature calls supported on pmac
+ *
+ */
+
+/*
+ * Use this inline wrapper rather than calling ppc_md.feature_call directly
+ */
+struct device_node;
+
+static inline long pmac_call_feature(int selector, struct device_node* node,
+ long param, long value)
+{
+ if (!ppc_md.feature_call)
+ return -ENODEV;
+ return ppc_md.feature_call(selector, node, param, value);
+}
+
+/* PMAC_FTR_SCC_ENABLE (struct device_node* node, int param, int value)
+ * enable/disable an SCC side. Pass the node corresponding to the
+ * channel side as a parameter.
+ * param is the type of port;
+ * if param is ORed with PMAC_SCC_FLAG_XMON, then the SCC is locked in the
+ * enabled state for use by xmon.
+ */
+#define PMAC_FTR_SCC_ENABLE PMAC_FTR_DEF(0)
+ #define PMAC_SCC_ASYNC 0
+ #define PMAC_SCC_IRDA 1
+ #define PMAC_SCC_I2S1 2
+ #define PMAC_SCC_FLAG_XMON 0x00001000
+
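+/* Illustrative sketch (the node variable 'np' is hypothetical): a serial
+ * driver could enable the SCC channel described by 'np' for asynchronous
+ * use with
+ *
+ *	pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, PMAC_SCC_ASYNC, 1);
+ *
+ * and disable it again by passing 0 as the last argument.
+ */
+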
+/* PMAC_FTR_MODEM_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the internal modem.
+ */
+#define PMAC_FTR_MODEM_ENABLE PMAC_FTR_DEF(1)
+
+/* PMAC_FTR_SWIM3_ENABLE (struct device_node* node, 0,int value)
+ * enable/disable the swim3 (floppy) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_SWIM3_ENABLE PMAC_FTR_DEF(2)
+
+/* PMAC_FTR_MESH_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the mesh (scsi) cell of a mac-io ASIC
+ */
+#define PMAC_FTR_MESH_ENABLE PMAC_FTR_DEF(3)
+
+/* PMAC_FTR_IDE_ENABLE (struct device_node* node, int busID, int value)
+ * enable/disable an IDE port of a mac-io ASIC
+ * pass the busID parameter
+ */
+#define PMAC_FTR_IDE_ENABLE PMAC_FTR_DEF(4)
+
+/* PMAC_FTR_IDE_RESET (struct device_node* node, int busID, int value)
+ * assert(1)/release(0) an IDE reset line (mac-io IDE only)
+ */
+#define PMAC_FTR_IDE_RESET PMAC_FTR_DEF(5)
+
+/* PMAC_FTR_BMAC_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the bmac (ethernet) cell of a mac-io ASIC; also drives
+ * its reset line
+ */
+#define PMAC_FTR_BMAC_ENABLE PMAC_FTR_DEF(6)
+
+/* PMAC_FTR_GMAC_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the gmac (ethernet) cell of an uninorth ASIC. This
+ * controls the cell's clock.
+ */
+#define PMAC_FTR_GMAC_ENABLE PMAC_FTR_DEF(7)
+
+/* PMAC_FTR_GMAC_PHY_RESET (struct device_node* node, 0, 0)
+ * Perform a HW reset of the PHY connected to a gmac controller.
+ * Pass the gmac device node, not the PHY node.
+ */
+#define PMAC_FTR_GMAC_PHY_RESET PMAC_FTR_DEF(8)
+
+/* PMAC_FTR_SOUND_CHIP_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the sound chip, whatever it is and provided it can
+ * actually be controlled
+ */
+#define PMAC_FTR_SOUND_CHIP_ENABLE PMAC_FTR_DEF(9)
+
+/* -- add various tweaks related to sound routing -- */
+
+/* PMAC_FTR_AIRPORT_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the airport card
+ */
+#define PMAC_FTR_AIRPORT_ENABLE PMAC_FTR_DEF(10)
+
+/* PMAC_FTR_RESET_CPU (NULL, int cpu_nr, 0)
+ * toggle the reset line of a CPU on an uninorth-based SMP machine
+ */
+#define PMAC_FTR_RESET_CPU PMAC_FTR_DEF(11)
+
+/* PMAC_FTR_USB_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable a USB cell, along with the power of the USB "pad"
+ * on keylargo based machines
+ */
+#define PMAC_FTR_USB_ENABLE PMAC_FTR_DEF(12)
+
+/* PMAC_FTR_1394_ENABLE (struct device_node* node, 0, int value)
+ * enable/disable the firewire cell of an uninorth ASIC.
+ */
+#define PMAC_FTR_1394_ENABLE PMAC_FTR_DEF(13)
+
+/* PMAC_FTR_1394_CABLE_POWER (struct device_node* node, 0, int value)
+ * enable/disable the firewire cable power supply of the uninorth
+ * firewire cell
+ */
+#define PMAC_FTR_1394_CABLE_POWER PMAC_FTR_DEF(14)
+
+/* PMAC_FTR_SLEEP_STATE (struct device_node* node, 0, int value)
+ * set the sleep state of the motherboard.
+ *
+ * Pass -1 as value to query for sleep capability
+ * Pass 1 to set IOs to sleep
+ * Pass 0 to set IOs to wake
+ */
+#define PMAC_FTR_SLEEP_STATE PMAC_FTR_DEF(15)
+
+/* PMAC_FTR_GET_MB_INFO (NULL, selector, 0)
+ *
+ * returns some motherboard information.
+ * selector: 0 - model id
+ * 1 - model flags (capabilities)
+ * 2 - model name (cast to const char *)
+ */
+#define PMAC_FTR_GET_MB_INFO PMAC_FTR_DEF(16)
+#define PMAC_MB_INFO_MODEL 0
+#define PMAC_MB_INFO_FLAGS 1
+#define PMAC_MB_INFO_NAME 2
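+
+/* Illustrative example: the model name can be queried with
+ *
+ *	const char *name = (const char *)
+ *		pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
+ *				  PMAC_MB_INFO_NAME, 0);
+ *
+ * following the (NULL, selector, 0) convention documented above.
+ */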
+
+/* PMAC_FTR_READ_GPIO (NULL, int index, 0)
+ *
+ * read a GPIO from a mac-io controller of type KeyLargo or Pangea.
+ * the value returned is a byte (positive), or a negative error code
+ */
+#define PMAC_FTR_READ_GPIO PMAC_FTR_DEF(17)
+
+/* PMAC_FTR_WRITE_GPIO (NULL, int index, int value)
+ *
+ * write a GPIO of a mac-io controller of type KeyLargo or Pangea.
+ */
+#define PMAC_FTR_WRITE_GPIO PMAC_FTR_DEF(18)
+
+/* PMAC_FTR_ENABLE_MPIC
+ *
+ * Enable the MPIC cell
+ */
+#define PMAC_FTR_ENABLE_MPIC PMAC_FTR_DEF(19)
+
+/* PMAC_FTR_AACK_DELAY_ENABLE (NULL, int enable, 0)
+ *
+ * Enable/disable the AACK delay on the northbridge for systems using DFS
+ */
+#define PMAC_FTR_AACK_DELAY_ENABLE PMAC_FTR_DEF(20)
+
+/* PMAC_FTR_DEVICE_CAN_WAKE
+ *
+ * Used by video drivers to inform the system that they can actually
+ * perform wakeup from sleep
+ */
+#define PMAC_FTR_DEVICE_CAN_WAKE PMAC_FTR_DEF(22)
+
+
+/* Don't use these directly; they are for the sake of pmac_setup.c */
+extern long pmac_do_feature_call(unsigned int selector, ...);
+extern void pmac_feature_init(void);
+
+/* Video suspend tweak */
+extern void pmac_set_early_video_resume(void (*proc)(void *data), void *data);
+extern void pmac_call_early_video_resume(void);
+
+#define PMAC_FTR_DEF(x) ((_MACH_Pmac << 16) | (x))
+
+/* The AGP driver registers itself here */
+extern void pmac_register_agp_pm(struct pci_dev *bridge,
+ int (*suspend)(struct pci_dev *bridge),
+ int (*resume)(struct pci_dev *bridge));
+
+/* These are meant to be used by video drivers to deal with AGP
+ * suspend/resume properly
+ */
+extern void pmac_suspend_agp_for_card(struct pci_dev *dev);
+extern void pmac_resume_agp_for_card(struct pci_dev *dev);
+
+/* Used by the via-pmu driver for suspend/resume
+ */
+extern void pmac_tweak_clock_spreading(int enable);
+
+/*
+ * The part below is for use by macio_asic.c only; do not rely
+ * on the data structures or constants below in a normal driver.
+ *
+ */
+
+#define MAX_MACIO_CHIPS 2
+
+enum {
+ macio_unknown = 0,
+ macio_grand_central,
+ macio_ohare,
+ macio_ohareII,
+ macio_heathrow,
+ macio_gatwick,
+ macio_paddington,
+ macio_keylargo,
+ macio_pangea,
+ macio_intrepid,
+ macio_keylargo2,
+};
+
+struct macio_chip
+{
+ struct device_node *of_node;
+ int type;
+ const char *name;
+ int rev;
+ volatile u32 __iomem *base;
+ unsigned long flags;
+
+ /* For use by macio_asic PCI driver */
+ struct macio_bus lbus;
+};
+
+extern struct macio_chip macio_chips[MAX_MACIO_CHIPS];
+
+#define MACIO_FLAG_SCCA_ON 0x00000001
+#define MACIO_FLAG_SCCB_ON 0x00000002
+#define MACIO_FLAG_SCC_LOCKED 0x00000004
+#define MACIO_FLAG_AIRPORT_ON 0x00000010
+#define MACIO_FLAG_FW_SUPPORTED 0x00000020
+
+extern struct macio_chip* macio_find(struct device_node* child, int type);
+
+#define MACIO_FCR32(macio, r) ((macio)->base + ((r) >> 2))
+#define MACIO_FCR8(macio, r) (((volatile u8 __iomem *)((macio)->base)) + (r))
+
+#define MACIO_IN32(r) (in_le32(MACIO_FCR32(macio,r)))
+#define MACIO_OUT32(r,v) (out_le32(MACIO_FCR32(macio,r), (v)))
+#define MACIO_BIS(r,v) (MACIO_OUT32((r), MACIO_IN32(r) | (v)))
+#define MACIO_BIC(r,v) (MACIO_OUT32((r), MACIO_IN32(r) & ~(v)))
+#define MACIO_IN8(r) (in_8(MACIO_FCR8(macio,r)))
+#define MACIO_OUT8(r,v) (out_8(MACIO_FCR8(macio,r), (v)))
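+
+/* Illustrative example (KEYLARGO_FCR0 and KL0_SCC_CELL_ENABLE are only
+ * assumed here, borrowed from asm/keylargo.h): the accessors above expect
+ * a local variable named 'macio' to be in scope, e.g.
+ *
+ *	struct macio_chip *macio = &macio_chips[0];
+ *	u32 fcr0 = MACIO_IN32(KEYLARGO_FCR0);
+ *	MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_CELL_ENABLE);
+ */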
+
+#endif /* __PPC_ASM_PMAC_FEATURE_H */
+#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/pmac_low_i2c.h b/include/asm-powerpc/pmac_low_i2c.h
new file mode 100644
index 00000000000..809a5963d5e
--- /dev/null
+++ b/include/asm-powerpc/pmac_low_i2c.h
@@ -0,0 +1,43 @@
+/*
+ * include/asm-powerpc/pmac_low_i2c.h
+ *
+ * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef __PMAC_LOW_I2C_H__
+#define __PMAC_LOW_I2C_H__
+
+/* i2c mode (based on the platform functions format) */
+enum {
+ pmac_low_i2c_mode_dumb = 1,
+ pmac_low_i2c_mode_std = 2,
+ pmac_low_i2c_mode_stdsub = 3,
+ pmac_low_i2c_mode_combined = 4,
+};
+
+/* RW bit in address */
+enum {
+ pmac_low_i2c_read = 0x01,
+ pmac_low_i2c_write = 0x00
+};
+
+/* Init, called early during boot */
+extern void pmac_init_low_i2c(void);
+
+/* Locking functions exposed to i2c-keywest */
+int pmac_low_i2c_lock(struct device_node *np);
+int pmac_low_i2c_unlock(struct device_node *np);
+
+/* Access functions for platform code */
+int pmac_low_i2c_open(struct device_node *np, int channel);
+int pmac_low_i2c_close(struct device_node *np);
+int pmac_low_i2c_setmode(struct device_node *np, int mode);
+int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len);
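+
+/* Illustrative example (address and channel values are hypothetical):
+ * a one-byte combined read from a device at address 0xd2 on channel 0
+ * of the bus described by 'np' could look like
+ *
+ *	u8 buf;
+ *	pmac_low_i2c_open(np, 0);
+ *	pmac_low_i2c_setmode(np, pmac_low_i2c_mode_combined);
+ *	pmac_low_i2c_xfer(np, 0xd2 | pmac_low_i2c_read, 0, &buf, 1);
+ *	pmac_low_i2c_close(np);
+ */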
+
+
+#endif /* __PMAC_LOW_I2C_H__ */
diff --git a/include/asm-powerpc/pmc.h b/include/asm-powerpc/pmc.h
new file mode 100644
index 00000000000..2f3c3fc2b79
--- /dev/null
+++ b/include/asm-powerpc/pmc.h
@@ -0,0 +1,46 @@
+/*
+ * pmc.h
+ * Copyright (C) 2004 David Gibson, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _POWERPC_PMC_H
+#define _POWERPC_PMC_H
+
+#include <asm/ptrace.h>
+
+typedef void (*perf_irq_t)(struct pt_regs *);
+
+int reserve_pmc_hardware(perf_irq_t new_perf_irq);
+void release_pmc_hardware(void);
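+
+/* Typical usage sketch (handler name is hypothetical; a non-zero return
+ * from reserve_pmc_hardware() is assumed to mean the counters are already
+ * claimed):
+ *
+ *	static void my_pmc_handler(struct pt_regs *regs) { ... }
+ *
+ *	err = reserve_pmc_hardware(my_pmc_handler);
+ *	if (err)
+ *		return err;
+ *	...
+ *	release_pmc_hardware();
+ */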
+
+#ifdef CONFIG_PPC64
+void power4_enable_pmcs(void);
+#endif
+
+#ifdef CONFIG_FSL_BOOKE
+void init_pmc_stop(int ctr);
+void set_pmc_event(int ctr, int event);
+void set_pmc_user_kernel(int ctr, int user, int kernel);
+void set_pmc_marked(int ctr, int mark0, int mark1);
+void pmc_start_ctr(int ctr, int enable);
+void pmc_start_ctrs(int enable);
+void pmc_stop_ctrs(void);
+void dump_pmcs(void);
+
+extern struct op_powerpc_model op_model_fsl_booke;
+#endif
+
+#endif /* _POWERPC_PMC_H */
diff --git a/include/asm-powerpc/posix_types.h b/include/asm-powerpc/posix_types.h
new file mode 100644
index 00000000000..c6391077224
--- /dev/null
+++ b/include/asm-powerpc/posix_types.h
@@ -0,0 +1,129 @@
+#ifndef _ASM_POWERPC_POSIX_TYPES_H
+#define _ASM_POWERPC_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned int __kernel_mode_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef long __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef long __kernel_suseconds_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+typedef unsigned int __kernel_old_uid_t;
+typedef unsigned int __kernel_old_gid_t;
+
+#ifdef __powerpc64__
+typedef unsigned long __kernel_nlink_t;
+typedef int __kernel_ipc_pid_t;
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef unsigned long __kernel_old_dev_t;
+#else
+typedef unsigned short __kernel_nlink_t;
+typedef short __kernel_ipc_pid_t;
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef unsigned int __kernel_old_dev_t;
+#endif
+
+#ifdef __powerpc64__
+typedef long long __kernel_loff_t;
+#else
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+#endif
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+#ifndef __GNUC__
+
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
+#define __FD_ZERO(set) \
+ ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
+
+#else /* __GNUC__ */
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
+ || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
+/* With GNU C, use inline functions instead so args are evaluated only once: */
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned long *tmp = (unsigned long *)p->fds_bits;
+ int i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
+ tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
+
+ case 8:
+ tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
+
+ case 4:
+ tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
+ return;
+ }
+ }
+ i = __FDSET_LONGS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+#endif /* __GNUC__ */
+#endif /* _ASM_POWERPC_POSIX_TYPES_H */
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
new file mode 100644
index 00000000000..a88728fba8f
--- /dev/null
+++ b/include/asm-powerpc/ppc-pci.h
@@ -0,0 +1,54 @@
+/*
+ * (c) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_PPC_PCI_H
+#define _ASM_POWERPC_PPC_PCI_H
+
+#include <linux/pci.h>
+#include <asm/pci-bridge.h>
+
+extern unsigned long isa_io_base;
+
+extern void pci_setup_pci_controller(struct pci_controller *hose);
+extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
+extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
+
+
+extern struct list_head hose_list;
+extern int global_phb_number;
+
+extern unsigned long find_and_init_phbs(void);
+
+extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */
+
+/* PCI device_node operations */
+struct device_node;
+typedef void *(*traverse_func)(struct device_node *me, void *data);
+void *traverse_pci_devices(struct device_node *start, traverse_func pre,
+ void *data);
+
+void pci_devs_phb_init(void);
+void pci_devs_phb_init_dynamic(struct pci_controller *phb);
+
+/* PCI address cache management routines */
+void pci_addr_cache_insert_device(struct pci_dev *dev);
+void pci_addr_cache_remove_device(struct pci_dev *dev);
+
+/* From rtas_pci.h */
+void init_pci_config_tokens (void);
+unsigned long get_phb_buid (struct device_node *);
+
+/* From pSeries_pci.h */
+extern void pSeries_final_fixup(void);
+extern void pSeries_irq_bus_setup(struct pci_bus *bus);
+
+extern unsigned long pci_probe_only;
+extern unsigned long pci_assign_all_buses;
+extern int pci_read_irq_line(struct pci_dev *pci_dev);
+
+#endif /* _ASM_POWERPC_PPC_PCI_H */
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
new file mode 100644
index 00000000000..c534ca41224
--- /dev/null
+++ b/include/asm-powerpc/ppc_asm.h
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
+ */
+#ifndef _ASM_POWERPC_PPC_ASM_H
+#define _ASM_POWERPC_PPC_ASM_H
+
+#include <linux/stringify.h>
+#include <linux/config.h>
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Macros for storing registers into and loading registers from
+ * exception frames.
+ */
+#ifdef __powerpc64__
+#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
+#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
+#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
+#else
+#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
+#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
+#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
+ SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); \
+ REST_10GPRS(22, base)
+#endif
+
+
+#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
+#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
+#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
+#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
+#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
+#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
+#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
+#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+
+#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*(n)(base)
+#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
+#define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
+#define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
+#define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
+#define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
+#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*(n)(base)
+#define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base)
+#define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base)
+#define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base)
+#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
+#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+
+#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,b,base
+#define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
+#define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
+#define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
+#define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
+#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,b,base
+#define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
+#define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
+#define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
+#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
+
+#define SAVE_EVR(n,s,base) evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
+#define SAVE_2EVRS(n,s,base) SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
+#define SAVE_4EVRS(n,s,base) SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
+#define SAVE_8EVRS(n,s,base) SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
+#define SAVE_16EVRS(n,s,base) SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
+#define SAVE_32EVRS(n,s,base) SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
+#define REST_EVR(n,s,base) lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
+#define REST_2EVRS(n,s,base) REST_EVR(n,s,base); REST_EVR(n+1,s,base)
+#define REST_4EVRS(n,s,base) REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
+#define REST_8EVRS(n,s,base) REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
+#define REST_16EVRS(n,s,base) REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
+#define REST_32EVRS(n,s,base) REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
+
+/* Macros to adjust thread priority for hardware multithreading */
+#define HMT_VERY_LOW or 31,31,31 # very low priority
+#define HMT_LOW or 1,1,1
+#define HMT_MEDIUM_LOW or 6,6,6 # medium low priority
+#define HMT_MEDIUM or 2,2,2
+#define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority
+#define HMT_HIGH or 3,3,3
+
+/* handle instructions that older assemblers may not know */
+#define RFCI .long 0x4c000066 /* rfci instruction */
+#define RFDI .long 0x4c00004e /* rfdi instruction */
+#define RFMCI .long 0x4c00004c /* rfmci instruction */
+
+#ifdef CONFIG_PPC64
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+#define _GLOBAL(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _KPROBE(name) \
+ .section ".kprobes.text","a"; \
+ .align 2 ; \
+ .globl name; \
+ .globl GLUE(.,name); \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#define _STATIC(name) \
+ .section ".text"; \
+ .align 2 ; \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+GLUE(.,name):
+
+#else /* 32-bit */
+
+#define _GLOBAL(n) \
+ .text; \
+ .stabs __stringify(n:F-1),N_FUN,0,0,n;\
+ .globl n; \
+n:
+
+#define _KPROBE(n) \
+ .section ".kprobes.text","a"; \
+ .globl n; \
+n:
+
+#endif
+
+/*
+ * LOADADDR( rn, name )
+ * loads the address of 'name' into 'rn'
+ *
+ * LOADBASE( rn, name )
+ * loads the address (possibly without the low 16 bits) of 'name' into 'rn'
+ * suitable for base+disp addressing
+ */
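+
+/* For instance (the symbol name is only an example), assembly code that
+ * includes this header can use
+ *
+ *	LOADADDR(r4, some_symbol)
+ *
+ * to get the full address of some_symbol into r4 on both 32-bit and
+ * 64-bit builds.
+ */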
+#ifdef __powerpc64__
+#define LOADADDR(rn,name) \
+ lis rn,name##@highest; \
+ ori rn,rn,name##@higher; \
+ rldicr rn,rn,32,31; \
+ oris rn,rn,name##@h; \
+ ori rn,rn,name##@l
+
+#define LOADBASE(rn,name) \
+ ld rn,name@got(r2)
+
+#define OFF(name) 0
+
+#define SET_REG_TO_CONST(reg, value) \
+ lis reg,(((value)>>48)&0xFFFF); \
+ ori reg,reg,(((value)>>32)&0xFFFF); \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(((value)>>16)&0xFFFF); \
+ ori reg,reg,((value)&0xFFFF);
+
+#define SET_REG_TO_LABEL(reg, label) \
+ lis reg,(label)@highest; \
+ ori reg,reg,(label)@higher; \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(label)@h; \
+ ori reg,reg,(label)@l;
+
+/* operations for longs and pointers */
+#define LDL ld
+#define STL std
+#define CMPI cmpdi
+#define SZL 8
+
+/* offsets for stack frame layout */
+#define LRSAVE 16
+
+#else /* 32-bit */
+#define LOADADDR(rn,name) \
+ lis rn,name@ha; \
+ addi rn,rn,name@l
+
+#define LOADBASE(rn,name) \
+ lis rn,name@ha
+
+#define OFF(name) name@l
+
+/* operations for longs and pointers */
+#define LDL lwz
+#define STL stw
+#define CMPI cmpwi
+#define SZL 4
+
+/* offsets for stack frame layout */
+#define LRSAVE 4
+
+#endif
+
+/* various errata or part fixups */
+#ifdef CONFIG_PPC601_SYNC_FIX
+#define SYNC \
+BEGIN_FTR_SECTION \
+ sync; \
+ isync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define SYNC_601 \
+BEGIN_FTR_SECTION \
+ sync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#define ISYNC_601 \
+BEGIN_FTR_SECTION \
+ isync; \
+END_FTR_SECTION_IFSET(CPU_FTR_601)
+#else
+#define SYNC
+#define SYNC_601
+#define ISYNC_601
+#endif
+
+
+#ifndef CONFIG_SMP
+#define TLBSYNC
+#else /* CONFIG_SMP */
+/* tlbsync is not implemented on 601 */
+#define TLBSYNC \
+BEGIN_FTR_SECTION \
+ tlbsync; \
+ sync; \
+END_FTR_SECTION_IFCLR(CPU_FTR_601)
+#endif
+
+
+/*
+ * The tlbia instruction is not implemented on the PPC 603 or 601; however,
+ * on the 403GCX and 405GP tlbia IS defined and tlbie is not.
+ * All of these instructions exist in the 8xx; they have magical powers,
+ * and they must be used.
+ */
+
+#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
+#define tlbia \
+ li r4,1024; \
+ mtctr r4; \
+ lis r4,KERNELBASE@h; \
+0: tlbie r4; \
+ addi r4,r4,0x1000; \
+ bdnz 0b
+#endif
+
+
+#ifdef CONFIG_IBM405_ERR77
+#define PPC405_ERR77(ra,rb) dcbt ra, rb;
+#define PPC405_ERR77_SYNC sync;
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+
+
+#ifdef CONFIG_IBM440EP_ERR42
+#define PPC440EP_ERR42 isync
+#else
+#define PPC440EP_ERR42
+#endif
+
+
+#if defined(CONFIG_BOOKE)
+#define toreal(rd)
+#define fromreal(rd)
+
+#define tophys(rd,rs) \
+ addis rd,rs,0
+
+#define tovirt(rd,rs) \
+ addis rd,rs,0
+
+#elif defined(CONFIG_PPC64)
+#define toreal(rd) /* we can access c000... in real mode */
+#define fromreal(rd)
+
+#define tophys(rd,rs) \
+ clrldi rd,rs,2
+
+#define tovirt(rd,rs) \
+ rotldi rd,rs,16; \
+ ori rd,rd,((KERNELBASE>>48)&0xFFFF);\
+ rotldi rd,rd,48
+#else
+/*
+ * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
+ * physical base address of RAM at compile time.
+ */
+#define toreal(rd) tophys(rd,rd)
+#define fromreal(rd) tovirt(rd,rd)
+
+#define tophys(rd,rs) \
+0: addis rd,rs,-KERNELBASE@h; \
+ .section ".vtop_fixup","aw"; \
+ .align 1; \
+ .long 0b; \
+ .previous
+
+#define tovirt(rd,rs) \
+0: addis rd,rs,KERNELBASE@h; \
+ .section ".ptov_fixup","aw"; \
+ .align 1; \
+ .long 0b; \
+ .previous
+#endif
+
+#ifdef CONFIG_PPC64
+#define RFI rfid
+#define MTMSRD(r) mtmsrd r
+
+#else
+#define FIX_SRR1(ra, rb)
+#ifndef CONFIG_40x
+#define RFI rfi
+#else
+#define RFI rfi; b . /* Prevent prefetch past rfi */
+#endif
+#define MTMSRD(r) mtmsr r
+#define CLR_TOP32(r)
+#endif
+
+/* The boring bits... */
+
+/* Condition Register Bit Fields */
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+
+/* General Purpose Registers (GPRs) */
+
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+
+/* Floating Point Registers (FPRs) */
+
+#define fr0 0
+#define fr1 1
+#define fr2 2
+#define fr3 3
+#define fr4 4
+#define fr5 5
+#define fr6 6
+#define fr7 7
+#define fr8 8
+#define fr9 9
+#define fr10 10
+#define fr11 11
+#define fr12 12
+#define fr13 13
+#define fr14 14
+#define fr15 15
+#define fr16 16
+#define fr17 17
+#define fr18 18
+#define fr19 19
+#define fr20 20
+#define fr21 21
+#define fr22 22
+#define fr23 23
+#define fr24 24
+#define fr25 25
+#define fr26 26
+#define fr27 27
+#define fr28 28
+#define fr29 29
+#define fr30 30
+#define fr31 31
+
+/* AltiVec Registers (VPRs) */
+
+#define vr0 0
+#define vr1 1
+#define vr2 2
+#define vr3 3
+#define vr4 4
+#define vr5 5
+#define vr6 6
+#define vr7 7
+#define vr8 8
+#define vr9 9
+#define vr10 10
+#define vr11 11
+#define vr12 12
+#define vr13 13
+#define vr14 14
+#define vr15 15
+#define vr16 16
+#define vr17 17
+#define vr18 18
+#define vr19 19
+#define vr20 20
+#define vr21 21
+#define vr22 22
+#define vr23 23
+#define vr24 24
+#define vr25 25
+#define vr26 26
+#define vr27 27
+#define vr28 28
+#define vr29 29
+#define vr30 30
+#define vr31 31
+
+/* SPE Registers (EVPRs) */
+
+#define evr0 0
+#define evr1 1
+#define evr2 2
+#define evr3 3
+#define evr4 4
+#define evr5 5
+#define evr6 6
+#define evr7 7
+#define evr8 8
+#define evr9 9
+#define evr10 10
+#define evr11 11
+#define evr12 12
+#define evr13 13
+#define evr14 14
+#define evr15 15
+#define evr16 16
+#define evr17 17
+#define evr18 18
+#define evr19 19
+#define evr20 20
+#define evr21 21
+#define evr22 22
+#define evr23 23
+#define evr24 24
+#define evr25 25
+#define evr26 26
+#define evr27 27
+#define evr28 28
+#define evr29 29
+#define evr30 30
+#define evr31 31
+
+/* some stab codes */
+#define N_FUN 36
+#define N_RSYM 64
+#define N_SLINE 68
+#define N_SO 100
+
+#define ASM_CONST(x) x
+#else
+ #define __ASM_CONST(x) x##UL
+ #define ASM_CONST(x) __ASM_CONST(x)
+
+#ifdef CONFIG_PPC64
+#define DATAL ".llong"
+#else
+#define DATAL ".long"
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
new file mode 100644
index 00000000000..1dc4bf7b52b
--- /dev/null
+++ b/include/asm-powerpc/processor.h
@@ -0,0 +1,281 @@
+#ifndef _ASM_POWERPC_PROCESSOR_H
+#define _ASM_POWERPC_PROCESSOR_H
+
+/*
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <asm/reg.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#ifdef CONFIG_PPC64
+#include <asm/systemcfg.h>
+#endif
+
+#ifdef CONFIG_PPC32
+/* 32-bit platform types */
+/* We only need to define a new _MACH_xxx for machines which are part of
+ * a configuration which supports more than one type of different machine.
+ * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac.
+ * -- Tom
+ */
+#define _MACH_prep 0x00000001
+#define _MACH_Pmac 0x00000002 /* pmac or pmac clone (non-chrp) */
+#define _MACH_chrp 0x00000004 /* chrp machine */
+
+/* see residual.h for these */
+#define _PREP_Motorola 0x01 /* motorola prep */
+#define _PREP_Firm 0x02 /* firmworks prep */
+#define _PREP_IBM 0x00 /* ibm prep */
+#define _PREP_Bull 0x03 /* bull prep */
+
+/* these are arbitrary */
+#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
+#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
+#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
+
+#ifdef CONFIG_PPC_MULTIPLATFORM
+extern int _machine;
+
+/* what kind of prep workstation we are */
+extern int _prep_type;
+extern int _chrp_type;
+
+/*
+ * This is used to identify the board type from a given PReP board
+ * vendor. Board revision is also made available.
+ */
+extern unsigned char ucSystemType;
+extern unsigned char ucBoardRev;
+extern unsigned char ucBoardRevMaj, ucBoardRevMin;
+#else
+#define _machine 0
+#endif /* CONFIG_PPC_MULTIPLATFORM */
+#endif /* CONFIG_PPC32 */
+
+#ifdef CONFIG_PPC64
+/* Platforms supported by PPC64 */
+#define PLATFORM_PSERIES 0x0100
+#define PLATFORM_PSERIES_LPAR 0x0101
+#define PLATFORM_ISERIES_LPAR 0x0201
+#define PLATFORM_LPAR 0x0001
+#define PLATFORM_POWERMAC 0x0400
+#define PLATFORM_MAPLE 0x0500
+#define PLATFORM_CELL 0x1000
+
+/* Compatibility with drivers coming from PPC32 world */
+#define _machine (systemcfg->platform)
+#define _MACH_Pmac PLATFORM_POWERMAC
+#endif
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
+#define HMT_low() asm volatile("or 1,1,1 # low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
+#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
+#define HMT_high() asm volatile("or 3,3,3 # high priority")
+
+#ifdef __KERNEL__
+
+extern int have_of;
+
+struct task_struct;
+void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
+void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
+
+/* Create a new kernel thread. */
+extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+/* Lazy FPU handling on uni-processor */
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *last_task_used_altivec;
+extern struct task_struct *last_task_used_spe;
+
+#ifdef CONFIG_PPC32
+#define TASK_SIZE (CONFIG_TASK_SIZE)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3)
+#endif
+
+#ifdef CONFIG_PPC64
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
+
+/*
+ * 32-bit user address space is 4GB - 1 page
+ * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT
+ */
+#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
+
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_USER32 : TASK_SIZE_USER64)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
+
+#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
+ TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
+#endif
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+ unsigned long ksp; /* Kernel stack pointer */
+#ifdef CONFIG_PPC64
+ unsigned long ksp_vsid;
+#endif
+ struct pt_regs *regs; /* Pointer to saved register state */
+ mm_segment_t fs; /* for get_fs() validation */
+#ifdef CONFIG_PPC32
+ void *pgdir; /* root of page-table tree */
+ signed long last_syscall;
+#endif
+#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
+ unsigned long dbcr0; /* debug control register values */
+ unsigned long dbcr1;
+#endif
+ double fpr[32]; /* Complete floating point set */
+ struct { /* fpr ... fpscr must be contiguous */
+
+ unsigned int pad;
+ unsigned int val; /* Floating point status */
+ } fpscr;
+ int fpexc_mode; /* floating-point exception mode */
+#ifdef CONFIG_PPC64
+ unsigned long start_tb; /* Start purr when proc switched in */
+	unsigned long	accum_tb;	/* Total accumulated purr for process */
+ unsigned long vdso_base; /* base of the vDSO library */
+#endif
+ unsigned long dabr; /* Data address breakpoint register */
+#ifdef CONFIG_ALTIVEC
+ /* Complete AltiVec register set */
+ vector128 vr[32] __attribute((aligned(16)));
+ /* AltiVec status */
+ vector128 vscr __attribute((aligned(16)));
+ unsigned long vrsave;
+ int used_vr; /* set if process has used altivec */
+#endif /* CONFIG_ALTIVEC */
+#ifdef CONFIG_SPE
+ unsigned long evr[32]; /* upper 32-bits of SPE regs */
+ u64 acc; /* Accumulator */
+ unsigned long spefscr; /* SPE & eFP status */
+ int used_spe; /* set if process has used spe */
+#endif /* CONFIG_SPE */
+};
+
+#define ARCH_MIN_TASKALIGN 16
+
+#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
+
+
+#ifdef CONFIG_PPC32
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .fs = KERNEL_DS, \
+ .pgdir = swapper_pg_dir, \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+}
+#else
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
+ .fs = KERNEL_DS, \
+ .fpr = {0}, \
+ .fpscr = { .val = 0, }, \
+ .fpexc_mode = MSR_FE0|MSR_FE1, \
+}
+#endif
+
+/*
+ * Return saved PC of a blocked thread. For now, this is the "user" PC
+ */
+#define thread_saved_pc(tsk) \
+ ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
+#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
+
+/* Get/set floating-point exception mode */
+#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
+#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
+
+extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
+extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
+
+static inline unsigned int __unpack_fe01(unsigned long msr_bits)
+{
+ return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
+}
+
+static inline unsigned long __pack_fe01(unsigned int fpmode)
+{
+ return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
+}
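+
+/* For example, a 2-bit mode of 3 packs to MSR_FE0 | MSR_FE1, and
+ * __unpack_fe01() applied to that MSR value yields 3 again, so the two
+ * helpers are inverses over the FE0/FE1 bits.
+ */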
+
+#ifdef CONFIG_PPC64
+#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
+#else
+#define cpu_relax() barrier()
+#endif
+
+/*
+ * Prefetch macros.
+ */
+#define ARCH_HAS_PREFETCH
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+static inline void prefetch(const void *x)
+{
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
+}
+
+static inline void prefetchw(const void *x)
+{
+ if (unlikely(!x))
+ return;
+
+ __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
+}
+
+#define spin_lock_prefetch(x) prefetchw(x)
+
+#ifdef CONFIG_PPC64
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
new file mode 100644
index 00000000000..3a0104fa046
--- /dev/null
+++ b/include/asm-powerpc/prom.h
@@ -0,0 +1,219 @@
+#ifndef _POWERPC_PROM_H
+#define _POWERPC_PROM_H
+#ifdef __KERNEL__
+
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh computers.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <asm/atomic.h>
+
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full paths and
+ * property contents. The dt strings block contains just the strings for
+ * the property names; it is fully page aligned and self-contained in a
+ * page, so that it can be kept around by the kernel. Each property name
+ * appears only once in this block (cheap compression).
+ *
+ * The mem_rsvmap contains a map of reserved ranges of physical memory;
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * everybody's job. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0.
+ */
+struct boot_param_header
+{
+ u32 magic; /* magic word OF_DT_HEADER */
+ u32 totalsize; /* total size of DT block */
+ u32 off_dt_struct; /* offset to structure */
+ u32 off_dt_strings; /* offset to strings */
+ u32 off_mem_rsvmap; /* offset to memory reserve map */
+ u32 version; /* format version */
+ u32 last_comp_version; /* last compatible version */
+ /* version 2 fields below */
+ u32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ u32 dt_strings_size; /* size of the DT strings block */
+};
+
+
+
+typedef u32 phandle;
+typedef u32 ihandle;
+
+struct address_range {
+ unsigned long space;
+ unsigned long address;
+ unsigned long size;
+};
+
+struct interrupt_info {
+ int line;
+ int sense; /* +ve/-ve logic, edge or level, etc. */
+};
+
+struct pci_address {
+ u32 a_hi;
+ u32 a_mid;
+ u32 a_lo;
+};
+
+struct isa_address {
+ u32 a_hi;
+ u32 a_lo;
+};
+
+struct isa_range {
+ struct isa_address isa_addr;
+ struct pci_address pci_addr;
+ unsigned int size;
+};
+
+struct reg_property {
+ unsigned long address;
+ unsigned long size;
+};
+
+struct reg_property32 {
+ unsigned int address;
+ unsigned int size;
+};
+
+struct reg_property64 {
+ u64 address;
+ u64 size;
+};
+
+struct property {
+ char *name;
+ int length;
+ unsigned char *value;
+ struct property *next;
+};
+
+struct device_node {
+ char *name;
+ char *type;
+ phandle node;
+ phandle linux_phandle;
+ int n_addrs;
+ struct address_range *addrs;
+ int n_intrs;
+ struct interrupt_info *intrs;
+ char *full_name;
+
+ struct property *properties;
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ struct device_node *next; /* next device of same type */
+ struct device_node *allnext; /* next in list of all nodes */
+ struct proc_dir_entry *pde; /* this node's proc directory */
+ struct kref kref;
+ unsigned long _flags;
+ void *data;
+};
+
+extern struct device_node *of_chosen;
+
+/* flag descriptions */
+#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+
+#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
+#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
+{
+ dn->pde = de;
+}
+
+
+/* OBSOLETE: Old style node lookup */
+extern struct device_node *find_devices(const char *name);
+extern struct device_node *find_type_devices(const char *type);
+extern struct device_node *find_path_device(const char *path);
+extern struct device_node *find_compatible_devices(const char *type,
+ const char *compat);
+extern struct device_node *find_all_nodes(void);
+
+/* New style node lookup */
+extern struct device_node *of_find_node_by_name(struct device_node *from,
+ const char *name);
+extern struct device_node *of_find_node_by_type(struct device_node *from,
+ const char *type);
+extern struct device_node *of_find_compatible_node(struct device_node *from,
+ const char *type, const char *compat);
+extern struct device_node *of_find_node_by_path(const char *path);
+extern struct device_node *of_find_node_by_phandle(phandle handle);
+extern struct device_node *of_find_all_nodes(struct device_node *prev);
+extern struct device_node *of_get_parent(const struct device_node *node);
+extern struct device_node *of_get_next_child(const struct device_node *node,
+ struct device_node *prev);
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+
+/* For updating the device tree at runtime */
+extern void of_attach_node(struct device_node *);
+extern void of_detach_node(const struct device_node *);
+
+/* Other Prototypes */
+extern void finish_device_tree(void);
+extern void unflatten_device_tree(void);
+extern void early_init_devtree(void *);
+extern int device_is_compatible(struct device_node *device, const char *);
+extern int machine_is_compatible(const char *compat);
+extern unsigned char *get_property(struct device_node *node, const char *name,
+ int *lenp);
+extern void print_properties(struct device_node *node);
+extern int prom_n_addr_cells(struct device_node* np);
+extern int prom_n_size_cells(struct device_node* np);
+extern int prom_n_intr_cells(struct device_node* np);
+extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
+extern void prom_add_property(struct device_node* np, struct property* prop);
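+
+/* Illustrative example (node type and property name are just common
+ * examples): look up a CPU node, read one of its properties, and drop
+ * the reference when done:
+ *
+ *	struct device_node *np = of_find_node_by_type(NULL, "cpu");
+ *	if (np) {
+ *		int len;
+ *		unsigned char *freq =
+ *			get_property(np, "clock-frequency", &len);
+ *		...
+ *		of_node_put(np);
+ *	}
+ */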
+
+#ifdef CONFIG_PPC32
+/*
+ * PCI <-> OF matching functions
+ * (XXX should these be here?)
+ */
+struct pci_bus;
+struct pci_dev;
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8* bus, u8* devfn);
+extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
+extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
+extern void pci_create_OF_bus_map(void);
+#endif
+
+extern struct resource *request_OF_resource(struct device_node* node,
+ int index, const char* name_postfix);
+extern int release_OF_resource(struct device_node* node, int index);
+
+#endif /* __KERNEL__ */
+#endif /* _POWERPC_PROM_H */
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
new file mode 100644
index 00000000000..1f7ecdb0b6c
--- /dev/null
+++ b/include/asm-powerpc/ptrace.h
@@ -0,0 +1,248 @@
+#ifndef _ASM_POWERPC_PTRACE_H
+#define _ASM_POWERPC_PTRACE_H
+
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ *
+ * This should only contain volatile regs, since we can keep the
+ * non-volatile regs in the thread_struct; it should be set up so
+ * that only the volatiles are saved by the interrupt code.
+ *
+ * Since this is going on the stack, *CARE MUST BE TAKEN* to ensure
+ * that the overall structure is a multiple of 16 bytes in length.
+ *
+ * Note that the offsets of the fields in this struct correspond with
+ * the PT_* values below. This simplifies arch/powerpc/kernel/ptrace.c.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3; /* Used for restarting system calls */
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+#ifdef __powerpc64__
+ unsigned long softe; /* Soft enabled/disabled */
+#else
+ unsigned long mq; /* 601 only (not used at present) */
+ /* Used on APUS to hold IPL value. */
+#endif
+ unsigned long trap; /* Reason for being here */
+ /* N.B. for critical exceptions on 4xx, the dar and dsisr
+ fields are overloaded to hold srr0 and srr1. */
+ unsigned long dar; /* Fault registers */
+ unsigned long dsisr; /* on 4xx/Book-E used for ESR */
+ unsigned long result; /* Result of a system call */
+};
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+
+#ifdef __powerpc64__
+
+#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
+
+/* Size of dummy stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 128
+#define __SIGNAL_FRAMESIZE32 64
+
+#else /* __powerpc64__ */
+
+#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
+
+/* Size of stack frame allocated when calling signal handler. */
+#define __SIGNAL_FRAMESIZE 64
+
+#endif /* __powerpc64__ */
+
+#ifndef __ASSEMBLY__
+
+#define instruction_pointer(regs) ((regs)->nip)
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+#ifdef __powerpc64__
+#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
+#else
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+#endif
+
+#define force_successful_syscall_return() \
+ do { \
+ current_thread_info()->syscall_noerror = 1; \
+ } while(0)
+
+/*
+ * We use the least-significant bit of the trap field to indicate
+ * whether we have saved the full set of registers, or only a
+ * partial set. A 1 there means the partial set.
+ * On 4xx we use the next bit to indicate whether the exception
+ * is a critical exception (1 means it is).
+ */
+#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
+#ifndef __powerpc64__
+#define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
+#endif /* ! __powerpc64__ */
+#define TRAP(regs) ((regs)->trap & ~0xF)
+#ifdef __powerpc64__
+#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
+#else
+#define CHECK_FULL_REGS(regs) \
+do { \
+ if ((regs)->trap & 1) \
+ printk(KERN_CRIT "%s: partial register set\n", __FUNCTION__); \
+} while (0)
+#endif /* __powerpc64__ */
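+
+/* For example (the vector value is shown only for illustration),
+ * TRAP(regs) yields the exception vector with the flag bits masked off,
+ * so a data storage interrupt could be recognised with
+ *
+ *	if (TRAP(regs) == 0x300 && FULL_REGS(regs))
+ *		...
+ */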
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ * These can't be changed without breaking binary compatibility
+ * with MkLinux, etc.
+ */
+#define PT_R0 0
+#define PT_R1 1
+#define PT_R2 2
+#define PT_R3 3
+#define PT_R4 4
+#define PT_R5 5
+#define PT_R6 6
+#define PT_R7 7
+#define PT_R8 8
+#define PT_R9 9
+#define PT_R10 10
+#define PT_R11 11
+#define PT_R12 12
+#define PT_R13 13
+#define PT_R14 14
+#define PT_R15 15
+#define PT_R16 16
+#define PT_R17 17
+#define PT_R18 18
+#define PT_R19 19
+#define PT_R20 20
+#define PT_R21 21
+#define PT_R22 22
+#define PT_R23 23
+#define PT_R24 24
+#define PT_R25 25
+#define PT_R26 26
+#define PT_R27 27
+#define PT_R28 28
+#define PT_R29 29
+#define PT_R30 30
+#define PT_R31 31
+
+#define PT_NIP 32
+#define PT_MSR 33
+#ifdef __KERNEL__
+#define PT_ORIG_R3 34
+#endif
+#define PT_CTR 35
+#define PT_LNK 36
+#define PT_XER 37
+#define PT_CCR 38
+#ifndef __powerpc64__
+#define PT_MQ 39
+#else
+#define PT_SOFTE 39
+#define PT_TRAP 40
+#define PT_DAR 41
+#define PT_DSISR 42
+#define PT_RESULT 43
+#endif
+
+#define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
+
+#ifndef __powerpc64__
+
+#define PT_FPR31 (PT_FPR0 + 2*31)
+#define PT_FPSCR (PT_FPR0 + 2*32 + 1)
+
+#else /* __powerpc64__ */
+
+#define PT_FPSCR (PT_FPR0 + 32) /* each FP reg occupies 1 slot in 64-bit space */
+
+#ifdef __KERNEL__
+#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
+#endif
+
+#define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */
+#define PT_VSCR (PT_VR0 + 32*2 + 1)
+#define PT_VRSAVE (PT_VR0 + 33*2)
+
+#ifdef __KERNEL__
+#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
+#define PT_VSCR_32 (PT_VR0 + 32*4 + 3)
+#define PT_VRSAVE_32 (PT_VR0 + 33*4)
+#endif
+
+#endif /* __powerpc64__ */
+
+/*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the same
+ * structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ */
+#define PTRACE_GETVRREGS 18
+#define PTRACE_SETVRREGS 19
+
+#ifndef __powerpc64__
+/* Get/set all the upper 32-bits of the SPE registers, accumulator, and
+ * spefscr, in one go */
+#define PTRACE_GETEVRREGS 20
+#define PTRACE_SETEVRREGS 21
+#endif /* __powerpc64__ */
+
+/*
+ * Get or set a debug register. The first 16 are DABR registers and the
+ * second 16 are IABR registers.
+ */
+#define PTRACE_GET_DEBUGREG 25
+#define PTRACE_SET_DEBUGREG 26
+
+#ifdef __powerpc64__
+/* Additional PTRACE requests implemented on PowerPC. */
+#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
+#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
+#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
+#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
+
+/* Calls to trace a 64bit program from a 32bit program */
+#define PPC_PTRACE_PEEKTEXT_3264 0x95
+#define PPC_PTRACE_PEEKDATA_3264 0x94
+#define PPC_PTRACE_POKETEXT_3264 0x93
+#define PPC_PTRACE_POKEDATA_3264 0x92
+#define PPC_PTRACE_PEEKUSR_3264 0x91
+#define PPC_PTRACE_POKEUSR_3264 0x90
+#endif /* __powerpc64__ */
+
+#endif /* _ASM_POWERPC_PTRACE_H */
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
new file mode 100644
index 00000000000..da848412f11
--- /dev/null
+++ b/include/asm-powerpc/reg.h
@@ -0,0 +1,613 @@
+/*
+ * Contains the definition of registers common to all PowerPC variants.
+ * If a register definition has been changed in a different PowerPC
+ * variant, we will enclose it in #ifndef XXX ... #endif, and use the
+ * number from the Programming Environments Manual For 32-Bit
+ * Implementations of the PowerPC Architecture (a.k.a. Green Book) here.
+ */
+
+#ifndef _ASM_POWERPC_REG_H
+#define _ASM_POWERPC_REG_H
+#ifdef __KERNEL__
+
+#include <linux/stringify.h>
+#include <asm/cputable.h>
+
+/* Pickup Book E specific registers. */
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#include <asm/reg_booke.h>
+#endif
+
+#define MSR_SF_LG 63 /* Enable 64 bit mode */
+#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
+#define MSR_HV_LG 60 /* Hypervisor state */
+#define MSR_VEC_LG 25 /* Enable AltiVec */
+#define MSR_POW_LG 18 /* Enable Power Management */
+#define MSR_WE_LG 18 /* Wait State Enable */
+#define MSR_TGPR_LG 17 /* TLB Update registers in use */
+#define MSR_CE_LG 17 /* Critical Interrupt Enable */
+#define MSR_ILE_LG 16 /* Interrupt Little Endian */
+#define MSR_EE_LG 15 /* External Interrupt Enable */
+#define MSR_PR_LG 14 /* Problem State / Privilege Level */
+#define MSR_FP_LG 13 /* Floating Point enable */
+#define MSR_ME_LG 12 /* Machine Check Enable */
+#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
+#define MSR_SE_LG 10 /* Single Step */
+#define MSR_BE_LG 9 /* Branch Trace */
+#define MSR_DE_LG 9 /* Debug Exception Enable */
+#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
+#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
+#define MSR_IR_LG 5 /* Instruction Relocate */
+#define MSR_DR_LG 4 /* Data Relocate */
+#define MSR_PE_LG 3 /* Protection Enable */
+#define MSR_PX_LG 2 /* Protection Exclusive Mode */
+#define MSR_PMM_LG 2 /* Performance monitor */
+#define MSR_RI_LG 1 /* Recoverable Exception */
+#define MSR_LE_LG 0 /* Little Endian */
+
+#ifdef __ASSEMBLY__
+#define __MASK(X) (1<<(X))
+#else
+#define __MASK(X) (1UL<<(X))
+#endif
+
+#ifdef CONFIG_PPC64
+#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
+#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
+#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
+#else
+/* so tests for these bits fail on 32-bit */
+#define MSR_SF 0
+#define MSR_ISF 0
+#define MSR_HV 0
+#endif
+
+#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
+#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
+#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
+#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
+#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
+#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
+#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
+#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
+#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
+#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
+#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
+#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
+#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
+#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
+#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
+#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
+#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
+#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
+#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
+#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
+#ifndef MSR_PMM
+#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
+#endif
+#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
+#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
+
+#ifdef CONFIG_PPC64
+#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
+#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
+
+#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
+#define MSR_USER64 MSR_USER32 | MSR_SF
+
+#else /* 32-bit */
+/* Default MSR for kernel mode. */
+#ifndef MSR_KERNEL /* reg_booke.h also defines this */
+#ifdef CONFIG_APUS_FAST_EXCEPT
+#define MSR_KERNEL (MSR_ME|MSR_IP|MSR_RI|MSR_IR|MSR_DR)
+#else
+#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
+#endif
+#endif
+
+#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
+#endif
+
+/* Floating Point Status and Control Register (FPSCR) Fields */
+#define FPSCR_FX 0x80000000 /* FPU exception summary */
+#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
+#define FPSCR_VX 0x20000000 /* Invalid operation summary */
+#define FPSCR_OX 0x10000000 /* Overflow exception summary */
+#define FPSCR_UX 0x08000000 /* Underflow exception summary */
+#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
+#define FPSCR_XX 0x02000000 /* Inexact exception summary */
+#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
+#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
+#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
+#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
+#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
+#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
+#define FPSCR_FR 0x00040000 /* Fraction rounded */
+#define FPSCR_FI 0x00020000 /* Fraction inexact */
+#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
+#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
+#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
+#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
+#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
+#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
+#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
+#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
+#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
+#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
+#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
+#define FPSCR_RN 0x00000003 /* FPU rounding control */
+
+/* Special Purpose Registers (SPRNs)*/
+#define SPRN_CTR 0x009 /* Count Register */
+#define SPRN_CTRLF 0x088
+#define SPRN_CTRLT 0x098
+#define CTRL_RUNLATCH 0x1
+#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
+#define DABR_TRANSLATION (1UL << 2)
+#define SPRN_DAR 0x013 /* Data Address Register */
+#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+#define DSISR_NOHPTE 0x40000000 /* no translation found */
+#define DSISR_PROTFAULT 0x08000000 /* protection fault */
+#define DSISR_ISSTORE 0x02000000 /* access was a store */
+#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
+#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
+#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
+#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
+#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
+#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
+#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
+#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
+#define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */
+#define SPRN_DBAT1U 0x21A /* Data BAT 1 Upper Register */
+#define SPRN_DBAT2L 0x21D /* Data BAT 2 Lower Register */
+#define SPRN_DBAT2U 0x21C /* Data BAT 2 Upper Register */
+#define SPRN_DBAT3L 0x21F /* Data BAT 3 Lower Register */
+#define SPRN_DBAT3U 0x21E /* Data BAT 3 Upper Register */
+#define SPRN_DBAT4L 0x239 /* Data BAT 4 Lower Register */
+#define SPRN_DBAT4U 0x238 /* Data BAT 4 Upper Register */
+#define SPRN_DBAT5L 0x23B /* Data BAT 5 Lower Register */
+#define SPRN_DBAT5U 0x23A /* Data BAT 5 Upper Register */
+#define SPRN_DBAT6L 0x23D /* Data BAT 6 Lower Register */
+#define SPRN_DBAT6U 0x23C /* Data BAT 6 Upper Register */
+#define SPRN_DBAT7L 0x23F /* Data BAT 7 Lower Register */
+#define SPRN_DBAT7U 0x23E /* Data BAT 7 Upper Register */
+
+#define SPRN_DEC 0x016 /* Decrement Register */
+#define SPRN_DER 0x095 /* Debug Enable Register */
+#define DER_RSTE 0x40000000 /* Reset Interrupt */
+#define DER_CHSTPE 0x20000000 /* Check Stop */
+#define DER_MCIE 0x10000000 /* Machine Check Interrupt */
+#define DER_EXTIE 0x02000000 /* External Interrupt */
+#define DER_ALIE 0x01000000 /* Alignment Interrupt */
+#define DER_PRIE 0x00800000 /* Program Interrupt */
+#define DER_FPUVIE 0x00400000 /* FP Unavailable Interrupt */
+#define DER_DECIE 0x00200000 /* Decrementer Interrupt */
+#define DER_SYSIE 0x00040000 /* System Call Interrupt */
+#define DER_TRE 0x00020000 /* Trace Interrupt */
+#define DER_SEIE 0x00004000 /* FP SW Emulation Interrupt */
+#define DER_ITLBMSE 0x00002000 /* Imp. Spec. Instruction TLB Miss */
+#define DER_ITLBERE 0x00001000 /* Imp. Spec. Instruction TLB Error */
+#define DER_DTLBMSE 0x00000800 /* Imp. Spec. Data TLB Miss */
+#define DER_DTLBERE 0x00000400 /* Imp. Spec. Data TLB Error */
+#define DER_LBRKE 0x00000008 /* Load/Store Breakpoint Interrupt */
+#define DER_IBRKE 0x00000004 /* Instruction Breakpoint Interrupt */
+#define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
+#define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
+#define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
+#define SPRN_EAR 0x11A /* External Address Register */
+#define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
+#define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
+#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
+#define HID0_EMCP (1<<31) /* Enable Machine Check pin */
+#define HID0_EBA (1<<29) /* Enable Bus Address Parity */
+#define HID0_EBD (1<<28) /* Enable Bus Data Parity */
+#define HID0_SBCLK (1<<27)
+#define HID0_EICE (1<<26)
+#define HID0_TBEN (1<<26) /* Timebase enable - 745x */
+#define HID0_ECLK (1<<25)
+#define HID0_PAR (1<<24)
+#define HID0_STEN (1<<24) /* Software table search enable - 745x */
+#define HID0_HIGH_BAT (1<<23) /* Enable high BATs - 7455 */
+#define HID0_DOZE (1<<23)
+#define HID0_NAP (1<<22)
+#define HID0_SLEEP (1<<21)
+#define HID0_DPM (1<<20)
+#define HID0_BHTCLR (1<<18) /* Clear branch history table - 7450 */
+#define HID0_XAEN (1<<17) /* Extended addressing enable - 7450 */
+#define HID0_NHR (1<<16) /* Not hard reset (software bit-7450)*/
+#define HID0_ICE (1<<15) /* Instruction Cache Enable */
+#define HID0_DCE (1<<14) /* Data Cache Enable */
+#define HID0_ILOCK (1<<13) /* Instruction Cache Lock */
+#define HID0_DLOCK (1<<12) /* Data Cache Lock */
+#define HID0_ICFI (1<<11) /* Instr. Cache Flash Invalidate */
+#define HID0_DCI (1<<10) /* Data Cache Invalidate */
+#define HID0_SPD (1<<9) /* Speculative disable */
+#define HID0_DAPUEN (1<<8) /* Debug APU enable */
+#define HID0_SGE (1<<7) /* Store Gathering Enable */
+#define HID0_SIED (1<<7) /* Serial Instr. Execution [Disable] */
+#define HID0_DFCA (1<<6) /* Data Cache Flush Assist */
+#define HID0_LRSTK (1<<4) /* Link register stack - 745x */
+#define HID0_BTIC (1<<5) /* Branch Target Instr Cache Enable */
+#define HID0_ABE (1<<3) /* Address Broadcast Enable */
+#define HID0_FOLD (1<<3) /* Branch Folding enable - 745x */
+#define HID0_BHTE (1<<2) /* Branch History Table Enable */
+#define HID0_BTCD (1<<1) /* Branch target cache disable */
+#define HID0_NOPDST (1<<1) /* No-op dst, dstt, etc. instr. */
+#define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */
+
+#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
+#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
+#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
+#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
+#define HID1_PC1 (1<<15) /* 7450 PLL_CFG[1] */
+#define HID1_PC2 (1<<14) /* 7450 PLL_CFG[2] */
+#define HID1_PC3 (1<<13) /* 7450 PLL_CFG[3] */
+#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
+#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
+#define HID1_PS (1<<16) /* 750FX PLL selection */
+#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
+#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
+#define SPRN_HID4 0x3F4 /* 970 HID4 */
+#define SPRN_HID5 0x3F6 /* 970 HID5 */
+#define SPRN_HID6 0x3F9 /* BE HID 6 */
+#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
+#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
+#define SPRN_TSCR 0x399 /* Thread switch control on BE */
+#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
+#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
+#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
+#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
+#define SPRN_TSC 0x3FD /* Thread switch control on others */
+#define SPRN_TST 0x3FC /* Thread switch timeout on others */
+#if !defined(SPRN_IAC1) && !defined(SPRN_IAC2)
+#define SPRN_IAC1 0x3F4 /* Instruction Address Compare 1 */
+#define SPRN_IAC2 0x3F5 /* Instruction Address Compare 2 */
+#endif
+#define SPRN_IBAT0L 0x211 /* Instruction BAT 0 Lower Register */
+#define SPRN_IBAT0U 0x210 /* Instruction BAT 0 Upper Register */
+#define SPRN_IBAT1L 0x213 /* Instruction BAT 1 Lower Register */
+#define SPRN_IBAT1U 0x212 /* Instruction BAT 1 Upper Register */
+#define SPRN_IBAT2L 0x215 /* Instruction BAT 2 Lower Register */
+#define SPRN_IBAT2U 0x214 /* Instruction BAT 2 Upper Register */
+#define SPRN_IBAT3L 0x217 /* Instruction BAT 3 Lower Register */
+#define SPRN_IBAT3U 0x216 /* Instruction BAT 3 Upper Register */
+#define SPRN_IBAT4L 0x231 /* Instruction BAT 4 Lower Register */
+#define SPRN_IBAT4U 0x230 /* Instruction BAT 4 Upper Register */
+#define SPRN_IBAT5L 0x233 /* Instruction BAT 5 Lower Register */
+#define SPRN_IBAT5U 0x232 /* Instruction BAT 5 Upper Register */
+#define SPRN_IBAT6L 0x235 /* Instruction BAT 6 Lower Register */
+#define SPRN_IBAT6U 0x234 /* Instruction BAT 6 Upper Register */
+#define SPRN_IBAT7L 0x237 /* Instruction BAT 7 Lower Register */
+#define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */
+#define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */
+#define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */
+#define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */
+#define ICTRL_EICE 0x08000000 /* enable icache parity errs */
+#define ICTRL_EDC 0x04000000 /* enable dcache parity errs */
+#define ICTRL_EICP 0x00000100 /* enable icache par. check */
+#define SPRN_IMISS 0x3D4 /* Instruction TLB Miss Register */
+#define SPRN_IMMR 0x27E /* Internal Memory Map Register */
+#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
+#define SPRN_L2CR2 0x3f8
+#define L2CR_L2E 0x80000000 /* L2 enable */
+#define L2CR_L2PE 0x40000000 /* L2 parity enable */
+#define L2CR_L2SIZ_MASK 0x30000000 /* L2 size mask */
+#define L2CR_L2SIZ_256KB 0x10000000 /* L2 size 256KB */
+#define L2CR_L2SIZ_512KB 0x20000000 /* L2 size 512KB */
+#define L2CR_L2SIZ_1MB 0x30000000 /* L2 size 1MB */
+#define L2CR_L2CLK_MASK 0x0e000000 /* L2 clock mask */
+#define L2CR_L2CLK_DISABLED 0x00000000 /* L2 clock disabled */
+#define L2CR_L2CLK_DIV1 0x02000000 /* L2 clock / 1 */
+#define L2CR_L2CLK_DIV1_5 0x04000000 /* L2 clock / 1.5 */
+#define L2CR_L2CLK_DIV2 0x08000000 /* L2 clock / 2 */
+#define L2CR_L2CLK_DIV2_5 0x0a000000 /* L2 clock / 2.5 */
+#define L2CR_L2CLK_DIV3 0x0c000000 /* L2 clock / 3 */
+#define L2CR_L2RAM_MASK 0x01800000 /* L2 RAM type mask */
+#define L2CR_L2RAM_FLOW 0x00000000 /* L2 RAM flow through */
+#define L2CR_L2RAM_PIPE 0x01000000 /* L2 RAM pipelined */
+#define L2CR_L2RAM_PIPE_LW 0x01800000 /* L2 RAM pipelined latewr */
+#define L2CR_L2DO 0x00400000 /* L2 data only */
+#define L2CR_L2I 0x00200000 /* L2 global invalidate */
+#define L2CR_L2CTL 0x00100000 /* L2 RAM control */
+#define L2CR_L2WT 0x00080000 /* L2 write-through */
+#define L2CR_L2TS 0x00040000 /* L2 test support */
+#define L2CR_L2OH_MASK 0x00030000 /* L2 output hold mask */
+#define L2CR_L2OH_0_5 0x00000000 /* L2 output hold 0.5 ns */
+#define L2CR_L2OH_1_0 0x00010000 /* L2 output hold 1.0 ns */
+#define L2CR_L2SL 0x00008000 /* L2 DLL slow */
+#define L2CR_L2DF 0x00004000 /* L2 differential clock */
+#define L2CR_L2BYP 0x00002000 /* L2 DLL bypass */
+#define L2CR_L2IP 0x00000001 /* L2 GI in progress */
+#define L2CR_L2IO_745x 0x00100000 /* L2 instr. only (745x) */
+#define L2CR_L2DO_745x 0x00010000 /* L2 data only (745x) */
+#define L2CR_L2REP_745x 0x00001000 /* L2 repl. algorithm (745x) */
+#define L2CR_L2HWF_745x 0x00000800 /* L2 hardware flush (745x) */
+#define SPRN_L3CR 0x3FA /* Level 3 Cache Control Register */
+#define L3CR_L3E 0x80000000 /* L3 enable */
+#define L3CR_L3PE 0x40000000 /* L3 data parity enable */
+#define L3CR_L3APE 0x20000000 /* L3 addr parity enable */
+#define L3CR_L3SIZ 0x10000000 /* L3 size */
+#define L3CR_L3CLKEN 0x08000000 /* L3 clock enable */
+#define L3CR_L3RES 0x04000000 /* L3 special reserved bit */
+#define L3CR_L3CLKDIV 0x03800000 /* L3 clock divisor */
+#define L3CR_L3IO 0x00400000 /* L3 instruction only */
+#define L3CR_L3SPO 0x00040000 /* L3 sample point override */
+#define L3CR_L3CKSP 0x00030000 /* L3 clock sample point */
+#define L3CR_L3PSP 0x0000e000 /* L3 P-clock sample point */
+#define L3CR_L3REP 0x00001000 /* L3 replacement algorithm */
+#define L3CR_L3HWF 0x00000800 /* L3 hardware flush */
+#define L3CR_L3I 0x00000400 /* L3 global invalidate */
+#define L3CR_L3RT 0x00000300 /* L3 SRAM type */
+#define L3CR_L3NIRCA 0x00000080 /* L3 non-integer ratio clock adj. */
+#define L3CR_L3DO 0x00000040 /* L3 data only mode */
+#define L3CR_PMEN 0x00000004 /* L3 private memory enable */
+#define L3CR_PMSIZ 0x00000001 /* L3 private memory size */
+
+#define SPRN_MSSCR0 0x3f6 /* Memory Subsystem Control Register 0 */
+#define SPRN_MSSSR0 0x3f7 /* Memory Subsystem Status Register 0 */
+#define SPRN_LDSTCR 0x3f8 /* Load/Store control register */
+#define SPRN_LDSTDB 0x3f4 /* */
+#define SPRN_LR 0x008 /* Link Register */
+#ifndef SPRN_PIR
+#define SPRN_PIR 0x3FF /* Processor Identification Register */
+#endif
+#define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
+#define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
+#define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
+#define SPRN_PVR 0x11F /* Processor Version Register */
+#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
+#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
+#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
+#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
+#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
+#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
+#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
+#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
+#define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */
+#define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */
+#define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */
+#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
+#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
+#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
+#ifndef SPRN_SVR
+#define SPRN_SVR 0x11E /* System Version Register */
+#endif
+#define SPRN_THRM1 0x3FC /* Thermal Management Register 1 */
+/* these bits were defined in inverted endian sense originally, ugh, confusing */
+#define THRM1_TIN (1 << 31)
+#define THRM1_TIV (1 << 30)
+#define THRM1_THRES(x) ((x&0x7f)<<23)
+#define THRM3_SITV(x) ((x&0x3fff)<<1)
+#define THRM1_TID (1<<2)
+#define THRM1_TIE (1<<1)
+#define THRM1_V (1<<0)
+#define SPRN_THRM2 0x3FD /* Thermal Management Register 2 */
+#define SPRN_THRM3 0x3FE /* Thermal Management Register 3 */
+#define THRM3_E (1<<0)
+#define SPRN_TLBMISS 0x3D4 /* 980 7450 TLB Miss Register */
+#define SPRN_UMMCR0 0x3A8 /* User Monitor Mode Control Register 0 */
+#define SPRN_UMMCR1 0x3AC /* User Monitor Mode Control Register 1 */
+#define SPRN_UPMC1 0x3A9 /* User Performance Counter Register 1 */
+#define SPRN_UPMC2 0x3AA /* User Performance Counter Register 2 */
+#define SPRN_UPMC3 0x3AD /* User Performance Counter Register 3 */
+#define SPRN_UPMC4 0x3AE /* User Performance Counter Register 4 */
+#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
+#define SPRN_VRSAVE 0x100 /* Vector Register Save Register */
+#define SPRN_XER 0x001 /* Fixed Point Exception Register */
+
+/* Performance monitor SPRs */
+#ifdef CONFIG_PPC64
+#define SPRN_MMCR0 795
+#define MMCR0_FC 0x80000000UL /* freeze counters */
+#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
+#define MMCR0_KERNEL_DISABLE MMCR0_FCS
+#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
+#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
+#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
+#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
+#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
+#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
+#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
+#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
+#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
+#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_SHRFC 0x00000040UL /* Share freeze conditions between threads */
+#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
+#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
+#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
+#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
+#define SPRN_MMCR1 798
+#define SPRN_MMCRA 0x312
+#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
+#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
+#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define SPRN_PMC1 787
+#define SPRN_PMC2 788
+#define SPRN_PMC3 789
+#define SPRN_PMC4 790
+#define SPRN_PMC5 791
+#define SPRN_PMC6 792
+#define SPRN_PMC7 793
+#define SPRN_PMC8 794
+#define SPRN_SIAR 780
+#define SPRN_SDAR 781
+
+#else /* 32-bit */
+#define SPRN_MMCR0 0x3B8 /* Monitor Mode Control Register 0 */
+#define SPRN_MMCR1 0x3BC /* Monitor Mode Control Register 1 */
+#define SPRN_PMC1 0x3B9 /* Performance Counter Register 1 */
+#define SPRN_PMC2 0x3BA /* Performance Counter Register 2 */
+#define SPRN_PMC3 0x3BD /* Performance Counter Register 3 */
+#define SPRN_PMC4 0x3BE /* Performance Counter Register 4 */
+
+/* Bit definitions for MMCR0 and PMC1 / PMC2. */
+#define MMCR0_PMC1_CYCLES (1 << 7)
+#define MMCR0_PMC1_ICACHEMISS (5 << 7)
+#define MMCR0_PMC1_DTLB (6 << 7)
+#define MMCR0_PMC2_DCACHEMISS 0x6
+#define MMCR0_PMC2_CYCLES 0x1
+#define MMCR0_PMC2_ITLB 0x7
+#define MMCR0_PMC2_LOADMISSTIME 0x5
+#define MMCR0_PMXE (1 << 26)
+#endif
+
+/* Processor Version Register (PVR) field extraction */
+
+#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
+#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
+
+#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv))
+
+/*
+ * IBM has further subdivided the standard PowerPC 16-bit version and
+ * revision subfields of the PVR for the PowerPC 403s into the following:
+ */
+
+#define PVR_FAM(pvr) (((pvr) >> 20) & 0xFFF) /* Family field */
+#define PVR_MEM(pvr) (((pvr) >> 16) & 0xF) /* Member field */
+#define PVR_CORE(pvr) (((pvr) >> 12) & 0xF) /* Core field */
+#define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */
+#define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */
+#define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */
+
+/* Processor Version Numbers */
+
+#define PVR_403GA 0x00200000
+#define PVR_403GB 0x00200100
+#define PVR_403GC 0x00200200
+#define PVR_403GCX 0x00201400
+#define PVR_405GP 0x40110000
+#define PVR_STB03XXX 0x40310000
+#define PVR_NP405H 0x41410000
+#define PVR_NP405L 0x41610000
+#define PVR_601 0x00010000
+#define PVR_602 0x00050000
+#define PVR_603 0x00030000
+#define PVR_603e 0x00060000
+#define PVR_603ev 0x00070000
+#define PVR_603r 0x00071000
+#define PVR_604 0x00040000
+#define PVR_604e 0x00090000
+#define PVR_604r 0x000A0000
+#define PVR_620 0x00140000
+#define PVR_740 0x00080000
+#define PVR_750 PVR_740
+#define PVR_740P 0x10080000
+#define PVR_750P PVR_740P
+#define PVR_7400 0x000C0000
+#define PVR_7410 0x800C0000
+#define PVR_7450 0x80000000
+#define PVR_8540 0x80200000
+#define PVR_8560 0x80200000
+/*
+ * For the 8xx processors, all of them report the same PVR family for
+ * the PowerPC core. The various versions of these processors must be
+ * differentiated by the version number in the Communication Processor
+ * Module (CPM).
+ */
+#define PVR_821 0x00500000
+#define PVR_823 PVR_821
+#define PVR_850 PVR_821
+#define PVR_860 PVR_821
+#define PVR_8240 0x00810100
+#define PVR_8245 0x80811014
+#define PVR_8260 PVR_8240
+
+/* 64-bit processors */
+/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
+#define PV_NORTHSTAR 0x0033
+#define PV_PULSAR 0x0034
+#define PV_POWER4 0x0035
+#define PV_ICESTAR 0x0036
+#define PV_SSTAR 0x0037
+#define PV_POWER4p 0x0038
+#define PV_970 0x0039
+#define PV_POWER5 0x003A
+#define PV_POWER5p 0x003B
+#define PV_970FX 0x003C
+#define PV_630 0x0040
+#define PV_630p 0x0041
+#define PV_970MP 0x0044
+#define PV_BE 0x0070
+
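A quick sketch of how the extraction macros and version constants above combine, as it might appear in a C file that includes this header; the helper names are invented for illustration.

/* 32-bit style: compare the version field against a full PVR constant. */
static inline int pvr_is_7400(unsigned int pvr)
{
	return PVR_VER(pvr) == (PVR_7400 >> 16);	/* 0x000C */
}

/* 64-bit style: the PV_ constants are already bare version fields,
 * so they can be fed straight to __is_processor(). */
static inline int running_on_970(void)
{
	return __is_processor(PV_970) || __is_processor(PV_970FX);
}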
+/*
+ * Number of entries in the SLB. If this ever changes we should handle
+ * it with a cpu feature fixup.
+ */
+#define SLB_NUM_ENTRIES 64
+
+/* Macros for setting and retrieving special purpose registers */
+#ifndef __ASSEMBLY__
+#define mfmsr() ({unsigned long rval; \
+ asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+#ifdef CONFIG_PPC64
+#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+ : : "r" (v))
+#define mtmsrd(v) __mtmsrd((v), 0)
+#define mtmsr(v) mtmsrd(v)
+#else
+#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
+#endif
+
+#define mfspr(rn) ({unsigned long rval; \
+ asm volatile("mfspr %0," __stringify(rn) \
+ : "=r" (rval)); rval;})
+#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
+
+#define mftb() ({unsigned long rval; \
+ asm volatile("mftb %0" : "=r" (rval)); rval;})
+#define mftbl() ({unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); rval;})
+
+#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
+#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
+
+#ifdef CONFIG_PPC32
+#define mfsrin(v) ({unsigned int rval; \
+ asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
+ rval;})
+#endif
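For reference, a small sketch of what using these accessors looks like from C code that includes this header (function names are illustrative only). Because the SPR number is pasted into the instruction via __stringify(), it must be a compile-time constant such as the SPRN_ values above.

static inline unsigned long read_pvr(void)
{
	return mfspr(SPRN_PVR);		/* emits "mfspr %0,0x11F" */
}

static inline void write_sprg3(unsigned long val)
{
	mtspr(SPRN_SPRG3, val);		/* emits "mtspr 0x113,%0" */
}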
+
+#define proc_trap() asm volatile("trap")
+
+#ifdef CONFIG_PPC64
+static inline void ppc64_runlatch_on(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL)) {
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl |= CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+}
+
+static inline void ppc64_runlatch_off(void)
+{
+ unsigned long ctrl;
+
+ if (cpu_has_feature(CPU_FTR_CTRL)) {
+ ctrl = mfspr(SPRN_CTRLF);
+ ctrl &= ~CTRL_RUNLATCH;
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+}
+#endif
+
+#define __get_SP() ({unsigned long sp; \
+ asm volatile("mr %0,1": "=r" (sp)); sp;})
+
+#else /* __ASSEMBLY__ */
+
+#define RUNLATCH_ON(REG) \
+BEGIN_FTR_SECTION \
+ mfspr (REG),SPRN_CTRLF; \
+ ori (REG),(REG),CTRL_RUNLATCH; \
+ mtspr SPRN_CTRLT,(REG); \
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REG_H */
diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h
new file mode 100644
index 00000000000..d1bb611ea62
--- /dev/null
+++ b/include/asm-powerpc/rtas.h
@@ -0,0 +1,232 @@
+#ifndef _POWERPC_RTAS_H
+#define _POWERPC_RTAS_H
+
+#include <linux/spinlock.h>
+#include <asm/page.h>
+
+/*
+ * Definitions for talking to the RTAS on CHRP machines.
+ *
+ * Copyright (C) 2001 Peter Bergner
+ * Copyright (C) 2001 PPC 64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define RTAS_UNKNOWN_SERVICE (-1)
+#define RTAS_INSTANTIATE_MAX (1UL<<30) /* Don't instantiate rtas at/above this value */
+
+/* Buffer size for ppc_rtas system call. */
+#define RTAS_RMOBUF_MAX (64 * 1024)
+
+/* RTAS return status codes */
+#define RTAS_BUSY -2 /* RTAS Busy */
+#define RTAS_EXTENDED_DELAY_MIN 9900
+#define RTAS_EXTENDED_DELAY_MAX 9905
+
+/*
+ * In general to call RTAS use rtas_token("string") to lookup
+ * an RTAS token for the given string (e.g. "event-scan").
+ * To actually perform the call use
+ * ret = rtas_call(token, n_in, n_out, ...)
+ * where n_in is the number of input parameters and
+ * n_out is the number of output parameters.
+ *
+ * If the "string" is invalid on this system, RTAS_UNKNOWN_SERVICE
+ * will be returned as a token. rtas_call() checks for this
+ * token and errors out gracefully, so rtas_call(rtas_token("str"), ...)
+ * may be safely used for one-shot calls to RTAS.
+ *
+ */
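A minimal sketch of that lookup-then-call pattern, as it might appear in a C file including this header. The wrapper name is invented, and the argument shape assumed for "event-scan" (4 inputs, 1 status-only output, log buffer passed by 32-bit physical address and length) follows its conventional use.

static int example_event_scan(u32 log_phys, u32 log_len)
{
	int token = rtas_token("event-scan");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -1;

	/* 4 inputs, 1 output (the status), no extra output buffer */
	return rtas_call(token, 4, 1, NULL,
			 RTAS_EVENT_SCAN_ALL_EVENTS, 0, log_phys, log_len);
}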
+
+typedef u32 rtas_arg_t;
+
+struct rtas_args {
+ u32 token;
+ u32 nargs;
+ u32 nret;
+ rtas_arg_t args[16];
+ rtas_arg_t *rets; /* Pointer to return values in args[]. */
+};
+
+extern struct rtas_args rtas_stop_self_args;
+
+struct rtas_t {
+ unsigned long entry; /* physical address pointer */
+ unsigned long base; /* physical address pointer */
+ unsigned long size;
+ spinlock_t lock;
+ struct rtas_args args;
+ struct device_node *dev; /* virtual address pointer */
+};
+
+/* RTAS event classes */
+#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
+#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
+#define RTAS_POWERMGM_EVENTS 0x20000000 /* set bit 2 */
+#define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */
+#define RTAS_EVENT_SCAN_ALL_EVENTS 0xf0000000
+
+/* RTAS event severity */
+#define RTAS_SEVERITY_FATAL 0x5
+#define RTAS_SEVERITY_ERROR 0x4
+#define RTAS_SEVERITY_ERROR_SYNC 0x3
+#define RTAS_SEVERITY_WARNING 0x2
+#define RTAS_SEVERITY_EVENT 0x1
+#define RTAS_SEVERITY_NO_ERROR 0x0
+
+/* RTAS event disposition */
+#define RTAS_DISP_FULLY_RECOVERED 0x0
+#define RTAS_DISP_LIMITED_RECOVERY 0x1
+#define RTAS_DISP_NOT_RECOVERED 0x2
+
+/* RTAS event initiator */
+#define RTAS_INITIATOR_UNKNOWN 0x0
+#define RTAS_INITIATOR_CPU 0x1
+#define RTAS_INITIATOR_PCI 0x2
+#define RTAS_INITIATOR_ISA 0x3
+#define RTAS_INITIATOR_MEMORY 0x4
+#define RTAS_INITIATOR_POWERMGM 0x5
+
+/* RTAS event target */
+#define RTAS_TARGET_UNKNOWN 0x0
+#define RTAS_TARGET_CPU 0x1
+#define RTAS_TARGET_PCI 0x2
+#define RTAS_TARGET_ISA 0x3
+#define RTAS_TARGET_MEMORY 0x4
+#define RTAS_TARGET_POWERMGM 0x5
+
+/* RTAS event type */
+#define RTAS_TYPE_RETRY 0x01
+#define RTAS_TYPE_TCE_ERR 0x02
+#define RTAS_TYPE_INTERN_DEV_FAIL 0x03
+#define RTAS_TYPE_TIMEOUT 0x04
+#define RTAS_TYPE_DATA_PARITY 0x05
+#define RTAS_TYPE_ADDR_PARITY 0x06
+#define RTAS_TYPE_CACHE_PARITY 0x07
+#define RTAS_TYPE_ADDR_INVALID 0x08
+#define RTAS_TYPE_ECC_UNCORR 0x09
+#define RTAS_TYPE_ECC_CORR 0x0a
+#define RTAS_TYPE_EPOW 0x40
+#define RTAS_TYPE_PLATFORM 0xE0
+#define RTAS_TYPE_IO 0xE1
+#define RTAS_TYPE_INFO 0xE2
+#define RTAS_TYPE_DEALLOC 0xE3
+#define RTAS_TYPE_DUMP 0xE4
+/* I don't add PowerMGM events right now, this is a different topic */
+#define RTAS_TYPE_PMGM_POWER_SW_ON 0x60
+#define RTAS_TYPE_PMGM_POWER_SW_OFF 0x61
+#define RTAS_TYPE_PMGM_LID_OPEN 0x62
+#define RTAS_TYPE_PMGM_LID_CLOSE 0x63
+#define RTAS_TYPE_PMGM_SLEEP_BTN 0x64
+#define RTAS_TYPE_PMGM_WAKE_BTN 0x65
+#define RTAS_TYPE_PMGM_BATTERY_WARN 0x66
+#define RTAS_TYPE_PMGM_BATTERY_CRIT 0x67
+#define RTAS_TYPE_PMGM_SWITCH_TO_BAT 0x68
+#define RTAS_TYPE_PMGM_SWITCH_TO_AC 0x69
+#define RTAS_TYPE_PMGM_KBD_OR_MOUSE 0x6a
+#define RTAS_TYPE_PMGM_ENCLOS_OPEN 0x6b
+#define RTAS_TYPE_PMGM_ENCLOS_CLOSED 0x6c
+#define RTAS_TYPE_PMGM_RING_INDICATE 0x6d
+#define RTAS_TYPE_PMGM_LAN_ATTENTION 0x6e
+#define RTAS_TYPE_PMGM_TIME_ALARM 0x6f
+#define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70
+#define RTAS_TYPE_PMGM_SERVICE_PROC 0x71
+
+struct rtas_error_log {
+ unsigned long version:8; /* Architectural version */
+ unsigned long severity:3; /* Severity level of error */
+ unsigned long disposition:2; /* Degree of recovery */
+ unsigned long extended:1; /* extended log present? */
+ unsigned long /* reserved */ :2; /* Reserved for future use */
+ unsigned long initiator:4; /* Initiator of event */
+ unsigned long target:4; /* Target of failed operation */
+ unsigned long type:8; /* General event or error*/
+ unsigned long extended_log_length:32; /* length in bytes */
+ unsigned char buffer[1];
+};
+
+/*
+ * This can be set by the rtas_flash module so that it can get called
+ * as the absolutely last thing before the kernel terminates.
+ */
+extern void (*rtas_flash_term_hook)(int);
+
+extern struct rtas_t rtas;
+
+extern void enter_rtas(unsigned long);
+extern int rtas_token(const char *service);
+extern int rtas_call(int token, int, int, int *, ...);
+extern void call_rtas_display_status(unsigned char);
+extern void rtas_restart(char *cmd);
+extern void rtas_power_off(void);
+extern void rtas_halt(void);
+extern void rtas_os_term(char *str);
+extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_power_level(int powerdomain, int *level);
+extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+extern int rtas_set_indicator(int indicator, int index, int new_value);
+extern void rtas_progress(char *s, unsigned short hex);
+extern void rtas_initialize(void);
+
+struct rtc_time;
+extern unsigned long rtas_get_boot_time(void);
+extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
+extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
+
+/* Given an RTAS status code of 9900..9905 compute the hinted delay */
+unsigned int rtas_extended_busy_delay_time(int status);
+static inline int rtas_is_extended_busy(int status)
+{
+ return status >= 9900 && status <= 9909;
+}
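A sketch of the retry loop this pair of helpers implies, assuming the hinted delay is in milliseconds and that msleep() from linux/delay.h is available to the caller; the wrapper name and the 0-input/1-output call shape are purely illustrative.

static int example_rtas_call_retry(int token)
{
	int rc;

	for (;;) {
		rc = rtas_call(token, 0, 1, NULL);
		if (rc == RTAS_BUSY)
			msleep(1);
		else if (rtas_is_extended_busy(rc))
			msleep(rtas_extended_busy_delay_time(rc));
		else
			return rc;
	}
}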
+
+extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
+
+/* Error types logged. */
+#define ERR_FLAG_ALREADY_LOGGED 0x0
+#define ERR_FLAG_BOOT 0x1 /* log was pulled from NVRAM on boot */
+#define ERR_TYPE_RTAS_LOG 0x2 /* from rtas event-scan */
+#define ERR_TYPE_KERNEL_PANIC 0x4 /* from panic() */
+
+/* All the types and not flags */
+#define ERR_TYPE_MASK (ERR_TYPE_RTAS_LOG | ERR_TYPE_KERNEL_PANIC)
+
+#define RTAS_DEBUG KERN_DEBUG "RTAS: "
+
+#define RTAS_ERROR_LOG_MAX 2048
+
+/*
+ * Return the firmware-specified size of the error log buffer
+ * for all rtas calls that require an error buffer argument.
+ * This includes 'check-exception' and 'rtas-last-error'.
+ */
+extern int rtas_get_error_log_max(void);
+
+/* Event Scan Parameters */
+#define EVENT_SCAN_ALL_EVENTS 0xf0000000
+#define SURVEILLANCE_TOKEN 9000
+#define LOG_NUMBER 64 /* must be a power of two */
+#define LOG_NUMBER_MASK (LOG_NUMBER-1)
+
+/* Some RTAS ops require a data buffer and that buffer must be < 4G.
+ * Rather than having a memory allocator, just use this buffer
+ * (get the lock first), make the RTAS call. Copy the data instead
+ * of holding the buffer for long.
+ */
+
+#define RTAS_DATA_BUF_SIZE 4096
+extern spinlock_t rtas_data_buf_lock;
+extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
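A sketch of that pattern, loosely modelled on how a system parameter might be fetched; the wrapper name is invented, the 3-input/1-output shape of "ibm,get-system-parameter" is an assumption here, and memcpy() is assumed to be in scope (linux/string.h).

static int example_read_system_parameter(int param, char *out, size_t len)
{
	int rc;

	if (len > RTAS_DATA_BUF_SIZE)
		len = RTAS_DATA_BUF_SIZE;

	spin_lock(&rtas_data_buf_lock);
	rc = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, NULL,
		       param, __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE);
	if (rc == 0)
		memcpy(out, rtas_data_buf, len);
	spin_unlock(&rtas_data_buf_lock);

	return rc;
}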
+
+extern void rtas_stop_self(void);
+
+/* RMO buffer reserved for user-space RTAS use */
+extern unsigned long rtas_rmo_buf;
+
+#define GLOBAL_INTERRUPT_QUEUE 9005
+
+#endif /* _POWERPC_RTAS_H */
diff --git a/include/asm-powerpc/rtc.h b/include/asm-powerpc/rtc.h
new file mode 100644
index 00000000000..f5802926b6c
--- /dev/null
+++ b/include/asm-powerpc/rtc.h
@@ -0,0 +1,78 @@
+/*
+ * Real-time clock definitions and interfaces
+ *
+ * Author: Tom Rini <trini@mvista.com>
+ *
+ * 2002 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Based on:
+ * include/asm-m68k/rtc.h
+ *
+ * Copyright Richard Zidlicky
+ * implementation details for genrtc/q40rtc driver
+ *
+ * And the old drivers/macintosh/rtc.c which was heavily based on:
+ * Linux/SPARC Real Time Clock Driver
+ * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
+ *
+ * With additional work by Paul Mackerras and Franz Sirl.
+ */
+
+#ifndef __ASM_POWERPC_RTC_H__
+#define __ASM_POWERPC_RTC_H__
+
+#ifdef __KERNEL__
+
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/time.h>
+
+#define RTC_PIE 0x40 /* periodic interrupt enable */
+#define RTC_AIE 0x20 /* alarm interrupt enable */
+#define RTC_UIE 0x10 /* update-finished interrupt enable */
+
+/* some dummy definitions */
+#define RTC_BATT_BAD 0x100 /* battery bad */
+#define RTC_SQWE 0x08 /* enable square-wave output */
+#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+#define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */
+
+static inline unsigned int get_rtc_time(struct rtc_time *time)
+{
+ if (ppc_md.get_rtc_time)
+ ppc_md.get_rtc_time(time);
+ return RTC_24H;
+}
+
+/* Set the current date and time in the real time clock. */
+static inline int set_rtc_time(struct rtc_time *time)
+{
+ if (ppc_md.set_rtc_time)
+ return ppc_md.set_rtc_time(time);
+ return -EINVAL;
+}
+
+static inline unsigned int get_rtc_ss(void)
+{
+ struct rtc_time h;
+
+ get_rtc_time(&h);
+ return h.tm_sec;
+}
+
+static inline int get_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+static inline int set_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_POWERPC_RTC_H__ */
diff --git a/include/asm-powerpc/rwsem.h b/include/asm-powerpc/rwsem.h
new file mode 100644
index 00000000000..79bae4933b7
--- /dev/null
+++ b/include/asm-powerpc/rwsem.h
@@ -0,0 +1,168 @@
+#ifndef _ASM_POWERPC_RWSEM_H
+#define _ASM_POWERPC_RWSEM_H
+
+#ifdef __KERNEL__
+
+/*
+ * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
+ * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ /* XXX this should be able to be an atomic_t -- paulus */
+ signed int count;
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+#define RWSEM_ACTIVE_BIAS 0x00000001
+#define RWSEM_ACTIVE_MASK 0x0000ffff
+#define RWSEM_WAITING_BIAS (-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#if RWSEM_DEBUG
+ int debug;
+#endif
+};
+
+/*
+ * initialisation
+ */
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT , 0
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+ sem->debug = 0;
+#endif
+}
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
+ rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ while ((tmp = sem->count) >= 0) {
+ if (tmp == cmpxchg(&sem->count, tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count));
+ if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+ rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_dec_return((atomic_t *)(&sem->count));
+ if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count)) < 0))
+ rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+{
+ atomic_add(delta, (atomic_t *)(&sem->count));
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+{
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
+}
+
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return (sem->count != 0);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_RWSEM_H */
diff --git a/include/asm-powerpc/scatterlist.h b/include/asm-powerpc/scatterlist.h
new file mode 100644
index 00000000000..8c992d1491d
--- /dev/null
+++ b/include/asm-powerpc/scatterlist.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_POWERPC_SCATTERLIST_H
+#define _ASM_POWERPC_SCATTERLIST_H
+/*
+ * Copyright (C) 2001 PPC64 Team, IBM Corp
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <asm/dma.h>
+
+struct scatterlist {
+ struct page *page;
+ unsigned int offset;
+ unsigned int length;
+
+ /* For TCE support */
+ dma_addr_t dma_address;
+ u32 dma_length;
+};
+
+/*
+ * These macros should be used after a dma_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#ifdef __powerpc64__
+#define sg_dma_len(sg) ((sg)->dma_length)
+#else
+#define sg_dma_len(sg) ((sg)->length)
+#endif
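A sketch of the iteration rule from the comment above; device programming is elided and the function name is invented.

static void example_program_sg(struct scatterlist *sgl, int nents_mapped)
{
	int i;

	for (i = 0; i < nents_mapped; i++) {
		dma_addr_t addr = sg_dma_address(&sgl[i]);
		u32 len = sg_dma_len(&sgl[i]);

		if (len == 0)		/* alternative stop condition */
			break;

		/* hand addr/len to the device's DMA engine here */
		(void)addr;
	}
}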
+
+#ifdef __powerpc64__
+#define ISA_DMA_THRESHOLD (~0UL)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/include/asm-powerpc/seccomp.h b/include/asm-powerpc/seccomp.h
new file mode 100644
index 00000000000..1e1cfe12882
--- /dev/null
+++ b/include/asm-powerpc/seccomp.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_POWERPC_SECCOMP_H
+#define _ASM_POWERPC_SECCOMP_H
+
+#include <linux/thread_info.h>
+#include <linux/unistd.h>
+
+#define __NR_seccomp_read __NR_read
+#define __NR_seccomp_write __NR_write
+#define __NR_seccomp_exit __NR_exit
+#define __NR_seccomp_sigreturn __NR_rt_sigreturn
+
+#define __NR_seccomp_read_32 __NR_read
+#define __NR_seccomp_write_32 __NR_write
+#define __NR_seccomp_exit_32 __NR_exit
+#define __NR_seccomp_sigreturn_32 __NR_sigreturn
+
+#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/include/asm-powerpc/sections.h b/include/asm-powerpc/sections.h
new file mode 100644
index 00000000000..47be2ac2a92
--- /dev/null
+++ b/include/asm-powerpc/sections.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_SECTIONS_H
+#define _ASM_POWERPC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+#ifdef __powerpc64__
+
+extern char _end[];
+
+static inline int in_kernel_text(unsigned long addr)
+{
+ if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
+ return 1;
+
+ return 0;
+}
+
+#endif
+
+#endif /* _ASM_POWERPC_SECTIONS_H */
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
new file mode 100644
index 00000000000..57369d2cade
--- /dev/null
+++ b/include/asm-powerpc/semaphore.h
@@ -0,0 +1,95 @@
+#ifndef _ASM_POWERPC_SEMAPHORE_H
+#define _ASM_POWERPC_SEMAPHORE_H
+
+/*
+ * Remove spinlock-based RW semaphores; RW semaphore definitions are
+ * now in rwsem.h and we use the generic lib/rwsem.c implementation.
+ * Rework semaphores to use atomic_dec_if_positive.
+ * -- Paul Mackerras (paulus@samba.org)
+ */
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+struct semaphore {
+ /*
+ * Note that any negative value of count is equivalent to 0,
+ * but additionally indicates that some process(es) might be
+ * sleeping on `wait'.
+ */
+ atomic_t count;
+ wait_queue_head_t wait;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n) \
+{ \
+ .count = ATOMIC_INIT(n), \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+}
+
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
+
+static inline void sema_init (struct semaphore *sem, int val)
+{
+ atomic_set(&sem->count, val);
+ init_waitqueue_head(&sem->wait);
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
+
+extern void __down(struct semaphore * sem);
+extern int __down_interruptible(struct semaphore * sem);
+extern void __up(struct semaphore * sem);
+
+static inline void down(struct semaphore * sem)
+{
+ might_sleep();
+
+ /*
+ * Try to get the semaphore, take the slow path if we fail.
+ */
+ if (unlikely(atomic_dec_return(&sem->count) < 0))
+ __down(sem);
+}
+
+static inline int down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ if (unlikely(atomic_dec_return(&sem->count) < 0))
+ ret = __down_interruptible(sem);
+ return ret;
+}
+
+static inline int down_trylock(struct semaphore * sem)
+{
+ return atomic_dec_if_positive(&sem->count) < 0;
+}
+
+static inline void up(struct semaphore * sem)
+{
+ if (unlikely(atomic_inc_return(&sem->count) <= 0))
+ __up(sem);
+}
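As a usage sketch (names invented), the classic mutex pattern with these primitives looks like:

static DECLARE_MUTEX(example_mutex);

static void example_exclusive_op(void)
{
	down(&example_mutex);	/* may sleep */
	/* ... touch the protected state ... */
	up(&example_mutex);
}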
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_SEMAPHORE_H */
diff --git a/include/asm-powerpc/sigcontext.h b/include/asm-powerpc/sigcontext.h
new file mode 100644
index 00000000000..165d630e1cf
--- /dev/null
+++ b/include/asm-powerpc/sigcontext.h
@@ -0,0 +1,52 @@
+#ifndef _ASM_POWERPC_SIGCONTEXT_H
+#define _ASM_POWERPC_SIGCONTEXT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/compiler.h>
+#include <asm/ptrace.h>
+#ifdef __powerpc64__
+#include <asm/elf.h>
+#endif
+
+struct sigcontext {
+ unsigned long _unused[4];
+ int signal;
+#ifdef __powerpc64__
+ int _pad0;
+#endif
+ unsigned long handler;
+ unsigned long oldmask;
+ struct pt_regs __user *regs;
+#ifdef __powerpc64__
+ elf_gregset_t gp_regs;
+ elf_fpregset_t fp_regs;
+/*
+ * To maintain compatibility with current implementations the sigcontext is
+ * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
+ * followed by an unstructured (vmx_reserve) field of 69 doublewords. This
+ * allows the array of vector registers to be quadword aligned independent of
+ * the alignment of the containing sigcontext or ucontext. It is the
+ * responsibility of the code setting the sigcontext to set this pointer to
+ * either NULL (if this processor does not support the VMX feature) or the
+ * address of the first quadword within the allocated (vmx_reserve) area.
+ *
+ * The pointer (v_regs) of vector type (elf_vrreg_t) is type compatible with
+ * an array of 34 quadword entries (elf_vrregset_t). The entries with
+ * indexes 0-31 contain the corresponding vector registers. The entry with
+ * index 32 contains the vscr as the last word (offset 12) within the
+ * quadword. This allows the vscr to be stored as either a quadword (since
+ * it must be copied via a vector register to/from storage) or as a word.
+ * The entry with index 33 contains the vrsave as the first word (offset 0)
+ * within the quadword.
+ */
+ elf_vrreg_t __user *v_regs;
+ long vmx_reserve[ELF_NVRREG+ELF_NVRREG+1];
+#endif
+};
+
+#endif /* _ASM_POWERPC_SIGCONTEXT_H */
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
new file mode 100644
index 00000000000..8bcdd0faefe
--- /dev/null
+++ b/include/asm-powerpc/smp.h
@@ -0,0 +1,119 @@
+/*
+ * smp.h: PowerPC-specific SMP code.
+ *
+ * Original was a copy of sparc smp.h. Now heavily modified
+ * for PPC.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_SMP_H
+#define _ASM_POWERPC_SMP_H
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#endif
+
+extern int boot_cpuid;
+extern int boot_cpuid_phys;
+
+extern void cpu_die(void);
+
+#ifdef CONFIG_SMP
+
+extern void smp_send_debugger_break(int cpu);
+struct pt_regs;
+extern void smp_message_recv(int, struct pt_regs *);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void fixup_irqs(cpumask_t map);
+int generic_cpu_disable(void);
+int generic_cpu_enable(unsigned int cpu);
+void generic_cpu_die(unsigned int cpu);
+void generic_mach_cpu_die(void);
+#endif
+
+#ifdef CONFIG_PPC64
+#define raw_smp_processor_id() (get_paca()->paca_index)
+#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
+#else
+/* 32-bit */
+extern int smp_hw_index[];
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
+#define get_hard_smp_processor_id(cpu) (smp_hw_index[(cpu)])
+#define set_hard_smp_processor_id(cpu, phys)\
+ (smp_hw_index[(cpu)] = (phys))
+#endif
+
+extern cpumask_t cpu_sibling_map[NR_CPUS];
+
+/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
+ *
+ * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
+ * in /proc/interrupts will be wrong!!! --Troy */
+#define PPC_MSG_CALL_FUNCTION 0
+#define PPC_MSG_RESCHEDULE 1
+/* This is unused now */
+#if 0
+#define PPC_MSG_MIGRATE_TASK 2
+#endif
+#define PPC_MSG_DEBUGGER_BREAK 3
+
+void smp_init_iSeries(void);
+void smp_init_pSeries(void);
+void smp_init_cell(void);
+void smp_setup_cpu_maps(void);
+
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+
+#else
+/* for UP */
+#define smp_setup_cpu_maps()
+#define smp_release_cpus()
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC64
+#define get_hard_smp_processor_id(CPU) (paca[(CPU)].hw_cpu_id)
+#define set_hard_smp_processor_id(CPU, VAL) \
+ do { (paca[(CPU)].hw_cpu_id = (VAL)); } while (0)
+#else
+/* 32-bit */
+#ifndef CONFIG_SMP
+#define get_hard_smp_processor_id(cpu) boot_cpuid_phys
+#define set_hard_smp_processor_id(cpu, phys)
+#endif
+#endif
+
+extern int smt_enabled_at_boot;
+
+extern int smp_mpic_probe(void);
+extern void smp_mpic_setup_cpu(int cpu);
+extern void smp_generic_kick_cpu(int nr);
+
+extern void smp_generic_give_timebase(void);
+extern void smp_generic_take_timebase(void);
+
+extern struct smp_ops_t *smp_ops;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SMP_H */
diff --git a/include/asm-powerpc/smu.h b/include/asm-powerpc/smu.h
new file mode 100644
index 00000000000..dee8eefe47b
--- /dev/null
+++ b/include/asm-powerpc/smu.h
@@ -0,0 +1,379 @@
+#ifndef _SMU_H
+#define _SMU_H
+
+/*
+ * Definitions for talking to the SMU chip in newer G5 PowerMacs
+ */
+
+#include <linux/config.h>
+#include <linux/list.h>
+
+/*
+ * Known SMU commands
+ *
+ * Most of what is below comes from looking at the Open Firmware driver,
+ * though this is still incomplete and could use better documentation here
+ * or there...
+ */
+
+
+/*
+ * Partition info commands
+ *
+ * I do not know what those are for at this point
+ */
+#define SMU_CMD_PARTITION_COMMAND 0x3e
+
+
+/*
+ * Fan control
+ *
+ * This is a "mux" for fan control commands, first byte is the
+ * "sub" command.
+ */
+#define SMU_CMD_FAN_COMMAND 0x4a
+
+
+/*
+ * Battery access
+ *
+ * Same command number as the PMU; could it be the same syntax?
+ */
+#define SMU_CMD_BATTERY_COMMAND 0x6f
+#define SMU_CMD_GET_BATTERY_INFO 0x00
+
+/*
+ * Real time clock control
+ *
+ * This is a "mux", first data byte contains the "sub" command.
+ * The "RTC" part of the SMU controls the date, time, powerup
+ * timer, but also a PRAM
+ *
+ * Dates are in BCD format on 7 bytes:
+ * [sec] [min] [hour] [weekday] [month day] [month] [year]
+ * with the month being 1-based and the year stored minus 100.
+ */
+#define SMU_CMD_RTC_COMMAND 0x8e
+#define SMU_CMD_RTC_SET_PWRUP_TIMER 0x00 /* i: 7 bytes date */
+#define SMU_CMD_RTC_GET_PWRUP_TIMER 0x01 /* o: 7 bytes date */
+#define SMU_CMD_RTC_STOP_PWRUP_TIMER 0x02
+#define SMU_CMD_RTC_SET_PRAM_BYTE_ACC 0x20 /* i: 1 byte (address?) */
+#define SMU_CMD_RTC_SET_PRAM_AUTOINC 0x21 /* i: 1 byte (data?) */
+#define SMU_CMD_RTC_SET_PRAM_LO_BYTES 0x22 /* i: 10 bytes */
+#define SMU_CMD_RTC_SET_PRAM_HI_BYTES 0x23 /* i: 10 bytes */
+#define SMU_CMD_RTC_GET_PRAM_BYTE 0x28 /* i: 1 bytes (address?) */
+#define SMU_CMD_RTC_GET_PRAM_LO_BYTES 0x29 /* o: 10 bytes */
+#define SMU_CMD_RTC_GET_PRAM_HI_BYTES 0x2a /* o: 10 bytes */
+#define SMU_CMD_RTC_SET_DATETIME 0x80 /* i: 7 bytes date */
+#define SMU_CMD_RTC_GET_DATETIME 0x81 /* o: 7 bytes date */
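A sketch of building the 7-byte BCD date block described above from a struct rtc_time (linux/rtc.h); the helper names are invented, and the tm_mon/tm_year adjustments follow from tm_mon being 0-based and tm_year counting from 1900.

static inline u8 example_to_bcd(int val)
{
	return ((val / 10) << 4) | (val % 10);
}

static void example_fill_smu_date(u8 date[7], const struct rtc_time *tm)
{
	date[0] = example_to_bcd(tm->tm_sec);
	date[1] = example_to_bcd(tm->tm_min);
	date[2] = example_to_bcd(tm->tm_hour);
	date[3] = example_to_bcd(tm->tm_wday);
	date[4] = example_to_bcd(tm->tm_mday);
	date[5] = example_to_bcd(tm->tm_mon + 1);	/* 1-based month */
	date[6] = example_to_bcd(tm->tm_year - 100);	/* "year minus 100" */
}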
+
+ /*
+ * i2c commands
+ *
+ * To issue an i2c command, the first step is to send a parameter block
+ * to the SMU. This is a command of type 0x9a with 9 bytes of header,
+ * optionally followed by data for a write:
+ *
+ * 0: bus number (from device-tree usually, SMU has lots of busses !)
+ * 1: transfer type/format (see below)
+ * 2: device address. For combined and combined4 type transfers, this
+ * is the "write" version of the address (bit 0x01 cleared)
+ * 3: subaddress length (0..3)
+ * 4: subaddress byte 0 (or only byte for subaddress length 1)
+ * 5: subaddress byte 1
+ * 6: subaddress byte 2
+ * 7: combined address (device address for combined mode data phase)
+ * 8: data length
+ *
+ * The transfer types are the same good old Apple ones it seems,
+ * that is:
+ * - 0x00: Simple transfer
+ * - 0x01: Subaddress transfer (addr write + data tx, no restart)
+ * - 0x02: Combined transfer (addr write + restart + data tx)
+ *
+ * This is then followed by actual data for a write.
+ *
+ * At this point, the OF driver seems to have a limitation on transfer
+ * sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know
+ * whether this is just an OF limit due to some temporary buffer size
+ * or if this is an SMU-imposed limit. This driver has the same limitation
+ * for now as I use a 0x10-byte temporary buffer as well.
+ *
+ * Once that is completed, a response is expected from the SMU. This is
+ * obtained via a command of type 0x9a with a length of 1 byte containing
+ * 0 as the data byte. OF also fills the rest of the data buffer with 0xff's
+ * though I can't tell yet if this is actually necessary. Once this command
+ * is complete, all I can tell is what OF does. OF tests
+ * byte 0 of the reply:
+ * - on read, 0xfe or 0xfc : bus is busy, wait (see below) or nak ?
+ * - on read, 0x00 or 0x01 : reply is in buffer (after the byte 0)
+ * - on write, < 0 -> failure (immediate exit)
+ * - else, OF just exits (without error, weird)
+ *
+ * So on read, there is this wait-for-busy thing when getting a 0xfc or
+ * 0xfe result. OF does a loop of up to 64 retries, waiting 20ms and
+ * doing the above again until either the retries expire or the result
+ * is no longer 0xfe or 0xfc
+ *
+ * The Darwin I2C driver is less subtle though. On any non-success status
+ * from the response command, it waits 5ms and tries again up to 20 times;
+ * it doesn't differentiate between fatal errors and "busy" status.
+ *
+ * This driver provides an asynchronous paramblock based i2c command
+ * interface to be used either directly by low level code or by a higher
+ * level driver interfacing to the linux i2c layer. The current
+ * implementation of this relies on working timers & timer interrupts
+ * though, so be careful of calling context for now. This may be "fixed"
+ * in the future by adding a polling facility.
+ */
+#define SMU_CMD_I2C_COMMAND 0x9a
+ /* transfer types */
+#define SMU_I2C_TRANSFER_SIMPLE 0x00
+#define SMU_I2C_TRANSFER_STDSUB 0x01
+#define SMU_I2C_TRANSFER_COMBINED 0x02
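As an illustration of the 9-byte parameter block described above, here is how a combined-mode read might be encoded; the bus, device and sub-address numbers are invented, and struct smu_i2c_param (defined near the end of this header) mirrors the wire layout byte for byte.

static void example_encode_i2c_read(struct smu_i2c_param *p)
{
	p->bus        = 1;			/* bus id from the device-tree */
	p->type       = SMU_I2C_TRANSFER_COMBINED;
	p->devaddr    = 0x92 & ~0x01;		/* "write" form of the address */
	p->sublen     = 1;
	p->subaddr[0] = 0x41;
	p->subaddr[1] = 0;
	p->subaddr[2] = 0;
	p->caddr      = 0;			/* filled in by the SMU driver */
	p->datalen    = 2;			/* read 2 bytes back */
}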
+
+/*
+ * Power supply control
+ *
+ * The "sub" command is an ASCII string in the data, the
+ * data lenght is that of the string.
+ *
+ * The VSLEW command can be used to get or set the voltage slewing.
+ * - lenght 5 (only "VSLEW") : it returns "DONE" and 3 bytes of
+ * reply at data offset 6, 7 and 8.
+ * - lenght 8 ("VSLEWxyz") has 3 additional bytes appended, and is
+ * used to set the voltage slewing point. The SMU replies with "DONE"
+ * I yet have to figure out their exact meaning of those 3 bytes in
+ * both cases.
+ *
+ */
+#define SMU_CMD_POWER_COMMAND 0xaa
+#define SMU_CMD_POWER_RESTART "RESTART"
+#define SMU_CMD_POWER_SHUTDOWN "SHUTDOWN"
+#define SMU_CMD_POWER_VOLTAGE_SLEW "VSLEW"
+
+/* Misc commands
+ *
+ * This command seems to be a grab bag of various things.
+ */
+#define SMU_CMD_MISC_df_COMMAND 0xdf
+#define SMU_CMD_MISC_df_SET_DISPLAY_LIT 0x02 /* i: 1 byte */
+#define SMU_CMD_MISC_df_NMI_OPTION 0x04
+
+/*
+ * Version info commands
+ *
+ * I haven't quite tried to figure out how these work
+ */
+#define SMU_CMD_VERSION_COMMAND 0xea
+
+
+/*
+ * Misc commands
+ *
+ * This command seems to be a grab bag of various things.
+ */
+#define SMU_CMD_MISC_ee_COMMAND 0xee
+#define SMU_CMD_MISC_ee_GET_DATABLOCK_REC 0x02
+#define SMU_CMD_MISC_ee_LEDS_CTRL 0x04 /* i: 00 (00,01) [00] */
+#define SMU_CMD_MISC_ee_GET_DATA 0x05 /* i: 00 , o: ?? */
+
+
+
+/*
+ * - Kernel side interface -
+ */
+
+#ifdef __KERNEL__
+
+/*
+ * Asynchronous SMU commands
+ *
+ * Fill up this structure and submit it via smu_queue_cmd(), and get
+ * notified via the optional done() callback, or by watching for status
+ * to become != 1.
+ */
+
+struct smu_cmd;
+
+struct smu_cmd
+{
+ /* public */
+ u8 cmd; /* command */
+ int data_len; /* data len */
+ int reply_len; /* reply len */
+ void *data_buf; /* data buffer */
+ void *reply_buf; /* reply buffer */
+ int status; /* command status */
+ void (*done)(struct smu_cmd *cmd, void *misc);
+ void *misc;
+
+ /* private */
+ struct list_head link;
+};
+
+/*
+ * Queues an SMU command; all fields have to be initialized.
+ */
+extern int smu_queue_cmd(struct smu_cmd *cmd);
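+
+/*
+ * Illustrative sketch (not from the original header): queueing a command
+ * and sleeping until the smu_done_complete() helper below fires.  The
+ * choice of SMU_CMD_VERSION_COMMAND, the 16-byte reply buffer and the use
+ * of <linux/completion.h> are assumptions made for the example only.
+ *
+ *	static u8 reply[16];
+ *	DECLARE_COMPLETION(comp);
+ *	struct smu_cmd vcmd = {
+ *		.cmd		= SMU_CMD_VERSION_COMMAND,
+ *		.data_len	= 0,
+ *		.reply_len	= sizeof(reply),
+ *		.reply_buf	= reply,
+ *		.done		= smu_done_complete,
+ *		.misc		= &comp,
+ *	};
+ *
+ *	if (smu_queue_cmd(&vcmd) == 0)
+ *		wait_for_completion(&comp);
+ *
+ * On completion, vcmd.status holds the result (presumably 0 = ok, < 0 =
+ * error, matching the convention documented for smu_i2c_cmd below).
+ */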
+
+/*
+ * Simple command wrapper. This structure embeds a small buffer
+ * to ease sending simple SMU commands from the stack
+ */
+struct smu_simple_cmd
+{
+ struct smu_cmd cmd;
+ u8 buffer[16];
+};
+
+/*
+ * Queues a simple command. All fields will be initialized by that
+ * function
+ */
+extern int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
+ unsigned int data_len,
+ void (*done)(struct smu_cmd *cmd, void *misc),
+ void *misc,
+ ...);
+
+/*
+ * Completion helper. Pass it to smu_queue_simple or as 'done'
+ * member to smu_queue_cmd, it will call complete() on the struct
+ * completion passed in the "misc" argument
+ */
+extern void smu_done_complete(struct smu_cmd *cmd, void *misc);
+
+/*
+ * Synchronous helpers. Will spin-wait for completion of a command
+ */
+extern void smu_spinwait_cmd(struct smu_cmd *cmd);
+
+static inline void smu_spinwait_simple(struct smu_simple_cmd *scmd)
+{
+ smu_spinwait_cmd(&scmd->cmd);
+}
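+
+/*
+ * Illustrative sketch (not from the original header): a synchronous
+ * "simple" command from the stack.  It assumes the trailing variadic
+ * arguments of smu_queue_simple() are the data_len data bytes copied into
+ * the embedded buffer; the particular command and value used here are
+ * examples only.
+ *
+ *	struct smu_simple_cmd scmd;
+ *
+ *	smu_queue_simple(&scmd, SMU_CMD_MISC_df_COMMAND, 2, NULL, NULL,
+ *			 SMU_CMD_MISC_df_SET_DISPLAY_LIT, 1);
+ *	smu_spinwait_simple(&scmd);
+ *
+ * scmd.cmd.status then holds the result.
+ */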
+
+/*
+ * Poll routine to call if blocked with irqs off
+ */
+extern void smu_poll(void);
+
+
+/*
+ * Init routine, presence check....
+ */
+extern int smu_init(void);
+extern int smu_present(void);
+struct of_device;
+extern struct of_device *smu_get_ofdev(void);
+
+
+/*
+ * Common command wrappers
+ */
+extern void smu_shutdown(void);
+extern void smu_restart(void);
+struct rtc_time;
+extern int smu_get_rtc_time(struct rtc_time *time, int spinwait);
+extern int smu_set_rtc_time(struct rtc_time *time, int spinwait);
+
+/*
+ * SMU command buffer absolute address, exported by pmac_setup,
+ * this is allocated very early during boot.
+ */
+extern unsigned long smu_cmdbuf_abs;
+
+
+/*
+ * Kernel asynchronous i2c interface
+ */
+
+/* SMU i2c header, exactly matches i2c header on wire */
+struct smu_i2c_param
+{
+ u8 bus; /* SMU bus ID (from device tree) */
+ u8 type; /* i2c transfer type */
+ u8 devaddr; /* device address (includes direction) */
+ u8 sublen; /* subaddress length */
+ u8 subaddr[3]; /* subaddress */
+ u8 caddr; /* combined address, filled by SMU driver */
+ u8 datalen; /* length of transfer */
+ u8 data[7]; /* data */
+};
+
+#define SMU_I2C_READ_MAX 0x0d
+#define SMU_I2C_WRITE_MAX 0x05
+
+struct smu_i2c_cmd
+{
+ /* public */
+ struct smu_i2c_param info;
+ void (*done)(struct smu_i2c_cmd *cmd, void *misc);
+ void *misc;
+ int status; /* 1 = pending, 0 = ok, <0 = fail */
+
+ /* private */
+ struct smu_cmd scmd;
+ int read;
+ int stage;
+ int retries;
+ u8 pdata[0x10];
+ struct list_head link;
+};
+
+/*
+ * Call this to queue an i2c command to the SMU. You must fill in info
+ * (including info.data for a write), done and misc.
+ * For now, no polling interface is provided, so you have to use the
+ * completion callback.
+ */
+extern int smu_queue_i2c(struct smu_i2c_cmd *cmd);
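+
+/*
+ * Illustrative sketch (not from the original header): a one-byte register
+ * write using a subaddress transfer.  The bus number, device address and
+ * register offset below are made up; real values come from the device
+ * tree and the target device's documentation.
+ *
+ *	static struct smu_i2c_cmd icmd;
+ *	int rc;
+ *
+ *	icmd.info.bus        = 0;
+ *	icmd.info.type       = SMU_I2C_TRANSFER_STDSUB;
+ *	icmd.info.devaddr    = 0x2e;		(hypothetical device)
+ *	icmd.info.sublen     = 1;
+ *	icmd.info.subaddr[0] = 0x01;		(hypothetical register)
+ *	icmd.info.datalen    = 1;		(<= SMU_I2C_WRITE_MAX)
+ *	icmd.info.data[0]    = 0x40;
+ *	icmd.done            = my_i2c_done;	(your completion callback)
+ *	icmd.misc            = NULL;
+ *	rc = smu_queue_i2c(&icmd);
+ */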
+
+
+#endif /* __KERNEL__ */
+
+/*
+ * - Userland interface -
+ */
+
+/*
+ * A given instance of the device can be configured for 2 different
+ * things at the moment:
+ *
+ * - sending SMU commands (default at open() time)
+ * - receiving SMU events (not yet implemented)
+ *
+ * Commands are written with write() of a command block. They can be
+ * "driver" commands (for example to switch to event reception mode)
+ * or real SMU commands. They are made of a header followed by command
+ * data if any.
+ *
+ * For SMU commands (not for driver commands), you can then read() back
+ * a reply. The reader will be blocked or not depending on how the device
+ * file is opened. poll() isn't implemented yet. The reply will consist
+ * of a header as well, followed by the reply data if any. You should
+ * always provide a buffer large enough for the maximum reply data; I
+ * recommend one page.
+ *
+ * It is illegal to send SMU commands through a file descriptor configured
+ * for event reception.
+ *
+ */
+struct smu_user_cmd_hdr
+{
+ __u32 cmdtype;
+#define SMU_CMDTYPE_SMU 0 /* SMU command */
+#define SMU_CMDTYPE_WANTS_EVENTS 1 /* switch fd to events mode */
+
+ __u8 cmd; /* SMU command byte */
+	__u32	data_len;	/* Length of data following */
+};
+
+struct smu_user_reply_hdr
+{
+ __u32 status; /* Command status */
+	__u32	reply_len;	/* Length of data following */
+};
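+
+/*
+ * Illustrative sketch (not from the original header) of the userland
+ * protocol described above.  The device node name and the use of the
+ * version command (0xea) are assumptions made for the example.
+ *
+ *	struct smu_user_cmd_hdr hdr = {
+ *		.cmdtype  = SMU_CMDTYPE_SMU,
+ *		.cmd      = 0xea,
+ *		.data_len = 0,
+ *	};
+ *	unsigned char buf[4096];
+ *	struct smu_user_reply_hdr *rhdr = (struct smu_user_reply_hdr *)buf;
+ *	int fd = open("/dev/smu", O_RDWR);	(node name assumed)
+ *	ssize_t n;
+ *
+ *	write(fd, &hdr, sizeof(hdr));		(plus data_len bytes, if any)
+ *	n = read(fd, buf, sizeof(buf));		(blocks unless O_NONBLOCK)
+ *
+ * On success, rhdr->status holds the command status, followed by
+ * rhdr->reply_len bytes of reply data.
+ */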
+
+#endif /* _SMU_H */
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
new file mode 100644
index 00000000000..1c95ab99deb
--- /dev/null
+++ b/include/asm-powerpc/sparsemem.h
@@ -0,0 +1,16 @@
+#ifndef _ASM_POWERPC_SPARSEMEM_H
+#define _ASM_POWERPC_SPARSEMEM_H 1
+
+#ifdef CONFIG_SPARSEMEM
+/*
+ * SECTION_SIZE_BITS 2^N: how big each section will be
+ * MAX_PHYSADDR_BITS 2^N: how much physical address space we have
+ * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS 24
+#define MAX_PHYSADDR_BITS 38
+#define MAX_PHYSMEM_BITS 36
+
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/spinlock_types.h b/include/asm-powerpc/spinlock_types.h
new file mode 100644
index 00000000000..74236c9f05b
--- /dev/null
+++ b/include/asm-powerpc/spinlock_types.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} raw_spinlock_t;
+
+#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile signed int lock;
+} raw_rwlock_t;
+
+#define __RAW_RW_LOCK_UNLOCKED { 0 }
+
+#endif
diff --git a/include/asm-powerpc/sstep.h b/include/asm-powerpc/sstep.h
new file mode 100644
index 00000000000..630a9889c07
--- /dev/null
+++ b/include/asm-powerpc/sstep.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct pt_regs;
+
+/*
+ * We don't allow single-stepping an mtmsrd that would clear
+ * MSR_RI, since that would make the exception unrecoverable.
+ * Since we need to single-step to proceed from a breakpoint,
+ * we don't allow putting a breakpoint on an mtmsrd instruction.
+ * Similarly we don't allow breakpoints on rfid instructions.
+ * These macros tell us if an instruction is a mtmsrd or rfid.
+ * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
+ * and an mtmsrd (64-bit).
+ */
+#define IS_MTMSRD(instr) (((instr) & 0xfc0007be) == 0x7c000124)
+#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024)
+
+/* Emulate instructions that cause a transfer of control. */
+extern int emulate_step(struct pt_regs *regs, unsigned int instr);
diff --git a/include/asm-powerpc/stat.h b/include/asm-powerpc/stat.h
new file mode 100644
index 00000000000..e4edc510b53
--- /dev/null
+++ b/include/asm-powerpc/stat.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_POWERPC_STAT_H
+#define _ASM_POWERPC_STAT_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+
+#define STAT_HAVE_NSEC 1
+
+#ifndef __powerpc64__
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+#endif /* !__powerpc64__ */
+
+struct stat {
+ unsigned long st_dev;
+ ino_t st_ino;
+#ifdef __powerpc64__
+ nlink_t st_nlink;
+ mode_t st_mode;
+#else
+ mode_t st_mode;
+ nlink_t st_nlink;
+#endif
+ uid_t st_uid;
+ gid_t st_gid;
+ unsigned long st_rdev;
+ off_t st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+#ifdef __powerpc64__
+ unsigned long __unused6;
+#endif
+};
+
+/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+struct stat64 {
+ unsigned long long st_dev; /* Device. */
+ unsigned long long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long long st_rdev; /* Device number, if device. */
+ unsigned short __pad2;
+ long long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+	long long st_blocks;		/* Number of 512-byte blocks allocated. */
+ int st_atime; /* Time of last access. */
+ unsigned int st_atime_nsec;
+ int st_mtime; /* Time of last modification. */
+ unsigned int st_mtime_nsec;
+ int st_ctime; /* Time of last status change. */
+ unsigned int st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+#endif /* _ASM_POWERPC_STAT_H */
diff --git a/include/asm-powerpc/statfs.h b/include/asm-powerpc/statfs.h
new file mode 100644
index 00000000000..67024026c10
--- /dev/null
+++ b/include/asm-powerpc/statfs.h
@@ -0,0 +1,60 @@
+#ifndef _ASM_POWERPC_STATFS_H
+#define _ASM_POWERPC_STATFS_H
+
+/* For ppc32 we just use the generic definitions, not so simple on ppc64 */
+
+#ifndef __powerpc64__
+#include <asm-generic/statfs.h>
+#else
+
+#ifndef __KERNEL_STRICT_NAMES
+#include <linux/types.h>
+typedef __kernel_fsid_t fsid_t;
+#endif
+
+/*
+ * We're already 64-bit, so duplicate the definition
+ */
+struct statfs {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_frsize;
+ long f_spare[5];
+};
+
+struct statfs64 {
+ long f_type;
+ long f_bsize;
+ long f_blocks;
+ long f_bfree;
+ long f_bavail;
+ long f_files;
+ long f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_frsize;
+ long f_spare[5];
+};
+
+struct compat_statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_frsize;
+ __u32 f_spare[5];
+};
+#endif /* ! __powerpc64__ */
+#endif
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
new file mode 100644
index 00000000000..4660c0394a7
--- /dev/null
+++ b/include/asm-powerpc/synch.h
@@ -0,0 +1,51 @@
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
+
+#include <linux/config.h>
+
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
+#ifdef __SUBARCH_HAS_LWSYNC
+# define LWSYNC lwsync
+#else
+# define LWSYNC sync
+#endif
+
+
+/*
+ * Arguably the bitops and *xchg operations don't imply any memory barrier
+ * or SMP ordering, but in fact a lot of drivers expect them to imply
+ * both, since they do on x86 cpus.
+ */
+#ifdef CONFIG_SMP
+#define EIEIO_ON_SMP "eieio\n"
+#define ISYNC_ON_SMP "\n\tisync"
+#define SYNC_ON_SMP __stringify(LWSYNC) "\n"
+#else
+#define EIEIO_ON_SMP
+#define ISYNC_ON_SMP
+#define SYNC_ON_SMP
+#endif
+
+static inline void eieio(void)
+{
+ __asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+ __asm__ __volatile__ ("isync" : : : "memory");
+}
+
+#ifdef CONFIG_SMP
+#define eieio_on_smp() eieio()
+#define isync_on_smp() isync()
+#else
+#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
+#define isync_on_smp() __asm__ __volatile__("": : :"memory")
+#endif
+
+#endif /* _ASM_POWERPC_SYNCH_H */
+
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
new file mode 100644
index 00000000000..b5da0b851e0
--- /dev/null
+++ b/include/asm-powerpc/system.h
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _ASM_POWERPC_SYSTEM_H
+#define _ASM_POWERPC_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <asm/hw_irq.h>
+#include <asm/ppc_asm.h>
+#include <asm/atomic.h>
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory). The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ * read_barrier_depends() prevents data-dependent loads being reordered
+ * across this point (nop on PPC).
+ *
+ * We have to use the sync instructions for mb(), since lwsync doesn't
+ * order loads with respect to previous stores. Lwsync is fine for
+ * rmb(), though. Note that lwsync is interpreted as sync by
+ * 32-bit and older 64-bit CPUs.
+ *
+ * For wmb(), we use sync since wmb is used in drivers to order
+ * stores to system memory with respect to writes to the device.
+ * However, smp_wmb() can be a lighter-weight eieio barrier on
+ * SMP since it is only used to order updates to system memory.
+ */
+#define mb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
+#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define read_barrier_depends() do { } while(0)
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_read_barrier_depends() read_barrier_depends()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do { } while(0)
+#endif /* CONFIG_SMP */
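+
+/*
+ * Illustrative sketch (not from the original header): the producer/consumer
+ * pattern these barriers exist for.  The 'ready' and 'payload' fields are
+ * made up for the example.
+ *
+ *	producer:				consumer:
+ *		buf->payload = value;			while (!buf->ready)
+ *		smp_wmb();					cpu_relax();
+ *		buf->ready = 1;				smp_rmb();
+ *							use(buf->payload);
+ */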
+
+#ifdef __KERNEL__
+struct task_struct;
+struct pt_regs;
+
+#ifdef CONFIG_DEBUGGER
+
+extern int (*__debugger)(struct pt_regs *regs);
+extern int (*__debugger_ipi)(struct pt_regs *regs);
+extern int (*__debugger_bpt)(struct pt_regs *regs);
+extern int (*__debugger_sstep)(struct pt_regs *regs);
+extern int (*__debugger_iabr_match)(struct pt_regs *regs);
+extern int (*__debugger_dabr_match)(struct pt_regs *regs);
+extern int (*__debugger_fault_handler)(struct pt_regs *regs);
+
+#define DEBUGGER_BOILERPLATE(__NAME) \
+static inline int __NAME(struct pt_regs *regs) \
+{ \
+ if (unlikely(__ ## __NAME)) \
+ return __ ## __NAME(regs); \
+ return 0; \
+}
+
+DEBUGGER_BOILERPLATE(debugger)
+DEBUGGER_BOILERPLATE(debugger_ipi)
+DEBUGGER_BOILERPLATE(debugger_bpt)
+DEBUGGER_BOILERPLATE(debugger_sstep)
+DEBUGGER_BOILERPLATE(debugger_iabr_match)
+DEBUGGER_BOILERPLATE(debugger_dabr_match)
+DEBUGGER_BOILERPLATE(debugger_fault_handler)
+
+#ifdef CONFIG_XMON
+extern void xmon_init(int enable);
+#endif
+
+#else
+static inline int debugger(struct pt_regs *regs) { return 0; }
+static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
+static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
+static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
+static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
+static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
+#endif
+
+extern int set_dabr(unsigned long dabr);
+extern void print_backtrace(unsigned long *);
+extern void show_regs(struct pt_regs * regs);
+extern void flush_instruction_cache(void);
+extern void hard_reset_now(void);
+extern void poweroff_now(void);
+
+#ifdef CONFIG_6xx
+extern long _get_L2CR(void);
+extern long _get_L3CR(void);
+extern void _set_L2CR(unsigned long);
+extern void _set_L3CR(unsigned long);
+#else
+#define _get_L2CR() 0L
+#define _get_L3CR() 0L
+#define _set_L2CR(val) do { } while(0)
+#define _set_L3CR(val) do { } while(0)
+#endif
+
+extern void via_cuda_init(void);
+extern void read_rtc_time(void);
+extern void pmac_find_display(void);
+extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
+extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void enable_kernel_altivec(void);
+extern void giveup_altivec(struct task_struct *);
+extern void load_up_altivec(struct task_struct *);
+extern int emulate_altivec(struct pt_regs *);
+extern void giveup_spe(struct task_struct *);
+extern void load_up_spe(struct task_struct *);
+extern int fix_alignment(struct pt_regs *);
+extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
+extern void cvt_df(double *from, float *to, struct thread_struct *thread);
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+extern int call_rtas(const char *, int, int, unsigned long *, ...);
+extern void cacheable_memzero(void *p, unsigned int nb);
+extern void *cacheable_memcpy(void *, const void *, unsigned int);
+extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+extern int die(const char *, struct pt_regs *, long);
+extern void _exception(int, struct pt_regs *, int, unsigned long);
+#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+#endif /* CONFIG_BOOKE_WDT */
+
+/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
+extern unsigned char e2a(unsigned char);
+
+struct device_node;
+extern void note_scsi_host(struct device_node *, void *);
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
+
+struct thread_struct;
+extern struct task_struct *_switch(struct thread_struct *prev,
+ struct thread_struct *next);
+
+extern unsigned int rtas_data;
+extern int mem_init_done; /* set on boot once kmalloc can be called */
+extern unsigned long memory_limit;
+
+extern int powersave_nap; /* set if nap mode can be used in idle loop */
+
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __inline__ unsigned long
+__xchg_u32(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (prev), "=m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __inline__ unsigned long
+__xchg_u64(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (prev), "=m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#define xchg(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
+ })
+
+#define tas(ptr) (xchg((ptr),1))
+
+/*
+ * Compare and exchange - if *p == old, set it to new,
+ * and return the old value of *p.
+ */
+#define __HAVE_ARCH_CMPXCHG 1
+
+static __inline__ unsigned long
+__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
+#ifdef CONFIG_PPC64
+static __inline__ unsigned long
+__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+#endif
+
+/* This function doesn't exist, so you'll get a linker error
+ if something tries to do an invalid cmpxchg(). */
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
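+
+/*
+ * Illustrative sketch (not from the original header): the usual
+ * read/modify/cmpxchg retry loop, shown here as a hypothetical
+ * "increment unless zero" helper.
+ *
+ *	static inline int inc_unless_zero(unsigned int *v)
+ *	{
+ *		unsigned int old;
+ *
+ *		do {
+ *			old = *v;
+ *			if (old == 0)
+ *				return 0;
+ *		} while (cmpxchg(v, old, old + 1) != old);
+ *		return 1;
+ *	}
+ */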
+
+#ifdef CONFIG_PPC64
+/*
+ * We handle most unaligned accesses in hardware. On the other hand
+ * unaligned DMA can be very expensive on some ppc64 IO chips (it does
+ * powers of 2 writes until it reaches sufficient alignment).
+ *
+ * Based on this we disable the IP header alignment in network drivers.
+ */
+#define NET_IP_ALIGN 0
+#endif
+
+#define arch_align_stack(x) (x)
+
+/* Used in very early kernel initialization. */
+extern unsigned long reloc_offset(void);
+extern unsigned long add_reloc_offset(unsigned long);
+extern void reloc_got2(unsigned long);
+
+#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
+
+static inline void create_instruction(unsigned long addr, unsigned int instr)
+{
+ unsigned int *p;
+ p = (unsigned int *)addr;
+ *p = instr;
+ asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
+}
+
+/* Flags for create_branch:
+ * "b" == create_branch(addr, target, 0);
+ * "ba" == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl" == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK 0x1
+#define BRANCH_ABSOLUTE 0x2
+
+static inline void create_branch(unsigned long addr,
+ unsigned long target, int flags)
+{
+ unsigned int instruction;
+
+ if (! (flags & BRANCH_ABSOLUTE))
+ target = target - addr;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+ instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);
+
+ create_instruction(addr, instruction);
+}
+
+static inline void create_function_call(unsigned long addr, void * func)
+{
+ unsigned long func_addr;
+
+#ifdef CONFIG_PPC64
+ /*
+ * On PPC64 the function pointer actually points to the function's
+ * descriptor. The first entry in the descriptor is the address
+ * of the function text.
+ */
+ func_addr = *(unsigned long *)func;
+#else
+ func_addr = (unsigned long)func;
+#endif
+ create_branch(addr, func_addr, BRANCH_SET_LINK);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/include/asm-powerpc/termios.h b/include/asm-powerpc/termios.h
index c5b8e5358f8..7f80a019b6a 100644
--- a/include/asm-powerpc/termios.h
+++ b/include/asm-powerpc/termios.h
@@ -94,142 +94,9 @@ struct termio {
#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
#endif
-#define FIOCLEX _IO('f', 1)
-#define FIONCLEX _IO('f', 2)
-#define FIOASYNC _IOW('f', 125, int)
-#define FIONBIO _IOW('f', 126, int)
-#define FIONREAD _IOR('f', 127, int)
-#define TIOCINQ FIONREAD
-
-#define TIOCGETP _IOR('t', 8, struct sgttyb)
-#define TIOCSETP _IOW('t', 9, struct sgttyb)
-#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
-
-#define TIOCSETC _IOW('t', 17, struct tchars)
-#define TIOCGETC _IOR('t', 18, struct tchars)
-#define TCGETS _IOR('t', 19, struct termios)
-#define TCSETS _IOW('t', 20, struct termios)
-#define TCSETSW _IOW('t', 21, struct termios)
-#define TCSETSF _IOW('t', 22, struct termios)
-
-#define TCGETA _IOR('t', 23, struct termio)
-#define TCSETA _IOW('t', 24, struct termio)
-#define TCSETAW _IOW('t', 25, struct termio)
-#define TCSETAF _IOW('t', 28, struct termio)
-
-#define TCSBRK _IO('t', 29)
-#define TCXONC _IO('t', 30)
-#define TCFLSH _IO('t', 31)
-
-#define TIOCSWINSZ _IOW('t', 103, struct winsize)
-#define TIOCGWINSZ _IOR('t', 104, struct winsize)
-#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
-#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
-#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
-
-#define TIOCGLTC _IOR('t', 116, struct ltchars)
-#define TIOCSLTC _IOW('t', 117, struct ltchars)
-#define TIOCSPGRP _IOW('t', 118, int)
-#define TIOCGPGRP _IOR('t', 119, int)
-
-#define TIOCEXCL 0x540C
-#define TIOCNXCL 0x540D
-#define TIOCSCTTY 0x540E
-
-#define TIOCSTI 0x5412
-#define TIOCMGET 0x5415
-#define TIOCMBIS 0x5416
-#define TIOCMBIC 0x5417
-#define TIOCMSET 0x5418
-#define TIOCGSOFTCAR 0x5419
-#define TIOCSSOFTCAR 0x541A
-#define TIOCLINUX 0x541C
-#define TIOCCONS 0x541D
-#define TIOCGSERIAL 0x541E
-#define TIOCSSERIAL 0x541F
-#define TIOCPKT 0x5420
-
-#define TIOCNOTTY 0x5422
-#define TIOCSETD 0x5423
-#define TIOCGETD 0x5424
-#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
-
-#define TIOCSERCONFIG 0x5453
-#define TIOCSERGWILD 0x5454
-#define TIOCSERSWILD 0x5455
-#define TIOCGLCKTRMIOS 0x5456
-#define TIOCSLCKTRMIOS 0x5457
-#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
-#define TIOCSERGETLSR 0x5459 /* Get line status register */
-#define TIOCSERGETMULTI 0x545A /* Get multiport config */
-#define TIOCSERSETMULTI 0x545B /* Set multiport config */
-
-#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
-
-/* Used for packet mode */
-#define TIOCPKT_DATA 0
-#define TIOCPKT_FLUSHREAD 1
-#define TIOCPKT_FLUSHWRITE 2
-#define TIOCPKT_STOP 4
-#define TIOCPKT_START 8
-#define TIOCPKT_NOSTOP 16
-#define TIOCPKT_DOSTOP 32
-
-/* modem lines */
-#define TIOCM_LE 0x001
-#define TIOCM_DTR 0x002
-#define TIOCM_RTS 0x004
-#define TIOCM_ST 0x008
-#define TIOCM_SR 0x010
-#define TIOCM_CTS 0x020
-#define TIOCM_CAR 0x040
-#define TIOCM_RNG 0x080
-#define TIOCM_DSR 0x100
-#define TIOCM_CD TIOCM_CAR
-#define TIOCM_RI TIOCM_RNG
-#define TIOCM_OUT1 0x2000
-#define TIOCM_OUT2 0x4000
-#define TIOCM_LOOP 0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
-
#ifdef __KERNEL__
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
- unsigned short __tmp; \
- get_user(__tmp,&(termio)->x); \
- (termios)->x = (0xffff0000 & (termios)->x) | __tmp; \
-}
-
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
- SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#include <asm-generic/termios.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
new file mode 100644
index 00000000000..ab17db79f69
--- /dev/null
+++ b/include/asm-powerpc/thread_info.h
@@ -0,0 +1,142 @@
+/* thread_info.h: PowerPC low-level thread information
+ * adapted from the i386 version by Paul Mackerras
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds and Dave Miller
+ */
+
+#ifndef _ASM_POWERPC_THREAD_INFO_H
+#define _ASM_POWERPC_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+/* We have 8k stacks on ppc32 and 16k on ppc64 */
+
+#ifdef CONFIG_PPC64
+#define THREAD_SHIFT 14
+#else
+#define THREAD_SHIFT 13
+#endif
+
+#define THREAD_SIZE (1 << THREAD_SHIFT)
+
+#ifndef __ASSEMBLY__
+#include <linux/config.h>
+#include <linux/cache.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <linux/stringify.h>
+
+/*
+ * low level task data.
+ */
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ int cpu; /* cpu we're on */
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
+ struct restart_block restart_block;
+ /* set by force_successful_syscall_return */
+ unsigned char syscall_noerror;
+ /* low level flags - has atomic operations done on it */
+ unsigned long flags ____cacheline_aligned_in_smp;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .flags = 0, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* thread information allocation */
+
+#ifdef CONFIG_DEBUG_STACK_USAGE
+#define THREAD_INFO_GFP GFP_KERNEL | __GFP_ZERO
+#else
+#define THREAD_INFO_GFP GFP_KERNEL
+#endif
+
+#if THREAD_SHIFT >= PAGE_SHIFT
+
+#define THREAD_ORDER (THREAD_SHIFT - PAGE_SHIFT)
+
+#define alloc_thread_info(tsk) \
+ ((struct thread_info *)__get_free_pages(THREAD_INFO_GFP, THREAD_ORDER))
+#define free_thread_info(ti) free_pages((unsigned long)ti, THREAD_ORDER)
+
+#else /* THREAD_SHIFT < PAGE_SHIFT */
+
+#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, THREAD_INFO_GFP)
+#define free_thread_info(ti) kfree(ti)
+
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
+
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+/* how to get the thread information struct from C */
+static inline struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("r1");
+
+ /* gcc4, at least, is smart enough to turn this into a single
+ * rlwinm for ppc32 and clrrdi for ppc64 */
+ return (struct thread_info *)(sp & ~(THREAD_SIZE-1));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flag bit numbers
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling
+ TIF_NEED_RESCHED */
+#define TIF_32BIT 5 /* 32 bit binary */
+/* #define SPARE 6 */
+#define TIF_ABI_PENDING 7 /* 32/64 bit switch needed */
+#define TIF_SYSCALL_AUDIT 8 /* syscall auditing active */
+#define TIF_SINGLESTEP 9 /* singlestepping active */
+#define TIF_MEMDIE 10
+#define TIF_SECCOMP 11 /* secure computing */
+
+/* as above, but as bit values */
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+#define _TIF_32BIT (1<<TIF_32BIT)
+/* #define _SPARE (1<<SPARE) */
+#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_SECCOMP (1<<TIF_SECCOMP)
+#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
+
+#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
+ _TIF_NEED_RESCHED)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_THREAD_INFO_H */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
new file mode 100644
index 00000000000..d9b86a17271
--- /dev/null
+++ b/include/asm-powerpc/time.h
@@ -0,0 +1,226 @@
+/*
+ * Common time prototypes and such for all ppc machines.
+ *
+ * Written by Cort Dougan (cort@cs.nmt.edu) to merge
+ * Paul Mackerras' version and mine for PReP and Pmac.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __POWERPC_TIME_H
+#define __POWERPC_TIME_H
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+#include <asm/processor.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/iseries/hv_call.h>
+#endif
+
+/* time.c */
+extern unsigned long tb_ticks_per_jiffy;
+extern unsigned long tb_ticks_per_usec;
+extern unsigned long tb_ticks_per_sec;
+extern u64 tb_to_xs;
+extern unsigned tb_to_us;
+extern unsigned long tb_last_stamp;
+extern u64 tb_last_jiffy;
+
+DECLARE_PER_CPU(unsigned long, last_jiffy);
+
+struct rtc_time;
+extern void to_tm(int tim, struct rtc_time * tm);
+extern time_t last_rtc_update;
+
+extern void generic_calibrate_decr(void);
+extern void wakeup_decrementer(void);
+
+/* Some sane defaults: 125 MHz timebase, 1GHz processor */
+extern unsigned long ppc_proc_freq;
+#define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8)
+extern unsigned long ppc_tb_freq;
+#define DEFAULT_TB_FREQ 125000000UL
+
+/*
+ * By putting all of this stuff into a single struct we
+ * reduce the number of cache lines touched by do_gettimeofday.
+ * Both by collecting all of the data in one cache line and
+ * by touching only one TOC entry on ppc64.
+ */
+struct gettimeofday_vars {
+ u64 tb_to_xs;
+ u64 stamp_xsec;
+ u64 tb_orig_stamp;
+};
+
+struct gettimeofday_struct {
+ unsigned long tb_ticks_per_sec;
+ struct gettimeofday_vars vars[2];
+ struct gettimeofday_vars * volatile varp;
+ unsigned var_idx;
+ unsigned tb_to_us;
+};
+
+struct div_result {
+ u64 result_high;
+ u64 result_low;
+};
+
+/* Accessor functions for the timebase (RTC on 601) registers. */
+/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
+#ifdef CONFIG_6xx
+#define __USE_RTC() (!cpu_has_feature(CPU_FTR_USE_TB))
+#else
+#define __USE_RTC() 0
+#endif
+
+/* On ppc64 this gets us the whole timebase; on ppc32 just the lower half */
+static inline unsigned long get_tbl(void)
+{
+ unsigned long tbl;
+
+#if defined(CONFIG_403GCX)
+ asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
+#else
+ asm volatile("mftb %0" : "=r" (tbl));
+#endif
+ return tbl;
+}
+
+static inline unsigned int get_tbu(void)
+{
+ unsigned int tbu;
+
+#if defined(CONFIG_403GCX)
+ asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
+#else
+ asm volatile("mftbu %0" : "=r" (tbu));
+#endif
+ return tbu;
+}
+
+static inline unsigned int get_rtcl(void)
+{
+ unsigned int rtcl;
+
+ asm volatile("mfrtcl %0" : "=r" (rtcl));
+ return rtcl;
+}
+
+static inline u64 get_rtc(void)
+{
+ unsigned int hi, lo, hi2;
+
+ do {
+ asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
+ : "=r" (hi), "=r" (lo), "=r" (hi2));
+ } while (hi2 != hi);
+ return (u64)hi * 1000000000 + lo;
+}
+
+#ifdef CONFIG_PPC64
+static inline u64 get_tb(void)
+{
+ return mftb();
+}
+#else
+static inline u64 get_tb(void)
+{
+ unsigned int tbhi, tblo, tbhi2;
+
+ do {
+ tbhi = get_tbu();
+ tblo = get_tbl();
+ tbhi2 = get_tbu();
+ } while (tbhi != tbhi2);
+
+ return ((u64)tbhi << 32) | tblo;
+}
+#endif
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+ mtspr(SPRN_TBWL, 0);
+ mtspr(SPRN_TBWU, upper);
+ mtspr(SPRN_TBWL, lower);
+}
+
+/* Accessor functions for the decrementer register.
+ * The 4xx doesn't even have a decrementer. I tried to use the
+ * generic timer interrupt code, which seems OK, with the 4xx PIT
+ * in auto-reload mode. The problem is that the PIT stops counting when it
+ * hits zero. If it wrapped, we could use it just like a decrementer.
+ */
+static inline unsigned int get_dec(void)
+{
+#if defined(CONFIG_40x)
+ return (mfspr(SPRN_PIT));
+#else
+ return (mfspr(SPRN_DEC));
+#endif
+}
+
+static inline void set_dec(int val)
+{
+#if defined(CONFIG_40x)
+ return; /* Have to let it auto-reload */
+#elif defined(CONFIG_8xx_CPU6)
+ set_dec_cpu6(val);
+#else
+#ifdef CONFIG_PPC_ISERIES
+ struct paca_struct *lpaca = get_paca();
+ int cur_dec;
+
+ if (lpaca->lppaca.shared_proc) {
+ lpaca->lppaca.virtual_decr = val;
+ cur_dec = get_dec();
+ if (cur_dec > val)
+ HvCall_setVirtualDecr();
+ } else
+#endif
+ mtspr(SPRN_DEC, val);
+#endif /* not 40x or 8xx_CPU6 */
+}
+
+static inline unsigned long tb_ticks_since(unsigned long tstamp)
+{
+ if (__USE_RTC()) {
+ int delta = get_rtcl() - (unsigned int) tstamp;
+ return delta < 0 ? delta + 1000000000 : delta;
+ }
+ return get_tbl() - tstamp;
+}
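+
+/*
+ * Illustrative sketch (not from the original header): measuring an elapsed
+ * interval with the scale factors exported above.  do_something() is a
+ * placeholder, and it assumes tb_ticks_per_usec is expressed in the same
+ * units tb_ticks_since() counts in.
+ *
+ *	unsigned long t0 = get_tbl();
+ *	do_something();
+ *	us_elapsed = tb_ticks_since(t0) / tb_ticks_per_usec;
+ */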
+
+#define mulhwu(x,y) \
+({unsigned z; asm ("mulhwu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+
+#ifdef CONFIG_PPC64
+#define mulhdu(x,y) \
+({unsigned long z; asm ("mulhdu %0,%1,%2" : "=r" (z) : "r" (x), "r" (y)); z;})
+#else
+extern u64 mulhdu(u64, u64);
+#endif
+
+extern void smp_space_timers(unsigned int);
+
+extern unsigned mulhwu_scale_factor(unsigned, unsigned);
+extern void div128_by_32(u64 dividend_high, u64 dividend_low,
+ unsigned divisor, struct div_result *dr);
+
+/* Used to store Processor Utilization register (purr) values */
+
+struct cpu_usage {
+ u64 current_tb; /* Holds the current purr register values */
+};
+
+DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
+
+#endif /* __KERNEL__ */
+#endif /* __POWERPC_TIME_H */
diff --git a/include/asm-powerpc/tlb.h b/include/asm-powerpc/tlb.h
new file mode 100644
index 00000000000..56659f12177
--- /dev/null
+++ b/include/asm-powerpc/tlb.h
@@ -0,0 +1,70 @@
+/*
+ * TLB shootdown specifics for powerpc
+ *
+ * Copyright (C) 2002 Anton Blanchard, IBM Corp.
+ * Copyright (C) 2002 Paul Mackerras, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_TLB_H
+#define _ASM_POWERPC_TLB_H
+
+#include <linux/config.h>
+#ifndef __powerpc64__
+#include <asm/pgtable.h>
+#endif
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#ifndef __powerpc64__
+#include <asm/page.h>
+#include <asm/mmu.h>
+#endif
+
+struct mmu_gather;
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+
+#if !defined(CONFIG_PPC_STD_MMU)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#elif defined(__powerpc64__)
+
+extern void pte_free_finish(void);
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ flush_tlb_pending();
+ pte_free_finish();
+}
+
+#else
+
+extern void tlb_flush(struct mmu_gather *tlb);
+
+#endif
+
+/* Get the generic bits... */
+#include <asm-generic/tlb.h>
+
+#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
+
+#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
+
+#else
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+ unsigned long address);
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+ unsigned long address)
+{
+ if (pte_val(*ptep) & _PAGE_HASHPTE)
+ flush_hash_entry(tlb->mm, ptep, address);
+}
+
+#endif
+#endif /* _ASM_POWERPC_TLB_H */
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
new file mode 100644
index 00000000000..ca3655672bb
--- /dev/null
+++ b/include/asm-powerpc/tlbflush.h
@@ -0,0 +1,146 @@
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+struct mm_struct;
+
+#ifdef CONFIG_PPC64
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+ unsigned long index;
+ struct mm_struct *mm;
+ pte_t pte[PPC64_TLB_BATCH_NR];
+ unsigned long vaddr[PPC64_TLB_BATCH_NR];
+ unsigned int large;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+static inline void flush_tlb_pending(void)
+{
+ struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+
+ if (batch->index)
+ __flush_tlb_pending(batch);
+ put_cpu_var(ppc64_tlb_batch);
+}
+
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
+
+#else /* CONFIG_PPC64 */
+
+#include <linux/mm.h>
+
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+/*
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
+#define flush_tlb_pending() asm volatile ("tlbia; sync" : : : "memory")
+#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
+#define flush_tlb_pending() _tlbia()
+#endif
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif /* CONFIG_PPC64 */
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
+ defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_pending();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+#ifdef CONFIG_PPC64
+ flush_tlb_pending();
+#else
+ _tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+#ifndef CONFIG_PPC64
+ _tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ flush_tlb_pending();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_pending();
+}
+
+#else /* 6xx, 7xx, 7xxx cpus */
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#endif
+
+/*
+ * This is called in munmap when we have freed up some page-table
+ * pages. We don't need to do anything here, there's nothing special
+ * about our page-table pages. -- paulus
+ */
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+#endif /*__KERNEL__ */
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h
new file mode 100644
index 00000000000..ec3c2ee8bf8
--- /dev/null
+++ b/include/asm-powerpc/types.h
@@ -0,0 +1,110 @@
+#ifndef _ASM_POWERPC_TYPES_H
+#define _ASM_POWERPC_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid name clashes.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef __powerpc64__
+typedef unsigned int umode_t;
+#else
+typedef unsigned short umode_t;
+#endif
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#ifdef __powerpc64__
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+#else
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+#endif /* __powerpc64__ */
+
+typedef struct {
+ __u32 u[4];
+} __attribute((aligned(16))) __vector128;
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __powerpc64__
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+#ifdef __powerpc64__
+typedef signed long s64;
+typedef unsigned long u64;
+#else
+typedef signed long long s64;
+typedef unsigned long long u64;
+#endif
+
+typedef __vector128 vector128;
+
+#ifdef __powerpc64__
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+typedef u64 dma64_addr_t;
+
+typedef struct {
+ unsigned long entry;
+ unsigned long toc;
+ unsigned long env;
+} func_descr_t;
+
+#ifdef CONFIG_LBD
+typedef u64 sector_t;
+#define HAVE_SECTOR_T
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
new file mode 100644
index 00000000000..33af730f0d1
--- /dev/null
+++ b/include/asm-powerpc/uaccess.h
@@ -0,0 +1,468 @@
+#ifndef _ARCH_POWERPC_UACCESS_H
+#define _ARCH_POWERPC_UACCESS_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/processor.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * The fs/ds values are now the highest legal address in the "segment".
+ * This simplifies the checking in the routines below.
+ */
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
+#ifdef __powerpc64__
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
+#else
+#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.fs)
+#define set_fs(val) (current->thread.fs = (val))
+
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#ifdef __powerpc64__
+/*
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
+ */
+#define __access_ok(addr, size, segment) \
+ (((addr) <= (segment).seg) && ((size) <= (segment).seg))
+
+#else
+
+#define __access_ok(addr, size, segment) \
+ (((addr) <= (segment).seg) && \
+ (((size) == 0) || (((size) - 1) <= ((segment).seg - (addr)))))
+
+#endif
+
+#define access_ok(type, addr, size) \
+ (__chk_user_ptr(addr), \
+ __access_ok((__force unsigned long)(addr), (size), get_fs()))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ unsigned long insn;
+ unsigned long fixup;
+};
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on the
+ * PowerPC, we can just do these as direct assignments. (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * The "user64" versions of the user access functions are versions that
+ * allow access of 64-bit data. The "get_user" functions do not
+ * properly handle 64-bit data because the value gets down cast to a long.
+ * The "put_user" functions already handle 64-bit data properly but we add
+ * "user64" versions for completeness
+ */
+#define get_user(x, ptr) \
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#ifndef __powerpc64__
+#define __get_user64(x, ptr) \
+ __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user64(x, ptr) __put_user(x, ptr)
+#endif
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
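+
+/*
+ * Illustrative sketch (not from the original header): typical use from a
+ * hypothetical ioctl handler, where 'uarg' is an int __user pointer
+ * supplied by userspace.
+ *
+ *	int val;
+ *
+ *	if (get_user(val, (int __user *)uarg))
+ *		return -EFAULT;
+ *	val++;
+ *	if (put_user(val, (int __user *)uarg))
+ *		return -EFAULT;
+ */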
+
+extern long __put_user_bad(void);
+
+#ifdef __powerpc64__
+#define __EX_TABLE_ALIGN "3"
+#define __EX_TABLE_TYPE "llong"
+#else
+#define __EX_TABLE_ALIGN "2"
+#define __EX_TABLE_TYPE "long"
+#endif
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: " op " %1,0(%2) # put_user\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align " __EX_TABLE_ALIGN "\n" \
+ " ."__EX_TABLE_TYPE" 1b,3b\n" \
+ ".previous" \
+ : "=r" (err) \
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval) \
+ __put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
+#define __put_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: stw %1,0(%2)\n" \
+ "2: stw %1+1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: li %0,%3\n" \
+ " b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align " __EX_TABLE_ALIGN "\n" \
+ " ." __EX_TABLE_TYPE " 1b,4b\n" \
+ " ." __EX_TABLE_TYPE " 2b,4b\n" \
+ ".previous" \
+ : "=r" (err) \
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __put_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
+ case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
+ case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
+ case 8: __put_user_asm2(x, ptr, retval); break; \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ long __pu_err; \
+ might_sleep(); \
+ __chk_user_ptr(ptr); \
+ __put_user_size((x), (ptr), (size), __pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ might_sleep(); \
+ if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+ __put_user_size((x), __pu_addr, (size), __pu_err); \
+ __pu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_asm(x, addr, err, op) \
+ __asm__ __volatile__( \
+ "1: "op" %1,0(%2) # get_user\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: li %0,%3\n" \
+ " li %1,0\n" \
+ " b 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align "__EX_TABLE_ALIGN "\n" \
+ " ." __EX_TABLE_TYPE " 1b,3b\n" \
+ ".previous" \
+ : "=r" (err), "=r" (x) \
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err) \
+ __get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err) \
+ __asm__ __volatile__( \
+ "1: lwz %1,0(%2)\n" \
+ "2: lwz %1+1,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: li %0,%3\n" \
+ " li %1,0\n" \
+ " li %1+1,0\n" \
+ " b 3b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .align " __EX_TABLE_ALIGN "\n" \
+ " ." __EX_TABLE_TYPE " 1b,4b\n" \
+ " ." __EX_TABLE_TYPE " 2b,4b\n" \
+ ".previous" \
+ : "=r" (err), "=&r" (x) \
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+#endif /* __powerpc64__ */
+
+#define __get_user_size(x, ptr, size, retval) \
+do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+ if (size > sizeof(x)) \
+ (x) = __get_user_bad(); \
+ switch (size) { \
+ case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
+ case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
+ case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ unsigned long __gu_val; \
+ __chk_user_ptr(ptr); \
+ might_sleep(); \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#ifndef __powerpc64__
+#define __get_user64_nocheck(x, ptr, size) \
+({ \
+ long __gu_err; \
+ long long __gu_val; \
+ __chk_user_ptr(ptr); \
+ might_sleep(); \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+#endif /* __powerpc64__ */
+
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT; \
+ unsigned long __gu_val = 0; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ might_sleep(); \
+ if (access_ok(VERIFY_READ, __gu_addr, (size))) \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
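
A minimal sketch of how these primitives are typically used from kernel code; get_user()/put_user() are presumably defined earlier in this header as wrappers around __get_user_check()/__put_user_check(), and the function name below is invented for illustration.

static int double_user_int(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* non-zero (-EFAULT) on a bad user pointer */
		return -EFAULT;
	val *= 2;
	if (put_user(val, uptr))
		return -EFAULT;
	return 0;
}
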
+
+/* more complex routines */
+
+extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+#ifndef __powerpc64__
+
+extern inline unsigned long copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ if ((unsigned long)from < TASK_SIZE) {
+ over = (unsigned long)from + n - TASK_SIZE;
+ return __copy_tofrom_user((__force void __user *)to, from,
+ n - over) + over;
+ }
+ return n;
+}
+
+extern inline unsigned long copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+{
+ unsigned long over;
+
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_tofrom_user(to, (__force void __user *)from, n);
+ if ((unsigned long)to < TASK_SIZE) {
+ over = (unsigned long)to + n - TASK_SIZE;
+ return __copy_tofrom_user(to, (__force void __user *)from,
+ n - over) + over;
+ }
+ return n;
+}
+
+#else /* __powerpc64__ */
+
+#define __copy_in_user(to, from, size) \
+ __copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n);
+
+#endif /* __powerpc64__ */
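
A short sketch of the usual calling pattern for copy_from_user(); the structure and function names are made up for the example. The key point is that the return value is the number of bytes that could not be copied, so 0 means complete success.

struct hypo_args {			/* hypothetical user-supplied argument block */
	int fd;
	unsigned long flags;
};

static int fetch_args(struct hypo_args *dst, const void __user *src)
{
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;		/* short copy: treat as a fault */
	return 0;
}
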
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+{
+ if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret = 1;	/* non-zero: sizes other than 1/2/4/8 fall back to __copy_tofrom_user */

+
+ switch (n) {
+ case 1:
+ __get_user_size(*(u8 *)to, from, 1, ret);
+ break;
+ case 2:
+ __get_user_size(*(u16 *)to, from, 2, ret);
+ break;
+ case 4:
+ __get_user_size(*(u32 *)to, from, 4, ret);
+ break;
+ case 8:
+ __get_user_size(*(u64 *)to, from, 8, ret);
+ break;
+ }
+ if (ret == 0)
+ return 0;
+ }
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ const void *from, unsigned long n)
+{
+ if (__builtin_constant_p(n) && (n <= 8)) {
+		unsigned long ret = 1;	/* non-zero: sizes other than 1/2/4/8 fall back to __copy_tofrom_user */
+
+ switch (n) {
+ case 1:
+ __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
+ break;
+ case 2:
+ __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
+ break;
+ case 4:
+ __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
+ break;
+ case 8:
+ __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
+ break;
+ }
+ if (ret == 0)
+ return 0;
+ }
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+}
+
+static inline unsigned long __copy_from_user(void *to,
+ const void __user *from, unsigned long size)
+{
+ might_sleep();
+ return __copy_from_user_inatomic(to, from, size);
+}
+
+static inline unsigned long __copy_to_user(void __user *to,
+ const void *from, unsigned long size)
+{
+ might_sleep();
+ return __copy_to_user_inatomic(to, from, size);
+}
+
+extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
+{
+ might_sleep();
+ if (likely(access_ok(VERIFY_WRITE, addr, size)))
+ return __clear_user(addr, size);
+ if ((unsigned long)addr < TASK_SIZE) {
+ unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+ return __clear_user(addr, size - over) + over;
+ }
+ return size;
+}
+
+extern int __strncpy_from_user(char *dst, const char __user *src, long count);
+
+static inline long strncpy_from_user(char *dst, const char __user *src,
+ long count)
+{
+ might_sleep();
+ if (likely(access_ok(VERIFY_READ, src, 1)))
+ return __strncpy_from_user(dst, src, count);
+ return -EFAULT;
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+extern int __strnlen_user(const char __user *str, long len, unsigned long top);
+
+/*
+ * Returns the length of the string at str (including the null byte),
+ * or 0 if we hit a page we can't access,
+ * or something > len if we didn't find a null byte.
+ *
+ * The `top' parameter to __strnlen_user is to make sure that
+ * we can never overflow from the user area into kernel space.
+ */
+static inline int strnlen_user(const char __user *str, long len)
+{
+ unsigned long top = current->thread.fs.seg;
+
+ if ((unsigned long)str > top)
+ return 0;
+ return __strnlen_user(str, len, top);
+}
+
+#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
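
A hedged sketch of how strnlen_user() and strncpy_from_user() combine to pull a bounded string out of user space; the function name and buffer handling are illustrative only.

static int fetch_name(char *buf, int buflen, const char __user *uname)
{
	int len = strnlen_user(uname, buflen);	/* includes the trailing NUL */

	if (len == 0 || len > buflen)		/* fault, or no NUL within buflen */
		return -EFAULT;
	if (strncpy_from_user(buf, uname, len) < 0)
		return -EFAULT;
	return 0;
}
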
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ARCH_POWERPC_UACCESS_H */
diff --git a/include/asm-powerpc/ucontext.h b/include/asm-powerpc/ucontext.h
new file mode 100644
index 00000000000..d9a4ddf0cc8
--- /dev/null
+++ b/include/asm-powerpc/ucontext.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_UCONTEXT_H
+#define _ASM_POWERPC_UCONTEXT_H
+
+#ifdef __powerpc64__
+#include <asm/sigcontext.h>
+#else
+#include <asm/elf.h>
+#endif
+#include <asm/signal.h>
+
+#ifndef __powerpc64__
+struct mcontext {
+ elf_gregset_t mc_gregs;
+ elf_fpregset_t mc_fregs;
+ unsigned long mc_pad[2];
+ elf_vrregset_t mc_vregs __attribute__((__aligned__(16)));
+};
+#endif
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext __user *uc_link;
+ stack_t uc_stack;
+#ifndef __powerpc64__
+ int uc_pad[7];
+ struct mcontext __user *uc_regs;/* points to uc_mcontext field */
+#endif
+ sigset_t uc_sigmask;
+ /* glibc has 1024-bit signal masks, ours are 64-bit */
+#ifdef __powerpc64__
+ sigset_t __unused[15]; /* Allow for uc_sigmask growth */
+ struct sigcontext uc_mcontext; /* last for extensibility */
+#else
+ int uc_maskext[30];
+ int uc_pad2[3];
+ struct mcontext uc_mcontext;
+#endif
+};
+
+#endif /* _ASM_POWERPC_UCONTEXT_H */
diff --git a/include/asm-powerpc/uninorth.h b/include/asm-powerpc/uninorth.h
new file mode 100644
index 00000000000..f737732c386
--- /dev/null
+++ b/include/asm-powerpc/uninorth.h
@@ -0,0 +1,229 @@
+/*
+ * uninorth.h: definitions for using the "UniNorth" host bridge chip
+ * from Apple. This chip is used on "Core99" machines.
+ * This also includes the U2 used on more recent MacRISC2/3
+ * machines and the U3 (G5).
+ *
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_UNINORTH_H__
+#define __ASM_UNINORTH_H__
+
+/*
+ * Uni-N and U3 config space reg. definitions
+ *
+ * (Little endian)
+ */
+
+/* Address ranges selection. This one should work with Bandit too */
+/* Not U3 */
+#define UNI_N_ADDR_SELECT 0x48
+#define UNI_N_ADDR_COARSE_MASK 0xffff0000 /* 256Mb regions at *0000000 */
+#define UNI_N_ADDR_FINE_MASK 0x0000ffff /* 16Mb regions at f*000000 */
+
+/* AGP registers */
+/* Not U3 */
+#define UNI_N_CFG_GART_BASE 0x8c
+#define UNI_N_CFG_AGP_BASE 0x90
+#define UNI_N_CFG_GART_CTRL 0x94
+#define UNI_N_CFG_INTERNAL_STATUS 0x98
+#define UNI_N_CFG_GART_DUMMY_PAGE 0xa4
+
+/* UNI_N_CFG_GART_CTRL bits definitions */
+#define UNI_N_CFG_GART_INVAL 0x00000001
+#define UNI_N_CFG_GART_ENABLE 0x00000100
+#define UNI_N_CFG_GART_2xRESET 0x00010000
+#define UNI_N_CFG_GART_DISSBADET 0x00020000
+/* The following seem to be used only on U3 <j.glisse@gmail.com> */
+#define U3_N_CFG_GART_SYNCMODE 0x00040000
+#define U3_N_CFG_GART_PERFRD 0x00080000
+#define U3_N_CFG_GART_B2BGNT 0x00200000
+#define U3_N_CFG_GART_FASTDDR 0x00400000
+
+/* This is my understanding of UniNorth AGP as of UniNorth rev 1.0x;
+ * revision 1.5 (x4 AGP) may need further changes.
+ *
+ * AGP_BASE register contains the base address of the AGP aperture on
+ * the AGP bus. It doesn't seem to be visible to the CPU as of UniNorth 1.x,
+ * even if decoding of this address range is enabled in the address select
+ * register. Apparently, the only supported bases are 256Mb multiples
+ * (high 4 bits of that register).
+ *
+ * The GART_BASE register appears to contain the physical address of the
+ * GART in system memory in the high address bits (page aligned), and the
+ * GART size in the low-order bits (number of GART pages).
+ *
+ * The GART format itself is one 32-bit word per physical memory page.
+ * This word contains, in little-endian format (!!!), the physical address
+ * of the page in the high bits, and what appears to be an "enable" bit
+ * in the LSB (bit 0) that must be set to 1 when the entry is valid.
+ *
+ * Obviously, the GART is not cache coherent, so any change to it
+ * must be flushed to memory (or maybe the GART space can just be made
+ * non-cacheable). AGP memory itself doesn't seem to be cache coherent either.
+ *
+ * In order to invalidate the GART (which is probably necessary to invalidate
+ * the bridge's internal TLBs), the following sequence has to be written,
+ * in order, to the GART_CTRL register:
+ *
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ * UNI_N_CFG_GART_ENABLE
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET
+ * UNI_N_CFG_GART_ENABLE
+ *
+ * As far as AGP "features" are concerned, it looks like fast write may
+ * not be supported but this has to be confirmed.
+ *
+ * Turning on AGP seems to require a double invalidate operation, one before
+ * setting the AGP command register, one after.
+ *
+ * Turning off AGP seems to require the following sequence: first wait
+ * for the AGP to be idle by reading the internal status register, then
+ * write in that order to the GART_CTRL register:
+ *
+ * UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL
+ * 0
+ * UNI_N_CFG_GART_2xRESET
+ * 0
+ */
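
The GART enable/invalidate sequence described above translates into plain PCI config space writes. Below is a minimal sketch, assuming the UniNorth AGP bridge is reachable as a struct pci_dev and using the standard pci_write_config_dword() accessor; the function name is invented for illustration.

#include <linux/pci.h>

static void uninorth_gart_invalidate(struct pci_dev *agp_bridge)
{
	/* the four writes below follow the invalidation sequence documented above */
	pci_write_config_dword(agp_bridge, UNI_N_CFG_GART_CTRL,
			       UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_INVAL);
	pci_write_config_dword(agp_bridge, UNI_N_CFG_GART_CTRL,
			       UNI_N_CFG_GART_ENABLE);
	pci_write_config_dword(agp_bridge, UNI_N_CFG_GART_CTRL,
			       UNI_N_CFG_GART_ENABLE | UNI_N_CFG_GART_2xRESET);
	pci_write_config_dword(agp_bridge, UNI_N_CFG_GART_CTRL,
			       UNI_N_CFG_GART_ENABLE);
}
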
+
+/*
+ * Uni-N memory mapped reg. definitions
+ *
+ * These registers are big-endian!
+ *
+ * Their meanings come from Darwin and/or from experiments I made with
+ * the bootROM; I'm not sure about their exact semantics yet.
+ *
+ */
+
+/* Version of the UniNorth chip */
+#define UNI_N_VERSION 0x0000 /* Known versions: 3,7 and 8 */
+
+#define UNI_N_VERSION_107 0x0003 /* 1.0.7 */
+#define UNI_N_VERSION_10A 0x0007 /* 1.0.10 */
+#define UNI_N_VERSION_150 0x0011 /* 1.5 */
+#define UNI_N_VERSION_200 0x0024 /* 2.0 */
+#define UNI_N_VERSION_PANGEA 0x00C0 /* Integrated U1 + K */
+#define UNI_N_VERSION_INTREPID 0x00D2 /* Integrated U2 + K */
+#define UNI_N_VERSION_300 0x0030 /* 3.0 (U3 on G5) */
+
+/* This register is used to enable/disable various clocks */
+#define UNI_N_CLOCK_CNTL 0x0020
+#define UNI_N_CLOCK_CNTL_PCI 0x00000001 /* PCI2 clock control */
+#define UNI_N_CLOCK_CNTL_GMAC 0x00000002 /* GMAC clock control */
+#define UNI_N_CLOCK_CNTL_FW 0x00000004 /* FireWire clock control */
+#define UNI_N_CLOCK_CNTL_ATA100 0x00000010 /* ATA-100 clock control (U2) */
+
+/* Power Management control */
+#define UNI_N_POWER_MGT 0x0030
+#define UNI_N_POWER_MGT_NORMAL 0x00
+#define UNI_N_POWER_MGT_IDLE2 0x01
+#define UNI_N_POWER_MGT_SLEEP 0x02
+
+/* This register is configured by Darwin depending on the UniN
+ * revision
+ */
+#define UNI_N_ARB_CTRL 0x0040
+#define UNI_N_ARB_CTRL_QACK_DELAY_SHIFT 15
+#define UNI_N_ARB_CTRL_QACK_DELAY_MASK 0x0e1f8000
+#define UNI_N_ARB_CTRL_QACK_DELAY 0x30
+#define UNI_N_ARB_CTRL_QACK_DELAY105 0x00
+
+/* This one _might_ return the number of the CPU reading it;
+ * the bootROM decides whether to boot or to sleep/spinloop depending
+ * on whether this register is 0.
+ */
+#define UNI_N_CPU_NUMBER 0x0050
+
+/* This register appears to be read by the bootROM to decide what
+ * to do on a non-recoverable reset (powerup or wakeup)
+ */
+#define UNI_N_HWINIT_STATE 0x0070
+#define UNI_N_HWINIT_STATE_SLEEPING 0x01
+#define UNI_N_HWINIT_STATE_RUNNING 0x02
+/* This last bit appears to be used by the bootROM to know that the second
+ * CPU has started and will enter its sleep loop with IP=0
+ */
+#define UNI_N_HWINIT_STATE_CPU1_FLAG 0x10000000
+
+/* This register controls the AACK delay, which is set when the 2004
+ * iBook/PowerBook is in low-speed mode.
+ */
+#define UNI_N_AACK_DELAY 0x0100
+#define UNI_N_AACK_DELAY_ENABLE 0x00000001
+
+/* Clock status for Intrepid */
+#define UNI_N_CLOCK_STOP_STATUS0 0x0150
+#define UNI_N_CLOCK_STOPPED_EXTAGP 0x00200000
+#define UNI_N_CLOCK_STOPPED_AGPDEL 0x00100000
+#define UNI_N_CLOCK_STOPPED_I2S0_45_49 0x00080000
+#define UNI_N_CLOCK_STOPPED_I2S0_18 0x00040000
+#define UNI_N_CLOCK_STOPPED_I2S1_45_49 0x00020000
+#define UNI_N_CLOCK_STOPPED_I2S1_18 0x00010000
+#define UNI_N_CLOCK_STOPPED_TIMER 0x00008000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK18 0x00004000
+#define UNI_N_CLOCK_STOPPED_SCC_RTCLK32 0x00002000
+#define UNI_N_CLOCK_STOPPED_SCC_VIA32 0x00001000
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT0 0x00000800
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT1 0x00000400
+#define UNI_N_CLOCK_STOPPED_SCC_SLOT2 0x00000200
+#define UNI_N_CLOCK_STOPPED_PCI_FBCLKO 0x00000100
+#define UNI_N_CLOCK_STOPPED_VEO0 0x00000080
+#define UNI_N_CLOCK_STOPPED_VEO1 0x00000040
+#define UNI_N_CLOCK_STOPPED_USB0 0x00000020
+#define UNI_N_CLOCK_STOPPED_USB1 0x00000010
+#define UNI_N_CLOCK_STOPPED_USB2 0x00000008
+#define UNI_N_CLOCK_STOPPED_32 0x00000004
+#define UNI_N_CLOCK_STOPPED_45 0x00000002
+#define UNI_N_CLOCK_STOPPED_49 0x00000001
+
+#define UNI_N_CLOCK_STOP_STATUS1 0x0160
+#define UNI_N_CLOCK_STOPPED_PLL4REF 0x00080000
+#define UNI_N_CLOCK_STOPPED_CPUDEL 0x00040000
+#define UNI_N_CLOCK_STOPPED_CPU 0x00020000
+#define UNI_N_CLOCK_STOPPED_BUF_REFCKO 0x00010000
+#define UNI_N_CLOCK_STOPPED_PCI2 0x00008000
+#define UNI_N_CLOCK_STOPPED_FW 0x00004000
+#define UNI_N_CLOCK_STOPPED_GB 0x00002000
+#define UNI_N_CLOCK_STOPPED_ATA66 0x00001000
+#define UNI_N_CLOCK_STOPPED_ATA100 0x00000800
+#define UNI_N_CLOCK_STOPPED_MAX 0x00000400
+#define UNI_N_CLOCK_STOPPED_PCI1 0x00000200
+#define UNI_N_CLOCK_STOPPED_KLPCI 0x00000100
+#define UNI_N_CLOCK_STOPPED_USB0PCI 0x00000080
+#define UNI_N_CLOCK_STOPPED_USB1PCI 0x00000040
+#define UNI_N_CLOCK_STOPPED_USB2PCI 0x00000020
+#define UNI_N_CLOCK_STOPPED_7PCI1 0x00000008
+#define UNI_N_CLOCK_STOPPED_AGP 0x00000004
+#define UNI_N_CLOCK_STOPPED_PCI0 0x00000002
+#define UNI_N_CLOCK_STOPPED_18 0x00000001
+
+/* Intrepid register for OF do-platform-clockspreading */
+#define UNI_N_CLOCK_SPREADING 0x190
+
+/* UniNorth rev. 1.5 has additional performance monitor registers at 0xf00-0xf50 */
+
+
+/*
+ * U3 specific registers
+ */
+
+
+/* U3 Toggle */
+#define U3_TOGGLE_REG 0x00e0
+#define U3_PMC_START_STOP 0x0001
+#define U3_MPIC_RESET 0x0002
+#define U3_MPIC_OUTPUT_ENABLE 0x0004
+
+/* U3 API PHY Config 1 */
+#define U3_API_PHY_CONFIG_1 0x23030
+
+/* U3 HyperTransport registers */
+#define U3_HT_CONFIG_BASE 0x70000
+#define U3_HT_LINK_COMMAND 0x100
+#define U3_HT_LINK_CONFIG 0x110
+#define U3_HT_LINK_FREQ 0x120
+
+#endif /* __ASM_UNINORTH_H__ */
+#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
new file mode 100644
index 00000000000..0991dfceef1
--- /dev/null
+++ b/include/asm-powerpc/unistd.h
@@ -0,0 +1,509 @@
+#ifndef _ASM_PPC_UNISTD_H_
+#define _ASM_PPC_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_query_module 166
+#define __NR_poll 167
+#define __NR_nfsservctl 168
+#define __NR_setresgid 169
+#define __NR_getresgid 170
+#define __NR_prctl 171
+#define __NR_rt_sigreturn 172
+#define __NR_rt_sigaction 173
+#define __NR_rt_sigprocmask 174
+#define __NR_rt_sigpending 175
+#define __NR_rt_sigtimedwait 176
+#define __NR_rt_sigqueueinfo 177
+#define __NR_rt_sigsuspend 178
+#define __NR_pread64 179
+#define __NR_pwrite64 180
+#define __NR_chown 181
+#define __NR_getcwd 182
+#define __NR_capget 183
+#define __NR_capset 184
+#define __NR_sigaltstack 185
+#define __NR_sendfile 186
+#define __NR_getpmsg 187 /* some people actually want streams */
+#define __NR_putpmsg 188 /* some people actually want streams */
+#define __NR_vfork 189
+#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
+#define __NR_readahead 191
+#ifndef __powerpc64__ /* these are 32-bit only */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#endif
+#define __NR_pciconfig_read 198
+#define __NR_pciconfig_write 199
+#define __NR_pciconfig_iobase 200
+#define __NR_multiplexer 201
+#define __NR_getdents64 202
+#define __NR_pivot_root 203
+#ifndef __powerpc64__
+#define __NR_fcntl64 204
+#endif
+#define __NR_madvise 205
+#define __NR_mincore 206
+#define __NR_gettid 207
+#define __NR_tkill 208
+#define __NR_setxattr 209
+#define __NR_lsetxattr 210
+#define __NR_fsetxattr 211
+#define __NR_getxattr 212
+#define __NR_lgetxattr 213
+#define __NR_fgetxattr 214
+#define __NR_listxattr 215
+#define __NR_llistxattr 216
+#define __NR_flistxattr 217
+#define __NR_removexattr 218
+#define __NR_lremovexattr 219
+#define __NR_fremovexattr 220
+#define __NR_futex 221
+#define __NR_sched_setaffinity 222
+#define __NR_sched_getaffinity 223
+/* 224 currently unused */
+#define __NR_tuxcall 225
+#ifndef __powerpc64__
+#define __NR_sendfile64 226
+#endif
+#define __NR_io_setup 227
+#define __NR_io_destroy 228
+#define __NR_io_getevents 229
+#define __NR_io_submit 230
+#define __NR_io_cancel 231
+#define __NR_set_tid_address 232
+#define __NR_fadvise64 233
+#define __NR_exit_group 234
+#define __NR_lookup_dcookie 235
+#define __NR_epoll_create 236
+#define __NR_epoll_ctl 237
+#define __NR_epoll_wait 238
+#define __NR_remap_file_pages 239
+#define __NR_timer_create 240
+#define __NR_timer_settime 241
+#define __NR_timer_gettime 242
+#define __NR_timer_getoverrun 243
+#define __NR_timer_delete 244
+#define __NR_clock_settime 245
+#define __NR_clock_gettime 246
+#define __NR_clock_getres 247
+#define __NR_clock_nanosleep 248
+#define __NR_swapcontext 249
+#define __NR_tgkill 250
+#define __NR_utimes 251
+#define __NR_statfs64 252
+#define __NR_fstatfs64 253
+#ifndef __powerpc64__
+#define __NR_fadvise64_64 254
+#endif
+#define __NR_rtas 255
+#define __NR_sys_debug_setcontext 256
+/* Number 257 is reserved for vserver */
+/* 258 currently unused */
+#define __NR_mbind 259
+#define __NR_get_mempolicy 260
+#define __NR_set_mempolicy 261
+#define __NR_mq_open 262
+#define __NR_mq_unlink 263
+#define __NR_mq_timedsend 264
+#define __NR_mq_timedreceive 265
+#define __NR_mq_notify 266
+#define __NR_mq_getsetattr 267
+#define __NR_kexec_load 268
+#define __NR_add_key 269
+#define __NR_request_key 270
+#define __NR_keyctl 271
+#define __NR_waitid 272
+#define __NR_ioprio_set 273
+#define __NR_ioprio_get 274
+#define __NR_inotify_init 275
+#define __NR_inotify_add_watch 276
+#define __NR_inotify_rm_watch 277
+
+#define __NR_syscalls 278
+
+#ifdef __KERNEL__
+#define __NR__exit __NR_exit
+#define NR_syscalls __NR_syscalls
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* On powerpc a system call clobbers basically the same registers as a
+ * function call, with the exception of LR (which is needed for the
+ * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
+ * an error return status).  See the usage sketch after the _syscallN
+ * macros below.
+ */
+
+#define __syscall_nr(nr, type, name, args...) \
+ unsigned long __sc_ret, __sc_err; \
+ { \
+ register unsigned long __sc_0 __asm__ ("r0"); \
+ register unsigned long __sc_3 __asm__ ("r3"); \
+ register unsigned long __sc_4 __asm__ ("r4"); \
+ register unsigned long __sc_5 __asm__ ("r5"); \
+ register unsigned long __sc_6 __asm__ ("r6"); \
+ register unsigned long __sc_7 __asm__ ("r7"); \
+ register unsigned long __sc_8 __asm__ ("r8"); \
+ \
+ __sc_loadargs_##nr(name, args); \
+ __asm__ __volatile__ \
+ ("sc \n\t" \
+ "mfcr %0 " \
+ : "=&r" (__sc_0), \
+ "=&r" (__sc_3), "=&r" (__sc_4), \
+ "=&r" (__sc_5), "=&r" (__sc_6), \
+ "=&r" (__sc_7), "=&r" (__sc_8) \
+ : __sc_asm_input_##nr \
+ : "cr0", "ctr", "memory", \
+ "r9", "r10","r11", "r12"); \
+ __sc_ret = __sc_3; \
+ __sc_err = __sc_0; \
+ } \
+ if (__sc_err & 0x10000000) \
+ { \
+ errno = __sc_ret; \
+ __sc_ret = -1; \
+ } \
+ return (type) __sc_ret
+
+#define __sc_loadargs_0(name, dummy...) \
+ __sc_0 = __NR_##name
+#define __sc_loadargs_1(name, arg1) \
+ __sc_loadargs_0(name); \
+ __sc_3 = (unsigned long) (arg1)
+#define __sc_loadargs_2(name, arg1, arg2) \
+ __sc_loadargs_1(name, arg1); \
+ __sc_4 = (unsigned long) (arg2)
+#define __sc_loadargs_3(name, arg1, arg2, arg3) \
+ __sc_loadargs_2(name, arg1, arg2); \
+ __sc_5 = (unsigned long) (arg3)
+#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
+ __sc_loadargs_3(name, arg1, arg2, arg3); \
+ __sc_6 = (unsigned long) (arg4)
+#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
+ __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
+ __sc_7 = (unsigned long) (arg5)
+#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5); \
+ __sc_8 = (unsigned long) (arg6)
+
+#define __sc_asm_input_0 "0" (__sc_0)
+#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
+#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
+#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
+#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
+#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
+#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+ __syscall_nr(0, type, name); \
+}
+
+#define _syscall1(type,name,type1,arg1) \
+type name(type1 arg1) \
+{ \
+ __syscall_nr(1, type, name, arg1); \
+}
+
+#define _syscall2(type,name,type1,arg1,type2,arg2) \
+type name(type1 arg1, type2 arg2) \
+{ \
+ __syscall_nr(2, type, name, arg1, arg2); \
+}
+
+#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
+type name(type1 arg1, type2 arg2, type3 arg3) \
+{ \
+ __syscall_nr(3, type, name, arg1, arg2, arg3); \
+}
+
+#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+{ \
+ __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
+}
+
+#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+{ \
+ __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
+}
+#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
+type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
+{ \
+ __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
+}
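
A usage sketch of the _syscallN macros: user-space code of this era could include <asm/unistd.h> and expand one of them into a complete wrapper function. gettid is chosen only because __NR_gettid appears in the table above; any other call would work the same way.

#include <asm/unistd.h>		/* syscall numbers and the _syscallN macros */
#include <sys/types.h>
#include <errno.h>		/* the expansion stores the kernel error code in errno */

_syscall0(pid_t, gettid)	/* expands to: pid_t gettid(void) { ... "sc" ... } */

/* gettid() can now be called like any other function; on failure it returns -1
 * with errno set from the value the kernel left in r3. */
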
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#ifdef CONFIG_PPC32
+#define __ARCH_WANT_OLD_STAT
+#endif
+#ifdef CONFIG_PPC64
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#endif
+
+/*
+ * System call prototypes.
+ */
+#ifdef __KERNEL_SYSCALLS__
+extern pid_t setsid(void);
+extern int write(int fd, const char *buf, off_t count);
+extern int read(int fd, char *buf, off_t count);
+extern off_t lseek(int fd, off_t offset, int count);
+extern int dup(int fd);
+extern int execve(const char *file, char **argv, char **envp);
+extern int open(const char *file, int flag, int mode);
+extern int close(int fd);
+extern pid_t waitpid(pid_t pid, int *wait_stat, int options);
+#endif /* __KERNEL_SYSCALLS__ */
+
+/*
+ * Functions that implement syscalls.
+ */
+unsigned long sys_mmap(unsigned long addr, size_t len, unsigned long prot,
+ unsigned long flags, unsigned long fd, off_t offset);
+unsigned long sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+struct pt_regs;
+int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
+ unsigned long a3, unsigned long a4, unsigned long a5,
+ struct pt_regs *regs);
+int sys_clone(unsigned long clone_flags, unsigned long usp,
+ int __user *parent_tidp, void __user *child_threadptr,
+ int __user *child_tidp, int p6, struct pt_regs *regs);
+int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5, unsigned long p6,
+ struct pt_regs *regs);
+int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5, unsigned long p6,
+ struct pt_regs *regs);
+int sys_pipe(int __user *fildes);
+struct sigaction;
+long sys_rt_sigaction(int sig, const struct sigaction __user *act,
+ struct sigaction __user *oact, size_t sigsetsize);
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#ifdef CONFIG_PPC32
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#else
+#define cond_syscall(x) asm(".weak\t." #x "\n\t.set\t." #x ",.sys_ni_syscall")
+#endif
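
For illustration, this is how cond_syscall() is typically used (the kernel collects such lines in kernel/sys_ni.c): if no real implementation of the named syscall is linked in, the weak symbol resolves to sys_ni_syscall. On 64-bit the extra '.' refers to the function entry-point symbols used by that ABI.

cond_syscall(sys_pciconfig_iobase);	/* falls back to sys_ni_syscall when not built */
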
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PPC_UNISTD_H_ */
diff --git a/include/asm-powerpc/vga.h b/include/asm-powerpc/vga.h
new file mode 100644
index 00000000000..f8d350aabf1
--- /dev/null
+++ b/include/asm-powerpc/vga.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_POWERPC_VGA_H_
+#define _ASM_POWERPC_VGA_H_
+
+#ifdef __KERNEL__
+
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+
+
+#include <asm/io.h>
+
+#include <linux/config.h>
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+
+#define VT_BUF_HAVE_RW
+/*
+ * These are only needed for supporting VGA or MDA text mode, which use little
+ * endian byte ordering.
+ * In other cases, we can optimize by using native byte ordering and
+ * <linux/vt_buffer.h> has already done the right job for us.
+ */
+
+static inline void scr_writew(u16 val, volatile u16 *addr)
+{
+ st_le16(addr, val);
+}
+
+static inline u16 scr_readw(volatile const u16 *addr)
+{
+ return ld_le16(addr);
+}
+
+#define VT_BUF_HAVE_MEMCPYW
+#define scr_memcpyw memcpy
+
+#endif /* CONFIG_VGA_CONSOLE || CONFIG_MDA_CONSOLE */
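
A small sketch of how these helpers end up being used by console code: each 16-bit cell holds the character in the low byte and the attribute in the high byte, and scr_writew() stores it in the little-endian layout the VGA text buffer expects. The function name is invented for illustration.

static void put_cell(volatile u16 *pos, unsigned char ch, unsigned char attr)
{
	scr_writew((attr << 8) | ch, pos);
}
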
+
+extern unsigned long vgacon_remap_base;
+
+#ifdef __powerpc64__
+#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
+#else
+#define VGA_MAP_MEM(x) (x + vgacon_remap_base)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_VGA_H_ */
diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h
new file mode 100644
index 00000000000..e0ccf108277
--- /dev/null
+++ b/include/asm-powerpc/vio.h
@@ -0,0 +1,106 @@
+/*
+ * IBM PowerPC Virtual I/O Infrastructure Support.
+ *
+ * Copyright (c) 2003 IBM Corp.
+ * Dave Engebretsen engebret@us.ibm.com
+ * Santiago Leon santil@us.ibm.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_POWERPC_VIO_H
+#define _ASM_POWERPC_VIO_H
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+
+#include <asm/hvcall.h>
+#include <asm/scatterlist.h>
+
+/*
+ * Architecture-specific constants for drivers to
+ * extract attributes of the device using vio_get_attribute()
+ */
+#define VETH_MAC_ADDR "local-mac-address"
+#define VETH_MCAST_FILTER_SIZE "ibm,mac-address-filters"
+
+/* End architecture-specific constants */
+
+#define h_vio_signal(ua, mode) \
+ plpar_hcall_norets(H_VIO_SIGNAL, ua, mode)
+
+#define VIO_IRQ_DISABLE 0UL
+#define VIO_IRQ_ENABLE 1UL
+
+struct iommu_table;
+
+/*
+ * The vio_dev structure is used to describe virtual I/O devices.
+ */
+struct vio_dev {
+ struct iommu_table *iommu_table; /* vio_map_* uses this */
+ char *name;
+ char *type;
+ uint32_t unit_address;
+ unsigned int irq;
+ struct device dev;
+};
+
+struct vio_driver {
+ struct list_head node;
+ const struct vio_device_id *id_table;
+ int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
+ int (*remove)(struct vio_dev *dev);
+ void (*shutdown)(struct vio_dev *dev);
+ unsigned long driver_data;
+ struct device_driver driver;
+};
+
+struct vio_bus_ops {
+ int (*match)(const struct vio_device_id *id, const struct vio_dev *dev);
+ void (*unregister_device)(struct vio_dev *);
+ void (*release_device)(struct device *);
+};
+
+extern struct dma_mapping_ops vio_dma_ops;
+extern struct bus_type vio_bus_type;
+extern struct vio_dev vio_bus_device;
+
+extern int vio_register_driver(struct vio_driver *drv);
+extern void vio_unregister_driver(struct vio_driver *drv);
+
+extern struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev);
+extern void __devinit vio_unregister_device(struct vio_dev *dev);
+
+extern int vio_bus_init(struct vio_bus_ops *);
+
+#ifdef CONFIG_PPC_PSERIES
+struct device_node;
+
+extern struct vio_dev * __devinit vio_register_device_node(
+ struct device_node *node_vdev);
+extern struct vio_dev *vio_find_node(struct device_node *vnode);
+extern const void *vio_get_attribute(struct vio_dev *vdev, void *which,
+ int *length);
+extern int vio_enable_interrupts(struct vio_dev *dev);
+extern int vio_disable_interrupts(struct vio_dev *dev);
+#endif
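
A minimal sketch of a vio driver built on these declarations; the driver name, probe body, and device-table entries are illustrative (the type/compat strings mimic the virtual ethernet case, which is also why VETH_MAC_ADDR is used), and a real driver would live in its own source file including <asm/vio.h>.

static int hypo_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	int len;
	const void *mac;

	/* pull the "local-mac-address" property exported for virtual ethernet */
	mac = vio_get_attribute(vdev, (void *)VETH_MAC_ADDR, &len);
	if (mac == NULL)
		return -ENODEV;
	/* ... set up the device using vdev->irq, vdev->iommu_table, etc. ... */
	return 0;
}

static struct vio_device_id hypo_ids[] = {
	{ "network", "IBM,l-lan" },	/* device-tree type / compat strings */
	{ "", "" }
};

static struct vio_driver hypo_driver = {
	.id_table = hypo_ids,
	.probe    = hypo_probe,
	.driver   = {
		.name = "hypo-veth",
	},
};

/* module init would call vio_register_driver(&hypo_driver); the exit path
 * would call vio_unregister_driver(&hypo_driver). */
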
+
+static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct vio_driver, driver);
+}
+
+static inline struct vio_dev *to_vio_dev(struct device *dev)
+{
+ return container_of(dev, struct vio_dev, dev);
+}
+
+#endif /* _ASM_POWERPC_VIO_H */
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
new file mode 100644
index 00000000000..43f7129984c
--- /dev/null
+++ b/include/asm-powerpc/xmon.h
@@ -0,0 +1,12 @@
+#ifndef __PPC_XMON_H
+#define __PPC_XMON_H
+#ifdef __KERNEL__
+
+struct pt_regs;
+
+extern int xmon(struct pt_regs *excp);
+extern void xmon_printf(const char *fmt, ...);
+extern void xmon_init(int);
+
+#endif
+#endif