summaryrefslogtreecommitdiffstats
path: root/arch/sparc64/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 15:20:36 -0700
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc64/kernel
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--arch/sparc64/kernel/Makefile44
-rw-r--r--arch/sparc64/kernel/auxio.c152
-rw-r--r--arch/sparc64/kernel/binfmt_aout32.c424
-rw-r--r--arch/sparc64/kernel/binfmt_elf32.c159
-rw-r--r--arch/sparc64/kernel/central.c457
-rw-r--r--arch/sparc64/kernel/chmc.c458
-rw-r--r--arch/sparc64/kernel/cpu.c124
-rw-r--r--arch/sparc64/kernel/devices.c144
-rw-r--r--arch/sparc64/kernel/dtlb_backend.S181
-rw-r--r--arch/sparc64/kernel/dtlb_base.S113
-rw-r--r--arch/sparc64/kernel/dtlb_prot.S54
-rw-r--r--arch/sparc64/kernel/ebus.c644
-rw-r--r--arch/sparc64/kernel/entry.S1919
-rw-r--r--arch/sparc64/kernel/etrap.S301
-rw-r--r--arch/sparc64/kernel/head.S782
-rw-r--r--arch/sparc64/kernel/idprom.c49
-rw-r--r--arch/sparc64/kernel/init_task.c35
-rw-r--r--arch/sparc64/kernel/ioctl32.c597
-rw-r--r--arch/sparc64/kernel/iommu_common.c231
-rw-r--r--arch/sparc64/kernel/iommu_common.h48
-rw-r--r--arch/sparc64/kernel/irq.c1269
-rw-r--r--arch/sparc64/kernel/isa.c329
-rw-r--r--arch/sparc64/kernel/itlb_base.S83
-rw-r--r--arch/sparc64/kernel/kprobes.c394
-rw-r--r--arch/sparc64/kernel/module.c209
-rw-r--r--arch/sparc64/kernel/pci.c805
-rw-r--r--arch/sparc64/kernel/pci_common.c1040
-rw-r--r--arch/sparc64/kernel/pci_impl.h49
-rw-r--r--arch/sparc64/kernel/pci_iommu.c855
-rw-r--r--arch/sparc64/kernel/pci_psycho.c1560
-rw-r--r--arch/sparc64/kernel/pci_sabre.c1702
-rw-r--r--arch/sparc64/kernel/pci_schizo.c2187
-rw-r--r--arch/sparc64/kernel/power.c150
-rw-r--r--arch/sparc64/kernel/process.c869
-rw-r--r--arch/sparc64/kernel/ptrace.c646
-rw-r--r--arch/sparc64/kernel/rtrap.S362
-rw-r--r--arch/sparc64/kernel/sbus.c1243
-rw-r--r--arch/sparc64/kernel/semaphore.c251
-rw-r--r--arch/sparc64/kernel/setup.c731
-rw-r--r--arch/sparc64/kernel/signal.c688
-rw-r--r--arch/sparc64/kernel/signal32.c1469
-rw-r--r--arch/sparc64/kernel/smp.c1244
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c432
-rw-r--r--arch/sparc64/kernel/starfire.c123
-rw-r--r--arch/sparc64/kernel/sunos_ioctl32.c275
-rw-r--r--arch/sparc64/kernel/sys32.S327
-rw-r--r--arch/sparc64/kernel/sys_sparc.c723
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c1118
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c1343
-rw-r--r--arch/sparc64/kernel/systbls.S251
-rw-r--r--arch/sparc64/kernel/time.c1195
-rw-r--r--arch/sparc64/kernel/trampoline.S368
-rw-r--r--arch/sparc64/kernel/traps.c2118
-rw-r--r--arch/sparc64/kernel/ttable.S280
-rw-r--r--arch/sparc64/kernel/unaligned.c729
-rw-r--r--arch/sparc64/kernel/us2e_cpufreq.c400
-rw-r--r--arch/sparc64/kernel/us3_cpufreq.c255
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S106
-rw-r--r--arch/sparc64/kernel/winfixup.S417
59 files changed, 35511 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
new file mode 100644
index 00000000000..093281bdf85
--- /dev/null
+++ b/arch/sparc64/kernel/Makefile
@@ -0,0 +1,44 @@
+# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $
+# Makefile for the linux kernel.
+#
+
+# Assembler gets strict -ansi; all C in this directory builds with -Werror.
+EXTRA_AFLAGS := -ansi
+EXTRA_CFLAGS := -Werror
+
+# Objects built but not placed in obj-y (linked explicitly / used by the linker script).
+extra-y := head.o init_task.o vmlinux.lds
+
+# Core objects always built into the kernel.
+obj-y := process.o setup.o cpu.o idprom.o \
+ traps.o devices.o auxio.o \
+ irq.o ptrace.o time.o sys_sparc.o signal.o \
+ unaligned.o central.o pci.o starfire.o semaphore.o \
+ power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
+
+# Config-conditional objects.
+obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
+ pci_psycho.o pci_sabre.o pci_schizo.o
+obj-$(CONFIG_SMP) += smp.o trampoline.o
+obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o ioctl32.o
+obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
+obj-$(CONFIG_BINFMT_AOUT32) += binfmt_aout32.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
+obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+
+# SunOS and Solaris emulation share the same 32-bit SunOS syscall objects.
+ifdef CONFIG_SUNOS_EMUL
+ obj-y += sys_sunos32.o sunos_ioctl32.o
+else
+ ifdef CONFIG_SOLARIS_EMUL
+ obj-y += sys_sunos32.o sunos_ioctl32.o
+ endif
+endif
+
+# Pick the code-model flag spelling understood by the compiler in use.
+ifneq ($(NEW_GCC),y)
+ CMODEL_CFLAG := -mmedlow
+else
+ CMODEL_CFLAG := -m64 -mcmodel=medlow
+endif
+
+# head.o pulls in these .S files via #include; rebuild when any change.
+head.o: head.S ttable.S itlb_base.S dtlb_base.S dtlb_backend.S dtlb_prot.S \
+ etrap.S rtrap.S winfixup.S entry.S
+
+# ioctl32.c includes private headers from fs/.
+CFLAGS_ioctl32.o += -Ifs/
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
new file mode 100644
index 00000000000..a0716ccc2f4
--- /dev/null
+++ b/arch/sparc64/kernel/auxio.c
@@ -0,0 +1,152 @@
+/* auxio.c: Probing for the Sparc AUXIO register at boot time.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+
+#include <asm/oplib.h>
+#include <asm/io.h>
+#include <asm/sbus.h>
+#include <asm/ebus.h>
+#include <asm/auxio.h>
+
+/* This cannot be static, as it is referenced in entry.S */
+void __iomem *auxio_register = NULL;
+
+enum auxio_type {
+ AUXIO_TYPE_NODEV,
+ AUXIO_TYPE_SBUS,
+ AUXIO_TYPE_EBUS
+};
+
+static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
+static DEFINE_SPINLOCK(auxio_lock);
+
+/* Read-modify-write the SBUS AUXIO register: set @bits_on, clear
+ * @bits_off.  The AUX1 mask bits are always cleared before writing
+ * back.  Serialized by auxio_lock; a no-op when no auxio device was
+ * found at probe time (auxio_register == NULL).
+ */
+static void __auxio_sbus_set(u8 bits_on, u8 bits_off)
+{
+ if (auxio_register) {
+ unsigned char regval;
+ unsigned long flags;
+ unsigned char newval;
+
+ spin_lock_irqsave(&auxio_lock, flags);
+
+ regval = sbus_readb(auxio_register);
+ newval = regval | bits_on;
+ newval &= ~bits_off;
+ /* Never write the AUX1 mask bits back. */
+ newval &= ~AUXIO_AUX1_MASK;
+ sbus_writeb(newval, auxio_register);
+
+ spin_unlock_irqrestore(&auxio_lock, flags);
+ }
+}
+
+/* Read-modify-write the EBUS (PCI) AUXIO register: set @bits_on,
+ * clear @bits_off.  The EBUS register is 32 bits wide but only the
+ * low byte is modified here.  Serialized by auxio_lock; a no-op
+ * when no auxio device was found at probe time.
+ */
+static void __auxio_ebus_set(u8 bits_on, u8 bits_off)
+{
+ if (auxio_register) {
+ unsigned char regval;
+ unsigned long flags;
+ unsigned char newval;
+
+ spin_lock_irqsave(&auxio_lock, flags);
+
+ regval = (u8)readl(auxio_register);
+ newval = regval | bits_on;
+ newval &= ~bits_off;
+ writel((u32)newval, auxio_register);
+
+ spin_unlock_irqrestore(&auxio_lock, flags);
+ }
+}
+
+/* Drive the front panel LED bit on an EBUS auxio device. */
+static inline void __auxio_ebus_set_led(int on)
+{
+ (on) ? __auxio_ebus_set(AUXIO_PCIO_LED, 0) :
+ __auxio_ebus_set(0, AUXIO_PCIO_LED) ;
+}
+
+/* Drive the front panel LED bit on an SBUS auxio device. */
+static inline void __auxio_sbus_set_led(int on)
+{
+ (on) ? __auxio_sbus_set(AUXIO_AUX1_LED, 0) :
+ __auxio_sbus_set(0, AUXIO_AUX1_LED) ;
+}
+
+/* Public entry point: turn the system LED on/off, dispatching on
+ * whichever auxio flavor (SBUS or EBUS) was found by auxio_probe().
+ * Silently does nothing when no auxio device exists.
+ */
+void auxio_set_led(int on)
+{
+ switch(auxio_devtype) {
+ case AUXIO_TYPE_SBUS:
+ __auxio_sbus_set_led(on);
+ break;
+ case AUXIO_TYPE_EBUS:
+ __auxio_ebus_set_led(on);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Drive the link-test-enable (LTE) bit on an SBUS auxio device. */
+static inline void __auxio_sbus_set_lte(int on)
+{
+ (on) ? __auxio_sbus_set(AUXIO_AUX1_LTE, 0) :
+ __auxio_sbus_set(0, AUXIO_AUX1_LTE) ;
+}
+
+/* Public entry point: set the link-test-enable bit.  Only the SBUS
+ * auxio flavor supports it; EBUS deliberately falls through to the
+ * do-nothing default.
+ */
+void auxio_set_lte(int on)
+{
+ switch(auxio_devtype) {
+ case AUXIO_TYPE_SBUS:
+ __auxio_sbus_set_lte(on);
+ break;
+ case AUXIO_TYPE_EBUS:
+ /* FALL-THROUGH */
+ default:
+ break;
+ }
+}
+
+/* Boot-time probe: search the SBUS device tree (then, with PCI
+ * support, the EBUS tree) for a node named "auxio"; record its type
+ * in auxio_devtype and map its register into auxio_register.
+ */
+void __init auxio_probe(void)
+{
+ struct sbus_bus *sbus;
+ struct sbus_dev *sdev = NULL;
+
+ for_each_sbus(sbus) {
+ for_each_sbusdev(sdev, sbus) {
+ if(!strcmp(sdev->prom_name, "auxio"))
+ goto found_sdev;
+ }
+ }
+
+found_sdev:
+ if (sdev) {
+ auxio_devtype = AUXIO_TYPE_SBUS;
+ auxio_register = sbus_ioremap(&sdev->resource[0], 0,
+ sdev->reg_addrs[0].reg_size,
+ "auxiliaryIO");
+ }
+#ifdef CONFIG_PCI
+ else {
+ struct linux_ebus *ebus;
+ struct linux_ebus_device *edev = NULL;
+
+ for_each_ebus(ebus) {
+ for_each_ebusdev(edev, ebus) {
+ if (!strcmp(edev->prom_name, "auxio"))
+ goto ebus_done;
+ }
+ }
+ ebus_done:
+ if (edev) {
+ auxio_devtype = AUXIO_TYPE_EBUS;
+ auxio_register =
+ ioremap(edev->resource[0].start, sizeof(u32));
+ }
+ }
+ /* NOTE(review): this LED call is inside the CONFIG_PCI block, so
+ * SBUS-only (non-PCI) configs never execute it — presumably it
+ * belongs after the #endif; verify intent.
+ */
+ auxio_set_led(AUXIO_LED_ON);
+#endif
+}
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
new file mode 100644
index 00000000000..b2854ef221d
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -0,0 +1,424 @@
+/*
+ * linux/fs/binfmt_aout.c
+ *
+ * Copyright (C) 1991, 1992, 1996 Linus Torvalds
+ *
+ * Hacked a bit by DaveM to make it work with 32-bit SunOS
+ * binaries on the sparc64 port.
+ */
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+
+static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
+static int load_aout32_library(struct file*);
+static int aout32_core_dump(long signr, struct pt_regs * regs, struct file *file);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+static struct linux_binfmt aout32_format = {
+ NULL, THIS_MODULE, load_aout32_binary, load_aout32_library, aout32_core_dump,
+ PAGE_SIZE
+};
+
+/* Extend the brk segment to cover [start, end) after page-aligning
+ * both ends.  Takes mmap_sem for writing around do_brk().  Does
+ * nothing if the aligned range is empty.
+ */
+static void set_brk(unsigned long start, unsigned long end)
+{
+ start = PAGE_ALIGN(start);
+ end = PAGE_ALIGN(end);
+ if (end <= start)
+ return;
+ down_write(&current->mm->mmap_sem);
+ do_brk(start, end - start);
+ up_write(&current->mm->mmap_sem);
+}
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * macros to write out all the necessary info.
+ */
+
+/* Write @nr bytes from @addr to the core file at the current file
+ * position; returns nonzero only when the full count was written.
+ */
+static int dump_write(struct file *file, const void *addr, int nr)
+{
+ return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+}
+
+#define DUMP_WRITE(addr, nr) \
+ if (!dump_write(file, (void *)(addr), (nr))) \
+ goto end_coredump;
+
+#define DUMP_SEEK(offset) \
+if (file->f_op->llseek) { \
+ if (file->f_op->llseek(file,(offset),0) != (offset)) \
+ goto end_coredump; \
+} else file->f_pos = (offset)
+
+/*
+ * Routine writes a core dump image in the current directory.
+ * Currently only a stub-function.
+ *
+ * Note that setuid/setgid files won't make a core-dump if the uid/gid
+ * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable"
+ * field, which also makes sure the core-dumps won't be recursive if the
+ * dumping of the process results in another error..
+ */
+
+/* Write an a.out-format core image for the current task: the
+ * struct user header, then the data area, the stack area, and
+ * finally the task_struct itself.  Areas that would exceed
+ * RLIMIT_CORE or fail access_ok() are trimmed to zero.  Uses
+ * set_fs() to flip between kernel and user address limits; the
+ * saved limit is restored on every exit path.  Returns nonzero
+ * once dumping has started (even if truncated via end_coredump).
+ */
+static int aout32_core_dump(long signr, struct pt_regs *regs, struct file *file)
+{
+ mm_segment_t fs;
+ int has_dumped = 0;
+ unsigned long dump_start, dump_size;
+ struct user dump;
+# define START_DATA(u) (u.u_tsize)
+# define START_STACK(u) ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1))
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+ current->flags |= PF_DUMPCORE;
+ strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
+ dump.signal = signr;
+ dump_thread(regs, &dump);
+
+/* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
+ if ((dump.u_dsize+dump.u_ssize) >
+ current->signal->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_dsize = 0;
+
+/* Make sure we have enough room to write the stack and data areas. */
+ if ((dump.u_ssize) >
+ current->signal->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_ssize = 0;
+
+/* make sure we actually have a data and stack area to dump */
+ set_fs(USER_DS);
+ if (!access_ok(VERIFY_READ, (void __user *) START_DATA(dump), dump.u_dsize))
+ dump.u_dsize = 0;
+ if (!access_ok(VERIFY_READ, (void __user *) START_STACK(dump), dump.u_ssize))
+ dump.u_ssize = 0;
+
+ set_fs(KERNEL_DS);
+/* struct user */
+ DUMP_WRITE(&dump,sizeof(dump));
+/* now we start writing out the user space info */
+ set_fs(USER_DS);
+/* Dump the data area */
+ if (dump.u_dsize != 0) {
+ dump_start = START_DATA(dump);
+ dump_size = dump.u_dsize;
+ DUMP_WRITE(dump_start,dump_size);
+ }
+/* Now prepare to dump the stack area */
+ if (dump.u_ssize != 0) {
+ dump_start = START_STACK(dump);
+ dump_size = dump.u_ssize;
+ DUMP_WRITE(dump_start,dump_size);
+ }
+/* Finally dump the task struct. Not be used by gdb, but could be useful */
+ set_fs(KERNEL_DS);
+ DUMP_WRITE(current,sizeof(*current));
+end_coredump:
+ set_fs(fs);
+ return has_dumped;
+}
+
+/*
+ * create_aout32_tables() parses the env- and arg-strings in new user
+ * memory and creates the pointer tables from them, and puts their
+ * addresses on the "stack", returning the new stack pointer value.
+ */
+
+/* Build the 32-bit argc/argv/envp tables on the new process stack.
+ * @p points at the packed NUL-terminated argument/environment
+ * strings already copied to user space; pointer slots are 32-bit
+ * (u32) since this is a compat loader.  Records arg/env boundaries
+ * in current->mm and returns the new (8-byte aligned) stack pointer.
+ */
+static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bprm)
+{
+ u32 __user *argv;
+ u32 __user *envp;
+ u32 __user *sp;
+ int argc = bprm->argc;
+ int envc = bprm->envc;
+
+ sp = (u32 __user *)((-(unsigned long)sizeof(char *))&(unsigned long)p);
+
+ /* This imposes the proper stack alignment for a new process. */
+ sp = (u32 __user *) (((unsigned long) sp) & ~7);
+ /* Keep the total slot count even so alignment survives. */
+ if ((envc+argc+3)&1)
+ --sp;
+
+ sp -= envc+1;
+ envp = sp;
+ sp -= argc+1;
+ argv = sp;
+ put_user(argc,--sp);
+ current->mm->arg_start = (unsigned long) p;
+ /* Store each argv pointer, then skip past its string. */
+ while (argc-->0) {
+ char c;
+ put_user(((u32)(unsigned long)(p)),argv++);
+ do {
+ get_user(c,p++);
+ } while (c);
+ }
+ put_user(NULL,argv);
+ current->mm->arg_end = current->mm->env_start = (unsigned long) p;
+ /* Same walk for the environment pointers. */
+ while (envc-->0) {
+ char c;
+ put_user(((u32)(unsigned long)(p)),envp++);
+ do {
+ get_user(c,p++);
+ } while (c);
+ }
+ put_user(NULL,envp);
+ current->mm->env_end = (unsigned long) p;
+ return sp;
+}
+
+/*
+ * These are the functions used to load a.out style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+ */
+
+/* Load a 32-bit SunOS a.out executable.  Validates the exec header
+ * (magic, relocation sizes, file size) and RLIMIT_DATA, flushes the
+ * old executable, sets PER_SUNOS personality, then maps or reads in
+ * text/data according to the magic type (NMAGIC/OMAGIC read, others
+ * mmap when possible).  Finishes by setting up brk, stack/arg pages,
+ * the 32-bit argv/envp tables, and starting the 32-bit thread.
+ * Returns 0 on success or a negative errno (past the point of no
+ * return, failures SIGKILL the task).
+ */
+static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+ struct exec ex;
+ unsigned long error;
+ unsigned long fd_offset;
+ unsigned long rlim;
+ unsigned long orig_thr_flags;
+ int retval;
+
+ ex = *((struct exec *) bprm->buf); /* exec-header */
+ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
+ N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
+ N_TRSIZE(ex) || N_DRSIZE(ex) ||
+ bprm->file->f_dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+ return -ENOEXEC;
+ }
+
+ fd_offset = N_TXTOFF(ex);
+
+ /* Check initial limits. This avoids letting people circumvent
+ * size limits imposed on them by creating programs with large
+ * arrays in the data or bss.
+ */
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+ /* Flush all traces of the currently running executable */
+ retval = flush_old_exec(bprm);
+ if (retval)
+ return retval;
+
+ /* OK, This is the point of no return */
+ set_personality(PER_SUNOS);
+
+ current->mm->end_code = ex.a_text +
+ (current->mm->start_code = N_TXTADDR(ex));
+ current->mm->end_data = ex.a_data +
+ (current->mm->start_data = N_DATADDR(ex));
+ current->mm->brk = ex.a_bss +
+ (current->mm->start_brk = N_BSSADDR(ex));
+
+ set_mm_counter(current->mm, rss, 0);
+ current->mm->mmap = NULL;
+ compute_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+ if (N_MAGIC(ex) == NMAGIC) {
+ loff_t pos = fd_offset;
+ /* NMAGIC: allocate text and data via brk and read both
+ * straight in from the file.
+ */
+ down_write(&current->mm->mmap_sem);
+ error = do_brk(N_TXTADDR(ex), ex.a_text);
+ up_write(&current->mm->mmap_sem);
+ bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+ ex.a_text, &pos);
+ down_write(&current->mm->mmap_sem);
+ error = do_brk(N_DATADDR(ex), ex.a_data);
+ up_write(&current->mm->mmap_sem);
+ bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
+ ex.a_data, &pos);
+ goto beyond_if;
+ }
+
+ if (N_MAGIC(ex) == OMAGIC) {
+ loff_t pos = fd_offset;
+ down_write(&current->mm->mmap_sem);
+ do_brk(N_TXTADDR(ex) & PAGE_MASK,
+ ex.a_text+ex.a_data + PAGE_SIZE - 1);
+ up_write(&current->mm->mmap_sem);
+ bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
+ ex.a_text+ex.a_data, &pos);
+ } else {
+ /* Rate-limited warning, at most once per 5 seconds. */
+ static unsigned long error_time;
+ if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
+ (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time) > 5*HZ)
+ {
+ printk(KERN_NOTICE "executable not page aligned\n");
+ error_time = jiffies;
+ }
+
+ /* No mmap op on this file: fall back to brk + read. */
+ if (!bprm->file->f_op->mmap) {
+ loff_t pos = fd_offset;
+ down_write(&current->mm->mmap_sem);
+ do_brk(0, ex.a_text+ex.a_data);
+ up_write(&current->mm->mmap_sem);
+ bprm->file->f_op->read(bprm->file,
+ (char __user *)N_TXTADDR(ex),
+ ex.a_text+ex.a_data, &pos);
+ goto beyond_if;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
+ PROT_READ | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset);
+ up_write(&current->mm->mmap_sem);
+
+ if (error != N_TXTADDR(ex)) {
+ send_sig(SIGKILL, current, 0);
+ return error;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+ if (error != N_DATADDR(ex)) {
+ send_sig(SIGKILL, current, 0);
+ return error;
+ }
+ }
+beyond_if:
+ set_binfmt(&aout32_format);
+
+ set_brk(current->mm->start_brk, current->mm->brk);
+
+ /* Make sure STACK_TOP returns the right thing. */
+ orig_thr_flags = current_thread_info()->flags;
+ current_thread_info()->flags |= _TIF_32BIT;
+
+ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+ if (retval < 0) {
+ current_thread_info()->flags = orig_thr_flags;
+
+ /* Someone check-me: is this error path enough? */
+ send_sig(SIGKILL, current, 0);
+ return retval;
+ }
+
+ current->mm->start_stack =
+ (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
+ /* Task was 64-bit before: reload the MMU context register.
+ * NOTE(review): presumably needed because _TIF_32BIT changed
+ * the pgd cache layout — confirm against mm/ code.
+ */
+ if (!(orig_thr_flags & _TIF_32BIT)) {
+ unsigned long pgd_cache = get_pgd_cache(current->mm->pgd);
+
+ __asm__ __volatile__("stxa\t%0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (pgd_cache),
+ "r" (TSB_REG), "i" (ASI_DMMU));
+ }
+ start_thread32(regs, ex.a_entry, current->mm->start_stack);
+ if (current->ptrace & PT_PTRACED)
+ send_sig(SIGTRAP, current, 0);
+ return 0;
+}
+
+/* N.B. Move to .h file and use code in fs/binfmt_aout.c? */
+static int load_aout32_library(struct file *file)
+{
+ struct inode * inode;
+ unsigned long bss, start_addr, len;
+ unsigned long error;
+ int retval;
+ struct exec ex;
+
+ inode = file->f_dentry->d_inode;
+
+ retval = -ENOEXEC;
+ error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
+ if (error != sizeof(ex))
+ goto out;
+
+ /* We come in here for the regular a.out style of shared libraries */
+ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
+ N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
+ inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+ goto out;
+ }
+
+ if (N_MAGIC(ex) == ZMAGIC && N_TXTOFF(ex) &&
+ (N_TXTOFF(ex) < inode->i_sb->s_blocksize)) {
+ printk("N_TXTOFF < BLOCK_SIZE. Please convert library\n");
+ goto out;
+ }
+
+ if (N_FLAGS(ex))
+ goto out;
+
+ /* For QMAGIC, the starting address is 0x20 into the page. We mask
+ this off to get the starting address for the page */
+
+ start_addr = ex.a_entry & 0xfffff000;
+
+ /* Now use mmap to map the library into memory. */
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
+ N_TXTOFF(ex));
+ up_write(&current->mm->mmap_sem);
+ retval = error;
+ if (error != start_addr)
+ goto out;
+
+ len = PAGE_ALIGN(ex.a_text + ex.a_data);
+ bss = ex.a_text + ex.a_data + ex.a_bss;
+ if (bss > len) {
+ down_write(&current->mm->mmap_sem);
+ error = do_brk(start_addr + len, bss - len);
+ up_write(&current->mm->mmap_sem);
+ retval = error;
+ if (error != start_addr + len)
+ goto out;
+ }
+ retval = 0;
+out:
+ return retval;
+}
+
+/* Module init: register the a.out32 loader with the binfmt core. */
+static int __init init_aout32_binfmt(void)
+{
+ return register_binfmt(&aout32_format);
+}
+
+/* Module exit: unregister the a.out32 loader. */
+static void __exit exit_aout32_binfmt(void)
+{
+ unregister_binfmt(&aout32_format);
+}
+
+module_init(init_aout32_binfmt);
+module_exit(exit_aout32_binfmt);
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
new file mode 100644
index 00000000000..a1a12d2aa35
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -0,0 +1,159 @@
+/*
+ * binfmt_elf32.c: Support 32-bit Sparc ELF binaries on Ultra.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#define ELF_ARCH EM_SPARC
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2MSB;
+
+/* For the most part we present code dumps in the format
+ * Solaris does.
+ */
+typedef unsigned int elf_greg_t;
+#define ELF_NGREG 38
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/* Format is:
+ * G0 --> G7
+ * O0 --> O7
+ * L0 --> L7
+ * I0 --> I7
+ * PSR, PC, nPC, Y, WIM, TBR
+ */
+#include <asm/psrcompat.h>
+#define ELF_CORE_COPY_REGS(__elf_regs, __pt_regs) \
+do { unsigned int *dest = &(__elf_regs[0]); \
+ struct pt_regs *src = (__pt_regs); \
+ unsigned int __user *sp; \
+ int i; \
+ for(i = 0; i < 16; i++) \
+ dest[i] = (unsigned int) src->u_regs[i];\
+ /* Don't try this at home kids... */ \
+ sp = (unsigned int __user *) (src->u_regs[14] & \
+ 0x00000000fffffffc); \
+ for(i = 0; i < 16; i++) \
+ __get_user(dest[i+16], &sp[i]); \
+ dest[32] = tstate_to_psr(src->tstate); \
+ dest[33] = (unsigned int) src->tpc; \
+ dest[34] = (unsigned int) src->tnpc; \
+ dest[35] = src->y; \
+ dest[36] = dest[37] = 0; /* XXX */ \
+} while(0);
+
+typedef struct {
+ union {
+ unsigned int pr_regs[32];
+ unsigned long pr_dregs[16];
+ } pr_fr;
+ unsigned int __unused;
+ unsigned int pr_fsr;
+ unsigned char pr_qcnt;
+ unsigned char pr_q_entrysize;
+ unsigned char pr_en;
+ unsigned int pr_q[64];
+} elf_fpregset_t;
+
+/* UltraSparc extensions. Still unused, but will be eventually. */
+typedef struct {
+ unsigned int pr_type;
+ unsigned int pr_align;
+ union {
+ struct {
+ union {
+ unsigned int pr_regs[32];
+ unsigned long pr_dregs[16];
+ long double pr_qregs[8];
+ } pr_xfr;
+ } pr_v8p;
+ unsigned int pr_xfsr;
+ unsigned int pr_fprs;
+ unsigned int pr_xg[8];
+ unsigned int pr_xo[8];
+ unsigned long pr_tstate;
+ unsigned int pr_filler[8];
+ } pr_un;
+} elf_xregset_t;
+
+#define elf_check_arch(x) (((x)->e_machine == EM_SPARC) || ((x)->e_machine == EM_SPARC32PLUS))
+
+#define ELF_ET_DYN_BASE 0x70000000
+
+
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/elfcore.h>
+#include <linux/compat.h>
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned int pr_sigpend; /* Set of pending signals */
+ unsigned int pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_cutime; /* Cumulative user time */
+ struct compat_timeval pr_cstime; /* Cumulative system time */
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned int pr_flag; /* flags */
+ u16 pr_uid;
+ u16 pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
+#include <linux/highuid.h>
+
+#undef NEW_TO_OLD_UID
+#undef NEW_TO_OLD_GID
+#define NEW_TO_OLD_UID(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
+#define NEW_TO_OLD_GID(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
+
+#include <linux/time.h>
+
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+/* Convert a cputime_t into a 32-bit compat timeval by going through
+ * jiffies; substituted for cputime_to_timeval in the included
+ * fs/binfmt_elf.c so core dumps carry 32-bit time fields.
+ */
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
+#define elf_addr_t u32
+#undef start_thread
+#define start_thread start_thread32
+#define init_elf_binfmt init_elf32_binfmt
+
+MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit SparcLinux binaries on the Ultra");
+MODULE_AUTHOR("Eric Youngdale, David S. Miller, Jakub Jelinek");
+
+#undef MODULE_DESCRIPTION
+#undef MODULE_AUTHOR
+
+#undef TASK_SIZE
+#define TASK_SIZE 0xf0000000
+
+#include "../../../fs/binfmt_elf.c"
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
new file mode 100644
index 00000000000..3d184a78496
--- /dev/null
+++ b/arch/sparc64/kernel/central.c
@@ -0,0 +1,457 @@
+/* $Id: central.c,v 1.15 2001/12/19 00:29:51 davem Exp $
+ * central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
+ *
+ * Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/page.h>
+#include <asm/fhc.h>
+#include <asm/starfire.h>
+
+struct linux_central *central_bus = NULL;
+struct linux_fhc *fhc_list = NULL;
+
+#define IS_CENTRAL_FHC(__fhc) ((__fhc) == central_bus->child)
+
+/* Fatal probe error: report the failing central.c line number via
+ * the PROM console and halt the machine.  Never returns.
+ */
+static void central_probe_failure(int line)
+{
+ prom_printf("CENTRAL: Critical device probe failure at central.c:%d\n",
+ line);
+ prom_halt();
+}
+
+/* Read the "ranges" property of the central node into the software
+ * state; num_central_ranges stays 0 if the property is absent.
+ */
+static void central_ranges_init(int cnode, struct linux_central *central)
+{
+ int success;
+
+ central->num_central_ranges = 0;
+ success = prom_getproperty(central->prom_node, "ranges",
+ (char *) central->central_ranges,
+ sizeof (central->central_ranges));
+ if (success != -1)
+ central->num_central_ranges = (success/sizeof(struct linux_prom_ranges));
+}
+
+/* Read the "ranges" property of an FHC node into its software
+ * state; num_fhc_ranges stays 0 if the property is absent.
+ */
+static void fhc_ranges_init(int fnode, struct linux_fhc *fhc)
+{
+ int success;
+
+ fhc->num_fhc_ranges = 0;
+ success = prom_getproperty(fhc->prom_node, "ranges",
+ (char *) fhc->fhc_ranges,
+ sizeof (fhc->fhc_ranges));
+ if (success != -1)
+ fhc->num_fhc_ranges = (success/sizeof(struct linux_prom_ranges));
+}
+
+/* Range application routines are exported to various drivers,
+ * so do not __init this.
+ */
+/* Translate each PROM register through its matching range entry:
+ * rewrite which_io to the parent space and rebase phys_addr from
+ * the child base to the parent base.  A register with no matching
+ * range is a fatal probe failure.
+ */
+static void adjust_regs(struct linux_prom_registers *regp, int nregs,
+ struct linux_prom_ranges *rangep, int nranges)
+{
+ int regc, rngc;
+
+ for (regc = 0; regc < nregs; regc++) {
+ for (rngc = 0; rngc < nranges; rngc++)
+ if (regp[regc].which_io == rangep[rngc].ot_child_space)
+ break; /* Found it */
+ if (rngc == nranges) /* oops */
+ central_probe_failure(__LINE__);
+ regp[regc].which_io = rangep[rngc].ot_parent_space;
+ regp[regc].phys_addr -= rangep[rngc].ot_child_base;
+ regp[regc].phys_addr += rangep[rngc].ot_parent_base;
+ }
+}
+
+/* Apply probed fhc ranges to registers passed, if no ranges return. */
+/* Apply probed fhc ranges to registers passed, if no ranges return. */
+void apply_fhc_ranges(struct linux_fhc *fhc,
+ struct linux_prom_registers *regs,
+ int nregs)
+{
+ if (fhc->num_fhc_ranges)
+ adjust_regs(regs, nregs, fhc->fhc_ranges,
+ fhc->num_fhc_ranges);
+}
+
+/* Apply probed central ranges to registers passed, if no ranges return. */
+/* Apply probed central ranges to registers passed, if no ranges return. */
+void apply_central_ranges(struct linux_central *central,
+ struct linux_prom_registers *regs, int nregs)
+{
+ if (central->num_central_ranges)
+ adjust_regs(regs, nregs, central->central_ranges,
+ central->num_central_ranges);
+}
+
+/* Boot-time allocator for central/FHC software state: cache-aligned
+ * bootmem, zeroed before return.  Returns NULL on failure.
+ */
+void * __init central_alloc_bootmem(unsigned long size)
+{
+ void *ret;
+
+ ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+ if (ret != NULL)
+ memset(ret, 0, size);
+
+ return ret;
+}
+
+/* Combine a 32-bit PROM register (which_io space in the high bits,
+ * phys_addr in the low bits) into a single 64-bit physical address.
+ */
+static unsigned long prom_reg_to_paddr(struct linux_prom_registers *r)
+{
+ unsigned long ret = ((unsigned long) r->which_io) << 32;
+
+ return ret | (unsigned long) r->phys_addr;
+}
+
+/* Walk the top-level PROM siblings for every "fhc" node (the
+ * non-central board controllers), allocate and chain a linux_fhc
+ * for each, record its six register blocks, board number and JTAG
+ * master status, and set FHC_CONTROL_IXIST in its control register.
+ * Finding no FHC at all is a fatal probe failure.
+ */
+static void probe_other_fhcs(void)
+{
+ struct linux_prom64_registers fpregs[6];
+ char namebuf[128];
+ int node;
+
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(node, "fhc");
+ if (node == 0)
+ central_probe_failure(__LINE__);
+ while (node) {
+ struct linux_fhc *fhc;
+ int board;
+ u32 tmp;
+
+ fhc = (struct linux_fhc *)
+ central_alloc_bootmem(sizeof(struct linux_fhc));
+ if (fhc == NULL)
+ central_probe_failure(__LINE__);
+
+ /* Link it into the FHC chain. */
+ fhc->next = fhc_list;
+ fhc_list = fhc;
+
+ /* Toplevel FHCs have no parent. */
+ fhc->parent = NULL;
+
+ fhc->prom_node = node;
+ prom_getstring(node, "name", namebuf, sizeof(namebuf));
+ strcpy(fhc->prom_name, namebuf);
+ fhc_ranges_init(node, fhc);
+
+ /* Non-central FHC's have 64-bit OBP format registers. */
+ if (prom_getproperty(node, "reg",
+ (char *)&fpregs[0], sizeof(fpregs)) == -1)
+ central_probe_failure(__LINE__);
+
+ /* Only central FHC needs special ranges applied. */
+ fhc->fhc_regs.pregs = fpregs[0].phys_addr;
+ fhc->fhc_regs.ireg = fpregs[1].phys_addr;
+ fhc->fhc_regs.ffregs = fpregs[2].phys_addr;
+ fhc->fhc_regs.sregs = fpregs[3].phys_addr;
+ fhc->fhc_regs.uregs = fpregs[4].phys_addr;
+ fhc->fhc_regs.tregs = fpregs[5].phys_addr;
+
+ board = prom_getintdefault(node, "board#", -1);
+ fhc->board = board;
+
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_JCTRL);
+ if ((tmp & FHC_JTAG_CTRL_MENAB) != 0)
+ fhc->jtag_master = 1;
+ else
+ fhc->jtag_master = 0;
+
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
+ printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] %s\n",
+ board,
+ (tmp & FHC_ID_VERS) >> 28,
+ (tmp & FHC_ID_PARTID) >> 12,
+ (tmp & FHC_ID_MANUF) >> 1,
+ (fhc->jtag_master ? "(JTAG Master)" : ""));
+
+ /* This bit must be set in all non-central FHC's in
+ * the system. When it is clear, this identifies
+ * the central board.
+ */
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+ tmp |= FHC_CONTROL_IXIST;
+ upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+
+ /* Look for the next FHC. */
+ node = prom_getsibling(node);
+ if (node == 0)
+ break;
+ node = prom_searchsiblings(node, "fhc");
+ if (node == 0)
+ break;
+ }
+}
+
+/* Locate the "clock-board" child of the central FHC, translate its
+ * registers through the FHC and central ranges, record the cfreg,
+ * clkregs and (when present) clkver register addresses, and decode
+ * the slot count of this Enterprise chassis from CLOCK_STAT1 bits
+ * 7:6 (refined by the clock version register for the 0x80 case).
+ * Missing node or "reg" property is a fatal probe failure.
+ */
+static void probe_clock_board(struct linux_central *central,
+ struct linux_fhc *fhc,
+ int cnode, int fnode)
+{
+ struct linux_prom_registers cregs[3];
+ int clknode, nslots, tmp, nregs;
+
+ clknode = prom_searchsiblings(prom_getchild(fnode), "clock-board");
+ if (clknode == 0 || clknode == -1)
+ central_probe_failure(__LINE__);
+
+ nregs = prom_getproperty(clknode, "reg", (char *)&cregs[0], sizeof(cregs));
+ if (nregs == -1)
+ central_probe_failure(__LINE__);
+
+ nregs /= sizeof(struct linux_prom_registers);
+ apply_fhc_ranges(fhc, &cregs[0], nregs);
+ apply_central_ranges(central, &cregs[0], nregs);
+ central->cfreg = prom_reg_to_paddr(&cregs[0]);
+ central->clkregs = prom_reg_to_paddr(&cregs[1]);
+
+ /* A third register, when present, is the clock version register. */
+ if (nregs == 2)
+ central->clkver = 0UL;
+ else
+ central->clkver = prom_reg_to_paddr(&cregs[2]);
+
+ tmp = upa_readb(central->clkregs + CLOCK_STAT1);
+ tmp &= 0xc0;
+ switch(tmp) {
+ case 0x40:
+ nslots = 16;
+ break;
+ case 0xc0:
+ nslots = 8;
+ break;
+ case 0x80:
+ if (central->clkver != 0UL &&
+ upa_readb(central->clkver) != 0) {
+ if ((upa_readb(central->clkver) & 0x80) != 0)
+ nslots = 4;
+ else
+ nslots = 5;
+ break;
+ }
+ /* No usable version register: fall through to default. */
+ default:
+ nslots = 4;
+ break;
+ };
+ central->slots = nslots;
+ printk("CENTRAL: Detected %d slot Enterprise system. cfreg[%02x] cver[%02x]\n",
+ central->slots, upa_readb(central->cfreg),
+ (central->clkver ? upa_readb(central->clkver) : 0x00));
+}
+
+/* Disarm one FHC interrupt source: clear any pending state in the
+ * ICLR register and drop bit 31 of the matching IMAP register
+ * (presumably the valid/enable bit -- TODO confirm against the FHC
+ * documentation) so the source stays quiet until the IRQ layer
+ * reprograms it.  The read-back after each write flushes the store
+ * out to the hardware.
+ */
+static void ZAP(unsigned long iclr, unsigned long imap)
+{
+	u32 imap_tmp;
+
+	upa_writel(0, iclr);
+	upa_readl(iclr);
+	imap_tmp = upa_readl(imap);
+	imap_tmp &= ~(0x80000000);
+	upa_writel(imap_tmp, imap);
+	upa_readl(imap);
+}
+
+/* Put every FHC on fhc_list into a known-good hardware state:
+ * quiesce all four interrupt sources (fanfail, system, uart,
+ * flashprom) and program the control register.  Called once from
+ * central_probe() after all FHCs have been discovered.
+ */
+static void init_all_fhc_hw(void)
+{
+	struct linux_fhc *fhc;
+
+	for (fhc = fhc_list; fhc != NULL; fhc = fhc->next) {
+		u32 tmp;
+
+		/* Clear all of the interrupt mapping registers
+		 * just in case OBP left them in a foul state.
+		 */
+		ZAP(fhc->fhc_regs.ffregs + FHC_FFREGS_ICLR,
+		    fhc->fhc_regs.ffregs + FHC_FFREGS_IMAP);
+		ZAP(fhc->fhc_regs.sregs + FHC_SREGS_ICLR,
+		    fhc->fhc_regs.sregs + FHC_SREGS_IMAP);
+		ZAP(fhc->fhc_regs.uregs + FHC_UREGS_ICLR,
+		    fhc->fhc_regs.uregs + FHC_UREGS_IMAP);
+		ZAP(fhc->fhc_regs.tregs + FHC_TREGS_ICLR,
+		    fhc->fhc_regs.tregs + FHC_TREGS_IMAP);
+
+		/* Setup FHC control register. */
+		tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+
+		/* All non-central boards have this bit set. */
+		if (! IS_CENTRAL_FHC(fhc))
+			tmp |= FHC_CONTROL_IXIST;
+
+		/* For all FHCs, clear the firmware synchronization
+		 * line and both low power mode enables.
+		 */
+		tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF |
+			 FHC_CONTROL_SLINE);
+
+		upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+		upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+	}
+
+}
+
+/* Top-level probe for an Enterprise "central" board, called from
+ * device_scan().  If the PROM has no /central node this is not an
+ * Enterprise machine; do Starfire setup when applicable and bail.
+ * Otherwise allocate software state for the central bus and its
+ * FireHose Controller, map the FHC register banks, probe the clock
+ * board and all remaining FHCs, then initialize their hardware.
+ * All failures here are fatal (central_probe_failure halts).
+ */
+void central_probe(void)
+{
+	struct linux_prom_registers fpregs[6];
+	struct linux_fhc *fhc;
+	char namebuf[128];
+	int cnode, fnode, err;
+
+	cnode = prom_finddevice("/central");
+	if (cnode == 0 || cnode == -1) {
+		if (this_is_starfire)
+			starfire_cpu_setup();
+		return;
+	}
+
+	/* Ok we got one, grab some memory for software state. */
+	central_bus = (struct linux_central *)
+		central_alloc_bootmem(sizeof(struct linux_central));
+	if (central_bus == NULL)
+		central_probe_failure(__LINE__);
+
+	fhc = (struct linux_fhc *)
+		central_alloc_bootmem(sizeof(struct linux_fhc));
+	if (fhc == NULL)
+		central_probe_failure(__LINE__);
+
+	/* First init central. */
+	central_bus->child = fhc;
+	central_bus->prom_node = cnode;
+
+	prom_getstring(cnode, "name", namebuf, sizeof(namebuf));
+	strcpy(central_bus->prom_name, namebuf);
+
+	central_ranges_init(cnode, central_bus);
+
+	/* And then central's FHC. */
+	fhc->next = fhc_list;
+	fhc_list = fhc;
+
+	fhc->parent = central_bus;
+	fnode = prom_searchsiblings(prom_getchild(cnode), "fhc");
+	if (fnode == 0 || fnode == -1)
+		central_probe_failure(__LINE__);
+
+	fhc->prom_node = fnode;
+	prom_getstring(fnode, "name", namebuf, sizeof(namebuf));
+	strcpy(fhc->prom_name, namebuf);
+
+	fhc_ranges_init(fnode, fhc);
+
+	/* Now, map in FHC register set. */
+	if (prom_getproperty(fnode, "reg", (char *)&fpregs[0], sizeof(fpregs)) == -1)
+		central_probe_failure(__LINE__);
+
+	apply_central_ranges(central_bus, &fpregs[0], 6);
+
+	/* Six register banks: internal, interrupt, fanfail,
+	 * system, uart and flashprom.
+	 */
+	fhc->fhc_regs.pregs = prom_reg_to_paddr(&fpregs[0]);
+	fhc->fhc_regs.ireg = prom_reg_to_paddr(&fpregs[1]);
+	fhc->fhc_regs.ffregs = prom_reg_to_paddr(&fpregs[2]);
+	fhc->fhc_regs.sregs = prom_reg_to_paddr(&fpregs[3]);
+	fhc->fhc_regs.uregs = prom_reg_to_paddr(&fpregs[4]);
+	fhc->fhc_regs.tregs = prom_reg_to_paddr(&fpregs[5]);
+
+	/* Obtain board number from board status register, Central's
+	 * FHC lacks "board#" property.
+	 */
+	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_BSR);
+	fhc->board = (((err >> 16) & 0x01) |
+		      ((err >> 12) & 0x0e));
+
+	fhc->jtag_master = 0;
+
+	/* Attach the clock board registers for CENTRAL. */
+	probe_clock_board(central_bus, fhc, cnode, fnode);
+
+	err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
+	printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] (CENTRAL)\n",
+	       fhc->board,
+	       ((err & FHC_ID_VERS) >> 28),
+	       ((err & FHC_ID_PARTID) >> 12),
+	       ((err & FHC_ID_MANUF) >> 1));
+
+	probe_other_fhcs();
+
+	init_all_fhc_hw();
+}
+
+/* Set the right-hand LED of FHC to state ON (non-zero = lit).
+ * The RLED control bit is active-low, hence the inverted logic.
+ * Also keeps the low-power enables and sync line cleared, and
+ * flushes the write with a read-back.
+ */
+static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on)
+{
+	u32 tmp;
+
+	tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+
+	/* NOTE: reverse logic on this bit */
+	if (on)
+		tmp &= ~(FHC_CONTROL_RLED);
+	else
+		tmp |= FHC_CONTROL_RLED;
+	tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);
+
+	upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+	upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+}
+
+/* Set the clock board's right-hand LED to state ON (non-zero =
+ * lit).  Like fhc_ledblink(), the bit is active-low and the write
+ * is flushed with a read-back.
+ */
+static __inline__ void central_ledblink(struct linux_central *central, int on)
+{
+	u8 tmp;
+
+	tmp = upa_readb(central->clkregs + CLOCK_CTRL);
+
+	/* NOTE: reverse logic on this bit */
+	if (on)
+		tmp &= ~(CLOCK_CTRL_RLED);
+	else
+		tmp |= CLOCK_CTRL_RLED;
+
+	upa_writeb(tmp, central->clkregs + CLOCK_CTRL);
+	upa_readb(central->clkregs + CLOCK_CTRL);
+}
+
+/* Heartbeat timer and current LED state (0 = off phase). */
+static struct timer_list sftimer;
+static int led_state;
+
+/* Timer callback: toggle the LEDs on the clock board and on every
+ * non-central FHC, flip the phase, and re-arm for half a second
+ * from now so the LEDs blink at 1Hz.
+ */
+static void sunfire_timer(unsigned long __ignored)
+{
+	struct linux_fhc *fhc;
+
+	central_ledblink(central_bus, led_state);
+	for (fhc = fhc_list; fhc != NULL; fhc = fhc->next)
+		if (! IS_CENTRAL_FHC(fhc))
+			fhc_ledblink(fhc, led_state);
+	led_state = ! led_state;
+	sftimer.expires = jiffies + (HZ >> 1);
+	add_timer(&sftimer);
+}
+
+/* After PCI/SBUS busses have been probed, this is called to perform
+ * final initialization of all FireHose Controllers in the system.
+ *
+ * Turns off the clock board LED (OBP leaves it on) and starts the
+ * self-re-arming heartbeat timer that blinks all the LEDs.  A NULL
+ * central_bus means this is not an Enterprise box and we do nothing.
+ */
+void firetruck_init(void)
+{
+	struct linux_central *central = central_bus;
+	u8 ctrl;
+
+	/* No central bus, nothing to do. */
+	if (central == NULL)
+		return;
+
+	/* OBP leaves it on, turn it off so clock board timer LED
+	 * is in sync with FHC ones.
+	 */
+	ctrl = upa_readb(central->clkregs + CLOCK_CTRL);
+	ctrl &= ~(CLOCK_CTRL_RLED);
+	upa_writeb(ctrl, central->clkregs + CLOCK_CTRL);
+
+	led_state = 0;
+	init_timer(&sftimer);
+	sftimer.data = 0;
+	sftimer.function = &sunfire_timer;
+	sftimer.expires = jiffies + (HZ >> 1);
+	add_timer(&sftimer);
+}
diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
new file mode 100644
index 00000000000..97cf912f085
--- /dev/null
+++ b/arch/sparc64/kernel/chmc.c
@@ -0,0 +1,458 @@
+/* $Id: chmc.c,v 1.4 2002/01/08 16:00:14 davem Exp $
+ * memctrlr.c: Driver for UltraSPARC-III memory controller.
+ *
+ * Copyright (C) 2001 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/spitfire.h>
+#include <asm/chmctrl.h>
+#include <asm/oplib.h>
+#include <asm/io.h>
+
+/* Each UltraSPARC-III memory controller drives two DIMM groups of
+ * four DIMMs each.
+ */
+#define CHMCTRL_NDGRPS 2
+#define CHMCTRL_NDIMMS 4
+
+#define DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)
+
+/* OBP memory-layout property format. */
+struct obp_map {
+	/* Per-bit tables translating a bus bit position into the
+	 * DIMM (2 bits per entry) and pin carrying it; sizes match
+	 * the 576 bits of a full cache line transfer (4 x 144).
+	 */
+	unsigned char	dimm_map[144];
+	unsigned char	pin_map[576];
+};
+
+#define DIMM_LABEL_SZ	8
+
+struct obp_mem_layout {
+	/* One max 8-byte string label per DIMM.  Usually
+	 * this matches the label on the motherboard where
+	 * that DIMM resides.
+	 */
+	char		dimm_labels[DIMMS_PER_MC][DIMM_LABEL_SZ];
+
+	/* If symmetric use map[0], else it is
+	 * asymmetric and map[1] should be used.
+	 */
+	char		symmetric;
+
+	struct obp_map	map[2];
+};
+
+#define CHMCTRL_NBANKS	4
+
+/* Software view of one logical SDRAM bank, decoded from the
+ * controller's memory decode register (see
+ * interpret_one_decode_reg()).
+ */
+struct bank_info {
+	struct mctrl_info	*mp;	/* owning controller */
+	int			bank_id;	/* globally unique bank number */
+
+	u64			raw_reg;	/* raw decode register value */
+	int			valid;	/* bank enabled? */
+	int			uk;	/* upper address mask bits */
+	int			um;	/* upper address match bits */
+	int			lk;	/* lower address mask bits */
+	int			lm;	/* lower address match bits */
+	int			interleave;	/* ways of interleave (1..16) */
+	unsigned long		base;	/* decoded base physical address */
+	unsigned long		size;	/* decoded bank size in bytes */
+};
+
+/* Per-memory-controller state, one entry on mctrl_list per
+ * controller probed by init_one_mctrl().
+ */
+struct mctrl_info {
+	struct list_head	list;
+	int			portid;	/* Safari port id, equals cpu id */
+	int			index;	/* chmcN enumeration index */
+
+	struct obp_mem_layout	layout_prop;	/* copy of OBP "memory-layout" */
+	int			layout_size;	/* bytes of layout_prop valid, 0 if absent */
+
+	void			__iomem *regs;	/* mapped controller registers */
+
+	/* Snapshot of the controller timing/address setup taken at
+	 * probe time, for diagnostics.
+	 */
+	u64			timing_control1;
+	u64			timing_control2;
+	u64			timing_control3;
+	u64			timing_control4;
+	u64			memaddr_control;
+
+	struct bank_info	logical_banks[CHMCTRL_NBANKS];
+};
+
+static LIST_HEAD(mctrl_list);
+
+/* Does BANK decode PHYS_ADDR?
+ *
+ * The upper and lower address fields each match when every bit of
+ * the address equals the match value (um/lm) on the positions not
+ * masked off by uk/lk.  The double-negation sequence below computes
+ * exactly "bits that differ AND matter"; a non-zero residue in
+ * either field means no match.  Returns 1 on match, 0 otherwise.
+ */
+static int bank_match(struct bank_info *bp, unsigned long phys_addr)
+{
+	unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
+	unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;
+
+	/* Bank must be enabled to match. */
+	if (bp->valid == 0)
+		return 0;
+
+	/* Would BANK match upper bits? */
+	upper_bits ^= bp->um;		/* What bits are different? */
+	upper_bits = ~upper_bits;	/* Invert. */
+	upper_bits |= bp->uk;		/* What bits don't matter for matching? */
+	upper_bits = ~upper_bits;	/* Invert. */
+
+	if (upper_bits)
+		return 0;
+
+	/* Would BANK match lower bits? */
+	lower_bits ^= bp->lm;		/* What bits are different? */
+	lower_bits = ~lower_bits;	/* Invert. */
+	lower_bits |= bp->lk;		/* What bits don't matter for matching? */
+	lower_bits = ~lower_bits;	/* Invert. */
+
+	if (lower_bits)
+		return 0;
+
+	/* I always knew you'd be the one. */
+	return 1;
+}
+
+/* Given PHYS_ADDR, search memory controller banks for a match.
+ * Walks every controller on mctrl_list and every logical bank on
+ * each (open-coded list iteration); returns the first matching
+ * bank_info or NULL when no bank decodes the address.
+ */
+static struct bank_info *find_bank(unsigned long phys_addr)
+{
+	struct list_head *mctrl_head = &mctrl_list;
+	struct list_head *mctrl_entry = mctrl_head->next;
+
+	for (;;) {
+		struct mctrl_info *mp =
+			list_entry(mctrl_entry, struct mctrl_info, list);
+		int bank_no;
+
+		/* Head reached: exhausted the list without a match.
+		 * (mp computed from the head above is never used.)
+		 */
+		if (mctrl_entry == mctrl_head)
+			break;
+		mctrl_entry = mctrl_entry->next;
+
+		for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) {
+			struct bank_info *bp;
+
+			bp = &mp->logical_banks[bank_no];
+			if (bank_match(bp, phys_addr))
+				return bp;
+		}
+	}
+
+	return NULL;
+}
+
+/* This is the main purpose of this driver.
+ *
+ * Translate an ECC error (SYNDROME_CODE plus faulting PHYS_ADDR)
+ * into a human readable DIMM label string in BUF.  A syndrome of
+ * SYNDROME_MIN (-1) denotes a multi-bit error, for which all DIMM
+ * labels of the affected bank are listed; otherwise the exact DIMM
+ * and pin are computed from the OBP memory-layout maps.  When the
+ * address matches no bank or the syndrome is out of range, "???"
+ * is returned.  Always returns 0.  BUFLEN is currently not
+ * enforced by the sprintf() calls below.
+ */
+#define SYNDROME_MIN	-1
+#define SYNDROME_MAX	144
+int chmc_getunumber(int syndrome_code,
+		    unsigned long phys_addr,
+		    char *buf, int buflen)
+{
+	struct bank_info *bp;
+	struct obp_mem_layout *prop;
+	int bank_in_controller, first_dimm;
+
+	bp = find_bank(phys_addr);
+	if (bp == NULL ||
+	    syndrome_code < SYNDROME_MIN ||
+	    syndrome_code > SYNDROME_MAX) {
+		buf[0] = '?';
+		buf[1] = '?';
+		buf[2] = '?';
+		buf[3] = '\0';
+		return 0;
+	}
+
+	/* Locate the first DIMM of the group this bank lives in. */
+	prop = &bp->mp->layout_prop;
+	bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
+	first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
+	first_dimm *= CHMCTRL_NDIMMS;
+
+	if (syndrome_code != SYNDROME_MIN) {
+		struct obp_map *map;
+		int qword, where_in_line, where, map_index, map_offset;
+		unsigned int map_val;
+
+		/* Yaay, single bit error so we can figure out
+		 * the exact dimm.
+		 */
+		if (prop->symmetric)
+			map = &prop->map[0];
+		else
+			map = &prop->map[1];
+
+		/* Convert syndrome code into the way the bits are
+		 * positioned on the bus.
+		 */
+		if (syndrome_code < 144 - 16)
+			syndrome_code += 16;
+		else if (syndrome_code < 144)
+			syndrome_code -= (144 - 7);
+		else if (syndrome_code < (144 + 3))
+			syndrome_code -= (144 + 3 - 4);
+		else
+			syndrome_code -= 144 + 3;
+
+		/* All this magic has to do with how a cache line
+		 * comes over the wire on Safari.  A 64-bit line
+		 * comes over in 4 quadword cycles, each of which
+		 * transmit ECC/MTAG info as well as the actual
+		 * data.  144 bits per quadword, 576 total.
+		 */
+#define LINE_SIZE	64
+#define LINE_ADDR_MSK	(LINE_SIZE - 1)
+#define QW_PER_LINE	4
+#define QW_BYTES	(LINE_SIZE / QW_PER_LINE)
+#define QW_BITS		144
+#define LAST_BIT	(576 - 1)
+
+		qword = (phys_addr & LINE_ADDR_MSK) / QW_BYTES;
+		where_in_line = ((3 - qword) * QW_BITS) + syndrome_code;
+		where = (LAST_BIT - where_in_line);
+		map_index = where >> 2;
+		map_offset = where & 0x3;
+		map_val = map->dimm_map[map_index];
+		/* NOTE(review): each dimm_map byte holds four 2-bit
+		 * entries, yet the mask below is (2 - 1) == 1 and so
+		 * keeps only the low bit of the entry -- verify
+		 * against the OBP memory-layout specification.
+		 */
+		map_val = ((map_val >> ((3 - map_offset) << 1)) & (2 - 1));
+
+		sprintf(buf, "%s, pin %3d",
+			prop->dimm_labels[first_dimm + map_val],
+			map->pin_map[where_in_line]);
+	} else {
+		int dimm;
+
+		/* Multi-bit error, we just dump out all the
+		 * dimm labels associated with this bank.
+		 */
+		for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
+			sprintf(buf, "%s ",
+				prop->dimm_labels[first_dimm + dimm]);
+			buf += strlen(buf);
+		}
+	}
+	return 0;
+}
+
+/* Accessing the registers is slightly complicated.  If you want
+ * to get at the memory controller which is on the same processor
+ * the code is executing, you must use special ASI load/store else
+ * you go through the global mapping.
+ *
+ * Read the 64-bit register at OFFSET of controller MP.  get_cpu()
+ * pins us to the current cpu so the portid comparison (and thus
+ * the choice of ASI) cannot be invalidated by a migration between
+ * the test and the load.
+ */
+static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
+{
+	unsigned long ret;
+	int this_cpu = get_cpu();
+
+	if (mp->portid == this_cpu) {
+		/* Local controller: offset is relative to the
+		 * MCU control register ASI.
+		 */
+		__asm__ __volatile__("ldxa	[%1] %2, %0"
+				     : "=r" (ret)
+				     : "r" (offset), "i" (ASI_MCU_CTRL_REG));
+	} else {
+		/* Remote controller: go through the physical
+		 * bypass mapping of its register block.
+		 */
+		__asm__ __volatile__("ldxa	[%1] %2, %0"
+				     : "=r" (ret)
+				     : "r" (mp->regs + offset),
+				       "i" (ASI_PHYS_BYPASS_EC_E));
+	}
+	put_cpu();
+
+	return ret;
+}
+
+#if 0 /* currently unused */
+/* Write VAL to the register at OFFSET of controller MP.  Mirrors
+ * read_mcreg(): the local controller is reached via the MCU
+ * control ASI, remote ones via the physical bypass mapping.
+ *
+ * Fix: the remote arm used the "ldxa" (load) mnemonic although its
+ * operands describe a store (VAL is an input operand being written
+ * out); it must be "stxa" like the local arm.  Dead code under
+ * "#if 0", so the fix only matters if this is ever re-enabled.
+ *
+ * NOTE(review): unlike read_mcreg() this uses a bare
+ * smp_processor_id() without get_cpu() pinning -- confirm callers
+ * disable preemption before enabling this.
+ */
+static void write_mcreg(struct mctrl_info *mp, unsigned long offset, u64 val)
+{
+	if (mp->portid == smp_processor_id()) {
+		__asm__ __volatile__("stxa	%0, [%1] %2"
+				     : : "r" (val),
+				         "r" (offset), "i" (ASI_MCU_CTRL_REG));
+	} else {
+		__asm__ __volatile__("stxa	%0, [%1] %2"
+				     : : "r" (val),
+				         "r" (mp->regs + offset),
+				         "i" (ASI_PHYS_BYPASS_EC_E));
+	}
+}
+#endif
+
+/* Decode one memory decode register value VAL for logical bank
+ * WHICH_BANK of controller MP into the corresponding bank_info:
+ * split out the valid bit and the upper/lower match/mask fields,
+ * then derive the bank's base address, interleave factor and size.
+ */
+static void interpret_one_decode_reg(struct mctrl_info *mp, int which_bank, u64 val)
+{
+	struct bank_info *p = &mp->logical_banks[which_bank];
+
+	p->mp = mp;
+	/* Bank ids are globally unique across controllers. */
+	p->bank_id = (CHMCTRL_NBANKS * mp->portid) + which_bank;
+	p->raw_reg = val;
+	p->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
+	p->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
+	p->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
+	p->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
+	p->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;
+
+	/* Base address: matched upper bits minus the don't-care
+	 * (mask) bits, shifted back into place.
+	 */
+	p->base = (p->um);
+	p->base &= ~(p->uk);
+	p->base <<= PA_UPPER_BITS_SHIFT;
+
+	/* The number of zero bits in the lower mask encodes the
+	 * interleave factor.
+	 */
+	switch(p->lk) {
+	case 0xf:
+	default:
+		p->interleave = 1;
+		break;
+
+	case 0xe:
+		p->interleave = 2;
+		break;
+
+	case 0xc:
+		p->interleave = 4;
+		break;
+
+	case 0x8:
+		p->interleave = 8;
+		break;
+
+	case 0x0:
+		p->interleave = 16;
+		break;
+	};
+
+	/* UK[10] is reserved, and UK[11] is not set for the SDRAM
+	 * bank size definition.
+	 */
+	p->size = (((unsigned long)p->uk &
+		    ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
+	p->size /= p->interleave;
+}
+
+/* Read and decode all four memory decode registers of MP.  Skipped
+ * for controllers with no "memory-layout" property (layout_size of
+ * zero), since unumber translation is impossible for them anyway.
+ */
+static void fetch_decode_regs(struct mctrl_info *mp)
+{
+	if (mp->layout_size == 0)
+		return;
+
+	interpret_one_decode_reg(mp, 0,
+				 read_mcreg(mp, CHMCTRL_DECODE1));
+	interpret_one_decode_reg(mp, 1,
+				 read_mcreg(mp, CHMCTRL_DECODE2));
+	interpret_one_decode_reg(mp, 2,
+				 read_mcreg(mp, CHMCTRL_DECODE3));
+	interpret_one_decode_reg(mp, 3,
+				 read_mcreg(mp, CHMCTRL_DECODE4));
+}
+
+/* Probe and register the memory controller at PROM node NODE,
+ * enumerated as chmcINDEX.  Copies the OBP "memory-layout"
+ * property, maps the register block, snapshots the timing
+ * registers and decodes the bank layout, then links the new
+ * mctrl_info onto mctrl_list.  Returns 0 on success, -1 on any
+ * failure (all partially acquired resources are released).
+ */
+static int init_one_mctrl(int node, int index)
+{
+	struct mctrl_info *mp = kmalloc(sizeof(*mp), GFP_KERNEL);
+	int portid = prom_getintdefault(node, "portid", -1);
+	struct linux_prom64_registers p_reg_prop;
+	int t;
+
+	if (!mp)
+		return -1;
+	memset(mp, 0, sizeof(*mp));
+	if (portid == -1)
+		goto fail;
+
+	mp->portid = portid;
+	/* "memory-layout" is optional; absence is recorded as a
+	 * zero layout_size, but an oversized property is an error.
+	 */
+	mp->layout_size = prom_getproplen(node, "memory-layout");
+	if (mp->layout_size < 0)
+		mp->layout_size = 0;
+	if (mp->layout_size > sizeof(mp->layout_prop))
+		goto fail;
+
+	if (mp->layout_size > 0)
+		prom_getproperty(node, "memory-layout",
+				 (char *) &mp->layout_prop,
+				 mp->layout_size);
+
+	/* Sanity check the register block: the US3 controller
+	 * exposes exactly 0x48 bytes of registers.
+	 */
+	t = prom_getproperty(node, "reg",
+			     (char *) &p_reg_prop,
+			     sizeof(p_reg_prop));
+	if (t < 0 || p_reg_prop.reg_size != 0x48)
+		goto fail;
+
+	mp->regs = ioremap(p_reg_prop.phys_addr, p_reg_prop.reg_size);
+	if (mp->regs == NULL)
+		goto fail;
+
+	if (mp->layout_size != 0UL) {
+		mp->timing_control1 = read_mcreg(mp, CHMCTRL_TCTRL1);
+		mp->timing_control2 = read_mcreg(mp, CHMCTRL_TCTRL2);
+		mp->timing_control3 = read_mcreg(mp, CHMCTRL_TCTRL3);
+		mp->timing_control4 = read_mcreg(mp, CHMCTRL_TCTRL4);
+		mp->memaddr_control = read_mcreg(mp, CHMCTRL_MACTRL);
+	}
+
+	fetch_decode_regs(mp);
+
+	mp->index = index;
+
+	list_add(&mp->list, &mctrl_list);
+
+	/* Report the device. */
+	printk(KERN_INFO "chmc%d: US3 memory controller at %p [%s]\n",
+	       mp->index,
+	       mp->regs, (mp->layout_size ? "ACTIVE" : "INACTIVE"));
+
+	return 0;
+
+fail:
+	/* mp is non-NULL here (the !mp case returned above); regs
+	 * is NULL unless ioremap succeeded, thanks to the memset.
+	 */
+	if (mp) {
+		if (mp->regs != NULL)
+			iounmap(mp->regs);
+		kfree(mp);
+	}
+	return -1;
+}
+
+/* Scan the children of the PROM root node for every node called
+ * NAME and initialize a memory controller for each.  INDEX is the
+ * first chmcN number to assign; the next free index is returned so
+ * successive calls keep the numbering contiguous.
+ */
+static int __init probe_for_string(char *name, int index)
+{
+	int node = prom_getchild(prom_root_node);
+
+	while ((node = prom_searchsiblings(node, name)) != 0) {
+		int ret = init_one_mctrl(node, index);
+
+		/* Only advance the number on successful probes. */
+		if (!ret)
+			index++;
+
+		node = prom_getsibling(node);
+		if (!node)
+			break;
+	}
+
+	return index;
+}
+
+/* Module entry point: probe both node names the controller may
+ * appear under.  Always returns 0 on cheetah platforms so the
+ * module stays loaded even when no controller was found.
+ */
+static int __init chmc_init(void)
+{
+	int index;
+
+	/* This driver is only for cheetah platforms. */
+	if (tlb_type != cheetah && tlb_type != cheetah_plus)
+		return -ENODEV;
+
+	index = probe_for_string("memory-controller", 0);
+	index = probe_for_string("mc-us3", index);
+
+	return 0;
+}
+
+/* Module exit: unlink, unmap and free every probed controller.
+ * Open-coded safe list walk (the next pointer is fetched before
+ * the current entry is deleted and freed).
+ */
+static void __exit chmc_cleanup(void)
+{
+	struct list_head *head = &mctrl_list;
+	struct list_head *tmp = head->next;
+
+	for (;;) {
+		struct mctrl_info *p =
+			list_entry(tmp, struct mctrl_info, list);
+		if (tmp == head)
+			break;
+		tmp = tmp->next;
+
+		list_del(&p->list);
+		iounmap(p->regs);
+		kfree(p);
+	}
+}
+
+module_init(chmc_init);
+module_exit(chmc_cleanup);
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
new file mode 100644
index 00000000000..48756958116
--- /dev/null
+++ b/arch/sparc64/kernel/cpu.c
@@ -0,0 +1,124 @@
+/* cpu.c: Dinky routines to look for the kind of Sparc cpu
+ * we are on.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/asi.h>
+#include <asm/system.h>
+#include <asm/fpumacro.h>
+#include <asm/cpudata.h>
+
+/* Per-cpu data, zero-initialized; clock_tick etc. are filled in
+ * during device_scan()/smp bringup.
+ */
+DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
+
+/* Table entry mapping a (manufacturer, implementation) pair from
+ * the %ver register to a printable cpu name.
+ */
+struct cpu_iu_info {
+  short manuf;
+  short impl;
+  char* cpu_name;   /* should be enough I hope... */
+};
+
+/* As above, plus the FPU version field from %fsr. */
+struct cpu_fp_info {
+  short manuf;
+  short impl;
+  char fpu_vers;
+  char* fp_name;
+};
+
+struct cpu_fp_info linux_sparc_fpu[] = {
+  { 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
+  { 0x22, 0x10, 0, "UltraSparc I integrated FPU"},
+  { 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
+  { 0x17, 0x12, 0, "UltraSparc IIi integrated FPU"},
+  { 0x17, 0x13, 0, "UltraSparc IIe integrated FPU"},
+  { 0x3e, 0x14, 0, "UltraSparc III integrated FPU"},
+  { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
+  { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
+  { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
+};
+
+#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
+
+struct cpu_iu_info linux_sparc_chips[] = {
+  { 0x17, 0x10, "TI UltraSparc I   (SpitFire)"},
+  { 0x22, 0x10, "TI UltraSparc I   (SpitFire)"},
+  { 0x17, 0x11, "TI UltraSparc II  (BlackBird)"},
+  { 0x17, 0x12, "TI UltraSparc IIi (Sabre)"},
+  { 0x17, 0x13, "TI UltraSparc IIe (Hummingbird)"},
+  { 0x3e, 0x14, "TI UltraSparc III (Cheetah)"},
+  { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
+  { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
+  { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
+};
+
+#define NSPARCCHIPS  (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
+
+/* Filled in by cpu_probe(); defaults flag a failed probe. */
+char *sparc_cpu_type = "cpu-oops";
+char *sparc_fpu_type = "fpu-oops";
+
+unsigned int fsr_storage;
+
+/* Identify the boot cpu and its FPU: read the %ver privileged
+ * register and the FSR version field, then look both up in the
+ * tables above, setting sparc_cpu_type/sparc_fpu_type.  Unknown
+ * cheetah derivatives are reported as cheetah+ until named.
+ */
+void __init cpu_probe(void)
+{
+	unsigned long ver, fpu_vers, manuf, impl, fprs;
+	int i;
+	
+	/* Temporarily enable the FPU (FPRS_FEF) so %fsr can be
+	 * stored, then restore the previous FPRS value.  The stx
+	 * writes the FSR image into fpu_vers through its address.
+	 * NOTE(review): the asm has no "memory" clobber for that
+	 * store -- works here, but worth confirming.
+	 */
+	fprs = fprs_read();
+	fprs_write(FPRS_FEF);
+	__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]"
+			      : "=&r" (ver)
+			      : "r" (&fpu_vers));
+	fprs_write(fprs);
+	
+	manuf = ((ver >> 48) & 0xffff);
+	impl = ((ver >> 32) & 0xffff);
+
+	fpu_vers = ((fpu_vers >> 17) & 0x7);
+
+retry:
+	for (i = 0; i < NSPARCCHIPS; i++) {
+		if (linux_sparc_chips[i].manuf == manuf) {
+			if (linux_sparc_chips[i].impl == impl) {
+				sparc_cpu_type =
+					linux_sparc_chips[i].cpu_name;
+				break;
+			}
+		}
+	}
+
+	if (i == NSPARCCHIPS) {
+ 		/* Maybe it is a cheetah+ derivative, report it as cheetah+
+ 		 * in that case until we learn the real names.
+ 		 */
+ 		if (manuf == 0x3e &&
+ 		    impl > 0x15) {
+ 			impl = 0x15;
+ 			goto retry;
+ 		} else {
+			printk("DEBUG: manuf[%lx] impl[%lx]\n",
+			       manuf, impl);
+ 		}
+		sparc_cpu_type = "Unknown CPU";
+	}
+
+	for (i = 0; i < NSPARCFPU; i++) {
+		if (linux_sparc_fpu[i].manuf == manuf &&
+		    linux_sparc_fpu[i].impl == impl) {
+			if (linux_sparc_fpu[i].fpu_vers == fpu_vers) {
+				sparc_fpu_type =
+					linux_sparc_fpu[i].fp_name;
+				break;
+			}
+		}
+	}
+
+	if (i == NSPARCFPU) {
+		printk("DEBUG: manuf[%lx] impl[%lx] fsr.vers[%lx]\n",
+		       manuf, impl, fpu_vers);
+		sparc_fpu_type = "Unknown FPU";
+	}
+}
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
new file mode 100644
index 00000000000..d710274e516
--- /dev/null
+++ b/arch/sparc64/kernel/devices.c
@@ -0,0 +1,144 @@
+/* devices.c: Initial scan of the prom device tree for important
+ * Sparc device nodes which we need to find.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+#include <asm/page.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+#include <asm/spitfire.h>
+#include <asm/timer.h>
+#include <asm/cpudata.h>
+
+/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
+ * operations in asm/ns87303.h
+ */
+DEFINE_SPINLOCK(ns87303_lock);
+
+extern void cpu_probe(void);
+extern void central_probe(void);
+
+/* PROM property that carries a cpu's module id: spitfire calls it
+ * "upa-portid", later chips "portid".
+ */
+static char *cpu_mid_prop(void)
+{
+	if (tlb_type == spitfire)
+		return "upa-portid";
+	return "portid";
+}
+
+/* Test PROM node ND: if it is a cpu node accepted by COMPARE for
+ * the running instance counter *CUR_INST, fill in *PROM_NODE and
+ * *MID (each when non-NULL) and return 0.  Non-cpu nodes return
+ * -ENODEV without touching the counter; cpu nodes that COMPARE
+ * rejects bump *CUR_INST and return -ENODEV.
+ */
+static int check_cpu_node(int nd, int *cur_inst,
+			  int (*compare)(int, int, void *), void *compare_arg,
+			  int *prom_node, int *mid)
+{
+	char node_str[128];
+
+	prom_getstring(nd, "device_type", node_str, sizeof(node_str));
+	if (strcmp(node_str, "cpu"))
+		return -ENODEV;
+
+	if (!compare(nd, *cur_inst, compare_arg)) {
+		if (prom_node)
+			*prom_node = nd;
+		if (mid)
+			*mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
+		return 0;
+	}
+
+	(*cur_inst)++;
+
+	return -ENODEV;
+}
+
+/* Walk the top level of the PROM device tree looking for a cpu
+ * node accepted by COMPARE (instance- or mid-based, see the
+ * wrappers below).  On a match return 0 with *PROM_NODE and *MID
+ * filled in (when non-NULL); otherwise return -ENODEV.
+ *
+ * Fix: the original loop fetched the root's first child and then
+ * immediately took its sibling, so the very first child node was
+ * never passed to check_cpu_node().  Harmless only by accident
+ * (the first child is normally not a cpu node); examine every
+ * child before advancing.
+ */
+static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
+			 int *prom_node, int *mid)
+{
+	int nd, cur_inst, err;
+
+	nd = prom_root_node;
+	cur_inst = 0;
+
+	/* The root itself is cheap to test; check_cpu_node()
+	 * rejects non-cpu nodes immediately.
+	 */
+	err = check_cpu_node(nd, &cur_inst,
+			     compare, compare_arg,
+			     prom_node, mid);
+	if (err == 0)
+		return 0;
+
+	for (nd = prom_getchild(nd); nd != 0; nd = prom_getsibling(nd)) {
+		err = check_cpu_node(nd, &cur_inst,
+				     compare, compare_arg,
+				     prom_node, mid);
+		if (err == 0)
+			return 0;
+	}
+
+	return -ENODEV;
+}
+
+/* COMPARE callback: accept the cpu whose running instance count
+ * equals the desired instance passed through _ARG.
+ */
+static int cpu_instance_compare(int nd, int instance, void *_arg)
+{
+	int desired_instance = (int) (long) _arg;
+
+	if (instance == desired_instance)
+		return 0;
+	return -ENODEV;
+}
+
+/* Find the INSTANCE'th cpu node in the PROM tree.  Returns 0 and
+ * fills *PROM_NODE/*MID (when non-NULL), else -ENODEV.
+ */
+int cpu_find_by_instance(int instance, int *prom_node, int *mid)
+{
+	return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
+			     prom_node, mid);
+}
+
+/* COMPARE callback: accept the cpu whose module id property
+ * matches the desired mid passed through _ARG.
+ */
+static int cpu_mid_compare(int nd, int instance, void *_arg)
+{
+	int desired_mid = (int) (long) _arg;
+	int this_mid;
+
+	this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
+	if (this_mid == desired_mid)
+		return 0;
+	return -ENODEV;
+}
+
+/* Find the cpu node with module id MID.  Returns 0 and fills
+ * *PROM_NODE, else -ENODEV.
+ */
+int cpu_find_by_mid(int mid, int *prom_node)
+{
+	return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
+			     prom_node, NULL);
+}
+
+/* Initial scan of the PROM device tree at boot.  On UP kernels
+ * this also records the boot cpu's clock frequency (SMP bringup
+ * handles it elsewhere), then probes the Enterprise central board
+ * (if any) and identifies the cpu/fpu type.
+ */
+void __init device_scan(void)
+{
+	/* FIX ME FAST... -DaveM */
+	ioport_resource.end = 0xffffffffffffffffUL;
+
+	prom_printf("Booting Linux...\n");
+
+#ifndef CONFIG_SMP
+	{
+		int err, cpu_node;
+		err = cpu_find_by_instance(0, &cpu_node, NULL);
+		if (err) {
+			/* Cannot run without at least one cpu node. */
+			prom_printf("No cpu nodes, cannot continue\n");
+			prom_halt();
+		}
+		cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
+							    "clock-frequency",
+							    0);
+	}
+#endif
+
+	central_probe();
+
+	cpu_probe();
+}
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
new file mode 100644
index 00000000000..b73a3c85877
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -0,0 +1,181 @@
+/* $Id: dtlb_backend.S,v 1.16 2001/10/09 04:02:11 davem Exp $
+ * dtlb_backend.S: Back end to DTLB miss replacement strategy.
+ * This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+
+#if PAGE_SHIFT == 13
+#define SZ_BITS _PAGE_SZ8K
+#elif PAGE_SHIFT == 16
+#define SZ_BITS _PAGE_SZ64K
+#elif PAGE_SHIFT == 19
+#define SZ_BITS _PAGE_SZ512K
+#elif PAGE_SHIFT == 22
+#define SZ_BITS _PAGE_SZ4M
+#endif
+
+#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS)
+
+#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
+#define VPTE_SHIFT (PAGE_SHIFT - 3)
+
+/* Ways we can get here:
+ *
+ * 1) Nucleus loads and stores to/from PA-->VA direct mappings at tl>1.
+ * 2) Nucleus loads and stores to/from user/kernel window save areas.
+ * 3) VPTE misses from dtlb_base and itlb_base.
+ *
+ * We need to extract out the PMD and PGDIR indexes from the
+ * linear virtual page table access address. The PTE index
+ * is at the bottom, but we are not concerned with it. Bits
+ * 0 to 2 are clear since each PTE is 8 bytes in size. Each
+ * PMD and PGDIR entry are 4 bytes in size. Thus, this
+ * address looks something like:
+ *
+ * |---------------------------------------------------------------|
+ * | ... | PGDIR index | PMD index | PTE index | |
+ * |---------------------------------------------------------------|
+ * 63 F E D C B A 3 2 0 <- bit nr
+ *
+ * The variable bits above are defined as:
+ * A --> 3 + (PAGE_SHIFT - log2(8))
+ * --> 3 + (PAGE_SHIFT - 3) - 1
+ * (ie. this is "bit 3" + PAGE_SIZE - size of PTE entry in bits - 1)
+ * B --> A + 1
+ * C --> B + (PAGE_SHIFT - log2(4))
+ * --> B + (PAGE_SHIFT - 2) - 1
+ * (ie. this is "bit B" + PAGE_SIZE - size of PMD entry in bits - 1)
+ * D --> C + 1
+ * E --> D + (PAGE_SHIFT - log2(4))
+ * --> D + (PAGE_SHIFT - 2) - 1
+ * (ie. this is "bit D" + PAGE_SIZE - size of PGDIR entry in bits - 1)
+ * F --> E + 1
+ *
+ * (Note how "B" always evaluates to PAGE_SHIFT, all the other constants
+ * cancel out.)
+ *
+ * For 8K PAGE_SIZE (thus, PAGE_SHIFT of 13) the bit numbers are:
+ * A --> 12
+ * B --> 13
+ * C --> 23
+ * D --> 24
+ * E --> 34
+ * F --> 35
+ *
+ * For 64K PAGE_SIZE (thus, PAGE_SHIFT of 16) the bit numbers are:
+ * A --> 15
+ * B --> 16
+ * C --> 29
+ * D --> 30
+ * E --> 43
+ * F --> 44
+ *
+ * Because bits both above and below each PGDIR and PMD index need to
+ * be masked out, and the index can be as long as 14 bits (when using a
+ * 64K PAGE_SIZE, and thus a PAGE_SHIFT of 16), we need 3 instructions
+ * to extract each index out.
+ *
+ * Shifts do not pair very well on UltraSPARC-I, II, IIi, and IIe, so
+ * we try to avoid using them for the entire operation. We could setup
+ * a mask anywhere from bit 31 down to bit 10 using the sethi instruction.
+ *
+ * We need a mask covering bits B --> C and one covering D --> E.
+ * For 8K PAGE_SIZE these masks are 0x00ffe000 and 0x7ff000000.
+ * For 64K PAGE_SIZE these masks are 0x3fff0000 and 0xfffc0000000.
+ * The second in each set cannot be loaded with a single sethi
+ * instruction, because the upper bits are past bit 32. We would
+ * need to use a sethi + a shift.
+ *
+ * For the time being, we use 2 shifts and a simple "and" mask.
+ * We shift left to clear the bits above the index, we shift down
+ * to clear the bits below the index (sans the log2(4 or 8) bits)
+ * and a mask to clear the log2(4 or 8) bits. We need therefore
+ * define 4 shift counts, all of which are relative to PAGE_SHIFT.
+ *
+ * Although unsupportable for other reasons, this does mean that
+ * 512K and 4MB page sizes would be generally supported by the
+ * kernel. (ELF binaries would break with > 64K PAGE_SIZE since
+ * the sections are only aligned that strongly).
+ *
+ * The operations performed for extraction are thus:
+ *
+ * ((X << FOO_SHIFT_LEFT) >> FOO_SHIFT_RIGHT) & ~0x3
+ *
+ */
+
+#define A (3 + (PAGE_SHIFT - 3) - 1)
+#define B (A + 1)
+#define C (B + (PAGE_SHIFT - 2) - 1)
+#define D (C + 1)
+#define E (D + (PAGE_SHIFT - 2) - 1)
+#define F (E + 1)
+
+#define PMD_SHIFT_LEFT (64 - D)
+#define PMD_SHIFT_RIGHT (64 - (D - B) - 2)
+#define PGDIR_SHIFT_LEFT (64 - F)
+#define PGDIR_SHIFT_RIGHT (64 - (F - D) - 2)
+#define LOW_MASK_BITS 0x3
+
+/* TLB1 ** ICACHE line 1: tl1 DTLB and quick VPTE miss */
+ ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
+ add %g3, %g3, %g5 ! Compute VPTE base
+ cmp %g4, %g5 ! VPTE miss?
+ bgeu,pt %xcc, 1f ! Continue here
+ andcc %g4, TAG_CONTEXT_BITS, %g5 ! tl0 miss Nucleus test
+ ba,a,pt %xcc, from_tl1_trap ! Fall to tl0 miss
+1: sllx %g6, VPTE_SHIFT, %g4 ! Position TAG_ACCESS
+ or %g4, %g5, %g4 ! Prepare TAG_ACCESS
+
+/* TLB1 ** ICACHE line 2: Quick VPTE miss */
+ mov TSB_REG, %g1 ! Grab TSB reg
+ ldxa [%g1] ASI_DMMU, %g5 ! Doing PGD caching?
+ sllx %g6, PMD_SHIFT_LEFT, %g1 ! Position PMD offset
+ be,pn %xcc, sparc64_vpte_nucleus ! Is it from Nucleus?
+ srlx %g1, PMD_SHIFT_RIGHT, %g1 ! Mask PMD offset bits
+ brnz,pt %g5, sparc64_vpte_continue ! Yep, go like smoke
+ andn %g1, LOW_MASK_BITS, %g1 ! Final PMD mask
+ sllx %g6, PGDIR_SHIFT_LEFT, %g5 ! Position PGD offset
+
+/* TLB1 ** ICACHE line 3: Quick VPTE miss */
+ srlx %g5, PGDIR_SHIFT_RIGHT, %g5 ! Mask PGD offset bits
+ andn %g5, LOW_MASK_BITS, %g5 ! Final PGD mask
+ lduwa [%g7 + %g5] ASI_PHYS_USE_EC, %g5! Load PGD
+ brz,pn %g5, vpte_noent ! Valid?
+sparc64_kpte_continue:
+ sllx %g5, 11, %g5 ! Shift into place
+sparc64_vpte_continue:
+ lduwa [%g5 + %g1] ASI_PHYS_USE_EC, %g5! Load PMD
+ sllx %g5, 11, %g5 ! Shift into place
+ brz,pn %g5, vpte_noent ! Valid?
+
+/* TLB1 ** ICACHE line 4: Quick VPTE miss */
+ mov (VALID_SZ_BITS >> 61), %g1 ! upper vpte into %g1
+ sllx %g1, 61, %g1 ! finish calc
+ or %g5, VPTE_BITS, %g5 ! Prepare VPTE data
+ or %g5, %g1, %g5 ! ...
+ mov TLB_SFSR, %g1 ! Restore %g1 value
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load VPTE into TLB
+ stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS
+ retry ! Load PTE once again
+
+#undef SZ_BITS
+#undef VALID_SZ_BITS
+#undef VPTE_SHIFT
+#undef VPTE_BITS
+#undef A
+#undef B
+#undef C
+#undef D
+#undef E
+#undef F
+#undef PMD_SHIFT_LEFT
+#undef PMD_SHIFT_RIGHT
+#undef PGDIR_SHIFT_LEFT
+#undef PGDIR_SHIFT_RIGHT
+#undef LOW_MASK_BITS
+
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
new file mode 100644
index 00000000000..ded2fed23fc
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -0,0 +1,113 @@
+/* $Id: dtlb_base.S,v 1.17 2001/10/11 22:33:52 davem Exp $
+ * dtlb_base.S: Front end to DTLB miss replacement strategy.
+ * This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+
+/* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS)
+ * %g2 (KERN_HIGHBITS | KERN_LOWBITS)
+ * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space)
+ * (0xffe0000000000000) Cheetah (64-bit VA space)
+ * %g7 __pa(current->mm->pgd)
+ *
+ * The VPTE base value is completely magic, but note that
+ * few places in the kernel other than these TLB miss
+ * handlers know anything about the VPTE mechanism or
+ * how it works (see VPTE_SIZE, TASK_SIZE and PTRS_PER_PGD).
+ * Consider the 44-bit VADDR Ultra-I/II case as an example:
+ *
+ * VA[0 : (1<<43)] produce VPTE index [%g3 : 0]
+ * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
+ *
+ * For Cheetah's 64-bit VADDR space this is:
+ *
+ * VA[0 : (1<<63)] produce VPTE index [%g3 : 0]
+ * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
+ *
+ * If you're paying attention you'll notice that this means half of
+ * the VPTE table is above %g3 and half is below, low VA addresses
+ * map progressively upwards from %g3, and high VA addresses map
+ * progressively upwards towards %g3. This trick was needed to make
+ * the same 8 instruction handler work both for Spitfire/Blackbird's
+ * peculiar VA space hole configuration and the full 64-bit VA space
+ * one of Cheetah at the same time.
+ */
+
+/* Ways we can get here:
+ *
+ * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
+ * 2) Nucleus loads and stores to/from vmalloc() areas.
+ * 3) User loads and stores.
+ * 4) User space accesses by nucleus at tl0
+ */
+
+#if PAGE_SHIFT == 13
+/*
+ * To compute vpte offset, we need to do ((addr >> 13) << 3),
+ * which can be optimized to (addr >> 10) if bits 10/11/12 can
+ * be guaranteed to be 0 ... mmu_context.h does guarantee this
+ * by only using 10 bits in the hwcontext value.
+ */
+/* Single-instruction variant: OFFSET1 expands to nothing and the
+ * combined shift is done in OFFSET2.  CREATE_VPTE_NOP pads the
+ * handler below with one nop so both #if branches emit the same
+ * number of instructions (the ICACHE line layout depends on it).
+ */
+#define CREATE_VPTE_OFFSET1(r1, r2)
+#define CREATE_VPTE_OFFSET2(r1, r2) \
+				srax	r1, 10, r2
+#define CREATE_VPTE_NOP		nop
+#else
+/* General variant: two instructions (shift right by PAGE_SHIFT,
+ * then left by 3 to index 8-byte VPTEs); no pad nop needed.
+ */
+#define CREATE_VPTE_OFFSET1(r1, r2) \
+				srax	r1, PAGE_SHIFT, r2
+#define CREATE_VPTE_OFFSET2(r1, r2) \
+				sllx	r2, 3, r2
+#define CREATE_VPTE_NOP
+#endif
+
+/* DTLB ** ICACHE line 1: Quick user TLB misses */
+	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
+	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
+/* from_tl1_trap is also entered from dtlb_backend.S when a VPTE
+ * miss at TL1 falls back to normal TL0-style miss processing.
+ */
+from_tl1_trap:
+	rdpr		%tl, %g5			! For TL==3 test
+	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
+	be,pn		%xcc, 3f			! Yep, special processing
+	CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
+	cmp		%g5, 4				! Last trap level?
+	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
+	 nop						! delay slot
+
+/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
+	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
+1:	brgez,pn	%g5, longpath			! Invalid, branch out
+	 nop						! Delay-slot
+9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry						! Trap return
+3:	brlz,pt		%g4, 9b				! Kernel virtual map?
+	 xor		%g2, %g4, %g5			! Finish bit twiddles
+	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
+
+/* DTLB ** ICACHE line 3: winfixups+real_faults */
+/* Slow path: switch to alternate globals and hand off either to
+ * sparc64_realfault_common (TL==1, normal fault) or to the window
+ * fixup trampoline (fault inside etrap/rtrap register window code).
+ */
+longpath:
+	rdpr	%pstate, %g5			! Move into alternate globals
+	wrpr	%g5, PSTATE_AG|PSTATE_MG, %pstate
+	rdpr	%tl, %g4			! See where we came from.
+	cmp	%g4, 1				! Is etrap/rtrap window fault?
+	mov	TLB_TAG_ACCESS, %g4		! Prepare for fault processing
+	ldxa	[%g4] ASI_DMMU, %g5		! Load faulting VA page
+	be,pt	%xcc, sparc64_realfault_common	! Jump to normal fault handling
+	 mov	FAULT_CODE_DTLB, %g4		! It was read from DTLB
+
+/* DTLB ** ICACHE line 4: Unused... */
+	ba,a,pt	%xcc, winfix_trampoline		! Call window fixup code
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	CREATE_VPTE_NOP
+
+#undef CREATE_VPTE_OFFSET1
+#undef CREATE_VPTE_OFFSET2
+#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
new file mode 100644
index 00000000000..d848bb7374b
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -0,0 +1,54 @@
+/* $Id: dtlb_prot.S,v 1.22 2001/04/11 23:40:32 davem Exp $
+ * dtlb_prot.S: DTLB protection trap strategy.
+ * This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+/* Ways we can get here:
+ *
+ * [TL == 0] 1) User stores to readonly pages.
+ * [TL == 0] 2) Nucleus stores to user readonly pages.
+ * [TL > 0] 3) Nucleus stores to user readonly stack frame.
+ */
+
+/* PROT ** ICACHE line 1: User DTLB protection trap */
+/* Clear the SFSR fault-valid bit, switch to alternate globals, and
+ * dispatch: TL>1 means the fault hit etrap/rtrap window handling and
+ * needs winfix_trampoline, otherwise fall into normal fault handling
+ * with a write-fault code.
+ */
+	stxa		%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
+	membar		#Sync				! Synchronize ASI stores
+	rdpr		%pstate, %g5			! Move into alternate globals
+	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
+	rdpr		%tl, %g1			! Need to do a winfixup?
+	cmp		%g1, 1				! Trap level >1?
+	mov		TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
+	nop
+
+/* PROT ** ICACHE line 2: More real fault processing */
+	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
+	 ldxa		[%g4] ASI_DMMU, %g5		! Put tagaccess in %g5
+	ba,pt		%xcc, sparc64_realfault_common	! Nope, normal fault
+	 mov		FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+	nop
+	nop
+	nop
+	nop
+
+/* PROT ** ICACHE line 3: Unused... */
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+/* PROT ** ICACHE line 4: Unused... */
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
new file mode 100644
index 00000000000..6ffbeb70194
--- /dev/null
+++ b/arch/sparc64/kernel/ebus.c
@@ -0,0 +1,644 @@
+/* $Id: ebus.c,v 1.64 2001/11/08 04:41:33 davem Exp $
+ * ebus.c: PCI to EBus bridge device.
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pbm.h>
+#include <asm/ebus.h>
+#include <asm/oplib.h>
+#include <asm/bpp.h>
+#include <asm/irq.h>
+
+/* EBUS dma library. */
+
+#define EBDMA_CSR 0x00UL /* Control/Status */
+#define EBDMA_ADDR 0x04UL /* DMA Address */
+#define EBDMA_COUNT 0x08UL /* DMA Count */
+
+#define EBDMA_CSR_INT_PEND 0x00000001
+#define EBDMA_CSR_ERR_PEND 0x00000002
+#define EBDMA_CSR_DRAIN 0x00000004
+#define EBDMA_CSR_INT_EN 0x00000010
+#define EBDMA_CSR_RESET 0x00000080
+#define EBDMA_CSR_WRITE 0x00000100
+#define EBDMA_CSR_EN_DMA 0x00000200
+#define EBDMA_CSR_CYC_PEND 0x00000400
+#define EBDMA_CSR_DIAG_RD_DONE 0x00000800
+#define EBDMA_CSR_DIAG_WR_DONE 0x00001000
+#define EBDMA_CSR_EN_CNT 0x00002000
+#define EBDMA_CSR_TC 0x00004000
+#define EBDMA_CSR_DIS_CSR_DRN 0x00010000
+#define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
+#define EBDMA_CSR_BURST_SZ_1 0x00080000
+#define EBDMA_CSR_BURST_SZ_4 0x00000000
+#define EBDMA_CSR_BURST_SZ_8 0x00040000
+#define EBDMA_CSR_BURST_SZ_16 0x000c0000
+#define EBDMA_CSR_DIAG_EN 0x00100000
+#define EBDMA_CSR_DIS_ERR_PEND 0x00400000
+#define EBDMA_CSR_TCI_DIS 0x00800000
+#define EBDMA_CSR_EN_NEXT 0x01000000
+#define EBDMA_CSR_DMA_ON 0x02000000
+#define EBDMA_CSR_A_LOADED 0x04000000
+#define EBDMA_CSR_NA_LOADED 0x08000000
+#define EBDMA_CSR_DEV_ID_MASK 0xf0000000
+
+#define EBUS_DMA_RESET_TIMEOUT 10000
+
+/* Reset the EBus DMA engine.  Writes EBDMA_CSR_RESET, then (unless
+ * no_drain is set) polls the CSR until any pending drain or cycle
+ * completes, bounded by EBUS_DMA_RESET_TIMEOUT iterations of 10us.
+ * Caller is responsible for any locking of p->regs.
+ */
+static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
+{
+	int i;
+	u32 val = 0;
+
+	writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
+	udelay(1);
+
+	if (no_drain)
+		return;
+
+	for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
+		val = readl(p->regs + EBDMA_CSR);
+
+		if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
+			break;
+		udelay(10);
+	}
+}
+
+/* Shared interrupt handler for EBus DMA.  Reads the CSR and writes it
+ * back (acknowledging the pending bits) under p->lock, then invokes the
+ * client callback outside the lock: EVENT_ERROR on an error, otherwise
+ * EVENT_DMA (terminal count reached) or EVENT_DEVICE.  Returns IRQ_NONE
+ * if neither error nor interrupt was pending (shared-IRQ convention).
+ */
+static irqreturn_t ebus_dma_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct ebus_dma_info *p = dev_id;
+	unsigned long flags;
+	u32 csr = 0;
+
+	spin_lock_irqsave(&p->lock, flags);
+	csr = readl(p->regs + EBDMA_CSR);
+	writel(csr, p->regs + EBDMA_CSR);
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (csr & EBDMA_CSR_ERR_PEND) {
+		printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
+		p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
+		return IRQ_HANDLED;
+	} else if (csr & EBDMA_CSR_INT_PEND) {
+		p->callback(p,
+			    (csr & EBDMA_CSR_TC) ?
+			    EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
+			    p->client_cookie);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+
+}
+
+/* Register an EBus DMA client.  The caller fills in p (regs, flags,
+ * name, callback, lock).  Validates the fields, resets the engine
+ * without draining, and programs a baseline CSR (16-byte bursts,
+ * byte-count enabled, optional TC-interrupt disable).  Returns 0 on
+ * success or -EINVAL on a malformed ebus_dma_info.
+ */
+int ebus_dma_register(struct ebus_dma_info *p)
+{
+	u32 csr;
+
+	if (!p->regs)
+		return -EINVAL;
+	if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
+			 EBUS_DMA_FLAG_TCI_DISABLE))
+		return -EINVAL;
+	if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
+		return -EINVAL;
+	if (!strlen(p->name))
+		return -EINVAL;
+
+	__ebus_dma_reset(p, 1);
+
+	csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
+
+	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
+		csr |= EBDMA_CSR_TCI_DIS;
+
+	writel(csr, p->regs + EBDMA_CSR);
+
+	return 0;
+}
+EXPORT_SYMBOL(ebus_dma_register);
+EXPORT_SYMBOL(ebus_dma_register);
+
+/* Enable (on != 0) or disable DMA interrupts for a registered client.
+ * When the client opted into the library's shared handler, the IRQ is
+ * requested before enabling and freed after disabling, so the handler
+ * never runs with INT_EN clear.  Returns 0, or -EBUSY if request_irq
+ * fails.
+ */
+int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
+{
+	unsigned long flags;
+	u32 csr;
+
+	if (on) {
+		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
+			if (request_irq(p->irq, ebus_dma_irq, SA_SHIRQ, p->name, p))
+				return -EBUSY;
+		}
+
+		spin_lock_irqsave(&p->lock, flags);
+		csr = readl(p->regs + EBDMA_CSR);
+		csr |= EBDMA_CSR_INT_EN;
+		writel(csr, p->regs + EBDMA_CSR);
+		spin_unlock_irqrestore(&p->lock, flags);
+	} else {
+		spin_lock_irqsave(&p->lock, flags);
+		csr = readl(p->regs + EBDMA_CSR);
+		csr &= ~EBDMA_CSR_INT_EN;
+		writel(csr, p->regs + EBDMA_CSR);
+		spin_unlock_irqrestore(&p->lock, flags);
+
+		if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
+			free_irq(p->irq, p);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ebus_dma_irq_enable);
+EXPORT_SYMBOL(ebus_dma_irq_enable);
+
+/* Unregister an EBus DMA client.  If interrupts were still enabled,
+ * disable them in the CSR under the lock and free the IRQ afterwards
+ * (free_irq must not be called with the spinlock held).
+ */
+void ebus_dma_unregister(struct ebus_dma_info *p)
+{
+	unsigned long flags;
+	u32 csr;
+	int irq_on = 0;
+
+	spin_lock_irqsave(&p->lock, flags);
+	csr = readl(p->regs + EBDMA_CSR);
+	if (csr & EBDMA_CSR_INT_EN) {
+		csr &= ~EBDMA_CSR_INT_EN;
+		writel(csr, p->regs + EBDMA_CSR);
+		irq_on = 1;
+	}
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (irq_on)
+		free_irq(p->irq, p);
+}
+EXPORT_SYMBOL(ebus_dma_unregister);
+EXPORT_SYMBOL(ebus_dma_unregister);
+
+/* Queue a DMA transfer of len bytes at bus_addr.  The byte count is
+ * limited to 24 bits by the EBDMA_COUNT register, hence the (1 << 24)
+ * check.  Fails with -EINVAL if DMA is not enabled, -EBUSY if a
+ * next-address is already loaded (engine can hold only one pending
+ * transfer).  Returns 0 once count and address are programmed.
+ */
+int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
+{
+	unsigned long flags;
+	u32 csr;
+	int err;
+
+	if (len >= (1 << 24))
+		return -EINVAL;
+
+	spin_lock_irqsave(&p->lock, flags);
+	csr = readl(p->regs + EBDMA_CSR);
+	err = -EINVAL;
+	if (!(csr & EBDMA_CSR_EN_DMA))
+		goto out;
+	err = -EBUSY;
+	if (csr & EBDMA_CSR_NA_LOADED)
+		goto out;
+
+	writel(len, p->regs + EBDMA_COUNT);
+	writel(bus_addr, p->regs + EBDMA_ADDR);
+	err = 0;
+
+out:
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL(ebus_dma_request);
+EXPORT_SYMBOL(ebus_dma_request);
+
+/* Prepare the engine for a new transfer in the given direction
+ * (write != 0 means device-to-memory per EBDMA_CSR_WRITE).  Performs a
+ * full draining reset, then programs interrupts, byte count, 16-byte
+ * bursts and chained (EN_NEXT) mode.  Note DMA itself is not enabled
+ * here; see ebus_dma_enable().
+ */
+void ebus_dma_prepare(struct ebus_dma_info *p, int write)
+{
+	unsigned long flags;
+	u32 csr;
+
+	spin_lock_irqsave(&p->lock, flags);
+	__ebus_dma_reset(p, 0);
+
+	csr = (EBDMA_CSR_INT_EN |
+	       EBDMA_CSR_EN_CNT |
+	       EBDMA_CSR_BURST_SZ_16 |
+	       EBDMA_CSR_EN_NEXT);
+
+	if (write)
+		csr |= EBDMA_CSR_WRITE;
+	if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
+		csr |= EBDMA_CSR_TCI_DIS;
+
+	writel(csr, p->regs + EBDMA_CSR);
+
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+EXPORT_SYMBOL(ebus_dma_prepare);
+EXPORT_SYMBOL(ebus_dma_prepare);
+
+/* Remaining byte count of the current transfer, read straight from the
+ * COUNT register (no locking; a single readl).
+ */
+unsigned int ebus_dma_residue(struct ebus_dma_info *p)
+{
+	return readl(p->regs + EBDMA_COUNT);
+}
+EXPORT_SYMBOL(ebus_dma_residue);
+
+/* Current DMA bus address, read straight from the ADDR register. */
+unsigned int ebus_dma_addr(struct ebus_dma_info *p)
+{
+	return readl(p->regs + EBDMA_ADDR);
+}
+EXPORT_SYMBOL(ebus_dma_addr);
+
+/* Turn the DMA engine on or off by toggling EBDMA_CSR_EN_DMA.  The CSR
+ * is only written back when the bit actually changes, avoiding a
+ * redundant (and possibly side-effecting) register write.
+ */
+void ebus_dma_enable(struct ebus_dma_info *p, int on)
+{
+	unsigned long flags;
+	u32 orig_csr, csr;
+
+	spin_lock_irqsave(&p->lock, flags);
+	orig_csr = csr = readl(p->regs + EBDMA_CSR);
+	if (on)
+		csr |= EBDMA_CSR_EN_DMA;
+	else
+		csr &= ~EBDMA_CSR_EN_DMA;
+	if ((orig_csr & EBDMA_CSR_EN_DMA) !=
+	    (csr & EBDMA_CSR_EN_DMA))
+		writel(csr, p->regs + EBDMA_CSR);
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+EXPORT_SYMBOL(ebus_dma_enable);
+EXPORT_SYMBOL(ebus_dma_enable);
+
+struct linux_ebus *ebus_chain = NULL;
+
+#ifdef CONFIG_SUN_AUXIO
+extern void auxio_probe(void);
+#endif
+
+/* Zeroed GFP_ATOMIC allocation for probe-time structures; panics on
+ * failure rather than returning NULL, so callers never check.
+ * GFP_ATOMIC is used because this runs during early bus probing.
+ */
+static inline void *ebus_alloc(size_t size)
+{
+	void *mem;
+
+	mem = kmalloc(size, GFP_ATOMIC);
+	if (!mem)
+		panic("ebus_alloc: out of memory");
+	memset((char *)mem, 0, size);
+	return mem;
+}
+
+/* Read the PROM "ranges" property into ebus->ebus_ranges and derive the
+ * entry count.  A missing property (prom_getproperty returns -1) leaves
+ * num_ebus_ranges at 0.
+ */
+static void __init ebus_ranges_init(struct linux_ebus *ebus)
+{
+	int success;
+
+	ebus->num_ebus_ranges = 0;
+	success = prom_getproperty(ebus->prom_node, "ranges",
+				   (char *)ebus->ebus_ranges,
+				   sizeof(ebus->ebus_ranges));
+	if (success != -1)
+		ebus->num_ebus_ranges = (success/sizeof(struct linux_prom_ebus_ranges));
+}
+
+/* Read the PROM "interrupt-map" and "interrupt-map-mask" properties.
+ * A missing interrupt-map is tolerated (num_ebus_intmap stays 0), but
+ * a map without its mask is fatal: the mask is required to match map
+ * entries in ebus_intmap_match(), so we halt back to the PROM.
+ */
+static void __init ebus_intmap_init(struct linux_ebus *ebus)
+{
+	int success;
+
+	ebus->num_ebus_intmap = 0;
+	success = prom_getproperty(ebus->prom_node, "interrupt-map",
+				   (char *)ebus->ebus_intmap,
+				   sizeof(ebus->ebus_intmap));
+	if (success == -1)
+		return;
+
+	ebus->num_ebus_intmap = (success/sizeof(struct linux_prom_ebus_intmap));
+
+	success = prom_getproperty(ebus->prom_node, "interrupt-map-mask",
+				   (char *)&ebus->ebus_intmask,
+				   sizeof(ebus->ebus_intmask));
+	if (success == -1) {
+		prom_printf("%s: can't get interrupt-map-mask\n", __FUNCTION__);
+		prom_halt();
+	}
+}
+
+/* Translate a device interrupt through the ebus interrupt-map.  The
+ * (reg, *interrupt) pair is masked with interrupt-map-mask and compared
+ * against each map entry; on a hit *interrupt is rewritten to the
+ * parent-side (cinterrupt) value.  Returns 0 on success or when there
+ * is no map at all (in which case *interrupt is left untouched), -1 if
+ * a map exists but nothing matched.
+ */
+int __init ebus_intmap_match(struct linux_ebus *ebus,
+			     struct linux_prom_registers *reg,
+			     int *interrupt)
+{
+	unsigned int hi, lo, irq;
+	int i;
+
+	if (!ebus->num_ebus_intmap)
+		return 0;
+
+	hi = reg->which_io & ebus->ebus_intmask.phys_hi;
+	lo = reg->phys_addr & ebus->ebus_intmask.phys_lo;
+	irq = *interrupt & ebus->ebus_intmask.interrupt;
+	for (i = 0; i < ebus->num_ebus_intmap; i++) {
+		if ((ebus->ebus_intmap[i].phys_hi == hi) &&
+		    (ebus->ebus_intmap[i].phys_lo == lo) &&
+		    (ebus->ebus_intmap[i].interrupt == irq)) {
+			*interrupt = ebus->ebus_intmap[i].cinterrupt;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+/* Populate a linux_ebus_child from its PROM node: name, address
+ * resources (either raw register values for non-standard "reg"
+ * properties, or inherited from the parent device's resources), and
+ * IRQs translated through the ebus interrupt-map.  Children without an
+ * "interrupts" property get special-cased PS/2 keyboard/mouse IRQs
+ * inherited from the parent 8042 node.
+ */
+void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
+			    struct linux_ebus_child *dev, int non_standard_regs)
+{
+	int regs[PROMREG_MAX];
+	int irqs[PROMREG_MAX];
+	int i, len;
+
+	dev->prom_node = node;
+	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
+	printk(" (%s)", dev->prom_name);
+
+	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
+	dev->num_addrs = len / sizeof(regs[0]);
+
+	if (non_standard_regs) {
+		/* This is to handle reg properties which are not
+		 * in the parent relative format.  One example are
+		 * children of the i2c device on CompactPCI systems.
+		 *
+		 * So, for such devices we just record the property
+		 * raw in the child resources.
+		 */
+		for (i = 0; i < dev->num_addrs; i++)
+			dev->resource[i].start = regs[i];
+	} else {
+		for (i = 0; i < dev->num_addrs; i++) {
+			int rnum = regs[i];
+			if (rnum >= dev->parent->num_addrs) {
+				prom_printf("UGH: property for %s was %d, need < %d\n",
+					    dev->prom_name, len, dev->parent->num_addrs);
+				panic(__FUNCTION__);
+			}
+			/* NOTE(review): rnum is range-checked above but the
+			 * copies below index the parent with i, not rnum --
+			 * correct only if regs[i] == i always holds; verify.
+			 */
+			dev->resource[i].start = dev->parent->resource[i].start;
+			dev->resource[i].end = dev->parent->resource[i].end;
+			dev->resource[i].flags = IORESOURCE_MEM;
+			dev->resource[i].name = dev->prom_name;
+		}
+	}
+
+	for (i = 0; i < PROMINTR_MAX; i++)
+		dev->irqs[i] = PCI_IRQ_NONE;
+
+	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
+	if ((len == -1) || (len == 0)) {
+		dev->num_irqs = 0;
+		/*
+		 * Oh, well, some PROMs don't export interrupts
+		 * property to children of EBus devices...
+		 *
+		 * Be smart about PS/2 keyboard and mouse.
+		 */
+		if (!strcmp(dev->parent->prom_name, "8042")) {
+			if (!strcmp(dev->prom_name, "kb_ps2")) {
+				dev->num_irqs = 1;
+				dev->irqs[0] = dev->parent->irqs[0];
+			} else {
+				dev->num_irqs = 1;
+				dev->irqs[0] = dev->parent->irqs[1];
+			}
+		}
+	} else {
+		dev->num_irqs = len / sizeof(irqs[0]);
+		for (i = 0; i < dev->num_irqs; i++) {
+			struct pci_pbm_info *pbm = dev->bus->parent;
+			struct pci_controller_info *p = pbm->parent;
+
+			if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
+				dev->irqs[i] = p->irq_build(pbm,
+							    dev->bus->self,
+							    irqs[i]);
+			} else {
+				/* If we get a bogus interrupt property, just
+				 * record the raw value instead of punting.
+				 */
+				dev->irqs[i] = irqs[i];
+			}
+		}
+	}
+}
+
+/* Devices whose children carry raw (non parent-relative) "reg"
+ * properties; see the non_standard_regs handling in fill_ebus_child().
+ */
+static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
+{
+	if (!strcmp(dev->prom_name, "i2c") ||
+	    !strcmp(dev->prom_name, "SUNW,lombus"))
+		return 1;
+	return 0;
+}
+
+/* Populate a linux_ebus_device from its PROM node: decode the "reg"
+ * property into resources claimed against the bridge's PCI BARs, build
+ * IRQs via the interrupt-map, then recurse over the node's children
+ * through fill_ebus_child().  Halts to PROM on a malformed reg
+ * property length.
+ */
+void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
+{
+	struct linux_prom_registers regs[PROMREG_MAX];
+	struct linux_ebus_child *child;
+	int irqs[PROMINTR_MAX];
+	int i, n, len;
+
+	dev->prom_node = node;
+	prom_getstring(node, "name", dev->prom_name, sizeof(dev->prom_name));
+	printk(" [%s", dev->prom_name);
+
+	len = prom_getproperty(node, "reg", (void *)regs, sizeof(regs));
+	if (len == -1) {
+		dev->num_addrs = 0;
+		goto probe_interrupts;
+	}
+
+	if (len % sizeof(struct linux_prom_registers)) {
+		prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
+			    dev->prom_name, len,
+			    (int)sizeof(struct linux_prom_registers));
+		prom_halt();
+	}
+	dev->num_addrs = len / sizeof(struct linux_prom_registers);
+
+	for (i = 0; i < dev->num_addrs; i++) {
+		/* XXX Learn how to interpret ebus ranges... -DaveM */
+		/* which_io >= 0x10 encodes the BAR index in the upper bits;
+		 * smaller values are the BAR index directly.
+		 */
+		if (regs[i].which_io >= 0x10)
+			n = (regs[i].which_io - 0x10) >> 2;
+		else
+			n = regs[i].which_io;
+
+		dev->resource[i].start = dev->bus->self->resource[n].start;
+		dev->resource[i].start += (unsigned long)regs[i].phys_addr;
+		dev->resource[i].end =
+			(dev->resource[i].start + (unsigned long)regs[i].reg_size - 1UL);
+		dev->resource[i].flags = IORESOURCE_MEM;
+		dev->resource[i].name = dev->prom_name;
+		request_resource(&dev->bus->self->resource[n],
+				 &dev->resource[i]);
+	}
+
+probe_interrupts:
+	for (i = 0; i < PROMINTR_MAX; i++)
+		dev->irqs[i] = PCI_IRQ_NONE;
+
+	len = prom_getproperty(node, "interrupts", (char *)&irqs, sizeof(irqs));
+	if ((len == -1) || (len == 0)) {
+		dev->num_irqs = 0;
+	} else {
+		dev->num_irqs = len / sizeof(irqs[0]);
+		for (i = 0; i < dev->num_irqs; i++) {
+			struct pci_pbm_info *pbm = dev->bus->parent;
+			struct pci_controller_info *p = pbm->parent;
+
+			if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) {
+				dev->irqs[i] = p->irq_build(pbm,
+							    dev->bus->self,
+							    irqs[i]);
+			} else {
+				/* If we get a bogus interrupt property, just
+				 * record the raw value instead of punting.
+				 */
+				dev->irqs[i] = irqs[i];
+			}
+		}
+	}
+
+	/* Walk all child nodes, building a singly-linked child list. */
+	if ((node = prom_getchild(node))) {
+		printk(" ->");
+		dev->children = ebus_alloc(sizeof(struct linux_ebus_child));
+
+		child = dev->children;
+		child->next = NULL;
+		child->parent = dev;
+		child->bus = dev->bus;
+		fill_ebus_child(node, &regs[0],
+				child, child_regs_nonstandard(dev));
+
+		while ((node = prom_getsibling(node)) != 0) {
+			child->next = ebus_alloc(sizeof(struct linux_ebus_child));
+
+			child = child->next;
+			child->next = NULL;
+			child->parent = dev;
+			child->bus = dev->bus;
+			fill_ebus_child(node, &regs[0],
+					child, child_regs_nonstandard(dev));
+		}
+	}
+	printk("]");
+}
+
+/* Find the next Sun EBus or RIO-EBus PCI bridge after 'start' (NULL
+ * starts a fresh scan).  *is_rio_p is set to 1 for the RIO variant.
+ * Returns NULL when no further bridge exists.
+ */
+static struct pci_dev *find_next_ebus(struct pci_dev *start, int *is_rio_p)
+{
+	struct pci_dev *pdev = start;
+
+	do {
+		pdev = pci_find_device(PCI_VENDOR_ID_SUN, PCI_ANY_ID, pdev);
+		if (pdev &&
+		    (pdev->device == PCI_DEVICE_ID_SUN_EBUS ||
+		     pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
+			break;
+	} while (pdev != NULL);
+
+	if (pdev && (pdev->device == PCI_DEVICE_ID_SUN_RIO_EBUS))
+		*is_rio_p = 1;
+	else
+		*is_rio_p = 0;
+
+	return pdev;
+}
+
+/* Probe all EBus bridges in the system and build the global ebus_chain
+ * list.  For each bridge: read its PROM properties (ranges, interrupt
+ * map), then enumerate child device nodes via fill_ebus_device().
+ * Bridges whose PROM node has no children (e.g. the empty ebuses on
+ * SUNW,pci-qfe cards) are skipped entirely.  Finishes by probing auxio
+ * when configured.
+ */
+void __init ebus_init(void)
+{
+	struct pci_pbm_info *pbm;
+	struct linux_ebus_device *dev;
+	struct linux_ebus *ebus;
+	struct pci_dev *pdev;
+	struct pcidev_cookie *cookie;
+	int nd, ebusnd, is_rio;
+	int num_ebus = 0;
+
+	pdev = find_next_ebus(NULL, &is_rio);
+	if (!pdev) {
+		printk("ebus: No EBus's found.\n");
+		return;
+	}
+
+	cookie = pdev->sysdata;
+	ebusnd = cookie->prom_node;
+
+	ebus_chain = ebus = ebus_alloc(sizeof(struct linux_ebus));
+	ebus->next = NULL;
+	ebus->is_rio = is_rio;
+
+	while (ebusnd) {
+		/* SUNW,pci-qfe uses four empty ebuses on it.
+		   I think we should not consider them here,
+		   as they have half of the properties this
+		   code expects and once we do PCI hot-plug,
+		   we'd have to tweak with the ebus_chain
+		   in the runtime after initialization. -jj */
+		if (!prom_getchild (ebusnd)) {
+			pdev = find_next_ebus(pdev, &is_rio);
+			if (!pdev) {
+				/* Nothing valid found at all: undo the
+				 * initial allocation's chain head.
+				 */
+				if (ebus == ebus_chain) {
+					ebus_chain = NULL;
+					printk("ebus: No EBus's found.\n");
+					return;
+				}
+				break;
+			}
+			ebus->is_rio = is_rio;
+			cookie = pdev->sysdata;
+			ebusnd = cookie->prom_node;
+			continue;
+		}
+		printk("ebus%d:", num_ebus);
+
+		prom_getstring(ebusnd, "name", ebus->prom_name, sizeof(ebus->prom_name));
+		ebus->index = num_ebus;
+		ebus->prom_node = ebusnd;
+		ebus->self = pdev;
+		ebus->parent = pbm = cookie->pbm;
+
+		ebus_ranges_init(ebus);
+		ebus_intmap_init(ebus);
+
+		nd = prom_getchild(ebusnd);
+		if (!nd)
+			goto next_ebus;
+
+		ebus->devices = ebus_alloc(sizeof(struct linux_ebus_device));
+
+		dev = ebus->devices;
+		dev->next = NULL;
+		dev->children = NULL;
+		dev->bus = ebus;
+		fill_ebus_device(nd, dev);
+
+		while ((nd = prom_getsibling(nd)) != 0) {
+			dev->next = ebus_alloc(sizeof(struct linux_ebus_device));
+
+			dev = dev->next;
+			dev->next = NULL;
+			dev->children = NULL;
+			dev->bus = ebus;
+			fill_ebus_device(nd, dev);
+		}
+
+	next_ebus:
+		printk("\n");
+
+		pdev = find_next_ebus(pdev, &is_rio);
+		if (!pdev)
+			break;
+
+		cookie = pdev->sysdata;
+		ebusnd = cookie->prom_node;
+
+		ebus->next = ebus_alloc(sizeof(struct linux_ebus));
+		ebus = ebus->next;
+		ebus->next = NULL;
+		ebus->is_rio = is_rio;
+		++num_ebus;
+	}
+
+#ifdef CONFIG_SUN_AUXIO
+	auxio_probe();
+#endif
+}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
new file mode 100644
index 00000000000..a47f2d0b1a2
--- /dev/null
+++ b/arch/sparc64/kernel/entry.S
@@ -0,0 +1,1919 @@
+/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
+ * arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
+ *
+ * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/visasm.h>
+#include <asm/estate.h>
+#include <asm/auxio.h>
+
+/* #define SYSCALL_TRACING 1 */
+
+#define curptr g6
+
+#define NR_SYSCALLS 284 /* Each OS is different... */
+
+ .text
+ .align 32
+
+	.globl		sparc64_vpte_patchme1
+	.globl		sparc64_vpte_patchme2
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP
+ * range (note that this is only possible for instruction miss, data misses to
+ * obp range do not use vpte). If so, go back directly to the faulting address.
+ * This is because we want to read the tpc, otherwise we have no way of knowing
+ * the 8k aligned faulting address if we are using >8k kernel pagesize. This
+ * also ensures no vpte range addresses are dropped into tlb while obp is
+ * executing (see inherit_locked_prom_mappings() rant).
+ */
+sparc64_vpte_nucleus:
+	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
+	mov		0xf, %g5
+	sllx		%g5, 28, %g5
+
+	/* Is addr >= LOW_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, sparc64_vpte_patchme1
+	 mov		0x1, %g5
+
+	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
+	sllx		%g5, 32, %g5
+
+	/* Is addr < HI_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_iaddr_patch
+	 nop
+
+	/* These two instructions are patched by paging_init().  */
+sparc64_vpte_patchme1:
+	sethi		%hi(0), %g5
+sparc64_vpte_patchme2:
+	or		%g5, %lo(0), %g5
+
+	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
+	ba,pt		%xcc, sparc64_kpte_continue
+	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */
+
+vpte_noent:
+	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
+	 * skip over the trap instruction so that the top level
+	 * TLB miss handler will think this %g5 value is just an
+	 * invalid PTE, thus branching to full fault processing.
+	 */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_DMMU	/* %g1 + %g1 == TLB_TAG_ACCESS */
+	done
+
+	.globl		obp_iaddr_patch
+/* Instruction-side OBP fault: walk the OBP page tables (whose PGD is
+ * patched in below) by hand using 8k pages, reload the ITLB and retry.
+ * Invalid PMD/PTE entries fall through to longpath.
+ */
+obp_iaddr_patch:
+	/* These two instructions patched by inherit_prom_mappings().  */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Behave as if we are at TL0.  */
+	wrpr		%g0, 1, %tl
+	rdpr		%tpc, %g4	/* Find original faulting iaddr */
+	srlx		%g4, 13, %g4	/* Throw out context bits */
+	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
+
+	/* Restore previous TAG_ACCESS.  */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_IMMU
+
+	/* Get PMD offset.  */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid?  */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset.  */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE.  */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap.  */
+	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
+	retry
+
+	.globl		obp_daddr_patch
+/* Data-side OBP fault: same hand walk of the OBP page tables as
+ * obp_iaddr_patch above, but the faulting VA is already in %g4 and the
+ * reload goes into the DTLB.
+ */
+obp_daddr_patch:
+	/* These two instructions patched by inherit_prom_mappings().  */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Get PMD offset.  */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid?  */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset.  */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE.  */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap.  */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
+	retry
+
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by prom, as well as by kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() using 8k
+ * pagesize.
+ */
+kvmap:
+	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
+	mov		0xf, %g5
+	sllx		%g5, 28, %g5
+
+	/* Is addr >= LOW_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, vmalloc_addr
+	 mov		0x1, %g5
+
+	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
+	sllx		%g5, 32, %g5
+
+	/* Is addr < HI_OBP_ADDRESS?  */
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_daddr_patch
+	 nop
+
+vmalloc_addr:
+	/* If we get here, a vmalloc addr accessed, load kernel VPTE.  */
+	ldxa		[%g3 + %g6] ASI_N, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* PTE is valid, load into TLB and return from trap.  */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry
+
+	/* This is trivial with the new code... */
+/* FPU-disabled trap: enable FPRS_FEF and restore this thread's FP
+ * state from thread_info.  Depending on the TI_FPSAVED dirty bits
+ * (FPRS_DL lower half, FPRS_DU upper half), block-load the saved
+ * registers; halves that were never saved are initialized with the
+ * fzero/faddd/fmuld chains instead.  The SECONDARY_CONTEXT is
+ * temporarily switched around the ASI_BLK_S block loads and restored
+ * at fpdis_exit.  The cplus_fptrap_insn_N sethis are patched at boot
+ * for Cheetah-Plus (see cheetah_plus_patch_fpdis below).
+ */
+	.globl		do_fpdis
+do_fpdis:
+	sethi		%hi(TSTATE_PEF), %g4					! IEU0
+	rdpr		%tstate, %g5
+	andcc		%g5, %g4, %g0
+	be,pt		%xcc, 1f
+	 nop
+	rd		%fprs, %g5
+	andcc		%g5, FPRS_FEF, %g0
+	be,pt		%xcc, 1f
+	 nop
+
+	/* Legal state when DCR_IFPOE is set in Cheetah %dcr.  */
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	add		%g0, %g0, %g0
+	ba,a,pt		%xcc, rtrap_clr_l6
+
+1:	ldub		[%g6 + TI_FPSAVED], %g5				! Load	Group
+	wr		%g0, FPRS_FEF, %fprs				! LSU	Group+4bubbles
+	andcc		%g5, FPRS_FEF, %g0				! IEU1	Group
+	be,a,pt		%icc, 1f					! CTI
+	 clr		%g7						! IEU0
+	ldx		[%g6 + TI_GSR], %g7				! Load	Group
+1:	andcc		%g5, FPRS_DL, %g0				! IEU1
+	bne,pn		%icc, 2f					! CTI
+	 fzero		%f0						! FPA
+	andcc		%g5, FPRS_DU, %g0				! IEU1  Group
+	bne,pn		%icc, 1f					! CTI
+	 fzero		%f2						! FPA
+	faddd		%f0, %f2, %f4
+	fmuld		%f0, %f2, %f6
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+	faddd		%f0, %f2, %f12
+	fmuld		%f0, %f2, %f14
+	faddd		%f0, %f2, %f16
+	fmuld		%f0, %f2, %f18
+	faddd		%f0, %f2, %f20
+	fmuld		%f0, %f2, %f22
+	faddd		%f0, %f2, %f24
+	fmuld		%f0, %f2, %f26
+	faddd		%f0, %f2, %f28
+	fmuld		%f0, %f2, %f30
+	faddd		%f0, %f2, %f32
+	fmuld		%f0, %f2, %f34
+	faddd		%f0, %f2, %f36
+	fmuld		%f0, %f2, %f38
+	faddd		%f0, %f2, %f40
+	fmuld		%f0, %f2, %f42
+	faddd		%f0, %f2, %f44
+	fmuld		%f0, %f2, %f46
+	faddd		%f0, %f2, %f48
+	fmuld		%f0, %f2, %f50
+	faddd		%f0, %f2, %f52
+	fmuld		%f0, %f2, %f54
+	faddd		%f0, %f2, %f56
+	fmuld		%f0, %f2, %f58
+	b,pt		%xcc, fpdis_exit2
+	 faddd		%f0, %f2, %f60
+1:	mov		SECONDARY_CONTEXT, %g3
+	add		%g6, TI_FPREGS + 0x80, %g1
+	faddd		%f0, %f2, %f4
+	fmuld		%f0, %f2, %f6
+	ldxa		[%g3] ASI_DMMU, %g5
+cplus_fptrap_insn_1:
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	add		%g6, TI_FPREGS + 0xc0, %g2
+	faddd		%f0, %f2, %f8
+	fmuld		%f0, %f2, %f10
+	ldda		[%g1] ASI_BLK_S, %f32	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	ldda		[%g2] ASI_BLK_S, %f48
+	faddd		%f0, %f2, %f12
+	fmuld		%f0, %f2, %f14
+	faddd		%f0, %f2, %f16
+	fmuld		%f0, %f2, %f18
+	faddd		%f0, %f2, %f20
+	fmuld		%f0, %f2, %f22
+	faddd		%f0, %f2, %f24
+	fmuld		%f0, %f2, %f26
+	faddd		%f0, %f2, %f28
+	fmuld		%f0, %f2, %f30
+	b,pt		%xcc, fpdis_exit
+	 membar		#Sync
+2:	andcc		%g5, FPRS_DU, %g0
+	bne,pt		%icc, 3f
+	 fzero		%f32
+	mov		SECONDARY_CONTEXT, %g3
+	fzero		%f34
+	ldxa		[%g3] ASI_DMMU, %g5
+	add		%g6, TI_FPREGS, %g1
+cplus_fptrap_insn_2:
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	add		%g6, TI_FPREGS + 0x40, %g2
+	faddd		%f32, %f34, %f36
+	fmuld		%f32, %f34, %f38
+	ldda		[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	ldda		[%g2] ASI_BLK_S, %f16
+	faddd		%f32, %f34, %f40
+	fmuld		%f32, %f34, %f42
+	faddd		%f32, %f34, %f44
+	fmuld		%f32, %f34, %f46
+	faddd		%f32, %f34, %f48
+	fmuld		%f32, %f34, %f50
+	faddd		%f32, %f34, %f52
+	fmuld		%f32, %f34, %f54
+	faddd		%f32, %f34, %f56
+	fmuld		%f32, %f34, %f58
+	faddd		%f32, %f34, %f60
+	fmuld		%f32, %f34, %f62
+	ba,pt		%xcc, fpdis_exit
+	 membar		#Sync
+3:	mov		SECONDARY_CONTEXT, %g3
+	add		%g6, TI_FPREGS, %g1
+	ldxa		[%g3] ASI_DMMU, %g5
+cplus_fptrap_insn_3:
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	mov		0x40, %g2
+	ldda		[%g1] ASI_BLK_S, %f0	! grrr, where is ASI_BLK_NUCLEUS 8-(
+	ldda		[%g1 + %g2] ASI_BLK_S, %f16
+	add		%g1, 0x80, %g1
+	ldda		[%g1] ASI_BLK_S, %f32
+	ldda		[%g1 + %g2] ASI_BLK_S, %f48
+	membar		#Sync
+fpdis_exit:
+	/* Restore the saved SECONDARY_CONTEXT. */
+	stxa		%g5, [%g3] ASI_DMMU
+	membar		#Sync
+fpdis_exit2:
+	wr		%g7, 0, %gsr
+	ldx		[%g6 + TI_XFSR], %fsr
+	rdpr		%tstate, %g3
+	or		%g3, %g4, %g3		! anal...
+	wrpr		%g3, %tstate
+	wr		%g0, FPRS_FEF, %fprs	! clean DU/DL bits
+	retry
+
+	.align		32
+/* Dispatch an FP-other trap to the C handler do_fpother(), then return
+ * to userspace via rtrap.
+ */
+fp_other_bounce:
+	call		do_fpother
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_fpother_check_fitos
+	.align		32
+/* fitos emulation: a user-mode unfinished-FP-op trap whose faulting
+ * instruction matches the fitos opcode (FITOS_MASK/FITOS_COMPARE) is
+ * emulated as fitod into scratch register %f62 followed by fdtos back
+ * into the destination, dispatched through two 32-entry jump tables
+ * indexed by the rs2 and rd fields.  Anything else bounces to
+ * do_fptrap_after_fsr / fp_other_bounce.
+ */
+do_fpother_check_fitos:
+	sethi		%hi(fp_other_bounce - 4), %g7
+	or		%g7, %lo(fp_other_bounce - 4), %g7
+
+	/* NOTE: Need to preserve %g7 until we fully commit
+	 *       to the fitos fixup.
+	 */
+	stx		%fsr, [%g6 + TI_XFSR]
+	rdpr		%tstate, %g3
+	andcc		%g3, TSTATE_PRIV, %g0
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 nop
+	ldx		[%g6 + TI_XFSR], %g3
+	srlx		%g3, 14, %g1
+	and		%g1, 7, %g1
+	cmp		%g1, 2			! Unfinished FP-OP
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 sethi		%hi(1 << 23), %g1	! Inexact
+	andcc		%g3, %g1, %g0
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 rdpr		%tpc, %g1
+	lduwa		[%g1] ASI_AIUP, %g3	! This cannot ever fail
+#define	FITOS_MASK	0xc1f83fe0
+#define	FITOS_COMPARE	0x81a01880
+	sethi		%hi(FITOS_MASK), %g1
+	or		%g1, %lo(FITOS_MASK), %g1
+	and		%g3, %g1, %g1
+	sethi		%hi(FITOS_COMPARE), %g2
+	or		%g2, %lo(FITOS_COMPARE), %g2
+	cmp		%g1, %g2
+	bne,pn		%xcc, do_fptrap_after_fsr
+	 nop
+	std		%f62, [%g6 + TI_FPREGS + (62 * 4)]
+	sethi		%hi(fitos_table_1), %g1
+	and		%g3, 0x1f, %g2
+	or		%g1, %lo(fitos_table_1),  %g1
+	sllx		%g2, 2, %g2
+	jmpl		%g1 + %g2, %g0
+	 ba,pt		%xcc, fitos_emul_continue
+
+/* Jump table indexed by the instruction's rs2 field: convert the
+ * selected single source register to double in scratch %f62.
+ */
+fitos_table_1:
+	fitod		%f0, %f62
+	fitod		%f1, %f62
+	fitod		%f2, %f62
+	fitod		%f3, %f62
+	fitod		%f4, %f62
+	fitod		%f5, %f62
+	fitod		%f6, %f62
+	fitod		%f7, %f62
+	fitod		%f8, %f62
+	fitod		%f9, %f62
+	fitod		%f10, %f62
+	fitod		%f11, %f62
+	fitod		%f12, %f62
+	fitod		%f13, %f62
+	fitod		%f14, %f62
+	fitod		%f15, %f62
+	fitod		%f16, %f62
+	fitod		%f17, %f62
+	fitod		%f18, %f62
+	fitod		%f19, %f62
+	fitod		%f20, %f62
+	fitod		%f21, %f62
+	fitod		%f22, %f62
+	fitod		%f23, %f62
+	fitod		%f24, %f62
+	fitod		%f25, %f62
+	fitod		%f26, %f62
+	fitod		%f27, %f62
+	fitod		%f28, %f62
+	fitod		%f29, %f62
+	fitod		%f30, %f62
+	fitod		%f31, %f62
+
+fitos_emul_continue:
+	sethi		%hi(fitos_table_2), %g1
+	srl		%g3, 25, %g2
+	or		%g1, %lo(fitos_table_2), %g1
+	and		%g2, 0x1f, %g2
+	sllx		%g2, 2, %g2
+	jmpl		%g1 + %g2, %g0
+	 ba,pt		%xcc, fitos_emul_fini
+
+/* Jump table indexed by the instruction's rd field: convert scratch
+ * %f62 back to single precision in the selected destination register.
+ */
+fitos_table_2:
+	fdtos		%f62, %f0
+	fdtos		%f62, %f1
+	fdtos		%f62, %f2
+	fdtos		%f62, %f3
+	fdtos		%f62, %f4
+	fdtos		%f62, %f5
+	fdtos		%f62, %f6
+	fdtos		%f62, %f7
+	fdtos		%f62, %f8
+	fdtos		%f62, %f9
+	fdtos		%f62, %f10
+	fdtos		%f62, %f11
+	fdtos		%f62, %f12
+	fdtos		%f62, %f13
+	fdtos		%f62, %f14
+	fdtos		%f62, %f15
+	fdtos		%f62, %f16
+	fdtos		%f62, %f17
+	fdtos		%f62, %f18
+	fdtos		%f62, %f19
+	fdtos		%f62, %f20
+	fdtos		%f62, %f21
+	fdtos		%f62, %f22
+	fdtos		%f62, %f23
+	fdtos		%f62, %f24
+	fdtos		%f62, %f25
+	fdtos		%f62, %f26
+	fdtos		%f62, %f27
+	fdtos		%f62, %f28
+	fdtos		%f62, %f29
+	fdtos		%f62, %f30
+	fdtos		%f62, %f31
+
+fitos_emul_fini:
+	/* Restore the clobbered scratch register and return. */
+	ldd		[%g6 + TI_FPREGS + (62 * 4)], %f62
+	done
+
+	.globl		do_fptrap
+	.align		32
+ /* Generic FP trap path: save FSR/GSR and whichever FP register
+ * halves are live (per %fprs DL/DU) into the thread_info save
+ * area, then fall into etrap with FPU disabled.
+ */
+do_fptrap:
+	stx		%fsr, [%g6 + TI_XFSR]
+do_fptrap_after_fsr:
+ /* Record which halves of the FP file were dirty. */
+	ldub		[%g6 + TI_FPSAVED], %g3
+	rd		%fprs, %g1
+	or		%g3, %g1, %g3
+	stb		%g3, [%g6 + TI_FPSAVED]
+	rd		%gsr, %g3
+	stx		%g3, [%g6 + TI_GSR]
+ /* Temporarily switch the secondary context (patched at boot for
+ * cheetah+ via cplus_fptrap_insn_4) so the block stores below go
+ * to the right context; %g5 holds the saved context.
+ */
+	mov		SECONDARY_CONTEXT, %g3
+	ldxa		[%g3] ASI_DMMU, %g5
+cplus_fptrap_insn_4:
+	sethi		%hi(0), %g2
+	stxa		%g2, [%g3] ASI_DMMU
+	membar		#Sync
+	add		%g6, TI_FPREGS, %g2
+ /* Block-store lower (f0-f31) and/or upper (f32-f63) halves as
+ * indicated by FPRS_DL / FPRS_DU.
+ */
+	andcc		%g1, FPRS_DL, %g0
+	be,pn		%icc, 4f
+	 mov		0x40, %g3
+	stda		%f0, [%g2] ASI_BLK_S
+	stda		%f16, [%g2 + %g3] ASI_BLK_S
+	andcc		%g1, FPRS_DU, %g0
+	be,pn		%icc, 5f
+4:	 add		%g2, 128, %g2
+	stda		%f32, [%g2] ASI_BLK_S
+	stda		%f48, [%g2 + %g3] ASI_BLK_S
+ /* Restore the saved secondary context and enter etrap with the
+ * FPU turned off.
+ */
+5:	mov		SECONDARY_CONTEXT, %g1
+	membar		#Sync
+	stxa		%g5, [%g1] ASI_DMMU
+	membar		#Sync
+	ba,pt		%xcc, etrap
+	 wr		%g0, 0, %fprs
+
+ /* Template instruction used for boot-time patching below:
+ * loads the cheetah+ context-zero constant instead of zero.
+ */
+cplus_fptrap_1:
+	sethi		%hi(CTX_CHEETAH_PLUS_CTX0), %g2
+
+	.globl		cheetah_plus_patch_fpdis
+ /* Called once at boot on cheetah+ CPUs: copy the template
+ * instruction above over the four cplus_fptrap_insn_N sites so
+ * the FP save/restore paths use CTX_CHEETAH_PLUS_CTX0.
+ */
+cheetah_plus_patch_fpdis:
+	/* We configure the dTLB512_0 for 4MB pages and the
+	 * dTLB512_1 for 8K pages when in context zero.
+	 */
+	sethi			%hi(cplus_fptrap_1), %o0
+	lduw			[%o0 + %lo(cplus_fptrap_1)], %o1
+
+	set			cplus_fptrap_insn_1, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+	set			cplus_fptrap_insn_2, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+	set			cplus_fptrap_insn_3, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+	set			cplus_fptrap_insn_4, %o2
+	stw			%o1, [%o2]
+	flush			%o2
+
+	retl
+	 nop
+
+	/* The registers for cross calls will be:
+	 *
+	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this
+	 *         [high 32-bits] MMU Context Argument 0, place in %g5
+	 * DATA 1: Address Argument 1, place in %g6
+	 * DATA 2: Address Argument 2, place in %g7
+	 *
+	 * With this method we can do most of the cross-call tlb/cache
+	 * flushing very quickly.
+	 *
+	 * Current CPU's IRQ worklist table is locked into %g1,
+	 * don't touch.
+	 */
+	.text
+	.align		32
+	.globl		do_ivec
+ /* Interrupt-vector trap handler.  Reads incoming dispatch data
+ * from the ASI_INTR_R registers; values >= KERNBASE are treated
+ * as cross-call function addresses, anything else as an index
+ * into ivector_table.
+ */
+do_ivec:
+	mov		0x40, %g3
+	ldxa		[%g3 + %g0] ASI_INTR_R, %g3
+	sethi		%hi(KERNBASE), %g4
+	cmp		%g3, %g4
+	bgeu,pn		%xcc, do_ivec_xcall
+	 srlx		%g3, 32, %g5
+ /* Device interrupt: ack the receive register, then queue the
+ * bucket on this CPU's per-PIL worklist and raise the softint.
+ */
+	stxa		%g0, [%g0] ASI_INTR_RECEIVE
+	membar		#Sync
+
+	sethi		%hi(ivector_table), %g2
+	sllx		%g3, 5, %g3
+	or		%g2, %lo(ivector_table), %g2
+	add		%g2, %g3, %g3
+	ldx		[%g3 + 0x08], %g2	/* irq_info */
+	ldub		[%g3 + 0x04], %g4	/* pil */
+	brz,pn		%g2, do_ivec_spurious
+	 mov		1, %g2
+
+	sllx		%g2, %g4, %g2
+	sllx		%g4, 2, %g4
+	lduw		[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
+	stw		%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
+	stw		%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
+	wr		%g2, 0x0, %set_softint
+	retry
+ /* Cross call: fetch args 1 and 2, ack, then jump to the function
+ * address delivered in DATA 0.
+ */
+do_ivec_xcall:
+	mov		0x50, %g1
+
+	ldxa		[%g1 + %g0] ASI_INTR_R, %g1
+	srl		%g3, 0, %g3
+	mov		0x60, %g7
+	ldxa		[%g7 + %g0] ASI_INTR_R, %g7
+	stxa		%g0, [%g0] ASI_INTR_RECEIVE
+	membar		#Sync
+	ba,pt		%xcc, 1f
+	 nop
+
+	.align		32
+1:	jmpl		%g3, %g0
+	 nop
+
+ /* No irq_info registered for this vector: log it via the C
+ * routine catch_disabled_ivec().
+ */
+do_ivec_spurious:
+	stw		%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
+	rdpr		%pstate, %g5
+
+	wrpr		%g5, PSTATE_IG | PSTATE_AG, %pstate
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	call		catch_disabled_ivec
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		save_alternate_globals
+ /* Save all three alternate-global register sets (AG, IG, MG)
+ * into the 0xc0-byte save area at %o0, with interrupts disabled
+ * while switching register sets.  PSTATE is restored on exit.
+ */
+save_alternate_globals: /* %o0 = save_area */
+	rdpr		%pstate, %o5
+	andn		%o5, PSTATE_IE, %o1
+	wrpr		%o1, PSTATE_AG, %pstate
+	stx		%g0, [%o0 + 0x00]
+	stx		%g1, [%o0 + 0x08]
+	stx		%g2, [%o0 + 0x10]
+	stx		%g3, [%o0 + 0x18]
+	stx		%g4, [%o0 + 0x20]
+	stx		%g5, [%o0 + 0x28]
+	stx		%g6, [%o0 + 0x30]
+	stx		%g7, [%o0 + 0x38]
+	wrpr		%o1, PSTATE_IG, %pstate
+	stx		%g0, [%o0 + 0x40]
+	stx		%g1, [%o0 + 0x48]
+	stx		%g2, [%o0 + 0x50]
+	stx		%g3, [%o0 + 0x58]
+	stx		%g4, [%o0 + 0x60]
+	stx		%g5, [%o0 + 0x68]
+	stx		%g6, [%o0 + 0x70]
+	stx		%g7, [%o0 + 0x78]
+	wrpr		%o1, PSTATE_MG, %pstate
+	stx		%g0, [%o0 + 0x80]
+	stx		%g1, [%o0 + 0x88]
+	stx		%g2, [%o0 + 0x90]
+	stx		%g3, [%o0 + 0x98]
+	stx		%g4, [%o0 + 0xa0]
+	stx		%g5, [%o0 + 0xa8]
+	stx		%g6, [%o0 + 0xb0]
+	stx		%g7, [%o0 + 0xb8]
+	wrpr		%o5, 0x0, %pstate
+	retl
+	 nop
+
+	.globl		restore_alternate_globals
+ /* Inverse of save_alternate_globals: reload the AG, IG and MG
+ * register sets from the save area at %o0.
+ */
+restore_alternate_globals: /* %o0 = save_area */
+	rdpr		%pstate, %o5
+	andn		%o5, PSTATE_IE, %o1
+	wrpr		%o1, PSTATE_AG, %pstate
+	ldx		[%o0 + 0x00], %g0
+	ldx		[%o0 + 0x08], %g1
+	ldx		[%o0 + 0x10], %g2
+	ldx		[%o0 + 0x18], %g3
+	ldx		[%o0 + 0x20], %g4
+	ldx		[%o0 + 0x28], %g5
+	ldx		[%o0 + 0x30], %g6
+	ldx		[%o0 + 0x38], %g7
+	wrpr		%o1, PSTATE_IG, %pstate
+	ldx		[%o0 + 0x40], %g0
+	ldx		[%o0 + 0x48], %g1
+	ldx		[%o0 + 0x50], %g2
+	ldx		[%o0 + 0x58], %g3
+	ldx		[%o0 + 0x60], %g4
+	ldx		[%o0 + 0x68], %g5
+	ldx		[%o0 + 0x70], %g6
+	ldx		[%o0 + 0x78], %g7
+	wrpr		%o1, PSTATE_MG, %pstate
+	ldx		[%o0 + 0x80], %g0
+	ldx		[%o0 + 0x88], %g1
+	ldx		[%o0 + 0x90], %g2
+	ldx		[%o0 + 0x98], %g3
+	ldx		[%o0 + 0xa0], %g4
+	ldx		[%o0 + 0xa8], %g5
+	ldx		[%o0 + 0xb0], %g6
+	ldx		[%o0 + 0xb8], %g7
+	wrpr		%o5, 0x0, %pstate
+	retl
+	 nop
+
+	.globl		getcc, setcc
+ /* getcc: extract the ICC condition codes from pt_regs->tstate
+ * (bits 35:32) and store them into the saved %g1 slot.
+ * %o0 = pt_regs pointer.
+ */
+getcc:
+	ldx		[%o0 + PT_V9_TSTATE], %o1
+	srlx		%o1, 32, %o1
+	and		%o1, 0xf, %o1
+	retl
+	 stx		%o1, [%o0 + PT_V9_G1]
+ /* setcc: inverse operation — take the condition codes from the
+ * saved %g1 slot and merge them into pt_regs->tstate.
+ */
+setcc:
+	ldx		[%o0 + PT_V9_TSTATE], %o1
+	ldx		[%o0 + PT_V9_G1], %o2
+	or		%g0, %ulo(TSTATE_ICC), %o3
+	sllx		%o3, 32, %o3
+	andn		%o1, %o3, %o1
+	sllx		%o2, 32, %o2
+	and		%o2, %o3, %o2
+	or		%o1, %o2, %o1
+	retl
+	 stx		%o1, [%o0 + PT_V9_TSTATE]
+
+	.globl		utrap, utrap_ill
+ /* User-trap dispatch: %g1 holds the registered user handler
+ * address (zero means none -> take the normal etrap path).
+ * Redirects the trap return to the user handler while keeping
+ * the current CWP in %tstate.
+ */
+utrap:	brz,pn		%g1, etrap
+	 nop
+	save		%sp, -128, %sp
+	rdpr		%tstate, %l6
+	rdpr		%cwp, %l7
+	andn		%l6, TSTATE_CWP, %l6
+	wrpr		%l6, %l7, %tstate
+	rdpr		%tpc, %l6
+	rdpr		%tnpc, %l7
+	wrpr		%g1, 0, %tnpc
+	done
+ /* Illegal user trap: report via bad_trap() and return. */
+utrap_ill:
+	call		bad_trap
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+#ifdef CONFIG_BLK_DEV_FD
+	.globl		floppy_hardint
+ /* Floppy pseudo-DMA hard interrupt.  While doing_pdma is set,
+ * shovel bytes between the FDC data port (physical address in
+ * fdc_status+1, accessed via ASI_PHYS_BYPASS_EC_E) and the
+ * kernel buffer at pdma_vaddr, pdma_size bytes remaining.
+ * Falls through to the soft-interrupt path when not in pdma
+ * mode or when the transfer completes.
+ */
+floppy_hardint:
+	wr		%g0, (1 << 11), %clear_softint
+	sethi		%hi(doing_pdma), %g1
+	ld		[%g1 + %lo(doing_pdma)], %g2
+	brz,pn		%g2, floppy_dosoftint
+	 sethi		%hi(fdc_status), %g3
+	ldx		[%g3 + %lo(fdc_status)], %g3
+	sethi		%hi(pdma_vaddr), %g5
+	ldx		[%g5 + %lo(pdma_vaddr)], %g4
+	sethi		%hi(pdma_size), %g5
+	ldx		[%g5 + %lo(pdma_size)], %g5
+
+ /* Poll the FDC main status register: bit7 = request, bit5 =
+ * non-DMA execution phase, bit6 = direction (set = read).
+ */
+next_byte:
+	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
+	andcc		%g7, 0x80, %g0
+	be,pn		%icc, floppy_fifo_emptied
+	 andcc		%g7, 0x20, %g0
+	be,pn		%icc, floppy_overrun
+	 andcc		%g7, 0x40, %g0
+	be,pn		%icc, floppy_write
+	 sub		%g5, 1, %g5
+
+ /* Read direction: fetch a byte from the data port (+1) and
+ * store it into the buffer, looping until pdma_size runs out.
+ */
+	inc		%g3
+	lduba		[%g3] ASI_PHYS_BYPASS_EC_E, %g7
+	dec		%g3
+	orcc		%g0, %g5, %g0
+	stb		%g7, [%g4]
+	bne,pn		%xcc, next_byte
+	 add		%g4, 1, %g4
+
+	b,pt		%xcc, floppy_tdone
+	 nop
+
+ /* Write direction: push buffer bytes out through the data port. */
+floppy_write:
+	ldub		[%g4], %g7
+	orcc		%g0, %g5, %g0
+	inc		%g3
+	stba		%g7, [%g3] ASI_PHYS_BYPASS_EC_E
+	dec		%g3
+	bne,pn		%xcc, next_byte
+	 add		%g4, 1, %g4
+
+ /* Transfer done: write back pointers, pulse AUX1 terminal-count
+ * via the auxio register, then clear doing_pdma and take the
+ * soft-interrupt path.
+ */
+floppy_tdone:
+	sethi		%hi(pdma_vaddr), %g1
+	stx		%g4, [%g1 + %lo(pdma_vaddr)]
+	sethi		%hi(pdma_size), %g1
+	stx		%g5, [%g1 + %lo(pdma_size)]
+	sethi		%hi(auxio_register), %g1
+	ldx		[%g1 + %lo(auxio_register)], %g7
+	lduba		[%g7] ASI_PHYS_BYPASS_EC_E, %g5
+	or		%g5, AUXIO_AUX1_FTCNT, %g5
+/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */
+	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
+	andn		%g5, AUXIO_AUX1_FTCNT, %g5
+/*	andn		%g5, AUXIO_AUX1_MASK, %g5 */
+
+ /* Delay between the two auxio writes (hardware settling time). */
+	nop; nop; nop; nop; nop; nop;
+	nop; nop; nop; nop; nop; nop;
+
+	stba		%g5, [%g7] ASI_PHYS_BYPASS_EC_E
+	sethi		%hi(doing_pdma), %g1
+	b,pt		%xcc, floppy_dosoftint
+	 st		%g0, [%g1 + %lo(doing_pdma)]
+
+ /* FIFO drained but transfer not complete: save state and ack the
+ * interrupt at the bucket's ICLR register, then retry.
+ */
+floppy_fifo_emptied:
+	sethi		%hi(pdma_vaddr), %g1
+	stx		%g4, [%g1 + %lo(pdma_vaddr)]
+	sethi		%hi(pdma_size), %g1
+	stx		%g5, [%g1 + %lo(pdma_size)]
+	sethi		%hi(irq_action), %g1
+	or		%g1, %lo(irq_action), %g1
+	ldx		[%g1 + (11 << 3)], %g3		! irqaction[floppy_irq]
+	ldx		[%g3 + 0x08], %g4		! action->flags>>48==ino
+	sethi		%hi(ivector_table), %g3
+	srlx		%g4, 48, %g4
+	or		%g3, %lo(ivector_table), %g3
+	sllx		%g4, 5, %g4
+	ldx		[%g3 + %g4], %g4		! &ivector_table[ino]
+	ldx		[%g4 + 0x10], %g4		! bucket->iclr
+	stwa		%g0, [%g4] ASI_PHYS_BYPASS_EC_E	! ICLR_IDLE
+	membar		#Sync				! probably not needed...
+	retry
+
+ /* Overrun: record state, stop pdma mode, fall into softint. */
+floppy_overrun:
+	sethi		%hi(pdma_vaddr), %g1
+	stx		%g4, [%g1 + %lo(pdma_vaddr)]
+	sethi		%hi(pdma_size), %g1
+	stx		%g5, [%g1 + %lo(pdma_size)]
+	sethi		%hi(doing_pdma), %g1
+	st		%g0, [%g1 + %lo(doing_pdma)]
+
+ /* Enter the kernel at PIL 15 and run the C floppy handler. */
+floppy_dosoftint:
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	sethi		%hi(109f), %g7
+	b,pt		%xcc, etrap_irq
+109:	 or		%g7, %lo(109b), %g7
+
+	mov		11, %o0
+	mov		0, %o1
+	call		sparc_floppy_irq
+	 add		%sp, PTREGS_OFF, %o2
+
+	b,pt		%xcc, rtrap_irq
+	 nop
+
+#endif /* CONFIG_BLK_DEV_FD */
+
+	/* XXX Here is stuff we still need to write... -DaveM XXX */
+	.globl		netbsd_syscall
+ /* NetBSD syscall emulation entry — intentionally a stub. */
+netbsd_syscall:
+	retl
+	 nop
+
+	/* These next few routines must be sure to clear the
+	 * SFSR FaultValid bit so that the fast tlb data protection
+	 * handler does not flush the wrong context and lock up the
+	 * box.
+	 */
+	.globl		__do_data_access_exception
+	.globl		__do_data_access_exception_tl1
+ /* TL>1 variant: capture SFSR/SFAR, clear FaultValid and hand
+ * off to the window fixup code (winfix_dax) with %g3 = %tpc.
+ */
+__do_data_access_exception_tl1:
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
+	membar		#Sync
+	ba,pt		%xcc, winfix_dax
+	 rdpr		%tpc, %g3
+ /* TL=1 variant: capture SFSR/SFAR, clear FaultValid, then enter
+ * the kernel and call data_access_exception(regs, sfsr, sfar).
+ * (%l4/%l5 carry SFSR/SFAR across etrap.)
+ */
+__do_data_access_exception:
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		data_access_exception
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		__do_instruction_access_exception
+	.globl		__do_instruction_access_exception_tl1
+ /* Instruction access exception handlers (TL>1 and TL=1).
+ * NOTE(review): the SFSR/SFAR reads use ASI_DMMU while the
+ * FaultValid clear uses ASI_IMMU — presumably intentional for
+ * this chip generation, but worth confirming against the
+ * UltraSPARC MMU documentation.
+ */
+__do_instruction_access_exception_tl1:
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etraptl1
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		instruction_access_exception_tl1
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+__do_instruction_access_exception:
+	rdpr		%pstate, %g4
+	wrpr		%g4, PSTATE_MG|PSTATE_AG, %pstate
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g5
+	ldxa		[%g3] ASI_DMMU, %g4	! Get SFSR
+	ldxa		[%g5] ASI_DMMU, %g5	! Get SFAR
+	stxa		%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		instruction_access_exception
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	/* This is the trap handler entry point for ECC correctable
+	 * errors.  They are corrected, but we listen for the trap
+	 * so that the event can be logged.
+	 *
+	 * Disrupting errors are either:
+	 * 1) single-bit ECC errors during UDB reads to system
+	 *    memory
+	 * 2) data parity errors during write-back events
+	 *
+	 * As far as I can make out from the manual, the CEE trap
+	 * is only for correctable errors during memory read
+	 * accesses by the front-end of the processor.
+	 *
+	 * The code below is only for trap level 1 CEE events,
+	 * as it is the only situation where we can safely record
+	 * and log.  For trap level >1 we just clear the CE bit
+	 * in the AFSR and return.
+	 */
+
+	/* Our trap handling infrastructure allows us to preserve
+	 * two 64-bit values during etrap for arguments to
+	 * subsequent C code.  Therefore we encode the information
+	 * as follows:
+	 *
+	 * value 1) Full 64-bits of AFAR
+	 * value 2) Low 33-bits of AFSR, then bits 33-->42
+	 *          are UDBL error status and bits 43-->52
+	 *          are UDBH error status
+	 */
+	.align	64
+	.globl	cee_trap
+cee_trap:
+	ldxa	[%g0] ASI_AFSR, %g1		! Read AFSR
+	ldxa	[%g0] ASI_AFAR, %g2		! Read AFAR
+	sllx	%g1, 31, %g1			! Clear reserved bits
+	srlx	%g1, 31, %g1			! in AFSR
+
+	/* NOTE: UltraSparc-I/II have high and low UDB error
+	 *       registers, corresponding to the two UDB units
+	 *       present on those chips.  UltraSparc-IIi only
+	 *       has a single UDB, called "SDB" in the manual.
+	 *       For IIi the upper UDB register always reads
+	 *       as zero so for our purposes things will just
+	 *       work with the checks below.
+	 */
+	ldxa	[%g0] ASI_UDBL_ERROR_R, %g3	! Read UDB-Low error status
+	andcc	%g3, (1 << 8), %g4		! Check CE bit
+	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
+	srlx	%g3, (64 - 10), %g3		! in UDB-Low error status
+
+	sllx	%g3, (33 + 0), %g3		! Shift up to encoding area
+	or	%g1, %g3, %g1			! Or it in
+	be,pn	%xcc, 1f			! Branch if CE bit was clear
+	 nop
+	stxa	%g4, [%g0] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBL
+	membar	#Sync				! Synchronize ASI stores
+1:	mov	0x18, %g5			! Addr of UDB-High error status
+	ldxa	[%g5] ASI_UDBH_ERROR_R, %g3	! Read it
+
+	andcc	%g3, (1 << 8), %g4		! Check CE bit
+	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
+	srlx	%g3, (64 - 10), %g3		! in UDB-High error status
+	sllx	%g3, (33 + 10), %g3		! Shift up to encoding area
+	or	%g1, %g3, %g1			! Or it in
+	be,pn	%xcc, 1f			! Branch if CE bit was clear
+	 nop
+	nop
+
+	stxa	%g4, [%g5] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBH
+	membar	#Sync				! Synchronize ASI stores
+1:	mov	1, %g5				! AFSR CE bit is
+	sllx	%g5, 20, %g5			! bit 20
+	stxa	%g5, [%g0] ASI_AFSR		! Clear CE sticky bit in AFSR
+	membar	#Sync				! Synchronize ASI stores
+	sllx	%g2, (64 - 41), %g2		! Clear reserved bits
+	srlx	%g2, (64 - 41), %g2		! in latched AFAR
+
+	andn	%g2, 0x0f, %g2			! Finish resv bit clearing
+	mov	%g1, %g4			! Move AFSR+UDB* into save reg
+	mov	%g2, %g5			! Move AFAR into save reg
+	rdpr	%pil, %g2
+	wrpr	%g0, 15, %pil
+	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+ /* Back from etrap_irq: %l4/%l5 hold the encoded AFSR and AFAR;
+ * log the event via cee_log(afsr, afar, regs).
+ */
+	mov	%l4, %o0
+
+	mov	%l5, %o1
+	call	cee_log
+	 add	%sp, PTREGS_OFF, %o2
+	ba,a,pt	%xcc, rtrap_irq
+
+	/* Capture I/D/E-cache state into per-cpu error scoreboard.
+	 *
+	 * %g1:		(TL>=0) ? 1 : 0
+	 * %g2:		scratch
+	 * %g3:		scratch
+	 * %g4:		AFSR
+	 * %g5:		AFAR
+	 * %g6:		current thread ptr
+	 * %g7:		scratch
+	 */
+#define CHEETAH_LOG_ERROR						\
+	/* Put "TL1" software bit into AFSR. */				\
+	and		%g1, 0x1, %g1;					\
+	sllx		%g1, 63, %g2;					\
+	or		%g4, %g2, %g4;					\
+	/* Get log entry pointer for this cpu at this trap level. */	\
+	BRANCH_IF_JALAPENO(g2,g3,50f)					\
+	/* Safari (cheetah/cheetah+): cpuid from SAFARI_CONFIG. */	\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, %g2;			\
+	srlx		%g2, 17, %g2;					\
+	ba,pt		%xcc, 60f;					\
+	 and		%g2, 0x3ff, %g2;				\
+	/* Jalapeno: cpuid lives in JBUS_CONFIG instead. */		\
+50:	ldxa		[%g0] ASI_JBUS_CONFIG, %g2;			\
+	srlx		%g2, 17, %g2;					\
+	and		%g2, 0x1f, %g2;					\
+60:	sllx		%g2, 9, %g2;					\
+	sethi		%hi(cheetah_error_log), %g3;			\
+	ldx		[%g3 + %lo(cheetah_error_log)], %g3;		\
+	brz,pn		%g3, 80f;					\
+	 nop;								\
+	add		%g3, %g2, %g3;					\
+	sllx		%g1, 8, %g1;					\
+	add		%g3, %g1, %g1;					\
+	/* %g1 holds pointer to the top of the logging scoreboard */	\
+	/* Only claim a free (-1) slot; otherwise skip logging. */	\
+	ldx		[%g1 + 0x0], %g7;				\
+	cmp		%g7, -1;					\
+	bne,pn		%xcc, 80f;					\
+	 nop;								\
+	stx		%g4, [%g1 + 0x0];				\
+	stx		%g5, [%g1 + 0x8];				\
+	add		%g1, 0x10, %g1;					\
+	/* %g1 now points to D-cache logging area */			\
+	set		0x3ff8, %g2;	/* DC_addr mask */		\
+	and		%g5, %g2, %g2;	/* DC_addr bits of AFAR */	\
+	srlx		%g5, 12, %g3;					\
+	or		%g3, 1, %g3;	/* PHYS tag + valid */		\
+	/* Walk the 4 D-cache ways looking for a matching tag. */	\
+10:	ldxa		[%g2] ASI_DCACHE_TAG, %g7;			\
+	cmp		%g3, %g7;	/* TAG match? */		\
+	bne,pt		%xcc, 13f;					\
+	 nop;								\
+	/* Yep, what we want, capture state. */				\
+	stx		%g2, [%g1 + 0x20];				\
+	stx		%g7, [%g1 + 0x28];				\
+	/* A membar Sync is required before and after utag access. */	\
+	membar		#Sync;						\
+	ldxa		[%g2] ASI_DCACHE_UTAG, %g7;			\
+	membar		#Sync;						\
+	stx		%g7, [%g1 + 0x30];				\
+	ldxa		[%g2] ASI_DCACHE_SNOOP_TAG, %g7;		\
+	stx		%g7, [%g1 + 0x38];				\
+	clr		%g3;						\
+	/* Dump the 4 x 8-byte data words of the matching line. */	\
+12:	ldxa		[%g2 + %g3] ASI_DCACHE_DATA, %g7;		\
+	stx		%g7, [%g1];					\
+	add		%g3, (1 << 5), %g3;				\
+	cmp		%g3, (4 << 5);					\
+	bl,pt		%xcc, 12b;					\
+	 add		%g1, 0x8, %g1;					\
+	ba,pt		%xcc, 20f;					\
+	 add		%g1, 0x20, %g1;					\
+13:	sethi		%hi(1 << 14), %g7;				\
+	add		%g2, %g7, %g2;					\
+	srlx		%g2, 14, %g7;					\
+	cmp		%g7, 4;						\
+	bl,pt		%xcc, 10b;					\
+	 nop;								\
+	add		%g1, 0x40, %g1;					\
+20:	/* %g1 now points to I-cache logging area */			\
+	set		0x1fe0, %g2;	/* IC_addr mask */		\
+	and		%g5, %g2, %g2;	/* IC_addr bits of AFAR */	\
+	sllx		%g2, 1, %g2;	/* IC_addr[13:6]==VA[12:5] */	\
+	srlx		%g5, (13 - 8), %g3; /* Make PTAG */		\
+	andn		%g3, 0xff, %g3;	/* Mask off undefined bits */	\
+21:	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	andn		%g7, 0xff, %g7;					\
+	cmp		%g3, %g7;					\
+	bne,pt		%xcc, 23f;					\
+	 nop;								\
+	/* Yep, what we want, capture state. */				\
+	stx		%g2, [%g1 + 0x40];				\
+	stx		%g7, [%g1 + 0x48];				\
+	add		%g2, (1 << 3), %g2;				\
+	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	add		%g2, (1 << 3), %g2;				\
+	stx		%g7, [%g1 + 0x50];				\
+	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	add		%g2, (1 << 3), %g2;				\
+	stx		%g7, [%g1 + 0x60];				\
+	ldxa		[%g2] ASI_IC_TAG, %g7;				\
+	stx		%g7, [%g1 + 0x68];				\
+	sub		%g2, (3 << 3), %g2;				\
+	ldxa		[%g2] ASI_IC_STAG, %g7;				\
+	stx		%g7, [%g1 + 0x58];				\
+	clr		%g3;						\
+	srlx		%g2, 2, %g2;					\
+	/* Dump the 8 x 8-byte instruction words of the line. */	\
+22:	ldxa		[%g2 + %g3] ASI_IC_INSTR, %g7;			\
+	stx		%g7, [%g1];					\
+	add		%g3, (1 << 3), %g3;				\
+	cmp		%g3, (8 << 3);					\
+	bl,pt		%xcc, 22b;					\
+	 add		%g1, 0x8, %g1;					\
+	ba,pt		%xcc, 30f;					\
+	 add		%g1, 0x30, %g1;					\
+23:	sethi		%hi(1 << 14), %g7;				\
+	add		%g2, %g7, %g2;					\
+	srlx		%g2, 14, %g7;					\
+	cmp		%g7, 4;						\
+	bl,pt		%xcc, 21b;					\
+	 nop;								\
+	add		%g1, 0x70, %g1;					\
+30:	/* %g1 now points to E-cache logging area */			\
+	andn		%g5, (32 - 1), %g2; /* E-cache subblock */	\
+	stx		%g2, [%g1 + 0x20];				\
+	ldxa		[%g2] ASI_EC_TAG_DATA, %g7;			\
+	stx		%g7, [%g1 + 0x28];				\
+	ldxa		[%g2] ASI_EC_R, %g0;				\
+	clr		%g3;						\
+31:	ldxa		[%g3] ASI_EC_DATA, %g7;			\
+	stx		%g7, [%g1 + %g3];				\
+	add		%g3, 0x8, %g3;					\
+	cmp		%g3, 0x20;					\
+	bl,pt		%xcc, 31b;					\
+	 nop;							\
+80:	/* DONE */
+
+	/* These get patched into the trap table at boot time
+	 * once we know we have a cheetah processor.
+	 */
+	.globl		cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1
+ /* Fast-ECC vectors: disable D-cache and I-cache in the DCU so
+ * corrupt lines are frozen, then jump to cheetah_fast_ecc with
+ * %g1 = 0 (TL=0 entry) or 1 (TL>=1 entry).
+ */
+cheetah_fecc_trap_vector:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_DC | DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_fast_ecc), %g2
+	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
+	 mov		0, %g1
+cheetah_fecc_trap_vector_tl1:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_DC | DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_fast_ecc), %g2
+	jmpl		%g2 + %lo(cheetah_fast_ecc), %g0
+	 mov		1, %g1
+	.globl	cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1
+ /* Correctable-ECC vectors: only the I-cache is disabled here. */
+cheetah_cee_trap_vector:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_cee), %g2
+	jmpl		%g2 + %lo(cheetah_cee), %g0
+	 mov		0, %g1
+cheetah_cee_trap_vector_tl1:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	andn		%g1, DCU_IC, %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	sethi		%hi(cheetah_cee), %g2
+	jmpl		%g2 + %lo(cheetah_cee), %g0
+	 mov		1, %g1
+	.globl	cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1
+ /* Deferred-error vectors: both caches disabled, as for fast ECC. */
+cheetah_deferred_trap_vector:
+	membar		#Sync
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
+	andn		%g1, DCU_DC | DCU_IC, %g1;
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
+	membar		#Sync;
+	sethi		%hi(cheetah_deferred_trap), %g2
+	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
+	 mov		0, %g1
+cheetah_deferred_trap_vector_tl1:
+	membar		#Sync;
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1;
+	andn		%g1, DCU_DC | DCU_IC, %g1;
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG;
+	membar		#Sync;
+	sethi		%hi(cheetah_deferred_trap), %g2
+	jmpl		%g2 + %lo(cheetah_deferred_trap), %g0
+	 mov		1, %g1
+
+	/* Cheetah+ specific traps. These are for the new I/D cache parity
+	 * error traps.  The first argument to cheetah_plus_parity_handler
+	 * is encoded as follows:
+	 *
+	 * Bit0: 0=dcache,1=icache
+	 * Bit1: 0=recoverable,1=unrecoverable
+	 */
+	.globl		cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1
+ /* D-cache parity error at TL=0: enter the kernel and report as
+ * recoverable (arg 0x0).  The nops pad the vector slot.
+ */
+cheetah_plus_dcpe_trap_vector:
+	membar		#Sync
+	sethi		%hi(do_cheetah_plus_data_parity), %g7
+	jmpl		%g7 + %lo(do_cheetah_plus_data_parity), %g0
+	 nop
+	nop
+	nop
+	nop
+	nop
+
+do_cheetah_plus_data_parity:
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	mov		0x0, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+ /* D-cache parity error at TL>=1: switch to interrupt globals and
+ * go to the in-kernel recovery path do_dcpe_tl1.
+ */
+cheetah_plus_dcpe_trap_vector_tl1:
+	membar		#Sync
+	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
+	sethi		%hi(do_dcpe_tl1), %g3
+	jmpl		%g3 + %lo(do_dcpe_tl1), %g0
+	 nop
+	nop
+	nop
+	nop
+
+	.globl		cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1
+ /* I-cache parity error at TL=0: report as recoverable icache
+ * error (arg 0x1).
+ */
+cheetah_plus_icpe_trap_vector:
+	membar		#Sync
+	sethi		%hi(do_cheetah_plus_insn_parity), %g7
+	jmpl		%g7 + %lo(do_cheetah_plus_insn_parity), %g0
+	 nop
+	nop
+	nop
+	nop
+	nop
+
+do_cheetah_plus_insn_parity:
+	ba,pt		%xcc, etrap
+	 rd		%pc, %g7
+	mov		0x1, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+ /* I-cache parity error at TL>=1: recovery path do_icpe_tl1. */
+cheetah_plus_icpe_trap_vector_tl1:
+	membar		#Sync
+	wrpr		PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
+	sethi		%hi(do_icpe_tl1), %g3
+	jmpl		%g3 + %lo(do_icpe_tl1), %g0
+	 nop
+	nop
+	nop
+	nop
+
+	/* If we take one of these traps when tl >= 1, then we
+	 * jump to interrupt globals.  If some trap level above us
+	 * was also using interrupt globals, we cannot recover.
+	 * We may use all interrupt global registers except %g6.
+	 */
+	.globl		do_dcpe_tl1, do_icpe_tl1
+ /* TL>=1 D-cache parity recovery: verify no outer trap level was
+ * using the interrupt globals, then rewrite every D-cache line's
+ * utag and data to clear the bad parity, and resume.
+ */
+do_dcpe_tl1:
+	rdpr		%tl, %g1		! Save original trap level
+	mov		1, %g2			! Setup TSTATE checking loop
+	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
+1:	wrpr		%g2, %tl		! Set trap level to check
+	rdpr		%tstate, %g4		! Read TSTATE for this level
+	andcc		%g4, %g3, %g0		! Interrupt globals in use?
+	bne,a,pn	%xcc, do_dcpe_tl1_fatal	! Yep, irrecoverable
+	 wrpr		%g1, %tl		! Restore original trap level
+	add		%g2, 1, %g2		! Next trap level
+	cmp		%g2, %g1		! Hit them all yet?
+	ble,pt		%icc, 1b		! Not yet
+	 nop
+	wrpr		%g1, %tl		! Restore original trap level
+do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	/* Reset D-cache parity */
+	sethi		%hi(1 << 16), %g1	! D-cache size
+	mov		(1 << 5), %g2		! D-cache line size
+	sub		%g1, %g2, %g1		! Move down 1 cacheline
+1:	srl		%g1, 14, %g3		! Compute UTAG
+	membar		#Sync
+	stxa		%g3, [%g1] ASI_DCACHE_UTAG
+	membar		#Sync
+	sub		%g2, 8, %g3		! 64-bit data word within line
+2:	membar		#Sync
+	stxa		%g0, [%g1 + %g3] ASI_DCACHE_DATA
+	membar		#Sync
+	subcc		%g3, 8, %g3		! Next 64-bit data word
+	bge,pt		%icc, 2b
+	 nop
+	subcc		%g1, %g2, %g1		! Next cacheline
+	bge,pt		%icc, 1b
+	 nop
+	ba,pt		%xcc, dcpe_icpe_tl1_common
+	 nop
+
+ /* Irrecoverable: report via cheetah_plus_parity_error with the
+ * "unrecoverable dcache" code 0x2.
+ */
+do_dcpe_tl1_fatal:
+	sethi		%hi(1f), %g7
+	ba,pt		%xcc, etraptl1
+1:	or		%g7, %lo(1b), %g7
+	mov		0x2, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+ /* TL>=1 I-cache parity recovery: same interrupt-globals safety
+ * check as do_dcpe_tl1, then invalidate the whole I-cache by
+ * clearing every tag, and resume via the common path.
+ */
+do_icpe_tl1:
+	rdpr		%tl, %g1		! Save original trap level
+	mov		1, %g2			! Setup TSTATE checking loop
+	sethi		%hi(TSTATE_IG), %g3	! TSTATE mask bit
+1:	wrpr		%g2, %tl		! Set trap level to check
+	rdpr		%tstate, %g4		! Read TSTATE for this level
+	andcc		%g4, %g3, %g0		! Interrupt globals in use?
+	bne,a,pn	%xcc, do_icpe_tl1_fatal	! Yep, irrecoverable
+	 wrpr		%g1, %tl		! Restore original trap level
+	add		%g2, 1, %g2		! Next trap level
+	cmp		%g2, %g1		! Hit them all yet?
+	ble,pt		%icc, 1b		! Not yet
+	 nop
+	wrpr		%g1, %tl		! Restore original trap level
+do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+	/* Flush I-cache */
+	sethi		%hi(1 << 15), %g1	! I-cache size
+	mov		(1 << 5), %g2		! I-cache line size
+	sub		%g1, %g2, %g1
+1:	or		%g1, (2 << 3), %g3
+	stxa		%g0, [%g3] ASI_IC_TAG
+	membar		#Sync
+	subcc		%g1, %g2, %g1
+	bge,pt		%icc, 1b
+	 nop
+	ba,pt		%xcc, dcpe_icpe_tl1_common
+	 nop
+
+ /* Irrecoverable: "unrecoverable icache" code 0x3. */
+do_icpe_tl1_fatal:
+	sethi		%hi(1f), %g7
+	ba,pt		%xcc, etraptl1
+1:	or		%g7, %lo(1b), %g7
+	mov		0x3, %o0
+	call		cheetah_plus_parity_error
+	 add		%sp, PTREGS_OFF, %o1
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+dcpe_icpe_tl1_common:
+	/* Flush D-cache, re-enable D/I caches in DCU and finally
+	 * retry the trapping instruction.
+	 */
+	sethi		%hi(1 << 16), %g1	! D-cache size
+	mov		(1 << 5), %g2		! D-cache line size
+	sub		%g1, %g2, %g1
+1:	stxa		%g0, [%g1] ASI_DCACHE_TAG
+	membar		#Sync
+	subcc		%g1, %g2, %g1
+	bge,pt		%icc, 1b
+	 nop
+	ldxa		[%g0] ASI_DCU_CONTROL_REG, %g1
+	or		%g1, (DCU_DC | DCU_IC), %g1
+	stxa		%g1, [%g0] ASI_DCU_CONTROL_REG
+	membar		#Sync
+	retry
+
+	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
+	 * in the trap table.  That code has done a memory barrier
+	 * and has disabled both the I-cache and D-cache in the DCU
+	 * control register.  The I-cache is disabled so that we may
+	 * capture the corrupted cache line, and the D-cache is disabled
+	 * because corrupt data may have been placed there and we don't
+	 * want to reference it.
+	 *
+	 * %g1 is one if this trap occurred at %tl >= 1.
+	 *
+	 * Next, we turn off error reporting so that we don't recurse.
+	 */
+	.globl		cheetah_fast_ecc
+cheetah_fast_ecc:
+	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
+	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
+	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
+	membar		#Sync
+
+	/* Fetch and clear AFSR/AFAR */
+	ldxa		[%g0] ASI_AFSR, %g4
+	ldxa		[%g0] ASI_AFAR, %g5
+	stxa		%g4, [%g0] ASI_AFSR
+	membar		#Sync
+
+ /* Snapshot cache state (expects AFSR in %g4, AFAR in %g5). */
+	CHEETAH_LOG_ERROR
+
+ /* Enter the kernel at PIL 15 and run the C handler with the
+ * AFSR/AFAR values preserved across etrap in %l4/%l5.
+ */
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq
+	 rd		%pc, %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		cheetah_fecc_handler
+	 add		%sp, PTREGS_OFF, %o0
+	ba,a,pt		%xcc, rtrap_irq
+
+	/* Our caller has disabled I-cache and performed membar Sync. */
+	.globl		cheetah_cee
+ /* Correctable-error handler: mask only CEEN (correctable error
+ * reporting) to avoid recursion, snapshot AFSR/AFAR and cache
+ * state, then call cheetah_cee_handler(regs, afsr, afar).
+ */
+cheetah_cee:
+	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
+	andn		%g2, ESTATE_ERROR_CEEN, %g2
+	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
+	membar		#Sync
+
+	/* Fetch and clear AFSR/AFAR */
+	ldxa		[%g0] ASI_AFSR, %g4
+	ldxa		[%g0] ASI_AFAR, %g5
+	stxa		%g4, [%g0] ASI_AFSR
+	membar		#Sync
+
+	CHEETAH_LOG_ERROR
+
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq
+	 rd		%pc, %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		cheetah_cee_handler
+	 add		%sp, PTREGS_OFF, %o0
+	ba,a,pt		%xcc, rtrap_irq
+
+	/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
+	.globl		cheetah_deferred_trap
+ /* Deferred-error handler: mask both NCEEN and CEEN, snapshot
+ * AFSR/AFAR and cache state, then call
+ * cheetah_deferred_handler(regs, afsr, afar).
+ */
+cheetah_deferred_trap:
+	ldxa		[%g0] ASI_ESTATE_ERROR_EN, %g2
+	andn		%g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
+	stxa		%g2, [%g0] ASI_ESTATE_ERROR_EN
+	membar		#Sync
+
+	/* Fetch and clear AFSR/AFAR */
+	ldxa		[%g0] ASI_AFSR, %g4
+	ldxa		[%g0] ASI_AFAR, %g5
+	stxa		%g4, [%g0] ASI_AFSR
+	membar		#Sync
+
+	CHEETAH_LOG_ERROR
+
+	rdpr		%pil, %g2
+	wrpr		%g0, 15, %pil
+	ba,pt		%xcc, etrap_irq
+	 rd		%pc, %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		cheetah_deferred_handler
+	 add		%sp, PTREGS_OFF, %o0
+	ba,a,pt		%xcc, rtrap_irq
+
+	.globl		__do_privact
+ /* Privileged-action trap from userspace: clear SFSR FaultValid
+ * and dispatch to the C handler do_privact(regs).
+ */
+__do_privact:
+	mov		TLB_SFSR, %g3
+	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	or		%g7, %lo(109b), %g7
+	call		do_privact
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_mna
+ /* Memory-address-not-aligned trap.  At TL>1 this goes to the
+ * window fixup path (winfix_mna); at TL=1 it enters the kernel
+ * and calls mem_address_unaligned(regs, sfar, sfsr).
+ */
+do_mna:
+	rdpr		%tl, %g3
+	cmp		%g3, 1
+
+	/* Setup %g4/%g5 now as they are used in the
+	 * winfixup code.
+	 */
+	mov		TLB_SFSR, %g3
+	mov		DMMU_SFAR, %g4
+	ldxa		[%g4] ASI_DMMU, %g4
+	ldxa		[%g3] ASI_DMMU, %g5
+	stxa		%g0, [%g3] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	bgu,pn		%icc, winfix_mna
+	 rdpr		%tpc, %g3
+
+1:	sethi		%hi(109f), %g7
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		mem_address_unaligned
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_lddfmna
+ /* Unaligned LDDF (FP double load): capture SFSR/SFAR, clear
+ * FaultValid, and emulate in C via handle_lddfmna(regs, sfar, sfsr).
+ */
+do_lddfmna:
+	sethi		%hi(109f), %g7
+	mov		TLB_SFSR, %g4
+	ldxa		[%g4] ASI_DMMU, %g5
+	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	mov		DMMU_SFAR, %g4
+	ldxa		[%g4] ASI_DMMU, %g4
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		handle_lddfmna
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		do_stdfmna
+ /* Unaligned STDF (FP double store): same scheme, emulated by
+ * handle_stdfmna(regs, sfar, sfsr).
+ */
+do_stdfmna:
+	sethi		%hi(109f), %g7
+	mov		TLB_SFSR, %g4
+	ldxa		[%g4] ASI_DMMU, %g5
+	stxa		%g0, [%g4] ASI_DMMU	! Clear FaultValid bit
+	membar		#Sync
+	mov		DMMU_SFAR, %g4
+	ldxa		[%g4] ASI_DMMU, %g4
+	ba,pt		%xcc, etrap
+109:	 or		%g7, %lo(109b), %g7
+	mov		%l4, %o1
+	mov		%l5, %o2
+	call		handle_stdfmna
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 clr		%l6
+
+	.globl		breakpoint_trap
+ /* Breakpoint trap: delivered to the C handler sparc_breakpoint(). */
+breakpoint_trap:
+	call		sparc_breakpoint
+	 add		%sp, PTREGS_OFF, %o0
+	ba,pt		%xcc, rtrap
+	 nop
+
+#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
+	defined(CONFIG_SOLARIS_EMUL_MODULE)
+	/* SunOS uses syscall zero as the 'indirect syscall' it looks
+	 * like indir_syscall(scall_num, arg0, arg1, arg2...);  etc.
+	 * This is complete brain damage.
+	 */
+	.globl	sunos_indir
+ /* Dispatch syscall 0: validate the target number, look up its
+ * handler in sunos_sys_table (or sunos_nosys), shift the real
+ * arguments down one slot and tail-call the handler with the
+ * original return address.
+ */
+sunos_indir:
+	srl		%o0, 0, %o0
+	mov		%o7, %l4
+	cmp		%o0, NR_SYSCALLS
+	blu,a,pt	%icc, 1f
+	 sll		%o0, 0x2, %o0
+	sethi		%hi(sunos_nosys), %l6
+	b,pt		%xcc, 2f
+	 or		%l6, %lo(sunos_nosys), %l6
+1:	sethi		%hi(sunos_sys_table), %l7
+	or		%l7, %lo(sunos_sys_table), %l7
+	lduw		[%l7 + %o0], %l6
+2:	mov		%o1, %o0
+	mov		%o2, %o1
+	mov		%o3, %o2
+	mov		%o4, %o3
+	mov		%o5, %o4
+	call		%l6
+	 mov		%l4, %o7
+
+	.globl	sunos_getpid
+ /* SunOS getpid() returns pid in %o0 and ppid in %o1. */
+sunos_getpid:
+	call	sys_getppid
+	 nop
+	call	sys_getpid
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt	%xcc, ret_sys_call
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+
+	/* SunOS getuid() returns uid in %o0 and euid in %o1 */
+	.globl	sunos_getuid
+sunos_getuid:
+	call	sys32_geteuid16
+	 nop
+	call	sys32_getuid16
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt	%xcc, ret_sys_call
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+
+	/* SunOS getgid() returns gid in %o0 and egid in %o1 */
+	.globl	sunos_getgid
+sunos_getgid:
+	call	sys32_getegid16
+	 nop
+	call	sys32_getgid16
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I1]
+	b,pt	%xcc, ret_sys_call
+	 stx	%o0, [%sp + PTREGS_OFF + PT_V9_I0]
+#endif
+
+	/* SunOS's execv() call only specifies the argv argument, the
+	 * environment settings are the same as the calling processes.
+	 */
+ /* The .globl directives were swapped relative to the labels they
+ * export: sys_execve must be global unconditionally, while
+ * sunos_execv is only defined (and thus only exported) under
+ * CONFIG_COMPAT.  Previously a !CONFIG_COMPAT build exported an
+ * undefined sunos_execv and left sys_execve local.
+ */
+	.globl	sys_execve
+sys_execve:
+	sethi	%hi(sparc_execve), %g1
+	ba,pt	%xcc, execve_merge
+	 or	%g1, %lo(sparc_execve), %g1
+#ifdef CONFIG_COMPAT
+	.globl	sunos_execv
+ /* SunOS execv: zero the envp slot, then fall through into the
+ * 32-bit execve path.
+ */
+sunos_execv:
+	stx	%g0, [%sp + PTREGS_OFF + PT_V9_I2]
+	.globl	sys32_execve
+sys32_execve:
+	sethi	%hi(sparc32_execve), %g1
+	or	%g1, %lo(sparc32_execve), %g1
+#endif
+ /* Common tail: flush register windows and jump to the C execve
+ * implementation selected in %g1, with %o0 = pt_regs.
+ */
+execve_merge:
+	flushw
+	jmpl	%g1, %g0
+	 add	%sp, PTREGS_OFF, %o0
+
+	.globl	sys_pipe, sys_sigpause, sys_nis_syscall
+	.globl	sys_sigsuspend, sys_rt_sigsuspend
+	.globl	sys_rt_sigreturn
+	.globl	sys_ptrace
+	.globl	sys_sigaltstack
+	.align	32
+ /* Thin syscall entry stubs: each one loads the extra argument(s)
+ * the C implementation needs (usually the pt_regs pointer) and
+ * branches to it.
+ */
+sys_pipe:	ba,pt		%xcc, sparc_pipe
+		 add		%sp, PTREGS_OFF, %o0
+sys_nis_syscall:ba,pt		%xcc, c_sys_nis_syscall
+		 add		%sp, PTREGS_OFF, %o0
+sys_memory_ordering:
+		ba,pt		%xcc, sparc_memory_ordering
+		 add		%sp, PTREGS_OFF, %o1
+sys_sigaltstack:ba,pt		%xcc, do_sigaltstack
+		 add		%i6, STACK_BIAS, %o2
+#ifdef CONFIG_COMPAT
+	.globl	sys32_sigstack
+sys32_sigstack:	ba,pt		%xcc, do_sys32_sigstack
+		 mov		%i6, %o2
+	.globl	sys32_sigaltstack
+sys32_sigaltstack:
+	ba,pt		%xcc, do_sys32_sigaltstack
+	 mov		%i6, %o2
+#endif
+	.align		32
+ /* The "add %o7, 1f-.-4, %o7" adjusts the return address so that
+ * the called C routine returns to label 1 below, where the
+ * syscall-trace check and rtrap exit are performed.
+ */
+sys_sigsuspend:	add		%sp, PTREGS_OFF, %o0
+		call		do_sigsuspend
+		 add		%o7, 1f-.-4, %o7
+		nop
+sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
+		add		%sp, PTREGS_OFF, %o2
+		call		do_rt_sigsuspend
+		 add		%o7, 1f-.-4, %o7
+		nop
+#ifdef CONFIG_COMPAT
+	.globl	sys32_rt_sigsuspend
+sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
+		srl		%o0, 0, %o0
+		add		%sp, PTREGS_OFF, %o2
+		call		do_rt_sigsuspend32
+		 add		%o7, 1f-.-4, %o7
+#endif
+		/* NOTE: %o0 has a correct value already */
+sys_sigpause:	add		%sp, PTREGS_OFF, %o1
+		call		do_sigpause
+		 add		%o7, 1f-.-4, %o7
+		nop
+#ifdef CONFIG_COMPAT
+	.globl	sys32_sigreturn
+sys32_sigreturn:
+		add		%sp, PTREGS_OFF, %o0
+		call		do_sigreturn32
+		 add		%o7, 1f-.-4, %o7
+		nop
+#endif
+sys_rt_sigreturn:
+		add		%sp, PTREGS_OFF, %o0
+		call		do_rt_sigreturn
+		 add		%o7, 1f-.-4, %o7
+		nop
+#ifdef CONFIG_COMPAT
+	.globl	sys32_rt_sigreturn
+sys32_rt_sigreturn:
+		add		%sp, PTREGS_OFF, %o0
+		call		do_rt_sigreturn32
+		 add		%o7, 1f-.-4, %o7
+		nop
+#endif
+sys_ptrace:	add		%sp, PTREGS_OFF, %o0
+		call		do_ptrace
+		 add		%o7, 1f-.-4, %o7
+		nop
+		.align		32
+ /* Common syscall return: honor TIF_SYSCALL_TRACE before rtrap. */
+1:		ldx		[%curptr + TI_FLAGS], %l5
+		andcc		%l5, _TIF_SYSCALL_TRACE, %g0
+		be,pt		%icc, rtrap
+		 clr		%l6
+		call		syscall_trace
+		 nop
+
+		ba,pt		%xcc, rtrap
+		 clr		%l6
+
+ /* This is how fork() was meant to be done, 8 instruction entry.
+ *
+ * I questioned the following code briefly, let me clear things
+ * up so you must not reason on it like I did.
+ *
+ * Know the fork_kpsr etc. we use in the sparc32 port? We don't
+ * need it here because the only piece of window state we copy to
+ * the child is the CWP register. Even if the parent sleeps,
+ * we are safe because we stuck it into pt_regs of the parent
+ * so it will not change.
+ *
+ * XXX This raises the question, whether we can do the same on
+ * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The
+ * XXX answer is yes. We stick fork_kpsr in UREG_G0 and
+ * XXX fork_kwim in UREG_G1 (global registers are considered
+ * XXX volatile across a system call in the sparc ABI I think
+ * XXX if it isn't we can use regs->y instead, anyone who depends
+ * XXX upon the Y register being preserved across a fork deserves
+ * XXX to lose).
+ *
+ * In fact we should take advantage of that fact for other things
+ * during system calls...
+ */
+ .globl sys_fork, sys_vfork, sys_clone, sparc_exit
+ .globl ret_from_syscall
+ .align 32
+sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
+ sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0 ! 0x4000|0x0100 = CLONE_VFORK|CLONE_VM
+ or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
+ ba,pt %xcc, sys_clone
+sys_fork: clr %o1 ! delay slot: no new stack supplied
+ mov SIGCHLD, %o0
+sys_clone: flushw
+ movrz %o1, %fp, %o1 ! if new_stack == 0, reuse parent's %fp
+ mov 0, %o3
+ ba,pt %xcc, sparc_do_fork
+ add %sp, PTREGS_OFF, %o2
+ret_from_syscall:
+ /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in
+ * %o7 for us. Check performance counter stuff too.
+ */
+ andn %o7, _TIF_NEWCHILD, %l0
+ stx %l0, [%g6 + TI_FLAGS]
+ call schedule_tail
+ mov %g7, %o0 ! delay slot: previous task for schedule_tail()
+ andcc %l0, _TIF_PERFCTR, %g0
+ be,pt %icc, 1f
+ nop
+ ldx [%g6 + TI_PCR], %o7 ! restore the child's %pcr value
+ wr %g0, %o7, %pcr
+
+ /* Blackbird errata workaround. See commentary in
+ * smp.c:smp_percpu_timer_interrupt() for more
+ * information.
+ */
+ ba,pt %xcc, 99f
+ nop
+ .align 64
+99: wr %g0, %g0, %pic
+ rd %pic, %g0
+
+1: b,pt %xcc, ret_sys_call
+ ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0
+ /* Drain this thread's register windows (fold otherwin back into
+ * cansave) with interrupts off, then branch to sys_exit.
+ */
+sparc_exit: wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate
+ rdpr %otherwin, %g1
+ rdpr %cansave, %g3
+ add %g3, %g1, %g3
+ wrpr %g3, 0x0, %cansave
+ wrpr %g0, 0x0, %otherwin
+ wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate
+ ba,pt %xcc, sys_exit
+ stb %g0, [%g6 + TI_WSAVED]
+
+linux_sparc_ni_syscall:
+ /* Out-of-range syscall number: dispatch sys_ni_syscall instead. */
+ sethi %hi(sys_ni_syscall), %l7
+ b,pt %xcc, 4f
+ or %l7, %lo(sys_ni_syscall), %l7
+
+linux_syscall_trace32:
+ /* Tracing path for 32-bit syscalls: reload the argument registers
+ * (zero-extending to 64 bits) after the tracer may have changed them.
+ */
+ call syscall_trace
+ nop
+ srl %i0, 0, %o0
+ mov %i4, %o4
+ srl %i1, 0, %o1
+ srl %i2, 0, %o2
+ b,pt %xcc, 2f
+ srl %i3, 0, %o3
+
+linux_syscall_trace:
+ /* Tracing path for 64-bit syscalls: reload arguments unchanged. */
+ call syscall_trace
+ nop
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i3, %o3
+ b,pt %xcc, 2f
+ mov %i4, %o4
+
+
+ /* Linux 32-bit and SunOS system calls enter here... */
+ .align 32
+ .globl linux_sparc_syscall32
+linux_sparc_syscall32:
+ /* Direct access to user regs, much faster. */
+ /* The "srl reg, 0, reg" forms zero-extend each 32-bit argument. */
+ cmp %g1, NR_SYSCALLS ! IEU1 Group
+ bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
+ srl %i0, 0, %o0 ! IEU0
+ sll %g1, 2, %l4 ! IEU0 Group
+#ifdef SYSCALL_TRACING
+ call syscall_trace_entry
+ add %sp, PTREGS_OFF, %o0
+ srl %i0, 0, %o0
+#endif
+ srl %i4, 0, %o4 ! IEU1
+ lduw [%l7 + %l4], %l7 ! Load
+ srl %i1, 0, %o1 ! IEU0 Group
+ ldx [%curptr + TI_FLAGS], %l0 ! Load
+
+ srl %i5, 0, %o5 ! IEU1
+ srl %i2, 0, %o2 ! IEU0 Group
+ andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU0 Group
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+ call %l7 ! CTI Group brk forced
+ srl %i3, 0, %o3 ! IEU0
+ ba,a,pt %xcc, 3f
+
+ /* Linux native and SunOS system calls enter here... */
+ .align 32
+ .globl linux_sparc_syscall, ret_sys_call
+linux_sparc_syscall:
+ /* Direct access to user regs, much faster. */
+ /* %g1 = syscall number, %l7 = syscall table base on entry. */
+ cmp %g1, NR_SYSCALLS ! IEU1 Group
+ bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
+ mov %i0, %o0 ! IEU0
+ sll %g1, 2, %l4 ! IEU0 Group
+#ifdef SYSCALL_TRACING
+ call syscall_trace_entry
+ add %sp, PTREGS_OFF, %o0
+ mov %i0, %o0
+#endif
+ mov %i1, %o1 ! IEU1
+ lduw [%l7 + %l4], %l7 ! Load
+4: mov %i2, %o2 ! IEU0 Group
+ ldx [%curptr + TI_FLAGS], %l0 ! Load
+
+ mov %i3, %o3 ! IEU1
+ mov %i4, %o4 ! IEU0 Group
+ andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU1 Group+1 bubble
+ bne,pn %icc, linux_syscall_trace ! CTI Group
+ mov %i0, %l5 ! IEU0
+2: call %l7 ! CTI Group brk forced
+ mov %i5, %o5 ! IEU0
+ nop
+
+3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ret_sys_call:
+ /* Common syscall return: store the result into pt_regs, set or
+ * clear the condition-code carry bit to report success/failure,
+ * and advance the saved pc/npc past the trap instruction.
+ */
+#ifdef SYSCALL_TRACING
+ mov %o0, %o1
+ call syscall_trace_exit
+ add %sp, PTREGS_OFF, %o0
+ mov %o1, %o0
+#endif
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+ sra %o0, 0, %o0
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ sllx %g2, 32, %g2
+
+ /* Check if force_successful_syscall_return()
+ * was invoked.
+ */
+ ldx [%curptr + TI_FLAGS], %l0
+ andcc %l0, _TIF_SYSCALL_SUCCESS, %g0
+ be,pt %icc, 1f
+ andn %l0, _TIF_SYSCALL_SUCCESS, %l0
+ ba,pt %xcc, 80f
+ stx %l0, [%curptr + TI_FLAGS]
+
+1:
+ /* Values in [-ERESTART_RESTARTBLOCK, -1] are error returns. */
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+ andcc %l0, _TIF_SYSCALL_TRACE, %l6
+80:
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
+ stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+ bne,pn %icc, linux_syscall_trace2
+ add %l1, 0x4, %l2 ! npc = npc+4
+ stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+ ba,pt %xcc, rtrap_clr_l6
+ stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+1:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+ andcc %l0, _TIF_SYSCALL_TRACE, %l6
+ sub %g0, %o0, %o0
+ or %g3, %g2, %g3
+ stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ mov 1, %l6
+ stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+ bne,pn %icc, linux_syscall_trace2
+ add %l1, 0x4, %l2 ! npc = npc+4
+ stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+
+ b,pt %xcc, rtrap
+ stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+linux_syscall_trace2:
+ call syscall_trace
+ nop
+ stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+ ba,pt %xcc, rtrap
+ stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+ .align 32
+ .globl __flushw_user
+__flushw_user:
+ /* Flush all user register windows to the stack: save until
+ * %otherwin drains to zero (counting iterations in %g2), then
+ * restore back the same number of times to return here.
+ */
+ rdpr %otherwin, %g1
+ brz,pn %g1, 2f
+ clr %g2
+1: save %sp, -128, %sp
+ rdpr %otherwin, %g1
+ brnz,pt %g1, 1b
+ add %g2, 1, %g2
+1: sub %g2, 1, %g2
+ brnz,pt %g2, 1b
+ restore %g0, %g0, %g0
+2: retl
+ nop
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
new file mode 100644
index 00000000000..50d2af1d98a
--- /dev/null
+++ b/arch/sparc64/kernel/etrap.S
@@ -0,0 +1,301 @@
+/* $Id: etrap.S,v 1.46 2002/02/09 19:49:30 davem Exp $
+ * etrap.S: Preparing for entry into the kernel on Sparc V9.
+ *
+ * Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+#include <asm/processor.h>
+#include <asm/mmu.h>
+
+#define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
+#define ETRAP_PSTATE1 (PSTATE_RMO | PSTATE_PRIV)
+#define ETRAP_PSTATE2 \
+ (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
+
+/*
+ * On entry, %g7 is return address - 0x4.
+ * %g4 and %g5 will be preserved %l4 and %l5 respectively.
+ */
+
+ .text
+ .align 64
+ .globl etrap, etrap_irq, etraptl1
+ /* Trap entry: build a pt_regs frame on the kernel stack, switch
+ * to kernel globals/context, and return past the caller's %g7.
+ * etrap_irq is entered with the new %pil already in %g2.
+ */
+etrap: rdpr %pil, %g2
+etrap_irq:
+ rdpr %tstate, %g1
+ sllx %g2, 20, %g3
+ andcc %g1, TSTATE_PRIV, %g0
+ or %g1, %g3, %g1
+ bne,pn %xcc, 1f
+ sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
+ wrpr %g0, 7, %cleanwin
+
+ sethi %hi(TASK_REGOFF), %g2
+ sethi %hi(TSTATE_PEF), %g3
+ or %g2, %lo(TASK_REGOFF), %g2
+ and %g1, %g3, %g3
+ brnz,pn %g3, 1f
+ add %g6, %g2, %g2
+ wr %g0, 0, %fprs
+1: rdpr %tpc, %g3
+
+ stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
+ rdpr %tnpc, %g1
+ stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
+ rd %y, %g3
+ stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
+ st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
+ save %g2, -STACK_BIAS, %sp ! Ordering here is critical
+ mov %g6, %l6
+
+ bne,pn %xcc, 3f
+ mov PRIMARY_CONTEXT, %l4
+ rdpr %canrestore, %g3
+ rdpr %wstate, %g2
+ wrpr %g0, 0, %canrestore
+ sll %g2, 3, %g2
+ mov 1, %l5
+ stb %l5, [%l6 + TI_FPDEPTH]
+
+ wrpr %g3, 0, %otherwin
+ wrpr %g2, 0, %wstate
+ /* The sethi operands below are patched at boot by
+ * cheetah_plus_patch_etrap (see end of this file).
+ */
+cplus_etrap_insn_1:
+ sethi %hi(0), %g3
+ sllx %g3, 32, %g3
+cplus_etrap_insn_2:
+ sethi %hi(0), %g2
+ or %g3, %g2, %g3
+ stxa %g3, [%l4] ASI_DMMU
+ flush %l6
+ wr %g0, ASI_AIUS, %asi
+2: wrpr %g0, 0x0, %tl
+ mov %g4, %l4
+ mov %g5, %l5
+
+ mov %g7, %l2
+ wrpr %g0, ETRAP_PSTATE1, %pstate
+ stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
+ stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
+ stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
+ stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
+ stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
+ stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
+
+ stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
+ stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
+ stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
+ stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
+ stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
+ stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
+ stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
+
+ stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
+ stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
+ wrpr %g0, ETRAP_PSTATE2, %pstate
+ mov %l6, %g6
+#ifdef CONFIG_SMP
+ mov TSB_REG, %g3
+ ldxa [%g3] ASI_IMMU, %g5
+#endif
+ jmpl %l2 + 0x4, %g0
+ ldx [%g6 + TI_TASK], %g4
+
+3: ldub [%l6 + TI_FPDEPTH], %l5
+ add %l6, TI_FPSAVED + 1, %l4
+ srl %l5, 1, %l3
+ add %l5, 2, %l5
+ stb %l5, [%l6 + TI_FPDEPTH]
+ ba,pt %xcc, 2b
+ stb %g0, [%l4 + %l3]
+ nop
+
+etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
+ * We place this right after pt_regs on the trap stack.
+ * The layout is:
+ * 0x00 TL1's TSTATE
+ * 0x08 TL1's TPC
+ * 0x10 TL1's TNPC
+ * 0x18 TL1's TT
+ * ...
+ * 0x78 TL4's TT
+ * 0x80 TL
+ */
+ sub %sp, ((4 * 8) * 4) + 8, %g2
+ rdpr %tl, %g1
+
+ wrpr %g0, 1, %tl
+ rdpr %tstate, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x00]
+ rdpr %tpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x08]
+ rdpr %tnpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x10]
+ rdpr %tt, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x18]
+
+ wrpr %g0, 2, %tl
+ rdpr %tstate, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x20]
+ rdpr %tpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x28]
+ rdpr %tnpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x30]
+ rdpr %tt, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x38]
+
+ wrpr %g0, 3, %tl
+ rdpr %tstate, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x40]
+ rdpr %tpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x48]
+ rdpr %tnpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x50]
+ rdpr %tt, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x58]
+
+ wrpr %g0, 4, %tl
+ rdpr %tstate, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x60]
+ rdpr %tpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x68]
+ rdpr %tnpc, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x70]
+ rdpr %tt, %g3
+ stx %g3, [%g2 + STACK_BIAS + 0x78]
+
+ wrpr %g1, %tl
+ stx %g1, [%g2 + STACK_BIAS + 0x80]
+
+ /* Rejoin the normal etrap path with the enlarged frame. */
+ rdpr %tstate, %g1
+ sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
+ ba,pt %xcc, 1b
+ andcc %g1, TSTATE_PRIV, %g0
+
+ .align 64
+ .globl scetrap
+ /* System-call trap entry: like etrap but lighter weight (no %y
+ * save, returns via "done" after building tstate by hand).
+ */
+scetrap: rdpr %pil, %g2
+ rdpr %tstate, %g1
+ sllx %g2, 20, %g3
+ andcc %g1, TSTATE_PRIV, %g0
+ or %g1, %g3, %g1
+ bne,pn %xcc, 1f
+ sub %sp, (STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS), %g2
+ wrpr %g0, 7, %cleanwin
+
+ sllx %g1, 51, %g3
+ sethi %hi(TASK_REGOFF), %g2
+ or %g2, %lo(TASK_REGOFF), %g2
+ brlz,pn %g3, 1f
+ add %g6, %g2, %g2
+ wr %g0, 0, %fprs
+1: rdpr %tpc, %g3
+ stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
+
+ rdpr %tnpc, %g1
+ stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
+ stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
+ save %g2, -STACK_BIAS, %sp ! Ordering here is critical
+ mov %g6, %l6
+ bne,pn %xcc, 2f
+ mov ASI_P, %l7
+ rdpr %canrestore, %g3
+
+ rdpr %wstate, %g2
+ wrpr %g0, 0, %canrestore
+ sll %g2, 3, %g2
+ mov PRIMARY_CONTEXT, %l4
+ wrpr %g3, 0, %otherwin
+ wrpr %g2, 0, %wstate
+ /* Patched at boot by cheetah_plus_patch_etrap, like insns 1/2. */
+cplus_etrap_insn_3:
+ sethi %hi(0), %g3
+ sllx %g3, 32, %g3
+cplus_etrap_insn_4:
+ sethi %hi(0), %g2
+ or %g3, %g2, %g3
+ stxa %g3, [%l4] ASI_DMMU
+ flush %l6
+
+ mov ASI_AIUS, %l7
+2: mov %g4, %l4
+ mov %g5, %l5
+ add %g7, 0x4, %l2
+ wrpr %g0, ETRAP_PSTATE1, %pstate
+ stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
+ stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
+ sllx %l7, 24, %l7
+
+ stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
+ rdpr %cwp, %l0
+ stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
+ stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
+ stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
+ stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
+ or %l7, %l0, %l7
+ sethi %hi(TSTATE_RMO | TSTATE_PEF), %l0
+
+ or %l7, %l0, %l7
+ wrpr %l2, %tnpc
+ wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
+ stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
+ stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
+ stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
+ stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
+ stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
+
+ stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
+ stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
+ mov %l6, %g6
+ stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
+#ifdef CONFIG_SMP
+ mov TSB_REG, %g3
+ ldxa [%g3] ASI_IMMU, %g5
+#endif
+ ldx [%g6 + TI_TASK], %g4
+ done
+
+#undef TASK_REGOFF
+#undef ETRAP_PSTATE1
+
+ /* Template instructions copied over the cplus_etrap_insn_* slots
+ * above when running on Cheetah+ class CPUs.
+ */
+cplus_einsn_1:
+ sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
+cplus_einsn_2:
+ sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
+
+ .globl cheetah_plus_patch_etrap
+cheetah_plus_patch_etrap:
+ /* We configure the dTLB512_0 for 4MB pages and the
+ * dTLB512_1 for 8K pages when in context zero.
+ */
+ sethi %hi(cplus_einsn_1), %o0
+ sethi %hi(cplus_etrap_insn_1), %o2
+ lduw [%o0 + %lo(cplus_einsn_1)], %o1
+ or %o2, %lo(cplus_etrap_insn_1), %o2
+ stw %o1, [%o2]
+ flush %o2 ! flush I-cache after patching each word
+ sethi %hi(cplus_etrap_insn_3), %o2
+ or %o2, %lo(cplus_etrap_insn_3), %o2
+ stw %o1, [%o2]
+ flush %o2
+
+ sethi %hi(cplus_einsn_2), %o0
+ sethi %hi(cplus_etrap_insn_2), %o2
+ lduw [%o0 + %lo(cplus_einsn_2)], %o1
+ or %o2, %lo(cplus_etrap_insn_2), %o2
+ stw %o1, [%o2]
+ flush %o2
+ sethi %hi(cplus_etrap_insn_4), %o2
+ or %o2, %lo(cplus_etrap_insn_4), %o2
+ stw %o1, [%o2]
+ flush %o2
+
+ retl
+ nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
new file mode 100644
index 00000000000..8104a56ca2d
--- /dev/null
+++ b/arch/sparc64/kernel/head.S
@@ -0,0 +1,782 @@
+/* $Id: head.S,v 1.87 2002/02/09 19:49:31 davem Exp $
+ * head.S: Initial boot code for the Sparc64 port of Linux.
+ *
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <asm/thread_info.h>
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/spitfire.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/errno.h>
+#include <asm/signal.h>
+#include <asm/processor.h>
+#include <asm/lsu.h>
+#include <asm/dcr.h>
+#include <asm/dcu.h>
+#include <asm/head.h>
+#include <asm/ttable.h>
+#include <asm/mmu.h>
+
+/* This section from _start to sparc64_boot_end should fit into
+ * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
+ * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
+ * 0x0000.0000.0040.6000 and empty_bad_page, which is from
+ * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
+ */
+
+ .text
+ .globl start, _start, stext, _stext
+ /* Kernel entry point; the "HdrS" record below is the boot-loader
+ * handshake and must stay at these fixed offsets from _start.
+ */
+_start:
+start:
+_stext:
+stext:
+bootup_user_stack:
+! 0x0000000000404000
+ b sparc64_boot
+ flushw /* Flush register file. */
+
+/* This stuff has to be in sync with SILO and other potential boot loaders
+ * Fields should be kept upward compatible and whenever any change is made,
+ * HdrS version should be incremented.
+ */
+ .global root_flags, ram_flags, root_dev
+ .global sparc_ramdisk_image, sparc_ramdisk_size
+ .global sparc_ramdisk_image64
+
+ .ascii "HdrS"
+ .word LINUX_VERSION_CODE
+
+ /* History:
+ *
+ * 0x0300 : Supports being located at other than 0x4000
+ * 0x0202 : Supports kernel params string
+ * 0x0201 : Supports reboot_command
+ */
+ .half 0x0301 /* HdrS version */
+
+root_flags:
+ .half 1
+root_dev:
+ .half 0
+ram_flags:
+ .half 0
+sparc_ramdisk_image:
+ .word 0
+sparc_ramdisk_size:
+ .word 0
+ .xword reboot_command
+ .xword bootstr_info
+sparc_ramdisk_image64:
+ .xword 0
+ .word _end
+
+ /* We must be careful, 32-bit OpenBOOT will get confused if it
+ * tries to save away a register window to a 64-bit kernel
+ * stack address. Flush all windows, disable interrupts,
+ * remap if necessary, jump onto kernel trap table, then kernel
+ * stack, or else we die.
+ *
+ * PROM entry point is on %o4
+ */
+ /* Dispatch on CPU type; the Cheetah path below probes the ITLB
+ * for the PROM's mapping of our %pc, builds a locked KERNBASE
+ * TTE from its physical address, and jumps to sun4u_init.
+ */
+sparc64_boot:
+ BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
+ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
+ ba,pt %xcc, spitfire_boot
+ nop
+
+cheetah_plus_boot:
+ /* Preserve OBP chosen DCU and DCR register settings. */
+ ba,pt %xcc, cheetah_generic_boot
+ nop
+
+cheetah_boot:
+ mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
+ wr %g1, %asr18
+
+ sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
+ or %g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
+ sllx %g7, 32, %g7
+ or %g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
+ stxa %g7, [%g0] ASI_DCU_CONTROL_REG
+ membar #Sync
+
+cheetah_generic_boot:
+ mov TSB_EXTENSION_P, %g3
+ stxa %g0, [%g3] ASI_DMMU
+ stxa %g0, [%g3] ASI_IMMU
+ membar #Sync
+
+ mov TSB_EXTENSION_S, %g3
+ stxa %g0, [%g3] ASI_DMMU
+ membar #Sync
+
+ mov TSB_EXTENSION_N, %g3
+ stxa %g0, [%g3] ASI_DMMU
+ stxa %g0, [%g3] ASI_IMMU
+ membar #Sync
+
+ wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
+ wr %g0, 0, %fprs
+
+ /* Just like for Spitfire, we probe itlb-2 for a mapping which
+ * matches our current %pc. We take the physical address in
+ * that mapping and use it to make our own.
+ */
+
+ /* %g5 holds the tlb data */
+ sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
+ sllx %g5, 32, %g5
+ or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
+
+ /* Put PADDR tlb data mask into %g3. */
+ sethi %uhi(_PAGE_PADDR), %g3
+ or %g3, %ulo(_PAGE_PADDR), %g3
+ sllx %g3, 32, %g3
+ sethi %hi(_PAGE_PADDR), %g7
+ or %g7, %lo(_PAGE_PADDR), %g7
+ or %g3, %g7, %g3
+
+ set 2 << 16, %l0 /* TLB entry walker. */
+ set 0x1fff, %l2 /* Page mask. */
+ rd %pc, %l3
+ andn %l3, %l2, %g2 /* vaddr comparator */
+
+1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g2
+ be,pn %xcc, cheetah_got_tlbentry
+ nop
+ and %l0, (127 << 3), %g1
+ cmp %g1, (127 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ /* Search the small TLB. OBP never maps us like that but
+ * newer SILO can.
+ */
+ clr %l0
+
+1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g2
+ be,pn %xcc, cheetah_got_tlbentry
+ nop
+ cmp %l0, (15 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ /* BUG() if we get here... */
+ ta 0x5
+
+cheetah_got_tlbentry:
+ ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
+ ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
+ membar #Sync
+ and %g1, %g3, %g1
+ set 0x5fff, %l0
+ andn %g1, %l0, %g1
+ or %g5, %g1, %g5
+
+ /* Clear out any KERNBASE area entries. */
+ set 2 << 16, %l0
+ sethi %hi(KERNBASE), %g3
+ sethi %hi(KERNBASE<<1), %g7
+ mov TLB_TAG_ACCESS, %l7
+
+ /* First, check ITLB */
+1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_IMMU
+ membar #Sync
+ stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+
+2: and %l0, (127 << 3), %g1
+ cmp %g1, (127 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ /* Next, check DTLB */
+ set 2 << 16, %l0
+1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_DMMU
+ membar #Sync
+ stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+2: and %l0, (511 << 3), %g1
+ cmp %g1, (511 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ /* On Cheetah+, have to check second DTLB. */
+ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
+ ba,pt %xcc, 9f
+ nop
+
+2: set 3 << 16, %l0
+1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
+ membar #Sync
+ andn %g1, %l2, %g1
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_DMMU
+ membar #Sync
+ stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+2: and %l0, (511 << 3), %g1
+ cmp %g1, (511 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+9:
+
+ /* Now lock the TTE we created into ITLB-0 and DTLB-0,
+ * entry 15 (and maybe 14 too).
+ */
+ sethi %hi(KERNBASE), %g3
+ set (0 << 16) | (15 << 3), %g7
+ stxa %g3, [%l7] ASI_DMMU
+ membar #Sync
+ stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ stxa %g3, [%l7] ASI_IMMU
+ membar #Sync
+ stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+ flush %g3
+ membar #Sync
+ sethi %hi(_end), %g3 /* Check for bigkernel case */
+ or %g3, %lo(_end), %g3
+ srl %g3, 23, %g3 /* Check if _end > 8M */
+ brz,pt %g3, 1f
+ sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
+ sethi %hi(0x400000), %g3
+ or %g3, %lo(0x400000), %g3
+ add %g5, %g3, %g5 /* New tte data */
+ andn %g5, (_PAGE_G), %g5
+ sethi %hi(KERNBASE+0x400000), %g3
+ or %g3, %lo(KERNBASE+0x400000), %g3
+ set (0 << 16) | (14 << 3), %g7
+ stxa %g3, [%l7] ASI_DMMU
+ membar #Sync
+ stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ stxa %g3, [%l7] ASI_IMMU
+ membar #Sync
+ stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+ flush %g3
+ membar #Sync
+ sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
+ ba,pt %xcc, 1f
+ nop
+
+1: set sun4u_init, %g2
+ jmpl %g2 + %g0, %g0
+ nop
+
+ /* Spitfire equivalent of the Cheetah path above: probe the 64
+ * entry TLBs for the PROM mapping of %pc, build a locked
+ * KERNBASE TTE, and jump to sun4u_init.
+ */
+spitfire_boot:
+ /* Typically PROM has already enabled both MMU's and both on-chip
+ * caches, but we do it here anyway just to be paranoid.
+ */
+ mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
+ stxa %g1, [%g0] ASI_LSU_CONTROL
+ membar #Sync
+
+ /*
+ * Make sure we are in privileged mode, have address masking,
+ * using the ordinary globals and have enabled floating
+ * point.
+ *
+ * Again, typically PROM has left %pil at 13 or similar, and
+ * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
+ */
+ wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
+ wr %g0, 0, %fprs
+
+spitfire_create_mappings:
+ /* %g5 holds the tlb data */
+ sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
+ sllx %g5, 32, %g5
+ or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
+
+ /* Base of physical memory cannot reliably be assumed to be
+ * at 0x0! Figure out where it happens to be. -DaveM
+ */
+
+ /* Put PADDR tlb data mask into %g3. */
+ sethi %uhi(_PAGE_PADDR_SF), %g3
+ or %g3, %ulo(_PAGE_PADDR_SF), %g3
+ sllx %g3, 32, %g3
+ sethi %hi(_PAGE_PADDR_SF), %g7
+ or %g7, %lo(_PAGE_PADDR_SF), %g7
+ or %g3, %g7, %g3
+
+ /* Walk through entire ITLB, looking for entry which maps
+ * our %pc currently, stick PADDR from there into %g5 tlb data.
+ */
+ clr %l0 /* TLB entry walker. */
+ set 0x1fff, %l2 /* Page mask. */
+ rd %pc, %l3
+ andn %l3, %l2, %g2 /* vaddr comparator */
+1:
+ /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+ ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ nop
+ nop
+ nop
+ andn %g1, %l2, %g1 /* Get vaddr */
+ cmp %g1, %g2
+ be,a,pn %xcc, spitfire_got_tlbentry
+ ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
+ cmp %l0, (63 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ /* BUG() if we get here... */
+ ta 0x5
+
+spitfire_got_tlbentry:
+ /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
+ nop
+ nop
+ nop
+ and %g1, %g3, %g1 /* Mask to just get paddr bits. */
+ set 0x5fff, %l3 /* Mask offset to get phys base. */
+ andn %g1, %l3, %g1
+
+ /* NOTE: We hold on to %g1 paddr base as we need it below to lock
+ * NOTE: the PROM cif code into the TLB.
+ */
+
+ or %g5, %g1, %g5 /* Or it into TAG being built. */
+
+ clr %l0 /* TLB entry walker. */
+ sethi %hi(KERNBASE), %g3 /* 4M lower limit */
+ sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
+ mov TLB_TAG_ACCESS, %l7
+1:
+ /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+ ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ nop
+ nop
+ nop
+ andn %g1, %l2, %g1 /* Get vaddr */
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_IMMU
+ stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+2:
+ cmp %l0, (63 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ nop; nop; nop
+
+ clr %l0 /* TLB entry walker. */
+1:
+ /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+ ldxa [%l0] ASI_DTLB_TAG_READ, %g1
+ nop
+ nop
+ nop
+ andn %g1, %l2, %g1 /* Get vaddr */
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_DMMU
+ stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2:
+ cmp %l0, (63 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ nop; nop; nop
+
+
+ /* PROM never puts any TLB entries into the MMU with the lock bit
+ * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
+ */
+
+ sethi %hi(KERNBASE), %g3
+ mov (63 << 3), %g7
+ stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
+ stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
+ membar #Sync
+ stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
+ stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
+ membar #Sync
+ flush %g3
+ membar #Sync
+ sethi %hi(_end), %g3 /* Check for bigkernel case */
+ or %g3, %lo(_end), %g3
+ srl %g3, 23, %g3 /* Check if _end > 8M */
+ brz,pt %g3, 2f
+ sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
+ sethi %hi(0x400000), %g3
+ or %g3, %lo(0x400000), %g3
+ add %g5, %g3, %g5 /* New tte data */
+ andn %g5, (_PAGE_G), %g5
+ sethi %hi(KERNBASE+0x400000), %g3
+ or %g3, %lo(KERNBASE+0x400000), %g3
+ mov (62 << 3), %g7
+ stxa %g3, [%l7] ASI_DMMU
+ stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+ stxa %g3, [%l7] ASI_IMMU
+ stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+ flush %g3
+ membar #Sync
+ sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
+2: ba,pt %xcc, 1f
+ nop
+1:
+ set sun4u_init, %g2
+ jmpl %g2 + %g0, %g0
+ nop
+
+ /* Common post-boot setup: switch to context 0, strip the global
+ * bit from the KERNBASE TTEs, record the TLB type, apply CPU
+ * specific patches, set up the initial kernel stack, clear bss,
+ * then call prom_init and start_kernel.
+ */
+sun4u_init:
+ /* Set ctx 0 */
+ mov PRIMARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+ mov SECONDARY_CONTEXT, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+ /* We are now safely (we hope) in Nucleus context (0), rewrite
+ * the KERNBASE TTE's so they no longer have the global bit set.
+ * Don't forget to setup TAG_ACCESS first 8-)
+ */
+ mov TLB_TAG_ACCESS, %g2
+ stxa %g3, [%g2] ASI_IMMU
+ stxa %g3, [%g2] ASI_DMMU
+ membar #Sync
+
+ BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
+
+ ba,pt %xcc, spitfire_tlb_fixup
+ nop
+
+cheetah_tlb_fixup:
+ set (0 << 16) | (15 << 3), %g7
+ ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0
+ ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
+ andn %g1, (_PAGE_G), %g1
+ stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+
+ ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0
+ ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
+ andn %g1, (_PAGE_G), %g1
+ stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ /* Kill instruction prefetch queues. */
+ flush %g3
+ membar #Sync
+
+ mov 2, %g2 /* Set TLB type to cheetah+. */
+ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
+
+ mov 1, %g2 /* Set TLB type to cheetah. */
+
+1: sethi %hi(tlb_type), %g1
+ stw %g2, [%g1 + %lo(tlb_type)]
+
+ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
+ ba,pt %xcc, 2f
+ nop
+
+1: /* Patch context register writes to support nucleus page
+ * size correctly.
+ */
+ call cheetah_plus_patch_etrap
+ nop
+ call cheetah_plus_patch_rtrap
+ nop
+ call cheetah_plus_patch_fpdis
+ nop
+ call cheetah_plus_patch_winfixup
+ nop
+
+
+2: /* Patch copy/page operations to cheetah optimized versions. */
+ call cheetah_patch_copyops
+ nop
+ call cheetah_patch_cachetlbops
+ nop
+
+ ba,pt %xcc, tlb_fixup_done
+ nop
+
+spitfire_tlb_fixup:
+ mov (63 << 3), %g7
+ ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
+ andn %g1, (_PAGE_G), %g1
+ stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
+ membar #Sync
+
+ ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
+ andn %g1, (_PAGE_G), %g1
+ stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+
+ /* Kill instruction prefetch queues. */
+ flush %g3
+ membar #Sync
+
+ /* Set TLB type to spitfire. */
+ mov 0, %g2
+ sethi %hi(tlb_type), %g1
+ stw %g2, [%g1 + %lo(tlb_type)]
+
+tlb_fixup_done:
+ sethi %hi(init_thread_union), %g6
+ or %g6, %lo(init_thread_union), %g6
+ ldx [%g6 + TI_TASK], %g4
+ mov %sp, %l6
+ mov %o4, %l7
+
+#if 0 /* We don't do it like this anymore, but for historical hack value
+ * I leave this snippet here to show how crazy we can be sometimes. 8-)
+ */
+
+ /* Setup "Linux Current Register", thanks Sun 8-) */
+ wr %g0, 0x1, %pcr
+
+ /* Blackbird errata workaround. See commentary in
+ * smp.c:smp_percpu_timer_interrupt() for more
+ * information.
+ */
+ ba,pt %xcc, 99f
+ nop
+ .align 64
+99: wr %g6, %g0, %pic
+ rd %pic, %g0
+#endif
+
+ wr %g0, ASI_P, %asi
+ mov 1, %g1
+ sllx %g1, THREAD_SHIFT, %g1
+ sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ add %g6, %g1, %sp ! initial kernel stack at top of init_thread_union
+ mov 0, %fp
+
+ /* Set per-cpu pointer initially to zero, this makes
+ * the boot-cpu use the in-kernel-image per-cpu areas
+ * before setup_per_cpu_area() is invoked.
+ */
+ clr %g5
+
+ wrpr %g0, 0, %wstate
+ wrpr %g0, 0x0, %tl
+
+ /* Clear the bss */
+ sethi %hi(__bss_start), %o0
+ or %o0, %lo(__bss_start), %o0
+ sethi %hi(_end), %o1
+ or %o1, %lo(_end), %o1
+ call __bzero
+ sub %o1, %o0, %o1
+
+ mov %l6, %o1 ! OpenPROM stack
+ call prom_init
+ mov %l7, %o0 ! OpenPROM cif handler
+
+ /* Off we go.... */
+ call start_kernel
+ nop
+ /* Not reached... */
+
+/* IMPORTANT NOTE: Whenever making changes here, check
+ * trampoline.S as well. -jj */
+ .globl setup_tba
+ /* Install the kernel trap table (%tba), initialize the alternate
+ * and MMU global register sets, kill the PROM timer, and tell
+ * the PROM about our trap table.
+ */
+setup_tba: /* i0 = is_starfire */
+ save %sp, -160, %sp
+
+ rdpr %tba, %g7
+ sethi %hi(prom_tba), %o1
+ or %o1, %lo(prom_tba), %o1
+ stx %g7, [%o1] ! remember the PROM's trap table base
+
+ /* Setup "Linux" globals 8-) */
+ rdpr %pstate, %o1
+ mov %g6, %o2
+ wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate
+ sethi %hi(sparc64_ttable_tl0), %g1
+ wrpr %g1, %tba
+ mov %o2, %g6
+
+ /* Set up MMU globals */
+ wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate
+
+ /* Set fixed globals used by dTLB miss handler. */
+#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
+#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+
+ mov TSB_REG, %g1
+ stxa %g0, [%g1] ASI_DMMU
+ membar #Sync
+ stxa %g0, [%g1] ASI_IMMU
+ membar #Sync
+ mov TLB_SFSR, %g1
+ sethi %uhi(KERN_HIGHBITS), %g2
+ or %g2, %ulo(KERN_HIGHBITS), %g2
+ sllx %g2, 32, %g2
+ or %g2, KERN_LOWBITS, %g2
+
+ BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
+ ba,pt %xcc, spitfire_vpte_base
+ nop
+
+cheetah_vpte_base:
+ sethi %uhi(VPTE_BASE_CHEETAH), %g3
+ or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
+ ba,pt %xcc, 2f
+ sllx %g3, 32, %g3
+
+spitfire_vpte_base:
+ sethi %uhi(VPTE_BASE_SPITFIRE), %g3
+ or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
+ sllx %g3, 32, %g3
+
+2:
+ clr %g7
+#undef KERN_HIGHBITS
+#undef KERN_LOWBITS
+
+ /* Kill PROM timer */
+ sethi %hi(0x80000000), %o2
+ sllx %o2, 32, %o2
+ wr %o2, 0, %tick_cmpr
+
+ BRANCH_IF_ANY_CHEETAH(o2,o3,1f)
+
+ ba,pt %xcc, 2f
+ nop
+
+ /* Disable STICK_INT interrupts. */
+1:
+ sethi %hi(0x80000000), %o2
+ sllx %o2, 32, %o2
+ wr %o2, %asr25
+
+ /* Ok, we're done setting up all the state our trap mechanims needs,
+ * now get back into normal globals and let the PROM know what is up.
+ */
+2:
+ wrpr %g0, %g0, %wstate
+ wrpr %o1, PSTATE_IE, %pstate
+
+ call init_irqwork_curcpu
+ nop
+
+ call prom_set_trap_table
+ sethi %hi(sparc64_ttable_tl0), %o0
+
+ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
+ ba,pt %xcc, 2f
+ nop
+
+1: /* Start using proper page size encodings in ctx register. */
+ sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
+ mov PRIMARY_CONTEXT, %g1
+ sllx %g3, 32, %g3
+ sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
+ or %g3, %g2, %g3
+ stxa %g3, [%g1] ASI_DMMU
+ membar #Sync
+
+2:
+ rdpr %pstate, %o1
+ or %o1, PSTATE_IE, %o1
+ wrpr %o1, 0, %pstate
+
+ ret
+ restore
+
+/*
+ * The following skips make sure the trap table in ttable.S is aligned
+ * on a 32K boundary as required by the v9 specs for TBA register.
+ */
+ /* Pad so the trap table (included below via ttable.S) lands on
+ * the 32K alignment the %tba register requires.
+ */
+sparc64_boot_end:
+ .skip 0x2000 + _start - sparc64_boot_end
+bootup_user_stack_end:
+ .skip 0x2000
+
+#ifdef CONFIG_SBUS
+/* This is just a hack to fool make depend config.h discovering
+ strategy: As the .S files below need config.h, but
+ make depend does not find it for them, we include config.h
+ in head.S */
+#endif
+
+! 0x0000000000408000
+
+#include "ttable.S"
+#include "systbls.S"
+
+ .align 1024
+ .globl swapper_pg_dir
+swapper_pg_dir:
+ .word 0
+
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
+
+ /* This is just anal retentiveness on my part... */
+ .align 16384
+
+ .data
+ .align 8
+ .globl prom_tba, tlb_type
+prom_tba: .xword 0
+tlb_type: .word 0 /* Must NOT end up in BSS */
+ .section ".fixup",#alloc,#execinstr
+ .globl __ret_efault
+__ret_efault:
+ ret
+ restore %g0, -EFAULT, %o0
+
diff --git a/arch/sparc64/kernel/idprom.c b/arch/sparc64/kernel/idprom.c
new file mode 100644
index 00000000000..3b6789e09a7
--- /dev/null
+++ b/arch/sparc64/kernel/idprom.c
@@ -0,0 +1,49 @@
+/* $Id: idprom.c,v 1.3 1999/08/31 06:54:53 davem Exp $
+ * idprom.c: Routines to load the idprom into kernel addresses and
+ * interpret the data contained within.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/oplib.h>
+#include <asm/idprom.h>
+
+struct idprom *idprom;
+static struct idprom idprom_buffer;
+
+/* Calculate the IDPROM checksum (xor of the data bytes).
+ * Only the first 15 bytes (offsets 0x00-0x0E) participate; the stored
+ * checksum byte itself is excluded from the XOR.
+ */
+static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
+{
+ unsigned char cksum, i, *ptr = (unsigned char *)idprom;
+
+ for (i = cksum = 0; i <= 0x0E; i++)
+ cksum ^= *ptr++;
+
+ return cksum;
+}
+
+/* Create a local IDPROM copy and verify integrity.
+ * Copies the IDPROM contents from the PROM into a static kernel buffer,
+ * sanity-checks the format byte and checksum (warnings only -- boot
+ * proceeds regardless), and logs the ethernet address.
+ */
+void __init idprom_init(void)
+{
+ prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
+
+ /* Publish the kernel-resident copy through the global pointer. */
+ idprom = &idprom_buffer;
+
+ if (idprom->id_format != 0x01) {
+ prom_printf("IDPROM: Warning, unknown format type!\n");
+ }
+
+ /* NOTE(review): calc_idprom_cksum() is evaluated twice on the failure
+ * path (once for the compare, once for the printout); harmless since
+ * it is cheap and side-effect free.
+ */
+ if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
+ prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
+ idprom->id_cksum, calc_idprom_cksum(idprom));
+ }
+
+ printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ idprom->id_ethaddr[0], idprom->id_ethaddr[1],
+ idprom->id_ethaddr[2], idprom->id_ethaddr[3],
+ idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
+}
diff --git a/arch/sparc64/kernel/init_task.c b/arch/sparc64/kernel/init_task.c
new file mode 100644
index 00000000000..329b38fa5c8
--- /dev/null
+++ b/arch/sparc64/kernel/init_task.c
@@ -0,0 +1,35 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+/* Statically-initialized instances of the boot ("swapper") task's
+ * supporting structures.  All later tasks get these allocated
+ * dynamically at fork time.
+ */
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/* .text section in head.S is aligned at 2 page boundary and this gets linked
+ * right after that so that the init_thread_union is aligned properly as well.
+ * We really don't need this special alignment like the Intel does, but
+ * I do it anyways for completeness.
+ */
+__asm__ (".text");
+union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+EXPORT_SYMBOL(init_task);
+
+/* Switch back to .data for the task struct itself (the __asm__(".text")
+ * above redirected subsequent definitions into the text section).
+ */
+__asm__(".data");
+struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
new file mode 100644
index 00000000000..43fc3173d48
--- /dev/null
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -0,0 +1,597 @@
+/* $Id: ioctl32.c,v 1.136 2002/01/14 09:49:52 davem Exp $
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ *
+ * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 2003 Pavel Machek (pavel@suse.cz)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ */
+
+#define INCLUDES
+#include "compat_ioctl.c"
+#include <linux/ncp_fs.h>
+#include <linux/syscalls.h>
+#include <asm/fbio.h>
+#include <asm/kbio.h>
+#include <asm/vuid_event.h>
+#include <asm/envctrl.h>
+#include <asm/display7seg.h>
+#include <asm/openpromio.h>
+#include <asm/audioio.h>
+#include <asm/watchdog.h>
+
+/* Use this to get at 32-bit user passed pointers.
+ * See sys_sparc32.c for description about it.
+ */
+#define A(__x) compat_ptr(__x)
+
+/* Carve a len-byte scratch area out of the current task's user stack,
+ * just below the saved stack pointer.  For 64-bit tasks the saved %sp
+ * is biased, so STACK_BIAS is added back to obtain the real address.
+ * No bounds checking: the caller trusts the stack has room.
+ */
+static __inline__ void *alloc_user_space(long len)
+{
+ struct pt_regs *regs = current_thread_info()->kregs;
+ unsigned long usp = regs->u_regs[UREG_I6];
+
+ if (!(test_thread_flag(TIF_32BIT)))
+ usp += STACK_BIAS;
+
+ return (void *) (usp - len);
+}
+
+#define CODE
+#include "compat_ioctl.c"
+
+struct fbcmap32 {
+ int index; /* first element (0 origin) */
+ int count;
+ u32 red;
+ u32 green;
+ u32 blue;
+};
+
+#define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32)
+#define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32)
+
+/* Translate a 32-bit FBIOPUTCMAP/FBIOGETCMAP request to the native
+ * layout: index/count are copied verbatim, while the red/green/blue
+ * 32-bit user pointers are widened into a native fbcmap built in
+ * compat-allocated user space, then forwarded to sys_ioctl().
+ * Returns -EFAULT if any user access fails.
+ */
+static int fbiogetputcmap(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct fbcmap32 __user *argp = (void __user *)arg;
+ struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
+ u32 addr;
+ int ret;
+
+ ret = copy_in_user(p, argp, 2 * sizeof(int));
+ ret |= get_user(addr, &argp->red);
+ ret |= put_user(compat_ptr(addr), &p->red);
+ ret |= get_user(addr, &argp->green);
+ ret |= put_user(compat_ptr(addr), &p->green);
+ ret |= get_user(addr, &argp->blue);
+ ret |= put_user(compat_ptr(addr), &p->blue);
+ if (ret)
+ return -EFAULT;
+ return sys_ioctl(fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC, (unsigned long)p);
+}
+
+struct fbcursor32 {
+ short set; /* what to set, choose from the list above */
+ short enable; /* cursor on/off */
+ struct fbcurpos pos; /* cursor position */
+ struct fbcurpos hot; /* cursor hot spot */
+ struct fbcmap32 cmap; /* color map info */
+ struct fbcurpos size; /* cursor bit map size */
+ u32 image; /* cursor image bits */
+ u32 mask; /* cursor mask bits */
+};
+
+#define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32)
+#define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32)
+
+/* Translate a 32-bit FBIOSCURSOR request: the fixed-size leading fields
+ * (set/enable/pos/hot) and size are copied as-is; the embedded cmap's
+ * red/green/blue pointers plus the image/mask pointers are widened from
+ * compat_uptr_t to native pointers.  Forwards to sys_ioctl() on success,
+ * returns -EFAULT on any user access failure.
+ */
+static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
+ struct fbcursor32 __user *argp = (void __user *)arg;
+ compat_uptr_t addr;
+ int ret;
+
+ ret = copy_in_user(p, argp,
+ 2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
+ ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
+ ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
+ ret |= get_user(addr, &argp->cmap.red);
+ ret |= put_user(compat_ptr(addr), &p->cmap.red);
+ ret |= get_user(addr, &argp->cmap.green);
+ ret |= put_user(compat_ptr(addr), &p->cmap.green);
+ ret |= get_user(addr, &argp->cmap.blue);
+ ret |= put_user(compat_ptr(addr), &p->cmap.blue);
+ ret |= get_user(addr, &argp->mask);
+ ret |= put_user(compat_ptr(addr), &p->mask);
+ ret |= get_user(addr, &argp->image);
+ ret |= put_user(compat_ptr(addr), &p->image);
+ if (ret)
+ return -EFAULT;
+ return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
+}
+
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+/* This really belongs in include/linux/drm.h -DaveM */
+#include "../../../drivers/char/drm/drm.h"
+
+typedef struct drm32_version {
+ int version_major; /* Major version */
+ int version_minor; /* Minor version */
+ int version_patchlevel;/* Patch level */
+ int name_len; /* Length of name buffer */
+ u32 name; /* Name of driver */
+ int date_len; /* Length of date buffer */
+ u32 date; /* User-space buffer to hold date */
+ int desc_len; /* Length of desc buffer */
+ u32 desc; /* User-space buffer to hold desc */
+} drm32_version_t;
+#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
+
+/* Convert DRM32_IOCTL_VERSION: build a native drm_version_t in compat
+ * user space (version numbers zeroed, buffer lengths copied, 32-bit
+ * buffer pointers widened), call the native ioctl, then copy the
+ * version numbers and updated lengths back to the 32-bit structure.
+ */
+static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
+ drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
+ int ret;
+
+ if (clear_user(p, 3 * sizeof(int)) ||
+ get_user(n, &uversion->name_len) ||
+ put_user(n, &p->name_len) ||
+ get_user(addr, &uversion->name) ||
+ put_user(compat_ptr(addr), &p->name) ||
+ get_user(n, &uversion->date_len) ||
+ put_user(n, &p->date_len) ||
+ get_user(addr, &uversion->date) ||
+ put_user(compat_ptr(addr), &p->date) ||
+ get_user(n, &uversion->desc_len) ||
+ put_user(n, &p->desc_len) ||
+ get_user(addr, &uversion->desc) ||
+ put_user(compat_ptr(addr), &p->desc))
+ return -EFAULT;
+
+ ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
+ if (ret)
+ return ret;
+
+ /* Propagate results: major/minor/patchlevel plus the (possibly
+ * updated) string lengths.  The string data itself was written
+ * directly through the widened pointers.
+ */
+ if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
+ get_user(n, &p->name_len) ||
+ put_user(n, &uversion->name_len) ||
+ get_user(n, &p->date_len) ||
+ put_user(n, &uversion->date_len) ||
+ get_user(n, &p->desc_len) ||
+ put_user(n, &uversion->desc_len))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm32_unique {
+ int unique_len; /* Length of unique */
+ u32 unique; /* Unique name for driver instantiation */
+} drm32_unique_t;
+#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
+#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
+
+/* Convert DRM32_IOCTL_GET_UNIQUE / DRM32_IOCTL_SET_UNIQUE: widen the
+ * unique-name pointer into a native drm_unique_t, dispatch to the
+ * matching native ioctl, and copy the resulting length back.
+ */
+static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
+ drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
+ int ret;
+
+ if (get_user(n, &uarg->unique_len) ||
+ put_user(n, &p->unique_len) ||
+ get_user(addr, &uarg->unique) ||
+ put_user(compat_ptr(addr), &p->unique))
+ return -EFAULT;
+
+ if (cmd == DRM32_IOCTL_GET_UNIQUE)
+ ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
+ else
+ ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
+
+ if (ret)
+ return ret;
+
+ if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm32_map {
+ u32 offset; /* Requested physical address (0 for SAREA)*/
+ u32 size; /* Requested physical size (bytes) */
+ drm_map_type_t type; /* Type of memory to map */
+ drm_map_flags_t flags; /* Flags */
+ u32 handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* MTRR slot used */
+ /* Private data */
+} drm32_map_t;
+#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
+
+/* Convert DRM32_IOCTL_ADD_MAP using a kernel-resident drm_map_t rather
+ * than compat user space: the 32-bit fields are read into karg (the
+ * handle round-trips through a u32), then sys_ioctl() is invoked under
+ * set_fs(KERNEL_DS) so the kernel pointer passes the uaccess checks.
+ * On success the updated fields are written back to the 32-bit struct.
+ */
+static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_map_t __user *uarg = (drm32_map_t __user *) arg;
+ drm_map_t karg;
+ mm_segment_t old_fs;
+ u32 tmp;
+ int ret;
+
+ ret = get_user(karg.offset, &uarg->offset);
+ ret |= get_user(karg.size, &uarg->size);
+ ret |= get_user(karg.type, &uarg->type);
+ ret |= get_user(karg.flags, &uarg->flags);
+ ret |= get_user(tmp, &uarg->handle);
+ ret |= get_user(karg.mtrr, &uarg->mtrr);
+ if (ret)
+ return -EFAULT;
+
+ karg.handle = (void *) (unsigned long) tmp;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ ret = put_user(karg.offset, &uarg->offset);
+ ret |= put_user(karg.size, &uarg->size);
+ ret |= put_user(karg.type, &uarg->type);
+ ret |= put_user(karg.flags, &uarg->flags);
+ /* Narrow the kernel-filled handle back to 32 bits. */
+ tmp = (u32) (long)karg.handle;
+ ret |= put_user(tmp, &uarg->handle);
+ ret |= put_user(karg.mtrr, &uarg->mtrr);
+ if (ret)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+typedef struct drm32_buf_info {
+ int count; /* Entries in list */
+ u32 list; /* (drm_buf_desc_t *) */
+} drm32_buf_info_t;
+#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
+
+/* Convert DRM32_IOCTL_INFO_BUFS: copy count, widen the list pointer,
+ * call the native ioctl, and copy the (possibly updated) count back.
+ */
+static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
+ drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
+ int ret;
+
+ if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
+ get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+ return -EFAULT;
+
+ ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
+ if (ret)
+ return ret;
+
+ if (get_user(n, &p->count) || put_user(n, &uarg->count))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm32_buf_free {
+ int count;
+ u32 list; /* (int *) */
+} drm32_buf_free_t;
+#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
+
+/* Convert DRM32_IOCTL_FREE_BUFS: copy count and widen the index-list
+ * pointer, then forward.  Nothing needs copying back afterwards.
+ */
+static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
+ drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int n;
+
+ if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
+ get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
+ return -EFAULT;
+
+ return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
+}
+
+typedef struct drm32_buf_pub {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int used; /* Amount of buffer in use (for DMA) */
+ u32 address; /* Address of buffer (void *) */
+} drm32_buf_pub_t;
+
+typedef struct drm32_buf_map {
+ int count; /* Length of buflist */
+ u32 virtual; /* Mmaped area in user-virtual (void *) */
+ u32 list; /* Buffer information (drm_buf_pub_t *) */
+} drm32_buf_map_t;
+#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
+
+/* Convert DRM32_IOCTL_MAP_BUFS.  A native drm_buf_map_t plus a native
+ * drm_buf_pub_t array (one entry per buffer, placed immediately after
+ * the header at arg64 + 1) are built in compat user space.  Each
+ * 32-bit list entry is widened on the way in and narrowed back on the
+ * way out after the native ioctl has filled in the results.
+ */
+static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
+ drm32_buf_pub_t __user *ulist;
+ drm_buf_map_t __user *arg64;
+ drm_buf_pub_t __user *list;
+ int orig_count, ret, i;
+ int n;
+ compat_uptr_t addr;
+
+ if (get_user(orig_count, &uarg->count))
+ return -EFAULT;
+
+ /* Single allocation: header followed by the entry array. */
+ arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
+ (size_t)orig_count * sizeof(drm_buf_pub_t));
+ list = (void __user *)(arg64 + 1);
+
+ if (put_user(orig_count, &arg64->count) ||
+ put_user(list, &arg64->list) ||
+ get_user(addr, &uarg->virtual) ||
+ put_user(compat_ptr(addr), &arg64->virtual) ||
+ get_user(addr, &uarg->list))
+ return -EFAULT;
+
+ ulist = compat_ptr(addr);
+
+ /* Widen each 32-bit entry into the native array. */
+ for (i = 0; i < orig_count; i++) {
+ if (get_user(n, &ulist[i].idx) ||
+ put_user(n, &list[i].idx) ||
+ get_user(n, &ulist[i].total) ||
+ put_user(n, &list[i].total) ||
+ get_user(n, &ulist[i].used) ||
+ put_user(n, &list[i].used) ||
+ get_user(addr, &ulist[i].address) ||
+ put_user(compat_ptr(addr), &list[i].address))
+ return -EFAULT;
+ }
+
+ ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
+ if (ret)
+ return ret;
+
+ /* Narrow the results back into the 32-bit entries; address is a
+ * full native pointer truncated to 32 bits for the compat caller.
+ */
+ for (i = 0; i < orig_count; i++) {
+ void __user *p;
+ if (get_user(n, &list[i].idx) ||
+ put_user(n, &ulist[i].idx) ||
+ get_user(n, &list[i].total) ||
+ put_user(n, &ulist[i].total) ||
+ get_user(n, &list[i].used) ||
+ put_user(n, &ulist[i].used) ||
+ get_user(p, &list[i].address) ||
+ put_user((unsigned long)p, &ulist[i].address))
+ return -EFAULT;
+ }
+
+ if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm32_dma {
+ /* Indices here refer to the offset into
+ buflist in drm_buf_get_t. */
+ int context; /* Context handle */
+ int send_count; /* Number of buffers to send */
+ u32 send_indices; /* List of handles to buffers (int *) */
+ u32 send_sizes; /* Lengths of data to send (int *) */
+ drm_dma_flags_t flags; /* Flags */
+ int request_count; /* Number of buffers requested */
+ int request_size; /* Desired size for buffers */
+ u32 request_indices; /* Buffer information (int *) */
+ u32 request_sizes; /* (int *) */
+ int granted_count; /* Number of buffers granted */
+} drm32_dma_t;
+#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
+
+/* RED PEN The DRM layer blindly dereferences the send/request
+ * index/size arrays even though they are userland
+ * pointers. -DaveM
+ */
+static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
+ drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int ret;
+
+ if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
+ get_user(addr, &uarg->send_indices) ||
+ put_user(compat_ptr(addr), &p->send_indices) ||
+ get_user(addr, &uarg->send_sizes) ||
+ put_user(compat_ptr(addr), &p->send_sizes) ||
+ copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
+ copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
+ copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
+ get_user(addr, &uarg->request_indices) ||
+ put_user(compat_ptr(addr), &p->request_indices) ||
+ get_user(addr, &uarg->request_sizes) ||
+ put_user(compat_ptr(addr), &p->request_sizes) ||
+ copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
+ return -EFAULT;
+
+ ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
+ if (ret)
+ return ret;
+
+ if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
+ copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
+ copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
+ copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
+ copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
+ return -EFAULT;
+
+ return 0;
+}
+
+typedef struct drm32_ctx_res {
+ int count;
+ u32 contexts; /* (drm_ctx_t *) */
+} drm32_ctx_res_t;
+#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
+
+/* Convert DRM32_IOCTL_RES_CTX: copy the count, widen the contexts
+ * pointer, call the native ioctl, and copy the count back.
+ */
+static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
+ drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
+ compat_uptr_t addr;
+ int ret;
+
+ if (copy_in_user(p, uarg, sizeof(int)) ||
+ get_user(addr, &uarg->contexts) ||
+ put_user(compat_ptr(addr), &p->contexts))
+ return -EFAULT;
+
+ ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
+ if (ret)
+ return ret;
+
+ if (copy_in_user(uarg, p, sizeof(int)))
+ return -EFAULT;
+
+ return 0;
+}
+
+#endif
+
+typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
+
+#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
+#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl32_handler_t)(handler), NULL },
+#define IOCTL_TABLE_START \
+ struct ioctl_trans ioctl_start[] = {
+#define IOCTL_TABLE_END \
+ };
+
+IOCTL_TABLE_START
+#include <linux/compat_ioctl.h>
+#define DECLARES
+#include "compat_ioctl.c"
+COMPATIBLE_IOCTL(TIOCSTART)
+COMPATIBLE_IOCTL(TIOCSTOP)
+COMPATIBLE_IOCTL(TIOCSLTC)
+COMPATIBLE_IOCTL(FBIOGTYPE)
+COMPATIBLE_IOCTL(FBIOSATTR)
+COMPATIBLE_IOCTL(FBIOGATTR)
+COMPATIBLE_IOCTL(FBIOSVIDEO)
+COMPATIBLE_IOCTL(FBIOGVIDEO)
+COMPATIBLE_IOCTL(FBIOGCURSOR32) /* This is not implemented yet. Later it should be converted... */
+COMPATIBLE_IOCTL(FBIOSCURPOS)
+COMPATIBLE_IOCTL(FBIOGCURPOS)
+COMPATIBLE_IOCTL(FBIOGCURMAX)
+/* Little k */
+COMPATIBLE_IOCTL(KIOCTYPE)
+COMPATIBLE_IOCTL(KIOCLAYOUT)
+COMPATIBLE_IOCTL(KIOCGTRANS)
+COMPATIBLE_IOCTL(KIOCTRANS)
+COMPATIBLE_IOCTL(KIOCCMD)
+COMPATIBLE_IOCTL(KIOCSDIRECT)
+COMPATIBLE_IOCTL(KIOCSLED)
+COMPATIBLE_IOCTL(KIOCGLED)
+COMPATIBLE_IOCTL(KIOCSRATE)
+COMPATIBLE_IOCTL(KIOCGRATE)
+COMPATIBLE_IOCTL(VUIDSFORMAT)
+COMPATIBLE_IOCTL(VUIDGFORMAT)
+/* Little v, the video4linux ioctls */
+COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
+COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
+COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE)
+COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE)
+COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE)
+COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS)
+COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS)
+COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE)
+COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE)
+COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE)
+COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE)
+COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS)
+/* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */
+COMPATIBLE_IOCTL(D7SIOCWR)
+COMPATIBLE_IOCTL(D7SIOCTM)
+/* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
+ * embedded pointers in the arg which we'd need to clean up...
+ */
+COMPATIBLE_IOCTL(OPROMGETOPT)
+COMPATIBLE_IOCTL(OPROMSETOPT)
+COMPATIBLE_IOCTL(OPROMNXTOPT)
+COMPATIBLE_IOCTL(OPROMSETOPT2)
+COMPATIBLE_IOCTL(OPROMNEXT)
+COMPATIBLE_IOCTL(OPROMCHILD)
+COMPATIBLE_IOCTL(OPROMGETPROP)
+COMPATIBLE_IOCTL(OPROMNXTPROP)
+COMPATIBLE_IOCTL(OPROMU2P)
+COMPATIBLE_IOCTL(OPROMGETCONS)
+COMPATIBLE_IOCTL(OPROMGETFBNAME)
+COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
+COMPATIBLE_IOCTL(OPROMSETCUR)
+COMPATIBLE_IOCTL(OPROMPCI2NODE)
+COMPATIBLE_IOCTL(OPROMPATH2NODE)
+/* Big L */
+COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
+COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
+/* Big A */
+COMPATIBLE_IOCTL(AUDIO_GETINFO)
+COMPATIBLE_IOCTL(AUDIO_SETINFO)
+COMPATIBLE_IOCTL(AUDIO_DRAIN)
+COMPATIBLE_IOCTL(AUDIO_GETDEV)
+COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
+COMPATIBLE_IOCTL(AUDIO_FLUSH)
+COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
+COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
+COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC)
+COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK)
+COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK)
+COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL)
+COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS)
+COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS)
+COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX)
+COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX)
+COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX)
+COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX)
+COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX)
+COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX)
+COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW)
+COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW)
+COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
+COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
+COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
+#endif /* DRM */
+COMPATIBLE_IOCTL(WIOCSTART)
+COMPATIBLE_IOCTL(WIOCSTOP)
+COMPATIBLE_IOCTL(WIOCGSTAT)
+/* And these ioctls need translation */
+/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
+HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
+HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
+HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
+HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
+HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
+HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
+HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
+HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
+HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
+HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
+HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
+#endif /* DRM */
+#if 0
+HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
+HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
+HANDLE_IOCTL(RTC32_EPOCH_READ, do_rtc_ioctl)
+HANDLE_IOCTL(RTC32_EPOCH_SET, do_rtc_ioctl)
+#endif
+/* take care of sizeof(sizeof()) breakage */
+IOCTL_TABLE_END
+
+int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
new file mode 100644
index 00000000000..12c93a3eee2
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -0,0 +1,231 @@
+/* $Id: iommu_common.c,v 1.9 2001/12/17 07:05:09 davem Exp $
+ * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include "iommu_common.h"
+
+/* You are _strongly_ advised to enable the following debugging code
+ * any time you make changes to the sg code below, run it for a while
+ * with filesystems mounted read-only before buying the farm... -DaveM
+ */
+
+#ifdef VERIFY_SG
+/* Debug check (VERIFY_SG only): verify that the summed sg lengths match
+ * the summed dma lengths, and that the dma segments span exactly the
+ * expected number of IO pages.  Returns 0 on success, -1 (after a
+ * printk) on any mismatch.
+ */
+static int verify_lengths(struct scatterlist *sg, int nents, int npages)
+{
+ int sg_len, dma_len;
+ int i, pgcount;
+
+ sg_len = 0;
+ for (i = 0; i < nents; i++)
+ sg_len += sg[i].length;
+
+ /* dma_length == 0 terminates the mapped portion of the list. */
+ dma_len = 0;
+ for (i = 0; i < nents && sg[i].dma_length; i++)
+ dma_len += sg[i].dma_length;
+
+ if (sg_len != dma_len) {
+ printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
+ sg_len, dma_len);
+ return -1;
+ }
+
+ /* Count IO pages covered by each dma segment, rounding the start
+ * down and the end up to IO page boundaries.
+ */
+ pgcount = 0;
+ for (i = 0; i < nents && sg[i].dma_length; i++) {
+ unsigned long start, end;
+
+ start = sg[i].dma_address;
+ start = start & IO_PAGE_MASK;
+
+ end = sg[i].dma_address + sg[i].dma_length;
+ end = (end + (IO_PAGE_SIZE - 1)) & IO_PAGE_MASK;
+
+ pgcount += ((end - start) >> IO_PAGE_SHIFT);
+ }
+
+ if (pgcount != npages) {
+ printk("verify_lengths: Error, page count wrong, "
+ "npages[%d] pgcount[%d]\n",
+ npages, pgcount);
+ return -1;
+ }
+
+ /* This test passes... */
+ return 0;
+}
+
+/* Debug check (VERIFY_SG only): walk one dma segment (dma_sg) and the
+ * sg entries backing it, verifying sub-page offsets agree and that each
+ * IOPTE maps the expected physical page.  *__sg and *__iopte are
+ * advanced past the consumed entries; returns the remaining nents, or
+ * -1 on an inconsistency.
+ */
+static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
+{
+ struct scatterlist *sg = *__sg;
+ iopte_t *iopte = *__iopte;
+ u32 dlen = dma_sg->dma_length;
+ u32 daddr;
+ unsigned int sglen;
+ unsigned long sgaddr;
+
+ daddr = dma_sg->dma_address;
+ sglen = sg->length;
+ sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+ while (dlen > 0) {
+ unsigned long paddr;
+
+ /* SG and DMA_SG must begin at the same sub-page boundary. */
+ if ((sgaddr & ~IO_PAGE_MASK) != (daddr & ~IO_PAGE_MASK)) {
+ printk("verify_one_map: Wrong start offset "
+ "sg[%08lx] dma[%08x]\n",
+ sgaddr, daddr);
+ nents = -1;
+ goto out;
+ }
+
+ /* Verify the IOPTE points to the right page. */
+ paddr = iopte_val(*iopte) & IOPTE_PAGE;
+ if ((paddr + PAGE_OFFSET) != (sgaddr & IO_PAGE_MASK)) {
+ printk("verify_one_map: IOPTE[%08lx] maps the "
+ "wrong page, should be [%08lx]\n",
+ iopte_val(*iopte), (sgaddr & IO_PAGE_MASK) - PAGE_OFFSET);
+ nents = -1;
+ goto out;
+ }
+
+ /* If this SG crosses a page, adjust to that next page
+ * boundary and loop.
+ */
+ if ((sgaddr & IO_PAGE_MASK) ^ ((sgaddr + sglen - 1) & IO_PAGE_MASK)) {
+ unsigned long next_page, diff;
+
+ next_page = (sgaddr + IO_PAGE_SIZE) & IO_PAGE_MASK;
+ diff = next_page - sgaddr;
+ sgaddr += diff;
+ daddr += diff;
+ sglen -= diff;
+ dlen -= diff;
+ if (dlen > 0)
+ iopte++;
+ continue;
+ }
+
+ /* SG wholly consumed within this page. */
+ daddr += sglen;
+ dlen -= sglen;
+
+ if (dlen > 0 && ((daddr & ~IO_PAGE_MASK) == 0))
+ iopte++;
+
+ sg++;
+ if (--nents <= 0)
+ break;
+ sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+ sglen = sg->length;
+ }
+ /* NOTE(review): dlen is declared u32, so this underflow/overrun
+ * check can never fire -- a signed type would be needed for the
+ * comparison to be meaningful.  Debug-only code, left as-is.
+ */
+ if (dlen < 0) {
+ /* Transfer overrun, big problems. */
+ printk("verify_one_map: Transfer overrun by %d bytes.\n",
+ -dlen);
+ nents = -1;
+ } else {
+ /* Advance to next dma_sg implies that the next iopte will
+ * begin it.
+ */
+ iopte++;
+ }
+
+out:
+ *__sg = sg;
+ *__iopte = iopte;
+ return nents;
+}
+
+/* Debug check (VERIFY_SG only): verify every dma segment in the list
+ * via verify_one_map(), stopping at the first zero-length dma entry.
+ * Returns 0 when all sg entries were consumed exactly, -1 otherwise.
+ */
+static int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
+{
+ struct scatterlist *dma_sg = sg;
+ struct scatterlist *orig_dma_sg = dma_sg;
+ int orig_nents = nents;
+
+ for (;;) {
+ nents = verify_one_map(dma_sg, &sg, nents, &iopte);
+ if (nents <= 0)
+ break;
+ dma_sg++;
+ if (dma_sg->dma_length == 0)
+ break;
+ }
+
+ /* Leftover sg entries: dma list ended before consuming them all. */
+ if (nents > 0) {
+ printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
+ nents);
+ return -1;
+ }
+
+ /* nents < 0 is verify_one_map()'s error indication. */
+ if (nents < 0) {
+ printk("verify_maps: Error, messed up mappings, "
+ "at sg %d dma_sg %d\n",
+ (int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
+ return -1;
+ }
+
+ /* This test passes... */
+ return 0;
+}
+
+/* Debug entry point (VERIFY_SG only): run both consistency checks on a
+ * freshly-built sg mapping and, on failure, dump every entry of the
+ * list for post-mortem analysis.  Purely diagnostic -- never alters
+ * the mapping or reports errors to the caller.
+ */
+void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages)
+{
+ if (verify_lengths(sg, nents, npages) < 0 ||
+ verify_maps(sg, nents, iopte) < 0) {
+ int i;
+
+ printk("verify_sglist: Crap, messed up mappings, dumping, iodma at ");
+ printk("%016lx.\n", sg->dma_address & IO_PAGE_MASK);
+
+ for (i = 0; i < nents; i++) {
+ printk("sg(%d): page_addr(%p) off(%x) length(%x) "
+ "dma_address[%016lx] dma_length[%016lx]\n",
+ i,
+ page_address(sg[i].page), sg[i].offset,
+ sg[i].length,
+ sg[i].dma_address, sg[i].dma_length);
+ }
+ }
+
+ /* Seems to be ok */
+}
+#endif
+
+/* Coalesce a scatterlist in place: walk the entries and merge runs that
+ * are virtually contiguous (per VCONTIG) into single dma segments,
+ * writing each segment's dma_address (page offset carried in the low
+ * bits, successive segments advanced to fresh IO-page-aligned slots)
+ * and dma_length back into the list starting at its head.  Returns the
+ * total number of IO pages the mapping will need.
+ */
+unsigned long prepare_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *dma_sg = sg;
+ unsigned long prev;
+ u32 dent_addr, dent_len;
+
+ /* Seed the first segment from the first sg entry; dent_addr holds
+ * only the sub-page offset at this point.
+ */
+ prev = (unsigned long) (page_address(sg->page) + sg->offset);
+ prev += (unsigned long) (dent_len = sg->length);
+ dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
+ & (IO_PAGE_SIZE - 1UL));
+ while (--nents) {
+ unsigned long addr;
+
+ sg++;
+ addr = (unsigned long) (page_address(sg->page) + sg->offset);
+ if (! VCONTIG(prev, addr)) {
+ /* Contiguity broken: close out the current segment
+ * and start a new one at the next IO page boundary,
+ * preserving the new entry's sub-page offset.
+ */
+ dma_sg->dma_address = dent_addr;
+ dma_sg->dma_length = dent_len;
+ dma_sg++;
+
+ dent_addr = ((dent_addr +
+ dent_len +
+ (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT);
+ dent_addr <<= IO_PAGE_SHIFT;
+ dent_addr += addr & (IO_PAGE_SIZE - 1UL);
+ dent_len = 0;
+ }
+ dent_len += sg->length;
+ prev = addr + sg->length;
+ }
+ /* Close out the final segment. */
+ dma_sg->dma_address = dent_addr;
+ dma_sg->dma_length = dent_len;
+
+ return ((unsigned long) dent_addr +
+ (unsigned long) dent_len +
+ (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
+}
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
new file mode 100644
index 00000000000..ad791014419
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -0,0 +1,48 @@
+/* $Id: iommu_common.h,v 1.5 2001/12/11 09:41:01 davem Exp $
+ * iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/iommu.h>
+#include <asm/scatterlist.h>
+
+/*
+ * These give mapping size of each iommu pte/tlb.
+ */
+#define IO_PAGE_SHIFT 13
+#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT)
+#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1))
+#define IO_PAGE_ALIGN(addr) (((addr)+IO_PAGE_SIZE-1)&IO_PAGE_MASK)
+
+#define IO_TSB_ENTRIES (128*1024)
+#define IO_TSB_SIZE (IO_TSB_ENTRIES * 8)
+
+/*
+ * This is the hardwired shift in the iotlb tag/data parts.
+ */
+#define IOMMU_PAGE_SHIFT 13
+
+/* You are _strongly_ advised to enable the following debugging code
+ * any time you make changes to the sg code below, run it for a while
+ * with filesystems mounted read-only before buying the farm... -DaveM
+ */
+#undef VERIFY_SG
+
+#ifdef VERIFY_SG
+extern void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages);
+#endif
+
+/* Two addresses are "virtually contiguous" if and only if:
+ * 1) They are equal, or...
+ * 2) They are both on a page boundary
+ */
+#define VCONTIG(__X, __Y) (((__X) == (__Y)) || \
+ (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)
+
+extern unsigned long prepare_sg(struct scatterlist *sg, int nents);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
new file mode 100644
index 00000000000..a38cb5036df
--- /dev/null
+++ b/arch/sparc64/kernel/irq.c
@@ -0,0 +1,1269 @@
+/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
+ * irq.c: UltraSparc IRQ handling/init/registry.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/sbus.h>
+#include <asm/iommu.h>
+#include <asm/upa.h>
+#include <asm/oplib.h>
+#include <asm/timer.h>
+#include <asm/smp.h>
+#include <asm/starfire.h>
+#include <asm/uaccess.h>
+#include <asm/cache.h>
+#include <asm/cpudata.h>
+
+#ifdef CONFIG_SMP
+static void distribute_irqs(void);
+#endif
+
+/* UPA nodes send interrupt packet to UltraSparc with first data reg
+ * value low 5 (7 on Starfire) bits holding the IRQ identifier being
+ * delivered. We must translate this into a non-vector IRQ so we can
+ * set the softint on this cpu.
+ *
+ * To make processing these packets efficient and race free we use
+ * an array of irq buckets below. The interrupt vector handler in
+ * entry.S feeds incoming packets into per-cpu pil-indexed lists.
+ * The IVEC handler does not need to act atomically, the PIL dispatch
+ * code uses CAS to get an atomic snapshot of the list and clear it
+ * at the same time.
+ */
+
+struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
+
+/* This has to be in the main kernel image, it cannot be
+ * turned into per-cpu data. The reason is that the main
+ * kernel image is locked into the TLB and this structure
+ * is accessed from the vectored interrupt trap handler. If
+ * access to this structure takes a TLB miss it could cause
+ * the 5-level sparc v9 trap stack to overflow.
+ */
+struct irq_work_struct {
+ unsigned int irq_worklists[16];
+};
+struct irq_work_struct __irq_work[NR_CPUS];
+#define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)])
+
+#ifdef CONFIG_PCI
+/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
+ * It is used for PCI only to synchronize DMA transfers with IRQ delivery
+ * for devices behind busses other than APB on Sabre systems.
+ *
+ * Currently these physical addresses are just config space accesses
+ * to the command register for that device.
+ */
+unsigned long pci_dma_wsync;
+unsigned long dma_sync_reg_table[256];
+unsigned char dma_sync_reg_table_entry = 0;
+#endif
+
+/* This is based upon code in the 32-bit Sparc kernel written mostly by
+ * David Redman (djhr@tadpole.co.uk).
+ */
+#define MAX_STATIC_ALLOC 4
+static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
+static int static_irq_count;
+
+/* This is exported so that fast IRQ handlers can get at it... -DaveM */
+struct irqaction *irq_action[NR_IRQS+1] = {
+ NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
+};
+
+/* This only synchronizes entities which modify IRQ handler
+ * state and some selected user-level spots that want to
+ * read things in the table. IRQ handler processing orders
+ * its' accesses such that no locking is needed.
+ */
+static DEFINE_SPINLOCK(irq_action_lock);
+
+static void register_irq_proc (unsigned int irq);
+
+/*
+ * Upper 2b of irqaction->flags holds the ino.
+ * irqaction->mask holds the smp affinity information.
+ */
+#define put_ino_in_irqaction(action, irq) \
+ action->flags &= 0xffffffffffffUL; \
+ if (__bucket(irq) == &pil0_dummy_bucket) \
+ action->flags |= 0xdeadUL << 48; \
+ else \
+ action->flags |= __irq_ino(irq) << 48;
+#define get_ino_in_irqaction(action) (action->flags >> 48)
+
+#define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff)
+#define get_smpaff_in_irqaction(action) ((action)->mask)
+
+/* seq_file show routine backing /proc/interrupts: for the PIL in *v,
+ * print per-cpu dispatch counts and the name:ino of every irqaction
+ * sharing that level.  Serialized against handler (un)registration by
+ * irq_action_lock.  (irq_action has NR_IRQS+1 slots, hence "<=".)
+ */
+int show_interrupts(struct seq_file *p, void *v)
+{
+	unsigned long flags;
+	int i = *(loff_t *) v;
+	struct irqaction *action;
+#ifdef CONFIG_SMP
+	int j;
+#endif
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+	if (i <= NR_IRQS) {
+		/* Nothing registered at this level -> print nothing. */
+		if (!(action = *(i + irq_action)))
+			goto out_unlock;
+		seq_printf(p, "%3d: ", i);
+#ifndef CONFIG_SMP
+		seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+		for (j = 0; j < NR_CPUS; j++) {
+			if (!cpu_online(j))
+				continue;
+			seq_printf(p, "%10u ",
+				   kstat_cpu(j).irqs[i]);
+		}
+#endif
+		/* First handler, then any SA_SHIRQ sharers on the chain. */
+		seq_printf(p, " %s:%lx", action->name,
+			   get_ino_in_irqaction(action));
+		for (action = action->next; action; action = action->next) {
+			seq_printf(p, ", %s:%lx", action->name,
+				   get_ino_in_irqaction(action));
+		}
+		seq_putc(p, '\n');
+	}
+out_unlock:
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+
+	return 0;
+}
+
+/* Now these are always passed a true fully specified sun4u INO. */
+/* Validate (enable) the interrupt's IMAP register, targeting delivery at
+ * the calling cpu.  The target ID (TID) field format depends on the bus
+ * of the running chip: JBUS vs Safari on cheetah-class, UPA otherwise,
+ * with Starfire needing a translation call.  No-op for the PIL0 dummy
+ * bucket (imap == 0).
+ */
+void enable_irq(unsigned int irq)
+{
+	struct ino_bucket *bucket = __bucket(irq);
+	unsigned long imap;
+	unsigned long tid;
+
+	imap = bucket->imap;
+	if (imap == 0UL)
+		return;
+
+	/* smp_processor_id()/config-register reads must not migrate. */
+	preempt_disable();
+
+	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		unsigned long ver;
+
+		/* %ver's upper word identifies the chip implementation;
+		 * 0x003e0016 selects the JBUS-based variant here.
+		 */
+		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+		if ((ver >> 32) == 0x003e0016) {
+			/* We set it to our JBUS ID. */
+			__asm__ __volatile__("ldxa [%%g0] %1, %0"
+					     : "=r" (tid)
+					     : "i" (ASI_JBUS_CONFIG));
+			tid = ((tid & (0x1fUL<<17)) << 9);
+			tid &= IMAP_TID_JBUS;
+		} else {
+			/* We set it to our Safari AID. */
+			__asm__ __volatile__("ldxa [%%g0] %1, %0"
+					     : "=r" (tid)
+					     : "i" (ASI_SAFARI_CONFIG));
+			tid = ((tid & (0x3ffUL<<17)) << 9);
+			tid &= IMAP_AID_SAFARI;
+		}
+	} else if (this_is_starfire == 0) {
+		/* We set it to our UPA MID. */
+		__asm__ __volatile__("ldxa [%%g0] %1, %0"
+				     : "=r" (tid)
+				     : "i" (ASI_UPA_CONFIG));
+		tid = ((tid & UPA_CONFIG_MID) << 9);
+		tid &= IMAP_TID_UPA;
+	} else {
+		tid = (starfire_translate(imap, smp_processor_id()) << 26);
+		tid &= IMAP_TID_UPA;
+	}
+
+	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
+	 * of this SYSIO's preconfigured IGN in the SYSIO Control
+	 * Register, the hardware just mirrors that value here.
+	 * However for Graphics and UPA Slave devices the full
+	 * IMAP_INR field can be set by the programmer here.
+	 *
+	 * Things like FFB can now be handled via the new IRQ mechanism.
+	 */
+	upa_writel(tid | IMAP_VALID, imap);
+
+	preempt_enable();
+}
+
+/* This now gets passed true ino's as well. */
+/* Invalidate the interrupt's IMAP register so the source stops raising
+ * vectors.  Only the VALID bit is cleared; the ICLR state is left alone
+ * (see comment below).  No-op for the PIL0 dummy bucket (imap == 0).
+ */
+void disable_irq(unsigned int irq)
+{
+	struct ino_bucket *bucket = __bucket(irq);
+	unsigned long imap;
+
+	imap = bucket->imap;
+	if (imap != 0UL) {
+		u32 tmp;
+
+		/* NOTE: We do not want to futz with the IRQ clear registers
+		 * and move the state to IDLE, the SCSI code does call
+		 * disable_irq() to assure atomicity in the queue cmd
+		 * SCSI adapter driver code. Thus we'd lose interrupts.
+		 */
+		tmp = upa_readl(imap);
+		tmp &= ~IMAP_VALID;
+		upa_writel(tmp, imap);
+	}
+}
+
+/* The timer is the one "weird" interrupt which is generated by
+ * the CPU %tick register and not by some normal vectored interrupt
+ * source. To handle this special case, we use this dummy INO bucket.
+ */
+/* All-zero bucket: the zero iclr/imap are what identify it as the
+ * IMAP-less %tick timer source throughout this file.
+ */
+static struct ino_bucket pil0_dummy_bucket = {
+	0,	/* irq_chain */
+	0,	/* pil */
+	0,	/* pending */
+	0,	/* flags */
+	0,	/* __unused */
+	NULL,	/* irq_info */
+	0UL,	/* iclr */
+	0UL,	/* imap */
+};
+
+/* Construct the opaque IRQ cookie for a (pil, iclr, imap) triple.  PIL0
+ * maps to the dummy timer bucket and must have no registers; everything
+ * else reads the hardware INO out of the IMAP (plus 'inofixup') and
+ * claims the matching ivector_table slot.  Re-initializing a live
+ * bucket is fatal.
+ *
+ * BUGFIX: the INO bound check used '>', letting ino == NUM_IVECS index
+ * one element past the end of ivector_table[NUM_IVECS]; use '>='.
+ */
+unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
+{
+	struct ino_bucket *bucket;
+	int ino;
+
+	if (pil == 0) {
+		if (iclr != 0UL || imap != 0UL) {
+			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
+				    iclr, imap);
+			prom_halt();
+		}
+		return __irq(&pil0_dummy_bucket);
+	}
+
+	/* RULE: Both must be specified in all other cases. */
+	if (iclr == 0UL || imap == 0UL) {
+		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
+			    pil, inofixup, iclr, imap);
+		prom_halt();
+	}
+
+	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
+	if (ino >= NUM_IVECS) {
+		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
+			    ino, pil, inofixup, iclr, imap);
+		prom_halt();
+	}
+
+	/* Ok, looks good, set it up.  Don't touch the irq_chain or
+	 * the pending flag.
+	 */
+	bucket = &ivector_table[ino];
+	if ((bucket->flags & IBF_ACTIVE) ||
+	    (bucket->irq_info != NULL)) {
+		/* This is a gross fatal error if it happens here. */
+		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
+		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
+			    ino, pil, inofixup, iclr, imap);
+		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
+			    bucket->pil, bucket->iclr, bucket->imap);
+		prom_printf("IRQ: Cannot continue, halting...\n");
+		prom_halt();
+	}
+	bucket->imap  = imap;
+	bucket->iclr  = iclr;
+	bucket->pil   = pil;
+	bucket->flags = 0;
+
+	bucket->irq_info = NULL;
+
+	return __irq(bucket);
+}
+
+/* Push 'bucket' onto this cpu's per-PIL work list with interrupts
+ * disabled (PSTATE_IE cleared across the link-in) so the IVEC trap
+ * handler cannot race with the list update.
+ */
+static void atomic_bucket_insert(struct ino_bucket *bucket)
+{
+	unsigned long pstate;
+	unsigned int *ent;
+
+	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+	__asm__ __volatile__("wrpr %0, %1, %%pstate"
+			     : : "r" (pstate), "i" (PSTATE_IE));
+	ent = irq_work(smp_processor_id(), bucket->pil);
+	bucket->irq_chain = *ent;
+	*ent = __irq(bucket);
+	/* Restore the caller's original PSTATE (IE re-enabled if it was). */
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+}
+
+/* Install 'handler' for the IRQ cookie 'irq' (a pointer-encoded
+ * ino_bucket, see __bucket/__irq).  Handles SA_SHIRQ chaining, the
+ * IBF_MULTI 4-slot sharing vector for PCI buckets whose IMAP covers
+ * several sources, SA_STATIC_ALLOC early-boot allocation, and replay of
+ * an IVEC that arrived while no handler was registered (bucket->pending).
+ * Returns 0 or -EINVAL/-EBUSY/-ENOMEM.
+ */
+int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+		unsigned long irqflags, const char *name, void *dev_id)
+{
+	struct irqaction *action, *tmp = NULL;
+	struct ino_bucket *bucket = __bucket(irq);
+	unsigned long flags;
+	int pending = 0;
+
+	/* Reject cookies that are not &pil0_dummy_bucket and do not point
+	 * into ivector_table: caller passed a raw number, old-style.
+	 */
+	if ((bucket != &pil0_dummy_bucket) &&
+	    (bucket < &ivector_table[0] ||
+	     bucket >= &ivector_table[NUM_IVECS])) {
+		unsigned int *caller;
+
+		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
+		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
+		       "from %p, irq %08x.\n", caller, irq);
+		return -EINVAL;
+	}
+	if (!handler)
+		return -EINVAL;
+
+	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
+		/*
+	 	 * This function might sleep, we want to call it first,
+	 	 * outside of the atomic block. In SA_STATIC_ALLOC case,
+		 * random driver's kmalloc will fail, but it is safe.
+		 * If already initialized, random driver will not reinit.
+	 	 * Yes, this might clear the entropy pool if the wrong
+	 	 * driver is attempted to be loaded, without actually
+	 	 * installing a new handler, but is this really a problem,
+	 	 * only the sysadmin is able to do this.
+	 	 */
+		rand_initialize_irq(irq);
+	}
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	/* Existing action chain at this PIL: append only if both old and
+	 * new agree to share, otherwise the level is busy.
+	 */
+	action = *(bucket->pil + irq_action);
+	if (action) {
+		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
+			for (tmp = action; tmp->next; tmp = tmp->next)
+				;
+		else {
+			spin_unlock_irqrestore(&irq_action_lock, flags);
+			return -EBUSY;
+		}
+		action = NULL;		/* Or else! */
+	}
+
+	/* If this is flagged as statically allocated then we use our
+	 * private struct which is never freed.
+	 */
+	if (irqflags & SA_STATIC_ALLOC) {
+	    if (static_irq_count < MAX_STATIC_ALLOC)
+		action = &static_irqaction[static_irq_count++];
+	    else
+		printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
+		       "using kmalloc\n", irq, name);
+	}	
+	if (action == NULL)
+	    action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+						 GFP_ATOMIC);
+	
+	if (!action) { 
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return -ENOMEM;
+	}
+
+	if (bucket == &pil0_dummy_bucket) {
+		bucket->irq_info = action;
+		bucket->flags |= IBF_ACTIVE;
+	} else {
+		if ((bucket->flags & IBF_ACTIVE) != 0) {
+			/* Bucket already live: only PCI buckets may share
+			 * one IMAP among several sources, via a 4-entry
+			 * irq_info vector (IBF_MULTI).
+			 */
+			void *orig = bucket->irq_info;
+			void **vector = NULL;
+
+			if ((bucket->flags & IBF_PCI) == 0) {
+				printk("IRQ: Trying to share non-PCI bucket.\n");
+				goto free_and_ebusy;
+			}
+			if ((bucket->flags & IBF_MULTI) == 0) {
+				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
+				if (vector == NULL)
+					goto free_and_enomem;
+
+				/* We might have slept. */
+				/* NOTE(review): GFP_ATOMIC does not sleep, so
+				 * this re-check of IBF_MULTI looks vestigial
+				 * from a sleeping-allocation era -- confirm.
+				 */
+				if ((bucket->flags & IBF_MULTI) != 0) {
+					int ent;
+
+					kfree(vector);
+					vector = (void **)bucket->irq_info;
+					for(ent = 0; ent < 4; ent++) {
+						if (vector[ent] == NULL) {
+							vector[ent] = action;
+							break;
+						}
+					}
+					if (ent == 4)
+						goto free_and_ebusy;
+				} else {
+					vector[0] = orig;
+					vector[1] = action;
+					vector[2] = NULL;
+					vector[3] = NULL;
+					bucket->irq_info = vector;
+					bucket->flags |= IBF_MULTI;
+				}
+			} else {
+				int ent;
+
+				vector = (void **)orig;
+				for (ent = 0; ent < 4; ent++) {
+					if (vector[ent] == NULL) {
+						vector[ent] = action;
+						break;
+					}
+				}
+				if (ent == 4)
+					goto free_and_ebusy;
+			}
+		} else {
+			bucket->irq_info = action;
+			bucket->flags |= IBF_ACTIVE;
+		}
+		/* Consume any IVEC that arrived before we had a handler;
+		 * it is replayed below once the action is fully linked in.
+		 */
+		pending = bucket->pending;
+		if (pending)
+			bucket->pending = 0;
+	}
+
+	action->handler = handler;
+	action->flags = irqflags;
+	action->name = name;
+	action->next = NULL;
+	action->dev_id = dev_id;
+	put_ino_in_irqaction(action, irq);
+	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
+
+	if (tmp)
+		tmp->next = action;
+	else
+		*(bucket->pil + irq_action) = action;
+
+	enable_irq(irq);
+
+	/* We ate the IVEC already, this makes sure it does not get lost. */
+	if (pending) {
+		atomic_bucket_insert(bucket);
+		set_softint(1 << bucket->pil);
+	}
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
+		register_irq_proc(__irq_ino(irq));
+
+#ifdef CONFIG_SMP
+	distribute_irqs();
+#endif
+	return 0;
+
+free_and_ebusy:
+	kfree(action);
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+	return -EBUSY;
+
+free_and_enomem:
+	kfree(action);
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+	return -ENOMEM;
+}
+
+EXPORT_SYMBOL(request_irq);
+
+/* Tear down a handler installed by request_irq(), matching on 'dev_id'
+ * for shared (SA_SHIRQ) lines.  Unlinks the irqaction, waits for any
+ * in-flight handler (synchronize_irq), unwinds the IBF_MULTI sharing
+ * vector, and only disables the IMAP once no other active bucket still
+ * references the same IMAP register.
+ *
+ * BUGFIX: the "Freeing free IRQ" path returned with irq_action_lock
+ * still held (and local IRQs off), and dereferenced action->handler
+ * when the action list head was NULL.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+	struct irqaction *action;
+	struct irqaction *tmp = NULL;
+	unsigned long flags;
+	struct ino_bucket *bucket = __bucket(irq), *bp;
+
+	if ((bucket != &pil0_dummy_bucket) &&
+	    (bucket < &ivector_table[0] ||
+	     bucket >= &ivector_table[NUM_IVECS])) {
+		unsigned int *caller;
+
+		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
+		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
+		       "from %p, irq %08x.\n", caller, irq);
+		return;
+	}
+	
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	action = *(bucket->pil + irq_action);
+	if (!action || !action->handler) {
+		printk("Freeing free IRQ %d\n", bucket->pil);
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return;
+	}
+	if (dev_id) {
+		/* Walk the chain looking for the action owning dev_id;
+		 * 'tmp' trails one behind for the unlink below.
+		 */
+		for ( ; action; action = action->next) {
+			if (action->dev_id == dev_id)
+				break;
+			tmp = action;
+		}
+		if (!action) {
+			printk("Trying to free free shared IRQ %d\n", bucket->pil);
+			spin_unlock_irqrestore(&irq_action_lock, flags);
+			return;
+		}
+	} else if (action->flags & SA_SHIRQ) {
+		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return;
+	}
+
+	if (action->flags & SA_STATIC_ALLOC) {
+		printk("Attempt to free statically allocated IRQ %d (%s)\n",
+		       bucket->pil, action->name);
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return;
+	}
+
+	if (action && tmp)
+		tmp->next = action->next;
+	else
+		*(bucket->pil + irq_action) = action->next;
+
+	/* Drop the lock while waiting for a running handler to finish;
+	 * the action is already unlinked so nobody new can find it.
+	 */
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+
+	synchronize_irq(irq);
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	if (bucket != &pil0_dummy_bucket) {
+		unsigned long imap = bucket->imap;
+		void **vector, *orig;
+		int ent;
+
+		orig = bucket->irq_info;
+		vector = (void **)orig;
+
+		if ((bucket->flags & IBF_MULTI) != 0) {
+			int other = 0;
+			void *orphan = NULL;
+			for (ent = 0; ent < 4; ent++) {
+				if (vector[ent] == action)
+					vector[ent] = NULL;
+				else if (vector[ent] != NULL) {
+					orphan = vector[ent];
+					other++;
+				}
+			}
+
+			/* Only free when no other shared irq
+			 * uses this bucket.
+			 */
+			if (other) {
+				if (other == 1) {
+					/* Convert back to non-shared bucket. */
+					bucket->irq_info = orphan;
+					bucket->flags &= ~(IBF_MULTI);
+					kfree(vector);
+				}
+				goto out;
+			}
+		} else {
+			bucket->irq_info = NULL;
+		}
+
+		/* This unique interrupt source is now inactive. */
+		bucket->flags &= ~IBF_ACTIVE;
+
+		/* See if any other buckets share this bucket's IMAP
+		 * and are still active.
+		 */
+		for (ent = 0; ent < NUM_IVECS; ent++) {
+			bp = &ivector_table[ent];
+			if (bp != bucket	&&
+			    bp->imap == imap	&&
+			    (bp->flags & IBF_ACTIVE) != 0)
+				break;
+		}
+
+		/* Only disable when no other sub-irq levels of
+		 * the same IMAP are active.
+		 */
+		if (ent == NUM_IVECS)
+			disable_irq(irq);
+	}
+
+out:
+	kfree(action);
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+}
+
+EXPORT_SYMBOL(free_irq);
+
+#ifdef CONFIG_SMP
+/* Spin until no handler for this bucket is currently executing.  The
+ * ideal ICLR-polling implementation is kept (disabled) below for
+ * reference; instead an x86-style IBF_INPROGRESS flag, set/cleared by
+ * handler_irq(), is polled.
+ */
+void synchronize_irq(unsigned int irq)
+{
+	struct ino_bucket *bucket = __bucket(irq);
+
+#if 0
+	/* The following is how I wish I could implement this.
+	 * Unfortunately the ICLR registers are read-only, you can
+	 * only write ICLR_foo values to them.  To get the current
+	 * IRQ status you would need to get at the IRQ diag registers
+	 * in the PCI/SBUS controller and the layout of those vary
+	 * from one controller to the next, sigh... -DaveM
+	 */
+	unsigned long iclr = bucket->iclr;
+
+	while (1) {
+		u32 tmp = upa_readl(iclr);
+		
+		if (tmp == ICLR_TRANSMIT ||
+		    tmp == ICLR_PENDING) {
+			cpu_relax();
+			continue;
+		}
+		break;
+	}
+#else
+	/* So we have to do this with a INPROGRESS bit just like x86.  */
+	while (bucket->flags & IBF_INPROGRESS)
+		cpu_relax();
+#endif
+}
+#endif /* CONFIG_SMP */
+
+/* Called when an interrupt vector arrives for a bucket with no handler
+ * registered yet: record it in bucket->pending so request_irq() can
+ * replay it once a handler is installed, and clear the work slot.
+ */
+void catch_disabled_ivec(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
+
+	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
+	 * to other devices.  Here a single IMAP enabled potentially multiple
+	 * unique interrupt sources (which each do have a unique ICLR register.
+	 *
+	 * So what we do is just register that the IVEC arrived, when registered
+	 * for real the request_irq() code will check the bit and signal
+	 * a local CPU interrupt for it.
+	 */
+#if 0
+	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
+	       bucket - &ivector_table[0], regs->tpc);
+#endif
+	*irq_work(cpu, 0) = 0;
+	bucket->pending = 1;
+}
+
+/* Tune this... */
+#define FORWARD_VOLUME 12
+
+#ifdef CONFIG_SMP
+
+/* Heuristically retarget a just-serviced interrupt at a "buddy" cpu
+ * when the current cpu is busy with real work, respecting the
+ * user-configured affinity mask.  See the rule list below.
+ */
+static inline void redirect_intr(int cpu, struct ino_bucket *bp)
+{
+	/* Ok, here is what is going on:
+	 * 1) Retargeting IRQs on Starfire is very
+	 *    expensive so just forget about it on them.
+	 * 2) Moving around very high priority interrupts
+	 *    is a losing game.
+	 * 3) If the current cpu is idle, interrupts are
+	 *    useful work, so keep them here.  But do not
+	 *    pass to our neighbour if he is not very idle.
+	 * 4) If sysadmin explicitly asks for directed intrs,
+	 *    Just Do It.
+	 */
+	struct irqaction *ap = bp->irq_info;
+	cpumask_t cpu_mask;
+	unsigned int buddy, ticks;
+
+	/* Affinity mask restricted to online cpus; empty means "any". */
+	cpu_mask = get_smpaff_in_irqaction(ap);
+	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
+	if (cpus_empty(cpu_mask))
+		cpu_mask = cpu_online_map;
+
+	if (this_is_starfire != 0 ||
+	    bp->pil >= 10 || current->pid == 0)
+		goto out;
+
+	/* 'cpu' is the MID (ie. UPAID), calculate the MID
+	 * of our buddy.
+	 */
+	buddy = cpu + 1;
+	if (buddy >= NR_CPUS)
+		buddy = 0;
+
+	/* Scan for the next cpu allowed by the mask; if a full lap finds
+	 * none, drop the stale affinity setting and give up.
+	 */
+	ticks = 0;
+	while (!cpu_isset(buddy, cpu_mask)) {
+		if (++buddy >= NR_CPUS)
+			buddy = 0;
+		if (++ticks > NR_CPUS) {
+			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
+			goto out;
+		}
+	}
+
+	if (buddy == cpu)
+		goto out;
+
+	/* Voo-doo programming. */
+	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
+		goto out;
+
+	/* This just so happens to be correct on Cheetah
+	 * at the moment.
+	 */
+	buddy <<= 26;
+
+	/* Push it to our buddy. */
+	upa_writel(buddy | IMAP_VALID, bp->imap);
+
+out:
+	return;
+}
+
+#endif
+
+/* PIL-level interrupt dispatch: atomically snapshot and clear this
+ * cpu's work list for 'irq', then walk the bucket chain invoking each
+ * registered handler (single action or the 4-slot IBF_MULTI vector),
+ * acking the ICLR and optionally mixing entropy afterward.  Buckets
+ * that fire while inactive are marked pending for later replay.
+ */
+void handler_irq(int irq, struct pt_regs *regs)
+{
+	struct ino_bucket *bp, *nbp;
+	int cpu = smp_processor_id();
+
+#ifndef CONFIG_SMP
+	/*
+	 * Check for TICK_INT on level 14 softint.
+	 */
+	{
+		unsigned long clr_mask = 1 << irq;
+		unsigned long tick_mask = tick_ops->softint_mask;
+
+		if ((irq == 14) && (get_softint() & tick_mask)) {
+			irq = 0;
+			clr_mask = tick_mask;
+		}
+		clear_softint(clr_mask);
+	}
+#else
+	int should_forward = 1;
+
+	clear_softint(1 << irq);
+#endif
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+
+	/* Sliiiick... */
+#ifndef CONFIG_SMP
+	bp = ((irq != 0) ?
+	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
+	      &pil0_dummy_bucket);
+#else
+	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
+#endif
+	for ( ; bp != NULL; bp = nbp) {
+		unsigned char flags = bp->flags;
+		unsigned char random = 0;
+
+		/* Detach this bucket before running handlers so a re-fire
+		 * can be queued concurrently.
+		 */
+		nbp = __bucket(bp->irq_chain);
+		bp->irq_chain = 0;
+
+		bp->flags |= IBF_INPROGRESS;
+
+		if ((flags & IBF_ACTIVE) != 0) {
+#ifdef CONFIG_PCI
+			if ((flags & IBF_DMA_SYNC) != 0) {
+				/* Flush posted DMA ahead of the handler. */
+				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
+				upa_readq(pci_dma_wsync);
+			}
+#endif
+			if ((flags & IBF_MULTI) == 0) {
+				struct irqaction *ap = bp->irq_info;
+				int ret;
+
+				ret = ap->handler(__irq(bp), ap->dev_id, regs);
+				if (ret == IRQ_HANDLED)
+					random |= ap->flags;
+			} else {
+				void **vector = (void **)bp->irq_info;
+				int ent;
+				for (ent = 0; ent < 4; ent++) {
+					struct irqaction *ap = vector[ent];
+					if (ap != NULL) {
+						int ret;
+
+						ret = ap->handler(__irq(bp),
+								  ap->dev_id,
+								  regs);
+						if (ret == IRQ_HANDLED)
+							random |= ap->flags;
+					}
+				}
+			}
+			/* Only the dummy bucket lacks IMAP/ICLR. */
+			if (bp->pil != 0) {
+#ifdef CONFIG_SMP
+				/* At most one retarget per dispatch pass. */
+				if (should_forward) {
+					redirect_intr(cpu, bp);
+					should_forward = 0;
+				}
+#endif
+				upa_writel(ICLR_IDLE, bp->iclr);
+
+				/* Test and add entropy */
+				if (random & SA_SAMPLE_RANDOM)
+					add_interrupt_randomness(irq);
+			}
+		} else
+			bp->pending = 1;
+
+		bp->flags &= ~IBF_INPROGRESS;
+	}
+	irq_exit();
+}
+
+#ifdef CONFIG_BLK_DEV_FD
+extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);
+
+/* Special-cased floppy interrupt entry: clears the per-cpu work slot,
+ * looks the bucket up via the action's stored INO, brackets the call
+ * with IBF_INPROGRESS (for synchronize_irq), and acks the ICLR.
+ */
+void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
+{
+	struct irqaction *action = *(irq + irq_action);
+	struct ino_bucket *bucket;
+	int cpu = smp_processor_id();
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+
+	*(irq_work(cpu, irq)) = 0;
+	bucket = get_ino_in_irqaction(action) + ivector_table;
+
+	bucket->flags |= IBF_INPROGRESS;
+
+	floppy_interrupt(irq, dev_cookie, regs);
+	upa_writel(ICLR_IDLE, bucket->iclr);
+
+	bucket->flags &= ~IBF_INPROGRESS;
+
+	irq_exit();
+}
+#endif
+
+/* The following assumes that the branch lies before the place we
+ * are branching to. This is the case for a trap vector...
+ * You have been warned.
+ */
+#define SPARC_BRANCH(dest_addr, inst_addr) \
+ (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
+
+#define SPARC_NOP (0x01000000)
+
+/* Self-modifying code: patch the TL0 trap table entry for 'cpu_irq'
+ * with a branch directly to 'handler' followed by a nop, then flush the
+ * I-cache line so the new instructions are fetched.  0x820 is the byte
+ * offset of the interrupt-level entries; each entry is 32 bytes.
+ */
+static void install_fast_irq(unsigned int cpu_irq,
+			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
+{
+	extern unsigned long sparc64_ttable_tl0;
+	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
+	unsigned int *insns;
+
+	ttent += 0x820;
+	ttent += (cpu_irq - 1) << 5;
+	insns = (unsigned int *) ttent;
+	insns[0] = SPARC_BRANCH(((unsigned long) handler),
+				((unsigned long)&insns[0]));
+	insns[1] = SPARC_NOP;
+	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
+}
+
+/* Like request_irq(), but wire 'handler' straight into the trap table
+ * (install_fast_irq) bypassing the normal dispatch path.  No sharing,
+ * no SA_SAMPLE_RANDOM, no smp affinity; PIL 0 and 14 are refused.
+ */
+int request_fast_irq(unsigned int irq,
+		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
+		     unsigned long irqflags, const char *name, void *dev_id)
+{
+	struct irqaction *action;
+	struct ino_bucket *bucket = __bucket(irq);
+	unsigned long flags;
+
+	/* No pil0 dummy buckets allowed here. */
+	if (bucket < &ivector_table[0] ||
+	    bucket >= &ivector_table[NUM_IVECS]) {
+		unsigned int *caller;
+
+		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
+		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
+		       "from %p, irq %08x.\n", caller, irq);
+		return -EINVAL;
+	}
+	
+	if (!handler)
+		return -EINVAL;
+
+	if ((bucket->pil == 0) || (bucket->pil == 14)) {
+		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	/* Fast IRQs never share a level with anything. */
+	action = *(bucket->pil + irq_action);
+	if (action) {
+		if (action->flags & SA_SHIRQ)
+			panic("Trying to register fast irq when already shared.\n");
+		if (irqflags & SA_SHIRQ)
+			panic("Trying to register fast irq as shared.\n");
+		printk("request_fast_irq: Trying to register yet already owned.\n");
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return -EBUSY;
+	}
+
+	/*
+	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
+	 * support smp intr affinity in this path.
+	 */
+	if (irqflags & SA_STATIC_ALLOC) {
+		if (static_irq_count < MAX_STATIC_ALLOC)
+			action = &static_irqaction[static_irq_count++];
+		else
+			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
+			       "using kmalloc\n", bucket->pil, name);
+	}
+	if (action == NULL)
+		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+						     GFP_ATOMIC);
+	if (!action) {
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return -ENOMEM;
+	}
+	install_fast_irq(bucket->pil, handler);
+
+	bucket->irq_info = action;
+	bucket->flags |= IBF_ACTIVE;
+
+	action->handler = handler;
+	action->flags = irqflags;
+	action->dev_id = NULL;
+	action->name = name;
+	action->next = NULL;
+	put_ino_in_irqaction(action, irq);
+	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
+
+	*(bucket->pil + irq_action) = action;
+	enable_irq(irq);
+
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+
+#ifdef CONFIG_SMP
+	distribute_irqs();
+#endif
+	return 0;
+}
+
+/* We really don't need these at all on the Sparc. We only have
+ * stubs here because they are exported to modules.
+ */
+/* IRQ autoprobing stubs: sparc64 firmware tells us the real routing,
+ * so probing is never needed; these exist only because drivers built
+ * as modules reference the exported symbols.
+ */
+unsigned long probe_irq_on(void)
+{
+	return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off(unsigned long mask)
+{
+	return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+#ifdef CONFIG_SMP
+/* Point the IMAP of the interrupt described by 'p' at 'goal_cpu'
+ * (skipping offline cpus), then return the NEXT online cpu so that
+ * distribute_irqs() round-robins successive interrupts over the system.
+ *
+ * BUGFIX: the trailing search used a plain while-loop; since goal_cpu
+ * is already online at that point it exited immediately and returned
+ * the same cpu for every irq, piling all interrupts onto one cpu.
+ * Advance first with a do/while.
+ */
+static int retarget_one_irq(struct irqaction *p, int goal_cpu)
+{
+	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
+	unsigned long imap = bucket->imap;
+	unsigned int tid;
+
+	while (!cpu_online(goal_cpu)) {
+		if (++goal_cpu >= NR_CPUS)
+			goal_cpu = 0;
+	}
+
+	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		tid = goal_cpu << 26;
+		tid &= IMAP_AID_SAFARI;
+	} else if (this_is_starfire == 0) {
+		tid = goal_cpu << 26;
+		tid &= IMAP_TID_UPA;
+	} else {
+		tid = (starfire_translate(imap, goal_cpu) << 26);
+		tid &= IMAP_TID_UPA;
+	}
+	upa_writel(tid | IMAP_VALID, imap);
+
+	do {
+		if (++goal_cpu >= NR_CPUS)
+			goal_cpu = 0;
+	} while (!cpu_online(goal_cpu));
+
+	return goal_cpu;
+}
+
+/* Called from request_irq. */
+/* Called from request_irq. */
+/* Spread every registered interrupt across the online cpus by walking
+ * all action chains and retargeting each one in turn.
+ */
+static void distribute_irqs(void)
+{
+	unsigned long flags;
+	int cpu, level;
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+	cpu = 0;
+
+	/*
+	 * Skip the timer at [0], and very rare error/power intrs at [15].
+	 * Also level [12], it causes problems on Ex000 systems.
+	 */
+	for (level = 1; level < NR_IRQS; level++) {
+		struct irqaction *p = irq_action[level];
+		if (level == 12) continue;
+		while(p) {
+			cpu = retarget_one_irq(p, cpu);
+			p = p->next;
+		}
+	}
+	spin_unlock_irqrestore(&irq_action_lock, flags);
+}
+#endif
+
+
+struct sun5_timer *prom_timers;
+static u64 prom_limit0, prom_limit1;
+
+/* Locate the PROM's /counter-timer node and record its (PROM-mapped)
+ * register address in prom_timers; leave prom_timers NULL when the
+ * node or its "address" property is absent.
+ */
+static void map_prom_timers(void)
+{
+	unsigned int addr[3];
+	int tnode, err;
+
+	/* PROM timer node hangs out in the top level of device siblings... */
+	tnode = prom_finddevice("/counter-timer");
+
+	/* Assume if node is not present, PROM uses different tick mechanism
+	 * which we should not care about.
+	 */
+	if (tnode == 0 || tnode == -1) {
+		prom_timers = (struct sun5_timer *) 0;
+		return;
+	}
+
+	/* If PROM is really using this, it must be mapped by him. */
+	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
+	if (err == -1) {
+		prom_printf("PROM does not have timer mapped, trying to continue.\n");
+		prom_timers = (struct sun5_timer *) 0;
+		return;
+	}
+	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
+}
+
+/* Stop the PROM's counter-timer, saving its limit registers so
+ * enable_prom_timer() can restore them, then drain the interrupt
+ * packet the timer may already have posted.
+ */
+static void kill_prom_timer(void)
+{
+	if (!prom_timers)
+		return;
+
+	/* Save them away for later. */
+	prom_limit0 = prom_timers->limit0;
+	prom_limit1 = prom_timers->limit1;
+
+	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
+	 * We turn both off here just to be paranoid.
+	 */
+	prom_timers->limit0 = 0;
+	prom_timers->limit1 = 0;
+
+	/* Wheee, eat the interrupt packet too... */
+	__asm__ __volatile__(
+"	mov	0x40, %%g2\n"
+"	ldxa	[%%g0] %0, %%g1\n"
+"	ldxa	[%%g2] %1, %%g1\n"
+"	stxa	%%g0, [%%g0] %0\n"
+"	membar	#Sync\n"
+	: /* no outputs */
+	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
+	: "g1", "g2");
+}
+
+/* Re-arm the PROM counter-timer with the limit values saved by
+ * kill_prom_timer(), zeroing the counters first.
+ */
+void enable_prom_timer(void)
+{
+	if (!prom_timers)
+		return;
+
+	/* Set it to whatever was there before. */
+	prom_timers->limit1 = prom_limit1;
+	prom_timers->count1 = 0;
+	prom_timers->limit0 = prom_limit0;
+	prom_timers->count0 = 0;
+}
+
+/* Per-cpu boot-time setup: zero this cpu's irq work lists and point the
+ * interrupt-globals %g6 at them (written while PSTATE_IG is selected).
+ * Must run with PSTATE_IE off; 'workp'/'tmp' are register-pinned so no
+ * stack/TLB traffic occurs around the pstate switching.
+ */
+void init_irqwork_curcpu(void)
+{
+	register struct irq_work_struct *workp asm("o2");
+	register unsigned long tmp asm("o3");
+	int cpu = hard_smp_processor_id();
+
+	memset(__irq_work + cpu, 0, sizeof(*workp));
+
+	/* Make sure we are called with PSTATE_IE disabled.  */
+	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
+			     : "=r" (tmp));
+	if (tmp & PSTATE_IE) {
+		prom_printf("BUG: init_irqwork_curcpu() called with "
+			    "PSTATE_IE enabled, bailing.\n");
+		__asm__ __volatile__("mov	%%i7, %0\n\t"
+				     : "=r" (tmp));
+		prom_printf("BUG: Called from %lx\n", tmp);
+		prom_halt();
+	}
+
+	/* Set interrupt globals.  */
+	workp = &__irq_work[cpu];
+	__asm__ __volatile__(
+	"rdpr	%%pstate, %0\n\t"
+	"wrpr	%0, %1, %%pstate\n\t"
+	"mov	%2, %%g6\n\t"
+	"wrpr	%0, 0x0, %%pstate\n\t"
+	: "=&r" (tmp)
+	: "i" (PSTATE_IG), "r" (workp));
+}
+
+/* Only invoked on boot processor. */
+/* Only invoked on boot processor. */
+/* Boot-time IRQ bring-up: take over the timer from the PROM, clear the
+ * vector table and any stale softints, then enable PSTATE_IE so vector
+ * traps may be taken.
+ */
+void __init init_IRQ(void)
+{
+	map_prom_timers();
+	kill_prom_timer();
+	memset(&ivector_table[0], 0, sizeof(ivector_table));
+
+	/* We need to clear any IRQ's pending in the soft interrupt
+	 * registers, a spurious one could be left around from the
+	 * PROM timer which we just disabled.
+	 */
+	clear_softint(get_softint());
+
+	/* Now that ivector table is initialized, it is safe
+	 * to receive IRQ vector traps.  We will normally take
+	 * one or two right now, in case some device PROM used
+	 * to boot us wants to speak to us.  We just ignore them.
+	 */
+	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
+			     "or	%%g1, %0, %%g1\n\t"
+			     "wrpr	%%g1, 0x0, %%pstate"
+			     : /* No outputs */
+			     : "i" (PSTATE_IE)
+			     : "g1");
+}
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NUM_IVECS];
+
+#ifdef CONFIG_SMP
+
+/* /proc/irq/N/smp_affinity read handler: print the irqaction's affinity
+ * mask, substituting the full online map when none has been set.
+ */
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+			int count, int *eof, void *data)
+{
+	struct ino_bucket *bp = ivector_table + (long)data;
+	struct irqaction *ap = bp->irq_info;
+	cpumask_t mask;
+	int len;
+
+	mask = get_smpaff_in_irqaction(ap);
+	if (cpus_empty(mask))
+		mask = cpu_online_map;
+
+	len = cpumask_scnprintf(page, count, mask);
+	if (count - len < 2)
+		return -EINVAL;
+	len += sprintf(page + len, "\n");
+	return len;
+}
+
+/* Store the new affinity mask in the irqaction; the actual IMAP
+ * retarget happens lazily in handler_irq()/redirect_intr() the next
+ * time the interrupt is serviced.
+ */
+static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
+{
+	struct ino_bucket *bp = ivector_table + irq;
+
+	/* Users specify affinity in terms of hw cpu ids.
+	 * As soon as we do this, handler_irq() might see and take action.
+	 */
+	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
+
+	/* Migration is simply done by the next cpu to service this
+	 * interrupt.
+	 */
+}
+
+/* /proc/irq/N/smp_affinity write handler: parse a cpu mask from
+ * userspace, restrict it to online cpus (at least one must remain),
+ * and retarget the interrupt.
+ *
+ * BUGFIX: the cpumask_parse() return value was ignored, so a malformed
+ * write let an uninitialized mask reach set_intr_affinity().
+ */
+static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
+					unsigned long count, void *data)
+{
+	int irq = (long) data, full_count = count, err;
+	cpumask_t new_value;
+
+	err = cpumask_parse(buffer, count, new_value);
+	if (err)
+		return err;
+
+	/*
+	 * Do not allow disabling IRQs completely - it's a too easy
+	 * way to make the system unusable accidentally :-) At least
+	 * one online CPU still has to be targeted.
+	 */
+	cpus_and(new_value, new_value, cpu_online_map);
+	if (cpus_empty(new_value))
+		return -EINVAL;
+
+	set_intr_affinity(irq, new_value);
+
+	return full_count;
+}
+
+#endif
+
+#define MAX_NAMELEN 10
+
+/* Create /proc/irq/<ino-in-hex>/ and, on non-Starfire SMP, the
+ * smp_affinity file underneath it.  Idempotent: returns early if the
+ * directory already exists or the /proc/irq root is not yet created.
+ */
+static void register_irq_proc (unsigned int irq)
+{
+	char name [MAX_NAMELEN];
+
+	if (!root_irq_dir || irq_dir[irq])
+		return;
+
+	memset(name, 0, MAX_NAMELEN);
+	sprintf(name, "%x", irq);
+
+	/* create /proc/irq/1234 */
+	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#ifdef CONFIG_SMP
+	/* XXX SMP affinity not supported on starfire yet. */
+	if (this_is_starfire == 0) {
+		struct proc_dir_entry *entry;
+
+		/* create /proc/irq/1234/smp_affinity */
+		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+		if (entry) {
+			entry->nlink = 1;
+			entry->data = (void *)(long)irq;
+			entry->read_proc = irq_affinity_read_proc;
+			entry->write_proc = irq_affinity_write_proc;
+		}
+	}
+#endif
+}
+
+/* Create the /proc/irq root directory; per-irq entries are added later
+ * by register_irq_proc().
+ */
+void init_irq_proc (void)
+{
+	/* create /proc/irq */
+	root_irq_dir = proc_mkdir("irq", NULL);
+}
+
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
new file mode 100644
index 00000000000..30862abee61
--- /dev/null
+++ b/arch/sparc64/kernel/isa.c
@@ -0,0 +1,329 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <asm/oplib.h>
+#include <asm/isa.h>
+
+struct sparc_isa_bridge *isa_chain;
+
+/* Print a fatal ISA probe error on the PROM console; callers follow
+ * this with prom_halt().
+ */
+static void __init fatal_err(const char *reason)
+{
+ prom_printf("ISA: fatal error, %s.\n", reason);
+}
+
+/* Emit a device name into the boot-time "isaN: [dev (child)...]"
+ * listing: children in parens, a top-level device opens a bracket
+ * which isa_fill_devices() closes.
+ */
+static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
+{
+ if (child)
+ printk(" (%s)", isa_dev->prom_name);
+ else
+ printk(" [%s", isa_dev->prom_name);
+}
+
+/* Read the OBP "reg" property into pregs and claim the device's I/O
+ * range (first reg entry only) inside the parent PBM's io_space.
+ * Silently returns if the property is absent.
+ */
+static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev,
+ struct linux_prom_registers *pregs,
+ int pregs_size)
+{
+ unsigned long base, len;
+ int prop_len;
+
+ prop_len = prom_getproperty(isa_dev->prom_node, "reg",
+ (char *) pregs, pregs_size);
+
+ if (prop_len <= 0)
+ return;
+
+ /* Only the first one is interesting. */
+ len = pregs[0].reg_size;
+ /* 64-bit PROM address: which_io holds the upper 32 bits. */
+ base = (((unsigned long)pregs[0].which_io << 32) |
+ (unsigned long)pregs[0].phys_addr);
+ base += isa_dev->bus->parent->io_space.start;
+
+ isa_dev->resource.start = base;
+ isa_dev->resource.end = (base + len - 1UL);
+ isa_dev->resource.flags = IORESOURCE_IO;
+ isa_dev->resource.name = isa_dev->prom_name;
+
+ /* NOTE(review): request_resource() return value is ignored, so an
+ * overlapping claim goes unreported -- confirm this is intended.
+ */
+ request_resource(&isa_dev->bus->parent->io_space,
+ &isa_dev->resource);
+}
+
+/* I can't believe they didn't put a real INO in the isa device
+ * interrupts property. The whole point of the OBP properties
+ * is to shield the kernel from IRQ routing details.
+ *
+ * The P1275 standard for ISA devices seems to also have been
+ * totally ignored.
+ *
+ * On later systems, an interrupt-map and interrupt-map-mask scheme
+ * akin to EBUS is used.
+ */
+/* Fallback mapping from the raw OBP "interrupts" value to a PCI INO,
+ * used on boxes (e.g. Grover) that lack an interrupt-map property.
+ * Terminated by an entry with obp_irq == 0.
+ */
+static struct {
+ int obp_irq;
+ int pci_ino;
+} grover_irq_table[] = {
+ { 1, 0x00 }, /* dma, unknown ino at this point */
+ { 2, 0x27 }, /* floppy */
+ { 3, 0x22 }, /* parallel */
+ { 4, 0x2b }, /* serial */
+ { 5, 0x25 }, /* acpi power management */
+
+ { 0, 0x00 } /* end of table */
+};
+
+/* EBUS-style interrupt-map lookup: mask the device's unit address and
+ * raw interrupt with interrupt-map-mask, then search the bridge's
+ * interrupt-map entries. On a match, rewrite *interrupt with the
+ * parent (cinterrupt) value and return 0; return -1 if unmapped.
+ */
+static int __init isa_dev_get_irq_using_imap(struct sparc_isa_device *isa_dev,
+ struct sparc_isa_bridge *isa_br,
+ int *interrupt,
+ struct linux_prom_registers *pregs)
+{
+ unsigned int hi, lo, irq;
+ int i;
+
+ hi = pregs->which_io & isa_br->isa_intmask.phys_hi;
+ lo = pregs->phys_addr & isa_br->isa_intmask.phys_lo;
+ irq = *interrupt & isa_br->isa_intmask.interrupt;
+ for (i = 0; i < isa_br->num_isa_intmap; i++) {
+ if ((isa_br->isa_intmap[i].phys_hi == hi) &&
+ (isa_br->isa_intmap[i].phys_lo == lo) &&
+ (isa_br->isa_intmap[i].interrupt == irq)) {
+ *interrupt = isa_br->isa_intmap[i].cinterrupt;
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* Resolve isa_dev->irq: read the OBP "interrupts" property, translate
+ * it to a PCI INO via the bridge's interrupt-map (if present) or the
+ * hardcoded grover table, then build the final IRQ through the PCI
+ * controller's irq_build hook. Falls back to PCI_IRQ_NONE.
+ */
+static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev,
+ struct linux_prom_registers *pregs)
+{
+ int irq_prop;
+
+ irq_prop = prom_getintdefault(isa_dev->prom_node,
+ "interrupts", -1);
+ if (irq_prop <= 0) {
+ goto no_irq;
+ } else {
+ struct pci_controller_info *pcic;
+ struct pci_pbm_info *pbm;
+ int i;
+
+ /* Preferred path: a real interrupt-map property. */
+ if (isa_dev->bus->num_isa_intmap) {
+ if (!isa_dev_get_irq_using_imap(isa_dev,
+ isa_dev->bus,
+ &irq_prop,
+ pregs))
+ goto route_irq;
+ }
+
+ /* Fallback: fixed OBP-irq -> INO table. */
+ for (i = 0; grover_irq_table[i].obp_irq != 0; i++) {
+ if (grover_irq_table[i].obp_irq == irq_prop) {
+ int ino = grover_irq_table[i].pci_ino;
+
+ if (ino == 0)
+ goto no_irq;
+
+ irq_prop = ino;
+ goto route_irq;
+ }
+ }
+ goto no_irq;
+
+route_irq:
+ pbm = isa_dev->bus->parent;
+ pcic = pbm->parent;
+ isa_dev->irq = pcic->irq_build(pbm, NULL, irq_prop);
+ return;
+ }
+
+no_irq:
+ isa_dev->irq = PCI_IRQ_NONE;
+}
+
+/* Walk the OBP children of one ISA device, allocating a
+ * sparc_isa_device for each, resolving its resource and irq, and
+ * prepending it to the parent's child list. Halts the machine on
+ * allocation or mandatory-property failure (boot-time only).
+ */
+static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
+{
+ int node = prom_getchild(parent_isa_dev->prom_node);
+
+ if (node == 0)
+ return;
+
+ printk(" ->");
+ while (node != 0) {
+ struct linux_prom_registers regs[PROMREG_MAX];
+ struct sparc_isa_device *isa_dev;
+ int prop_len;
+
+ isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
+ if (!isa_dev) {
+ fatal_err("cannot allocate child isa_dev");
+ prom_halt();
+ }
+
+ memset(isa_dev, 0, sizeof(*isa_dev));
+
+ /* Link it in to parent. */
+ isa_dev->next = parent_isa_dev->child;
+ parent_isa_dev->child = isa_dev;
+
+ isa_dev->bus = parent_isa_dev->bus;
+ isa_dev->prom_node = node;
+ prop_len = prom_getproperty(node, "name",
+ (char *) isa_dev->prom_name,
+ sizeof(isa_dev->prom_name));
+ if (prop_len <= 0) {
+ fatal_err("cannot get child isa_dev OBP node name");
+ prom_halt();
+ }
+
+ prop_len = prom_getproperty(node, "compatible",
+ (char *) isa_dev->compatible,
+ sizeof(isa_dev->compatible));
+
+ /* Not having this is OK. */
+ if (prop_len <= 0)
+ isa_dev->compatible[0] = '\0';
+
+ isa_dev_get_resource(isa_dev, regs, sizeof(regs));
+ isa_dev_get_irq(isa_dev, regs);
+
+ report_dev(isa_dev, 1);
+
+ node = prom_getsibling(node);
+ }
+}
+
+/* Enumerate all OBP children of an ISA bridge: allocate a
+ * sparc_isa_device per node, resolve resources/irqs, recurse into
+ * grandchildren, and append to the bridge's device list (tail-append
+ * preserves PROM discovery order).
+ */
+static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
+{
+ int node = prom_getchild(isa_br->prom_node);
+
+ while (node != 0) {
+ struct linux_prom_registers regs[PROMREG_MAX];
+ struct sparc_isa_device *isa_dev;
+ int prop_len;
+
+ isa_dev = kmalloc(sizeof(*isa_dev), GFP_KERNEL);
+ if (!isa_dev) {
+ fatal_err("cannot allocate isa_dev");
+ prom_halt();
+ }
+
+ memset(isa_dev, 0, sizeof(*isa_dev));
+
+ /* Link it in. */
+ isa_dev->next = NULL;
+ if (isa_br->devices == NULL) {
+ isa_br->devices = isa_dev;
+ } else {
+ struct sparc_isa_device *tmp = isa_br->devices;
+
+ while (tmp->next)
+ tmp = tmp->next;
+
+ tmp->next = isa_dev;
+ }
+
+ isa_dev->bus = isa_br;
+ isa_dev->prom_node = node;
+ prop_len = prom_getproperty(node, "name",
+ (char *) isa_dev->prom_name,
+ sizeof(isa_dev->prom_name));
+ if (prop_len <= 0) {
+ fatal_err("cannot get isa_dev OBP node name");
+ prom_halt();
+ }
+
+ prop_len = prom_getproperty(node, "compatible",
+ (char *) isa_dev->compatible,
+ sizeof(isa_dev->compatible));
+
+ /* Not having this is OK. */
+ if (prop_len <= 0)
+ isa_dev->compatible[0] = '\0';
+
+ isa_dev_get_resource(isa_dev, regs, sizeof(regs));
+ isa_dev_get_irq(isa_dev, regs);
+
+ report_dev(isa_dev, 0);
+
+ isa_fill_children(isa_dev);
+
+ /* Close the bracket opened by report_dev(isa_dev, 0). */
+ printk("]");
+
+ node = prom_getsibling(node);
+ }
+}
+
+/* Boot-time ISA probe: find every ALi M1533 PCI-ISA bridge, build a
+ * sparc_isa_bridge for it (linked into the global isa_chain), pull
+ * its ranges / interrupt-map / interrupt-map-mask properties from
+ * OBP, and enumerate the devices behind it.
+ */
+void __init isa_init(void)
+{
+ struct pci_dev *pdev;
+ unsigned short vendor, device;
+ int index = 0;
+
+ vendor = PCI_VENDOR_ID_AL;
+ device = PCI_DEVICE_ID_AL_M1533;
+
+ pdev = NULL;
+ while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
+ struct pcidev_cookie *pdev_cookie;
+ struct pci_pbm_info *pbm;
+ struct sparc_isa_bridge *isa_br;
+ int prop_len;
+
+ pdev_cookie = pdev->sysdata;
+ if (!pdev_cookie) {
+ printk("ISA: Warning, ISA bridge ignored due to "
+ "lack of OBP data.\n");
+ continue;
+ }
+ pbm = pdev_cookie->pbm;
+
+ isa_br = kmalloc(sizeof(*isa_br), GFP_KERNEL);
+ if (!isa_br) {
+ fatal_err("cannot allocate sparc_isa_bridge");
+ prom_halt();
+ }
+
+ memset(isa_br, 0, sizeof(*isa_br));
+
+ /* Link it in. */
+ isa_br->next = isa_chain;
+ isa_chain = isa_br;
+
+ isa_br->parent = pbm;
+ isa_br->self = pdev;
+ isa_br->index = index++;
+ isa_br->prom_node = pdev_cookie->prom_node;
+ /* NOTE(review): strncpy does not NUL-terminate when the
+ * source fills the buffer -- confirm prom_name is always
+ * shorter than sizeof(isa_br->prom_name).
+ */
+ strncpy(isa_br->prom_name, pdev_cookie->prom_name,
+ sizeof(isa_br->prom_name));
+
+ prop_len = prom_getproperty(isa_br->prom_node,
+ "ranges",
+ (char *) isa_br->isa_ranges,
+ sizeof(isa_br->isa_ranges));
+ if (prop_len <= 0)
+ isa_br->num_isa_ranges = 0;
+ else
+ isa_br->num_isa_ranges =
+ (prop_len / sizeof(struct linux_prom_isa_ranges));
+
+ prop_len = prom_getproperty(isa_br->prom_node,
+ "interrupt-map",
+ (char *) isa_br->isa_intmap,
+ sizeof(isa_br->isa_intmap));
+ if (prop_len <= 0)
+ isa_br->num_isa_intmap = 0;
+ else
+ isa_br->num_isa_intmap =
+ (prop_len / sizeof(struct linux_prom_isa_intmap));
+
+ /* NOTE(review): prop_len unchecked here; with no intmap the
+ * mask is never read, so a stale/zero mask appears harmless
+ * -- verify num_isa_intmap gates all isa_intmask uses.
+ */
+ prop_len = prom_getproperty(isa_br->prom_node,
+ "interrupt-map-mask",
+ (char *) &(isa_br->isa_intmask),
+ sizeof(isa_br->isa_intmask));
+
+ printk("isa%d:", isa_br->index);
+
+ isa_fill_devices(isa_br);
+
+ printk("\n");
+ }
+}
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
new file mode 100644
index 00000000000..b5e32dfa4fb
--- /dev/null
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -0,0 +1,83 @@
+/* $Id: itlb_base.S,v 1.12 2002/02/09 19:49:30 davem Exp $
+ * itlb_base.S: Front end to ITLB miss replacement strategy.
+ * This is included directly into the trap table.
+ *
+ * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#if PAGE_SHIFT == 13
+/*
+ * To compute vpte offset, we need to do ((addr >> 13) << 3),
+ * which can be optimized to (addr >> 10) if bits 10/11/12 can
+ * be guaranteed to be 0 ... mmu_context.h does guarantee this
+ * by only using 10 bits in the hwcontext value.
+ */
+#define CREATE_VPTE_OFFSET1(r1, r2) \
+ srax r1, 10, r2
+#define CREATE_VPTE_OFFSET2(r1, r2)
+#define CREATE_VPTE_NOP nop
+#else /* PAGE_SHIFT */
+#define CREATE_VPTE_OFFSET1(r1, r2) \
+ srax r1, PAGE_SHIFT, r2
+#define CREATE_VPTE_OFFSET2(r1, r2) \
+ sllx r2, 3, r2
+#define CREATE_VPTE_NOP
+#endif /* PAGE_SHIFT */
+
+
+/* Ways we can get here:
+ *
+ * 1) Nucleus instruction misses from module code.
+ * 2) All user instruction misses.
+ *
+ * All real page faults merge their code paths to the
+ * sparc64_realfault_common label below.
+ *
+ * NOTE(review): the code below appears to be laid out in fixed-size
+ * "ICACHE line" groups (line 4 is padded with nops and
+ * CREATE_VPTE_NOP) -- do not add or remove instructions without
+ * preserving the padding and the 0x7c branch offset in
+ * winfix_trampoline.
+ */
+
+/* ITLB ** ICACHE line 1: Quick user TLB misses */
+ ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS
+ CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
+ CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
+ ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE
+1: brgez,pn %g5, 3f ! Not valid, branch out
+ sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot
+ andcc %g5, %g4, %g0 ! Executable?
+ be,pn %xcc, 3f ! Nope, branch.
+ nop ! Delay-slot
+2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
+ retry ! Trap return
+3: rdpr %pstate, %g4 ! Move into alternate globals
+
+/* ITLB ** ICACHE line 2: Real faults */
+ wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate
+ rdpr %tpc, %g5 ! And load faulting VA
+ mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB
+sparc64_realfault_common: ! Called by TL0 dtlb_miss too
+ stb %g4, [%g6 + TI_FAULT_CODE]
+ stx %g5, [%g6 + TI_FAULT_ADDR]
+ ba,pt %xcc, etrap ! Save state
+1: rd %pc, %g7 ! ...
+ nop
+
+/* ITLB ** ICACHE line 3: Finish faults + window fixups */
+ call do_sparc64_fault ! Call fault handler
+ add %sp, PTREGS_OFF, %o0! Compute pt_regs arg
+ ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
+ nop
+winfix_trampoline:
+ rdpr %tpc, %g3 ! Prepare winfixup TNPC
+ or %g3, 0x7c, %g3 ! Compute offset to branch
+ wrpr %g3, %tnpc ! Write it into TNPC
+ done ! Do it to it
+
+/* ITLB ** ICACHE line 4: Unused... */
+ nop
+ nop
+ nop
+ nop
+ CREATE_VPTE_NOP
+
+#undef CREATE_VPTE_OFFSET1
+#undef CREATE_VPTE_OFFSET2
+#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
new file mode 100644
index 00000000000..7066d7ba667
--- /dev/null
+++ b/arch/sparc64/kernel/kprobes.c
@@ -0,0 +1,394 @@
+/* arch/sparc64/kernel/kprobes.c
+ *
+ * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#include <asm/kdebug.h>
+#include <asm/signal.h>
+
+/* We do not have hardware single-stepping on sparc64.
+ * So we implement software single-stepping with breakpoint
+ * traps. The top-level scheme is similar to that used
+ * in the x86 kprobes implementation.
+ *
+ * In the kprobe->ainsn.insn[] array we store the original
+ * instruction at index zero and a break instruction at
+ * index one.
+ *
+ * When we hit a kprobe we:
+ * - Run the pre-handler
+ * - Remember "regs->tnpc" and interrupt level stored in
+ * "regs->tstate" so we can restore them later
+ * - Disable PIL interrupts
+ * - Set regs->tpc to point to kprobe->ainsn.insn[0]
+ * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
+ * - Mark that we are actively in a kprobe
+ *
+ * At this point we wait for the second breakpoint at
+ * kprobe->ainsn.insn[1] to hit. When it does we:
+ * - Run the post-handler
+ * - Set regs->tpc to "remembered" regs->tnpc stored above,
+ * restore the PIL interrupt level in "regs->tstate" as well
+ * - Make any adjustments necessary to regs->tnpc in order
+ * to handle relative branches correctly. See below.
+ * - Mark that we are no longer actively in a kprobe.
+ */
+
+/* No architecture-specific validation is required before arming. */
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+/* Stash the original instruction in slot 0 and a second breakpoint in
+ * slot 1, implementing the two-trap single-step scheme described in
+ * the comment at the top of this file.
+ */
+void arch_copy_kprobe(struct kprobe *p)
+{
+ p->ainsn.insn[0] = *p->addr;
+ p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
+}
+
+/* Nothing to tear down on this architecture. */
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+/* kprobe_status settings */
+#define KPROBE_HIT_ACTIVE 0x00000001
+#define KPROBE_HIT_SS 0x00000002
+
+/* State for the single in-flight probe; serialized by the kprobes
+ * lock taken in kprobe_handler() (nested probes are not tracked).
+ */
+static struct kprobe *current_kprobe;
+static unsigned long current_kprobe_orig_tnpc;
+static unsigned long current_kprobe_orig_tstate_pil;
+static unsigned int kprobe_status;
+
+/* Arrange to single-step the probed instruction out of line with PIL
+ * interrupts masked; saves tnpc and the original PIL bit so
+ * disarm/resume can restore them afterwards.
+ */
+static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ current_kprobe_orig_tnpc = regs->tnpc;
+ current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
+ regs->tstate |= TSTATE_PIL;
+
+ /*single step inline, if it a breakpoint instruction*/
+ if (p->opcode == BREAKPOINT_INSTRUCTION) {
+ regs->tpc = (unsigned long) p->addr;
+ regs->tnpc = current_kprobe_orig_tnpc;
+ } else {
+ regs->tpc = (unsigned long) &p->ainsn.insn[0];
+ regs->tnpc = (unsigned long) &p->ainsn.insn[1];
+ }
+}
+
+/* Put the original opcode back in place (flushing the icache line so
+ * the cpu sees it) and restore the saved PC/NPC/PIL state so the real
+ * instruction re-executes.
+ */
+static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
+{
+ *p->addr = p->opcode;
+ flushi(p->addr);
+
+ regs->tpc = (unsigned long) p->addr;
+ regs->tnpc = current_kprobe_orig_tnpc;
+ regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
+ current_kprobe_orig_tstate_pil);
+}
+
+/* First-breakpoint (ta 0x70 / DIE_DEBUG) entry point. Returns 1 when
+ * the trap was consumed by kprobes, 0 to let the kernel handle it.
+ * Disables preemption on entry; re-enabled on the no_kprobe path or
+ * later in post_kprobe_handler()/jprobe_return().
+ */
+static int kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ void *addr = (void *) regs->tpc;
+ int ret = 0;
+
+ preempt_disable();
+
+ if (kprobe_running()) {
+ /* We *are* holding lock here, so this is safe.
+ * Disarm the probe we just hit, and ignore it.
+ */
+ p = get_kprobe(addr);
+ if (p) {
+ if (kprobe_status == KPROBE_HIT_SS) {
+ /* Re-entered while single-stepping: just
+ * restore PIL and bail out.
+ */
+ regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
+ current_kprobe_orig_tstate_pil);
+ unlock_kprobes();
+ goto no_kprobe;
+ }
+ disarm_kprobe(p, regs);
+ ret = 1;
+ } else {
+ /* Not a probe breakpoint; give the active probe's
+ * break_handler (jprobes) a chance to claim it.
+ */
+ p = current_kprobe;
+ if (p->break_handler && p->break_handler(p, regs))
+ goto ss_probe;
+ }
+ /* If it's not ours, can't be delete race, (we hold lock). */
+ goto no_kprobe;
+ }
+
+ lock_kprobes();
+ p = get_kprobe(addr);
+ if (!p) {
+ unlock_kprobes();
+ if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ kprobe_status = KPROBE_HIT_ACTIVE;
+ current_kprobe = p;
+ /* pre_handler returning non-zero (jprobes) skips single-step. */
+ if (p->pre_handler && p->pre_handler(p, regs))
+ return 1;
+
+ss_probe:
+ prepare_singlestep(p, regs);
+ kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+/* If INSN is a relative control transfer instruction,
+ * return the corrected branch destination value.
+ *
+ * The original INSN location was REAL_PC, it actually
+ * executed at PC and produced destination address NPC.
+ */
+/* If INSN is a relative control transfer instruction,
+ * return the corrected branch destination value.
+ *
+ * The original INSN location was REAL_PC, it actually
+ * executed at PC and produced destination address NPC.
+ */
+static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
+ unsigned long pc, unsigned long npc)
+{
+ /* Branch not taken, no mods necessary. */
+ if (npc == pc + 0x4UL)
+ return real_pc + 0x4UL;
+
+ /* The three cases are call, branch w/prediction,
+ * and traditional branch.
+ * (masks select: op==01 call; op==00 with op2 for the two
+ * branch formats)
+ */
+ if ((insn & 0xc0000000) == 0x40000000 ||
+ (insn & 0xc1c00000) == 0x00400000 ||
+ (insn & 0xc1c00000) == 0x00800000) {
+ /* The instruction did all the work for us
+ * already, just apply the offset to the correct
+ * instruction location.
+ */
+ return (real_pc + (npc - pc));
+ }
+
+ /* Anything else (e.g. absolute jmpl) produced a valid npc as-is,
+ * so just fall through to the instruction after REAL_PC.
+ */
+ return real_pc + 0x4UL;
+}
+
+/* If INSN is an instruction which writes it's PC location
+ * into a destination register, fix that up.
+ */
+static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
+{
+ unsigned long *slot = NULL;
+
+ /* Simplest cast is call, which always uses %o7 */
+ if ((insn & 0xc0000000) == 0x40000000) {
+ slot = &regs->u_regs[UREG_I7];
+ }
+
+ /* Jmpl encodes the register inside of the opcode */
+ if ((insn & 0xc1f80000) == 0x81c00000) {
+ unsigned long rd = ((insn >> 25) & 0x1f);
+
+ if (rd <= 15) {
+ slot = &regs->u_regs[rd];
+ } else {
+ /* Hard case, it goes onto the stack. */
+ /* %l/%i registers live in the register window
+ * saved on the stack, so spill all windows first
+ * and patch the saved frame.
+ */
+ flushw_all();
+
+ rd -= 16;
+ slot = (unsigned long *)
+ (regs->u_regs[UREG_FP] + STACK_BIAS);
+ slot += rd;
+ }
+ }
+ if (slot != NULL)
+ *slot = real_pc;
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the breakpoint
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap.
+ */
+static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+ u32 insn = p->ainsn.insn[0];
+
+ /* Resume at the saved next-PC, correcting tnpc and any PC-writing
+ * side effects for the fact that the copy executed out of line.
+ */
+ regs->tpc = current_kprobe_orig_tnpc;
+ regs->tnpc = relbranch_fixup(insn,
+ (unsigned long) p->addr,
+ (unsigned long) &p->ainsn.insn[0],
+ regs->tnpc);
+ retpc_fixup(regs, insn, (unsigned long) p->addr);
+
+ regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
+ current_kprobe_orig_tstate_pil);
+}
+
+/* Second-breakpoint (ta 0x71 / DIE_DEBUG_2) handler: runs the
+ * post_handler, fixes up the PCs, and ends the probe episode.
+ * Returns 1 if consumed, 0 if no probe was active.
+ */
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+ if (!kprobe_running())
+ return 0;
+
+ if (current_kprobe->post_handler)
+ current_kprobe->post_handler(current_kprobe, regs, 0);
+
+ resume_execution(current_kprobe, regs);
+
+ unlock_kprobes();
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+/* Interrupts disabled, kprobe_lock held. */
+/* Fault taken while a probe was active: let the probe's fault_handler
+ * claim it, otherwise unwind any in-progress single-step and let the
+ * kernel fault path proceed.
+ */
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ if (current_kprobe->fault_handler
+ && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ return 1;
+
+ if (kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(current_kprobe, regs);
+
+ unlock_kprobes();
+ preempt_enable_no_resched();
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine to for handling exceptions.
+ * Dispatches die-chain events to the kprobes machinery:
+ * DIE_DEBUG = first breakpoint (ta 0x70), DIE_DEBUG_2 = post-step
+ * breakpoint (ta 0x71); faults while a probe is active go to
+ * kprobe_fault_handler().
+ */
+int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ struct die_args *args = (struct die_args *)data;
+ switch (val) {
+ case DIE_DEBUG:
+ if (kprobe_handler(args->regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_DEBUG_2:
+ if (post_kprobe_handler(args->regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_GPF:
+ if (kprobe_running() &&
+ kprobe_fault_handler(args->regs, args->trapnr))
+ return NOTIFY_STOP;
+ break;
+ case DIE_PAGE_FAULT:
+ if (kprobe_running() &&
+ kprobe_fault_handler(args->regs, args->trapnr))
+ return NOTIFY_STOP;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/* Trap-table entry for the two kprobe software traps. Kernel-mode
+ * traps are routed onto the die chain; user-mode or unclaimed traps
+ * are treated as bad traps.
+ */
+asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)
+{
+ BUG_ON(trap_level != 0x170 && trap_level != 0x171);
+
+ if (user_mode(regs)) {
+ local_irq_enable();
+ bad_trap(regs, trap_level);
+ return;
+ }
+
+ /* trap_level == 0x170 --> ta 0x70
+ * trap_level == 0x171 --> ta 0x71
+ */
+ if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
+ (trap_level == 0x170) ? "debug" : "debug_2",
+ regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
+ bad_trap(regs, trap_level);
+}
+
+/* Jprobes support. */
+/* Saved cpu/stack state from probe-hit time, restored when the jprobe
+ * entry function calls jprobe_return().
+ */
+static struct pt_regs jprobe_saved_regs;
+static struct pt_regs *jprobe_saved_regs_location;
+static struct sparc_stackf jprobe_saved_stack;
+
+/* Jprobe pre_handler: snapshot pt_regs plus the caller's stack frame,
+ * then redirect execution into the user's entry function with PIL
+ * masked. Returns 1 so kprobe_handler() skips single-stepping.
+ */
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+
+ jprobe_saved_regs_location = regs;
+ memcpy(&jprobe_saved_regs, regs, sizeof(*regs));
+
+ /* Save a whole stack frame, this gets arguments
+ * pushed onto the stack after using up all the
+ * arg registers.
+ */
+ memcpy(&jprobe_saved_stack,
+ (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
+ sizeof(jprobe_saved_stack));
+
+ regs->tpc = (unsigned long) jp->entry;
+ regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
+ regs->tstate |= TSTATE_PIL;
+
+ return 1;
+}
+
+/* Called by the jprobe entry function to return to the probed code:
+ * executes ta 0x70, which re-enters kprobe_handler() and fires the
+ * break_handler (longjmp_break_handler below). The preempt enable
+ * pairs with the preempt_disable() done in kprobe_handler().
+ */
+void jprobe_return(void)
+{
+ preempt_enable_no_resched();
+ __asm__ __volatile__(
+ ".globl jprobe_return_trap_instruction\n"
+"jprobe_return_trap_instruction:\n\t"
+ "ta 0x70");
+}
+
+extern void jprobe_return_trap_instruction(void);
+
+extern void __show_regs(struct pt_regs * regs);
+
+/* break_handler fired when the ta 0x70 planted by jprobe_return()
+ * traps: restore the pt_regs and stack frame captured in
+ * setjmp_pre_handler() so execution resumes in the probed code.
+ * Returns 1 if the trap was ours, 0 otherwise.
+ */
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ u32 *addr = (u32 *) regs->tpc;
+
+ if (addr == (u32 *) jprobe_return_trap_instruction) {
+ /* Sanity check: must unwind on the same pt_regs we saved. */
+ if (jprobe_saved_regs_location != regs) {
+ printk("JPROBE: Current regs (%p) does not match "
+ "saved regs (%p).\n",
+ regs, jprobe_saved_regs_location);
+ printk("JPROBE: Saved registers\n");
+ __show_regs(jprobe_saved_regs_location);
+ printk("JPROBE: Current registers\n");
+ __show_regs(regs);
+ BUG();
+ }
+ /* Restore old register state. Do pt_regs
+ * first so that UREG_FP is the original one for
+ * the stack frame restore.
+ */
+ memcpy(regs, &jprobe_saved_regs, sizeof(*regs));
+
+ memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
+ &jprobe_saved_stack,
+ sizeof(jprobe_saved_stack));
+
+ return 1;
+ }
+ return 0;
+}
diff --git a/arch/sparc64/kernel/module.c b/arch/sparc64/kernel/module.c
new file mode 100644
index 00000000000..6c83e372f75
--- /dev/null
+++ b/arch/sparc64/kernel/module.c
@@ -0,0 +1,209 @@
+/* Kernel module help for sparc64.
+ *
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2002 David S. Miller.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#include <asm/processor.h>
+#include <asm/spitfire.h>
+
+/* Allocate SIZE bytes of page-aligned memory inside the dedicated
+ * module mapping window [MODULES_VADDR, MODULES_END). Returns NULL
+ * for a zero/oversized request or vm-area exhaustion.
+ */
+static void *module_map(unsigned long size)
+{
+ struct vm_struct *area;
+
+ size = PAGE_ALIGN(size);
+ if (!size || size > MODULES_LEN)
+ return NULL;
+
+ area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
+ if (!area)
+ return NULL;
+
+ return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+}
+
+/* Arch hook used by the generic module loader; returns zeroed memory.
+ * NOTE(review): on failure this returns ERR_PTR(-ENOMEM) rather than
+ * NULL -- confirm the generic loader of this era checks for ERR_PTR
+ * and not just a NULL pointer.
+ */
+void *module_alloc(unsigned long size)
+{
+ void *ret;
+
+ /* We handle the zero case fine, unlike vmalloc */
+ if (size == 0)
+ return NULL;
+
+ ret = module_map(size);
+ if (!ret)
+ ret = ERR_PTR(-ENOMEM);
+ else
+ memset(ret, 0, size);
+
+ return ret;
+}
+
+/* Free memory returned from module_core_alloc/module_init_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+}
+
+/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
+/* Locates the symbol table and rewrites each undefined STT_REGISTER
+ * symbol's section index to SHN_ABS so the generic loader does not
+ * try (and fail) to resolve it. Returns -ENOEXEC if no symtab.
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ unsigned int symidx;
+ Elf64_Sym *sym;
+ const char *strtab;
+ int i;
+
+ for (symidx = 0; sechdrs[symidx].sh_type != SHT_SYMTAB; symidx++) {
+ if (symidx == hdr->e_shnum-1) {
+ printk("%s: no symtab found.\n", mod->name);
+ return -ENOEXEC;
+ }
+ }
+ sym = (Elf64_Sym *)sechdrs[symidx].sh_addr;
+ strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
+
+ /* Start at 1: entry 0 is the reserved null symbol. */
+ for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
+ if (sym[i].st_shndx == SHN_UNDEF &&
+ ELF64_ST_TYPE(sym[i].st_info) == STT_REGISTER)
+ sym[i].st_shndx = SHN_ABS;
+ }
+ return 0;
+}
+
+/* Only RELA relocations are produced for sparc64; plain REL sections
+ * are rejected outright.
+ */
+int apply_relocate(Elf64_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n",
+ me->name);
+ return -ENOEXEC;
+}
+
+/* Apply one RELA section to the module image: for each entry, compute
+ * v = symbol value + addend and patch the target location according
+ * to the relocation type. Returns -ENOEXEC on an unknown type.
+ */
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf64_Sym *sym;
+ u8 *location;
+ u32 *loc32;
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ Elf64_Addr v;
+
+ /* This is where to make the change */
+ location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ loc32 = (u32 *) location;
+
+ /* Module area must be in the low 4GB for the 32-bit
+ * displacement relocations below to be expressible.
+ */
+ BUG_ON(((u64)location >> (u64)32) != (u64)0);
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ + ELF64_R_SYM(rel[i].r_info);
+ v = sym->st_value + rel[i].r_addend;
+
+ /* OLO10 carries an extra addend in the upper type byte,
+ * hence the mask to the low 8 bits here.
+ */
+ switch (ELF64_R_TYPE(rel[i].r_info) & 0xff) {
+ case R_SPARC_64:
+ /* Byte stores: target may be unaligned. */
+ location[0] = v >> 56;
+ location[1] = v >> 48;
+ location[2] = v >> 40;
+ location[3] = v >> 32;
+ location[4] = v >> 24;
+ location[5] = v >> 16;
+ location[6] = v >> 8;
+ location[7] = v >> 0;
+ break;
+
+ case R_SPARC_32:
+ location[0] = v >> 24;
+ location[1] = v >> 16;
+ location[2] = v >> 8;
+ location[3] = v >> 0;
+ break;
+
+ case R_SPARC_WDISP30:
+ v -= (Elf64_Addr) location;
+ *loc32 = (*loc32 & ~0x3fffffff) |
+ ((v >> 2) & 0x3fffffff);
+ break;
+
+ case R_SPARC_WDISP22:
+ v -= (Elf64_Addr) location;
+ *loc32 = (*loc32 & ~0x3fffff) |
+ ((v >> 2) & 0x3fffff);
+ break;
+
+ case R_SPARC_WDISP19:
+ v -= (Elf64_Addr) location;
+ *loc32 = (*loc32 & ~0x7ffff) |
+ ((v >> 2) & 0x7ffff);
+ break;
+
+ case R_SPARC_LO10:
+ *loc32 = (*loc32 & ~0x3ff) | (v & 0x3ff);
+ break;
+
+ case R_SPARC_HI22:
+ *loc32 = (*loc32 & ~0x3fffff) |
+ ((v >> 10) & 0x3fffff);
+ break;
+
+ case R_SPARC_OLO10:
+ *loc32 = (*loc32 & ~0x1fff) |
+ (((v & 0x3ff) +
+ (ELF64_R_TYPE(rel[i].r_info) >> 8))
+ & 0x1fff);
+ break;
+
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %x\n",
+ me->name,
+ (int) (ELF64_R_TYPE(rel[i].r_info) & 0xff));
+ return -ENOEXEC;
+ };
+ }
+ return 0;
+}
+
+/* Post-load fixup: spitfire cpus need their I-cache flushed by hand
+ * after new code is written (tags cleared, windows spilled, flush
+ * issued); Cheetah's I-cache is coherent so nothing is needed there.
+ */
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ /* Cheetah's I-cache is fully coherent. */
+ if (tlb_type == spitfire) {
+ unsigned long va;
+
+ flushw_all();
+ for (va = 0; va < (PAGE_SIZE << 1); va += 32)
+ spitfire_put_icache_tag(va, 0x0);
+ /* Basic asm (no operands): "%g6" is passed literally. */
+ __asm__ __volatile__("flush %g6");
+ }
+
+ return 0;
+}
+
+/* No arch-specific teardown on module unload. */
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
new file mode 100644
index 00000000000..bba140d98b1
--- /dev/null
+++ b/arch/sparc64/kernel/pci.c
@@ -0,0 +1,805 @@
+/* $Id: pci.c,v 1.39 2002/01/05 01:13:43 davem Exp $
+ * pci.c: UltraSparc PCI controller support.
+ *
+ * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pbm.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/ebus.h>
+#include <asm/isa.h>
+
+/* Mask of the PCI MEM-space bits user mmap offsets may select;
+ * used by the /proc/bus/pci mmap offset code below.
+ */
+unsigned long pci_memspace_mask = 0xffffffffUL;
+
+#ifndef CONFIG_PCI
+/* A "nop" PCI implementation. */
+/* CONFIG_PCI is off: accept the syscall but transfer nothing. */
+asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+				  unsigned long off, unsigned long len,
+				  unsigned char *buf)
+{
+	return 0;
+}
+/* CONFIG_PCI is off: accept the syscall but write nothing. */
+asmlinkage int sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+				   unsigned long off, unsigned long len,
+				   unsigned char *buf)
+{
+	return 0;
+}
+#else
+
+/* List of all PCI controllers found in the system. */
+struct pci_controller_info *pci_controller_root = NULL;
+
+/* Each PCI controller found gets a unique index. */
+int pci_num_controllers = 0;
+
+/* At boot time the user can give the kernel a command
+ * line option which controls if and how PCI devices
+ * are reordered at PCI bus probing time.
+ */
+int pci_device_reorder = 0;
+
+/* Config-space probe protocol: while pci_poke_in_progress is set for
+ * pci_poke_cpu, faults from the bypass load/store are presumably
+ * recorded in pci_poke_faulted by the fault path (set elsewhere —
+ * nothing in this file writes it to non-zero) instead of being fatal.
+ */
+volatile int pci_poke_in_progress;
+volatile int pci_poke_cpu = -1;
+volatile int pci_poke_faulted;
+
+static DEFINE_SPINLOCK(pci_poke_lock);
+
+/* Read one byte of config space at physical address ADDR using a
+ * little-endian physical-bypass load.  *RET is updated only when the
+ * access did not fault (pci_poke_faulted stayed clear).
+ */
+void pci_config_read8(u8 *addr, u8 *ret)
+{
+	unsigned long flags;
+	u8 byte;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "lduba [%1] %2, %0\n\t"
+			     "membar #Sync"
+			     : "=r" (byte)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	if (!pci_poke_faulted)
+		*ret = byte;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+/* 16-bit variant of pci_config_read8(); see the protocol notes there. */
+void pci_config_read16(u16 *addr, u16 *ret)
+{
+	unsigned long flags;
+	u16 word;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "lduha [%1] %2, %0\n\t"
+			     "membar #Sync"
+			     : "=r" (word)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	if (!pci_poke_faulted)
+		*ret = word;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+/* 32-bit variant of pci_config_read8(); see the protocol notes there. */
+void pci_config_read32(u32 *addr, u32 *ret)
+{
+	unsigned long flags;
+	u32 dword;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "lduwa [%1] %2, %0\n\t"
+			     "membar #Sync"
+			     : "=r" (dword)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	if (!pci_poke_faulted)
+		*ret = dword;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+/* Write one byte of config space at physical address ADDR via a
+ * little-endian physical-bypass store.  A faulting store is simply
+ * dropped; the caller gets no error indication.
+ */
+void pci_config_write8(u8 *addr, u8 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "stba %0, [%1] %2\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+/* 16-bit variant of pci_config_write8(); see the notes there. */
+void pci_config_write16(u16 *addr, u16 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "stha %0, [%1] %2\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+/* 32-bit variant of pci_config_write8(); see the notes there. */
+void pci_config_write32(u32 *addr, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pci_poke_lock, flags);
+	pci_poke_cpu = smp_processor_id();
+	pci_poke_in_progress = 1;
+	pci_poke_faulted = 0;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "stwa %0, [%1] %2\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
+			     : "memory");
+	pci_poke_in_progress = 0;
+	pci_poke_cpu = -1;
+	spin_unlock_irqrestore(&pci_poke_lock, flags);
+}
+
+/* Probe for all PCI controllers in the system. */
+extern void sabre_init(int, char *);
+extern void psycho_init(int, char *);
+extern void schizo_init(int, char *);
+extern void schizo_plus_init(int, char *);
+extern void tomatillo_init(int, char *);
+
+/* Maps OBP model/compatible strings to the matching controller init
+ * routine.  __initdata: discarded after boot, so only the __init
+ * scan code below may reference it.
+ */
+static struct {
+	char *model_name;
+	void (*init)(int, char *);
+} pci_controller_table[] __initdata = {
+	{ "SUNW,sabre", sabre_init },
+	{ "pci108e,a000", sabre_init },
+	{ "pci108e,a001", sabre_init },
+	{ "SUNW,psycho", psycho_init },
+	{ "pci108e,8000", psycho_init },
+	{ "SUNW,schizo", schizo_init },
+	{ "pci108e,8001", schizo_init },
+	{ "SUNW,schizo+", schizo_plus_init },
+	{ "pci108e,8002", schizo_plus_init },
+	{ "SUNW,tomatillo", tomatillo_init },
+	{ "pci108e,a801", tomatillo_init },
+};
+#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
+				  sizeof(pci_controller_table[0]))
+
+/* Look up MODEL_NAME in pci_controller_table and run the matching
+ * init routine on PROM node NODE.  Returns 1 on a match, 0 (after
+ * logging a warning) for an unknown controller.
+ */
+static int __init pci_controller_init(char *model_name, int namelen, int node)
+{
+	int i;
+
+	for (i = 0; i < PCI_NUM_CONTROLLER_TYPES; i++) {
+		if (!strncmp(model_name,
+			     pci_controller_table[i].model_name,
+			     namelen)) {
+			pci_controller_table[i].init(node, model_name);
+			return 1;
+		}
+	}
+	printk("PCI: Warning unknown controller, model name [%s]\n",
+	       model_name);
+	printk("PCI: Ignoring controller...\n");
+
+	return 0;
+}
+
+/* Return non-zero iff MODEL_NAME (first NAMELEN bytes) names one of
+ * the controller types in pci_controller_table.  NODE is unused; the
+ * signature matches the pci_controller_scan() handler contract.
+ */
+static int __init pci_is_controller(char *model_name, int namelen, int node)
+{
+	int idx = 0;
+
+	while (idx < PCI_NUM_CONTROLLER_TYPES) {
+		if (strncmp(model_name,
+			    pci_controller_table[idx].model_name,
+			    namelen) == 0)
+			return 1;
+		idx++;
+	}
+
+	return 0;
+}
+
+/* Walk every "pci" node directly under the PROM root, fetch its
+ * "model" (or, failing that, "compatible") property, and call
+ * HANDLER(name, len, node) for it.  Returns how many calls reported
+ * success (non-zero).
+ */
+static int __init pci_controller_scan(int (*handler)(char *, int, int))
+{
+	char namebuf[64];
+	int node;
+	int count = 0;
+
+	node = prom_getchild(prom_root_node);
+	while ((node = prom_searchsiblings(node, "pci")) != 0) {
+		int len;
+
+		if ((len = prom_getproperty(node, "model", namebuf, sizeof(namebuf))) > 0 ||
+		    (len = prom_getproperty(node, "compatible", namebuf, sizeof(namebuf))) > 0) {
+			int item_len = 0;
+
+			/* Our value may be a multi-valued string in the
+			 * case of some compatible properties. For sanity,
+			 * only try the first one. */
+
+			while (namebuf[item_len] && len) {
+				len--;
+				item_len++;
+			}
+
+			if (handler(namebuf, item_len, node))
+				count++;
+		}
+
+		node = prom_getsibling(node);
+		if (!node)
+			break;
+	}
+
+	return count;
+}
+
+
+/* Is there some PCI controller in the system? */
+/* Is there some PCI controller in the system?  Non-zero if any
+ * recognized controller node exists in the PROM tree.
+ */
+int __init pcic_present(void)
+{
+	return pci_controller_scan(pci_is_controller);
+}
+
+/* Find each controller in the system, attach and initialize
+ * software state structure for each and link into the
+ * pci_controller_root. Setup the controller enough such
+ * that bus scanning can be done.
+ */
+/* Run the init routine for every recognized controller node; each
+ * init links its state into pci_controller_root.
+ */
+static void __init pci_controller_probe(void)
+{
+	printk("PCI: Probing for controllers.\n");
+
+	pci_controller_scan(pci_controller_init);
+}
+
+/* Invoke each controller's bus-scan hook, in discovery order. */
+static void __init pci_scan_each_controller_bus(void)
+{
+	struct pci_controller_info *p;
+
+	for (p = pci_controller_root; p; p = p->next)
+		p->scan_bus(p);
+}
+
+/* Reorder the pci_dev chain, so that onboard devices come first
+ * and then come the pluggable cards.
+ */
+/* Reorder the pci_dev chain, so that onboard devices come first
+ * and then come the pluggable cards.
+ */
+static void __init pci_reorder_devs(void)
+{
+	struct list_head *pci_onboard = &pci_devices;
+	struct list_head *walk = pci_onboard->next;
+
+	while (walk != pci_onboard) {
+		struct pci_dev *pdev = pci_dev_g(walk);
+		/* Capture the successor first: list_del/list_add below
+		 * rewrites walk's links.
+		 */
+		struct list_head *walk_next = walk->next;
+
+		/* Bit 0x20 of the interrupt number distinguishes
+		 * onboard devices here -- assumption inherited from
+		 * __irq_ino(); confirm against the IRQ layout.
+		 */
+		if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
+			list_del(walk);
+			list_add(walk, pci_onboard);
+		}
+
+		walk = walk_next;
+	}
+}
+
+extern void clock_probe(void);
+extern void power_init(void);
+
+/* Boot-time PCI bring-up: find controllers, scan their buses,
+ * optionally reorder devices, then probe the buses' children
+ * (ISA, EBUS, clock, power).  Registered as a subsys initcall.
+ */
+static int __init pcibios_init(void)
+{
+	pci_controller_probe();
+	if (pci_controller_root == NULL)
+		return 0;
+
+	pci_scan_each_controller_bus();
+
+	if (pci_device_reorder)
+		pci_reorder_devs();
+
+	isa_init();
+	ebus_init();
+	clock_probe();
+	power_init();
+
+	return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+/* Point the bus's I/O and MEM resources at the owning PBM's spaces. */
+void pcibios_fixup_bus(struct pci_bus *pbus)
+{
+	struct pci_pbm_info *pbm = pbus->sysdata;
+
+	/* Generic PCI bus probing sets these to point at
+	 * &io{port,mem}_resource which is wrong for us.
+	 */
+	pbus->resource[0] = &pbm->io_space;
+	pbus->resource[1] = &pbm->mem_space;
+}
+
+/* Claim PDEV's resource RESOURCE inside the owning PBM's I/O or MEM
+ * root, adjusting it into the PBM's physical address space first.
+ * Returns 0 on success, -EINVAL without a PBM, or the
+ * request_resource() error.
+ */
+int pci_claim_resource(struct pci_dev *pdev, int resource)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	struct resource *res = &pdev->resource[resource];
+	struct resource *root;
+
+	if (!pbm)
+		return -EINVAL;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else
+		root = &pbm->mem_space;
+
+	pbm->parent->resource_adjust(pdev, res, root);
+
+	return request_resource(root, res);
+}
+
+/*
+ * Given the PCI bus a device resides on, try to
+ * find an acceptable resource allocation for a
+ * specific device resource..
+ */
+/*
+ * Given the PCI bus a device resides on, try to
+ * find an acceptable resource allocation for a
+ * specific device resource..
+ *
+ * Tries each of the bus's four windows whose type (I/O vs MEM)
+ * matches RES, allocating SIZE bytes at alignment SIZE starting no
+ * lower than MIN.  Returns 0 on success, -EBUSY when no window fits.
+ * DEV and RESNO are currently unused.
+ */
+static int pci_assign_bus_resource(const struct pci_bus *bus,
+				   struct pci_dev *dev,
+				   struct resource *res,
+				   unsigned long size,
+				   unsigned long min,
+				   int resno)
+{
+	unsigned int type_mask;
+	int i;
+
+	type_mask = IORESOURCE_IO | IORESOURCE_MEM;
+	for (i = 0 ; i < 4; i++) {
+		struct resource *r = bus->resource[i];
+		if (!r)
+			continue;
+
+		/* type_mask must match */
+		if ((res->flags ^ r->flags) & type_mask)
+			continue;
+
+		/* Ok, try it out.. */
+		if (allocate_resource(r, res, size, min, -1, size, NULL, NULL) < 0)
+			continue;
+
+		/* PCI config space updated by caller. */
+		return 0;
+	}
+	return -EBUSY;
+}
+
+/* Assign an address to PDEV's resource RESOURCE from the owning
+ * PBM's I/O or MEM space and write the result back to the device's
+ * BAR.  I/O allocations skip the first 0x400 bytes of the window.
+ * Returns 0 on success or the pci_assign_bus_resource() error.
+ */
+int pci_assign_resource(struct pci_dev *pdev, int resource)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct resource *res = &pdev->resource[resource];
+	unsigned long min, size;
+	int err;
+
+	if (res->flags & IORESOURCE_IO)
+		min = pbm->io_space.start + 0x400UL;
+	else
+		min = pbm->mem_space.start;
+
+	size = res->end - res->start + 1;
+
+	err = pci_assign_bus_resource(pdev->bus, pdev, res, size, min, resource);
+
+	if (err < 0) {
+		printk("PCI: Failed to allocate resource %d for %s\n",
+		       resource, pci_name(pdev));
+	} else {
+		/* Update PCI config space. */
+		pbm->parent->base_address_update(pdev, resource);
+	}
+
+	return err;
+}
+
+/* Sort resources by alignment */
+void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r;
+ struct resource_list *list, *tmp;
+ unsigned long r_align;
+
+ r = &dev->resource[i];
+ r_align = r->end - r->start;
+
+ if (!(r->flags) || r->parent)
+ continue;
+ if (!r_align) {
+ printk(KERN_WARNING "PCI: Ignore bogus resource %d "
+ "[%lx:%lx] of %s\n",
+ i, r->start, r->end, pci_name(dev));
+ continue;
+ }
+ r_align = (i < PCI_BRIDGE_RESOURCES) ? r_align + 1 : r->start;
+ for (list = head; ; list = list->next) {
+ unsigned long align = 0;
+ struct resource_list *ln = list->next;
+ int idx;
+
+ if (ln) {
+ idx = ln->res - &ln->dev->resource[0];
+ align = (idx < PCI_BRIDGE_RESOURCES) ?
+ ln->res->end - ln->res->start + 1 :
+ ln->res->start;
+ }
+ if (r_align > align) {
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ panic("pdev_sort_resources(): "
+ "kmalloc() failed!\n");
+ tmp->next = ln;
+ tmp->res = r;
+ tmp->dev = dev;
+ list->next = tmp;
+ break;
+ }
+ }
+ }
+}
+
+/* Nothing to do: IRQ config-space updates are handled elsewhere. */
+void pcibios_update_irq(struct pci_dev *pdev, int irq)
+{
+}
+
+/* No extra alignment constraints on this platform. */
+void pcibios_align_resource(void *data, struct resource *res,
+			    unsigned long size, unsigned long align)
+{
+}
+
+/* Devices are fully set up at probe time; enabling always succeeds. */
+int pcibios_enable_device(struct pci_dev *pdev, int mask)
+{
+	return 0;
+}
+
+/* Convert a CPU-view resource RES into bus-view REGION by measuring
+ * the PBM's adjustment of a zero-based probe resource and
+ * subtracting it out.
+ */
+void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region,
+			     struct resource *res)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	struct resource zero_res, *root;
+
+	zero_res.start = 0;
+	zero_res.end = 0;
+	zero_res.flags = res->flags;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else
+		root = &pbm->mem_space;
+
+	/* After adjustment, zero_res.start holds the PBM's base offset. */
+	pbm->parent->resource_adjust(pdev, &zero_res, root);
+
+	region->start = res->start - zero_res.start;
+	region->end = res->end - zero_res.start;
+}
+
+/* Convert bus-view REGION into CPU-view RES by applying the PBM's
+ * resource adjustment (inverse of pcibios_resource_to_bus()).
+ */
+void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
+			     struct pci_bus_region *region)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	struct resource *root;
+
+	res->start = region->start;
+	res->end = region->end;
+
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else
+		root = &pbm->mem_space;
+
+	pbm->parent->resource_adjust(pdev, res, root);
+}
+
+/* Parse the sparc64-specific "pci=" boot options.  The two device
+ * reordering keywords are consumed here (NULL returned); any other
+ * token is handed back to the generic parser unchanged.
+ */
+char * __init pcibios_setup(char *str)
+{
+	if (strcmp(str, "onboardfirst") == 0) {
+		pci_device_reorder = 1;
+		return NULL;
+	}
+
+	if (strcmp(str, "noreorder") == 0) {
+		pci_device_reorder = 0;
+		return NULL;
+	}
+
+	return str;
+}
+
+/* Platform support for /proc/bus/pci/X/Y mmap()s. */
+
+/* If the user uses a host-bridge as the PCI device, he may use
+ * this to perform a raw mmap() of the I/O or MEM space behind
+ * that controller.
+ *
+ * This can be useful for execution of x86 PCI bios initialization code
+ * on a PCI card, like the xfree86 int10 stuff does.
+ */
+/* Translate a user mmap of a host-bridge device into a physical page
+ * offset within the controller's I/O or MEM space.  The user offset
+ * is validated against the size of the space (spanning both PBMs
+ * when they share a domain), then rebased onto the lowest space
+ * start.  Returns 0 on success, -ENXIO/-EINVAL on error.
+ */
+static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
+				      enum pci_mmap_state mmap_state)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm;
+	struct pci_controller_info *p;
+	unsigned long space_size, user_offset, user_size;
+
+	if (!pcp)
+		return -ENXIO;
+	pbm = pcp->pbm;
+	if (!pbm)
+		return -ENXIO;
+
+	p = pbm->parent;
+	if (p->pbms_same_domain) {
+		unsigned long lowest, highest;
+
+		/* Size the union of both PBMs' windows. */
+		lowest = ~0UL; highest = 0UL;
+		if (mmap_state == pci_mmap_io) {
+			if (p->pbm_A.io_space.flags) {
+				lowest = p->pbm_A.io_space.start;
+				highest = p->pbm_A.io_space.end + 1;
+			}
+			if (p->pbm_B.io_space.flags) {
+				if (lowest > p->pbm_B.io_space.start)
+					lowest = p->pbm_B.io_space.start;
+				if (highest < p->pbm_B.io_space.end + 1)
+					highest = p->pbm_B.io_space.end + 1;
+			}
+			space_size = highest - lowest;
+		} else {
+			if (p->pbm_A.mem_space.flags) {
+				lowest = p->pbm_A.mem_space.start;
+				highest = p->pbm_A.mem_space.end + 1;
+			}
+			if (p->pbm_B.mem_space.flags) {
+				if (lowest > p->pbm_B.mem_space.start)
+					lowest = p->pbm_B.mem_space.start;
+				if (highest < p->pbm_B.mem_space.end + 1)
+					highest = p->pbm_B.mem_space.end + 1;
+			}
+			space_size = highest - lowest;
+		}
+	} else {
+		if (mmap_state == pci_mmap_io) {
+			space_size = (pbm->io_space.end -
+				      pbm->io_space.start) + 1;
+		} else {
+			space_size = (pbm->mem_space.end -
+				      pbm->mem_space.start) + 1;
+		}
+	}
+
+	/* Make sure the request is in range. */
+	user_offset = vma->vm_pgoff << PAGE_SHIFT;
+	user_size = vma->vm_end - vma->vm_start;
+
+	if (user_offset >= space_size ||
+	    (user_offset + user_size) > space_size)
+		return -EINVAL;
+
+	/* Rebase the user offset onto the lowest space start. */
+	if (p->pbms_same_domain) {
+		unsigned long lowest = ~0UL;
+
+		if (mmap_state == pci_mmap_io) {
+			if (p->pbm_A.io_space.flags)
+				lowest = p->pbm_A.io_space.start;
+			if (p->pbm_B.io_space.flags &&
+			    lowest > p->pbm_B.io_space.start)
+				lowest = p->pbm_B.io_space.start;
+		} else {
+			if (p->pbm_A.mem_space.flags)
+				lowest = p->pbm_A.mem_space.start;
+			if (p->pbm_B.mem_space.flags &&
+			    lowest > p->pbm_B.mem_space.start)
+				lowest = p->pbm_B.mem_space.start;
+		}
+		vma->vm_pgoff = (lowest + user_offset) >> PAGE_SHIFT;
+	} else {
+		if (mmap_state == pci_mmap_io) {
+			vma->vm_pgoff = (pbm->io_space.start +
+					 user_offset) >> PAGE_SHIFT;
+		} else {
+			vma->vm_pgoff = (pbm->mem_space.start +
+					 user_offset) >> PAGE_SHIFT;
+		}
+	}
+
+	return 0;
+}
+
+/* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding
+ * to the 32-bit pci bus offset for DEV requested by the user.
+ *
+ * Basically, the user finds the base address for his device which he wishes
+ * to mmap. They read the 32-bit value from the config space base register,
+ * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
+ * offset parameter of mmap on /proc/bus/pci/XXX for that device.
+ *
+ * Returns negative error code on failure, zero on success.
+ */
+/* Adjust vm_pgoff so it is the physical page offset for the BAR of
+ * DEV that the user-supplied 32-bit bus offset falls in.  The BAR is
+ * chosen as the highest base whose 32-bit image is <= the request;
+ * host bridges are delegated to __pci_mmap_make_offset_bus().
+ * Returns 0 on success, -EINVAL when no BAR matches.
+ */
+static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
+				  enum pci_mmap_state mmap_state)
+{
+	unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT;
+	unsigned long user32 = user_offset & pci_memspace_mask;
+	unsigned long largest_base, this_base, addr32;
+	int i;
+
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+		return __pci_mmap_make_offset_bus(dev, vma, mmap_state);
+
+	/* Figure out which base address this is for. */
+	largest_base = 0UL;
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		struct resource *rp = &dev->resource[i];
+
+		/* Active? */
+		if (!rp->flags)
+			continue;
+
+		/* Same type? */
+		if (i == PCI_ROM_RESOURCE) {
+			if (mmap_state != pci_mmap_mem)
+				continue;
+		} else {
+			if ((mmap_state == pci_mmap_io &&
+			     (rp->flags & IORESOURCE_IO) == 0) ||
+			    (mmap_state == pci_mmap_mem &&
+			     (rp->flags & IORESOURCE_MEM) == 0))
+				continue;
+		}
+
+		this_base = rp->start;
+
+		addr32 = (this_base & PAGE_MASK) & pci_memspace_mask;
+
+		/* I/O space offsets are only 24 bits wide. */
+		if (mmap_state == pci_mmap_io)
+			addr32 &= 0xffffff;
+
+		if (addr32 <= user32 && this_base > largest_base)
+			largest_base = this_base;
+	}
+
+	if (largest_base == 0UL)
+		return -EINVAL;
+
+	/* Now construct the final physical address. */
+	if (mmap_state == pci_mmap_io)
+		vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT);
+	else
+		vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT);
+
+	return 0;
+}
+
+/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
+ * mapping.
+ */
+/* Mark the VMA as an I/O mapping that must never be swapped out. */
+static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
+				 enum pci_mmap_state mmap_state)
+{
+	vma->vm_flags |= (VM_IO | VM_RESERVED);
+}
+
+/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
+ * device mapping.
+ */
+/* Page-protection setup hook, intentionally empty on sparc64. */
+static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
+				  enum pci_mmap_state mmap_state)
+{
+	/* Our io_remap_page_range/io_remap_pfn_range takes care of this,
+	   do nothing. */
+}
+
+/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
+ * for this architecture. The region in the process to map is described by vm_start
+ * and vm_end members of VMA, the base physical address is found in vm_pgoff.
+ * The pci device structure is provided so that architectures may make mapping
+ * decisions on a per-device or per-bus basis.
+ *
+ * Returns a negative error code on failure, zero on success.
+ */
+/* Map a PCI device's I/O or MEM space into the user VMA: translate
+ * the user offset to a physical page offset, tag the VMA, and remap.
+ * WRITE_COMBINE is accepted but unused here.  Returns 0 on success
+ * or a negative error code.
+ */
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state,
+			int write_combine)
+{
+	int ret;
+
+	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
+	if (ret < 0)
+		return ret;
+
+	__pci_mmap_set_flags(dev, vma, mmap_state);
+	__pci_mmap_set_pgprot(dev, vma, mmap_state);
+
+	ret = io_remap_pfn_range(vma, vma->vm_start,
+				 vma->vm_pgoff,
+				 vma->vm_end - vma->vm_start,
+				 vma->vm_page_prot);
+	if (ret)
+		return ret;
+
+	vma->vm_flags |= VM_IO;
+	return 0;
+}
+
+/* Return the domain number for this pci bus.
+ *
+ * The controller index is the domain; when the controller's two PBMs
+ * are in separate domains, PBM B gets the odd number.  Returns
+ * -ENXIO when the bus has no controller state attached.
+ */
+int pci_domain_nr(struct pci_bus *pbus)
+{
+	struct pci_pbm_info *pbm = pbus->sysdata;
+	int ret;
+
+	if (pbm == NULL || pbm->parent == NULL) {
+		ret = -ENXIO;
+	} else {
+		struct pci_controller_info *p = pbm->parent;
+
+		ret = p->index;
+		if (p->pbms_same_domain == 0)
+			ret = ((ret << 1) +
+			       ((pbm == &pbm->parent->pbm_B) ? 1 : 0));
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(pci_domain_nr);
+
+/* Memory-Write-Invalidate preparation hook; always succeeds. */
+int pcibios_prep_mwi(struct pci_dev *dev)
+{
+	/* We set correct PCI_CACHE_LINE_SIZE register values for every
+	 * device probed on this platform.  So there is nothing to check
+	 * and this always succeeds.
+	 */
+	return 0;
+}
+
+#endif /* !(CONFIG_PCI) */
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
new file mode 100644
index 00000000000..58310aacea2
--- /dev/null
+++ b/arch/sparc64/kernel/pci_common.c
@@ -0,0 +1,1040 @@
+/* $Id: pci_common.c,v 1.29 2002/02/01 00:56:03 davem Exp $
+ * pci_common.c: PCI controller common support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/pbm.h>
+
+/* Fix self device of BUS and hook it into BUS->self.
+ * The pci_scan_bus does not do this for the host bridge.
+ */
+/* Fix self device of BUS and hook it into BUS->self.
+ * The pci_scan_bus does not do this for the host bridge.
+ * Halts via PROM if no host-bridge class device is on the bus.
+ */
+void __init pci_fixup_host_bridge_self(struct pci_bus *pbus)
+{
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &pbus->devices, bus_list) {
+		if (pdev->class >> 8 == PCI_CLASS_BRIDGE_HOST) {
+			pbus->self = pdev;
+			return;
+		}
+	}
+
+	prom_printf("PCI: Critical error, cannot find host bridge PDEV.\n");
+	prom_halt();
+}
+
+/* Find the OBP PROM device tree node for a PCI device.
+ * Return zero if not found.
+ */
+/* Find the OBP PROM device tree node for PDEV under BUS_PROM_NODE.
+ * On success the node's "reg" entries are copied into PREGS and
+ * their count into *NREGS.  Returns the node handle, or zero if the
+ * device has no PROM node.
+ */
+static int __init find_device_prom_node(struct pci_pbm_info *pbm,
+					struct pci_dev *pdev,
+					int bus_prom_node,
+					struct linux_prom_pci_registers *pregs,
+					int *nregs)
+{
+	int node;
+
+	/*
+	 * Return the PBM's PROM node in case we are it's PCI device,
+	 * as the PBM's reg property is different to standard PCI reg
+	 * properties. We would delete this device entry otherwise,
+	 * which confuses XFree86's device probing...
+	 */
+	if ((pdev->bus->number == pbm->pci_bus->number) && (pdev->devfn == 0) &&
+	    (pdev->vendor == PCI_VENDOR_ID_SUN) &&
+	    (pdev->device == PCI_DEVICE_ID_SUN_PBM ||
+	     pdev->device == PCI_DEVICE_ID_SUN_SCHIZO ||
+	     pdev->device == PCI_DEVICE_ID_SUN_TOMATILLO ||
+	     pdev->device == PCI_DEVICE_ID_SUN_SABRE ||
+	     pdev->device == PCI_DEVICE_ID_SUN_HUMMINGBIRD)) {
+		*nregs = 0;
+		return bus_prom_node;
+	}
+
+	node = prom_getchild(bus_prom_node);
+	while (node != 0) {
+		int err = prom_getproperty(node, "reg",
+					   (char *)pregs,
+					   sizeof(*pregs) * PROMREG_MAX);
+		if (err == 0 || err == -1)
+			goto do_next_sibling;
+		/* Match on the devfn encoded in the first reg entry. */
+		if (((pregs[0].phys_hi >> 8) & 0xff) == pdev->devfn) {
+			*nregs = err / sizeof(*pregs);
+			return node;
+		}
+
+	do_next_sibling:
+		node = prom_getsibling(node);
+	}
+	return 0;
+}
+
+/* Older versions of OBP on PCI systems encode 64-bit MEM
+ * space assignments incorrectly, this fixes them up. We also
+ * take the opportunity here to hide other kinds of bogus
+ * assignments.
+ */
+/* Repair bogus OBP resource assignments on PDEV's cookie PCP.
+ *
+ * ALi M7101/M1533 bridges get all of their resources zapped: OBP's
+ * attempt to describe their implicit resources only causes spurious
+ * collision reports.  For everyone else, 64-bit prefetchable MEM
+ * space codes (space 2 with the prefetch bit) are rewritten to the
+ * 64-bit space code 3 that the rest of this file expects.
+ */
+static void __init fixup_obp_assignments(struct pci_dev *pdev,
+					 struct pcidev_cookie *pcp)
+{
+	int i;
+
+	if (pdev->vendor == PCI_VENDOR_ID_AL &&
+	    (pdev->device == PCI_DEVICE_ID_AL_M7101 ||
+	     pdev->device == PCI_DEVICE_ID_AL_M1533)) {
+		/* Zap all of the normal resources, they are
+		 * meaningless and generate bogus resource collision
+		 * messages.  This is OpenBoot's ill-fated attempt to
+		 * represent the implicit resources that these devices
+		 * have.
+		 */
+		pcp->num_prom_assignments = 0;
+		for (i = 0; i < 6; i++) {
+			pdev->resource[i].start =
+				pdev->resource[i].end =
+				pdev->resource[i].flags = 0;
+		}
+		pdev->resource[PCI_ROM_RESOURCE].start =
+			pdev->resource[PCI_ROM_RESOURCE].end =
+			pdev->resource[PCI_ROM_RESOURCE].flags = 0;
+		return;
+	}
+
+	for (i = 0; i < pcp->num_prom_assignments; i++) {
+		struct linux_prom_pci_registers *ap;
+		int space;
+
+		ap = &pcp->prom_assignments[i];
+		space = ap->phys_hi >> 24;
+		if ((space & 0x3) == 2 &&
+		    (space & 0x4) != 0) {
+			ap->phys_hi &= ~(0x7 << 24);
+			ap->phys_hi |= 0x3 << 24;
+		}
+	}
+}
+
+/* Fill in the PCI device cookie sysdata for the given
+ * PCI device. This cookie is the means by which one
+ * can get to OBP and PCI controller specific information
+ * for a PCI device.
+ */
+/* Fill in the PCI device cookie sysdata for the given
+ * PCI device.  This cookie is the means by which one
+ * can get to OBP and PCI controller specific information
+ * for a PCI device.  Devices without a PROM node are deleted from
+ * the PCI tree; allocation failure halts via PROM (boot-time only).
+ */
+static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
+				      struct pci_dev *pdev,
+				      int bus_prom_node)
+{
+	struct linux_prom_pci_registers pregs[PROMREG_MAX];
+	struct pcidev_cookie *pcp;
+	int device_prom_node, nregs, err;
+
+	device_prom_node = find_device_prom_node(pbm, pdev, bus_prom_node,
+						 pregs, &nregs);
+	if (device_prom_node == 0) {
+		/* If it is not in the OBP device tree then
+		 * there must be a damn good reason for it.
+		 *
+		 * So what we do is delete the device from the
+		 * PCI device tree completely.  This scenario
+		 * is seen, for example, on CP1500 for the
+		 * second EBUS/HappyMeal pair if the external
+		 * connector for it is not present.
+		 */
+		pci_remove_bus_device(pdev);
+		return;
+	}
+
+	pcp = kmalloc(sizeof(*pcp), GFP_ATOMIC);
+	if (pcp == NULL) {
+		prom_printf("PCI_COOKIE: Fatal malloc error, aborting...\n");
+		prom_halt();
+	}
+	pcp->pbm = pbm;
+	pcp->prom_node = device_prom_node;
+	memcpy(pcp->prom_regs, pregs, sizeof(pcp->prom_regs));
+	pcp->num_prom_regs = nregs;
+	err = prom_getproperty(device_prom_node, "name",
+			       pcp->prom_name, sizeof(pcp->prom_name));
+	if (err > 0)
+		pcp->prom_name[err] = 0;
+	else
+		pcp->prom_name[0] = 0;
+
+	err = prom_getproperty(device_prom_node,
+			       "assigned-addresses",
+			       (char *)pcp->prom_assignments,
+			       sizeof(pcp->prom_assignments));
+	if (err == 0 || err == -1)
+		pcp->num_prom_assignments = 0;
+	else
+		pcp->num_prom_assignments =
+			(err / sizeof(pcp->prom_assignments[0]));
+
+	if (strcmp(pcp->prom_name, "ebus") == 0) {
+		struct linux_prom_ebus_ranges erng[PROM_PCIRNG_MAX];
+		int iter;
+
+		/* EBUS is special... synthesize assignments from its
+		 * "ranges" property instead.
+		 */
+		err = prom_getproperty(device_prom_node, "ranges",
+				       (char *)&erng[0], sizeof(erng));
+		if (err == 0 || err == -1) {
+			prom_printf("EBUS: Fatal error, no range property\n");
+			prom_halt();
+		}
+		err = (err / sizeof(erng[0]));
+		for(iter = 0; iter < err; iter++) {
+			struct linux_prom_ebus_ranges *ep = &erng[iter];
+			struct linux_prom_pci_registers *ap;
+
+			ap = &pcp->prom_assignments[iter];
+
+			ap->phys_hi = ep->parent_phys_hi;
+			ap->phys_mid = ep->parent_phys_mid;
+			ap->phys_lo = ep->parent_phys_lo;
+			ap->size_hi = 0;
+			ap->size_lo = ep->size;
+		}
+		pcp->num_prom_assignments = err;
+	}
+
+	fixup_obp_assignments(pdev, pcp);
+
+	pdev->sysdata = pcp;
+}
+
+/* Recursively attach pcidev_cookie sysdata to every device on PBUS
+ * and its child buses, starting from PROM node PROM_NODE.
+ */
+void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
+				    struct pci_pbm_info *pbm,
+				    int prom_node)
+{
+	struct pci_dev *pdev, *pdev_next;
+	struct pci_bus *this_pbus, *pbus_next;
+
+	/* This must be _safe because the cookie fillin
+	   routine can delete devices from the tree.  */
+	list_for_each_entry_safe(pdev, pdev_next, &pbus->devices, bus_list)
+		pdev_cookie_fillin(pbm, pdev, prom_node);
+
+	list_for_each_entry_safe(this_pbus, pbus_next, &pbus->children, node) {
+		struct pcidev_cookie *pcp = this_pbus->self->sysdata;
+
+		pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
+	}
+}
+
+/* Report a bogus PROM resource assignment for PDEV via the PROM
+ * console; AP and RES are optional detail.  When DO_PROM_HALT is
+ * set the machine is halted after the report.
+ */
+static void __init bad_assignment(struct pci_dev *pdev,
+				  struct linux_prom_pci_registers *ap,
+				  struct resource *res,
+				  int do_prom_halt)
+{
+	prom_printf("PCI: Bogus PROM assignment. BUS[%02x] DEVFN[%x]\n",
+		    pdev->bus->number, pdev->devfn);
+	if (ap)
+		prom_printf("PCI: phys[%08x:%08x:%08x] size[%08x:%08x]\n",
+			    ap->phys_hi, ap->phys_mid, ap->phys_lo,
+			    ap->size_hi, ap->size_lo);
+	if (res)
+		prom_printf("PCI: RES[%016lx-->%016lx:(%lx)]\n",
+			    res->start, res->end, res->flags);
+	prom_printf("Please email this information to davem@redhat.com\n");
+	if (do_prom_halt)
+		prom_halt();
+}
+
+/* Map a PROM assignment's address-space code (bits 24-25 of phys_hi,
+ * per the PCI Bus Binding to IEEE 1275) to the PBM resource root it
+ * should be claimed from.  Returns NULL for config space and for
+ * unrecognized codes.
+ */
+static struct resource *
+__init get_root_resource(struct linux_prom_pci_registers *ap,
+			 struct pci_pbm_info *pbm)
+{
+	int space = (ap->phys_hi >> 24) & 3;
+
+	switch (space) {
+	case 0:
+		/* Configuration space, silently ignore it. */
+		return NULL;
+
+	case 1:
+		/* 16-bit IO space */
+		return &pbm->io_space;
+
+	case 2:
+		/* 32-bit MEM space */
+		return &pbm->mem_space;
+
+	case 3:
+		/* 64-bit MEM space, these are allocated out of
+		 * the 32-bit mem_space range for the PBM, ie.
+		 * we just zero out the upper 32-bits.
+		 */
+		return &pbm->mem_space;
+
+	default:
+		printk("PCI: What is resource space %x? "
+		       "Tell davem@redhat.com about it!\n", space);
+		return NULL;
+	}
+}
+
+/* Map a PROM assignment's BAR register offset (low byte of phys_hi)
+ * to the corresponding entry in PDEV's resource array.  Unknown
+ * offsets are reported via bad_assignment() and yield NULL.
+ */
+static struct resource *
+__init get_device_resource(struct linux_prom_pci_registers *ap,
+			   struct pci_dev *pdev)
+{
+	struct resource *res;
+	int breg = (ap->phys_hi & 0xff);
+
+	switch (breg) {
+	case PCI_ROM_ADDRESS:
+		/* Unfortunately I have seen several cases where
+		 * buggy FCODE uses a space value of '1' (I/O space)
+		 * in the register property for the ROM address
+		 * so disable this sanity check for now.
+		 */
+#if 0
+	{
+		int space = (ap->phys_hi >> 24) & 3;
+
+		/* It had better be MEM space. */
+		if (space != 2)
+			bad_assignment(pdev, ap, NULL, 0);
+	}
+#endif
+		res = &pdev->resource[PCI_ROM_RESOURCE];
+		break;
+
+	case PCI_BASE_ADDRESS_0:
+	case PCI_BASE_ADDRESS_1:
+	case PCI_BASE_ADDRESS_2:
+	case PCI_BASE_ADDRESS_3:
+	case PCI_BASE_ADDRESS_4:
+	case PCI_BASE_ADDRESS_5:
+		/* BARs are 4 bytes apart, so this indexes 0..5. */
+		res = &pdev->resource[(breg - PCI_BASE_ADDRESS_0) / 4];
+		break;
+
+	default:
+		bad_assignment(pdev, ap, NULL, 0);
+		res = NULL;
+		break;
+	}
+
+	return res;
+}
+
+/* Return non-zero for the Sun RIO chip functions whose OBP resource
+ * assignments are known to collide, so callers can suppress the
+ * collision warning for them.
+ */
+static int __init pdev_resource_collisions_expected(struct pci_dev *pdev)
+{
+	if (pdev->vendor != PCI_VENDOR_ID_SUN)
+		return 0;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_SUN_RIO_EBUS:
+	case PCI_DEVICE_ID_SUN_RIO_1394:
+	case PCI_DEVICE_ID_SUN_RIO_USB:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* Sanity-check each OBP-assigned resource of PDEV against its PROM
+ * assignment, adjust it into the PBM's physical address space, and
+ * claim it from the matching root.  Mismatches halt via
+ * bad_assignment(); claim conflicts are only warned about (they are
+ * reassigned later) unless the device is a known-colliding one.
+ */
+static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
+					   struct pci_dev *pdev)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	int i;
+
+	for (i = 0; i < pcp->num_prom_assignments; i++) {
+		struct linux_prom_pci_registers *ap;
+		struct resource *root, *res;
+
+		/* The format of this property is specified in
+		 * the PCI Bus Binding to IEEE1275-1994.
+		 */
+		ap = &pcp->prom_assignments[i];
+		root = get_root_resource(ap, pbm);
+		res = get_device_resource(ap, pdev);
+		if (root == NULL || res == NULL ||
+		    res->flags == 0)
+			continue;
+
+		/* Ok we know which resource this PROM assignment is
+		 * for, sanity check it.
+		 */
+		if ((res->start & 0xffffffffUL) != ap->phys_lo)
+			bad_assignment(pdev, ap, res, 1);
+
+		/* If it is a 64-bit MEM space assignment, verify that
+		 * the resource is too and that the upper 32-bits match.
+		 */
+		if (((ap->phys_hi >> 24) & 3) == 3) {
+			if (((res->flags & IORESOURCE_MEM) == 0) ||
+			    ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+			     != PCI_BASE_ADDRESS_MEM_TYPE_64))
+				bad_assignment(pdev, ap, res, 1);
+			if ((res->start >> 32) != ap->phys_mid)
+				bad_assignment(pdev, ap, res, 1);
+
+			/* PBM cannot generate cpu initiated PIOs
+			 * to the full 64-bit space.  Therefore the
+			 * upper 32-bits better be zero.  If it is
+			 * not, just skip it and we will assign it
+			 * properly ourselves.
+			 */
+			if ((res->start >> 32) != 0UL) {
+				printk(KERN_ERR "PCI: OBP assigns out of range MEM address "
+				       "%016lx for region %ld on device %s\n",
+				       res->start, (res - &pdev->resource[0]), pci_name(pdev));
+				continue;
+			}
+		}
+
+		/* Adjust the resource into the physical address space
+		 * of this PBM.
+		 */
+		pbm->parent->resource_adjust(pdev, res, root);
+
+		if (request_resource(root, res) < 0) {
+			/* OK, there is some conflict.  But this is fine
+			 * since we'll reassign it in the fixup pass.
+			 *
+			 * We notify the user that OBP made an error if it
+			 * is a case we don't expect.
+			 */
+			if (!pdev_resource_collisions_expected(pdev)) {
+				printk(KERN_ERR "PCI: Address space collision on region %ld "
+				       "[%016lx:%016lx] of device %s\n",
+				       (res - &pdev->resource[0]),
+				       res->start, res->end,
+				       pci_name(pdev));
+			}
+		}
+	}
+}
+
+/* Record OBP assignments for every device on PBUS and, recursively,
+ * every device on its child buses.
+ */
+void __init pci_record_assignments(struct pci_pbm_info *pbm,
+				   struct pci_bus *pbus)
+{
+	struct pci_dev *dev;
+	struct pci_bus *bus;
+
+	list_for_each_entry(dev, &pbus->devices, bus_list)
+		pdev_record_assignments(pbm, dev);
+
+	list_for_each_entry(bus, &pbus->children, node)
+		pci_record_assignments(pbm, bus);
+}
+
+/* Return non-zero if PDEV has implicit I/O resources even
+ * though it may not have an I/O base address register
+ * active.
+ */
+/* Return non-zero if PDEV has implicit I/O resources even though it
+ * may not have an I/O base address register active: unclassified
+ * devices, IDE controllers, and anything in the display base class.
+ */
+static int __init has_implicit_io(struct pci_dev *pdev)
+{
+	int class = pdev->class >> 8;
+
+	if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+		return 1;
+
+	switch (class) {
+	case PCI_CLASS_NOT_DEFINED:
+	case PCI_CLASS_NOT_DEFINED_VGA:
+	case PCI_CLASS_STORAGE_IDE:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* Allocate PBM address space for any resource of PDEV that is not
+ * yet assigned, write the new base back into config space, and
+ * enable the PCI command-register bits implied by the resources
+ * present.  Halts via the PROM if an allocation fails.
+ */
+static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
+ struct pci_dev *pdev)
+{
+ u32 reg;
+ u16 cmd;
+ int i, io_seen, mem_seen;
+
+ io_seen = mem_seen = 0;
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *root, *res;
+ unsigned long size, min, max, align;
+
+ res = &pdev->resource[i];
+
+ if (res->flags & IORESOURCE_IO)
+ io_seen++;
+ else if (res->flags & IORESOURCE_MEM)
+ mem_seen++;
+
+ /* If it is already assigned or the resource does
+ * not exist, there is nothing to do.
+ */
+ if (res->parent != NULL || res->flags == 0UL)
+ continue;
+
+ /* Determine the root we allocate from. */
+ if (res->flags & IORESOURCE_IO) {
+ root = &pbm->io_space;
+ min = root->start + 0x400UL;
+ max = root->end;
+ } else {
+ root = &pbm->mem_space;
+ min = root->start;
+ max = min + 0x80000000UL;
+ }
+
+ /* Resource bounds are inclusive, so the true length is
+ * size + 1; alignment equals the length (natural BAR
+ * alignment).
+ */
+ size = res->end - res->start;
+ align = size + 1;
+ if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
+ /* uh oh */
+ prom_printf("PCI: Failed to allocate resource %d for %s\n",
+ i, pci_name(pdev));
+ prom_halt();
+ }
+
+ /* Update PCI config space. */
+ pbm->parent->base_address_update(pdev, i);
+ }
+
+ /* Special case, disable the ROM. Several devices
+ * act funny (ie. do not respond to memory space writes)
+ * when it is left enabled. A good example are Qlogic,ISP
+ * adapters.
+ */
+ pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &reg);
+ reg &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(pdev, PCI_ROM_ADDRESS, reg);
+
+ /* If we saw I/O or MEM resources, enable appropriate
+ * bits in PCI command register.
+ */
+ if (io_seen || mem_seen) {
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (io_seen || has_implicit_io(pdev))
+ cmd |= PCI_COMMAND_IO;
+ if (mem_seen)
+ cmd |= PCI_COMMAND_MEMORY;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ /* If this is a PCI bridge or an IDE controller,
+ * enable bus mastering. In the former case also
+ * set the cache line size correctly.
+ */
+ if (((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) ||
+ (((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) &&
+ ((pdev->class & 0x80) != 0))) {
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
+ pci_write_config_byte(pdev,
+ PCI_CACHE_LINE_SIZE,
+ (64 / sizeof(u32)));
+ }
+}
+
+/* Walk PBUS and every bus below it, assigning address space to
+ * any resources OBP left unassigned.
+ */
+void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus)
+{
+ struct pci_dev *child_dev;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(child_dev, &pbus->devices, bus_list)
+ pdev_assign_unassigned(pbm, child_dev);
+
+ list_for_each_entry(child_bus, &pbus->children, node)
+ pci_assign_unassigned(pbm, child_bus);
+}
+
+/* Translate *INTERRUPT for PDEV through the OBP "interrupt-map"
+ * properties.  Returns 1 and rewrites *INTERRUPT to the mapped
+ * child interrupt on a match, 0 otherwise.  A bridge carrying its
+ * own interrupt-map/-mask overrides the PBM-level properties.
+ */
+static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
+{
+ struct linux_prom_pci_intmap bridge_local_intmap[PROM_PCIIMAP_MAX], *intmap;
+ struct linux_prom_pci_intmask bridge_local_intmask, *intmask;
+ struct pcidev_cookie *dev_pcp = pdev->sysdata;
+ struct pci_pbm_info *pbm = dev_pcp->pbm;
+ struct linux_prom_pci_registers *pregs = dev_pcp->prom_regs;
+ unsigned int hi, mid, lo, irq;
+ int i, num_intmap, map_slot;
+
+ intmap = &pbm->pbm_intmap[0];
+ intmask = &pbm->pbm_intmask;
+ num_intmap = pbm->num_pbm_intmap;
+ map_slot = 0;
+
+ /* If we are underneath a PCI bridge, use PROM register
+ * property of the parent bridge which is closest to
+ * the PBM.
+ *
+ * However if that parent bridge has interrupt map/mask
+ * properties of its own we use the PROM register property
+ * of the next child device on the path to PDEV.
+ *
+ * In detail the two cases are (note that the 'X' below is the
+ * 'next child on the path to PDEV' mentioned above):
+ *
+ * 1) PBM --> PCI bus lacking int{map,mask} --> X ... PDEV
+ *
+ * Here we use regs of 'PCI bus' device.
+ *
+ * 2) PBM --> PCI bus with int{map,mask} --> X ... PDEV
+ *
+ * Here we use regs of 'X'. Note that X can be PDEV.
+ */
+ if (pdev->bus->number != pbm->pci_first_busno) {
+ struct pcidev_cookie *bus_pcp, *regs_pcp;
+ struct pci_dev *bus_dev, *regs_dev;
+ int plen;
+
+ bus_dev = pdev->bus->self;
+ regs_dev = pdev;
+
+ /* Climb toward the PBM; regs_dev trails one level
+ * behind bus_dev (it is the 'X' of the comment above).
+ */
+ while (bus_dev->bus &&
+ bus_dev->bus->number != pbm->pci_first_busno) {
+ regs_dev = bus_dev;
+ bus_dev = bus_dev->bus->self;
+ }
+
+ regs_pcp = regs_dev->sysdata;
+ pregs = regs_pcp->prom_regs;
+
+ bus_pcp = bus_dev->sysdata;
+
+ /* But if the PCI bridge has its own interrupt map
+ * and mask properties, use that and the regs of the
+ * PCI entity at the next level down on the path to the
+ * device.
+ */
+ plen = prom_getproperty(bus_pcp->prom_node, "interrupt-map",
+ (char *) &bridge_local_intmap[0],
+ sizeof(bridge_local_intmap));
+ if (plen != -1) {
+ intmap = &bridge_local_intmap[0];
+ num_intmap = plen / sizeof(struct linux_prom_pci_intmap);
+ plen = prom_getproperty(bus_pcp->prom_node,
+ "interrupt-map-mask",
+ (char *) &bridge_local_intmask,
+ sizeof(bridge_local_intmask));
+ if (plen == -1) {
+ printk("pci_intmap_match: Warning! Bridge has intmap "
+ "but no intmask.\n");
+ printk("pci_intmap_match: Trying to recover.\n");
+ return 0;
+ }
+
+ if (pdev->bus->self != bus_dev)
+ map_slot = 1;
+ } else {
+ pregs = bus_pcp->prom_regs;
+ map_slot = 1;
+ }
+ }
+
+ /* Rotate the INTx line (1-4) by the device's slot number,
+ * the standard swizzle for devices behind a bridge.
+ */
+ if (map_slot) {
+ *interrupt = ((*interrupt
+ - 1
+ + PCI_SLOT(pdev->devfn)) & 0x3) + 1;
+ }
+
+ hi = pregs->phys_hi & intmask->phys_hi;
+ mid = pregs->phys_mid & intmask->phys_mid;
+ lo = pregs->phys_lo & intmask->phys_lo;
+ irq = *interrupt & intmask->interrupt;
+
+ for (i = 0; i < num_intmap; i++) {
+ if (intmap[i].phys_hi == hi &&
+ intmap[i].phys_mid == mid &&
+ intmap[i].phys_lo == lo &&
+ intmap[i].interrupt == irq) {
+ *interrupt = intmap[i].cinterrupt;
+ printk("PCI-IRQ: Routing bus[%2x] slot[%2x] map[%d] to INO[%02x]\n",
+ pdev->bus->number, PCI_SLOT(pdev->devfn),
+ map_slot, *interrupt);
+ return 1;
+ }
+ }
+
+ /* We will run this code even if pbm->num_pbm_intmap is zero, just so
+ * we can apply the slot mapping to the PROM interrupt property value.
+ * So do not spit out these warnings in that case.
+ */
+ if (num_intmap != 0) {
+ /* Print it both to OBP console and kernel one so that if bootup
+ * hangs here the user has the information to report.
+ */
+ prom_printf("pci_intmap_match: bus %02x, devfn %02x: ",
+ pdev->bus->number, pdev->devfn);
+ prom_printf("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
+ pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
+ prom_printf("Please email this information to davem@redhat.com\n");
+
+ printk("pci_intmap_match: bus %02x, devfn %02x: ",
+ pdev->bus->number, pdev->devfn);
+ printk("IRQ [%08x.%08x.%08x.%08x] not found in interrupt-map\n",
+ pregs->phys_hi, pregs->phys_mid, pregs->phys_lo, *interrupt);
+ printk("Please email this information to davem@redhat.com\n");
+ }
+
+ return 0;
+}
+
+/* Compute and program PDEV's IRQ.  Resolution order: an empty EBUS
+ * node gets no IRQ; a fully specified OBP "interrupts" property is
+ * used directly; onboard devices get the portid folded in; then an
+ * interrupt-map match is attempted; finally the INO is derived by
+ * hand from bus/slot/line.  The result is written to both pdev->irq
+ * and the PCI_INTERRUPT_LINE config register.
+ */
+static void __init pdev_fixup_irq(struct pci_dev *pdev)
+{
+ struct pcidev_cookie *pcp = pdev->sysdata;
+ struct pci_pbm_info *pbm = pcp->pbm;
+ struct pci_controller_info *p = pbm->parent;
+ unsigned int portid = pbm->portid;
+ unsigned int prom_irq;
+ int prom_node = pcp->prom_node;
+ int err;
+
+ /* If this is an empty EBUS device, sometimes OBP fails to
+ * give it a valid fully specified interrupts property.
+ * The EBUS hooked up to SunHME on PCI I/O boards of
+ * Ex000 systems is one such case.
+ *
+ * The interrupt is not important so just ignore it.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+ pdev->device == PCI_DEVICE_ID_SUN_EBUS &&
+ !prom_getchild(prom_node)) {
+ pdev->irq = 0;
+ return;
+ }
+
+ /* No usable "interrupts" property means no IRQ. */
+ err = prom_getproperty(prom_node, "interrupts",
+ (char *)&prom_irq, sizeof(prom_irq));
+ if (err == 0 || err == -1) {
+ pdev->irq = 0;
+ return;
+ }
+
+ /* Fully specified already? */
+ if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
+ pdev->irq = p->irq_build(pbm, pdev, prom_irq);
+ goto have_irq;
+ }
+
+ /* An onboard device? (bit 5 set) */
+ if ((prom_irq & PCI_IRQ_INO) & 0x20) {
+ pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
+ goto have_irq;
+ }
+
+ /* Can we find a matching entry in the interrupt-map? */
+ if (pci_intmap_match(pdev, &prom_irq)) {
+ pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
+ goto have_irq;
+ }
+
+ /* Ok, we have to do it the hard way. */
+ {
+ unsigned int bus, slot, line;
+
+ bus = (pbm == &pbm->parent->pbm_B) ? (1 << 4) : 0;
+
+ /* If we have a legal interrupt property, use it as
+ * the IRQ line.
+ */
+ if (prom_irq > 0 && prom_irq < 5) {
+ line = ((prom_irq - 1) & 3);
+ } else {
+ u8 pci_irq_line;
+
+ /* Else just directly consult PCI config space. */
+ pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pci_irq_line);
+ line = ((pci_irq_line - 1) & 3);
+ }
+
+ /* Now figure out the slot.
+ *
+ * Basically, device number zero on the top-level bus is
+ * always the PCI host controller. Slot 0 is then device 1.
+ * PBM A supports two external slots (0 and 1), and PBM B
+ * supports 4 external slots (0, 1, 2, and 3). On-board PCI
+ * devices are wired to device numbers outside of these
+ * ranges. -DaveM
+ */
+ if (pdev->bus->number == pbm->pci_first_busno) {
+ slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot;
+ } else {
+ struct pci_dev *bus_dev;
+
+ /* Underneath a bridge, use slot number of parent
+ * bridge which is closest to the PBM.
+ */
+ bus_dev = pdev->bus->self;
+ while (bus_dev->bus &&
+ bus_dev->bus->number != pbm->pci_first_busno)
+ bus_dev = bus_dev->bus->self;
+
+ slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot;
+ }
+ slot = slot << 2;
+
+ pdev->irq = p->irq_build(pbm, pdev,
+ ((portid << 6) & PCI_IRQ_IGN) |
+ (bus | slot | line));
+ }
+
+have_irq:
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
+ pdev->irq & PCI_IRQ_INO);
+}
+
+/* Fix up the IRQ of every device on PBUS and all buses below it. */
+void __init pci_fixup_irq(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus)
+{
+ struct pci_dev *child_dev;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(child_dev, &pbus->devices, bus_list)
+ pdev_fixup_irq(child_dev);
+
+ list_for_each_entry(child_bus, &pbus->children, node)
+ pci_fixup_irq(pbm, child_bus);
+}
+
+/* Enable bus mastering on PDEV if the device supports it, set the
+ * 64-byte cache line size, and program a latency timer derived
+ * from PCI_MIN_GNT when OBP left the timer at zero.
+ */
+static void pdev_setup_busmastering(struct pci_dev *pdev, int is_66mhz)
+{
+ u16 cmd;
+ u8 hdr_type, min_gnt, ltimer;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MASTER;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+ /* Read it back, if the mastering bit did not
+ * get set, the device does not support bus
+ * mastering so we have nothing to do here.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((cmd & PCI_COMMAND_MASTER) == 0)
+ return;
+
+ /* Set correct cache line size, 64-byte on all
+ * Sparc64 PCI systems. Note that the value is
+ * measured in 32-bit words.
+ */
+ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+ 64 / sizeof(u32));
+
+ /* Latency timer only exists in type 0 headers; mask off
+ * the multi-function bit before comparing.
+ */
+ pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr_type);
+ hdr_type &= ~0x80;
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL)
+ return;
+
+ /* If the latency timer is already programmed with a non-zero
+ * value, assume whoever set it (OBP or whoever) knows what
+ * they are doing.
+ */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ltimer);
+ if (ltimer != 0)
+ return;
+
+ /* XXX Since I'm tipping off the min grant value to
+ * XXX choose a suitable latency timer value, I also
+ * XXX considered making use of the max latency value
+ * XXX as well. Unfortunately I've seen too many bogusly
+ * XXX low settings for it to the point where it lacks
+ * XXX any usefulness. In one case, an ethernet card
+ * XXX claimed a min grant of 10 and a max latency of 5.
+ * XXX Now, if I had two such cards on the same bus I
+ * XXX could not set the desired burst period (calculated
+ * XXX from min grant) without violating the max latency
+ * XXX bound. Duh...
+ * XXX
+ * XXX I blame dumb PC bios implementors for stuff like
+ * XXX this, most of them don't even try to do something
+ * XXX sensible with latency timer values and just set some
+ * XXX default value (usually 32) into every device.
+ */
+
+ pci_read_config_byte(pdev, PCI_MIN_GNT, &min_gnt);
+
+ if (min_gnt == 0) {
+ /* If no min_gnt setting then use a default
+ * value.
+ */
+ if (is_66mhz)
+ ltimer = 16;
+ else
+ ltimer = 32;
+ } else {
+ int shift_factor;
+
+ if (is_66mhz)
+ shift_factor = 2;
+ else
+ shift_factor = 3;
+
+ /* Use a default value when the min_gnt value
+ * is erroneously high.
+ */
+ if (((unsigned int) min_gnt << shift_factor) > 512 ||
+ ((min_gnt << shift_factor) & 0xff) == 0) {
+ ltimer = 8 << shift_factor;
+ } else {
+ ltimer = min_gnt << shift_factor;
+ }
+ }
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ltimer);
+}
+
+/* Decide whether every device on PBUS reports 66MHz capability and
+ * record the verdict in pbm->all_devs_66mhz.
+ * NOTE(review): only devices directly on PBUS are checked here;
+ * child buses are not walked.
+ */
+void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus)
+{
+ struct pci_dev *pdev;
+ int all_are_66mhz;
+ u16 status;
+
+ if (pbm->is_66mhz_capable == 0) {
+ all_are_66mhz = 0;
+ goto out;
+ }
+
+ /* A single non-66MHz device forces the whole bus to 33MHz. */
+ all_are_66mhz = 1;
+ list_for_each_entry(pdev, &pbus->devices, bus_list) {
+ pci_read_config_word(pdev, PCI_STATUS, &status);
+ if (!(status & PCI_STATUS_66MHZ)) {
+ all_are_66mhz = 0;
+ break;
+ }
+ }
+out:
+ pbm->all_devs_66mhz = all_are_66mhz;
+
+ printk("PCI%d(PBM%c): Bus running at %dMHz\n",
+ pbm->parent->index,
+ (pbm == &pbm->parent->pbm_A) ? 'A' : 'B',
+ (all_are_66mhz ? 66 : 33));
+}
+
+/* Configure bus mastering for every device on PBUS and below,
+ * passing down whether the whole segment runs at 66MHz.
+ */
+void pci_setup_busmastering(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus)
+{
+ int run_at_66mhz = (pbm->is_66mhz_capable && pbm->all_devs_66mhz);
+ struct pci_dev *child_dev;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(child_dev, &pbus->devices, bus_list)
+ pdev_setup_busmastering(child_dev, run_at_66mhz);
+
+ list_for_each_entry(child_bus, &pbus->children, node)
+ pci_setup_busmastering(pbm, child_bus);
+}
+
+/* Reserve one legacy PC region inside MEM_RES.  Best effort: an
+ * allocation failure simply skips the reservation, matching the
+ * original code's silent-ignore policy.
+ */
+static void pci_register_legacy_region(struct resource *mem_res,
+ const char *name,
+ unsigned long offset,
+ unsigned long size)
+{
+ struct resource *p;
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return;
+
+ memset(p, 0, sizeof(*p));
+ p->name = name;
+ p->start = mem_res->start + offset;
+ p->end = p->start + size;
+ p->flags = IORESOURCE_BUSY;
+ request_resource(mem_res, p);
+}
+
+/* Claim the legacy PC memory holes (VGA RAM, System ROM, Video ROM)
+ * within MEM_RES so nothing else is placed on top of them.
+ */
+void pci_register_legacy_regions(struct resource *io_res,
+ struct resource *mem_res)
+{
+ /* VGA Video RAM. */
+ pci_register_legacy_region(mem_res, "Video RAM area",
+ 0xa0000UL, 0x1ffffUL);
+
+ /* System ROM. */
+ pci_register_legacy_region(mem_res, "System ROM",
+ 0xf0000UL, 0xffffUL);
+
+ /* Video ROM. */
+ pci_register_legacy_region(mem_res, "Video ROM",
+ 0xc0000UL, 0x7fffUL);
+}
+
+/* Generic helper routines for PCI error reporting. */
+
+/* Report and clear any signaled/received Target Abort status bits
+ * on every device under PBUS, recursively.  Writing the error bits
+ * back to PCI_STATUS clears them (write-one-to-clear).
+ */
+void pci_scan_for_target_abort(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm,
+ struct pci_bus *pbus)
+{
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+
+ list_for_each_entry(pdev, &pbus->devices, bus_list) {
+ u16 status, error_bits;
+
+ pci_read_config_word(pdev, PCI_STATUS, &status);
+ error_bits =
+ (status & (PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT));
+ if (error_bits) {
+ pci_write_config_word(pdev, PCI_STATUS, error_bits);
+ /* %016x prints the 16-bit status zero-extended. */
+ printk("PCI%d(PBM%c): Device [%s] saw Target Abort [%016x]\n",
+ p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
+ pci_name(pdev), status);
+ }
+ }
+
+ list_for_each_entry(bus, &pbus->children, node)
+ pci_scan_for_target_abort(p, pbm, bus);
+}
+
+/* Report and clear the Received Master Abort status bit on every
+ * device under PBUS, recursively.
+ */
+void pci_scan_for_master_abort(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm,
+ struct pci_bus *pbus)
+{
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+
+ list_for_each_entry(pdev, &pbus->devices, bus_list) {
+ u16 status, error_bits;
+
+ pci_read_config_word(pdev, PCI_STATUS, &status);
+ error_bits =
+ (status & (PCI_STATUS_REC_MASTER_ABORT));
+ if (error_bits) {
+ /* Write-one-to-clear the offending bit. */
+ pci_write_config_word(pdev, PCI_STATUS, error_bits);
+ printk("PCI%d(PBM%c): Device [%s] received Master Abort [%016x]\n",
+ p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
+ pci_name(pdev), status);
+ }
+ }
+
+ list_for_each_entry(bus, &pbus->children, node)
+ pci_scan_for_master_abort(p, pbm, bus);
+}
+
+/* Report and clear parity error status bits (master data parity
+ * and detected parity) on every device under PBUS, recursively.
+ */
+void pci_scan_for_parity_error(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm,
+ struct pci_bus *pbus)
+{
+ struct pci_dev *pdev;
+ struct pci_bus *bus;
+
+ list_for_each_entry(pdev, &pbus->devices, bus_list) {
+ u16 status, error_bits;
+
+ pci_read_config_word(pdev, PCI_STATUS, &status);
+ error_bits =
+ (status & (PCI_STATUS_PARITY |
+ PCI_STATUS_DETECTED_PARITY));
+ if (error_bits) {
+ /* Write-one-to-clear the offending bits. */
+ pci_write_config_word(pdev, PCI_STATUS, error_bits);
+ printk("PCI%d(PBM%c): Device [%s] saw Parity Error [%016x]\n",
+ p->index, ((pbm == &p->pbm_A) ? 'A' : 'B'),
+ pci_name(pdev), status);
+ }
+ }
+
+ list_for_each_entry(bus, &pbus->children, node)
+ pci_scan_for_parity_error(p, pbm, bus);
+}
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
new file mode 100644
index 00000000000..6c320596254
--- /dev/null
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -0,0 +1,49 @@
+/* $Id: pci_impl.h,v 1.9 2001/06/13 06:34:30 davem Exp $
+ * pci_impl.h: Helper definitions for PCI controller support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef PCI_IMPL_H
+#define PCI_IMPL_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+
+/* Controller bookkeeping shared across the sparc64 PCI code. */
+extern struct pci_controller_info *pci_controller_root;
+
+extern int pci_num_controllers;
+
+/* PCI bus scanning and fixup support. */
+extern void pci_fixup_host_bridge_self(struct pci_bus *pbus);
+extern void pci_fill_in_pbm_cookies(struct pci_bus *pbus,
+ struct pci_pbm_info *pbm,
+ int prom_node);
+extern void pci_record_assignments(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus);
+extern void pci_assign_unassigned(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus);
+extern void pci_fixup_irq(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus);
+extern void pci_determine_66mhz_disposition(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus);
+extern void pci_setup_busmastering(struct pci_pbm_info *pbm,
+ struct pci_bus *pbus);
+extern void pci_register_legacy_regions(struct resource *io_res,
+ struct resource *mem_res);
+
+/* Error reporting support. */
+extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
+extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
+extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
+
+/* Configuration space access. */
+extern void pci_config_read8(u8 *addr, u8 *ret);
+extern void pci_config_read16(u16 *addr, u16 *ret);
+extern void pci_config_read32(u32 *addr, u32 *ret);
+extern void pci_config_write8(u8 *addr, u8 val);
+extern void pci_config_write16(u16 *addr, u16 val);
+extern void pci_config_write32(u32 *addr, u32 val);
+
+#endif /* !(PCI_IMPL_H) */
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
new file mode 100644
index 00000000000..292983413ae
--- /dev/null
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -0,0 +1,855 @@
+/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
+ * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/pbm.h>
+
+#include "iommu_common.h"
+
+/* Address of the streaming-cache context match register for CTX;
+ * each context slot is 8 bytes wide.
+ */
+#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
+ ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
+
+/* Accessing IOMMU and Streaming Buffer registers.
+ * REG parameter is a physical address. All registers
+ * are 64-bits in size.
+ * NOTE(review): ASI_PHYS_BYPASS_EC_E suggests a cache-bypassing
+ * physical access -- confirm against the UltraSPARC ASI list.
+ */
+#define pci_iommu_read(__reg) \
+({ u64 __ret; \
+ __asm__ __volatile__("ldxa [%1] %2, %0" \
+ : "=r" (__ret) \
+ : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory"); \
+ __ret; \
+})
+#define pci_iommu_write(__reg, __val) \
+ __asm__ __volatile__("stxa %0, [%1] %2" \
+ : /* no outputs */ \
+ : "r" (__val), "r" (__reg), \
+ "i" (ASI_PHYS_BYPASS_EC_E))
+
+/* Must be invoked under the IOMMU lock. */
+static void __iommu_flushall(struct pci_iommu *iommu)
+{
+ unsigned long tag;
+ int entry;
+
+ /* Zero all 16 tag registers to invalidate the cached
+ * translations.  NOTE(review): (0xa580 - 0x0210) appears to be
+ * the offset from the flush register to the tag diagnostic
+ * area -- confirm against the controller documentation.
+ */
+ tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
+ for (entry = 0; entry < 16; entry++) {
+ pci_iommu_write(tag, 0);
+ tag += 8;
+ }
+
+ /* Ensure completion of previous PIO writes. */
+ (void) pci_iommu_read(iommu->write_complete_reg);
+
+ /* Now update everyone's flush point. */
+ for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
+ iommu->alloc_info[entry].flush =
+ iommu->alloc_info[entry].next;
+ }
+}
+
+/* Template IOPTE for a consistent (cache-coherent) mapping; the
+ * context value is placed at bit 47 and up.
+ */
+#define IOPTE_CONSISTENT(CTX) \
+ (IOPTE_VALID | IOPTE_CACHE | \
+ (((CTX) << 47) & IOPTE_CONTEXT))
+
+/* Streaming mapping: consistent template plus streaming buffer. */
+#define IOPTE_STREAMING(CTX) \
+ (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
+
+/* Existing mappings are never marked invalid, instead they
+ * are pointed to a dummy page.
+ */
+#define IOPTE_IS_DUMMY(iommu, iopte) \
+ ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
+
+/* Redirect IOPTE's page pointer at the IOMMU's dummy page, the
+ * "free" state of an entry (see IOPTE_IS_DUMMY above).  Entries are
+ * never marked invalid outright.
+ *
+ * Fix: idiomatic qualifier order is 'static inline void', not
+ * 'static void inline'.
+ */
+static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
+{
+ unsigned long val = iopte_val(*iopte);
+
+ val &= ~IOPTE_PAGE;
+ val |= iommu->dummy_page_pa;
+
+ iopte_val(*iopte) = val;
+}
+
+/* Put every IOPTE of a TSBSIZE-byte page table into the dummy
+ * (unmapped) state.
+ */
+void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
+{
+ int nentries, idx;
+
+ nentries = tsbsize / sizeof(iopte_t);
+ for (idx = 0; idx < nentries; idx++)
+ iopte_make_dummy(iommu, iommu->page_table + idx);
+}
+
+/* Allocate a cluster of IOPTEs large enough for NPAGES pages using
+ * a rotating next-fit cursor per power-of-two cluster size.  Caller
+ * holds iommu->lock (see the users below).  Returns NULL when the
+ * table is exhausted.
+ */
+static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
+{
+ iopte_t *iopte, *limit, *first;
+ unsigned long cnum, ent, flush_point;
+
+ /* Round NPAGES up to the next power of two to pick the
+ * cluster size index.
+ */
+ cnum = 0;
+ while ((1UL << cnum) < npages)
+ cnum++;
+ iopte = (iommu->page_table +
+ (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+
+ /* Cluster zero is shared with consistent mappings, which grow
+ * down from the top; do not scan past the lowest one.
+ */
+ if (cnum == 0)
+ limit = (iommu->page_table +
+ iommu->lowest_consistent_map);
+ else
+ limit = (iopte +
+ (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+
+ iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
+ flush_point = iommu->alloc_info[cnum].flush;
+
+ first = iopte;
+ for (;;) {
+ if (IOPTE_IS_DUMMY(iommu, iopte)) {
+ if ((iopte + (1 << cnum)) >= limit)
+ ent = 0;
+ else
+ ent = ent + 1;
+ iommu->alloc_info[cnum].next = ent;
+ if (ent == flush_point)
+ __iommu_flushall(iommu);
+ break;
+ }
+ iopte += (1 << cnum);
+ ent++;
+ if (iopte >= limit) {
+ iopte = (iommu->page_table +
+ (cnum <<
+ (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+ ent = 0;
+ }
+ if (ent == flush_point)
+ __iommu_flushall(iommu);
+ /* Wrapped all the way around: table full. */
+ if (iopte == first)
+ goto bad;
+ }
+
+ /* I've got your streaming cluster right here buddy boy... */
+ return iopte;
+
+bad:
+ printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
+ npages);
+ return NULL;
+}
+
+/* Return a streaming cluster at BASE to the allocator.  Caller
+ * holds iommu->lock.  The entry itself is not cleared here; we only
+ * move the flush point so the IOMMU is flushed before the entry can
+ * be handed out again.
+ */
+static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
+ unsigned long npages, unsigned long ctx)
+{
+ unsigned long cnum, ent;
+
+ cnum = 0;
+ while ((1UL << cnum) < npages)
+ cnum++;
+
+ /* Recover the cluster index from the DMA base address. */
+ ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
+ >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
+
+ /* If the global flush might not have caught this entry,
+ * adjust the flush point such that we will flush before
+ * ever trying to reuse it.
+ */
+#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y)))
+ if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
+ iommu->alloc_info[cnum].flush = ent;
+#undef between
+}
+
+/* We allocate consistent mappings from the end of cluster zero. */
+/* Scan backwards from the top of cluster zero for NPAGES
+ * consecutive free (dummy) IOPTEs.  Caller holds iommu->lock.
+ * Tracks the lowest consistent entry so the streaming allocator
+ * knows where to stop.  Returns NULL if no run is found.
+ */
+static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
+{
+ iopte_t *iopte;
+
+ iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
+ while (iopte > iommu->page_table) {
+ iopte--;
+ if (IOPTE_IS_DUMMY(iommu, iopte)) {
+ unsigned long tmp = npages;
+
+ /* Check that the preceding npages-1 entries are
+ * free as well; tmp reaches 0 only on success.
+ */
+ while (--tmp) {
+ iopte--;
+ if (!IOPTE_IS_DUMMY(iommu, iopte))
+ break;
+ }
+ if (tmp == 0) {
+ u32 entry = (iopte - iommu->page_table);
+
+ if (entry < iommu->lowest_consistent_map)
+ iommu->lowest_consistent_map = entry;
+ return iopte;
+ }
+ }
+ }
+ return NULL;
+}
+
+/* Allocate and map kernel buffer of size SIZE using consistent mode
+ * DMA for PCI device PDEV. Return non-NULL cpu-side address if
+ * successful and set *DMA_ADDRP to the PCI side dma address.
+ */
+void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+{
+ struct pcidev_cookie *pcp;
+ struct pci_iommu *iommu;
+ iopte_t *iopte;
+ unsigned long flags, order, first_page, ctx;
+ void *ret;
+ int npages;
+
+ /* Refuse allocations of order 10 or more. */
+ size = IO_PAGE_ALIGN(size);
+ order = get_order(size);
+ if (order >= 10)
+ return NULL;
+
+ first_page = __get_free_pages(GFP_ATOMIC, order);
+ if (first_page == 0UL)
+ return NULL;
+ memset((char *)first_page, 0, PAGE_SIZE << order);
+
+ pcp = pdev->sysdata;
+ iommu = pcp->pbm->iommu;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
+ if (iopte == NULL) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ free_pages(first_page, order);
+ return NULL;
+ }
+
+ *dma_addrp = (iommu->page_table_map_base +
+ ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
+ ret = (void *) first_page;
+ npages = size >> IO_PAGE_SHIFT;
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu->iommu_cur_ctx++;
+ /* Fill one writable consistent IOPTE per page. */
+ first_page = __pa(first_page);
+ while (npages--) {
+ iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
+ IOPTE_WRITE |
+ (first_page & IOPTE_PAGE));
+ iopte++;
+ first_page += IO_PAGE_SIZE;
+ }
+
+ /* Flush the IOMMU for every page just mapped. */
+ {
+ int i;
+ u32 daddr = *dma_addrp;
+
+ npages = size >> IO_PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ pci_iommu_write(iommu->iommu_flush, daddr);
+ daddr += IO_PAGE_SIZE;
+ }
+ }
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return ret;
+}
+
+/* Free and unmap a consistent DMA translation. */
+void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
+{
+ struct pcidev_cookie *pcp;
+ struct pci_iommu *iommu;
+ iopte_t *iopte;
+ unsigned long flags, order, npages, i, ctx;
+
+ npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
+ pcp = pdev->sysdata;
+ iommu = pcp->pbm->iommu;
+ iopte = iommu->page_table +
+ ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ /* If we are freeing the lowest consistent mapping, scan
+ * upward for the next in-use entry so the streaming
+ * allocator's limit can be raised again.
+ */
+ if ((iopte - iommu->page_table) ==
+ iommu->lowest_consistent_map) {
+ iopte_t *walk = iopte + npages;
+ iopte_t *limit;
+
+ limit = (iommu->page_table +
+ (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
+ while (walk < limit) {
+ if (!IOPTE_IS_DUMMY(iommu, walk))
+ break;
+ walk++;
+ }
+ iommu->lowest_consistent_map =
+ (walk - iommu->page_table);
+ }
+
+ /* Data for consistent mappings cannot enter the streaming
+ * buffers, so we only need to update the TSB. We flush
+ * the IOMMU here as well to prevent conflicts with the
+ * streaming mapping deferred tlb flush scheme.
+ */
+
+ /* Recover the context from bits 47+ of the first IOPTE. */
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
+
+ for (i = 0; i < npages; i++, iopte++)
+ iopte_make_dummy(iommu, iopte);
+
+ /* One context flush covers the whole range; otherwise flush
+ * each page address individually.
+ */
+ if (iommu->iommu_ctxflush) {
+ pci_iommu_write(iommu->iommu_ctxflush, ctx);
+ } else {
+ for (i = 0; i < npages; i++) {
+ u32 daddr = dvma + (i << IO_PAGE_SHIFT);
+
+ pci_iommu_write(iommu->iommu_flush, daddr);
+ }
+ }
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ order = get_order(size);
+ if (order < 10)
+ free_pages((unsigned long)cpu, order);
+}
+
+/* Map a single buffer at PTR of SZ bytes for PCI DMA
+ * in streaming mode.  Returns the bus address, or
+ * PCI_DMA_ERROR_CODE if no cluster could be allocated.
+ */
+dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
+{
+ struct pcidev_cookie *pcp;
+ struct pci_iommu *iommu;
+ struct pci_strbuf *strbuf;
+ iopte_t *base;
+ unsigned long flags, npages, oaddr;
+ unsigned long i, base_paddr, ctx;
+ u32 bus_addr, ret;
+ unsigned long iopte_protection;
+
+ pcp = pdev->sysdata;
+ iommu = pcp->pbm->iommu;
+ strbuf = &pcp->pbm->stc;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ /* Number of IO pages the (possibly unaligned) buffer spans. */
+ oaddr = (unsigned long)ptr;
+ npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
+ npages >>= IO_PAGE_SHIFT;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ base = alloc_streaming_cluster(iommu, npages);
+ if (base == NULL)
+ goto bad;
+ bus_addr = (iommu->page_table_map_base +
+ ((base - iommu->page_table) << IO_PAGE_SHIFT));
+ /* Preserve the sub-page offset in the returned address. */
+ ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
+ base_paddr = __pa(oaddr & IO_PAGE_MASK);
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu->iommu_cur_ctx++;
+ if (strbuf->strbuf_enabled)
+ iopte_protection = IOPTE_STREAMING(ctx);
+ else
+ iopte_protection = IOPTE_CONSISTENT(ctx);
+ /* Only device-to-memory directions need write permission. */
+ if (direction != PCI_DMA_TODEVICE)
+ iopte_protection |= IOPTE_WRITE;
+
+ for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
+ iopte_val(*base) = iopte_protection | base_paddr;
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return ret;
+
+bad:
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ return PCI_DMA_ERROR_CODE;
+}
+
+/* Unmap a single streaming mode DMA translation. */
+void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+{
+ struct pcidev_cookie *pcp;
+ struct pci_iommu *iommu;
+ struct pci_strbuf *strbuf;
+ iopte_t *base;
+ unsigned long flags, npages, i, ctx;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ pcp = pdev->sysdata;
+ iommu = pcp->pbm->iommu;
+ strbuf = &pcp->pbm->stc;
+
+ npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
+ npages >>= IO_PAGE_SHIFT;
+ base = iommu->page_table +
+ ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+#ifdef DEBUG_PCI_IOMMU
+ if (IOPTE_IS_DUMMY(iommu, base))
+ printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
+ bus_addr, sz, __builtin_return_address(0));
+#endif
+ bus_addr &= IO_PAGE_MASK;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ /* Record the context, if any. */
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+
+ /* Step 1: Kick data out of streaming buffers if necessary. */
+ if (strbuf->strbuf_enabled) {
+ u32 vaddr = bus_addr;
+
+ PCI_STC_FLUSHFLAG_INIT(strbuf);
+ /* Prefer one context flush over per-page flushes when
+ * both the streaming buffer and IOMMU support contexts.
+ */
+ if (strbuf->strbuf_ctxflush &&
+ iommu->iommu_ctxflush) {
+ unsigned long matchreg, flushreg;
+
+ flushreg = strbuf->strbuf_ctxflush;
+ matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+ do {
+ pci_iommu_write(flushreg, ctx);
+ } while(((long)pci_iommu_read(matchreg)) < 0L);
+ } else {
+ for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+ pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+ }
+
+ /* Request the flush-completion write-back, then poll
+ * the flag until it is set.
+ */
+ pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+ (void) pci_iommu_read(iommu->write_complete_reg);
+ while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+ membar("#LoadLoad");
+ }
+
+ /* Step 2: Clear out first TSB entry. */
+ iopte_make_dummy(iommu, base);
+
+ free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
+ npages, ctx);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Physical address of a scatterlist entry's data. */
+#define SG_ENT_PHYS_ADDRESS(SG) \
+ (__pa(page_address((SG)->page)) + (SG)->offset)
+
+/* Fill IOPTEs for a scatterlist already coalesced into NUSED DMA
+ * segments (dma_address/dma_length set): walk the NELEMS physical
+ * entries, merging those that continue within the same IO page, and
+ * emit one IOPTE per IO page of each DMA segment.
+ */
+static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
+ int nused, int nelems, unsigned long iopte_protection)
+{
+ struct scatterlist *dma_sg = sg;
+ struct scatterlist *sg_end = sg + nelems;
+ int i;
+
+ for (i = 0; i < nused; i++) {
+ unsigned long pteval = ~0UL;
+ u32 dma_npages;
+
+ /* IO pages covered by this DMA segment, including the
+ * leading sub-page offset.
+ */
+ dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
+ dma_sg->dma_length +
+ ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
+ do {
+ unsigned long offset;
+ signed int len;
+
+ /* If we are here, we know we have at least one
+ * more page to map. So walk forward until we
+ * hit a page crossing, and begin creating new
+ * mappings from that spot.
+ */
+ for (;;) {
+ unsigned long tmp;
+
+ tmp = SG_ENT_PHYS_ADDRESS(sg);
+ len = sg->length;
+ if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = tmp & IO_PAGE_MASK;
+ offset = tmp & (IO_PAGE_SIZE - 1UL);
+ break;
+ }
+ if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+ pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
+ offset = 0UL;
+ len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
+ break;
+ }
+ sg++;
+ }
+
+ pteval = iopte_protection | (pteval & IOPTE_PAGE);
+ while (len > 0) {
+ *iopte++ = __iopte(pteval);
+ pteval += IO_PAGE_SIZE;
+ len -= (IO_PAGE_SIZE - offset);
+ offset = 0;
+ dma_npages--;
+ }
+
+ pteval = (pteval & IOPTE_PAGE) + len;
+ sg++;
+
+ /* Skip over any tail mappings we've fully mapped,
+ * adjusting pteval along the way. Stop when we
+ * detect a page crossing event.
+ */
+ while (sg < sg_end &&
+ (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+ (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
+ ((pteval ^
+ (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
+ pteval += sg->length;
+ sg++;
+ }
+ if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
+ pteval = ~0UL;
+ } while (dma_npages != 0);
+ dma_sg++;
+ }
+}
+
+/* Map a set of buffers described by SGLIST with NELEMS array
+ * elements in streaming mode for PCI DMA.
+ *
+ * Returns the number of coalesced DMA segments (<= nelems), or
+ * PCI_DMA_ERROR_CODE if no IOMMU cluster could be allocated.
+ *
+ * When making changes here, inspect the assembly output. I was having
+ * a hard time keeping this routine out of using stack slots for holding
+ * variables.
+ */
+int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	unsigned long flags, ctx, npages, iopte_protection;
+	iopte_t *base;
+	u32 dma_base;
+	struct scatterlist *sgtmp;
+	int used;
+
+	/* Fast path single entry scatterlists. */
+	if (nelems == 1) {
+		sglist->dma_address =
+			pci_map_single(pdev,
+				       (page_address(sglist->page) + sglist->offset),
+				       sglist->length, direction);
+		sglist->dma_length = sglist->length;
+		return 1;
+	}
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	/* Step 1: Prepare scatter list (coalesce entries, compute the
+	 * number of IO pages needed).
+	 */
+
+	npages = prepare_sg(sglist, nelems);
+
+	/* Step 2: Allocate a cluster. */
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	base = alloc_streaming_cluster(iommu, npages);
+	if (base == NULL)
+		goto bad;
+	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
+
+	/* Step 3: Normalize DMA addresses: turn the cluster-relative
+	 * offsets into real bus addresses.  Entries with zero dma_length
+	 * mark the end of the coalesced segments.
+	 */
+	used = nelems;
+
+	sgtmp = sglist;
+	while (used && sgtmp->dma_length) {
+		sgtmp->dma_address += dma_base;
+		sgtmp++;
+		used--;
+	}
+	used = nelems - used;
+
+	/* Step 4: Choose a context if necessary (only when the hardware
+	 * supports context flushing).
+	 */
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu->iommu_cur_ctx++;
+
+	/* Step 5: Create the mappings. */
+	if (strbuf->strbuf_enabled)
+		iopte_protection = IOPTE_STREAMING(ctx);
+	else
+		iopte_protection = IOPTE_CONSISTENT(ctx);
+	if (direction != PCI_DMA_TODEVICE)
+		iopte_protection |= IOPTE_WRITE;
+	fill_sg (base, sglist, used, nelems, iopte_protection);
+#ifdef VERIFY_SG
+	verify_sglist(sglist, nelems, base, npages);
+#endif
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return used;
+
+bad:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+	return PCI_DMA_ERROR_CODE;
+}
+
+/* Unmap a set of streaming mode DMA translations.
+ *
+ * The mapped extent is recovered from the scatterlist itself: entries
+ * are scanned up to the first zero dma_length (the list was coalesced
+ * by pci_map_sg), and npages covers from the first entry's page-aligned
+ * dma_address through the end of the last used entry.  Any dirty data
+ * is flushed from the streaming cache before the IOMMU cluster is
+ * released.
+ */
+void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	iopte_t *base;
+	unsigned long flags, ctx, i, npages;
+	u32 bus_addr;
+
+	if (direction == PCI_DMA_NONE)
+		BUG();
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	bus_addr = sglist->dma_address & IO_PAGE_MASK;
+
+	/* Find the last scatterlist entry actually used by the mapping. */
+	for (i = 1; i < nelems; i++)
+		if (sglist[i].dma_length == 0)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+
+	base = iommu->page_table +
+		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+
+#ifdef DEBUG_PCI_IOMMU
+	if (IOPTE_IS_DUMMY(iommu, base))
+		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
+#endif
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	/* Record the context, if any, from the first IOPTE. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+
+	/* Step 1: Kick data out of streaming buffers if necessary.
+	 * Prefer a single context flush when the hardware supports it,
+	 * else flush page by page.
+	 */
+	if (strbuf->strbuf_enabled) {
+		u32 vaddr = (u32) bus_addr;
+
+		PCI_STC_FLUSHFLAG_INIT(strbuf);
+		if (strbuf->strbuf_ctxflush &&
+		    iommu->iommu_ctxflush) {
+			unsigned long matchreg, flushreg;
+
+			flushreg = strbuf->strbuf_ctxflush;
+			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+			/* Repeat until the context-match register reports
+			 * no more dirty lines for this context.
+			 */
+			do {
+				pci_iommu_write(flushreg, ctx);
+			} while(((long)pci_iommu_read(matchreg)) < 0L);
+		} else {
+			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+		}
+
+		/* Trigger the flush-flag writeback and spin until the
+		 * hardware sets it, proving the flush completed.
+		 */
+		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+		(void) pci_iommu_read(iommu->write_complete_reg);
+		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+			membar("#LoadLoad");
+	}
+
+	/* Step 2: Clear out first TSB entry. */
+	iopte_make_dummy(iommu, base);
+
+	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
+			       npages, ctx);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * No-op unless the streaming cache is enabled.  Otherwise flushes the
+ * IO pages covering [bus_addr, bus_addr + sz) out of the streaming
+ * buffer — by context when the hardware supports it, else page by
+ * page — then waits for the flush-flag writeback to complete.
+ */
+void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	unsigned long flags, ctx, npages;
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	if (!strbuf->strbuf_enabled)
+		return;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
+	npages >>= IO_PAGE_SHIFT;
+	bus_addr &= IO_PAGE_MASK;
+
+	/* Step 1: Record the context, if any, from the mapping's IOPTE. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		iopte_t *iopte;
+
+		iopte = iommu->page_table +
+			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
+		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
+	}
+
+	/* Step 2: Kick data out of streaming buffers. */
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+		/* Repeat until no dirty lines remain for this context. */
+		do {
+			pci_iommu_write(flushreg, ctx);
+		} while(((long)pci_iommu_read(matchreg)) < 0L);
+	} else {
+		unsigned long i;
+
+		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
+	}
+
+	/* Step 3: Perform flush synchronization sequence. */
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+		membar("#LoadLoad");
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * Scatterlist analogue of pci_dma_sync_single_for_cpu(): no-op unless
+ * the streaming cache is enabled; flushes either by context or by
+ * walking every IO page the (coalesced) scatterlist mapping covers,
+ * then waits for the flush-flag writeback.
+ */
+void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
+{
+	struct pcidev_cookie *pcp;
+	struct pci_iommu *iommu;
+	struct pci_strbuf *strbuf;
+	unsigned long flags, ctx;
+
+	pcp = pdev->sysdata;
+	iommu = pcp->pbm->iommu;
+	strbuf = &pcp->pbm->stc;
+
+	if (!strbuf->strbuf_enabled)
+		return;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+
+	/* Step 1: Record the context, if any, from the first IOPTE. */
+	ctx = 0;
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		iopte_t *iopte;
+
+		iopte = iommu->page_table +
+			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
+		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
+	}
+
+	/* Step 2: Kick data out of streaming buffers. */
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (iommu->iommu_ctxflush &&
+	    strbuf->strbuf_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+		/* Repeat until no dirty lines remain for this context. */
+		do {
+			pci_iommu_write(flushreg, ctx);
+		} while (((long)pci_iommu_read(matchreg)) < 0L);
+	} else {
+		unsigned long i, npages;
+		u32 bus_addr;
+
+		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+
+		/* A zero dma_length ends the coalesced segment list. */
+		for(i = 1; i < nelems; i++)
+			if (!sglist[i].dma_length)
+				break;
+		i--;
+		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
+	}
+
+	/* Step 3: Perform flush synchronization sequence. */
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+		membar("#LoadLoad");
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Work around ALI sound chip 31-bit DMA: a register in the M1533 ISA
+ * bridge (config offset 0x7e, bit 0) determines what bit 31 is emitted
+ * as.  Set or clear that bit according to @set_bit.
+ *
+ * Fix: pci_get_device() returns NULL when no M1533 bridge is present;
+ * the original dereferenced the result unconditionally.
+ */
+static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
+{
+	struct pci_dev *ali_isa_bridge;
+	u8 val;
+
+	/* ALI sound chips generate 31-bits of DMA, a special register
+	 * determines what bit 31 is emitted as.
+	 */
+	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
+					PCI_DEVICE_ID_AL_M1533,
+					NULL);
+	if (!ali_isa_bridge)
+		return;		/* No M1533 bridge, nothing to tweak. */
+
+	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
+	if (set_bit)
+		val |= 0x01;
+	else
+		val &= ~0x01;
+	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
+	pci_dev_put(ali_isa_bridge);
+}
+
+/* Report whether a device DMA mask is usable behind this IOMMU.
+ *
+ * Returns non-zero when @device_mask covers the controller's
+ * dma_addr_mask (always 0xffffffff when @pdev is NULL); masks of
+ * 32 bits or more that don't fit return 0.  The ALI M5451 audio
+ * device gets its special 31-bit DMA handling via
+ * ali_sound_dma_hack().
+ *
+ * Fix: the original fetched pdev->sysdata in the declaration, before
+ * the pdev == NULL check, dereferencing a possible NULL pointer.  The
+ * fetch now happens only on the non-NULL path.
+ */
+int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
+{
+	u64 dma_addr_mask;
+
+	if (pdev == NULL) {
+		dma_addr_mask = 0xffffffff;
+	} else {
+		struct pcidev_cookie *pcp = pdev->sysdata;
+		struct pci_iommu *iommu = pcp->pbm->iommu;
+
+		dma_addr_mask = iommu->dma_addr_mask;
+
+		if (pdev->vendor == PCI_VENDOR_ID_AL &&
+		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
+		    device_mask == 0x7fffffff) {
+			ali_sound_dma_hack(pdev,
+					   (dma_addr_mask & 0x80000000) != 0);
+			return 1;
+		}
+	}
+
+	if (device_mask >= (1UL << 32UL))
+		return 0;
+
+	return (device_mask & dma_addr_mask) == dma_addr_mask;
+}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
new file mode 100644
index 00000000000..3567fa879e1
--- /dev/null
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -0,0 +1,1560 @@
+/* $Id: pci_psycho.c,v 1.33 2002/02/01 00:58:33 davem Exp $
+ * pci_psycho.c: PSYCHO/U2P specific PCI controller support.
+ *
+ * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+#include <asm/starfire.h>
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+
+/* All PSYCHO registers are 64-bits. The following accessor
+ * routines are how they are accessed. The REG parameter
+ * is a physical address.
+ */
+#define psycho_read(__reg) \
+({ u64 __ret; \
+ __asm__ __volatile__("ldxa [%1] %2, %0" \
+ : "=r" (__ret) \
+ : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory"); \
+ __ret; \
+})
+#define psycho_write(__reg, __val) \
+ __asm__ __volatile__("stxa %0, [%1] %2" \
+ : /* no outputs */ \
+ : "r" (__val), "r" (__reg), \
+ "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory")
+
+/* Misc. PSYCHO PCI controller register offsets and definitions. */
+#define PSYCHO_CONTROL 0x0010UL
+#define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/
+#define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */
+#define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */
+#define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */
+#define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */
+#define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */
+#define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */
+#define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */
+#define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */
+#define PSYCHO_PCIA_CTRL 0x2000UL
+#define PSYCHO_PCIB_CTRL 0x4000UL
+#define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */
+#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */
+#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */
+#define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */
+#define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */
+#define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */
+#define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */
+#define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */
+#define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */
+#define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */
+#define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */
+#define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */
+
+/* U2P Programmer's Manual, page 13-55, configuration space
+ * address format:
+ *
+ * 32 24 23 16 15 11 10 8 7 2 1 0
+ * ---------------------------------------------------------
+ * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
+ * ---------------------------------------------------------
+ */
+#define PSYCHO_CONFIG_BASE(PBM) \
+ ((PBM)->config_space | (1UL << 24))
+#define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \
+ (((unsigned long)(BUS) << 16) | \
+ ((unsigned long)(DEVFN) << 8) | \
+ ((unsigned long)(REG)))
+
+/* Build the physical address used to access config space for
+ * (bus, devfn, where) under the given PBM; NULL if there is no PBM.
+ */
+static void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm,
+				      unsigned char bus,
+				      unsigned int devfn,
+				      int where)
+{
+	unsigned long cfg_addr;
+
+	if (pbm == NULL)
+		return NULL;
+
+	cfg_addr = PSYCHO_CONFIG_BASE(pbm);
+	cfg_addr |= PSYCHO_CONFIG_ENCODE(bus, devfn, where);
+	return (void *) cfg_addr;
+}
+
+/* Non-zero when a config access must not be issued: no parent
+ * controller, or a device number above 8 on either PBM's first bus.
+ */
+static int psycho_out_of_range(struct pci_pbm_info *pbm,
+			       unsigned char bus,
+			       unsigned char devfn)
+{
+	if (pbm->parent == 0)
+		return 1;
+	if (pbm != &pbm->parent->pbm_A &&
+	    pbm != &pbm->parent->pbm_B)
+		return 0;
+	return (bus == pbm->pci_first_busno &&
+		PCI_SLOT(devfn) > 8);
+}
+
+/* PSYCHO PCI configuration space accessors. */
+
+/* Read @size bytes of PCI config space at (@bus_dev, @devfn, @where).
+ *
+ * *value is preloaded with all-ones, so out-of-range or unreachable
+ * accesses (and misaligned requests) report the all-ones pattern while
+ * still returning PCIBIOS_SUCCESSFUL.
+ */
+static int psycho_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+			       int where, int size, u32 *value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+	u16 tmp16;
+	u8 tmp8;
+
+	/* Default to all-ones before any checks. */
+	switch (size) {
+	case 1:
+		*value = 0xff;
+		break;
+	case 2:
+		*value = 0xffff;
+		break;
+	case 4:
+		*value = 0xffffffff;
+		break;
+	}
+
+	addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	if (psycho_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+	switch (size) {
+	case 1:
+		pci_config_read8((u8 *)addr, &tmp8);
+		*value = (u32) tmp8;
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_read_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read16((u16 *)addr, &tmp16);
+		*value = (u32) tmp16;
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_read_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read32(addr, value);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Write @size bytes of PCI config space at (@bus_dev, @devfn, @where).
+ *
+ * Out-of-range and misaligned writes are silently dropped; every path
+ * returns PCIBIOS_SUCCESSFUL.
+ */
+static int psycho_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				int where, int size, u32 value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+
+	addr = psycho_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	if (psycho_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	switch (size) {
+	case 1:
+		pci_config_write8((u8 *)addr, value);
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_write_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write16((u16 *)addr, value);
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_write_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write32(addr, value);
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessors handed to the PCI core for PSYCHO buses. */
+static struct pci_ops psycho_ops = {
+	.read =		psycho_read_pci_cfg,
+	.write =	psycho_write_pci_cfg,
+};
+
+/* PSYCHO interrupt mapping support. */
+#define PSYCHO_IMAP_A_SLOT0 0x0c00UL
+#define PSYCHO_IMAP_B_SLOT0 0x0c20UL
+/* IMAP register offset for a PCI-slot interrupt.  Bit 4 of the INO
+ * selects PBM A or B; bits 3:2 select the slot within that PBM.
+ */
+static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
+{
+	unsigned long base;
+	unsigned int slot;
+
+	base = (ino & 0x10) ? PSYCHO_IMAP_B_SLOT0 : PSYCHO_IMAP_A_SLOT0;
+	slot = (ino & 0x0c) >> 2;
+	return base + (slot * 8);
+}
+
+#define PSYCHO_IMAP_SCSI 0x1000UL
+#define PSYCHO_IMAP_ETH 0x1008UL
+#define PSYCHO_IMAP_BPP 0x1010UL
+#define PSYCHO_IMAP_AU_REC 0x1018UL
+#define PSYCHO_IMAP_AU_PLAY 0x1020UL
+#define PSYCHO_IMAP_PFAIL 0x1028UL
+#define PSYCHO_IMAP_KMS 0x1030UL
+#define PSYCHO_IMAP_FLPY 0x1038UL
+#define PSYCHO_IMAP_SHW 0x1040UL
+#define PSYCHO_IMAP_KBD 0x1048UL
+#define PSYCHO_IMAP_MS 0x1050UL
+#define PSYCHO_IMAP_SER 0x1058UL
+#define PSYCHO_IMAP_TIM0 0x1060UL
+#define PSYCHO_IMAP_TIM1 0x1068UL
+#define PSYCHO_IMAP_UE 0x1070UL
+#define PSYCHO_IMAP_CE 0x1078UL
+#define PSYCHO_IMAP_A_ERR 0x1080UL
+#define PSYCHO_IMAP_B_ERR 0x1088UL
+#define PSYCHO_IMAP_PMGMT 0x1090UL
+#define PSYCHO_IMAP_GFX 0x1098UL
+#define PSYCHO_IMAP_EUPA 0x10a0UL
+
+/* IMAP register offsets for the onboard-device INOs (0x20 - 0x32),
+ * indexed by (ino - PSYCHO_ONBOARD_IRQ_BASE).
+ */
+static unsigned long __onboard_imap_off[] = {
+/*0x20*/	PSYCHO_IMAP_SCSI,
+/*0x21*/	PSYCHO_IMAP_ETH,
+/*0x22*/	PSYCHO_IMAP_BPP,
+/*0x23*/	PSYCHO_IMAP_AU_REC,
+/*0x24*/	PSYCHO_IMAP_AU_PLAY,
+/*0x25*/	PSYCHO_IMAP_PFAIL,
+/*0x26*/	PSYCHO_IMAP_KMS,
+/*0x27*/	PSYCHO_IMAP_FLPY,
+/*0x28*/	PSYCHO_IMAP_SHW,
+/*0x29*/	PSYCHO_IMAP_KBD,
+/*0x2a*/	PSYCHO_IMAP_MS,
+/*0x2b*/	PSYCHO_IMAP_SER,
+/*0x2c*/	PSYCHO_IMAP_TIM0,
+/*0x2d*/	PSYCHO_IMAP_TIM1,
+/*0x2e*/	PSYCHO_IMAP_UE,
+/*0x2f*/	PSYCHO_IMAP_CE,
+/*0x30*/	PSYCHO_IMAP_A_ERR,
+/*0x31*/	PSYCHO_IMAP_B_ERR,
+/*0x32*/	PSYCHO_IMAP_PMGMT
+};
+#define PSYCHO_ONBOARD_IRQ_BASE		0x20
+#define PSYCHO_ONBOARD_IRQ_LAST		0x32
+#define psycho_onboard_imap_offset(__ino) \
+	__onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
+
+#define PSYCHO_ICLR_A_SLOT0 0x1400UL
+#define PSYCHO_ICLR_SCSI 0x1800UL
+
+/* ICLR register offset for a given INO.  INOs with bit 5 set use the
+ * onboard-device block starting at PSYCHO_ICLR_SCSI; others use the
+ * PCI-slot block starting at PSYCHO_ICLR_A_SLOT0.
+ *
+ * Fix: the argument is now parenthesized at every expansion site, so
+ * expression arguments such as (a | b) bind correctly against & 0x20.
+ */
+#define psycho_iclr_offset(ino) \
+	((((ino) & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
+	  (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3))))
+
+/* PCI PSYCHO INO number to Sparc PIL level. */
+/* Static INO -> Sparc PIL assignments.  A zero entry means "no fixed
+ * PIL"; psycho_ino_to_pil() then derives one from the PCI device class.
+ */
+static unsigned char psycho_pil_table[] = {
+/*0x00*/0, 0, 0, 0,	/* PCI A slot 0  Int A, B, C, D */
+/*0x04*/0, 0, 0, 0,	/* PCI A slot 1  Int A, B, C, D */
+/*0x08*/0, 0, 0, 0,	/* PCI A slot 2  Int A, B, C, D */
+/*0x0c*/0, 0, 0, 0,	/* PCI A slot 3  Int A, B, C, D */
+/*0x10*/0, 0, 0, 0,	/* PCI B slot 0  Int A, B, C, D */
+/*0x14*/0, 0, 0, 0,	/* PCI B slot 1  Int A, B, C, D */
+/*0x18*/0, 0, 0, 0,	/* PCI B slot 2  Int A, B, C, D */
+/*0x1c*/0, 0, 0, 0,	/* PCI B slot 3  Int A, B, C, D */
+/*0x20*/4,		/* SCSI				*/
+/*0x21*/5,		/* Ethernet			*/
+/*0x22*/8,		/* Parallel Port		*/
+/*0x23*/13,		/* Audio Record			*/
+/*0x24*/14,		/* Audio Playback		*/
+/*0x25*/15,		/* PowerFail			*/
+/*0x26*/4,		/* second SCSI			*/
+/*0x27*/11,		/* Floppy			*/
+/*0x28*/4,		/* Spare Hardware		*/
+/*0x29*/9,		/* Keyboard			*/
+/*0x2a*/4,		/* Mouse			*/
+/*0x2b*/12,		/* Serial			*/
+/*0x2c*/10,		/* Timer 0			*/
+/*0x2d*/11,		/* Timer 1			*/
+/*0x2e*/15,		/* Uncorrectable ECC		*/
+/*0x2f*/15,		/* Correctable ECC		*/
+/*0x30*/15,		/* PCI Bus A Error		*/
+/*0x31*/15,		/* PCI Bus B Error		*/
+/*0x32*/15,		/* Power Management		*/
+};
+
+/* Map an INO to a Sparc PIL.  Fixed assignments come from
+ * psycho_pil_table[]; a zero entry (PCI slot interrupts) falls back to
+ * a PIL chosen from the device's PCI base class, defaulting to 4.
+ */
+static int __init psycho_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
+{
+	int pil = psycho_pil_table[ino];
+
+	if (pil != 0)
+		return pil;
+
+	if (pdev == NULL)
+		return 4;
+
+	switch ((pdev->class >> 16) & 0xff) {
+	case PCI_BASE_CLASS_STORAGE:
+		return 4;
+
+	case PCI_BASE_CLASS_NETWORK:
+		return 6;
+
+	case PCI_BASE_CLASS_DISPLAY:
+		return 9;
+
+	case PCI_BASE_CLASS_MULTIMEDIA:
+	case PCI_BASE_CLASS_MEMORY:
+	case PCI_BASE_CLASS_BRIDGE:
+	case PCI_BASE_CLASS_SERIAL:
+		return 10;
+
+	default:
+		return 4;
+	}
+}
+
+/* Build the IRQ bucket for a PSYCHO interrupt source.
+ *
+ * Splits the INO into PCI-slot (< 0x20) and onboard ranges to find the
+ * IMAP register, looks up the ICLR register and PIL, and registers the
+ * bucket with build_irq(), tagging it as a PCI interrupt.
+ */
+static unsigned int __init psycho_irq_build(struct pci_pbm_info *pbm,
+					    struct pci_dev *pdev,
+					    unsigned int ino)
+{
+	struct ino_bucket *bucket;
+	unsigned long imap, iclr;
+	unsigned long imap_off, iclr_off;
+	int pil, inofixup = 0;
+
+	ino &= PCI_IRQ_INO;
+	if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
+		/* PCI slot */
+		imap_off = psycho_pcislot_imap_offset(ino);
+	} else {
+		/* Onboard device */
+		if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
+			prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
+			prom_halt();
+		}
+		imap_off = psycho_onboard_imap_offset(ino);
+	}
+
+	/* Now build the IRQ bucket. */
+	pil = psycho_ino_to_pil(pdev, ino);
+
+	if (PIL_RESERVED(pil))
+		BUG();
+
+	/* NOTE(review): the +4 presumably addresses the low 32 bits of
+	 * the 64-bit IMAP/ICLR registers — confirm against the U2P
+	 * programmer's manual before changing.
+	 */
+	imap = pbm->controller_regs + imap_off;
+	imap += 4;
+
+	iclr_off = psycho_iclr_offset(ino);
+	iclr = pbm->controller_regs + iclr_off;
+	iclr += 4;
+
+	/* Slot interrupts encode Int A-D in the low two INO bits. */
+	if ((ino & 0x20) == 0)
+		inofixup = ino & 0x03;
+
+	bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
+	bucket->flags |= IBF_PCI;
+
+	return __irq(bucket);
+}
+
+/* PSYCHO error handling support. */
+enum psycho_error_type {
+	UE_ERR, CE_ERR, PCI_ERR	/* uncorrectable, correctable, PCI bus */
+};
+
+/* Helper function of IOMMU error checking, which checks out
+ * the state of the streaming buffers. The IOMMU lock is
+ * held when this is called.
+ *
+ * For the PCI error case we know which PBM (and thus which
+ * streaming buffer) caused the error, but for the uncorrectable
+ * error case we do not. So we always check both streaming caches.
+ */
+#define PSYCHO_STRBUF_CONTROL_A 0x2800UL
+#define PSYCHO_STRBUF_CONTROL_B 0x4800UL
+#define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
+#define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
+#define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
+#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
+#define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
+#define PSYCHO_STRBUF_FLUSH_A 0x2808UL
+#define PSYCHO_STRBUF_FLUSH_B 0x4808UL
+#define PSYCHO_STRBUF_FSYNC_A 0x2810UL
+#define PSYCHO_STRBUF_FSYNC_B 0x4810UL
+#define PSYCHO_STC_DATA_A 0xb000UL
+#define PSYCHO_STC_DATA_B 0xc000UL
+#define PSYCHO_STC_ERR_A 0xb400UL
+#define PSYCHO_STC_ERR_B 0xc400UL
+#define PSYCHO_STCERR_WRITE 0x0000000000000002UL /* Write Error */
+#define PSYCHO_STCERR_READ 0x0000000000000001UL /* Read Error */
+#define PSYCHO_STC_TAG_A 0xb800UL
+#define PSYCHO_STC_TAG_B 0xc800UL
+#define PSYCHO_STCTAG_PPN 0x0fffffff00000000UL /* Physical Page Number */
+#define PSYCHO_STCTAG_VPN 0x00000000ffffe000UL /* Virtual Page Number */
+#define PSYCHO_STCTAG_VALID 0x0000000000000002UL /* Valid */
+#define PSYCHO_STCTAG_WRITE 0x0000000000000001UL /* Writable */
+#define PSYCHO_STC_LINE_A 0xb900UL
+#define PSYCHO_STC_LINE_B 0xc900UL
+#define PSYCHO_STCLINE_LINDX 0x0000000001e00000UL /* LRU Index */
+#define PSYCHO_STCLINE_SPTR 0x00000000001f8000UL /* Dirty Data Start Pointer */
+#define PSYCHO_STCLINE_LADDR 0x0000000000007f00UL /* Line Address */
+#define PSYCHO_STCLINE_EPTR 0x00000000000000fcUL /* Dirty Data End Pointer */
+#define PSYCHO_STCLINE_VALID 0x0000000000000002UL /* Valid */
+#define PSYCHO_STCLINE_FOFN 0x0000000000000001UL /* Fetch Outstanding / Flush Necessary */
+
+/* Scratch buffers for dumping streaming-cache diagnostic state in
+ * __psycho_check_one_stc(); serialized by stc_buf_lock.
+ */
+static DEFINE_SPINLOCK(stc_buf_lock);
+static unsigned long stc_error_buf[128];
+static unsigned long stc_tag_buf[16];
+static unsigned long stc_line_buf[16];
+
+/* Dump and clear one PBM's streaming-cache error/tag/line state.
+ *
+ * Puts the streaming buffer into diagnostic mode, snapshots and zeroes
+ * the 128 error entries and 16 tag/line entries, restores the control
+ * register, then decodes and prints any entries that recorded errors.
+ * Called with iommu->lock held; stc_buf_lock guards the scratch arrays.
+ */
+static void __psycho_check_one_stc(struct pci_controller_info *p,
+				   struct pci_pbm_info *pbm,
+				   int is_pbm_a)
+{
+	struct pci_strbuf *strbuf = &pbm->stc;
+	unsigned long regbase = p->pbm_A.controller_regs;
+	unsigned long err_base, tag_base, line_base;
+	u64 control;
+	int i;
+
+	if (is_pbm_a) {
+		err_base = regbase + PSYCHO_STC_ERR_A;
+		tag_base = regbase + PSYCHO_STC_TAG_A;
+		line_base = regbase + PSYCHO_STC_LINE_A;
+	} else {
+		err_base = regbase + PSYCHO_STC_ERR_B;
+		tag_base = regbase + PSYCHO_STC_TAG_B;
+		line_base = regbase + PSYCHO_STC_LINE_B;
+	}
+
+	spin_lock(&stc_buf_lock);
+
+	/* This is __REALLY__ dangerous.  When we put the
+	 * streaming buffer into diagnostic mode to probe
+	 * it's tags and error status, we _must_ clear all
+	 * of the line tag valid bits before re-enabling
+	 * the streaming buffer.  If any dirty data lives
+	 * in the STC when we do this, we will end up
+	 * invalidating it before it has a chance to reach
+	 * main memory.
+	 */
+	control = psycho_read(strbuf->strbuf_control);
+	psycho_write(strbuf->strbuf_control,
+		     (control | PSYCHO_STRBUF_CTRL_DENAB));
+	for (i = 0; i < 128; i++) {
+		unsigned long val;
+
+		val = psycho_read(err_base + (i * 8UL));
+		psycho_write(err_base + (i * 8UL), 0UL);
+		stc_error_buf[i] = val;
+	}
+	for (i = 0; i < 16; i++) {
+		stc_tag_buf[i] = psycho_read(tag_base + (i * 8UL));
+		stc_line_buf[i] = psycho_read(line_base + (i * 8UL));
+		psycho_write(tag_base + (i * 8UL), 0UL);
+		psycho_write(line_base + (i * 8UL), 0UL);
+	}
+
+	/* OK, state is logged, exit diagnostic mode. */
+	psycho_write(strbuf->strbuf_control, control);
+
+	/* Eight error entries per tag/line pair. */
+	for (i = 0; i < 16; i++) {
+		int j, saw_error, first, last;
+
+		saw_error = 0;
+		first = i * 8;
+		last = first + 8;
+		for (j = first; j < last; j++) {
+			unsigned long errval = stc_error_buf[j];
+			if (errval != 0) {
+				saw_error++;
+				printk("PSYCHO%d(PBM%c): STC_ERR(%d)[wr(%d)rd(%d)]\n",
+				       p->index,
+				       (is_pbm_a ? 'A' : 'B'),
+				       j,
+				       (errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
+				       (errval & PSYCHO_STCERR_READ) ? 1 : 0);
+			}
+		}
+		if (saw_error != 0) {
+			unsigned long tagval = stc_tag_buf[i];
+			unsigned long lineval = stc_line_buf[i];
+			printk("PSYCHO%d(PBM%c): STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)W(%d)]\n",
+			       p->index,
+			       (is_pbm_a ? 'A' : 'B'),
+			       i,
+			       ((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
+			       (tagval & PSYCHO_STCTAG_VPN),
+			       ((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
+			       ((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
+			printk("PSYCHO%d(PBM%c): STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
+			       "V(%d)FOFN(%d)]\n",
+			       p->index,
+			       (is_pbm_a ? 'A' : 'B'),
+			       i,
+			       ((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
+			       ((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
+			       ((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
+			       ((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
+			       ((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
+			       ((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
+		}
+	}
+
+	spin_unlock(&stc_buf_lock);
+}
+
+static void __psycho_check_stc_error(struct pci_controller_info *p,
+ unsigned long afsr,
+ unsigned long afar,
+ enum psycho_error_type type)
+{
+ struct pci_pbm_info *pbm;
+
+ pbm = &p->pbm_A;
+ if (pbm->stc.strbuf_enabled)
+ __psycho_check_one_stc(p, pbm, 1);
+
+ pbm = &p->pbm_B;
+ if (pbm->stc.strbuf_enabled)
+ __psycho_check_one_stc(p, pbm, 0);
+}
+
+/* When an Uncorrectable Error or a PCI Error happens, we
+ * interrogate the IOMMU state to see if it is the cause.
+ */
+#define PSYCHO_IOMMU_CONTROL 0x0200UL
+#define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
+#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
+#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
+#define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
+#define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
+#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
+#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
+#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
+#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
+#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
+#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
+#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
+#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
+#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
+#define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
+#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
+#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
+#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
+#define PSYCHO_IOMMU_TSBBASE 0x0208UL
+#define PSYCHO_IOMMU_FLUSH 0x0210UL
+#define PSYCHO_IOMMU_TAG 0xa580UL
+#define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
+#define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL)
+#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
+#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
+#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
+#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffUL
+#define PSYCHO_IOMMU_DATA 0xa600UL
+#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
+#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
+#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
+/* Interrogate the IOMMU after an uncorrectable or PCI error.
+ *
+ * If the IOMMU control register shows a translation error, clears the
+ * error bit, logs the error type, then enters diagnostic mode to dump
+ * and clear the 16 TLB tag/data entries, printing any tagged with
+ * error status.  Finally checks the streaming caches.  Takes
+ * iommu->lock for the whole sequence.
+ */
+static void psycho_check_iommu_error(struct pci_controller_info *p,
+				     unsigned long afsr,
+				     unsigned long afar,
+				     enum psycho_error_type type)
+{
+	struct pci_iommu *iommu = p->pbm_A.iommu;
+	unsigned long iommu_tag[16];
+	unsigned long iommu_data[16];
+	unsigned long flags;
+	u64 control;
+	int i;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	control = psycho_read(iommu->iommu_control);
+	if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
+		char *type_string;
+
+		/* Clear the error encountered bit. */
+		control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
+		psycho_write(iommu->iommu_control, control);
+
+		switch((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
+		case 0:
+			type_string = "Protection Error";
+			break;
+		case 1:
+			type_string = "Invalid Error";
+			break;
+		case 2:
+			type_string = "TimeOut Error";
+			break;
+		case 3:
+		default:
+			type_string = "ECC Error";
+			break;
+		};
+		printk("PSYCHO%d: IOMMU Error, type[%s]\n",
+		       p->index, type_string);
+
+		/* Put the IOMMU into diagnostic mode and probe
+		 * it's TLB for entries with error status.
+		 *
+		 * It is very possible for another DVMA to occur
+		 * while we do this probe, and corrupt the system
+		 * further.  But we are so screwed at this point
+		 * that we are likely to crash hard anyways, so
+		 * get as much diagnostic information to the
+		 * console as we can.
+		 */
+		psycho_write(iommu->iommu_control,
+			     control | PSYCHO_IOMMU_CTRL_DENAB);
+		for (i = 0; i < 16; i++) {
+			unsigned long base = p->pbm_A.controller_regs;
+
+			iommu_tag[i] =
+				psycho_read(base + PSYCHO_IOMMU_TAG + (i * 8UL));
+			iommu_data[i] =
+				psycho_read(base + PSYCHO_IOMMU_DATA + (i * 8UL));
+
+			/* Now clear out the entry. */
+			psycho_write(base + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
+			psycho_write(base + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
+		}
+
+		/* Leave diagnostic mode. */
+		psycho_write(iommu->iommu_control, control);
+
+		/* Decode and print only the entries flagged in error. */
+		for (i = 0; i < 16; i++) {
+			unsigned long tag, data;
+
+			tag = iommu_tag[i];
+			if (!(tag & PSYCHO_IOMMU_TAG_ERR))
+				continue;
+
+			data = iommu_data[i];
+			switch((tag & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) {
+			case 0:
+				type_string = "Protection Error";
+				break;
+			case 1:
+				type_string = "Invalid Error";
+				break;
+			case 2:
+				type_string = "TimeOut Error";
+				break;
+			case 3:
+			default:
+				type_string = "ECC Error";
+				break;
+			};
+			printk("PSYCHO%d: IOMMU TAG(%d)[error(%s) wr(%d) str(%d) sz(%dK) vpg(%08lx)]\n",
+			       p->index, i, type_string,
+			       ((tag & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
+			       ((tag & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
+			       ((tag & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
+			       (tag & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
+			printk("PSYCHO%d: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
+			       p->index, i,
+			       ((data & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
+			       ((data & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
+			       (data & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
+		}
+	}
+	__psycho_check_stc_error(p, afsr, afar, type);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Uncorrectable Errors. Cause of the error and the address are
+ * recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors
+ * relating to UPA interface transactions.
+ */
+#define PSYCHO_UE_AFSR	0x0030UL
+#define PSYCHO_UEAFSR_PPIO	0x8000000000000000UL /* Primary PIO is cause         */
+#define PSYCHO_UEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read is cause   */
+#define PSYCHO_UEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write is cause  */
+#define PSYCHO_UEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause       */
+#define PSYCHO_UEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read is cause */
+#define PSYCHO_UEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write is cause*/
+#define PSYCHO_UEAFSR_RESV1	0x03ff000000000000UL /* Reserved                     */
+#define PSYCHO_UEAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
+#define PSYCHO_UEAFSR_DOFF	0x00000000e0000000UL /* Doubleword Offset            */
+#define PSYCHO_UEAFSR_MID	0x000000001f000000UL /* UPA MID causing the fault    */
+#define PSYCHO_UEAFSR_BLK	0x0000000000800000UL /* Trans was block operation    */
+#define PSYCHO_UEAFSR_RESV2	0x00000000007fffffUL /* Reserved                     */
+#define PSYCHO_UE_AFAR	0x0038UL
+
+/* Interrupt handler for the UE (uncorrectable error) interrupt.
+ * dev_id is the pci_controller_info registered in
+ * psycho_register_error_handlers().  Reads AFAR then AFSR, clears
+ * the latched primary/secondary cause bits by writing them back,
+ * logs everything, and then interrogates the IOMMU for a related
+ * translation error.  Returns IRQ_NONE when no cause bit is set
+ * (the line is registered SA_SHIRQ, so it may be shared).
+ */
+static irqreturn_t psycho_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFSR;
+	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_UE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	/* Latch uncorrectable error status. */
+	afar = psycho_read(afar_reg);
+	afsr = psycho_read(afsr_reg);
+
+	/* Clear the primary/secondary error status bits.
+	 * Writing the cause bits back to the AFSR clears them.
+	 */
+	error_bits = afsr &
+		(PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR |
+		 PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR);
+	if (!error_bits)
+		return IRQ_NONE;
+	psycho_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PSYCHO%d: Uncorrectable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & PSYCHO_UEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & PSYCHO_UEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & PSYCHO_UEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+	printk("PSYCHO%d: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n",
+	       p->index,
+	       (afsr & PSYCHO_UEAFSR_BMSK) >> 32UL,
+	       (afsr & PSYCHO_UEAFSR_DOFF) >> 29UL,
+	       (afsr & PSYCHO_UEAFSR_MID) >> 24UL,
+	       ((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0));
+	printk("PSYCHO%d: UE AFAR [%016lx]\n", p->index, afar);
+	printk("PSYCHO%d: UE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & PSYCHO_UEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & PSYCHO_UEAFSR_SDRD) {
+		reported++;
+		printk("(DMA Read)");
+	}
+	if (afsr & PSYCHO_UEAFSR_SDWR) {
+		reported++;
+		printk("(DMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* Interrogate IOMMU for error status. */
+	psycho_check_iommu_error(p, afsr, afar, UE_ERR);
+
+	return IRQ_HANDLED;
+}
+
+/* Correctable Errors. */
+#define PSYCHO_CE_AFSR	0x0040UL
+#define PSYCHO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO is cause         */
+#define PSYCHO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read is cause   */
+#define PSYCHO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write is cause  */
+#define PSYCHO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause       */
+#define PSYCHO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read is cause */
+#define PSYCHO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write is cause*/
+#define PSYCHO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved                     */
+#define PSYCHO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits                */
+#define PSYCHO_CEAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
+#define PSYCHO_CEAFSR_DOFF	0x00000000e0000000UL /* Double Offset                */
+#define PSYCHO_CEAFSR_MID	0x000000001f000000UL /* UPA MID causing the fault    */
+#define PSYCHO_CEAFSR_BLK	0x0000000000800000UL /* Trans was block operation    */
+#define PSYCHO_CEAFSR_RESV2	0x00000000007fffffUL /* Reserved                     */
+/* NOTE(review): this AFAR offset is identical to PSYCHO_CE_AFSR (0x40).
+ * The UE pair above is 0x30/0x38, so 0x48 would be the expected AFAR
+ * offset here — verify against the PSYCHO (STP2223BGA) data sheet.
+ */
+#define PSYCHO_CE_AFAR	0x0040UL
+
+/* Interrupt handler for the CE (correctable ECC error) interrupt.
+ * Mirrors psycho_ue_intr(): latch AFAR/AFSR, write the cause bits
+ * back to clear them, and log.  No IOMMU interrogation is done for
+ * correctable errors.  Returns IRQ_NONE when no cause bit is set.
+ */
+static irqreturn_t psycho_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFSR;
+	unsigned long afar_reg = p->pbm_A.controller_regs + PSYCHO_CE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	/* Latch error status. */
+	afar = psycho_read(afar_reg);
+	afsr = psycho_read(afsr_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR |
+		 PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR);
+	if (!error_bits)
+		return IRQ_NONE;
+	psycho_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PSYCHO%d: Correctable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & PSYCHO_CEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & PSYCHO_CEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & PSYCHO_CEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+
+	/* XXX Use syndrome and afar to print out module string just like
+	 * XXX UDB CE trap handler does... -DaveM
+	 */
+	printk("PSYCHO%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
+	       "UPA_MID[%02lx] was_block(%d)\n",
+	       p->index,
+	       (afsr & PSYCHO_CEAFSR_ESYND) >> 48UL,
+	       (afsr & PSYCHO_CEAFSR_BMSK) >> 32UL,
+	       (afsr & PSYCHO_CEAFSR_DOFF) >> 29UL,
+	       (afsr & PSYCHO_CEAFSR_MID) >> 24UL,
+	       ((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0));
+	printk("PSYCHO%d: CE AFAR [%016lx]\n", p->index, afar);
+	printk("PSYCHO%d: CE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & PSYCHO_CEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & PSYCHO_CEAFSR_SDRD) {
+		reported++;
+		printk("(DMA Read)");
+	}
+	if (afsr & PSYCHO_CEAFSR_SDWR) {
+		reported++;
+		printk("(DMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	return IRQ_HANDLED;
+}
+
+/* PCI Errors. They are signalled by the PCI bus module since they
+ * are associated with a specific bus segment.
+ */
+#define PSYCHO_PCI_AFSR_A	0x2010UL
+#define PSYCHO_PCI_AFSR_B	0x4010UL
+#define PSYCHO_PCIAFSR_PMA	0x8000000000000000UL /* Primary Master Abort Error   */
+#define PSYCHO_PCIAFSR_PTA	0x4000000000000000UL /* Primary Target Abort Error   */
+#define PSYCHO_PCIAFSR_PRTRY	0x2000000000000000UL /* Primary Excessive Retries    */
+#define PSYCHO_PCIAFSR_PPERR	0x1000000000000000UL /* Primary Parity Error         */
+#define PSYCHO_PCIAFSR_SMA	0x0800000000000000UL /* Secondary Master Abort Error */
+#define PSYCHO_PCIAFSR_STA	0x0400000000000000UL /* Secondary Target Abort Error */
+#define PSYCHO_PCIAFSR_SRTRY	0x0200000000000000UL /* Secondary Excessive Retries  */
+#define PSYCHO_PCIAFSR_SPERR	0x0100000000000000UL /* Secondary Parity Error       */
+#define PSYCHO_PCIAFSR_RESV1	0x00ff000000000000UL /* Reserved                     */
+#define PSYCHO_PCIAFSR_BMSK	0x0000ffff00000000UL /* Bytemask of failed transfer  */
+#define PSYCHO_PCIAFSR_BLK	0x0000000080000000UL /* Trans was block operation    */
+#define PSYCHO_PCIAFSR_RESV2	0x0000000040000000UL /* Reserved                     */
+#define PSYCHO_PCIAFSR_MID	0x000000003e000000UL /* MID causing the error        */
+#define PSYCHO_PCIAFSR_RESV3	0x0000000001ffffffUL /* Reserved                     */
+#define PSYCHO_PCI_AFAR_A	0x2018UL
+#define PSYCHO_PCI_AFAR_B	0x4018UL
+
+/* Fallback path for psycho_pcierr_intr() when the PCI AFSR shows no
+ * cause bits: check the PBM's control register for SERR/streaming
+ * byte hole conditions and the host bridge's PCI_STATUS for logged
+ * bus errors.  Returns IRQ_HANDLED if anything was found and
+ * cleared, IRQ_NONE otherwise.
+ */
+static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm, int is_pbm_a)
+{
+	unsigned long csr_reg, csr, csr_error_bits;
+	irqreturn_t ret = IRQ_NONE;
+	u16 stat;
+
+	if (is_pbm_a) {
+		csr_reg = pbm->controller_regs + PSYCHO_PCIA_CTRL;
+	} else {
+		csr_reg = pbm->controller_regs + PSYCHO_PCIB_CTRL;
+	}
+	csr = psycho_read(csr_reg);
+	csr_error_bits =
+		csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
+	if (csr_error_bits) {
+		/* Clear the errors.  Writing the register back with the
+		 * error bits still set clears the latched conditions.
+		 */
+		psycho_write(csr_reg, csr);
+
+		/* Log 'em. */
+		if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
+			printk("%s: PCI streaming byte hole error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & PSYCHO_PCICTRL_SERR)
+			printk("%s: PCI SERR signal asserted.\n", pbm->name);
+		ret = IRQ_HANDLED;
+	}
+	/* PCI_STATUS error bits are write-one-to-clear, hence 0xffff. */
+	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+	if (stat & (PCI_STATUS_PARITY |
+		    PCI_STATUS_SIG_TARGET_ABORT |
+		    PCI_STATUS_REC_TARGET_ABORT |
+		    PCI_STATUS_REC_MASTER_ABORT |
+		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
+		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
+		       pbm->name, stat);
+		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+/* Interrupt handler for the per-PBM PCI error interrupt.  dev_id is
+ * the pci_pbm_info for the faulting bus segment (A or B).  Both
+ * segments share the same controller register block, which is why
+ * p->pbm_A.controller_regs is used for either segment's AFSR/AFAR
+ * selection below.  After logging, the PBM's PCI bus is scanned for
+ * devices which logged the matching error type so the offender can
+ * be identified.
+ */
+static irqreturn_t psycho_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_pbm_info *pbm = dev_id;
+	struct pci_controller_info *p = pbm->parent;
+	unsigned long afsr_reg, afar_reg;
+	unsigned long afsr, afar, error_bits;
+	int is_pbm_a, reported;
+
+	is_pbm_a = (pbm == &pbm->parent->pbm_A);
+	if (is_pbm_a) {
+		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_A;
+		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_A;
+	} else {
+		afsr_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFSR_B;
+		afar_reg = p->pbm_A.controller_regs + PSYCHO_PCI_AFAR_B;
+	}
+
+	/* Latch error status. */
+	afar = psycho_read(afar_reg);
+	afsr = psycho_read(afsr_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
+		 PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
+		 PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
+		 PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
+	if (!error_bits)
+		return psycho_pcierr_intr_other(pbm, is_pbm_a);
+	psycho_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PSYCHO%d(PBM%c): PCI Error, primary error type[%s]\n",
+	       p->index, (is_pbm_a ? 'A' : 'B'),
+	       (((error_bits & PSYCHO_PCIAFSR_PMA) ?
+		 "Master Abort" :
+		 ((error_bits & PSYCHO_PCIAFSR_PTA) ?
+		  "Target Abort" :
+		  ((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
+		   "Excessive Retries" :
+		   ((error_bits & PSYCHO_PCIAFSR_PPERR) ?
+		    "Parity Error" : "???"))))));
+	printk("PSYCHO%d(PBM%c): bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
+	       p->index, (is_pbm_a ? 'A' : 'B'),
+	       (afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
+	       (afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
+	       (afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
+	printk("PSYCHO%d(PBM%c): PCI AFAR [%016lx]\n",
+	       p->index, (is_pbm_a ? 'A' : 'B'), afar);
+	printk("PSYCHO%d(PBM%c): PCI Secondary errors [",
+	       p->index, (is_pbm_a ? 'A' : 'B'));
+	reported = 0;
+	if (afsr & PSYCHO_PCIAFSR_SMA) {
+		reported++;
+		printk("(Master Abort)");
+	}
+	if (afsr & PSYCHO_PCIAFSR_STA) {
+		reported++;
+		printk("(Target Abort)");
+	}
+	if (afsr & PSYCHO_PCIAFSR_SRTRY) {
+		reported++;
+		printk("(Excessive Retries)");
+	}
+	if (afsr & PSYCHO_PCIAFSR_SPERR) {
+		reported++;
+		printk("(Parity Error)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* For the error types shown, scan PBM's PCI bus for devices
+	 * which have logged that error type.
+	 */
+
+	/* If we see a Target Abort, this could be the result of an
+	 * IOMMU translation error of some sort.  It is extremely
+	 * useful to log this information as usually it indicates
+	 * a bug in the IOMMU support code or a PCI device driver.
+	 */
+	if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
+		psycho_check_iommu_error(p, afsr, afar, PCI_ERR);
+		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
+	}
+	if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
+		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);
+
+	/* For excessive retries, PSYCHO/PBM will abort the device
+	 * and there is no way to specifically check for excessive
+	 * retries in the config space status registers.  So what
+	 * we hope is that we'll catch it via the master/target
+	 * abort events.
+	 */
+
+	if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
+		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);
+
+	return IRQ_HANDLED;
+}
+
+/* XXX What about PowerFail/PowerManagement??? -DaveM */
+#define PSYCHO_ECC_CTRL		0x0020
+#define PSYCHO_ECCCTRL_EE	0x8000000000000000UL /* Enable ECC Checking */
+#define PSYCHO_ECCCTRL_UE	0x4000000000000000UL /* Enable UE Interrupts */
+#define PSYCHO_ECCCTRL_CE	0x2000000000000000UL /* Enable CE INterrupts */
+#define PSYCHO_UE_INO		0x2e
+#define PSYCHO_CE_INO		0x2f
+#define PSYCHO_PCIERR_A_INO	0x30
+#define PSYCHO_PCIERR_B_INO	0x31
+
+/* Build and register the UE/CE/PCI-error interrupt handlers for
+ * controller P, then enable ECC checking plus UE/CE interrupts and
+ * the per-PBM PCI error interrupts.  Any registration failure is
+ * fatal (prom_halt).  UE/CE handlers get the controller as dev_id;
+ * the PCI error handlers get their respective pbm.
+ */
+static void __init psycho_register_error_handlers(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
+	unsigned long base = p->pbm_A.controller_regs;
+	unsigned int irq, portid = pbm->portid;
+	u64 tmp;
+
+	/* Build IRQs and register handlers. */
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_UE_INO);
+	if (request_irq(irq, psycho_ue_intr,
+			SA_SHIRQ, "PSYCHO UE", p) < 0) {
+		prom_printf("PSYCHO%d: Cannot register UE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_CE_INO);
+	if (request_irq(irq, psycho_ce_intr,
+			SA_SHIRQ, "PSYCHO CE", p) < 0) {
+		prom_printf("PSYCHO%d: Cannot register CE interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	/* NOTE(review): pbm already points at &p->pbm_A here; this
+	 * re-assignment is redundant but kept for symmetry with the
+	 * PBM B registration below.
+	 */
+	pbm = &p->pbm_A;
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO);
+	if (request_irq(irq, psycho_pcierr_intr,
+			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_A) < 0) {
+		prom_printf("PSYCHO%d(PBMA): Cannot register PciERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	pbm = &p->pbm_B;
+	irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO);
+	if (request_irq(irq, psycho_pcierr_intr,
+			SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_B) < 0) {
+		prom_printf("PSYCHO%d(PBMB): Cannot register PciERR interrupt.\n",
+			    p->index);
+		prom_halt();
+	}
+
+	/* Enable UE and CE interrupts for controller. */
+	psycho_write(base + PSYCHO_ECC_CTRL,
+		     (PSYCHO_ECCCTRL_EE |
+		      PSYCHO_ECCCTRL_UE |
+		      PSYCHO_ECCCTRL_CE));
+
+	/* Enable PCI Error interrupts and clear error
+	 * bits for each PBM.
+	 */
+	tmp = psycho_read(base + PSYCHO_PCIA_CTRL);
+	tmp |= (PSYCHO_PCICTRL_SERR |
+		PSYCHO_PCICTRL_SBH_ERR |
+		PSYCHO_PCICTRL_EEN);
+	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
+	psycho_write(base + PSYCHO_PCIA_CTRL, tmp);
+
+	tmp = psycho_read(base + PSYCHO_PCIB_CTRL);
+	tmp |= (PSYCHO_PCICTRL_SERR |
+		PSYCHO_PCICTRL_SBH_ERR |
+		PSYCHO_PCICTRL_EEN);
+	tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
+	psycho_write(base + PSYCHO_PCIB_CTRL, tmp);
+}
+
+/* PSYCHO boot time probing and initialization. */
+/* Rebase a PBM-relative PCI resource onto its root resource by
+ * shifting both endpoints by the root's start address.
+ */
+static void __init psycho_resource_adjust(struct pci_dev *pdev,
+					  struct resource *res,
+					  struct resource *root)
+{
+	unsigned long shift = root->start;
+
+	res->start += shift;
+	res->end += shift;
+}
+
+/* Write the (already allocated) address of resource RESOURCE back
+ * into the device's BAR (or expansion ROM register).  The config
+ * register gets the PBM-relative address, i.e. the resource start
+ * minus the root space base.  Non-standard resource indices are
+ * silently ignored.
+ */
+static void __init psycho_base_address_update(struct pci_dev *pdev, int resource)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct resource *res, *root;
+	u32 reg;
+	int where, size, is_64bit;
+
+	res = &pdev->resource[resource];
+	if (resource < 6) {
+		where = PCI_BASE_ADDRESS_0 + (resource * 4);
+	} else if (resource == PCI_ROM_RESOURCE) {
+		where = pdev->rom_base_reg;
+	} else {
+		/* Somebody might have asked allocation of a non-standard resource */
+		return;
+	}
+
+	is_64bit = 0;
+	if (res->flags & IORESOURCE_IO)
+		root = &pbm->io_space;
+	else {
+		root = &pbm->mem_space;
+		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
+			is_64bit = 1;
+	}
+
+	/* Preserve the BAR's read-only low bits via the size mask and
+	 * merge in the new PBM-relative base address.
+	 */
+	size = res->end - res->start;
+	pci_read_config_dword(pdev, where, &reg);
+	reg = ((reg & size) |
+	       (((u32)(res->start - root->start)) & ~size));
+	if (resource == PCI_ROM_RESOURCE) {
+		reg |= PCI_ROM_ADDRESS_ENABLE;
+		res->flags |= IORESOURCE_ROM_ENABLE;
+	}
+	pci_write_config_dword(pdev, where, reg);
+
+	/* This knows that the upper 32-bits of the address
+	 * must be zero.  Our PCI common layer enforces this.
+	 */
+	if (is_64bit)
+		pci_write_config_dword(pdev, where + 4, 0);
+}
+
+/* Program bus-mastering related config registers of the PBM's host
+ * bridge (device 0 on the PBM's first bus): cache line size and
+ * latency timer.
+ */
+static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
+{
+	u8 *cfg;
+
+	/* Cache-line size of 64 bytes; actually a nop here but done
+	 * for completeness.
+	 */
+	cfg = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+				       0, PCI_CACHE_LINE_SIZE);
+	pci_config_write8(cfg, 64 / sizeof(u32));
+
+	/* Latency timer of 64 PCI clocks. */
+	cfg = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+				       0, PCI_LATENCY_TIMER);
+	pci_config_write8(cfg, 64);
+}
+
+/* Scan one PBM's PCI bus and run all post-scan fixups: cookie
+ * installation for the host bridge, PROM-node binding, resource
+ * assignment recording, IRQ fixup, 66MHz determination and
+ * busmastering setup.  Allocation failure is fatal (prom_halt).
+ */
+static void __init pbm_scan_bus(struct pci_controller_info *p,
+				struct pci_pbm_info *pbm)
+{
+	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
+
+	if (!cookie) {
+		prom_printf("PSYCHO: Critical allocation failure.\n");
+		prom_halt();
+	}
+
+	/* All we care about is the PBM. */
+	memset(cookie, 0, sizeof(*cookie));
+	cookie->pbm = pbm;
+
+	pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
+				    p->pci_ops,
+				    pbm);
+	pci_fixup_host_bridge_self(pbm->pci_bus);
+	pbm->pci_bus->self->sysdata = cookie;
+
+	pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
+	pci_record_assignments(pbm, pbm->pci_bus);
+	pci_assign_unassigned(pbm, pbm->pci_bus);
+	pci_fixup_irq(pbm, pbm->pci_bus);
+	pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
+	pci_setup_busmastering(pbm, pbm->pci_bus);
+}
+
+/* Controller scan_bus entry point: configure and scan both PBMs
+ * (B before A, preserving the existing probe order), then register
+ * the error interrupt handlers once the buses exist.
+ */
+static void __init psycho_scan_bus(struct pci_controller_info *p)
+{
+	pbm_config_busmastering(&p->pbm_B);
+	p->pbm_B.is_66mhz_capable = 0;
+	pbm_config_busmastering(&p->pbm_A);
+	p->pbm_A.is_66mhz_capable = 1;
+	pbm_scan_bus(p, &p->pbm_B);
+	pbm_scan_bus(p, &p->pbm_A);
+
+	/* After the PCI bus scan is complete, we can register
+	 * the error interrupt handlers.
+	 */
+	psycho_register_error_handlers(p);
+}
+
+/* Initialize the (shared, PBM A/B) IOMMU of controller P: record the
+ * register addresses, invalidate the 16 TLB entries via diagnostic
+ * mode, allocate the dummy page and the 1MB IOPTE table, and finally
+ * enable translation with a 128K-entry TSB.  Allocation failures are
+ * fatal (prom_halt).
+ */
+static void __init psycho_iommu_init(struct pci_controller_info *p)
+{
+	struct pci_iommu *iommu = p->pbm_A.iommu;
+	unsigned long tsbbase, i;
+	u64 control;
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->iommu_cur_ctx = 0;
+
+	/* Register addresses. */
+	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
+	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
+	iommu->iommu_flush    = p->pbm_A.controller_regs + PSYCHO_IOMMU_FLUSH;
+	/* PSYCHO's IOMMU lacks ctx flushing. */
+	iommu->iommu_ctxflush = 0;
+
+	/* We use the main control register of PSYCHO as the write
+	 * completion register.
+	 */
+	iommu->write_complete_reg = p->pbm_A.controller_regs + PSYCHO_CONTROL;
+
+	/*
+	 * Invalidate TLB Entries.
+	 */
+	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
+	control |= PSYCHO_IOMMU_CTRL_DENAB;
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
+	for(i = 0; i < 16; i++) {
+		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TAG + (i * 8UL), 0);
+		psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_DATA + (i * 8UL), 0);
+	}
+
+	/* Leave diag mode enabled for full-flushing done
+	 * in pci_iommu.c
+	 */
+
+	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+	if (!iommu->dummy_page) {
+		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
+		prom_halt();
+	}
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
+	 * table (128K ioptes * 8 bytes per iopte).  This is
+	 * page order 7 on UltraSparc.
+	 */
+	tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
+	if (!tsbbase) {
+		prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
+		prom_halt();
+	}
+	iommu->page_table = (iopte_t *)tsbbase;
+	iommu->page_table_sz_bits = 17;	/* 2^17 == 128K entries */
+	iommu->page_table_map_base = 0xc0000000;
+	iommu->dma_addr_mask = 0xffffffff;	/* 32-bit DVMA space */
+	pci_iommu_table_init(iommu, IO_TSB_SIZE);
+
+	/* We start with no consistent mappings. */
+	iommu->lowest_consistent_map =
+		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
+
+	for (i = 0; i < PBM_NCLUSTERS; i++) {
+		iommu->alloc_info[i].flush = 0;
+		iommu->alloc_info[i].next = 0;
+	}
+
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
+
+	/* Program the TSB size and enable translation. */
+	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
+	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
+	control |= (PSYCHO_IOMMU_TSBSZ_128K | PSYCHO_IOMMU_CTRL_ENAB);
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
+
+	/* If necessary, hook us up for starfire IRQ translations. */
+	if(this_is_starfire)
+		p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
+	else
+		p->starfire_cookie = NULL;
+}
+
+#define PSYCHO_IRQ_RETRY	0x1a00UL
+#define PSYCHO_PCIA_DIAG	0x2020UL
+#define PSYCHO_PCIB_DIAG	0x4020UL
+#define PSYCHO_PCIDIAG_RESV	0xffffffffffffff80UL /* Reserved                     */
+#define PSYCHO_PCIDIAG_DRETRY	0x0000000000000040UL /* Disable retry limit          */
+#define PSYCHO_PCIDIAG_DISYNC	0x0000000000000020UL /* Disable DMA wr / irq sync    */
+#define PSYCHO_PCIDIAG_DDWSYNC	0x0000000000000010UL /* Disable DMA wr / PIO rd sync */
+#define PSYCHO_PCIDIAG_IDDPAR	0x0000000000000008UL /* Invert DMA data parity       */
+#define PSYCHO_PCIDIAG_IPDPAR	0x0000000000000004UL /* Invert PIO data parity       */
+#define PSYCHO_PCIDIAG_IPAPAR	0x0000000000000002UL /* Invert PIO address parity    */
+#define PSYCHO_PCIDIAG_LPBACK	0x0000000000000001UL /* Enable loopback mode         */
+
+/* Basic controller hardware init: raise the IRQ retry limit, enable
+ * the PCI arbiter on both segments, and apply the U2P erratum
+ * workaround in the per-segment DIAG registers.
+ */
+static void psycho_controller_hwinit(struct pci_controller_info *p)
+{
+	u64 tmp;
+
+	/* PROM sets the IRQ retry value too low, increase it. */
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);
+
+	/* Enable arbiter for all PCI slots. */
+	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
+	tmp |= PSYCHO_PCICTRL_AEN;
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL, tmp);
+
+	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL);
+	tmp |= PSYCHO_PCICTRL_AEN;
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_CTRL, tmp);
+
+	/* Disable DMA write / PIO read synchronization on
+	 * both PCI bus segments.
+	 * [ U2P Erratum 1243770, STP2223BGA data sheet ]
+	 */
+	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG);
+	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIA_DIAG, tmp);
+
+	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG);
+	tmp |= PSYCHO_PCIDIAG_DDWSYNC;
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_PCIB_DIAG, tmp);
+}
+
+/* Name the PBM's top-level IO/MEM resources and register them with
+ * the global ioport/iomem trees, then carve out the legacy PC
+ * regions.  NOTE(review): request_resource() return values are
+ * ignored here — failure would leave the spaces unregistered
+ * silently.
+ */
+static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
+						   struct pci_pbm_info *pbm)
+{
+	char *name = pbm->name;
+
+	sprintf(name, "PSYCHO%d PBM%c",
+		p->index,
+		(pbm == &p->pbm_A ? 'A' : 'B'));
+	pbm->io_space.name = pbm->mem_space.name = name;
+
+	request_resource(&ioport_resource, &pbm->io_space);
+	request_resource(&iomem_resource, &pbm->mem_space);
+	pci_register_legacy_regions(&pbm->io_space,
+				    &pbm->mem_space);
+}
+
+/* Initialize and enable the streaming buffer of one PBM: record the
+ * control/flush/fsync register addresses for the A or B segment, set
+ * up the 64-byte aligned flush-flag word, and enable the buffer with
+ * LRU locking cleared (OBP may have left it enabled).
+ */
+static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
+				   struct pci_pbm_info *pbm,
+				   int is_pbm_a)
+{
+	unsigned long base = pbm->controller_regs;
+	u64 control;
+
+	if (is_pbm_a) {
+		pbm->stc.strbuf_control  = base + PSYCHO_STRBUF_CONTROL_A;
+		pbm->stc.strbuf_pflush   = base + PSYCHO_STRBUF_FLUSH_A;
+		pbm->stc.strbuf_fsync    = base + PSYCHO_STRBUF_FSYNC_A;
+	} else {
+		pbm->stc.strbuf_control  = base + PSYCHO_STRBUF_CONTROL_B;
+		pbm->stc.strbuf_pflush   = base + PSYCHO_STRBUF_FLUSH_B;
+		pbm->stc.strbuf_fsync    = base + PSYCHO_STRBUF_FSYNC_B;
+	}
+	/* PSYCHO's streaming buffer lacks ctx flushing. */
+	pbm->stc.strbuf_ctxflush      = 0;
+	pbm->stc.strbuf_ctxmatch_base = 0;
+
+	/* Round the flush-flag buffer up to a 64-byte boundary. */
+	pbm->stc.strbuf_flushflag = (volatile unsigned long *)
+		((((unsigned long)&pbm->stc.__flushflag_buf[0])
+		  + 63UL)
+		 & ~63UL);
+	pbm->stc.strbuf_flushflag_pa = (unsigned long)
+		__pa(pbm->stc.strbuf_flushflag);
+
+	/* Enable the streaming buffer.  We have to be careful
+	 * just in case OBP left it with LRU locking enabled.
+	 *
+	 * It is possible to control if PBM will be rerun on
+	 * line misses.  Currently I just retain whatever setting
+	 * OBP left us with.  All checks so far show it having
+	 * a value of zero.
+	 */
+#undef PSYCHO_STRBUF_RERUN_ENABLE
+#undef PSYCHO_STRBUF_RERUN_DISABLE
+	control = psycho_read(pbm->stc.strbuf_control);
+	control |= PSYCHO_STRBUF_CTRL_ENAB;
+	control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
+#ifdef PSYCHO_STRBUF_RERUN_ENABLE
+	control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
+#else
+#ifdef PSYCHO_STRBUF_RERUN_DISABLE
+	control |= PSYCHO_STRBUF_CTRL_RRDIS;
+#endif
+#endif
+	psycho_write(pbm->stc.strbuf_control, control);
+
+	pbm->stc.strbuf_enabled = 1;
+}
+
+#define PSYCHO_IOSPACE_A	0x002000000UL
+#define PSYCHO_IOSPACE_B	0x002010000UL
+#define PSYCHO_IOSPACE_SIZE	0x00000ffffUL
+#define PSYCHO_MEMSPACE_A	0x100000000UL
+#define PSYCHO_MEMSPACE_B	0x180000000UL
+#define PSYCHO_MEMSPACE_SIZE	0x07fffffffUL
+
+/* Initialize one PBM (A or B) of controller P from its PROM node:
+ * set up its IO/MEM resource windows, read chip version/revision,
+ * ranges, interrupt-map(+mask) and bus-range properties, and
+ * finally bring up its streaming buffer.  A missing bus-range or
+ * interrupt-map-mask property is fatal (prom_halt).
+ */
+static void psycho_pbm_init(struct pci_controller_info *p,
+			    int prom_node, int is_pbm_a)
+{
+	unsigned int busrange[2];
+	struct pci_pbm_info *pbm;
+	int err;
+
+	if (is_pbm_a) {
+		pbm = &p->pbm_A;
+		pbm->pci_first_slot = 1;
+		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_A;
+		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_A;
+	} else {
+		pbm = &p->pbm_B;
+		pbm->pci_first_slot = 2;
+		pbm->io_space.start = pbm->controller_regs + PSYCHO_IOSPACE_B;
+		pbm->mem_space.start = pbm->controller_regs + PSYCHO_MEMSPACE_B;
+	}
+
+	pbm->chip_type = PBM_CHIP_TYPE_PSYCHO;
+	pbm->chip_version =
+		prom_getintdefault(prom_node, "version#", 0);
+	pbm->chip_revision =
+		prom_getintdefault(prom_node, "module-revision#", 0);
+
+	pbm->io_space.end = pbm->io_space.start + PSYCHO_IOSPACE_SIZE;
+	pbm->io_space.flags = IORESOURCE_IO;
+	pbm->mem_space.end = pbm->mem_space.start + PSYCHO_MEMSPACE_SIZE;
+	pbm->mem_space.flags = IORESOURCE_MEM;
+	pbm_register_toplevel_resources(p, pbm);
+
+	pbm->parent = p;
+	pbm->prom_node = prom_node;
+	prom_getstring(prom_node, "name",
+		       pbm->prom_name,
+		       sizeof(pbm->prom_name));
+
+	/* prom_getproperty returns the property length in bytes, or
+	 * -1 when the property does not exist.
+	 */
+	err = prom_getproperty(prom_node, "ranges",
+			       (char *)pbm->pbm_ranges,
+			       sizeof(pbm->pbm_ranges));
+	if (err != -1)
+		pbm->num_pbm_ranges =
+			(err / sizeof(struct linux_prom_pci_ranges));
+	else
+		pbm->num_pbm_ranges = 0;
+
+	err = prom_getproperty(prom_node, "interrupt-map",
+			       (char *)pbm->pbm_intmap,
+			       sizeof(pbm->pbm_intmap));
+	if (err != -1) {
+		pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+		err = prom_getproperty(prom_node, "interrupt-map-mask",
+				       (char *)&pbm->pbm_intmask,
+				       sizeof(pbm->pbm_intmask));
+		if (err == -1) {
+			prom_printf("PSYCHO-PBM: Fatal error, no "
+				    "interrupt-map-mask.\n");
+			prom_halt();
+		}
+	} else {
+		pbm->num_pbm_intmap = 0;
+		memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+	}
+
+	err = prom_getproperty(prom_node, "bus-range",
+			       (char *)&busrange[0],
+			       sizeof(busrange));
+	if (err == 0 || err == -1) {
+		prom_printf("PSYCHO-PBM: Fatal error, no bus-range.\n");
+		prom_halt();
+	}
+	pbm->pci_first_busno = busrange[0];
+	pbm->pci_last_busno = busrange[1];
+
+	psycho_pbm_strbuf_init(p, pbm, is_pbm_a);
+}
+
+#define PSYCHO_CONFIGSPACE	0x001000000UL
+
+/* Top-level PSYCHO probe entry point, called once per PROM node.
+ * Each PSYCHO has two PBMs; the second probe for an already-known
+ * upa-portid only initializes the remaining PBM and returns.  On
+ * first sight of a controller this allocates the controller and
+ * (shared) IOMMU structures, links it into pci_controller_root,
+ * fills in the controller ops, reads the "reg" property for the
+ * register/config-space bases, and runs hardware + IOMMU init.
+ * Allocation or missing-property failures are fatal (prom_halt).
+ */
+void __init psycho_init(int node, char *model_name)
+{
+	struct linux_prom64_registers pr_regs[3];
+	struct pci_controller_info *p;
+	struct pci_iommu *iommu;
+	u32 upa_portid;
+	int is_pbm_a, err;
+
+	upa_portid = prom_getintdefault(node, "upa-portid", 0xff);
+
+	/* Second PBM of an already-probed controller? */
+	for(p = pci_controller_root; p; p = p->next) {
+		if (p->pbm_A.portid == upa_portid) {
+			is_pbm_a = (p->pbm_A.prom_node == 0);
+			psycho_pbm_init(p, node, is_pbm_a);
+			return;
+		}
+	}
+
+	p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
+	if (!p) {
+		prom_printf("PSYCHO: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(p, 0, sizeof(*p));
+	iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+	if (!iommu) {
+		prom_printf("PSYCHO: Fatal memory allocation error.\n");
+		prom_halt();
+	}
+	memset(iommu, 0, sizeof(*iommu));
+	/* The two PBMs share one IOMMU on PSYCHO. */
+	p->pbm_A.iommu = p->pbm_B.iommu = iommu;
+
+	p->next = pci_controller_root;
+	pci_controller_root = p;
+
+	p->pbm_A.portid = upa_portid;
+	p->pbm_B.portid = upa_portid;
+	p->index = pci_num_controllers++;
+	p->pbms_same_domain = 0;
+	p->scan_bus = psycho_scan_bus;
+	p->irq_build = psycho_irq_build;
+	p->base_address_update = psycho_base_address_update;
+	p->resource_adjust = psycho_resource_adjust;
+	p->pci_ops = &psycho_ops;
+
+	err = prom_getproperty(node, "reg",
+			       (char *)&pr_regs[0],
+			       sizeof(pr_regs));
+	if (err == 0 || err == -1) {
+		prom_printf("PSYCHO: Fatal error, no reg property.\n");
+		prom_halt();
+	}
+
+	p->pbm_A.controller_regs = pr_regs[2].phys_addr;
+	p->pbm_B.controller_regs = pr_regs[2].phys_addr;
+	printk("PCI: Found PSYCHO, control regs at %016lx\n",
+	       p->pbm_A.controller_regs);
+
+	p->pbm_A.config_space = p->pbm_B.config_space =
+		(pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
+	printk("PSYCHO: Shared PCI config space at %016lx\n",
+	       p->pbm_A.config_space);
+
+	/*
+	 * Psycho's PCI MEM space is mapped to a 2GB aligned area, so
+	 * we need to adjust our MEM space mask.
+	 */
+	pci_memspace_mask = 0x7fffffffUL;
+
+	psycho_controller_hwinit(p);
+
+	psycho_iommu_init(p);
+
+	/* Which PBM this node describes is encoded in its register
+	 * base address.
+	 */
+	is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
+	psycho_pbm_init(p, node, is_pbm_a);
+}
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
new file mode 100644
index 00000000000..5525d1ec4af
--- /dev/null
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -0,0 +1,1702 @@
+/* $Id: pci_sabre.c,v 1.42 2002/01/23 11:27:32 davem Exp $
+ * pci_sabre.c: Sabre specific PCI controller support.
+ *
+ * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <asm/apb.h>
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+#include <asm/smp.h>
+#include <asm/oplib.h>
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+
+/* All SABRE registers are 64-bits. The following accessor
+ * routines are how they are accessed. The REG parameter
+ * is a physical address.
+ *
+ * The ldxa/stxa instructions use the ASI_PHYS_BYPASS_EC_E address
+ * space identifier, so REG is a raw physical address rather than a
+ * kernel virtual one.  The "memory" clobber keeps the compiler from
+ * caching or reordering values around these MMIO accesses.
+ */
+#define sabre_read(__reg) \
+({ u64 __ret; \
+ __asm__ __volatile__("ldxa [%1] %2, %0" \
+ : "=r" (__ret) \
+ : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory"); \
+ __ret; \
+})
+#define sabre_write(__reg, __val) \
+ __asm__ __volatile__("stxa %0, [%1] %2" \
+ : /* no outputs */ \
+ : "r" (__val), "r" (__reg), \
+ "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory")
+
+/* SABRE PCI controller register offsets and definitions. */
+#define SABRE_UE_AFSR 0x0030UL
+#define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
+#define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
+#define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
+#define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
+#define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */
+#define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */
+#define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
+#define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */
+#define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */
+#define SABRE_UECE_AFAR 0x0038UL
+#define SABRE_CE_AFSR 0x0040UL
+#define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
+#define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
+#define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
+#define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
+#define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */
+#define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
+#define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */
+#define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */
+#define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */
+#define SABRE_IOMMU_CONTROL 0x0200UL
+#define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */
+#define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */
+#define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */
+#define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */
+#define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
+#define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000
+#define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000
+#define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000
+#define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000
+#define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000
+#define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000
+#define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000
+#define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
+#define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */
+#define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
+#define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
+#define SABRE_IOMMU_TSBBASE 0x0208UL
+#define SABRE_IOMMU_FLUSH 0x0210UL
+#define SABRE_IMAP_A_SLOT0 0x0c00UL
+#define SABRE_IMAP_B_SLOT0 0x0c20UL
+#define SABRE_IMAP_SCSI 0x1000UL
+#define SABRE_IMAP_ETH 0x1008UL
+#define SABRE_IMAP_BPP 0x1010UL
+#define SABRE_IMAP_AU_REC 0x1018UL
+#define SABRE_IMAP_AU_PLAY 0x1020UL
+#define SABRE_IMAP_PFAIL 0x1028UL
+#define SABRE_IMAP_KMS 0x1030UL
+#define SABRE_IMAP_FLPY 0x1038UL
+#define SABRE_IMAP_SHW 0x1040UL
+#define SABRE_IMAP_KBD 0x1048UL
+#define SABRE_IMAP_MS 0x1050UL
+#define SABRE_IMAP_SER 0x1058UL
+#define SABRE_IMAP_UE 0x1070UL
+#define SABRE_IMAP_CE 0x1078UL
+#define SABRE_IMAP_PCIERR 0x1080UL
+#define SABRE_IMAP_GFX 0x1098UL
+#define SABRE_IMAP_EUPA 0x10a0UL
+#define SABRE_ICLR_A_SLOT0 0x1400UL
+#define SABRE_ICLR_B_SLOT0 0x1480UL
+#define SABRE_ICLR_SCSI 0x1800UL
+#define SABRE_ICLR_ETH 0x1808UL
+#define SABRE_ICLR_BPP 0x1810UL
+#define SABRE_ICLR_AU_REC 0x1818UL
+#define SABRE_ICLR_AU_PLAY 0x1820UL
+#define SABRE_ICLR_PFAIL 0x1828UL
+#define SABRE_ICLR_KMS 0x1830UL
+#define SABRE_ICLR_FLPY 0x1838UL
+#define SABRE_ICLR_SHW 0x1840UL
+#define SABRE_ICLR_KBD 0x1848UL
+#define SABRE_ICLR_MS 0x1850UL
+#define SABRE_ICLR_SER 0x1858UL
+#define SABRE_ICLR_UE 0x1870UL
+#define SABRE_ICLR_CE 0x1878UL
+#define SABRE_ICLR_PCIERR 0x1880UL
+#define SABRE_WRSYNC 0x1c20UL
+#define SABRE_PCICTRL 0x2000UL
+#define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */
+#define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */
+#define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */
+#define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */
+#define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */
+#define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */
+#define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */
+#define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */
+#define SABRE_PIOAFSR 0x2010UL
+#define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */
+#define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */
+#define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
+#define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
+#define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */
+#define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */
+#define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
+#define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
+#define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */
+#define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */
+#define SABRE_PIOAFAR 0x2018UL
+#define SABRE_PCIDIAG 0x2020UL
+#define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */
+#define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */
+#define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */
+#define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */
+#define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */
+#define SABRE_PCITASR 0x2028UL
+#define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */
+#define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */
+#define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */
+#define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */
+#define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */
+#define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */
+#define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */
+#define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */
+#define SABRE_PIOBUF_DIAG 0x5000UL
+#define SABRE_DMABUF_DIAGLO 0x5100UL
+#define SABRE_DMABUF_DIAGHI 0x51c0UL
+#define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */
+#define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */
+#define SABRE_IOMMU_VADIAG 0xa400UL
+#define SABRE_IOMMU_TCDIAG 0xa408UL
+#define SABRE_IOMMU_TAG 0xa580UL
+#define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */
+#define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */
+#define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */
+#define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */
+#define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */
+#define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */
+#define SABRE_IOMMU_DATA 0xa600UL
+#define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */
+#define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */
+#define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */
+#define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */
+#define SABRE_PCI_IRQSTATE 0xa800UL
+#define SABRE_OBIO_IRQSTATE 0xa808UL
+#define SABRE_FFBCFG 0xf000UL
+#define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */
+#define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */
+#define SABRE_MCCTRL0 0xf010UL
+#define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */
+#define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */
+#define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */
+#define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */
+#define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */
+#define SABRE_MCCTRL1 0xf018UL
+#define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */
+#define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */
+#define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */
+#define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */
+#define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */
+#define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */
+#define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */
+#define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */
+#define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */
+#define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */
+#define SABRE_RESETCTRL 0xf020UL
+
+#define SABRE_CONFIGSPACE 0x001000000UL
+#define SABRE_IOSPACE 0x002000000UL
+#define SABRE_IOSPACE_SIZE 0x000ffffffUL
+#define SABRE_MEMSPACE 0x100000000UL
+#define SABRE_MEMSPACE_SIZE 0x07fffffffUL
+
+/* UltraSparc-IIi Programmer's Manual, page 325, PCI
+ * configuration space address format:
+ *
+ * 32 24 23 16 15 11 10 8 7 2 1 0
+ * ---------------------------------------------------------
+ * |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
+ * ---------------------------------------------------------
+ */
+#define SABRE_CONFIG_BASE(PBM) \
+ ((PBM)->config_space | (1UL << 24))
+#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
+ (((unsigned long)(BUS) << 16) | \
+ ((unsigned long)(DEVFN) << 8) | \
+ ((unsigned long)(REG)))
+
+static int hummingbird_p;
+static struct pci_bus *sabre_root_bus;
+
+/* Build the physical address used to reach the PCI config-space
+ * register (bus, devfn, where) through PBM's shared configuration
+ * space.  The encoding follows the UltraSparc-IIi format shown in
+ * the comment above SABRE_CONFIG_ENCODE.  Returns NULL if no pbm
+ * was supplied.
+ */
+static void *sabre_pci_config_mkaddr(struct pci_pbm_info *pbm,
+ unsigned char bus,
+ unsigned int devfn,
+ int where)
+{
+ if (!pbm)
+ return NULL;
+ return (void *)
+ (SABRE_CONFIG_BASE(pbm) |
+ SABRE_CONFIG_ENCODE(bus, devfn, where));
+}
+
+/* Return non-zero if DEVFN cannot correspond to a real device on
+ * bus 0 of a plain Sabre: only slot 0 function 0 and slot 1
+ * functions 0-1 are valid there.  Hummingbird parts (hummingbird_p)
+ * carry no such restriction.
+ */
+static int sabre_out_of_range(unsigned char devfn)
+{
+ if (hummingbird_p)
+ return 0;
+
+ return (((PCI_SLOT(devfn) == 0) && (PCI_FUNC(devfn) > 0)) ||
+ ((PCI_SLOT(devfn) == 1) && (PCI_FUNC(devfn) > 1)) ||
+ (PCI_SLOT(devfn) > 1));
+}
+
+/* Per-PBM out-of-range check used by the low-level config accessors:
+ * rejects accesses with no parent controller, or to slots above 8 on
+ * the PBM's first bus.  Hummingbird parts have no restriction.
+ *
+ * NOTE(review): the pbm_A and pbm_B arms below test the identical
+ * condition (first busno, slot > 8), so the PBM distinction looks
+ * redundant — confirm against the hardware manual before simplifying.
+ */
+static int __sabre_out_of_range(struct pci_pbm_info *pbm,
+ unsigned char bus,
+ unsigned char devfn)
+{
+ if (hummingbird_p)
+ return 0;
+
+ return ((pbm->parent == 0) ||
+ ((pbm == &pbm->parent->pbm_B) &&
+ (bus == pbm->pci_first_busno) &&
+ PCI_SLOT(devfn) > 8) ||
+ ((pbm == &pbm->parent->pbm_A) &&
+ (bus == pbm->pci_first_busno) &&
+ PCI_SLOT(devfn) > 8));
+}
+
+/* Low-level config-space read.  *value is pre-set to all-ones of the
+ * requested width so that unreachable/out-of-range targets and
+ * misaligned requests "succeed" with the conventional master-abort
+ * pattern rather than returning an error code.
+ */
+static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct pci_pbm_info *pbm = bus_dev->sysdata;
+ unsigned char bus = bus_dev->number;
+ u32 *addr;
+ u16 tmp16;
+ u8 tmp8;
+
+ /* Default to all-ones before any range checking. */
+ switch (size) {
+ case 1:
+ *value = 0xff;
+ break;
+ case 2:
+ *value = 0xffff;
+ break;
+ case 4:
+ *value = 0xffffffff;
+ break;
+ }
+
+ addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (__sabre_out_of_range(pbm, bus, devfn))
+ return PCIBIOS_SUCCESSFUL;
+
+ switch (size) {
+ case 1:
+ pci_config_read8((u8 *) addr, &tmp8);
+ *value = tmp8;
+ break;
+
+ case 2:
+ /* 16-bit reads must be naturally aligned. */
+ if (where & 0x01) {
+ printk("pci_read_config_word: misaligned reg [%x]\n",
+ where);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ pci_config_read16((u16 *) addr, &tmp16);
+ *value = tmp16;
+ break;
+
+ case 4:
+ /* 32-bit reads must be naturally aligned. */
+ if (where & 0x03) {
+ printk("pci_read_config_dword: misaligned reg [%x]\n",
+ where);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ pci_config_read32(addr, value);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* pci_ops read entry point.  Ordinary devices go straight to
+ * __sabre_read_pci_cfg(); accesses to the controller itself
+ * (bus 0, slot 0) are decomposed/recombined so that each register
+ * is touched only at its natural size (see comment below).
+ */
+static int sabre_read_pci_cfg(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ /* Nonexistent slots on bus 0 read back as all-ones. */
+ if (!bus->number && sabre_out_of_range(devfn)) {
+ switch (size) {
+ case 1:
+ *value = 0xff;
+ break;
+ case 2:
+ *value = 0xffff;
+ break;
+ case 4:
+ *value = 0xffffffff;
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (bus->number || PCI_SLOT(devfn))
+ return __sabre_read_pci_cfg(bus, devfn, where, size, value);
+
+ /* When accessing PCI config space of the PCI controller itself (bus
+ * 0, device slot 0, function 0) there are restrictions. Each
+ * register must be accessed as it's natural size. Thus, for example
+ * the Vendor ID must be accessed as a 16-bit quantity.
+ */
+
+ switch (size) {
+ case 1:
+ /* Registers below offset 8 are 16-bit: read the containing
+ * word and pick out the requested byte.
+ */
+ if (where < 8) {
+ u32 tmp32;
+ u16 tmp16;
+
+ __sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
+ tmp16 = (u16) tmp32;
+ if (where & 1)
+ *value = tmp16 >> 8;
+ else
+ *value = tmp16 & 0xff;
+ } else
+ return __sabre_read_pci_cfg(bus, devfn, where, 1, value);
+ break;
+
+ case 2:
+ if (where < 8)
+ return __sabre_read_pci_cfg(bus, devfn, where, 2, value);
+ else {
+ /* Above offset 8 registers are byte-wide: build the
+ * word from two byte reads.
+ */
+ u32 tmp32;
+ u8 tmp8;
+
+ __sabre_read_pci_cfg(bus, devfn, where, 1, &tmp32);
+ tmp8 = (u8) tmp32;
+ *value = tmp8;
+ __sabre_read_pci_cfg(bus, devfn, where + 1, 1, &tmp32);
+ tmp8 = (u8) tmp32;
+ *value |= tmp8 << 8;
+ }
+ break;
+
+ case 4: {
+ /* 32-bit reads recurse as two 16-bit reads so each half
+ * obeys the natural-size rule.
+ */
+ u32 tmp32;
+ u16 tmp16;
+
+ sabre_read_pci_cfg(bus, devfn, where, 2, &tmp32);
+ tmp16 = (u16) tmp32;
+ *value = tmp16;
+ sabre_read_pci_cfg(bus, devfn, where + 2, 2, &tmp32);
+ tmp16 = (u16) tmp32;
+ *value |= tmp16 << 16;
+ break;
+ }
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Low-level config-space write.  Unreachable targets and misaligned
+ * requests are silently dropped (still returning PCIBIOS_SUCCESSFUL),
+ * mirroring the read-side behavior.
+ */
+static int __sabre_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct pci_pbm_info *pbm = bus_dev->sysdata;
+ unsigned char bus = bus_dev->number;
+ u32 *addr;
+
+ addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (__sabre_out_of_range(pbm, bus, devfn))
+ return PCIBIOS_SUCCESSFUL;
+
+ switch (size) {
+ case 1:
+ pci_config_write8((u8 *) addr, value);
+ break;
+
+ case 2:
+ /* 16-bit writes must be naturally aligned. */
+ if (where & 0x01) {
+ printk("pci_write_config_word: misaligned reg [%x]\n",
+ where);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ pci_config_write16((u16 *) addr, value);
+ break;
+
+ case 4:
+ /* 32-bit writes must be naturally aligned. */
+ if (where & 0x03) {
+ printk("pci_write_config_dword: misaligned reg [%x]\n",
+ where);
+ return PCIBIOS_SUCCESSFUL;
+ }
+ pci_config_write32(addr, value);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* pci_ops write entry point.  Like the read side, accesses to the
+ * controller itself (bus 0) are decomposed so that each register is
+ * written only at its natural size.
+ */
+static int sabre_write_pci_cfg(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (bus->number)
+ return __sabre_write_pci_cfg(bus, devfn, where, size, value);
+
+ if (sabre_out_of_range(devfn))
+ return PCIBIOS_SUCCESSFUL;
+
+ switch (size) {
+ case 1:
+ if (where < 8) {
+ u32 tmp32;
+ u16 tmp16;
+
+ /* Read-modify-write the containing 16-bit word. */
+ __sabre_read_pci_cfg(bus, devfn, where & ~1, 2, &tmp32);
+ tmp16 = (u16) tmp32;
+ if (where & 1) {
+ value &= 0x00ff;
+ value |= tmp16 << 8;
+ } else {
+ value &= 0xff00;
+ value |= tmp16;
+ }
+ /* NOTE(review): tmp32 below is the *unmodified* old word
+ * (cast from tmp16); the merged "value" computed above is
+ * discarded, so this path appears to write back the old
+ * contents unchanged.  The masks above also look inverted
+ * for a read-modify-write.  Verify intended behavior.
+ */
+ tmp32 = (u32) tmp16;
+ return __sabre_write_pci_cfg(bus, devfn, where & ~1, 2, tmp32);
+ } else
+ return __sabre_write_pci_cfg(bus, devfn, where, 1, value);
+ break;
+ case 2:
+ if (where < 8)
+ return __sabre_write_pci_cfg(bus, devfn, where, 2, value);
+ else {
+ /* Byte-wide registers: split the word into two
+ * single-byte writes.
+ */
+ __sabre_write_pci_cfg(bus, devfn, where, 1, value & 0xff);
+ __sabre_write_pci_cfg(bus, devfn, where + 1, 1, value >> 8);
+ }
+ break;
+ case 4:
+ /* 32-bit writes recurse as two 16-bit writes. */
+ sabre_write_pci_cfg(bus, devfn, where, 2, value & 0xffff);
+ sabre_write_pci_cfg(bus, devfn, where + 2, 2, value >> 16);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space accessor vector installed on the Sabre controller. */
+static struct pci_ops sabre_ops = {
+ .read = sabre_read_pci_cfg,
+ .write = sabre_write_pci_cfg,
+};
+
+/* Map a PCI-slot interrupt INO to its IMAP register offset.
+ * INO bit 4 selects bus A vs. B, bits [3:2] select the slot; each
+ * slot's IMAP register is 8 bytes wide.
+ */
+static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
+{
+ unsigned int bus = (ino & 0x10) >> 4;
+ unsigned int slot = (ino & 0x0c) >> 2;
+
+ if (bus == 0)
+ return SABRE_IMAP_A_SLOT0 + (slot * 8);
+ else
+ return SABRE_IMAP_B_SLOT0 + (slot * 8);
+}
+
+/* IMAP register offsets for onboard devices, indexed by
+ * (INO - SABRE_ONBOARD_IRQ_BASE).  Zero entries are reserved INOs.
+ */
+static unsigned long __onboard_imap_off[] = {
+/*0x20*/ SABRE_IMAP_SCSI,
+/*0x21*/ SABRE_IMAP_ETH,
+/*0x22*/ SABRE_IMAP_BPP,
+/*0x23*/ SABRE_IMAP_AU_REC,
+/*0x24*/ SABRE_IMAP_AU_PLAY,
+/*0x25*/ SABRE_IMAP_PFAIL,
+/*0x26*/ SABRE_IMAP_KMS,
+/*0x27*/ SABRE_IMAP_FLPY,
+/*0x28*/ SABRE_IMAP_SHW,
+/*0x29*/ SABRE_IMAP_KBD,
+/*0x2a*/ SABRE_IMAP_MS,
+/*0x2b*/ SABRE_IMAP_SER,
+/*0x2c*/ 0 /* reserved */,
+/*0x2d*/ 0 /* reserved */,
+/*0x2e*/ SABRE_IMAP_UE,
+/*0x2f*/ SABRE_IMAP_CE,
+/*0x30*/ SABRE_IMAP_PCIERR,
+};
+#define SABRE_ONBOARD_IRQ_BASE 0x20
+#define SABRE_ONBOARD_IRQ_LAST 0x30
+#define sabre_onboard_imap_offset(__ino) \
+ __onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
+
+/* ICLR offset: onboard INOs (bit 5 set) index from SABRE_ICLR_SCSI,
+ * slot INOs from SABRE_ICLR_A_SLOT0; registers are 8 bytes apart.
+ */
+#define sabre_iclr_offset(ino) \
+ ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
+ (SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
+
+/* PCI SABRE INO number to Sparc PIL level.  Zero entries mean "no
+ * fixed PIL": sabre_ino_to_pil() then derives one from the device's
+ * PCI base class.
+ */
+static unsigned char sabre_pil_table[] = {
+/*0x00*/0, 0, 0, 0, /* PCI A slot 0 Int A, B, C, D */
+/*0x04*/0, 0, 0, 0, /* PCI A slot 1 Int A, B, C, D */
+/*0x08*/0, 0, 0, 0, /* PCI A slot 2 Int A, B, C, D */
+/*0x0c*/0, 0, 0, 0, /* PCI A slot 3 Int A, B, C, D */
+/*0x10*/0, 0, 0, 0, /* PCI B slot 0 Int A, B, C, D */
+/*0x14*/0, 0, 0, 0, /* PCI B slot 1 Int A, B, C, D */
+/*0x18*/0, 0, 0, 0, /* PCI B slot 2 Int A, B, C, D */
+/*0x1c*/0, 0, 0, 0, /* PCI B slot 3 Int A, B, C, D */
+/*0x20*/4, /* SCSI */
+/*0x21*/5, /* Ethernet */
+/*0x22*/8, /* Parallel Port */
+/*0x23*/13, /* Audio Record */
+/*0x24*/14, /* Audio Playback */
+/*0x25*/15, /* PowerFail */
+/*0x26*/4, /* second SCSI */
+/*0x27*/11, /* Floppy */
+/*0x28*/4, /* Spare Hardware */
+/*0x29*/9, /* Keyboard */
+/*0x2a*/4, /* Mouse */
+/*0x2b*/12, /* Serial */
+/*0x2c*/10, /* Timer 0 */
+/*0x2d*/11, /* Timer 1 */
+/*0x2e*/15, /* Uncorrectable ECC */
+/*0x2f*/15, /* Correctable ECC */
+/*0x30*/15, /* PCI Bus A Error */
+/*0x31*/15, /* PCI Bus B Error */
+/*0x32*/15, /* Power Management */
+};
+
+/* Choose a Sparc PIL for the given INO.  Sun RIO USB gets a hard
+ * override; otherwise the static table is consulted, and table
+ * entries of 0 (PCI slot interrupts) fall back to a PIL derived
+ * from the device's PCI base class, defaulting to 4.
+ */
+static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
+{
+ int ret;
+
+ if (pdev &&
+ pdev->vendor == PCI_VENDOR_ID_SUN &&
+ pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
+ return 9;
+
+ ret = sabre_pil_table[ino];
+ if (ret == 0 && pdev == NULL) {
+ ret = 4;
+ } else if (ret == 0) {
+ switch ((pdev->class >> 16) & 0xff) {
+ case PCI_BASE_CLASS_STORAGE:
+ ret = 4;
+ break;
+
+ case PCI_BASE_CLASS_NETWORK:
+ ret = 6;
+ break;
+
+ case PCI_BASE_CLASS_DISPLAY:
+ ret = 9;
+ break;
+
+ case PCI_BASE_CLASS_MULTIMEDIA:
+ case PCI_BASE_CLASS_MEMORY:
+ case PCI_BASE_CLASS_BRIDGE:
+ case PCI_BASE_CLASS_SERIAL:
+ ret = 10;
+ break;
+
+ default:
+ ret = 4;
+ break;
+ };
+ }
+ return ret;
+}
+
+/* Translate an INO into a live IRQ: locate the IMAP/ICLR register
+ * pair, pick a PIL, and build the ino_bucket.  For devices behind a
+ * non-APB bridge, arrange for the DMA write-sync workaround (see the
+ * comment below).  Returns the cookie produced by __irq().
+ */
+static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
+ struct pci_dev *pdev,
+ unsigned int ino)
+{
+ struct ino_bucket *bucket;
+ unsigned long imap, iclr;
+ unsigned long imap_off, iclr_off;
+ int pil, inofixup = 0;
+
+ ino &= PCI_IRQ_INO;
+ if (ino < SABRE_ONBOARD_IRQ_BASE) {
+ /* PCI slot */
+ imap_off = sabre_pcislot_imap_offset(ino);
+ } else {
+ /* onboard device */
+ if (ino > SABRE_ONBOARD_IRQ_LAST) {
+ prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
+ prom_halt();
+ }
+ imap_off = sabre_onboard_imap_offset(ino);
+ }
+
+ /* Now build the IRQ bucket. */
+ pil = sabre_ino_to_pil(pdev, ino);
+
+ if (PIL_RESERVED(pil))
+ BUG();
+
+ imap = pbm->controller_regs + imap_off;
+ /* NOTE(review): both register addresses are advanced by 4 —
+ * presumably to address one 32-bit half of the 64-bit register;
+ * confirm against the Sabre manual.
+ */
+ imap += 4;
+
+ iclr_off = sabre_iclr_offset(ino);
+ iclr = pbm->controller_regs + iclr_off;
+ iclr += 4;
+
+ /* Slot interrupts (bit 5 clear) carry the INTx line in INO[1:0]. */
+ if ((ino & 0x20) == 0)
+ inofixup = ino & 0x03;
+
+ bucket = __bucket(build_irq(pil, inofixup, iclr, imap));
+ bucket->flags |= IBF_PCI;
+
+ if (pdev) {
+ struct pcidev_cookie *pcp = pdev->sysdata;
+
+ /* When a device lives behind a bridge deeper in the
+ * PCI bus topology than APB, a special sequence must
+ * run to make sure all pending DMA transfers at the
+ * time of IRQ delivery are visible in the coherency
+ * domain by the cpu. This sequence is to perform
+ * a read on the far side of the non-APB bridge, then
+ * perform a read of Sabre's DMA write-sync register.
+ *
+ * Currently, the PCI_CONFIG register for the device
+ * is used for this read from the far side of the bridge.
+ */
+ if (pdev->bus->number != pcp->pbm->pci_first_busno) {
+ bucket->flags |= IBF_DMA_SYNC;
+ bucket->synctab_ent = dma_sync_reg_table_entry++;
+ dma_sync_reg_table[bucket->synctab_ent] =
+ (unsigned long) sabre_pci_config_mkaddr(
+ pcp->pbm,
+ pdev->bus->number, pdev->devfn, PCI_COMMAND);
+ }
+ }
+ return __irq(bucket);
+}
+
+/* SABRE error handling support. */
+
+/* If the IOMMU control register reports an error, clear it, then
+ * enter diagnostic mode to snapshot and clear all 16 IOTLB tag/data
+ * entries, and log every entry whose tag has the error bit set.
+ * Called from the UE and PCI-error interrupt handlers.  The afsr and
+ * afar arguments are currently unused here.
+ */
+static void sabre_check_iommu_error(struct pci_controller_info *p,
+ unsigned long afsr,
+ unsigned long afar)
+{
+ struct pci_iommu *iommu = p->pbm_A.iommu;
+ unsigned long iommu_tag[16];
+ unsigned long iommu_data[16];
+ unsigned long flags;
+ u64 control;
+ int i;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ control = sabre_read(iommu->iommu_control);
+ if (control & SABRE_IOMMUCTRL_ERR) {
+ char *type_string;
+
+ /* Clear the error encountered bit.
+ * NOTE: On Sabre this is write 1 to clear,
+ * which is different from Psycho.
+ */
+ sabre_write(iommu->iommu_control, control);
+ switch((control & SABRE_IOMMUCTRL_ERRSTS) >> 25UL) {
+ case 1:
+ type_string = "Invalid Error";
+ break;
+ case 3:
+ type_string = "ECC Error";
+ break;
+ default:
+ type_string = "Unknown";
+ break;
+ };
+ printk("SABRE%d: IOMMU Error, type[%s]\n",
+ p->index, type_string);
+
+ /* Enter diagnostic mode and probe for error'd
+ * entries in the IOTLB.
+ */
+ control &= ~(SABRE_IOMMUCTRL_ERRSTS | SABRE_IOMMUCTRL_ERR);
+ sabre_write(iommu->iommu_control,
+ (control | SABRE_IOMMUCTRL_DENAB));
+ for (i = 0; i < 16; i++) {
+ unsigned long base = p->pbm_A.controller_regs;
+
+ iommu_tag[i] =
+ sabre_read(base + SABRE_IOMMU_TAG + (i * 8UL));
+ iommu_data[i] =
+ sabre_read(base + SABRE_IOMMU_DATA + (i * 8UL));
+ sabre_write(base + SABRE_IOMMU_TAG + (i * 8UL), 0);
+ sabre_write(base + SABRE_IOMMU_DATA + (i * 8UL), 0);
+ }
+ /* Leave diagnostic mode. */
+ sabre_write(iommu->iommu_control, control);
+
+ for (i = 0; i < 16; i++) {
+ unsigned long tag, data;
+
+ tag = iommu_tag[i];
+ if (!(tag & SABRE_IOMMUTAG_ERR))
+ continue;
+
+ data = iommu_data[i];
+ switch((tag & SABRE_IOMMUTAG_ERRSTS) >> 23UL) {
+ case 1:
+ type_string = "Invalid Error";
+ break;
+ case 3:
+ type_string = "ECC Error";
+ break;
+ default:
+ type_string = "Unknown";
+ break;
+ };
+ printk("SABRE%d: IOMMU TAG(%d)[RAW(%016lx)error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n",
+ p->index, i, tag, type_string,
+ ((tag & SABRE_IOMMUTAG_WRITE) ? 1 : 0),
+ ((tag & SABRE_IOMMUTAG_SIZE) ? 64 : 8),
+ ((tag & SABRE_IOMMUTAG_VPN) << IOMMU_PAGE_SHIFT));
+ printk("SABRE%d: IOMMU DATA(%d)[RAW(%016lx)valid(%d)used(%d)cache(%d)ppg(%016lx)\n",
+ p->index, i, data,
+ ((data & SABRE_IOMMUDATA_VALID) ? 1 : 0),
+ ((data & SABRE_IOMMUDATA_USED) ? 1 : 0),
+ ((data & SABRE_IOMMUDATA_CACHE) ? 1 : 0),
+ ((data & SABRE_IOMMUDATA_PPN) << IOMMU_PAGE_SHIFT));
+ }
+ }
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Uncorrectable-error interrupt handler: latch AFAR/AFSR, write back
+ * the primary/secondary error bits to clear them, log the details,
+ * then interrogate the IOMMU.  Returns IRQ_NONE if no UE bits were
+ * set (shared-IRQ convention).
+ */
+static irqreturn_t sabre_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct pci_controller_info *p = dev_id;
+ unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_UE_AFSR;
+ unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
+ unsigned long afsr, afar, error_bits;
+ int reported;
+
+ /* Latch uncorrectable error status. */
+ afar = sabre_read(afar_reg);
+ afsr = sabre_read(afsr_reg);
+
+ /* Clear the primary/secondary error status bits. */
+ error_bits = afsr &
+ (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
+ SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
+ SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
+ if (!error_bits)
+ return IRQ_NONE;
+ sabre_write(afsr_reg, error_bits);
+
+ /* Log the error. */
+ printk("SABRE%d: Uncorrectable Error, primary error type[%s%s]\n",
+ p->index,
+ ((error_bits & SABRE_UEAFSR_PDRD) ?
+ "DMA Read" :
+ ((error_bits & SABRE_UEAFSR_PDWR) ?
+ "DMA Write" : "???")),
+ ((error_bits & SABRE_UEAFSR_PDTE) ?
+ ":Translation Error" : ""));
+ printk("SABRE%d: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
+ p->index,
+ (afsr & SABRE_UEAFSR_BMSK) >> 32UL,
+ (afsr & SABRE_UEAFSR_OFF) >> 29UL,
+ ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
+ printk("SABRE%d: UE AFAR [%016lx]\n", p->index, afar);
+ printk("SABRE%d: UE Secondary errors [", p->index);
+ reported = 0;
+ if (afsr & SABRE_UEAFSR_SDRD) {
+ reported++;
+ printk("(DMA Read)");
+ }
+ if (afsr & SABRE_UEAFSR_SDWR) {
+ reported++;
+ printk("(DMA Write)");
+ }
+ if (afsr & SABRE_UEAFSR_SDTE) {
+ reported++;
+ printk("(Translation Error)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+
+ /* Interrogate IOMMU for error status. */
+ sabre_check_iommu_error(p, afsr, afar);
+
+ return IRQ_HANDLED;
+}
+
+/* Correctable-error interrupt handler: latch AFAR/AFSR, clear the
+ * primary/secondary CE bits, and log syndrome/bytemask details.
+ * Returns IRQ_NONE if no CE bits were set.
+ */
+static irqreturn_t sabre_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct pci_controller_info *p = dev_id;
+ unsigned long afsr_reg = p->pbm_A.controller_regs + SABRE_CE_AFSR;
+ unsigned long afar_reg = p->pbm_A.controller_regs + SABRE_UECE_AFAR;
+ unsigned long afsr, afar, error_bits;
+ int reported;
+
+ /* Latch error status. */
+ afar = sabre_read(afar_reg);
+ afsr = sabre_read(afsr_reg);
+
+ /* Clear primary/secondary error status bits. */
+ error_bits = afsr &
+ (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
+ SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
+ if (!error_bits)
+ return IRQ_NONE;
+ sabre_write(afsr_reg, error_bits);
+
+ /* Log the error. */
+ printk("SABRE%d: Correctable Error, primary error type[%s]\n",
+ p->index,
+ ((error_bits & SABRE_CEAFSR_PDRD) ?
+ "DMA Read" :
+ ((error_bits & SABRE_CEAFSR_PDWR) ?
+ "DMA Write" : "???")));
+
+ /* XXX Use syndrome and afar to print out module string just like
+ * XXX UDB CE trap handler does... -DaveM
+ */
+ printk("SABRE%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
+ "was_block(%d)\n",
+ p->index,
+ (afsr & SABRE_CEAFSR_ESYND) >> 48UL,
+ (afsr & SABRE_CEAFSR_BMSK) >> 32UL,
+ (afsr & SABRE_CEAFSR_OFF) >> 29UL,
+ ((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
+ printk("SABRE%d: CE AFAR [%016lx]\n", p->index, afar);
+ printk("SABRE%d: CE Secondary errors [", p->index);
+ reported = 0;
+ if (afsr & SABRE_CEAFSR_SDRD) {
+ reported++;
+ printk("(DMA Read)");
+ }
+ if (afsr & SABRE_CEAFSR_SDWR) {
+ reported++;
+ printk("(DMA Write)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+
+ return IRQ_HANDLED;
+}
+
+/* Fallback path of the PCI-error handler when no PIO AFSR bits were
+ * set: check the controller's SERR latch and the root bridge's
+ * PCI_STATUS register for bus errors, clearing and logging whatever
+ * is found.  Returns IRQ_HANDLED only if something was reported.
+ */
+static irqreturn_t sabre_pcierr_intr_other(struct pci_controller_info *p)
+{
+ unsigned long csr_reg, csr, csr_error_bits;
+ irqreturn_t ret = IRQ_NONE;
+ u16 stat;
+
+ csr_reg = p->pbm_A.controller_regs + SABRE_PCICTRL;
+ csr = sabre_read(csr_reg);
+ csr_error_bits =
+ csr & SABRE_PICTRL_SERR;
+ if (csr_error_bits) {
+ /* Clear the errors. */
+ sabre_write(csr_reg, csr);
+
+ /* Log 'em. */
+ if (csr_error_bits & SABRE_PCICTRL_SERR)
+ printk("SABRE%d: PCI SERR signal asserted.\n",
+ p->index);
+ ret = IRQ_HANDLED;
+ }
+ pci_read_config_word(sabre_root_bus->self,
+ PCI_STATUS, &stat);
+ if (stat & (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR)) {
+ printk("SABRE%d: PCI bus error, PCI_STATUS[%04x]\n",
+ p->index, stat);
+ pci_write_config_word(sabre_root_bus->self,
+ PCI_STATUS, 0xffff);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+/* PCI-error interrupt handler: latch PIO AFAR/AFSR, clear and log
+ * the primary/secondary error bits, then scan both PCI buses for
+ * devices that logged the corresponding error type.  Falls through
+ * to sabre_pcierr_intr_other() when no AFSR bits are set.
+ */
+static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct pci_controller_info *p = dev_id;
+ unsigned long afsr_reg, afar_reg;
+ unsigned long afsr, afar, error_bits;
+ int reported;
+
+ afsr_reg = p->pbm_A.controller_regs + SABRE_PIOAFSR;
+ afar_reg = p->pbm_A.controller_regs + SABRE_PIOAFAR;
+
+ /* Latch error status. */
+ afar = sabre_read(afar_reg);
+ afsr = sabre_read(afsr_reg);
+
+ /* Clear primary/secondary error status bits. */
+ error_bits = afsr &
+ (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_PTA |
+ SABRE_PIOAFSR_PRTRY | SABRE_PIOAFSR_PPERR |
+ SABRE_PIOAFSR_SMA | SABRE_PIOAFSR_STA |
+ SABRE_PIOAFSR_SRTRY | SABRE_PIOAFSR_SPERR);
+ if (!error_bits)
+ return sabre_pcierr_intr_other(p);
+ sabre_write(afsr_reg, error_bits);
+
+ /* Log the error. */
+ printk("SABRE%d: PCI Error, primary error type[%s]\n",
+ p->index,
+ (((error_bits & SABRE_PIOAFSR_PMA) ?
+ "Master Abort" :
+ ((error_bits & SABRE_PIOAFSR_PTA) ?
+ "Target Abort" :
+ ((error_bits & SABRE_PIOAFSR_PRTRY) ?
+ "Excessive Retries" :
+ ((error_bits & SABRE_PIOAFSR_PPERR) ?
+ "Parity Error" : "???"))))));
+ printk("SABRE%d: bytemask[%04lx] was_block(%d)\n",
+ p->index,
+ (afsr & SABRE_PIOAFSR_BMSK) >> 32UL,
+ (afsr & SABRE_PIOAFSR_BLK) ? 1 : 0);
+ printk("SABRE%d: PCI AFAR [%016lx]\n", p->index, afar);
+ printk("SABRE%d: PCI Secondary errors [", p->index);
+ reported = 0;
+ if (afsr & SABRE_PIOAFSR_SMA) {
+ reported++;
+ printk("(Master Abort)");
+ }
+ if (afsr & SABRE_PIOAFSR_STA) {
+ reported++;
+ printk("(Target Abort)");
+ }
+ if (afsr & SABRE_PIOAFSR_SRTRY) {
+ reported++;
+ printk("(Excessive Retries)");
+ }
+ if (afsr & SABRE_PIOAFSR_SPERR) {
+ reported++;
+ printk("(Parity Error)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+
+ /* For the error types shown, scan both PCI buses for devices
+ * which have logged that error type.
+ */
+
+ /* If we see a Target Abort, this could be the result of an
+ * IOMMU translation error of some sort. It is extremely
+ * useful to log this information as usually it indicates
+ * a bug in the IOMMU support code or a PCI device driver.
+ */
+ if (error_bits & (SABRE_PIOAFSR_PTA | SABRE_PIOAFSR_STA)) {
+ sabre_check_iommu_error(p, afsr, afar);
+ pci_scan_for_target_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
+ pci_scan_for_target_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
+ }
+ if (error_bits & (SABRE_PIOAFSR_PMA | SABRE_PIOAFSR_SMA)) {
+ pci_scan_for_master_abort(p, &p->pbm_A, p->pbm_A.pci_bus);
+ pci_scan_for_master_abort(p, &p->pbm_B, p->pbm_B.pci_bus);
+ }
+ /* For excessive retries, SABRE/PBM will abort the device
+ * and there is no way to specifically check for excessive
+ * retries in the config space status registers. So what
+ * we hope is that we'll catch it via the master/target
+ * abort events.
+ */
+
+ if (error_bits & (SABRE_PIOAFSR_PPERR | SABRE_PIOAFSR_SPERR)) {
+ pci_scan_for_parity_error(p, &p->pbm_A, p->pbm_A.pci_bus);
+ pci_scan_for_parity_error(p, &p->pbm_B, p->pbm_B.pci_bus);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* XXX What about PowerFail/PowerManagement??? -DaveM */
+#define SABRE_UE_INO 0x2e
+#define SABRE_CE_INO 0x2f
+#define SABRE_PCIERR_INO 0x30
+/* Install the UE, CE and PCI-error interrupt handlers on the
+ * controller (all as shared IRQs with the controller as dev_id),
+ * pre-clearing each AFSR's error bits first, then enable error
+ * interrupt reporting via SABRE_PCICTRL_ERREN.  Any registration
+ * failure is fatal and drops to the PROM.
+ */
+static void __init sabre_register_error_handlers(struct pci_controller_info *p)
+{
+ struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
+ unsigned long base = pbm->controller_regs;
+ unsigned long irq, portid = pbm->portid;
+ u64 tmp;
+
+ /* We clear the error bits in the appropriate AFSR before
+ * registering the handler so that we don't get spurious
+ * interrupts.
+ */
+ sabre_write(base + SABRE_UE_AFSR,
+ (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
+ SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
+ SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE));
+ irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_UE_INO);
+ if (request_irq(irq, sabre_ue_intr,
+ SA_SHIRQ, "SABRE UE", p) < 0) {
+ prom_printf("SABRE%d: Cannot register UE interrupt.\n",
+ p->index);
+ prom_halt();
+ }
+
+ sabre_write(base + SABRE_CE_AFSR,
+ (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
+ SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR));
+ irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_CE_INO);
+ if (request_irq(irq, sabre_ce_intr,
+ SA_SHIRQ, "SABRE CE", p) < 0) {
+ prom_printf("SABRE%d: Cannot register CE interrupt.\n",
+ p->index);
+ prom_halt();
+ }
+
+ irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_PCIERR_INO);
+ if (request_irq(irq, sabre_pcierr_intr,
+ SA_SHIRQ, "SABRE PCIERR", p) < 0) {
+ prom_printf("SABRE%d: Cannot register PciERR interrupt.\n",
+ p->index);
+ prom_halt();
+ }
+
+ /* Now that handlers are in place, turn on error reporting. */
+ tmp = sabre_read(base + SABRE_PCICTRL);
+ tmp |= SABRE_PCICTRL_ERREN;
+ sabre_write(base + SABRE_PCICTRL, tmp);
+}
+
+/* Translate RES from PCI-bus-relative addresses to CPU physical
+ * addresses by adding the controller's I/O or memory space base,
+ * chosen from the resource's flags.  ROOT is unused here.
+ */
+static void __init sabre_resource_adjust(struct pci_dev *pdev,
+					 struct resource *res,
+					 struct resource *root)
+{
+	struct pci_pbm_info *pbm = pdev->bus->sysdata;
+	unsigned long base;
+
+	if (res->flags & IORESOURCE_IO)
+		base = pbm->controller_regs + SABRE_IOSPACE;
+	else
+		base = pbm->controller_regs + SABRE_MEMSPACE;
+
+	res->start += base;
+	res->end += base;
+}
+
+/* Write the (controller-relative) address of PDEV's RESOURCE
+ * back into the device's BAR (or expansion ROM register).  The
+ * CPU physical base of the matching SABRE space is subtracted
+ * first, since BARs hold bus addresses.
+ */
+static void __init sabre_base_address_update(struct pci_dev *pdev, int resource)
+{
+	struct pcidev_cookie *pcp = pdev->sysdata;
+	struct pci_pbm_info *pbm = pcp->pbm;
+	struct resource *res;
+	unsigned long base;
+	u32 reg;
+	int where, size, is_64bit;
+
+	res = &pdev->resource[resource];
+	if (resource < 6) {
+		where = PCI_BASE_ADDRESS_0 + (resource * 4);
+	} else if (resource == PCI_ROM_RESOURCE) {
+		where = pdev->rom_base_reg;
+	} else {
+		/* Somebody might have asked allocation of a non-standard resource */
+		return;
+	}
+
+	is_64bit = 0;
+	if (res->flags & IORESOURCE_IO)
+		base = pbm->controller_regs + SABRE_IOSPACE;
+	else {
+		base = pbm->controller_regs + SABRE_MEMSPACE;
+		if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+		    == PCI_BASE_ADDRESS_MEM_TYPE_64)
+			is_64bit = 1;
+	}
+
+	/* Preserve the BAR's read-only size-probing bits, replace
+	 * only the address bits above them.
+	 */
+	size = res->end - res->start;
+	pci_read_config_dword(pdev, where, &reg);
+	reg = ((reg & size) |
+	       (((u32)(res->start - base)) & ~size));
+	if (resource == PCI_ROM_RESOURCE) {
+		reg |= PCI_ROM_ADDRESS_ENABLE;
+		res->flags |= IORESOURCE_ROM_ENABLE;
+	}
+	pci_write_config_dword(pdev, where, reg);
+
+	/* This knows that the upper 32-bits of the address
+	 * must be zero.  Our PCI common layer enforces this.
+	 */
+	if (is_64bit)
+		pci_write_config_dword(pdev, where + 4, 0);
+}
+
+/* Initial fixup of any APB (Simba) bridges found directly on the
+ * SABRE bus: enable bus mastering and error signalling, clear
+ * latched status bits, and set sane latency timers on both sides
+ * of each bridge.
+ */
+static void __init apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
+{
+	struct pci_dev *pdev;
+
+	list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
+
+		if (pdev->vendor == PCI_VENDOR_ID_SUN &&
+		    pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
+			u32 word32;
+			u16 word16;
+
+			sabre_read_pci_cfg(pdev->bus, pdev->devfn,
+					   PCI_COMMAND, 2, &word32);
+			word16 = (u16) word32;
+			word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+				PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
+				PCI_COMMAND_IO;
+			word32 = (u32) word16;
+			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+					    PCI_COMMAND, 2, word32);
+
+			/* Status register bits are "write 1 to clear". */
+			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+					    PCI_STATUS, 2, 0xffff);
+			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+					    PCI_SEC_STATUS, 2, 0xffff);
+
+			/* Use a primary/secondary latency timer value
+			 * of 64.
+			 */
+			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+					    PCI_LATENCY_TIMER, 1, 64);
+			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+					    PCI_SEC_LATENCY_TIMER, 1, 64);
+
+			/* Enable reporting/forwarding of master aborts,
+			 * parity, and SERR.
+			 */
+			sabre_write_pci_cfg(pdev->bus, pdev->devfn,
+					    PCI_BRIDGE_CONTROL, 1,
+					    (PCI_BRIDGE_CTL_PARITY |
+					     PCI_BRIDGE_CTL_SERR |
+					     PCI_BRIDGE_CTL_MASTER_ABORT));
+		}
+	}
+}
+
+/* Allocate a zeroed pcidev_cookie for a bridge device and point
+ * it at PBM.  Allocation failure is fatal at boot time.
+ */
+static struct pcidev_cookie *alloc_bridge_cookie(struct pci_pbm_info *pbm)
+{
+	struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
+
+	if (!cookie) {
+		prom_printf("SABRE: Critical allocation failure.\n");
+		prom_halt();
+	}
+
+	/* All we care about is the PBM. */
+	memset(cookie, 0, sizeof(*cookie));
+	cookie->pbm = pbm;
+
+	return cookie;
+}
+
+/* Probe the PCI hierarchy under this SABRE.  APB child buses are
+ * matched to their PBM and get cookies, resource assignment and
+ * IRQ fixups; on APB-less (Hummingbird) systems the root bus is
+ * handled by PBM A directly.  Finishes by enabling the error
+ * interrupts.  Only a single SABRE controller is supported.
+ */
+static void __init sabre_scan_bus(struct pci_controller_info *p)
+{
+	static int once;
+	struct pci_bus *sabre_bus, *pbus;
+	struct pci_pbm_info *pbm;
+	struct pcidev_cookie *cookie;
+	int sabres_scanned;
+
+	/* The APB bridge speaks to the Sabre host PCI bridge
+	 * at 66Mhz, but the front side of APB runs at 33Mhz
+	 * for both segments.
+	 */
+	p->pbm_A.is_66mhz_capable = 0;
+	p->pbm_B.is_66mhz_capable = 0;
+
+	/* This driver has not been verified to handle
+	 * multiple SABREs yet, so trap this.
+	 *
+	 * Also note that the SABRE host bridge is hardwired
+	 * to live at bus 0.
+	 */
+	if (once != 0) {
+		prom_printf("SABRE: Multiple controllers unsupported.\n");
+		prom_halt();
+	}
+	once++;
+
+	cookie = alloc_bridge_cookie(&p->pbm_A);
+
+	sabre_bus = pci_scan_bus(p->pci_first_busno,
+				 p->pci_ops,
+				 &p->pbm_A);
+	pci_fixup_host_bridge_self(sabre_bus);
+	sabre_bus->self->sysdata = cookie;
+
+	sabre_root_bus = sabre_bus;
+
+	apb_init(p, sabre_bus);
+
+	sabres_scanned = 0;
+
+	/* Match each child bus to the PBM whose bus range it
+	 * heads; anything else is skipped.
+	 */
+	list_for_each_entry(pbus, &sabre_bus->children, node) {
+
+		if (pbus->number == p->pbm_A.pci_first_busno) {
+			pbm = &p->pbm_A;
+		} else if (pbus->number == p->pbm_B.pci_first_busno) {
+			pbm = &p->pbm_B;
+		} else
+			continue;
+
+		cookie = alloc_bridge_cookie(pbm);
+		pbus->self->sysdata = cookie;
+
+		sabres_scanned++;
+
+		pbus->sysdata = pbm;
+		pbm->pci_bus = pbus;
+		pci_fill_in_pbm_cookies(pbus, pbm, pbm->prom_node);
+		pci_record_assignments(pbm, pbus);
+		pci_assign_unassigned(pbm, pbus);
+		pci_fixup_irq(pbm, pbus);
+		pci_determine_66mhz_disposition(pbm, pbus);
+		pci_setup_busmastering(pbm, pbus);
+	}
+
+	if (!sabres_scanned) {
+		/* Hummingbird, no APBs. */
+		pbm = &p->pbm_A;
+		sabre_bus->sysdata = pbm;
+		pbm->pci_bus = sabre_bus;
+		pci_fill_in_pbm_cookies(sabre_bus, pbm, pbm->prom_node);
+		pci_record_assignments(pbm, sabre_bus);
+		pci_assign_unassigned(pbm, sabre_bus);
+		pci_fixup_irq(pbm, sabre_bus);
+		pci_determine_66mhz_disposition(pbm, sabre_bus);
+		pci_setup_busmastering(pbm, sabre_bus);
+	}
+
+	sabre_register_error_handlers(p);
+}
+
+/* Initialize the SABRE IOMMU: set up the software state, flush
+ * the 16 hardware TLB entries via diagnostic mode, allocate the
+ * dummy page and the TSB (TSBSIZE is in units of 1024 entries;
+ * only 64 and 128 are valid), then enable translation with the
+ * given DVMA_OFFSET/DMA_MASK window.  All failures are fatal.
+ *
+ * Fix vs. original: the dummy-page failure message said
+ * "PSYCHO_IOMMU", a copy-paste from the psycho driver, which
+ * would misattribute the fatal error to the wrong controller;
+ * it now says "SABRE_IOMMU" like the TSB failure path below.
+ */
+static void __init sabre_iommu_init(struct pci_controller_info *p,
+				    int tsbsize, unsigned long dvma_offset,
+				    u32 dma_mask)
+{
+	struct pci_iommu *iommu = p->pbm_A.iommu;
+	unsigned long tsbbase, i, order;
+	u64 control;
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->iommu_cur_ctx = 0;
+
+	/* Register addresses. */
+	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
+	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
+	iommu->iommu_flush    = p->pbm_A.controller_regs + SABRE_IOMMU_FLUSH;
+	iommu->write_complete_reg = p->pbm_A.controller_regs + SABRE_WRSYNC;
+	/* Sabre's IOMMU lacks ctx flushing. */
+	iommu->iommu_ctxflush = 0;
+
+	/* Invalidate TLB Entries. */
+	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
+	control |= SABRE_IOMMUCTRL_DENAB;
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
+
+	for(i = 0; i < 16; i++) {
+		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TAG + (i * 8UL), 0);
+		sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_DATA + (i * 8UL), 0);
+	}
+
+	/* Leave diag mode enabled for full-flushing done
+	 * in pci_iommu.c
+	 */
+
+	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+	if (!iommu->dummy_page) {
+		prom_printf("SABRE_IOMMU: Error, gfp(dummy_page) failed.\n");
+		prom_halt();
+	}
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+	/* TSB entries are 8 bytes, tsbsize is in units of 1K entries. */
+	tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
+	if (!tsbbase) {
+		prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
+		prom_halt();
+	}
+	iommu->page_table = (iopte_t *)tsbbase;
+	iommu->page_table_map_base = dvma_offset;
+	iommu->dma_addr_mask = dma_mask;
+	pci_iommu_table_init(iommu, PAGE_SIZE << order);
+
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
+
+	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
+	control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
+	control |= SABRE_IOMMUCTRL_ENAB;
+	switch(tsbsize) {
+	case 64:
+		control |= SABRE_IOMMU_TSBSZ_64K;
+		iommu->page_table_sz_bits = 16;
+		break;
+	case 128:
+		control |= SABRE_IOMMU_TSBSZ_128K;
+		iommu->page_table_sz_bits = 17;
+		break;
+	default:
+		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
+		prom_halt();
+		break;
+	}
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
+
+	/* We start with no consistent mappings. */
+	iommu->lowest_consistent_map =
+		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
+
+	for (i = 0; i < PBM_NCLUSTERS; i++) {
+		iommu->alloc_info[i].flush = 0;
+		iommu->alloc_info[i].next = 0;
+	}
+}
+
+/* Compute and register PBM's top-level I/O and memory resources.
+ * The APB address map registers are 8-bit masks where each set
+ * bit claims one chunk of the parent space (1<<21 bytes for I/O,
+ * 1<<29 bytes for MEM); the span from the first to last set bit
+ * becomes the PBM's resource window.  Registration failure is
+ * fatal.  Legacy VGA/ISA regions are added if this PBM starts at
+ * the base of both spaces.
+ */
+static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
+						   struct pci_pbm_info *pbm)
+{
+	char *name = pbm->name;
+	unsigned long ibase = p->pbm_A.controller_regs + SABRE_IOSPACE;
+	unsigned long mbase = p->pbm_A.controller_regs + SABRE_MEMSPACE;
+	unsigned int devfn;
+	unsigned long first, last, i;
+	u8 *addr, map;
+
+	sprintf(name, "SABRE%d PBM%c",
+		p->index,
+		(pbm == &p->pbm_A ? 'A' : 'B'));
+	pbm->io_space.name = pbm->mem_space.name = name;
+
+	/* PBM A is function 0 of device 1, PBM B is function 1. */
+	devfn = PCI_DEVFN(1, (pbm == &p->pbm_A) ? 0 : 1);
+	addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_IO_ADDRESS_MAP);
+	map = 0;
+	pci_config_read8(addr, &map);
+
+	first = 8;
+	last = 0;
+	for (i = 0; i < 8; i++) {
+		if ((map & (1 << i)) != 0) {
+			if (first > i)
+				first = i;
+			if (last < i)
+				last = i;
+		}
+	}
+	pbm->io_space.start = ibase + (first << 21UL);
+	pbm->io_space.end   = ibase + (last << 21UL) + ((1 << 21UL) - 1);
+	pbm->io_space.flags = IORESOURCE_IO;
+
+	addr = sabre_pci_config_mkaddr(pbm, 0, devfn, APB_MEM_ADDRESS_MAP);
+	map = 0;
+	pci_config_read8(addr, &map);
+
+	first = 8;
+	last = 0;
+	for (i = 0; i < 8; i++) {
+		if ((map & (1 << i)) != 0) {
+			if (first > i)
+				first = i;
+			if (last < i)
+				last = i;
+		}
+	}
+	pbm->mem_space.start = mbase + (first << 29UL);
+	pbm->mem_space.end   = mbase + (last << 29UL) + ((1 << 29UL) - 1);
+	pbm->mem_space.flags = IORESOURCE_MEM;
+
+	if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
+		prom_printf("Cannot register PBM-%c's IO space.\n",
+			    (pbm == &p->pbm_A ? 'A' : 'B'));
+		prom_halt();
+	}
+	if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
+		prom_printf("Cannot register PBM-%c's MEM space.\n",
+			    (pbm == &p->pbm_A ? 'A' : 'B'));
+		prom_halt();
+	}
+
+	/* Register legacy regions if this PBM covers that area. */
+	if (pbm->io_space.start == ibase &&
+	    pbm->mem_space.start == mbase)
+		pci_register_legacy_regions(&pbm->io_space,
+					    &pbm->mem_space);
+}
+
+/* Walk the PROM children of SABRE_NODE looking for APB (Simba)
+ * "pci" nodes and initialize a PBM for each one found (bus-range
+ * property decides A vs B).  If no Simbas exist this is a
+ * Hummingbird system: PBM A is initialized from the SABRE node
+ * itself with hand-built top-level resources, where DMA_BEGIN
+ * bounds the usable MEM space.
+ */
+static void __init sabre_pbm_init(struct pci_controller_info *p, int sabre_node, u32 dma_begin)
+{
+	struct pci_pbm_info *pbm;
+	char namebuf[128];
+	u32 busrange[2];
+	int node, simbas_found;
+
+	simbas_found = 0;
+	node = prom_getchild(sabre_node);
+	while ((node = prom_searchsiblings(node, "pci")) != 0) {
+		int err;
+
+		/* Only SUNW,simba model nodes are APB bridges. */
+		err = prom_getproperty(node, "model", namebuf, sizeof(namebuf));
+		if ((err <= 0) || strncmp(namebuf, "SUNW,simba", err))
+			goto next_pci;
+
+		err = prom_getproperty(node, "bus-range",
+				       (char *)&busrange[0], sizeof(busrange));
+		if (err == 0 || err == -1) {
+			prom_printf("APB: Error, cannot get PCI bus-range.\n");
+			prom_halt();
+		}
+
+		simbas_found++;
+		if (busrange[0] == 1)
+			pbm = &p->pbm_B;
+		else
+			pbm = &p->pbm_A;
+		pbm->chip_type = PBM_CHIP_TYPE_SABRE;
+		pbm->parent = p;
+		pbm->prom_node = node;
+		pbm->pci_first_slot = 1;
+		pbm->pci_first_busno = busrange[0];
+		pbm->pci_last_busno = busrange[1];
+
+		prom_getstring(node, "name", pbm->prom_name, sizeof(pbm->prom_name));
+		err = prom_getproperty(node, "ranges",
+				       (char *)pbm->pbm_ranges,
+				       sizeof(pbm->pbm_ranges));
+		if (err != -1)
+			pbm->num_pbm_ranges =
+				(err / sizeof(struct linux_prom_pci_ranges));
+		else
+			pbm->num_pbm_ranges = 0;
+
+		/* interrupt-map is optional, but if present the
+		 * matching mask property is mandatory.
+		 */
+		err = prom_getproperty(node, "interrupt-map",
+				       (char *)pbm->pbm_intmap,
+				       sizeof(pbm->pbm_intmap));
+		if (err != -1) {
+			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+			err = prom_getproperty(node, "interrupt-map-mask",
+					       (char *)&pbm->pbm_intmask,
+					       sizeof(pbm->pbm_intmask));
+			if (err == -1) {
+				prom_printf("APB: Fatal error, no interrupt-map-mask.\n");
+				prom_halt();
+			}
+		} else {
+			pbm->num_pbm_intmap = 0;
+			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+		}
+
+		pbm_register_toplevel_resources(p, pbm);
+
+	next_pci:
+		node = prom_getsibling(node);
+		if (!node)
+			break;
+	}
+	if (simbas_found == 0) {
+		int err;
+
+		/* No APBs underneath, probably this is a hummingbird
+		 * system.
+		 */
+		pbm = &p->pbm_A;
+		pbm->parent = p;
+		pbm->prom_node = sabre_node;
+		pbm->pci_first_busno = p->pci_first_busno;
+		pbm->pci_last_busno = p->pci_last_busno;
+
+		prom_getstring(sabre_node, "name", pbm->prom_name, sizeof(pbm->prom_name));
+		err = prom_getproperty(sabre_node, "ranges",
+				       (char *) pbm->pbm_ranges,
+				       sizeof(pbm->pbm_ranges));
+		if (err != -1)
+			pbm->num_pbm_ranges =
+				(err / sizeof(struct linux_prom_pci_ranges));
+		else
+			pbm->num_pbm_ranges = 0;
+
+		err = prom_getproperty(sabre_node, "interrupt-map",
+				       (char *) pbm->pbm_intmap,
+				       sizeof(pbm->pbm_intmap));
+
+		if (err != -1) {
+			pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+			err = prom_getproperty(sabre_node, "interrupt-map-mask",
+					       (char *)&pbm->pbm_intmask,
+					       sizeof(pbm->pbm_intmask));
+			if (err == -1) {
+				prom_printf("Hummingbird: Fatal error, no interrupt-map-mask.\n");
+				prom_halt();
+			}
+		} else {
+			pbm->num_pbm_intmap = 0;
+			memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+		}
+
+
+		sprintf(pbm->name, "SABRE%d PBM%c", p->index,
+			(pbm == &p->pbm_A ? 'A' : 'B'));
+		pbm->io_space.name = pbm->mem_space.name = pbm->name;
+
+		/* Hack up top-level resources. */
+		pbm->io_space.start = p->pbm_A.controller_regs + SABRE_IOSPACE;
+		pbm->io_space.end   = pbm->io_space.start + (1UL << 24) - 1UL;
+		pbm->io_space.flags = IORESOURCE_IO;
+
+		pbm->mem_space.start = p->pbm_A.controller_regs + SABRE_MEMSPACE;
+		pbm->mem_space.end   = pbm->mem_space.start + (unsigned long)dma_begin - 1UL;
+		pbm->mem_space.flags = IORESOURCE_MEM;
+
+		if (request_resource(&ioport_resource, &pbm->io_space) < 0) {
+			prom_printf("Cannot register Hummingbird's IO space.\n");
+			prom_halt();
+		}
+		if (request_resource(&iomem_resource, &pbm->mem_space) < 0) {
+			prom_printf("Cannot register Hummingbird's MEM space.\n");
+			prom_halt();
+		}
+
+		pci_register_legacy_regions(&pbm->io_space,
+					    &pbm->mem_space);
+	}
+}
+
+/* Top-level SABRE probe.  Detects Hummingbird variants from the
+ * model/compatible/CPU name, allocates controller and IOMMU
+ * state, maps registers from the PROM "reg" property, clears
+ * pending interrupts, configures the IOMMU from "virtual-dma",
+ * reads the bus range, and descends into sabre_pbm_init() to
+ * find any APB bridges.  All PROM property failures are fatal.
+ */
+void __init sabre_init(int pnode, char *model_name)
+{
+	struct linux_prom64_registers pr_regs[2];
+	struct pci_controller_info *p;
+	struct pci_iommu *iommu;
+	int tsbsize, err;
+	u32 busrange[2];
+	u32 vdma[2];
+	u32 upa_portid, dma_mask;
+	u64 clear_irq;
+
+	hummingbird_p = 0;
+	if (!strcmp(model_name, "pci108e,a001"))
+		hummingbird_p = 1;
+	else if (!strcmp(model_name, "SUNW,sabre")) {
+		char compat[64];
+
+		if (prom_getproperty(pnode, "compatible",
+				     compat, sizeof(compat)) > 0 &&
+		    !strcmp(compat, "pci108e,a001")) {
+			hummingbird_p = 1;
+		} else {
+			int cpu_node;
+
+			/* Of course, Sun has to encode things a thousand
+			 * different ways, inconsistently.
+			 */
+			cpu_find_by_instance(0, &cpu_node, NULL);
+			if (prom_getproperty(cpu_node, "name",
+					     compat, sizeof(compat)) > 0 &&
+			    !strcmp(compat, "SUNW,UltraSPARC-IIe"))
+				hummingbird_p = 1;
+		}
+	}
+
+	p = kmalloc(sizeof(*p), GFP_ATOMIC);
+	if (!p) {
+		prom_printf("SABRE: Error, kmalloc(pci_controller_info) failed.\n");
+		prom_halt();
+	}
+	memset(p, 0, sizeof(*p));
+
+	/* Both PBMs share the one IOMMU on this chip. */
+	iommu = kmalloc(sizeof(*iommu), GFP_ATOMIC);
+	if (!iommu) {
+		prom_printf("SABRE: Error, kmalloc(pci_iommu) failed.\n");
+		prom_halt();
+	}
+	memset(iommu, 0, sizeof(*iommu));
+	p->pbm_A.iommu = p->pbm_B.iommu = iommu;
+
+	upa_portid = prom_getintdefault(pnode, "upa-portid", 0xff);
+
+	p->next = pci_controller_root;
+	pci_controller_root = p;
+
+	p->pbm_A.portid = upa_portid;
+	p->pbm_B.portid = upa_portid;
+	p->index = pci_num_controllers++;
+	p->pbms_same_domain = 1;
+	p->scan_bus = sabre_scan_bus;
+	p->irq_build = sabre_irq_build;
+	p->base_address_update = sabre_base_address_update;
+	p->resource_adjust = sabre_resource_adjust;
+	p->pci_ops = &sabre_ops;
+
+	/*
+	 * Map in SABRE register set and report the presence of this SABRE.
+	 */
+	err = prom_getproperty(pnode, "reg",
+			       (char *)&pr_regs[0], sizeof(pr_regs));
+	if(err == 0 || err == -1) {
+		prom_printf("SABRE: Error, cannot get U2P registers "
+			    "from PROM.\n");
+		prom_halt();
+	}
+
+	/*
+	 * First REG in property is base of entire SABRE register space.
+	 */
+	p->pbm_A.controller_regs = pr_regs[0].phys_addr;
+	p->pbm_B.controller_regs = pr_regs[0].phys_addr;
+	pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;
+
+	printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
+	       p->pbm_A.controller_regs, pci_dma_wsync);
+
+	/* Clear interrupts */
+
+	/* PCI first */
+	for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
+		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
+
+	/* Then OBIO */
+	for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
+		sabre_write(p->pbm_A.controller_regs + clear_irq, 0x0UL);
+
+	/* Error interrupts are enabled later after the bus scan. */
+	sabre_write(p->pbm_A.controller_regs + SABRE_PCICTRL,
+		    (SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR |
+		     SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN));
+
+	/* Now map in PCI config space for entire SABRE. */
+	p->pbm_A.config_space = p->pbm_B.config_space =
+		(p->pbm_A.controller_regs + SABRE_CONFIGSPACE);
+	printk("SABRE: Shared PCI config space at %016lx\n",
+	       p->pbm_A.config_space);
+
+	err = prom_getproperty(pnode, "virtual-dma",
+			       (char *)&vdma[0], sizeof(vdma));
+	if(err == 0 || err == -1) {
+		prom_printf("SABRE: Error, cannot get virtual-dma property "
+			    "from PROM.\n");
+		prom_halt();
+	}
+
+	/* vdma[0] is the DVMA base, vdma[1] the window size; the
+	 * window size selects the TSB size and DMA address mask.
+	 */
+	dma_mask = vdma[0];
+	switch(vdma[1]) {
+		case 0x20000000:
+			dma_mask |= 0x1fffffff;
+			tsbsize = 64;
+			break;
+		case 0x40000000:
+			dma_mask |= 0x3fffffff;
+			tsbsize = 128;
+			break;
+
+		case 0x80000000:
+			dma_mask |= 0x7fffffff;
+			tsbsize = 128;
+			break;
+		default:
+			prom_printf("SABRE: strange virtual-dma size.\n");
+			prom_halt();
+	}
+
+	sabre_iommu_init(p, tsbsize, vdma[0], dma_mask);
+
+	printk("SABRE: DVMA at %08x [%08x]\n", vdma[0], vdma[1]);
+
+	err = prom_getproperty(pnode, "bus-range",
+			       (char *)&busrange[0], sizeof(busrange));
+	if(err == 0 || err == -1) {
+		prom_printf("SABRE: Error, cannot get PCI bus-range "
+			    " from PROM.\n");
+		prom_halt();
+	}
+
+	p->pci_first_busno = busrange[0];
+	p->pci_last_busno = busrange[1];
+
+	/*
+	 * Look for APB underneath.
+	 */
+	sabre_pbm_init(p, pnode, vdma[0]);
+}
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
new file mode 100644
index 00000000000..e93fcadc372
--- /dev/null
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -0,0 +1,2187 @@
+/* $Id: pci_schizo.c,v 1.24 2002/01/23 11:27:32 davem Exp $
+ * pci_schizo.c: SCHIZO/TOMATILLO specific PCI controller support.
+ *
+ * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <asm/pbm.h>
+#include <asm/iommu.h>
+#include <asm/irq.h>
+#include <asm/upa.h>
+
+#include "pci_impl.h"
+#include "iommu_common.h"
+
+/* All SCHIZO registers are 64-bits. The following accessor
+ * routines are how they are accessed. The REG parameter
+ * is a physical address.
+ */
+#define schizo_read(__reg) \
+({ u64 __ret; \
+ __asm__ __volatile__("ldxa [%1] %2, %0" \
+ : "=r" (__ret) \
+ : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory"); \
+ __ret; \
+})
+#define schizo_write(__reg, __val) \
+ __asm__ __volatile__("stxa %0, [%1] %2" \
+ : /* no outputs */ \
+ : "r" (__val), "r" (__reg), \
+ "i" (ASI_PHYS_BYPASS_EC_E) \
+ : "memory")
+
+/* This is a convention that at least Excalibur and Merlin
+ * follow. I suppose the SCHIZO used in Starcat and friends
+ * will do similar.
+ *
+ * The only way I could see this changing is if the newlink
+ * block requires more space in Schizo's address space than
+ * they predicted, thus requiring an address space reorg when
+ * the newer Schizo is taped out.
+ */
+
+/* Streaming buffer control register. */
+#define SCHIZO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
+#define SCHIZO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
+#define SCHIZO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
+#define SCHIZO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
+#define SCHIZO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
+
+/* IOMMU control register. */
+#define SCHIZO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
+#define SCHIZO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
+#define SCHIZO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
+#define SCHIZO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
+#define SCHIZO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
+#define SCHIZO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
+#define SCHIZO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
+#define SCHIZO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
+#define SCHIZO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
+#define SCHIZO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
+#define SCHIZO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
+#define SCHIZO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
+
+/* Schizo config space address format is nearly identical to
+ * that of PSYCHO:
+ *
+ * 32 24 23 16 15 11 10 8 7 2 1 0
+ * ---------------------------------------------------------
+ * |0 0 0 0 0 0 0 0 0| bus | device | function | reg | 0 0 |
+ * ---------------------------------------------------------
+ */
+#define SCHIZO_CONFIG_BASE(PBM) ((PBM)->config_space)
+#define SCHIZO_CONFIG_ENCODE(BUS, DEVFN, REG) \
+ (((unsigned long)(BUS) << 16) | \
+ ((unsigned long)(DEVFN) << 8) | \
+ ((unsigned long)(REG)))
+
+/* Form the physical address of a config-space register for
+ * (BUS, DEVFN, WHERE) under PBM, using the encoding documented
+ * above.  BUS is normalized to be relative to the PBM's first
+ * bus number.  Returns NULL if PBM is NULL.
+ */
+static void *schizo_pci_config_mkaddr(struct pci_pbm_info *pbm,
+				      unsigned char bus,
+				      unsigned int devfn,
+				      int where)
+{
+	if (!pbm)
+		return NULL;
+	bus -= pbm->pci_first_busno;
+	return (void *)
+		(SCHIZO_CONFIG_BASE(pbm) |
+		 SCHIZO_CONFIG_ENCODE(bus, devfn, where));
+}
+
+/* Just make sure the bus number is in range. */
+static int schizo_out_of_range(struct pci_pbm_info *pbm,
+ unsigned char bus,
+ unsigned char devfn)
+{
+ if (bus < pbm->pci_first_busno ||
+ bus > pbm->pci_last_busno)
+ return 1;
+ return 0;
+}
+
+/* SCHIZO PCI configuration space accessors. */
+
+/* Config-space read accessor for the schizo_ops pci_ops table.
+ * *VALUE is pre-set to all-ones so that invalid/out-of-range
+ * accesses behave like reads from a missing device.  Misaligned
+ * accesses are reported but still return PCIBIOS_SUCCESSFUL.
+ */
+static int schizo_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+			       int where, int size, u32 *value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+	u16 tmp16;
+	u8 tmp8;
+
+	switch (size) {
+	case 1:
+		*value = 0xff;
+		break;
+	case 2:
+		*value = 0xffff;
+		break;
+	case 4:
+		*value = 0xffffffff;
+		break;
+	}
+
+	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	if (schizo_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+	switch (size) {
+	case 1:
+		pci_config_read8((u8 *)addr, &tmp8);
+		*value = tmp8;
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_read_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read16((u16 *)addr, &tmp16);
+		*value = tmp16;
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_read_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_read32(addr, value);
+		break;
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Config-space write accessor for the schizo_ops pci_ops table.
+ * Invalid addresses and out-of-range buses are silently ignored;
+ * misaligned accesses are reported and dropped.  Always returns
+ * PCIBIOS_SUCCESSFUL.
+ */
+static int schizo_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
+				int where, int size, u32 value)
+{
+	struct pci_pbm_info *pbm = bus_dev->sysdata;
+	unsigned char bus = bus_dev->number;
+	u32 *addr;
+
+	addr = schizo_pci_config_mkaddr(pbm, bus, devfn, where);
+	if (!addr)
+		return PCIBIOS_SUCCESSFUL;
+
+	if (schizo_out_of_range(pbm, bus, devfn))
+		return PCIBIOS_SUCCESSFUL;
+
+	switch (size) {
+	case 1:
+		pci_config_write8((u8 *)addr, value);
+		break;
+
+	case 2:
+		if (where & 0x01) {
+			printk("pci_write_config_word: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+		pci_config_write16((u16 *)addr, value);
+		break;
+
+	case 4:
+		if (where & 0x03) {
+			printk("pci_write_config_dword: misaligned reg [%x]\n",
+			       where);
+			return PCIBIOS_SUCCESSFUL;
+		}
+
+		pci_config_write32(addr, value);
+	}
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* pci_ops vector installed on SCHIZO/TOMATILLO controllers. */
+static struct pci_ops schizo_ops = {
+	.read =		schizo_read_pci_cfg,
+	.write =	schizo_write_pci_cfg,
+};
+
+/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
+ * imap/iclr registers are per-PBM.
+ */
+#define SCHIZO_IMAP_BASE 0x1000UL
+#define SCHIZO_ICLR_BASE 0x1400UL
+
+/* Offset of the 8-byte IMAP register for INO within a PBM's
+ * register block; IMAP registers are laid out consecutively
+ * from SCHIZO_IMAP_BASE.
+ */
+static unsigned long schizo_imap_offset(unsigned long ino)
+{
+	return SCHIZO_IMAP_BASE + (ino << 3UL);
+}
+
+/* Offset of the 8-byte ICLR register for INO within a PBM's
+ * register block; ICLR registers are laid out consecutively
+ * from SCHIZO_ICLR_BASE.
+ */
+static unsigned long schizo_iclr_offset(unsigned long ino)
+{
+	return SCHIZO_ICLR_BASE + (ino << 3UL);
+}
+
+/* PCI SCHIZO INO number to Sparc PIL level. This table only matters for
+ * INOs which will not have an associated PCI device struct, ie. onboard
+ * EBUS devices and PCI controller internal error interrupts.
+ */
+static unsigned char schizo_pil_table[] = {
+/*0x00*/0, 0, 0, 0, /* PCI slot 0 Int A, B, C, D */
+/*0x04*/0, 0, 0, 0, /* PCI slot 1 Int A, B, C, D */
+/*0x08*/0, 0, 0, 0, /* PCI slot 2 Int A, B, C, D */
+/*0x0c*/0, 0, 0, 0, /* PCI slot 3 Int A, B, C, D */
+/*0x10*/0, 0, 0, 0, /* PCI slot 4 Int A, B, C, D */
+/*0x14*/0, 0, 0, 0, /* PCI slot 5 Int A, B, C, D */
+/*0x18*/4, /* SCSI */
+/*0x19*/4, /* second SCSI */
+/*0x1a*/0, /* UNKNOWN */
+/*0x1b*/0, /* UNKNOWN */
+/*0x1c*/8, /* Parallel */
+/*0x1d*/5, /* Ethernet */
+/*0x1e*/8, /* Firewire-1394 */
+/*0x1f*/9, /* USB */
+/*0x20*/13, /* Audio Record */
+/*0x21*/14, /* Audio Playback */
+/*0x22*/12, /* Serial */
+/*0x23*/4, /* EBUS I2C */
+/*0x24*/10, /* RTC Clock */
+/*0x25*/11, /* Floppy */
+/*0x26*/0, /* UNKNOWN */
+/*0x27*/0, /* UNKNOWN */
+/*0x28*/0, /* UNKNOWN */
+/*0x29*/0, /* UNKNOWN */
+/*0x2a*/10, /* UPA 1 */
+/*0x2b*/10, /* UPA 2 */
+/*0x2c*/0, /* UNKNOWN */
+/*0x2d*/0, /* UNKNOWN */
+/*0x2e*/0, /* UNKNOWN */
+/*0x2f*/0, /* UNKNOWN */
+/*0x30*/15, /* Uncorrectable ECC */
+/*0x31*/15, /* Correctable ECC */
+/*0x32*/15, /* PCI Bus A Error */
+/*0x33*/15, /* PCI Bus B Error */
+/*0x34*/15, /* Safari Bus Error */
+/*0x35*/0, /* Reserved */
+/*0x36*/0, /* Reserved */
+/*0x37*/0, /* Reserved */
+/*0x38*/0, /* Reserved for NewLink */
+/*0x39*/0, /* Reserved for NewLink */
+/*0x3a*/0, /* Reserved for NewLink */
+/*0x3b*/0, /* Reserved for NewLink */
+/*0x3c*/0, /* Reserved for NewLink */
+/*0x3d*/0, /* Reserved for NewLink */
+/*0x3e*/0, /* Reserved for NewLink */
+/*0x3f*/0, /* Reserved for NewLink */
+};
+
+/* Map an INO to a Sparc PIL level.  Sun RIO USB devices are
+ * special-cased to PIL 9.  Otherwise the static table above is
+ * consulted; a zero table entry means the INO belongs to a PCI
+ * slot device, so the PIL is chosen from the device's PCI base
+ * class (defaulting to 4 when no device is given).
+ *
+ * Fix vs. original: removed the stray ';' after the switch's
+ * closing brace (an extraneous empty statement that trips
+ * -pedantic builds).
+ *
+ * NOTE(review): this is marked __init but is called from
+ * schizo_irq_build(), which is installed as p->irq_build and
+ * looks callable after init memory is freed -- confirm whether
+ * the __init annotation is safe here.
+ */
+static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
+{
+	int ret;
+
+	if (pdev &&
+	    pdev->vendor == PCI_VENDOR_ID_SUN &&
+	    pdev->device == PCI_DEVICE_ID_SUN_RIO_USB)
+		return 9;
+
+	ret = schizo_pil_table[ino];
+	if (ret == 0 && pdev == NULL) {
+		ret = 4;
+	} else if (ret == 0) {
+		switch ((pdev->class >> 16) & 0xff) {
+		case PCI_BASE_CLASS_STORAGE:
+			ret = 4;
+			break;
+
+		case PCI_BASE_CLASS_NETWORK:
+			ret = 6;
+			break;
+
+		case PCI_BASE_CLASS_DISPLAY:
+			ret = 9;
+			break;
+
+		case PCI_BASE_CLASS_MULTIMEDIA:
+		case PCI_BASE_CLASS_MEMORY:
+		case PCI_BASE_CLASS_BRIDGE:
+		case PCI_BASE_CLASS_SERIAL:
+			ret = 10;
+			break;
+
+		default:
+			ret = 4;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* Build an IRQ for INO on PBM, optionally on behalf of PDEV.
+ * Computes the per-PBM IMAP/ICLR register addresses, picks a PIL
+ * via schizo_ino_to_pil(), and constructs the ino_bucket marked
+ * as a PCI interrupt.
+ */
+static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
+				     struct pci_dev *pdev,
+				     unsigned int ino)
+{
+	struct ino_bucket *bucket;
+	unsigned long imap, iclr;
+	unsigned long imap_off, iclr_off;
+	int pil, ign_fixup;
+
+	ino &= PCI_IRQ_INO;
+	imap_off = schizo_imap_offset(ino);
+
+	/* Now build the IRQ bucket. */
+	pil = schizo_ino_to_pil(pdev, ino);
+
+	if (PIL_RESERVED(pil))
+		BUG();
+
+	/* NOTE(review): the "+= 4" offsets look like they select
+	 * the low 32-bit word of the 64-bit IMAP/ICLR registers
+	 * for the 32-bit upa_* accessors -- confirm.
+	 */
+	imap = pbm->pbm_regs + imap_off;
+	imap += 4;
+
+	iclr_off = schizo_iclr_offset(ino);
+	iclr = pbm->pbm_regs + iclr_off;
+	iclr += 4;
+
+	/* On Schizo, no inofixup occurs.  This is because each
+	 * INO has its own IMAP register.  On Psycho and Sabre
+	 * there is only one IMAP register for each PCI slot even
+	 * though four different INOs can be generated by each
+	 * PCI slot.
+	 *
+	 * But, for JBUS variants (essentially, Tomatillo), we have
+	 * to fixup the lowest bit of the interrupt group number.
+	 */
+	ign_fixup = 0;
+	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
+		if (pbm->portid & 1)
+			ign_fixup = (1 << 6);
+	}
+
+	bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap));
+	bucket->flags |= IBF_PCI;
+
+	return __irq(bucket);
+}
+
+/* SCHIZO error handling support. */
+enum schizo_error_type {
+ UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
+};
+
+static DEFINE_SPINLOCK(stc_buf_lock);
+static unsigned long stc_error_buf[128];
+static unsigned long stc_tag_buf[16];
+static unsigned long stc_line_buf[16];
+
+#define SCHIZO_UE_INO 0x30 /* Uncorrectable ECC error */
+#define SCHIZO_CE_INO 0x31 /* Correctable ECC error */
+#define SCHIZO_PCIERR_A_INO 0x32 /* PBM A PCI bus error */
+#define SCHIZO_PCIERR_B_INO 0x33 /* PBM B PCI bus error */
+#define SCHIZO_SERR_INO 0x34 /* Safari interface error */
+
+/* Return the PBM whose ino_bitmap claims INO.  If neither PBM
+ * claims it, complain loudly and fall back to PBM A rather than
+ * failing.
+ */
+struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
+{
+	ino &= IMAP_INO;
+	if (p->pbm_A.ino_bitmap & (1UL << ino))
+		return &p->pbm_A;
+	if (p->pbm_B.ino_bitmap & (1UL << ino))
+		return &p->pbm_B;
+
+	printk("PCI%d: No ino_bitmap entry for ino[%x], bitmaps "
+	       "PBM_A[%016lx] PBM_B[%016lx]",
+	       p->index, ino,
+	       p->pbm_A.ino_bitmap,
+	       p->pbm_B.ino_bitmap);
+	printk("PCI%d: Using PBM_A, report this problem immediately.\n",
+	       p->index);
+
+	return &p->pbm_A;
+}
+
+/* For chip-wide error interrupts (anything other than the
+ * per-bus PCIERR INOs), write ICLR_IDLE to the *other* PBM's
+ * ICLR register so that the same error does not remain latched
+ * on the sibling PBM.
+ */
+static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
+{
+	struct pci_pbm_info *pbm;
+	struct ino_bucket *bucket;
+	unsigned long iclr;
+
+	/* Do not clear the interrupt for the other PCI bus.
+	 *
+	 * This "ACK both PBM IRQs" only needs to be performed
+	 * for chip-wide error interrupts.
+	 */
+	if ((irq & IMAP_INO) == SCHIZO_PCIERR_A_INO ||
+	    (irq & IMAP_INO) == SCHIZO_PCIERR_B_INO)
+		return;
+
+	/* Swap to the sibling PBM. */
+	pbm = pbm_for_ino(p, irq);
+	if (pbm == &p->pbm_A)
+		pbm = &p->pbm_B;
+	else
+		pbm = &p->pbm_A;
+
+	irq = schizo_irq_build(pbm, NULL,
+			       (pbm->portid << 6) | (irq & IMAP_INO));
+	bucket = __bucket(irq);
+	iclr = bucket->iclr;
+
+	upa_writel(ICLR_IDLE, iclr);
+}
+
+/* Streaming-cache diagnostic register offsets (relative to the PBM
+ * register base) and field masks for the error, tag, and line arrays.
+ */
+#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */
+#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */
+#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */
+
+/* Per-entry error bits in the STC error array. */
+#define SCHIZO_STCERR_WRITE 0x2UL
+#define SCHIZO_STCERR_READ 0x1UL
+
+/* STC tag array fields. */
+#define SCHIZO_STCTAG_PPN 0x3fffffff00000000UL
+#define SCHIZO_STCTAG_VPN 0x00000000ffffe000UL
+#define SCHIZO_STCTAG_VALID 0x8000000000000000UL
+#define SCHIZO_STCTAG_READ 0x4000000000000000UL
+
+/* STC line array fields. */
+#define SCHIZO_STCLINE_LINDX 0x0000000007800000UL
+#define SCHIZO_STCLINE_SPTR 0x000000000007e000UL
+#define SCHIZO_STCLINE_LADDR 0x0000000000001fc0UL
+#define SCHIZO_STCLINE_EPTR 0x000000000000003fUL
+#define SCHIZO_STCLINE_VALID 0x0000000000600000UL
+#define SCHIZO_STCLINE_FOFN 0x0000000000180000UL
+
+/* Snapshot and clear this PBM's streaming-cache diagnostic state,
+ * then print any entries whose error array recorded a read or write
+ * fault.  Caller must ensure the PBM's streaming buffer is enabled.
+ * The @type argument is currently unused here.
+ */
+static void __schizo_check_stc_error_pbm(struct pci_pbm_info *pbm,
+					 enum schizo_error_type type)
+{
+	struct pci_strbuf *strbuf = &pbm->stc;
+	unsigned long regbase = pbm->pbm_regs;
+	unsigned long err_base, tag_base, line_base;
+	u64 control;
+	int i;
+
+	err_base = regbase + SCHIZO_STC_ERR;
+	tag_base = regbase + SCHIZO_STC_TAG;
+	line_base = regbase + SCHIZO_STC_LINE;
+
+	spin_lock(&stc_buf_lock);
+
+	/* This is __REALLY__ dangerous.  When we put the
+	 * streaming buffer into diagnostic mode to probe
+	 * its tags and error status, we _must_ clear all
+	 * of the line tag valid bits before re-enabling
+	 * the streaming buffer.  If any dirty data lives
+	 * in the STC when we do this, we will end up
+	 * invalidating it before it has a chance to reach
+	 * main memory.
+	 */
+	control = schizo_read(strbuf->strbuf_control);
+	schizo_write(strbuf->strbuf_control,
+		     (control | SCHIZO_STRBUF_CTRL_DENAB));
+	/* Read-then-clear each of the 128 error array entries. */
+	for (i = 0; i < 128; i++) {
+		unsigned long val;
+
+		val = schizo_read(err_base + (i * 8UL));
+		schizo_write(err_base + (i * 8UL), 0UL);
+		stc_error_buf[i] = val;
+	}
+	/* Snapshot and clear the 16 tag/line entries. */
+	for (i = 0; i < 16; i++) {
+		stc_tag_buf[i] = schizo_read(tag_base + (i * 8UL));
+		stc_line_buf[i] = schizo_read(line_base + (i * 8UL));
+		schizo_write(tag_base + (i * 8UL), 0UL);
+		schizo_write(line_base + (i * 8UL), 0UL);
+	}
+
+	/* OK, state is logged, exit diagnostic mode. */
+	schizo_write(strbuf->strbuf_control, control);
+
+	/* Each tag/line entry covers 8 consecutive error entries;
+	 * report the tag/line only if one of its 8 had an error.
+	 */
+	for (i = 0; i < 16; i++) {
+		int j, saw_error, first, last;
+
+		saw_error = 0;
+		first = i * 8;
+		last = first + 8;
+		for (j = first; j < last; j++) {
+			unsigned long errval = stc_error_buf[j];
+			if (errval != 0) {
+				saw_error++;
+				printk("%s: STC_ERR(%d)[wr(%d)rd(%d)]\n",
+				       pbm->name,
+				       j,
+				       (errval & SCHIZO_STCERR_WRITE) ? 1 : 0,
+				       (errval & SCHIZO_STCERR_READ) ? 1 : 0);
+			}
+		}
+		if (saw_error != 0) {
+			unsigned long tagval = stc_tag_buf[i];
+			unsigned long lineval = stc_line_buf[i];
+			printk("%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)V(%d)R(%d)]\n",
+			       pbm->name,
+			       i,
+			       ((tagval & SCHIZO_STCTAG_PPN) >> 19UL),
+			       (tagval & SCHIZO_STCTAG_VPN),
+			       ((tagval & SCHIZO_STCTAG_VALID) ? 1 : 0),
+			       ((tagval & SCHIZO_STCTAG_READ) ? 1 : 0));
+
+			/* XXX Should spit out per-bank error information... -DaveM */
+			printk("%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)LADDR(%lx)EP(%lx)"
+			       "V(%d)FOFN(%d)]\n",
+			       pbm->name,
+			       i,
+			       ((lineval & SCHIZO_STCLINE_LINDX) >> 23UL),
+			       ((lineval & SCHIZO_STCLINE_SPTR) >> 13UL),
+			       ((lineval & SCHIZO_STCLINE_LADDR) >> 6UL),
+			       ((lineval & SCHIZO_STCLINE_EPTR) >> 0UL),
+			       ((lineval & SCHIZO_STCLINE_VALID) ? 1 : 0),
+			       ((lineval & SCHIZO_STCLINE_FOFN) ? 1 : 0));
+		}
+	}
+
+	spin_unlock(&stc_buf_lock);
+}
+
+/* IOMMU is per-PBM in Schizo, so interrogate both for anonymous
+ * controller level errors.
+ */
+
+/* IOMMU diagnostic TLB tag/data array offsets (PBM relative). */
+#define SCHIZO_IOMMU_TAG 0xa580UL
+#define SCHIZO_IOMMU_DATA 0xa600UL
+
+/* IOMMU TLB tag fields. */
+#define SCHIZO_IOMMU_TAG_CTXT 0x0000001ffe000000UL
+#define SCHIZO_IOMMU_TAG_ERRSTS 0x0000000001800000UL
+#define SCHIZO_IOMMU_TAG_ERR 0x0000000000400000UL
+#define SCHIZO_IOMMU_TAG_WRITE 0x0000000000200000UL
+#define SCHIZO_IOMMU_TAG_STREAM 0x0000000000100000UL
+#define SCHIZO_IOMMU_TAG_SIZE 0x0000000000080000UL
+#define SCHIZO_IOMMU_TAG_VPAGE 0x000000000007ffffUL
+
+/* IOMMU TLB data fields. */
+#define SCHIZO_IOMMU_DATA_VALID 0x0000000100000000UL
+#define SCHIZO_IOMMU_DATA_CACHE 0x0000000040000000UL
+#define SCHIZO_IOMMU_DATA_PPAGE 0x000000003fffffffUL
+
+static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
+ enum schizo_error_type type)
+{
+ struct pci_iommu *iommu = pbm->iommu;
+ unsigned long iommu_tag[16];
+ unsigned long iommu_data[16];
+ unsigned long flags;
+ u64 control;
+ int i;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ control = schizo_read(iommu->iommu_control);
+ if (control & SCHIZO_IOMMU_CTRL_XLTEERR) {
+ unsigned long base;
+ char *type_string;
+
+ /* Clear the error encountered bit. */
+ control &= ~SCHIZO_IOMMU_CTRL_XLTEERR;
+ schizo_write(iommu->iommu_control, control);
+
+ switch((control & SCHIZO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
+ case 0:
+ type_string = "Protection Error";
+ break;
+ case 1:
+ type_string = "Invalid Error";
+ break;
+ case 2:
+ type_string = "TimeOut Error";
+ break;
+ case 3:
+ default:
+ type_string = "ECC Error";
+ break;
+ };
+ printk("%s: IOMMU Error, type[%s]\n",
+ pbm->name, type_string);
+
+ /* Put the IOMMU into diagnostic mode and probe
+ * it's TLB for entries with error status.
+ *
+ * It is very possible for another DVMA to occur
+ * while we do this probe, and corrupt the system
+ * further. But we are so screwed at this point
+ * that we are likely to crash hard anyways, so
+ * get as much diagnostic information to the
+ * console as we can.
+ */
+ schizo_write(iommu->iommu_control,
+ control | SCHIZO_IOMMU_CTRL_DENAB);
+
+ base = pbm->pbm_regs;
+
+ for (i = 0; i < 16; i++) {
+ iommu_tag[i] =
+ schizo_read(base + SCHIZO_IOMMU_TAG + (i * 8UL));
+ iommu_data[i] =
+ schizo_read(base + SCHIZO_IOMMU_DATA + (i * 8UL));
+
+ /* Now clear out the entry. */
+ schizo_write(base + SCHIZO_IOMMU_TAG + (i * 8UL), 0);
+ schizo_write(base + SCHIZO_IOMMU_DATA + (i * 8UL), 0);
+ }
+
+ /* Leave diagnostic mode. */
+ schizo_write(iommu->iommu_control, control);
+
+ for (i = 0; i < 16; i++) {
+ unsigned long tag, data;
+
+ tag = iommu_tag[i];
+ if (!(tag & SCHIZO_IOMMU_TAG_ERR))
+ continue;
+
+ data = iommu_data[i];
+ switch((tag & SCHIZO_IOMMU_TAG_ERRSTS) >> 23UL) {
+ case 0:
+ type_string = "Protection Error";
+ break;
+ case 1:
+ type_string = "Invalid Error";
+ break;
+ case 2:
+ type_string = "TimeOut Error";
+ break;
+ case 3:
+ default:
+ type_string = "ECC Error";
+ break;
+ };
+ printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
+ "sz(%dK) vpg(%08lx)]\n",
+ pbm->name, i, type_string,
+ (int)((tag & SCHIZO_IOMMU_TAG_CTXT) >> 25UL),
+ ((tag & SCHIZO_IOMMU_TAG_WRITE) ? 1 : 0),
+ ((tag & SCHIZO_IOMMU_TAG_STREAM) ? 1 : 0),
+ ((tag & SCHIZO_IOMMU_TAG_SIZE) ? 64 : 8),
+ (tag & SCHIZO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
+ printk("%s: IOMMU DATA(%d)[valid(%d) cache(%d) ppg(%016lx)]\n",
+ pbm->name, i,
+ ((data & SCHIZO_IOMMU_DATA_VALID) ? 1 : 0),
+ ((data & SCHIZO_IOMMU_DATA_CACHE) ? 1 : 0),
+ (data & SCHIZO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
+ }
+ }
+ if (pbm->stc.strbuf_enabled)
+ __schizo_check_stc_error_pbm(pbm, type);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static void schizo_check_iommu_error(struct pci_controller_info *p,
+ enum schizo_error_type type)
+{
+ schizo_check_iommu_error_pbm(&p->pbm_A, type);
+ schizo_check_iommu_error_pbm(&p->pbm_B, type);
+}
+
+/* Uncorrectable ECC error status gathering. */
+#define SCHIZO_UE_AFSR 0x10030UL
+#define SCHIZO_UE_AFAR 0x10038UL
+
+/* UE AFSR bits: P* are primary errors, S* are secondary errors,
+ * the rest are diagnostic fields extracted by the handler below.
+ */
+#define SCHIZO_UEAFSR_PPIO 0x8000000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_PDRD 0x4000000000000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_PDWR 0x2000000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_SPIO 0x1000000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_SDMA 0x0800000000000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_ERRPNDG 0x0300000000000000UL /* Safari */
+#define SCHIZO_UEAFSR_BMSK 0x000003ff00000000UL /* Safari */
+#define SCHIZO_UEAFSR_QOFF 0x00000000c0000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_AID 0x000000001f000000UL /* Safari/Tomatillo */
+#define SCHIZO_UEAFSR_PARTIAL 0x0000000000800000UL /* Safari */
+#define SCHIZO_UEAFSR_OWNEDIN 0x0000000000400000UL /* Safari */
+#define SCHIZO_UEAFSR_MTAGSYND 0x00000000000f0000UL /* Safari */
+#define SCHIZO_UEAFSR_MTAG 0x000000000000e000UL /* Safari */
+#define SCHIZO_UEAFSR_ECCSYND 0x00000000000001ffUL /* Safari */
+
+/* Uncorrectable ECC error interrupt handler.  Latches AFAR/AFSR,
+ * clears the error bits (write-to-clear), logs a decode of the
+ * status, then interrogates the IOMMUs for related damage.
+ *
+ * NOTE(review): reads the AFSR/AFAR through pbm_B.controller_regs —
+ * presumably both PBMs share the chip-wide controller register
+ * block; confirm against the mapping code.
+ */
+static irqreturn_t schizo_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFSR;
+	unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_UE_AFAR;
+	unsigned long afsr, afar, error_bits;
+	int reported, limit;
+
+	/* Latch uncorrectable error status. */
+	afar = schizo_read(afar_reg);
+
+	/* If either of the error pending bits are set in the
+	 * AFSR, the error status is being actively updated by
+	 * the hardware and we must re-read to get a clean value.
+	 */
+	limit = 1000;
+	do {
+		afsr = schizo_read(afsr_reg);
+	} while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
+
+	/* Clear the primary/secondary error status bits. */
+	error_bits = afsr &
+		(SCHIZO_UEAFSR_PPIO | SCHIZO_UEAFSR_PDRD | SCHIZO_UEAFSR_PDWR |
+		 SCHIZO_UEAFSR_SPIO | SCHIZO_UEAFSR_SDMA);
+	if (!error_bits)
+		return IRQ_NONE;
+	schizo_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("PCI%d: Uncorrectable Error, primary error type[%s]\n",
+	       p->index,
+	       (((error_bits & SCHIZO_UEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SCHIZO_UEAFSR_PDRD) ?
+		  "DMA Read" :
+		  ((error_bits & SCHIZO_UEAFSR_PDWR) ?
+		   "DMA Write" : "???")))));
+	printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
+	       (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
+	printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
+	       p->index,
+	       (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
+	       (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
+	       (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
+	       (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
+	       (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
+	printk("PCI%d: UE AFAR [%016lx]\n", p->index, afar);
+	printk("PCI%d: UE Secondary errors [", p->index);
+	reported = 0;
+	if (afsr & SCHIZO_UEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SCHIZO_UEAFSR_SDMA) {
+		reported++;
+		printk("(DMA)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* Interrogate IOMMU for error status. */
+	schizo_check_iommu_error(p, UE_ERR);
+
+	/* ACK the sibling PBM's copy of this chip-wide interrupt. */
+	schizo_clear_other_err_intr(p, irq);
+
+	return IRQ_HANDLED;
+}
+
+/* Correctable ECC error status registers; the CEAFSR bit layout
+ * mirrors the UEAFSR layout above bit-for-bit.
+ */
+#define SCHIZO_CE_AFSR 0x10040UL
+#define SCHIZO_CE_AFAR 0x10048UL
+
+#define SCHIZO_CEAFSR_PPIO 0x8000000000000000UL
+#define SCHIZO_CEAFSR_PDRD 0x4000000000000000UL
+#define SCHIZO_CEAFSR_PDWR 0x2000000000000000UL
+#define SCHIZO_CEAFSR_SPIO 0x1000000000000000UL
+#define SCHIZO_CEAFSR_SDMA 0x0800000000000000UL
+#define SCHIZO_CEAFSR_ERRPNDG 0x0300000000000000UL
+#define SCHIZO_CEAFSR_BMSK 0x000003ff00000000UL
+#define SCHIZO_CEAFSR_QOFF 0x00000000c0000000UL
+#define SCHIZO_CEAFSR_AID 0x000000001f000000UL
+#define SCHIZO_CEAFSR_PARTIAL 0x0000000000800000UL
+#define SCHIZO_CEAFSR_OWNEDIN 0x0000000000400000UL
+#define SCHIZO_CEAFSR_MTAGSYND 0x00000000000f0000UL
+#define SCHIZO_CEAFSR_MTAG 0x000000000000e000UL
+#define SCHIZO_CEAFSR_ECCSYND 0x00000000000001ffUL
+
+static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct pci_controller_info *p = dev_id;
+ unsigned long afsr_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFSR;
+ unsigned long afar_reg = p->pbm_B.controller_regs + SCHIZO_CE_AFAR;
+ unsigned long afsr, afar, error_bits;
+ int reported, limit;
+
+ /* Latch error status. */
+ afar = schizo_read(afar_reg);
+
+ /* If either of the error pending bits are set in the
+ * AFSR, the error status is being actively updated by
+ * the hardware and we must re-read to get a clean value.
+ */
+ limit = 1000;
+ do {
+ afsr = schizo_read(afsr_reg);
+ } while ((afsr & SCHIZO_UEAFSR_ERRPNDG) != 0 && --limit);
+
+ /* Clear primary/secondary error status bits. */
+ error_bits = afsr &
+ (SCHIZO_CEAFSR_PPIO | SCHIZO_CEAFSR_PDRD | SCHIZO_CEAFSR_PDWR |
+ SCHIZO_CEAFSR_SPIO | SCHIZO_CEAFSR_SDMA);
+ if (!error_bits)
+ return IRQ_NONE;
+ schizo_write(afsr_reg, error_bits);
+
+ /* Log the error. */
+ printk("PCI%d: Correctable Error, primary error type[%s]\n",
+ p->index,
+ (((error_bits & SCHIZO_CEAFSR_PPIO) ?
+ "PIO" :
+ ((error_bits & SCHIZO_CEAFSR_PDRD) ?
+ "DMA Read" :
+ ((error_bits & SCHIZO_CEAFSR_PDWR) ?
+ "DMA Write" : "???")))));
+
+ /* XXX Use syndrome and afar to print out module string just like
+ * XXX UDB CE trap handler does... -DaveM
+ */
+ printk("PCI%d: bytemask[%04lx] qword_offset[%lx] SAFARI_AID[%02lx]\n",
+ p->index,
+ (afsr & SCHIZO_UEAFSR_BMSK) >> 32UL,
+ (afsr & SCHIZO_UEAFSR_QOFF) >> 30UL,
+ (afsr & SCHIZO_UEAFSR_AID) >> 24UL);
+ printk("PCI%d: partial[%d] owned_in[%d] mtag[%lx] mtag_synd[%lx] ecc_sync[%lx]\n",
+ p->index,
+ (afsr & SCHIZO_UEAFSR_PARTIAL) ? 1 : 0,
+ (afsr & SCHIZO_UEAFSR_OWNEDIN) ? 1 : 0,
+ (afsr & SCHIZO_UEAFSR_MTAG) >> 13UL,
+ (afsr & SCHIZO_UEAFSR_MTAGSYND) >> 16UL,
+ (afsr & SCHIZO_UEAFSR_ECCSYND) >> 0UL);
+ printk("PCI%d: CE AFAR [%016lx]\n", p->index, afar);
+ printk("PCI%d: CE Secondary errors [", p->index);
+ reported = 0;
+ if (afsr & SCHIZO_CEAFSR_SPIO) {
+ reported++;
+ printk("(PIO)");
+ }
+ if (afsr & SCHIZO_CEAFSR_SDMA) {
+ reported++;
+ printk("(DMA)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+
+ schizo_clear_other_err_intr(p, irq);
+
+ return IRQ_HANDLED;
+}
+
+/* PCI bus error status registers (PBM relative). */
+#define SCHIZO_PCI_AFSR 0x2010UL
+#define SCHIZO_PCI_AFAR 0x2018UL
+
+/* PCI AFSR bits: P* primary, S* secondary, plus transaction
+ * attribute fields (bytemask, block, address space).
+ */
+#define SCHIZO_PCIAFSR_PMA 0x8000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PTA 0x4000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PRTRY 0x2000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PPERR 0x1000000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PTTO 0x0800000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_PUNUS 0x0400000000000000UL /* Schizo */
+#define SCHIZO_PCIAFSR_SMA 0x0200000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_STA 0x0100000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_SRTRY 0x0080000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_SPERR 0x0040000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_STTO 0x0020000000000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_SUNUS 0x0010000000000000UL /* Schizo */
+#define SCHIZO_PCIAFSR_BMSK 0x000003ff00000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_BLK 0x0000000080000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_CFG 0x0000000040000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_MEM 0x0000000020000000UL /* Schizo/Tomatillo */
+#define SCHIZO_PCIAFSR_IO 0x0000000010000000UL /* Schizo/Tomatillo */
+
+/* PCI control register; bits annotated with the bus variant
+ * (Safari = Schizo, Tomatillo = JBUS) they apply to.
+ */
+#define SCHIZO_PCI_CTRL (0x2000UL)
+#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */
+#define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */
+#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */
+#define SCHIZO_PCICTRL_TTO_ERR (1UL << 38UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_RTRY_ERR (1UL << 37UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_DTO_ERR (1UL << 36UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_SBH_ERR (1UL << 35UL) /* Safari */
+#define SCHIZO_PCICTRL_SERR (1UL << 34UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PCISPD (1UL << 33UL) /* Safari */
+#define SCHIZO_PCICTRL_MRM_PREF (1UL << 30UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_RDO_PREF (1UL << 29UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_RDL_PREF (1UL << 28UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_PTO (3UL << 24UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PTO_SHIFT 24UL
+#define SCHIZO_PCICTRL_TRWSW (7UL << 21UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_F_TGT_A (1UL << 20UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_S_DTO_INT (1UL << 19UL) /* Safari */
+#define SCHIZO_PCICTRL_F_TGT_RT (1UL << 19UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_SBH_INT (1UL << 18UL) /* Safari */
+#define SCHIZO_PCICTRL_T_DTO_INT (1UL << 18UL) /* Tomatillo */
+#define SCHIZO_PCICTRL_EEN (1UL << 17UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PARK (1UL << 16UL) /* Safari/Tomatillo */
+#define SCHIZO_PCICTRL_PCIRST (1UL << 8UL) /* Safari */
+#define SCHIZO_PCICTRL_ARB_S (0x3fUL << 0UL) /* Safari */
+#define SCHIZO_PCICTRL_ARB_T (0xffUL << 0UL) /* Tomatillo */
+
+/* Fallback path when the PCI AFSR showed no error: check the PBM's
+ * PCI control register and the bridge's PCI_STATUS config register
+ * for error conditions, clearing and logging any found.  Returns
+ * IRQ_HANDLED if anything was found, else IRQ_NONE.
+ */
+static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
+{
+	unsigned long csr_reg, csr, csr_error_bits;
+	irqreturn_t ret = IRQ_NONE;
+	u16 stat;
+
+	csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
+	csr = schizo_read(csr_reg);
+	csr_error_bits =
+		csr & (SCHIZO_PCICTRL_BUS_UNUS |
+		       SCHIZO_PCICTRL_TTO_ERR |
+		       SCHIZO_PCICTRL_RTRY_ERR |
+		       SCHIZO_PCICTRL_DTO_ERR |
+		       SCHIZO_PCICTRL_SBH_ERR |
+		       SCHIZO_PCICTRL_SERR);
+	if (csr_error_bits) {
+		/* Clear the errors.  (The error bits are
+		 * write-one-to-clear, so writing back the value we
+		 * read clears exactly the bits that were set.)
+		 */
+		schizo_write(csr_reg, csr);
+
+		/* Log 'em. */
+		if (csr_error_bits & SCHIZO_PCICTRL_BUS_UNUS)
+			printk("%s: Bus unusable error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_TTO_ERR)
+			printk("%s: PCI TRDY# timeout error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_RTRY_ERR)
+			printk("%s: PCI excessive retry error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_DTO_ERR)
+			printk("%s: PCI discard timeout error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_SBH_ERR)
+			printk("%s: PCI streaming byte hole error asserted.\n",
+			       pbm->name);
+		if (csr_error_bits & SCHIZO_PCICTRL_SERR)
+			printk("%s: PCI SERR signal asserted.\n",
+			       pbm->name);
+		ret = IRQ_HANDLED;
+	}
+	/* Also check/clear error bits latched in the bridge's own
+	 * PCI status configuration register.
+	 */
+	pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+	if (stat & (PCI_STATUS_PARITY |
+		    PCI_STATUS_SIG_TARGET_ABORT |
+		    PCI_STATUS_REC_TARGET_ABORT |
+		    PCI_STATUS_REC_MASTER_ABORT |
+		    PCI_STATUS_SIG_SYSTEM_ERROR)) {
+		printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
+		       pbm->name, stat);
+		pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+/* Per-PBM PCI bus error interrupt handler.  Latches AFAR then AFSR,
+ * clears the error bits (write-to-clear), logs a decode, and for
+ * abort/parity errors scans the PBM's bus for devices that logged
+ * the matching condition.
+ */
+static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_pbm_info *pbm = dev_id;
+	struct pci_controller_info *p = pbm->parent;
+	unsigned long afsr_reg, afar_reg, base;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	base = pbm->pbm_regs;
+
+	afsr_reg = base + SCHIZO_PCI_AFSR;
+	afar_reg = base + SCHIZO_PCI_AFAR;
+
+	/* Latch error status. */
+	afar = schizo_read(afar_reg);
+	afsr = schizo_read(afsr_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+		 SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+		 SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
+		 SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+		 SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+		 SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS);
+	/* Nothing in the AFSR; fall back to the control/status
+	 * register checks.
+	 */
+	if (!error_bits)
+		return schizo_pcierr_intr_other(pbm);
+	schizo_write(afsr_reg, error_bits);
+
+	/* Log the error. */
+	printk("%s: PCI Error, primary error type[%s]\n",
+	       pbm->name,
+	       (((error_bits & SCHIZO_PCIAFSR_PMA) ?
+		 "Master Abort" :
+		 ((error_bits & SCHIZO_PCIAFSR_PTA) ?
+		  "Target Abort" :
+		  ((error_bits & SCHIZO_PCIAFSR_PRTRY) ?
+		   "Excessive Retries" :
+		   ((error_bits & SCHIZO_PCIAFSR_PPERR) ?
+		    "Parity Error" :
+		    ((error_bits & SCHIZO_PCIAFSR_PTTO) ?
+		     "Timeout" :
+		     ((error_bits & SCHIZO_PCIAFSR_PUNUS) ?
+		      "Bus Unusable" : "???"))))))));
+	printk("%s: bytemask[%04lx] was_block(%d) space(%s)\n",
+	       pbm->name,
+	       (afsr & SCHIZO_PCIAFSR_BMSK) >> 32UL,
+	       (afsr & SCHIZO_PCIAFSR_BLK) ? 1 : 0,
+	       ((afsr & SCHIZO_PCIAFSR_CFG) ?
+		"Config" :
+		((afsr & SCHIZO_PCIAFSR_MEM) ?
+		 "Memory" :
+		 ((afsr & SCHIZO_PCIAFSR_IO) ?
+		  "I/O" : "???"))));
+	printk("%s: PCI AFAR [%016lx]\n",
+	       pbm->name, afar);
+	printk("%s: PCI Secondary errors [",
+	       pbm->name);
+	reported = 0;
+	if (afsr & SCHIZO_PCIAFSR_SMA) {
+		reported++;
+		printk("(Master Abort)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_STA) {
+		reported++;
+		printk("(Target Abort)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SRTRY) {
+		reported++;
+		printk("(Excessive Retries)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SPERR) {
+		reported++;
+		printk("(Parity Error)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_STTO) {
+		reported++;
+		printk("(Timeout)");
+	}
+	if (afsr & SCHIZO_PCIAFSR_SUNUS) {
+		reported++;
+		printk("(Bus Unusable)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* For the error types shown, scan PBM's PCI bus for devices
+	 * which have logged that error type.
+	 */
+
+	/* If we see a Target Abort, this could be the result of an
+	 * IOMMU translation error of some sort.  It is extremely
+	 * useful to log this information as usually it indicates
+	 * a bug in the IOMMU support code or a PCI device driver.
+	 */
+	if (error_bits & (SCHIZO_PCIAFSR_PTA | SCHIZO_PCIAFSR_STA)) {
+		schizo_check_iommu_error(p, PCI_ERR);
+		pci_scan_for_target_abort(p, pbm, pbm->pci_bus);
+	}
+	if (error_bits & (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_SMA))
+		pci_scan_for_master_abort(p, pbm, pbm->pci_bus);
+
+	/* For excessive retries, PSYCHO/PBM will abort the device
+	 * and there is no way to specifically check for excessive
+	 * retries in the config space status registers.  So what
+	 * we hope is that we'll catch it via the master/target
+	 * abort events.
+	 */
+
+	if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
+		pci_scan_for_parity_error(p, pbm, pbm->pci_bus);
+
+	schizo_clear_other_err_intr(p, irq);
+
+	return IRQ_HANDLED;
+}
+
+/* Safari/JBUS error log register and its error bits.  Several bit
+ * positions are shared between the Safari (Schizo) and JBUS
+ * (Tomatillo) variants with different meanings, as annotated.
+ */
+#define SCHIZO_SAFARI_ERRLOG 0x10018UL
+
+#define SAFARI_ERRLOG_ERROUT 0x8000000000000000UL
+
+#define BUS_ERROR_BADCMD 0x4000000000000000UL /* Schizo/Tomatillo */
+#define BUS_ERROR_SSMDIS 0x2000000000000000UL /* Safari */
+#define BUS_ERROR_BADMA 0x1000000000000000UL /* Safari */
+#define BUS_ERROR_BADMB 0x0800000000000000UL /* Safari */
+#define BUS_ERROR_BADMC 0x0400000000000000UL /* Safari */
+#define BUS_ERROR_SNOOP_GR 0x0000000000200000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_PCI 0x0000000000100000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RD 0x0000000000080000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDS 0x0000000000020000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDSA 0x0000000000010000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_OWN 0x0000000000008000UL /* Tomatillo */
+#define BUS_ERROR_SNOOP_RDO 0x0000000000004000UL /* Tomatillo */
+#define BUS_ERROR_CPU1PS 0x0000000000002000UL /* Safari */
+#define BUS_ERROR_WDATA_PERR 0x0000000000002000UL /* Tomatillo */
+#define BUS_ERROR_CPU1PB 0x0000000000001000UL /* Safari */
+#define BUS_ERROR_CTRL_PERR 0x0000000000001000UL /* Tomatillo */
+#define BUS_ERROR_CPU0PS 0x0000000000000800UL /* Safari */
+#define BUS_ERROR_SNOOP_ERR 0x0000000000000800UL /* Tomatillo */
+#define BUS_ERROR_CPU0PB 0x0000000000000400UL /* Safari */
+#define BUS_ERROR_JBUS_ILL_B 0x0000000000000400UL /* Tomatillo */
+#define BUS_ERROR_CIQTO 0x0000000000000200UL /* Safari */
+#define BUS_ERROR_LPQTO 0x0000000000000100UL /* Safari */
+#define BUS_ERROR_JBUS_ILL_C 0x0000000000000100UL /* Tomatillo */
+#define BUS_ERROR_SFPQTO 0x0000000000000080UL /* Safari */
+#define BUS_ERROR_UFPQTO 0x0000000000000040UL /* Safari */
+#define BUS_ERROR_RD_PERR 0x0000000000000040UL /* Tomatillo */
+#define BUS_ERROR_APERR 0x0000000000000020UL /* Safari/Tomatillo */
+#define BUS_ERROR_UNMAP 0x0000000000000010UL /* Safari/Tomatillo */
+#define BUS_ERROR_BUSERR 0x0000000000000004UL /* Safari/Tomatillo */
+#define BUS_ERROR_TIMEOUT 0x0000000000000002UL /* Safari/Tomatillo */
+#define BUS_ERROR_ILL 0x0000000000000001UL /* Safari */
+
+/* We only expect UNMAP errors here.  The rest of the Safari errors
+ * are marked fatal and thus cause a system reset.
+ */
+static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct pci_controller_info *p = dev_id;
+	u64 errlog;
+
+	/* Read the error log, then clear the ERROUT latch so the
+	 * hardware can record subsequent errors.
+	 */
+	errlog = schizo_read(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG);
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRLOG,
+		     errlog & ~(SAFARI_ERRLOG_ERROUT));
+
+	/* Anything other than an UNMAP error is unexpected (those
+	 * are configured fatal); just log it and ACK.
+	 */
+	if (!(errlog & BUS_ERROR_UNMAP)) {
+		printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
+		       p->index, errlog);
+
+		schizo_clear_other_err_intr(p, irq);
+		return IRQ_HANDLED;
+	}
+
+	printk("PCI%d: Safari/JBUS interrupt, UNMAPPED error, interrogating IOMMUs.\n",
+	       p->index);
+	schizo_check_iommu_error(p, SAFARI_ERR);
+
+	schizo_clear_other_err_intr(p, irq);
+	return IRQ_HANDLED;
+}
+
+/* Nearly identical to PSYCHO equivalents... */
+#define SCHIZO_ECC_CTRL 0x10020UL
+#define SCHIZO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
+#define SCHIZO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
+#define SCHIZO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */
+
+/* Safari error/interrupt enable registers used by the
+ * *_register_error_handlers() routines below.
+ */
+#define SCHIZO_SAFARI_ERRCTRL 0x10008UL
+#define SCHIZO_SAFERRCTRL_EN 0x8000000000000000UL
+#define SCHIZO_SAFARI_IRQCTRL 0x10010UL
+#define SCHIZO_SAFIRQCTRL_EN 0x8000000000000000UL
+
+/* How the Tomatillo IRQs are routed around is pure guesswork here.
+ *
+ * All the Tomatillo devices I see in prtconf dumps seem to have only
+ * a single PCI bus unit attached to it. It would seem they are separate
+ * devices because their PortID (ie. JBUS ID) values are all different
+ * and thus the registers are mapped to totally different locations.
+ *
+ * However, two Tomatillo's look "similar" in that the only difference
+ * in their PortID is the lowest bit.
+ *
+ * So if we were to ignore this lower bit, it certainly looks like two
+ * PCI bus units of the same Tomatillo. I still have not really
+ * figured this out...
+ */
+/* Register UE/CE/PCIERR/Safari error interrupt handlers for a
+ * Tomatillo controller, then program the ECC, PCI control, PCI AFSR,
+ * and Safari error/irq control registers to enable error reporting.
+ * After each request_irq() the IMAP contents are copied to the
+ * PBM-local interrupt routing register at imap_offset + 4.
+ */
+static void __init tomatillo_register_error_handlers(struct pci_controller_info *p)
+{
+	struct pci_pbm_info *pbm;
+	unsigned int irq;
+	struct ino_bucket *bucket;
+	u64 tmp, err_mask, err_no_mask;
+
+	/* Build IRQs and register handlers. */
+	pbm = pbm_for_ino(p, SCHIZO_UE_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
+	if (request_irq(irq, schizo_ue_intr,
+			SA_SHIRQ, "TOMATILLO UE", p) < 0) {
+		prom_printf("%s: Cannot register UE interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_UE_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_CE_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
+	if (request_irq(irq, schizo_ce_intr,
+			SA_SHIRQ, "TOMATILLO CE", p) < 0) {
+		prom_printf("%s: Cannot register CE interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_CE_INO) + 4));
+
+	/* Per-PBM PCI error handlers get the PBM as dev_id, unlike
+	 * the chip-wide handlers above which get the controller.
+	 */
+	pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
+	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
+					   SCHIZO_PCIERR_A_INO));
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
+		prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
+	irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) |
+					    SCHIZO_PCIERR_B_INO));
+	if (request_irq(irq, schizo_pcierr_intr,
+			SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) {
+		prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
+
+	pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
+	irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
+	if (request_irq(irq, schizo_safarierr_intr,
+			SA_SHIRQ, "TOMATILLO SERR", p) < 0) {
+		prom_printf("%s: Cannot register SafariERR interrupt.\n",
+			    pbm->name);
+		prom_halt();
+	}
+	bucket = __bucket(irq);
+	tmp = upa_readl(bucket->imap);
+	upa_writel(tmp, (pbm->pbm_regs +
+			 schizo_imap_offset(SCHIZO_SERR_INO) + 4));
+
+	/* Enable UE and CE interrupts for controller. */
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
+		     (SCHIZO_ECCCTRL_EE |
+		      SCHIZO_ECCCTRL_UE |
+		      SCHIZO_ECCCTRL_CE));
+
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_ECC_CTRL,
+		     (SCHIZO_ECCCTRL_EE |
+		      SCHIZO_ECCCTRL_UE |
+		      SCHIZO_ECCCTRL_CE));
+
+	/* Enable PCI Error interrupts and clear error
+	 * bits.
+	 */
+	err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
+		    SCHIZO_PCICTRL_TTO_ERR |
+		    SCHIZO_PCICTRL_RTRY_ERR |
+		    SCHIZO_PCICTRL_SERR |
+		    SCHIZO_PCICTRL_EEN);
+
+	/* Discard-timeout errors are deliberately left disabled. */
+	err_no_mask = SCHIZO_PCICTRL_DTO_ERR;
+
+	tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
+	tmp |= err_mask;
+	tmp &= ~err_no_mask;
+	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+	tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
+	tmp |= err_mask;
+	tmp &= ~err_no_mask;
+	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+	/* Clear any stale primary/secondary PCI error status. */
+	err_mask = (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+		    SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+		    SCHIZO_PCIAFSR_PTTO |
+		    SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+		    SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+		    SCHIZO_PCIAFSR_STTO);
+
+	schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
+	schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR, err_mask);
+
+	/* Enable JBUS error reporting for the listed conditions. */
+	err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SNOOP_GR |
+		    BUS_ERROR_SNOOP_PCI | BUS_ERROR_SNOOP_RD |
+		    BUS_ERROR_SNOOP_RDS | BUS_ERROR_SNOOP_RDSA |
+		    BUS_ERROR_SNOOP_OWN | BUS_ERROR_SNOOP_RDO |
+		    BUS_ERROR_WDATA_PERR | BUS_ERROR_CTRL_PERR |
+		    BUS_ERROR_SNOOP_ERR | BUS_ERROR_JBUS_ILL_B |
+		    BUS_ERROR_JBUS_ILL_C | BUS_ERROR_RD_PERR |
+		    BUS_ERROR_APERR | BUS_ERROR_UNMAP |
+		    BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT);
+
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
+		     (SCHIZO_SAFERRCTRL_EN | err_mask));
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_ERRCTRL,
+		     (SCHIZO_SAFERRCTRL_EN | err_mask));
+
+	/* Only UNMAP errors raise an interrupt; others are fatal. */
+	schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
+		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
+	schizo_write(p->pbm_B.controller_regs + SCHIZO_SAFARI_IRQCTRL,
+		     (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
+}
+
+static void __init schizo_register_error_handlers(struct pci_controller_info *p)
+{
+ struct pci_pbm_info *pbm;
+ unsigned int irq;
+ struct ino_bucket *bucket;
+ u64 tmp, err_mask, err_no_mask;
+
+ /* Build IRQs and register handlers. */
+ pbm = pbm_for_ino(p, SCHIZO_UE_INO);
+ irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO);
+ if (request_irq(irq, schizo_ue_intr,
+ SA_SHIRQ, "SCHIZO UE", p) < 0) {
+ prom_printf("%s: Cannot register UE interrupt.\n",
+ pbm->name);
+ prom_halt();
+ }
+ bucket = __bucket(irq);
+ tmp = upa_readl(bucket->imap);
+ upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
+
+ pbm = pbm_for_ino(p, SCHIZO_CE_INO);
+ irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO);
+ if (request_irq(irq, schizo_ce_intr,
+ SA_SHIRQ, "SCHIZO CE", p) < 0) {
+ prom_printf("%s: Cannot register CE interrupt.\n",
+ pbm->name);
+ prom_halt();
+ }
+ bucket = __bucket(irq);
+ tmp = upa_readl(bucket->imap);
+ upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
+
+ pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
+ irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO);
+ if (request_irq(irq, schizo_pcierr_intr,
+ SA_SHIRQ, "SCHIZO PCIERR", pbm) < 0) {
+ prom_printf("%s: Cannot register PBM A PciERR interrupt.\n",
+ pbm->name);
+ prom_halt();
+ }
+ bucket = __bucket(irq);
+ tmp = upa_readl(bucket->imap);
+ upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
+
+ pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
+ irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO);
+ if (request_irq(irq, schizo_pcierr_intr,
+ SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) {
+ prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
+ pbm->name);
+ prom_halt();
+ }
+ bucket = __bucket(irq);
+ tmp = upa_readl(bucket->imap);
+ upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
+
+ pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
+ irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO);
+ if (request_irq(irq, schizo_safarierr_intr,
+ SA_SHIRQ, "SCHIZO SERR", p) < 0) {
+ prom_printf("%s: Cannot register SafariERR interrupt.\n",
+ pbm->name);
+ prom_halt();
+ }
+ bucket = __bucket(irq);
+ tmp = upa_readl(bucket->imap);
+ upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
+
+ /* Enable UE and CE interrupts for controller. */
+ schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
+ (SCHIZO_ECCCTRL_EE |
+ SCHIZO_ECCCTRL_UE |
+ SCHIZO_ECCCTRL_CE));
+
+ err_mask = (SCHIZO_PCICTRL_BUS_UNUS |
+ SCHIZO_PCICTRL_ESLCK |
+ SCHIZO_PCICTRL_TTO_ERR |
+ SCHIZO_PCICTRL_RTRY_ERR |
+ SCHIZO_PCICTRL_SBH_ERR |
+ SCHIZO_PCICTRL_SERR |
+ SCHIZO_PCICTRL_EEN);
+
+ err_no_mask = (SCHIZO_PCICTRL_DTO_ERR |
+ SCHIZO_PCICTRL_SBH_INT);
+
+ /* Enable PCI Error interrupts and clear error
+ * bits for each PBM.
+ */
+ tmp = schizo_read(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL);
+ tmp |= err_mask;
+ tmp &= ~err_no_mask;
+ schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+ schizo_write(p->pbm_A.pbm_regs + SCHIZO_PCI_AFSR,
+ (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+ SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+ SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
+ SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+ SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+ SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
+
+ tmp = schizo_read(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL);
+ tmp |= err_mask;
+ tmp &= ~err_no_mask;
+ schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+ schizo_write(p->pbm_B.pbm_regs + SCHIZO_PCI_AFSR,
+ (SCHIZO_PCIAFSR_PMA | SCHIZO_PCIAFSR_PTA |
+ SCHIZO_PCIAFSR_PRTRY | SCHIZO_PCIAFSR_PPERR |
+ SCHIZO_PCIAFSR_PTTO | SCHIZO_PCIAFSR_PUNUS |
+ SCHIZO_PCIAFSR_SMA | SCHIZO_PCIAFSR_STA |
+ SCHIZO_PCIAFSR_SRTRY | SCHIZO_PCIAFSR_SPERR |
+ SCHIZO_PCIAFSR_STTO | SCHIZO_PCIAFSR_SUNUS));
+
+ /* Make all Safari error conditions fatal except unmapped
+ * errors which we make generate interrupts.
+ */
+ err_mask = (BUS_ERROR_BADCMD | BUS_ERROR_SSMDIS |
+ BUS_ERROR_BADMA | BUS_ERROR_BADMB |
+ BUS_ERROR_BADMC |
+ BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
+ BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB |
+ BUS_ERROR_CIQTO |
+ BUS_ERROR_LPQTO | BUS_ERROR_SFPQTO |
+ BUS_ERROR_UFPQTO | BUS_ERROR_APERR |
+ BUS_ERROR_BUSERR | BUS_ERROR_TIMEOUT |
+ BUS_ERROR_ILL);
+#if 1
+ /* XXX Something wrong with some Excalibur systems
+ * XXX Sun is shipping. The behavior on a 2-cpu
+ * XXX machine is that both CPU1 parity error bits
+ * XXX are set and are immediately set again when
+ * XXX their error status bits are cleared. Just
+ * XXX ignore them for now. -DaveM
+ */
+ err_mask &= ~(BUS_ERROR_CPU1PS | BUS_ERROR_CPU1PB |
+ BUS_ERROR_CPU0PS | BUS_ERROR_CPU0PB);
+#endif
+
+ schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_ERRCTRL,
+ (SCHIZO_SAFERRCTRL_EN | err_mask));
+
+ schizo_write(p->pbm_A.controller_regs + SCHIZO_SAFARI_IRQCTRL,
+ (SCHIZO_SAFIRQCTRL_EN | (BUS_ERROR_UNMAP)));
+}
+
+/* Program basic PCI config space parameters (cache line size and
+ * latency timer) of the PBM's host bridge, i.e. devfn 0 on the
+ * PBM's first bus — presumably the bridge itself; verify vs caller.
+ */
+static void __init pbm_config_busmastering(struct pci_pbm_info *pbm)
+{
+ u8 *addr;
+
+ /* Set cache-line size to 64 bytes, this is actually
+ * a nop but I do it for completeness.
+ */
+ addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+ 0, PCI_CACHE_LINE_SIZE);
+ /* The register is in units of 32-bit words: 64 bytes == 16 words. */
+ pci_config_write8(addr, 64 / sizeof(u32));
+
+ /* Set PBM latency timer to 64 PCI clocks. */
+ addr = schizo_pci_config_mkaddr(pbm, pbm->pci_first_busno,
+ 0, PCI_LATENCY_TIMER);
+ pci_config_write8(addr, 64);
+}
+
+/* Scan one PBM's PCI bus and run all the post-scan fixups:
+ * attach a pcidev_cookie to the host bridge, fill in per-device
+ * cookies, record/assign resources, route IRQs, determine 66MHz
+ * capability and enable busmastering.  Allocation failure of the
+ * cookie is fatal (prom_halt).
+ */
+static void __init pbm_scan_bus(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm)
+{
+ struct pcidev_cookie *cookie = kmalloc(sizeof(*cookie), GFP_KERNEL);
+
+ if (!cookie) {
+ prom_printf("%s: Critical allocation failure.\n", pbm->name);
+ prom_halt();
+ }
+
+ /* All we care about is the PBM. */
+ memset(cookie, 0, sizeof(*cookie));
+ cookie->pbm = pbm;
+
+ pbm->pci_bus = pci_scan_bus(pbm->pci_first_busno,
+ p->pci_ops,
+ pbm);
+ pci_fixup_host_bridge_self(pbm->pci_bus);
+ /* The host bridge device carries the PBM cookie. */
+ pbm->pci_bus->self->sysdata = cookie;
+
+ pci_fill_in_pbm_cookies(pbm->pci_bus, pbm, pbm->prom_node);
+ pci_record_assignments(pbm, pbm->pci_bus);
+ pci_assign_unassigned(pbm, pbm->pci_bus);
+ pci_fixup_irq(pbm, pbm->pci_bus);
+ pci_determine_66mhz_disposition(pbm, pbm->pci_bus);
+ pci_setup_busmastering(pbm, pbm->pci_bus);
+}
+
+/* Scan both PBMs of a SCHIZO/Tomatillo controller (PBM B first,
+ * then PBM A) and, once the scan is done, install the chip-type
+ * specific error interrupt handlers.  If OBP only exposed one of
+ * the two PBM nodes the whole controller is ignored.
+ */
+static void __init __schizo_scan_bus(struct pci_controller_info *p,
+ int chip_type)
+{
+ if (!p->pbm_B.prom_node || !p->pbm_A.prom_node) {
+ printk("PCI: Only one PCI bus module of controller found.\n");
+ printk("PCI: Ignoring entire controller.\n");
+ return;
+ }
+
+ pbm_config_busmastering(&p->pbm_B);
+ p->pbm_B.is_66mhz_capable =
+ prom_getbool(p->pbm_B.prom_node, "66mhz-capable");
+ pbm_config_busmastering(&p->pbm_A);
+ p->pbm_A.is_66mhz_capable =
+ prom_getbool(p->pbm_A.prom_node, "66mhz-capable");
+ pbm_scan_bus(p, &p->pbm_B);
+ pbm_scan_bus(p, &p->pbm_A);
+
+ /* After the PCI bus scan is complete, we can register
+ * the error interrupt handlers.
+ */
+ if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
+ tomatillo_register_error_handlers(p);
+ else
+ schizo_register_error_handlers(p);
+}
+
+/* scan_bus hook for SCHIZO based controllers. */
+static void __init schizo_scan_bus(struct pci_controller_info *p)
+{
+ __schizo_scan_bus(p, PBM_CHIP_TYPE_SCHIZO);
+}
+
+/* scan_bus hook for TOMATILLO based controllers. */
+static void __init tomatillo_scan_bus(struct pci_controller_info *p)
+{
+ __schizo_scan_bus(p, PBM_CHIP_TYPE_TOMATILLO);
+}
+
+/* Write an already-assigned resource back into the device's BAR
+ * (or expansion ROM address register).  The value programmed is
+ * bus-relative: resource start minus the PBM aperture base.
+ */
+static void __init schizo_base_address_update(struct pci_dev *pdev, int resource)
+{
+ struct pcidev_cookie *pcp = pdev->sysdata;
+ struct pci_pbm_info *pbm = pcp->pbm;
+ struct resource *res, *root;
+ u32 reg;
+ int where, size, is_64bit;
+
+ res = &pdev->resource[resource];
+ if (resource < 6) {
+ where = PCI_BASE_ADDRESS_0 + (resource * 4);
+ } else if (resource == PCI_ROM_RESOURCE) {
+ where = pdev->rom_base_reg;
+ } else {
+ /* Somebody might have asked allocation of a non-standard resource */
+ return;
+ }
+
+ is_64bit = 0;
+ if (res->flags & IORESOURCE_IO)
+ root = &pbm->io_space;
+ else {
+ root = &pbm->mem_space;
+ if ((res->flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK)
+ == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ is_64bit = 1;
+ }
+
+ /* "size" is really length-minus-one, i.e. the mask of the low
+ * address bits; it preserves the BAR's flag bits on write-back.
+ */
+ size = res->end - res->start;
+ pci_read_config_dword(pdev, where, &reg);
+ reg = ((reg & size) |
+ (((u32)(res->start - root->start)) & ~size));
+ if (resource == PCI_ROM_RESOURCE) {
+ reg |= PCI_ROM_ADDRESS_ENABLE;
+ res->flags |= IORESOURCE_ROM_ENABLE;
+ }
+ pci_write_config_dword(pdev, where, reg);
+
+ /* This knows that the upper 32-bits of the address
+ * must be zero. Our PCI common layer enforces this.
+ */
+ if (is_64bit)
+ pci_write_config_dword(pdev, where + 4, 0);
+}
+
+/* Rebase a bus-relative child resource into its parent aperture's
+ * physical address range.  The pdev argument is unused here but is
+ * part of the resource_adjust hook signature.
+ */
+static void __init schizo_resource_adjust(struct pci_dev *pdev,
+ struct resource *res,
+ struct resource *root)
+{
+ res->end += root->start;
+ res->start += root->start;
+}
+
+/* Use ranges property to determine where PCI MEM, I/O, and Config
+ * space are for this PCI bus module.
+ */
+/* Walk the PBM's OBP "ranges" entries and record where PCI config,
+ * IO, and MEM space live in the physical address map.  All three
+ * range types must be present, otherwise we halt.
+ */
+static void schizo_determine_mem_io_space(struct pci_pbm_info *pbm)
+{
+ int i, saw_cfg, saw_mem, saw_io;
+
+ saw_cfg = saw_mem = saw_io = 0;
+ for (i = 0; i < pbm->num_pbm_ranges; i++) {
+ struct linux_prom_pci_ranges *pr = &pbm->pbm_ranges[i];
+ unsigned long a;
+ int type;
+
+ /* Bits 25:24 of child_phys_hi encode the PCI address
+ * space type; parent_phys_{hi,lo} give the 64-bit
+ * physical base of the aperture.
+ */
+ type = (pr->child_phys_hi >> 24) & 0x3;
+ a = (((unsigned long)pr->parent_phys_hi << 32UL) |
+ ((unsigned long)pr->parent_phys_lo << 0UL));
+
+ switch (type) {
+ case 0:
+ /* PCI config space, 16MB */
+ pbm->config_space = a;
+ saw_cfg = 1;
+ break;
+
+ case 1:
+ /* 16-bit IO space, 16MB */
+ pbm->io_space.start = a;
+ pbm->io_space.end = a + ((16UL*1024UL*1024UL) - 1UL);
+ pbm->io_space.flags = IORESOURCE_IO;
+ saw_io = 1;
+ break;
+
+ case 2:
+ /* 32-bit MEM space, 2GB */
+ pbm->mem_space.start = a;
+ pbm->mem_space.end = a + (0x80000000UL - 1UL);
+ pbm->mem_space.flags = IORESOURCE_MEM;
+ saw_mem = 1;
+ break;
+
+ default:
+ break;
+ };
+ }
+
+ if (!saw_cfg || !saw_io || !saw_mem) {
+ prom_printf("%s: Fatal error, missing %s PBM range.\n",
+ pbm->name,
+ ((!saw_cfg ?
+ "CFG" :
+ (!saw_io ?
+ "IO" : "MEM"))));
+ prom_halt();
+ }
+
+ printk("%s: PCI CFG[%lx] IO[%lx] MEM[%lx]\n",
+ pbm->name,
+ pbm->config_space,
+ pbm->io_space.start,
+ pbm->mem_space.start);
+}
+
+/* Publish the PBM's IO and MEM apertures in the global resource
+ * trees and register the legacy (ISA/VGA) regions inside them.
+ */
+static void __init pbm_register_toplevel_resources(struct pci_controller_info *p,
+ struct pci_pbm_info *pbm)
+{
+ pbm->io_space.name = pbm->mem_space.name = pbm->name;
+
+ /* A clash here is not fatal, but it leaves the aperture
+ * invisible in the resource tree, so at least report it
+ * instead of silently ignoring the failure.
+ */
+ if (request_resource(&ioport_resource, &pbm->io_space) < 0)
+ printk(KERN_ERR "%s: Cannot register IO space.\n",
+ pbm->name);
+ if (request_resource(&iomem_resource, &pbm->mem_space) < 0)
+ printk(KERN_ERR "%s: Cannot register MEM space.\n",
+ pbm->name);
+ pci_register_legacy_regions(&pbm->io_space,
+ &pbm->mem_space);
+}
+
+#define SCHIZO_STRBUF_CONTROL (0x02800UL)
+#define SCHIZO_STRBUF_FLUSH (0x02808UL)
+#define SCHIZO_STRBUF_FSYNC (0x02810UL)
+#define SCHIZO_STRBUF_CTXFLUSH (0x02818UL)
+#define SCHIZO_STRBUF_CTXMATCH (0x10000UL)
+
+/* Initialize the PBM's streaming cache (buffer): record the
+ * register offsets, set up the 64-byte aligned flush flag, and
+ * enable the buffer with LRU locking and diag mode off.
+ * Tomatillo has no streaming cache, so nothing is done there.
+ */
+static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
+{
+ unsigned long base = pbm->pbm_regs;
+ u64 control;
+
+ if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
+ /* TOMATILLO lacks streaming cache. */
+ return;
+ }
+
+ /* SCHIZO has context flushing. */
+ pbm->stc.strbuf_control = base + SCHIZO_STRBUF_CONTROL;
+ pbm->stc.strbuf_pflush = base + SCHIZO_STRBUF_FLUSH;
+ pbm->stc.strbuf_fsync = base + SCHIZO_STRBUF_FSYNC;
+ pbm->stc.strbuf_ctxflush = base + SCHIZO_STRBUF_CTXFLUSH;
+ pbm->stc.strbuf_ctxmatch_base = base + SCHIZO_STRBUF_CTXMATCH;
+
+ /* Round the flush-flag buffer up to a 64-byte boundary. */
+ pbm->stc.strbuf_flushflag = (volatile unsigned long *)
+ ((((unsigned long)&pbm->stc.__flushflag_buf[0])
+ + 63UL)
+ & ~63UL);
+ pbm->stc.strbuf_flushflag_pa = (unsigned long)
+ __pa(pbm->stc.strbuf_flushflag);
+
+ /* Turn off LRU locking and diag mode, enable the
+ * streaming buffer and leave the rerun-disable
+ * setting however OBP set it.
+ */
+ control = schizo_read(pbm->stc.strbuf_control);
+ control &= ~(SCHIZO_STRBUF_CTRL_LPTR |
+ SCHIZO_STRBUF_CTRL_LENAB |
+ SCHIZO_STRBUF_CTRL_DENAB);
+ control |= SCHIZO_STRBUF_CTRL_ENAB;
+ schizo_write(pbm->stc.strbuf_control, control);
+
+ pbm->stc.strbuf_enabled = 1;
+}
+
+#define SCHIZO_IOMMU_CONTROL (0x00200UL)
+#define SCHIZO_IOMMU_TSBBASE (0x00208UL)
+#define SCHIZO_IOMMU_FLUSH (0x00210UL)
+#define SCHIZO_IOMMU_CTXFLUSH (0x00218UL)
+
+/* Initialize a PBM's IOMMU: decode the OBP "virtual-dma" property
+ * into a DVMA window (base/size), invalidate the hardware TLB,
+ * allocate and install the TSB (IOMMU page table), then enable
+ * translation.  All allocation failures are fatal (prom_halt).
+ */
+static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
+{
+ struct pci_iommu *iommu = pbm->iommu;
+ unsigned long tsbbase, i, tagbase, database, order;
+ u32 vdma[2], dma_mask;
+ u64 control;
+ int err, tsbsize;
+
+ err = prom_getproperty(pbm->prom_node, "virtual-dma",
+ (char *)&vdma[0], sizeof(vdma));
+ if (err == 0 || err == -1) {
+ /* No property, use default values. */
+ vdma[0] = 0xc0000000;
+ vdma[1] = 0x40000000;
+ }
+
+ /* The DVMA base supplies the high bits of the DMA mask; the
+ * window size selects the low-bit mask and the TSB size.
+ */
+ dma_mask = vdma[0];
+ switch (vdma[1]) {
+ case 0x20000000:
+ dma_mask |= 0x1fffffff;
+ tsbsize = 64;
+ break;
+
+ case 0x40000000:
+ dma_mask |= 0x3fffffff;
+ tsbsize = 128;
+ break;
+
+ case 0x80000000:
+ dma_mask |= 0x7fffffff;
+ tsbsize = 128;
+ break;
+
+ default:
+ prom_printf("SCHIZO: strange virtual-dma size.\n");
+ prom_halt();
+ };
+
+ /* Setup initial software IOMMU state. */
+ spin_lock_init(&iommu->lock);
+ iommu->iommu_cur_ctx = 0;
+
+ /* Register addresses, SCHIZO has iommu ctx flushing. */
+ iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
+ iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
+ iommu->iommu_flush = pbm->pbm_regs + SCHIZO_IOMMU_FLUSH;
+ iommu->iommu_ctxflush = pbm->pbm_regs + SCHIZO_IOMMU_CTXFLUSH;
+
+ /* We use the main control/status register of SCHIZO as the write
+ * completion register.
+ */
+ iommu->write_complete_reg = pbm->controller_regs + 0x10000UL;
+
+ /*
+ * Invalidate TLB Entries.
+ */
+ control = schizo_read(iommu->iommu_control);
+ control |= SCHIZO_IOMMU_CTRL_DENAB;
+ schizo_write(iommu->iommu_control, control);
+
+ tagbase = SCHIZO_IOMMU_TAG, database = SCHIZO_IOMMU_DATA;
+
+ /* Zero all 16 TLB tag/data diagnostic entries. */
+ for(i = 0; i < 16; i++) {
+ schizo_write(pbm->pbm_regs + tagbase + (i * 8UL), 0);
+ schizo_write(pbm->pbm_regs + database + (i * 8UL), 0);
+ }
+
+ /* Leave diag mode enabled for full-flushing done
+ * in pci_iommu.c
+ */
+
+ iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+ if (!iommu->dummy_page) {
+ /* Fixed message: it used to say "PSYCHO_IOMMU", a
+ * copy-paste leftover from the psycho driver.  Use
+ * pbm->name like the TSB failure message below.
+ */
+ prom_printf("%s: Error, gfp(dummy_page) failed.\n",
+ pbm->name);
+ prom_halt();
+ }
+ memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+ iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+ /* The TSB holds tsbsize*1024 ioptes of 8 bytes each; e.g.
+ * with 128K entries that is a 1MB, order-7 allocation.
+ */
+ order = get_order(tsbsize * 8 * 1024);
+ tsbbase = __get_free_pages(GFP_KERNEL, order);
+ if (!tsbbase) {
+ prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
+ prom_halt();
+ }
+
+ iommu->page_table = (iopte_t *)tsbbase;
+ iommu->page_table_map_base = vdma[0];
+ iommu->dma_addr_mask = dma_mask;
+ pci_iommu_table_init(iommu, PAGE_SIZE << order);
+
+ switch (tsbsize) {
+ case 64:
+ iommu->page_table_sz_bits = 16;
+ break;
+
+ case 128:
+ iommu->page_table_sz_bits = 17;
+ break;
+
+ default:
+ prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
+ prom_halt();
+ break;
+ };
+
+ /* We start with no consistent mappings. */
+ iommu->lowest_consistent_map =
+ 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
+
+ for (i = 0; i < PBM_NCLUSTERS; i++) {
+ iommu->alloc_info[i].flush = 0;
+ iommu->alloc_info[i].next = 0;
+ }
+
+ /* Point the hardware at the TSB and enable translation. */
+ schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+
+ control = schizo_read(iommu->iommu_control);
+ control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
+ switch (tsbsize) {
+ case 64:
+ control |= SCHIZO_IOMMU_TSBSZ_64K;
+ break;
+ case 128:
+ control |= SCHIZO_IOMMU_TSBSZ_128K;
+ break;
+ };
+
+ control |= SCHIZO_IOMMU_CTRL_ENAB;
+ schizo_write(iommu->iommu_control, control);
+}
+
+#define SCHIZO_PCI_IRQ_RETRY (0x1a00UL)
+#define SCHIZO_IRQ_RETRY_INF 0xffUL
+
+#define SCHIZO_PCI_DIAG (0x2020UL)
+#define SCHIZO_PCIDIAG_D_BADECC (1UL << 10UL) /* Disable BAD ECC errors (Schizo) */
+#define SCHIZO_PCIDIAG_D_BYPASS (1UL << 9UL) /* Disable MMU bypass mode (Schizo/Tomatillo) */
+#define SCHIZO_PCIDIAG_D_TTO (1UL << 8UL) /* Disable TTO errors (Schizo/Tomatillo) */
+#define SCHIZO_PCIDIAG_D_RTRYARB (1UL << 7UL) /* Disable retry arbitration (Schizo) */
+#define SCHIZO_PCIDIAG_D_RETRY (1UL << 6UL) /* Disable retry limit (Schizo/Tomatillo) */
+#define SCHIZO_PCIDIAG_D_INTSYNC (1UL << 5UL) /* Disable interrupt/DMA synch (Schizo/Tomatillo) */
+#define SCHIZO_PCIDIAG_I_DMA_PARITY (1UL << 3UL) /* Invert DMA parity (Schizo/Tomatillo) */
+#define SCHIZO_PCIDIAG_I_PIOD_PARITY (1UL << 2UL) /* Invert PIO data parity (Schizo/Tomatillo) */
+#define SCHIZO_PCIDIAG_I_PIOA_PARITY (1UL << 1UL) /* Invert PIO address parity (Schizo/Tomatillo) */
+
+#define TOMATILLO_PCI_IOC_CSR (0x2248UL)
+#define TOMATILLO_IOC_PART_WPENAB 0x0000000000080000UL
+#define TOMATILLO_IOC_RDMULT_PENAB 0x0000000000040000UL
+#define TOMATILLO_IOC_RDONE_PENAB 0x0000000000020000UL
+#define TOMATILLO_IOC_RDLINE_PENAB 0x0000000000010000UL
+#define TOMATILLO_IOC_RDMULT_PLEN 0x000000000000c000UL
+#define TOMATILLO_IOC_RDMULT_PLEN_SHIFT 14UL
+#define TOMATILLO_IOC_RDONE_PLEN 0x0000000000003000UL
+#define TOMATILLO_IOC_RDONE_PLEN_SHIFT 12UL
+#define TOMATILLO_IOC_RDLINE_PLEN 0x0000000000000c00UL
+#define TOMATILLO_IOC_RDLINE_PLEN_SHIFT 10UL
+#define TOMATILLO_IOC_PREF_OFF 0x00000000000003f8UL
+#define TOMATILLO_IOC_PREF_OFF_SHIFT 3UL
+#define TOMATILLO_IOC_RDMULT_CPENAB 0x0000000000000004UL
+#define TOMATILLO_IOC_RDONE_CPENAB 0x0000000000000002UL
+#define TOMATILLO_IOC_RDLINE_CPENAB 0x0000000000000001UL
+
+#define TOMATILLO_PCI_IOC_TDIAG (0x2250UL)
+#define TOMATILLO_PCI_IOC_DDIAG (0x2290UL)
+
+/* Low-level PBM hardware setup: IRQ retry count, PCI control
+ * register (arbiter, timeouts, bus parking, Tomatillo prefetch
+ * bits) and diagnostic register, all of it keyed on chip type
+ * and revision.
+ */
+static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm)
+{
+ u64 tmp;
+
+ /* Set IRQ retry to infinity. */
+ schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY,
+ SCHIZO_IRQ_RETRY_INF);
+
+ /* Enable arbiter for all PCI slots. Also, disable PCI interval
+ * timer so that DTO (Discard TimeOuts) are not reported because
+ * some Schizo revisions report them erroneously.
+ */
+ tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
+ /* Schizo+ v5 rev1 only gets four arbiter slots enabled. */
+ if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS &&
+ pbm->chip_version == 0x5 &&
+ pbm->chip_revision == 0x1)
+ tmp |= 0x0f;
+ else
+ tmp |= 0xff;
+
+ tmp &= ~SCHIZO_PCICTRL_PTO;
+ if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
+ pbm->chip_version >= 0x2)
+ tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
+ else
+ tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT;
+
+ if (!prom_getbool(pbm->prom_node, "no-bus-parking"))
+ tmp |= SCHIZO_PCICTRL_PARK;
+
+ /* NOTE(review): bit 61 is a rev-dependent workaround bit for
+ * early Tomatillo; its exact meaning is not visible here.
+ */
+ if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
+ pbm->chip_version <= 0x1)
+ tmp |= (1UL << 61);
+ else
+ tmp &= ~(1UL << 61);
+
+ if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
+ tmp |= (SCHIZO_PCICTRL_MRM_PREF |
+ SCHIZO_PCICTRL_RDO_PREF |
+ SCHIZO_PCICTRL_RDL_PREF);
+
+ schizo_write(pbm->pbm_regs + SCHIZO_PCI_CTRL, tmp);
+
+ tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_DIAG);
+ tmp &= ~(SCHIZO_PCIDIAG_D_RTRYARB |
+ SCHIZO_PCIDIAG_D_RETRY |
+ SCHIZO_PCIDIAG_D_INTSYNC);
+ schizo_write(pbm->pbm_regs + SCHIZO_PCI_DIAG, tmp);
+
+ if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
+ /* Clear prefetch lengths to workaround a bug in
+ * Jalapeno...
+ */
+ tmp = (TOMATILLO_IOC_PART_WPENAB |
+ (1 << TOMATILLO_IOC_PREF_OFF_SHIFT) |
+ TOMATILLO_IOC_RDMULT_CPENAB |
+ TOMATILLO_IOC_RDONE_CPENAB |
+ TOMATILLO_IOC_RDLINE_CPENAB);
+
+ schizo_write(pbm->pbm_regs + TOMATILLO_PCI_IOC_CSR,
+ tmp);
+ }
+}
+
+/* Probe and initialize one PBM from its OBP node: decode the
+ * "reg" property to pick PBM A vs B and find register bases, do
+ * the hardware setup, parse ranges/interrupt-map/ino-bitmap/
+ * bus-range, then bring up the IOMMU and streaming buffer.
+ * Any missing mandatory property is fatal (prom_halt).
+ */
+static void __init schizo_pbm_init(struct pci_controller_info *p,
+ int prom_node, u32 portid,
+ int chip_type)
+{
+ struct linux_prom64_registers pr_regs[4];
+ unsigned int busrange[2];
+ struct pci_pbm_info *pbm;
+ const char *chipset_name;
+ u32 ino_bitmap[2];
+ int is_pbm_a;
+ int err;
+
+ switch (chip_type) {
+ case PBM_CHIP_TYPE_TOMATILLO:
+ chipset_name = "TOMATILLO";
+ break;
+
+ case PBM_CHIP_TYPE_SCHIZO_PLUS:
+ chipset_name = "SCHIZO+";
+ break;
+
+ case PBM_CHIP_TYPE_SCHIZO:
+ default:
+ chipset_name = "SCHIZO";
+ break;
+ };
+
+ /* For SCHIZO, three OBP regs:
+ * 1) PBM controller regs
+ * 2) Schizo front-end controller regs (same for both PBMs)
+ * 3) PBM PCI config space
+ *
+ * For TOMATILLO, four OBP regs:
+ * 1) PBM controller regs
+ * 2) Tomatillo front-end controller regs
+ * 3) PBM PCI config space
+ * 4) Ichip regs
+ */
+ err = prom_getproperty(prom_node, "reg",
+ (char *)&pr_regs[0],
+ sizeof(pr_regs));
+ if (err == 0 || err == -1) {
+ prom_printf("%s: Fatal error, no reg property.\n",
+ chipset_name);
+ prom_halt();
+ }
+
+ /* Bits 22:20 of the first reg address distinguish PBM A from B. */
+ is_pbm_a = ((pr_regs[0].phys_addr & 0x00700000) == 0x00600000);
+
+ if (is_pbm_a)
+ pbm = &p->pbm_A;
+ else
+ pbm = &p->pbm_B;
+
+ pbm->portid = portid;
+ pbm->parent = p;
+ pbm->prom_node = prom_node;
+ pbm->pci_first_slot = 1;
+
+ pbm->chip_type = chip_type;
+ pbm->chip_version =
+ prom_getintdefault(prom_node, "version#", 0);
+ pbm->chip_revision =
+ prom_getintdefault(prom_node, "module-revision#", 0);
+
+ /* The front-end controller register base sits 0x10000 below
+ * the address OBP reports in the second reg entry.
+ */
+ pbm->pbm_regs = pr_regs[0].phys_addr;
+ pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL;
+
+ sprintf(pbm->name,
+ (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
+ "TOMATILLO%d PBM%c" :
+ "SCHIZO%d PBM%c"),
+ p->index,
+ (pbm == &p->pbm_A ? 'A' : 'B'));
+
+ printk("%s: ver[%x:%x], portid %x, "
+ "cregs[%lx] pregs[%lx]\n",
+ pbm->name,
+ pbm->chip_version, pbm->chip_revision,
+ pbm->portid,
+ pbm->controller_regs,
+ pbm->pbm_regs);
+
+ schizo_pbm_hw_init(pbm);
+
+ prom_getstring(prom_node, "name",
+ pbm->prom_name,
+ sizeof(pbm->prom_name));
+
+ err = prom_getproperty(prom_node, "ranges",
+ (char *) pbm->pbm_ranges,
+ sizeof(pbm->pbm_ranges));
+ if (err == 0 || err == -1) {
+ prom_printf("%s: Fatal error, no ranges property.\n",
+ pbm->name);
+ prom_halt();
+ }
+
+ pbm->num_pbm_ranges =
+ (err / sizeof(struct linux_prom_pci_ranges));
+
+ schizo_determine_mem_io_space(pbm);
+ pbm_register_toplevel_resources(p, pbm);
+
+ /* interrupt-map is optional; without it we fall back to an
+ * empty map and a zeroed mask.
+ */
+ err = prom_getproperty(prom_node, "interrupt-map",
+ (char *)pbm->pbm_intmap,
+ sizeof(pbm->pbm_intmap));
+ if (err != -1) {
+ pbm->num_pbm_intmap = (err / sizeof(struct linux_prom_pci_intmap));
+ err = prom_getproperty(prom_node, "interrupt-map-mask",
+ (char *)&pbm->pbm_intmask,
+ sizeof(pbm->pbm_intmask));
+ if (err == -1) {
+ prom_printf("%s: Fatal error, no "
+ "interrupt-map-mask.\n", pbm->name);
+ prom_halt();
+ }
+ } else {
+ pbm->num_pbm_intmap = 0;
+ memset(&pbm->pbm_intmask, 0, sizeof(pbm->pbm_intmask));
+ }
+
+ err = prom_getproperty(prom_node, "ino-bitmap",
+ (char *) &ino_bitmap[0],
+ sizeof(ino_bitmap));
+ if (err == 0 || err == -1) {
+ prom_printf("%s: Fatal error, no ino-bitmap.\n", pbm->name);
+ prom_halt();
+ }
+ pbm->ino_bitmap = (((u64)ino_bitmap[1] << 32UL) |
+ ((u64)ino_bitmap[0] << 0UL));
+
+ err = prom_getproperty(prom_node, "bus-range",
+ (char *)&busrange[0],
+ sizeof(busrange));
+ if (err == 0 || err == -1) {
+ prom_printf("%s: Fatal error, no bus-range.\n", pbm->name);
+ prom_halt();
+ }
+ pbm->pci_first_busno = busrange[0];
+ pbm->pci_last_busno = busrange[1];
+
+ schizo_pbm_iommu_init(pbm);
+ schizo_pbm_strbuf_init(pbm);
+}
+
+/* Do the two portids belong to the same controller?  On Tomatillo
+ * the two PBMs of one controller differ only in the low portid
+ * bit, so they match when x XOR y == 1; otherwise exact equality.
+ */
+static inline int portid_compare(u32 x, u32 y, int chip_type)
+{
+ if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
+ return (x ^ y) == 1;
+ return x == y;
+}
+
+/* Common probe entry: attach this PBM node to an existing
+ * controller with a matching portid, or allocate a fresh
+ * pci_controller_info (plus one IOMMU per PBM), hook up the
+ * chip-type specific methods, and initialize the PBM.
+ * (The old dead "is_pbm_a" local, assigned but never read, has
+ * been removed.)
+ */
+static void __init __schizo_init(int node, char *model_name, int chip_type)
+{
+ struct pci_controller_info *p;
+ struct pci_iommu *iommu;
+ u32 portid;
+
+ portid = prom_getintdefault(node, "portid", 0xff);
+
+ /* Is this the second PBM of a controller we already saw? */
+ for(p = pci_controller_root; p; p = p->next) {
+ struct pci_pbm_info *pbm;
+
+ if (p->pbm_A.prom_node && p->pbm_B.prom_node)
+ continue;
+
+ pbm = (p->pbm_A.prom_node ?
+ &p->pbm_A :
+ &p->pbm_B);
+
+ if (portid_compare(pbm->portid, portid, chip_type)) {
+ schizo_pbm_init(p, node, portid, chip_type);
+ return;
+ }
+ }
+
+ /* New controller.  GFP_ATOMIC because allocation failure is
+ * fatal here anyway (prom_halt).
+ */
+ p = kmalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
+ if (!p) {
+ prom_printf("SCHIZO: Fatal memory allocation error.\n");
+ prom_halt();
+ }
+ memset(p, 0, sizeof(*p));
+
+ iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ if (!iommu) {
+ prom_printf("SCHIZO: Fatal memory allocation error.\n");
+ prom_halt();
+ }
+ memset(iommu, 0, sizeof(*iommu));
+ p->pbm_A.iommu = iommu;
+
+ iommu = kmalloc(sizeof(struct pci_iommu), GFP_ATOMIC);
+ if (!iommu) {
+ prom_printf("SCHIZO: Fatal memory allocation error.\n");
+ prom_halt();
+ }
+ memset(iommu, 0, sizeof(*iommu));
+ p->pbm_B.iommu = iommu;
+
+ p->next = pci_controller_root;
+ pci_controller_root = p;
+
+ p->index = pci_num_controllers++;
+ p->pbms_same_domain = 0;
+ p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
+ tomatillo_scan_bus :
+ schizo_scan_bus);
+ p->irq_build = schizo_irq_build;
+ p->base_address_update = schizo_base_address_update;
+ p->resource_adjust = schizo_resource_adjust;
+ p->pci_ops = &schizo_ops;
+
+ /* Like PSYCHO we have a 2GB aligned area for memory space. */
+ pci_memspace_mask = 0x7fffffffUL;
+
+ schizo_pbm_init(p, node, portid, chip_type);
+}
+
+/* Externally visible probe entry points, one per chip type. */
+void __init schizo_init(int node, char *model_name)
+{
+ __schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO);
+}
+
+void __init schizo_plus_init(int node, char *model_name)
+{
+ __schizo_init(node, model_name, PBM_CHIP_TYPE_SCHIZO_PLUS);
+}
+
+void __init tomatillo_init(int node, char *model_name)
+{
+ __schizo_init(node, model_name, PBM_CHIP_TYPE_TOMATILLO);
+}
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
new file mode 100644
index 00000000000..52f14e399b1
--- /dev/null
+++ b/arch/sparc64/kernel/power.c
@@ -0,0 +1,150 @@
+/* $Id: power.c,v 1.10 2001/12/11 01:57:16 davem Exp $
+ * power.c: Power management driver.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/ebus.h>
+#include <asm/auxio.h>
+
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+
+/*
+ * sysctl - toggle power-off restriction for serial console
+ * systems in machine_power_off()
+ */
+int scons_pwroff = 1;
+
+#ifdef CONFIG_PCI
+static void __iomem *power_reg;
+
+static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
+static int button_pressed;
+
+/* Shared (SA_SHIRQ) interrupt handler for the power button: latch
+ * button_pressed once and wake the powerd thread.  The hardware
+ * status registers are not consulted yet (see FIXME).
+ */
+static irqreturn_t power_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ if (button_pressed == 0) {
+ button_pressed = 1;
+ wake_up(&powerd_wait);
+ }
+
+ /* FIXME: Check registers for status... */
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI */
+
+extern void machine_halt(void);
+extern void machine_alt_power_off(void);
+static void (*poweroff_method)(void) = machine_alt_power_off;
+
+/* Power the machine down.  Power-off is attempted only when the
+ * console is not serial, or scons_pwroff permits it: first via the
+ * AUXIO power register if one was mapped, otherwise through
+ * poweroff_method.  If neither path powers off, fall back to a
+ * plain halt.
+ */
+void machine_power_off(void)
+{
+ if (!serial_console || scons_pwroff) {
+#ifdef CONFIG_PCI
+ if (power_reg) {
+ /* Both register bits seem to have the
+ * same effect, so until I figure out
+ * what the difference is...
+ */
+ writel(AUXIO_PCIO_CPWR_OFF | AUXIO_PCIO_SPWR_OFF, power_reg);
+ } else
+#endif /* CONFIG_PCI */
+ if (poweroff_method != NULL) {
+ poweroff_method();
+ /* not reached */
+ }
+ }
+ machine_halt();
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+#ifdef CONFIG_PCI
+/* Kernel thread that waits for the power button interrupt to set
+ * button_pressed, then execs /sbin/shutdown.  On exec failure it
+ * rejoins the wait queue and waits for another press.
+ */
+static int powerd(void *__unused)
+{
+ static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
+ char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
+ DECLARE_WAITQUEUE(wait, current);
+
+ daemonize("powerd");
+
+ add_wait_queue(&powerd_wait, &wait);
+again:
+ for (;;) {
+ /* Sleep interruptibly; flush stray signals so they do
+ * not keep us spinning through schedule().
+ */
+ set_task_state(current, TASK_INTERRUPTIBLE);
+ if (button_pressed)
+ break;
+ flush_signals(current);
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&powerd_wait, &wait);
+
+ /* Ok, down we go... */
+ button_pressed = 0;
+ if (execve("/sbin/shutdown", argv, envp) < 0) {
+ printk("powerd: shutdown execution failed\n");
+ add_wait_queue(&powerd_wait, &wait);
+ goto again;
+ }
+ return 0;
+}
+
+/* A power-button interrupt is usable only when the ebus device is
+ * wired to an IRQ and its OBP node carries a "button" property.
+ */
+static int __init has_button_interrupt(struct linux_ebus_device *edev)
+{
+ return (edev->irqs[0] != PCI_IRQ_NONE &&
+ prom_node_has_property(edev->prom_node, "button"));
+}
+
+/* Locate the "power" ebus device, map its control register, and
+ * switch poweroff_method to the standard halt.  If the device has
+ * a button interrupt, also start the powerd thread and register
+ * the IRQ handler.  Safe to call more than once (guarded by the
+ * static "invoked" flag).
+ */
+void __init power_init(void)
+{
+ struct linux_ebus *ebus;
+ struct linux_ebus_device *edev;
+ static int invoked;
+
+ if (invoked)
+ return;
+ invoked = 1;
+
+ for_each_ebus(ebus) {
+ for_each_ebusdev(edev, ebus) {
+ if (!strcmp(edev->prom_name, "power"))
+ goto found;
+ }
+ }
+ return;
+
+found:
+ /* NOTE(review): ioremap() may return NULL; the result is used
+ * unchecked here and in machine_power_off — confirm whether a
+ * failure path is needed.
+ */
+ power_reg = ioremap(edev->resource[0].start, 0x4);
+ printk("power: Control reg at %p ... ", power_reg);
+ poweroff_method = machine_halt; /* able to use the standard halt */
+ if (has_button_interrupt(edev)) {
+ if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
+ printk("Failed to start power daemon.\n");
+ return;
+ }
+ printk("powerd running.\n");
+
+ if (request_irq(edev->irqs[0],
+ power_handler, SA_SHIRQ, "power", NULL) < 0)
+ printk("power: Error, cannot register IRQ handler.\n");
+ } else {
+ printk("not using powerd.\n");
+ }
+}
+#endif /* CONFIG_PCI */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
new file mode 100644
index 00000000000..26d3ec41da1
--- /dev/null
+++ b/arch/sparc64/kernel/process.c
@@ -0,0 +1,869 @@
+/* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
+ * arch/sparc64/kernel/process.c
+ *
+ * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <stdarg.h>
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/config.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/init.h>
+
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/pstate.h>
+#include <asm/elf.h>
+#include <asm/fpumacro.h>
+#include <asm/head.h>
+#include <asm/cpudata.h>
+#include <asm/unistd.h>
+
+/* #define VERBOSE_SHOWREGS */
+
+/*
+ * Nothing special yet...
+ */
+/* Architecture idle hook; intentionally empty — sparc64 has no
+ * low-power sleep sequence wired up here yet (see cpu_idle below).
+ */
+void default_idle(void)
+{
+}
+
+#ifndef CONFIG_SMP
+
+/*
+ * the idle loop on a Sparc... ;)
+ */
+/* Uniprocessor idle loop.  Only the swapper (pid 0) may enter it;
+ * any other caller returns immediately.  Busy-polls need_resched().
+ */
+void cpu_idle(void)
+{
+ if (current->pid != 0)
+ return;
+
+ /* endless idle loop with no priority at all */
+ for (;;) {
+ /* If current->work.need_resched is zero we should really
+ * setup for a system wakup event and execute a shutdown
+ * instruction.
+ *
+ * But this requires writing back the contents of the
+ * L2 cache etc. so implement this later. -DaveM
+ */
+ while (!need_resched())
+ barrier();
+
+ schedule();
+ check_pgt_cache();
+ }
+ return;
+}
+
+#else
+
+/*
+ * the idle loop on a UltraMultiPenguin...
+ */
+/* Per-cpu idleness counter: bumped each time round the idle loop,
+ * cleared as soon as there is work to do.
+ */
+#define idle_me_harder() (cpu_data(smp_processor_id()).idle_volume += 1)
+#define unidle_me() (cpu_data(smp_processor_id()).idle_volume = 0)
+/* SMP idle loop: polls need_resched() with TIF_POLLING_NRFLAG set and
+ * advertises growing idleness via idle_volume for the scheduler.
+ */
+void cpu_idle(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ while(1) {
+ if (need_resched()) {
+ unidle_me();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ schedule();
+ set_thread_flag(TIF_POLLING_NRFLAG);
+ check_pgt_cache();
+ }
+ idle_me_harder();
+
+ /* The store ordering is so that IRQ handlers on
+ * other cpus see our increasing idleness for the buddy
+ * redistribution algorithm. -DaveM
+ */
+ membar("#StoreStore | #StoreLoad");
+ }
+}
+
+#endif
+
+extern char reboot_command [];
+
+extern void (*prom_palette)(int);
+extern void (*prom_keyboard)(void);
+
+/* Halt by dropping into the PROM.  Restores the firmware palette and
+ * keyboard handling first.  Does not return; panics if prom_halt does.
+ */
+void machine_halt(void)
+{
+ if (!serial_console && prom_palette)
+ prom_palette (1);
+ if (prom_keyboard)
+ prom_keyboard();
+ prom_halt();
+ panic("Halt failed!");
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+/* Like machine_halt(), but asks the PROM to power the machine off.
+ * Does not return; panics if prom_halt_power_off does.
+ */
+void machine_alt_power_off(void)
+{
+ if (!serial_console && prom_palette)
+ prom_palette(1);
+ if (prom_keyboard)
+ prom_keyboard();
+ prom_halt_power_off();
+ panic("Power-off failed!");
+}
+
+/* Reboot via the PROM.  An explicit cmd from the caller wins over the
+ * saved reboot_command; an empty reboot falls back to prom_reboot("").
+ * Does not return.
+ */
+void machine_restart(char * cmd)
+{
+ char *p;
+
+ /* Strip a trailing newline from the saved reboot command line. */
+ p = strchr (reboot_command, '\n');
+ if (p) *p = 0;
+ if (!serial_console && prom_palette)
+ prom_palette (1);
+ if (prom_keyboard)
+ prom_keyboard();
+ if (cmd)
+ prom_reboot(cmd);
+ if (*reboot_command)
+ prom_reboot(reboot_command);
+ prom_reboot("");
+ panic("Reboot failed!");
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+/* Dump the 32-bit user register window pointed to by %sp (u_regs[14]).
+ * Copies it from user space under USER_DS; silently gives up if the
+ * copy faults.
+ */
+static void show_regwindow32(struct pt_regs *regs)
+{
+ struct reg_window32 __user *rw;
+ struct reg_window32 r_w;
+ mm_segment_t old_fs;
+
+ /* Push live register windows out to the stack before reading it. */
+ __asm__ __volatile__ ("flushw");
+ rw = compat_ptr((unsigned)regs->u_regs[14]);
+ old_fs = get_fs();
+ set_fs (USER_DS);
+ if (copy_from_user (&r_w, rw, sizeof(r_w))) {
+ set_fs (old_fs);
+ return;
+ }
+
+ set_fs (old_fs);
+ printk("l0: %08x l1: %08x l2: %08x l3: %08x "
+ "l4: %08x l5: %08x l6: %08x l7: %08x\n",
+ r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
+ r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
+ printk("i0: %08x i1: %08x i2: %08x i3: %08x "
+ "i4: %08x i5: %08x i6: %08x i7: %08x\n",
+ r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
+ r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
+}
+
+/* Dump the register window behind %sp for a trap frame.  Kernel and
+ * 64-bit user frames are read directly (user ones via copy_from_user
+ * with the STACK_BIAS applied); 32-bit user tasks are delegated to
+ * show_regwindow32().  For kernel frames, also symbolize %i7.
+ */
+static void show_regwindow(struct pt_regs *regs)
+{
+ struct reg_window __user *rw;
+ struct reg_window *rwk;
+ struct reg_window r_w;
+ mm_segment_t old_fs;
+
+ if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
+ __asm__ __volatile__ ("flushw");
+ rw = (struct reg_window __user *)
+ (regs->u_regs[14] + STACK_BIAS);
+ rwk = (struct reg_window *)
+ (regs->u_regs[14] + STACK_BIAS);
+ if (!(regs->tstate & TSTATE_PRIV)) {
+ /* User frame: fetch a private copy from user space. */
+ old_fs = get_fs();
+ set_fs (USER_DS);
+ if (copy_from_user (&r_w, rw, sizeof(r_w))) {
+ set_fs (old_fs);
+ return;
+ }
+ rwk = &r_w;
+ set_fs (old_fs);
+ }
+ } else {
+ show_regwindow32(regs);
+ return;
+ }
+ printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
+ rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
+ printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
+ rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
+ printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
+ rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+ print_symbol("I7: <%s>\n", rwk->ins[7]);
+}
+
+/* Dump a 64-bit kernel stack frame: locals, ins, struct-return pointer,
+ * register and extended arguments, then every remaining word between
+ * the frame header and the saved frame pointer.
+ * NOTE(review): the trailing do/while assumes the frame is strictly
+ * larger than STACKFRAME_SZ; if size starts at 0 the unsigned
+ * decrement wraps — confirm callers only pass well-formed frames.
+ */
+void show_stackframe(struct sparc_stackf *sf)
+{
+ unsigned long size;
+ unsigned long *stk;
+ int i;
+
+ printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
+ "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
+ sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
+ sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
+ printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
+ "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
+ sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
+ sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
+ printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
+ "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
+ (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
+ sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
+ sf->xxargs[0]);
+ size = ((unsigned long)sf->fp) - ((unsigned long)sf);
+ size -= STACKFRAME_SZ;
+ stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
+ i = 0;
+ do {
+ printk("s%d: %016lx\n", i++, *stk++);
+ } while ((size -= sizeof(unsigned long)));
+}
+
+/* 32-bit variant of show_stackframe(): dumps a compat stack frame with
+ * 32-bit slots.  Same caveat as show_stackframe() about the trailing
+ * do/while assuming size > STACKFRAME32_SZ.
+ */
+void show_stackframe32(struct sparc_stackf32 *sf)
+{
+ unsigned long size;
+ unsigned *stk;
+ int i;
+
+ printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
+ sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
+ printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
+ sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
+ printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
+ sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
+ printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
+ sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
+ printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
+ "x3: %08x x4: %08x x5: %08x xx: %08x\n",
+ sf->structptr, sf->xargs[0], sf->xargs[1],
+ sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
+ sf->xxargs[0]);
+ size = ((unsigned long)sf->fp) - ((unsigned long)sf);
+ size -= STACKFRAME32_SZ;
+ stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
+ i = 0;
+ do {
+ printk("s%d: %08x\n", i++, *stk++);
+ } while ((size -= sizeof(unsigned)));
+}
+
+#ifdef CONFIG_SMP
+/* Serializes register dumps from multiple cpus so output interleaves
+ * per-dump rather than per-line.
+ */
+static DEFINE_SPINLOCK(regdump_lock);
+#endif
+
+/* Print the full trap-state (TSTATE/TPC/TNPC/Y), all globals and outs,
+ * and the register window behind %sp.  On SMP, local interrupt delivery
+ * is turned off (PSTATE_IE toggled via wrpr) around regdump_lock, and
+ * the saved %pstate is restored afterwards.
+ */
+void __show_regs(struct pt_regs * regs)
+{
+#ifdef CONFIG_SMP
+ unsigned long flags;
+
+ /* Protect against xcall ipis which might lead to livelock on the lock */
+ __asm__ __volatile__("rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (flags)
+ : "i" (PSTATE_IE));
+ spin_lock(&regdump_lock);
+#endif
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+ print_symbol("TPC: <%s>\n", regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+ printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
+ regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
+ regs->u_regs[7]);
+ printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
+ regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
+ regs->u_regs[11]);
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+ print_symbol("RPC: <%s>\n", regs->u_regs[15]);
+ show_regwindow(regs);
+#ifdef CONFIG_SMP
+ spin_unlock(&regdump_lock);
+ __asm__ __volatile__("wrpr %0, 0, %%pstate"
+ : : "r" (flags));
+#endif
+}
+
+#ifdef VERBOSE_SHOWREGS
+/* Debug helper: print the nine instruction words around a user PC,
+ * bracketing the faulting one with '<' and '>'.  Bails out if the PC
+ * is not 4-byte aligned.
+ */
+static void idump_from_user (unsigned int *pc)
+{
+ int i;
+ int code;
+
+ if((((unsigned long) pc) & 3))
+ return;
+
+ pc -= 3;
+ for(i = -3; i < 6; i++) {
+ get_user(code, pc);
+ printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
+ pc++;
+ }
+ printk("\n");
+}
+#endif
+
+/* Public register-dump entry point.  Dumps this cpu's regs, then (SMP)
+ * asks all other cpus to report theirs.  With VERBOSE_SHOWREGS, if we
+ * trapped inside the etrap path with %sp still on the current task's
+ * kernel stack, also dumps the parent trap frame.
+ */
+void show_regs(struct pt_regs *regs)
+{
+#ifdef VERBOSE_SHOWREGS
+ extern long etrap, etraptl1;
+#endif
+ __show_regs(regs);
+#ifdef CONFIG_SMP
+ {
+ extern void smp_report_regs(void);
+
+ smp_report_regs();
+ }
+#endif
+
+#ifdef VERBOSE_SHOWREGS
+ if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
+ regs->u_regs[14] >= (long)current - PAGE_SIZE &&
+ regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
+ printk ("*********parent**********\n");
+ __show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
+ idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
+ printk ("*********endpar**********\n");
+ }
+#endif
+}
+
+/* Dump a 32-bit (compat) pt_regs: PSR/PC/NPC/Y plus globals and outs.
+ * No register-window dump here, unlike the 64-bit show_regs().
+ */
+void show_regs32(struct pt_regs32 *regs)
+{
+ printk("PSR: %08x PC: %08x NPC: %08x Y: %08x %s\n", regs->psr,
+ regs->pc, regs->npc, regs->y, print_tainted());
+ printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+ printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
+ regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
+ regs->u_regs[7]);
+ printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
+ regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
+ regs->u_regs[11]);
+ printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+}
+
+/* Best-effort "where was this task last executing": follow the task's
+ * saved kernel %sp one frame up and return that frame's return address
+ * (slot 15).  Alignment-checks every pointer on the way; returns the
+ * 0xdeadbeef sentinel if the stack looks bogus.
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ struct thread_info *ti = tsk->thread_info;
+ unsigned long ret = 0xdeadbeefUL;
+
+ if (ti && ti->ksp) {
+ unsigned long *sp;
+ sp = (unsigned long *)(ti->ksp + STACK_BIAS);
+ if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
+ sp[14]) {
+ unsigned long *fp;
+ fp = (unsigned long *)(sp[14] + STACK_BIAS);
+ if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
+ ret = fp[15];
+ }
+ }
+ return ret;
+}
+
+/* Free current thread data structures etc.. */
+void exit_thread(void)
+{
+ struct thread_info *t = current_thread_info();
+
+ /* utraps[0] is a use count shared with clones (see copy_thread);
+ * only the last user actually frees the table.
+ */
+ if (t->utraps) {
+ if (t->utraps[0] < 2)
+ kfree (t->utraps);
+ else
+ t->utraps[0]--;
+ }
+
+ /* Tear down user performance-counter state if it was enabled. */
+ if (test_and_clear_thread_flag(TIF_PERFCTR)) {
+ t->user_cntd0 = t->user_cntd1 = NULL;
+ t->pcr_reg = 0;
+ write_pcr(0);
+ }
+}
+
+/* Reset the current thread's state at exec time: commit a pending ABI
+ * switch, reload the MMU's pgd cache, drop buffered register windows,
+ * stop performance counters, and clear FPU and signal state.
+ */
+void flush_thread(void)
+{
+ struct thread_info *t = current_thread_info();
+
+ /* Exec of the other-ABI binary: flip the 32BIT flag now. */
+ if (t->flags & _TIF_ABI_PENDING)
+ t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
+
+ if (t->task->mm) {
+ unsigned long pgd_cache = 0UL;
+ if (test_thread_flag(TIF_32BIT)) {
+ struct mm_struct *mm = t->task->mm;
+ pgd_t *pgd0 = &mm->pgd[0];
+ pud_t *pud0 = pud_offset(pgd0, 0);
+
+ /* Make sure the low pud exists so the pgd cache
+ * value below is meaningful for a 32-bit task.
+ */
+ if (pud_none(*pud0)) {
+ pmd_t *page = pmd_alloc_one(mm, 0);
+ pud_set(pud0, page);
+ }
+ pgd_cache = get_pgd_cache(pgd0);
+ }
+ /* Store the pgd cache value into the D-MMU TSB register. */
+ __asm__ __volatile__("stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (pgd_cache),
+ "r" (TSB_REG),
+ "i" (ASI_DMMU));
+ }
+ set_thread_wsaved(0);
+
+ /* Turn off performance counters if on. */
+ if (test_and_clear_thread_flag(TIF_PERFCTR)) {
+ t->user_cntd0 = t->user_cntd1 = NULL;
+ t->pcr_reg = 0;
+ write_pcr(0);
+ }
+
+ /* Clear FPU register state. */
+ t->fpsaved[0] = 0;
+
+ if (get_thread_current_ds() != ASI_AIUS)
+ set_fs(USER_DS);
+
+ /* Init new signal delivery disposition. */
+ clear_thread_flag(TIF_NEWSIGNALS);
+}
+
+/* It's a bit more tricky when 64-bit tasks are involved... */
+/* Build the child's copy of the parent's topmost stack region for
+ * clone() with a caller-supplied stack: copy [psp, fp) to just below
+ * csp and rewrite the saved frame pointer (ins[6]) to point into the
+ * new stack.  Returns the child's new %sp value (biased for 64-bit
+ * tasks), or 0 if any user access faults.
+ */
+static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
+{
+ unsigned long fp, distance, rval;
+
+ if (!(test_thread_flag(TIF_32BIT))) {
+ csp += STACK_BIAS;
+ psp += STACK_BIAS;
+ __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
+ fp += STACK_BIAS;
+ } else
+ __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
+
+ /* Now 8-byte align the stack as this is mandatory in the
+ * Sparc ABI due to how register windows work. This hides
+ * the restriction from thread libraries etc. -DaveM
+ */
+ csp &= ~7UL;
+
+ distance = fp - psp;
+ rval = (csp - distance);
+ if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
+ rval = 0;
+ else if (test_thread_flag(TIF_32BIT)) {
+ if (put_user(((u32)csp),
+ &(((struct reg_window32 __user *)rval)->ins[6])))
+ rval = 0;
+ } else {
+ if (put_user(((u64)csp - STACK_BIAS),
+ &(((struct reg_window __user *)rval)->ins[6])))
+ rval = 0;
+ else
+ rval = rval - STACK_BIAS;
+ }
+
+ return rval;
+}
+
+/* Standard stuff. */
+/* Slide the buffered user windows (and their stack pointers) in
+ * [first_win+1, last_win] down one slot, dropping entry first_win.
+ */
+static inline void shift_window_buffer(int first_win, int last_win,
+ struct thread_info *t)
+{
+ int i;
+
+ for (i = first_win; i < last_win; i++) {
+ t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
+ memcpy(&t->reg_window[i], &t->reg_window[i+1],
+ sizeof(struct reg_window));
+ }
+}
+
+/* Best-effort flush of kernel-buffered user register windows out to
+ * the user stack.  Windows whose copy_to_user succeeds are removed
+ * from the buffer; ones that fault are simply left buffered.
+ */
+void synchronize_user_stack(void)
+{
+ struct thread_info *t = current_thread_info();
+ unsigned long window;
+
+ flush_user_windows();
+ if ((window = get_thread_wsaved()) != 0) {
+ int winsize = sizeof(struct reg_window);
+ int bias = 0;
+
+ /* 32-bit tasks use the smaller window and an unbiased sp. */
+ if (test_thread_flag(TIF_32BIT))
+ winsize = sizeof(struct reg_window32);
+ else
+ bias = STACK_BIAS;
+
+ window -= 1;
+ do {
+ unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
+ struct reg_window *rwin = &t->reg_window[window];
+
+ if (!copy_to_user((char __user *)sp, rwin, winsize)) {
+ shift_window_buffer(window, get_thread_wsaved() - 1, t);
+ set_thread_wsaved(get_thread_wsaved() - 1);
+ }
+ } while (window--);
+ }
+}
+
+/* Like synchronize_user_stack(), but mandatory: every buffered user
+ * window must reach the user stack.  A fault records how many windows
+ * remain and kills the task with SIGILL.
+ */
+void fault_in_user_windows(void)
+{
+ struct thread_info *t = current_thread_info();
+ unsigned long window;
+ int winsize = sizeof(struct reg_window);
+ int bias = 0;
+
+ if (test_thread_flag(TIF_32BIT))
+ winsize = sizeof(struct reg_window32);
+ else
+ bias = STACK_BIAS;
+
+ flush_user_windows();
+ window = get_thread_wsaved();
+
+ if (window != 0) {
+ window -= 1;
+ do {
+ unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
+ struct reg_window *rwin = &t->reg_window[window];
+
+ if (copy_to_user((char __user *)sp, rwin, winsize))
+ goto barf;
+ } while (window--);
+ }
+ set_thread_wsaved(0);
+ return;
+
+barf:
+ /* Remember how many windows are still unsaved, then die. */
+ set_thread_wsaved(window + 1);
+ do_exit(SIGILL);
+}
+
+/* Common fork/vfork/clone entry from the syscall stubs.  Extracts the
+ * parent/child TID pointers from %i2/%i4 of the trap frame (narrowed
+ * through compat_ptr for 32-bit tasks) and hands off to do_fork().
+ */
+asmlinkage long sparc_do_fork(unsigned long clone_flags,
+ unsigned long stack_start,
+ struct pt_regs *regs,
+ unsigned long stack_size)
+{
+ int __user *parent_tid_ptr, *child_tid_ptr;
+
+#ifdef CONFIG_COMPAT
+ if (test_thread_flag(TIF_32BIT)) {
+ parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
+ child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
+ } else
+#endif
+ {
+ parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
+ child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
+ }
+
+ return do_fork(clone_flags, stack_start,
+ regs, stack_size,
+ parent_tid_ptr, child_tid_ptr);
+}
+
+/* Copy a Sparc thread. The fork() return value conventions
+ * under SunOS are nothing short of bletcherous:
+ * Parent --> %o0 == childs pid, %o1 == 0
+ * Child --> %o0 == parents pid, %o1 == 1
+ */
+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+ unsigned long unused,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct thread_info *t = p->thread_info;
+ char *child_trap_frame;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ p->thread.smp_lock_count = 0;
+ p->thread.smp_lock_pc = 0;
+#endif
+
+ /* Calculate offset to stack_frame & pt_regs */
+ child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
+ memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
+
+ /* Rebuild the child's flag word: keep everything except the CWP
+ * and current-DS fields, mark it a fresh child, and record the
+ * window pointer the child will run with.
+ */
+ t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
+ _TIF_NEWCHILD |
+ (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
+ t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
+ t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
+ t->fpsaved[0] = 0;
+
+ if (regs->tstate & TSTATE_PRIV) {
+ /* Special case, if we are spawning a kernel thread from
+ * a userspace task (via KMOD, NFS, or similar) we must
+ * disable performance counters in the child because the
+ * address space and protection realm are changing.
+ */
+ if (t->flags & _TIF_PERFCTR) {
+ t->user_cntd0 = t->user_cntd1 = NULL;
+ t->pcr_reg = 0;
+ t->flags &= ~_TIF_PERFCTR;
+ }
+ t->kregs->u_regs[UREG_FP] = t->ksp;
+ t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
+ flush_register_windows();
+ memcpy((void *)(t->ksp + STACK_BIAS),
+ (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
+ sizeof(struct sparc_stackf));
+ t->kregs->u_regs[UREG_G6] = (unsigned long) t;
+ t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
+ } else {
+ /* User child: point it at its (possibly new) user stack. */
+ if (t->flags & _TIF_32BIT) {
+ sp &= 0x00000000ffffffffUL;
+ regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+ }
+ t->kregs->u_regs[UREG_FP] = sp;
+ t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
+ if (sp != regs->u_regs[UREG_FP]) {
+ unsigned long csp;
+
+ csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
+ if (!csp)
+ return -EFAULT;
+ t->kregs->u_regs[UREG_FP] = csp;
+ }
+ /* Child shares the utrap table; bump its use count. */
+ if (t->utraps)
+ t->utraps[0]++;
+ }
+
+ /* Set the return value for the child. */
+ t->kregs->u_regs[UREG_I0] = current->pid;
+ t->kregs->u_regs[UREG_I1] = 1;
+
+ /* Set the second return value for the parent. */
+ regs->u_regs[UREG_I1] = 0;
+
+ if (clone_flags & CLONE_SETTLS)
+ t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
+
+ return 0;
+}
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process(ie the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be free'd until both the parent and the child have exited.
+ *
+ * CLONE_VM | CLONE_UNTRACED are forced on top of the caller's flags;
+ * the clone and exit are issued directly via trap 0x6d.
+ */
+pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ long retval;
+
+ /* If the parent runs before fn(arg) is called by the child,
+ * the input registers of this function can be clobbered.
+ * So we stash 'fn' and 'arg' into global registers which
+ * will not be modified by the parent.
+ */
+ __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */
+ "mov %5, %%g3\n\t" /* Save ARG into global */
+ "mov %1, %%g1\n\t" /* Clone syscall nr. */
+ "mov %2, %%o0\n\t" /* Clone flags. */
+ "mov 0, %%o1\n\t" /* usp arg == 0 */
+ "t 0x6d\n\t" /* Linux/Sparc clone(). */
+ "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
+ " mov %%o0, %0\n\t"
+ "jmpl %%g2, %%o7\n\t" /* Call the function. */
+ " mov %%g3, %%o0\n\t" /* Set arg in delay. */
+ "mov %3, %%g1\n\t"
+ "t 0x6d\n\t" /* Linux/Sparc exit(). */
+ /* Notreached by child. */
+ "1:" :
+ "=r" (retval) :
+ "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
+ "i" (__NR_exit), "r" (fn), "r" (arg) :
+ "g1", "g2", "g3", "o0", "o1", "memory", "cc");
+ return retval;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ * Stub: the structure is simply zero-filled.
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ /* Only should be used for SunOS and ancient a.out
+ * SparcLinux binaries... Not worth implementing.
+ */
+ memset(dump, 0, sizeof(struct user));
+}
+
+/* 32-bit compat layout of the ELF FPU register set written by
+ * dump_fpu() for 32-bit tasks: 32 single / 16 double FP regs, a
+ * 32-bit %fsr, FP-queue bookkeeping, and an FPU-enabled flag.
+ */
+typedef struct {
+ union {
+ unsigned int pr_regs[32];
+ unsigned long pr_dregs[16];
+ } pr_fr;
+ unsigned int __unused;
+ unsigned int pr_fsr;
+ unsigned char pr_qcnt;
+ unsigned char pr_q_entrysize;
+ unsigned char pr_en;
+ unsigned int pr_q[64];
+} elf_fpregset_t32;
+
+/*
+ * fill in the fpu structure for a core dump.
+ * Copies the thread's saved FP registers into the ELF fpregset, using
+ * the 32-bit layout for compat tasks.  fpsaved[0] (FPRS bits) says
+ * which halves of the register file and which status regs are valid;
+ * anything unsaved is zero-filled.  Always returns 1 (regs present).
+ */
+int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
+{
+ unsigned long *kfpregs = current_thread_info()->fpregs;
+ unsigned long fprs = current_thread_info()->fpsaved[0];
+
+ if (test_thread_flag(TIF_32BIT)) {
+ elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
+
+ /* Lower half only — that is all a 32-bit task can touch. */
+ if (fprs & FPRS_DL)
+ memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
+ sizeof(unsigned int) * 32);
+ else
+ memset(&fpregs32->pr_fr.pr_regs[0], 0,
+ sizeof(unsigned int) * 32);
+ fpregs32->pr_qcnt = 0;
+ fpregs32->pr_q_entrysize = 8;
+ memset(&fpregs32->pr_q[0], 0,
+ (sizeof(unsigned int) * 64));
+ if (fprs & FPRS_FEF) {
+ fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
+ fpregs32->pr_en = 1;
+ } else {
+ fpregs32->pr_fsr = 0;
+ fpregs32->pr_en = 0;
+ }
+ } else {
+ if(fprs & FPRS_DL)
+ memcpy(&fpregs->pr_regs[0], kfpregs,
+ sizeof(unsigned int) * 32);
+ else
+ memset(&fpregs->pr_regs[0], 0,
+ sizeof(unsigned int) * 32);
+ if(fprs & FPRS_DU)
+ memcpy(&fpregs->pr_regs[16], kfpregs+16,
+ sizeof(unsigned int) * 32);
+ else
+ memset(&fpregs->pr_regs[16], 0,
+ sizeof(unsigned int) * 32);
+ if(fprs & FPRS_FEF) {
+ fpregs->pr_fsr = current_thread_info()->xfsr[0];
+ fpregs->pr_gsr = current_thread_info()->gsr[0];
+ } else {
+ fpregs->pr_fsr = fpregs->pr_gsr = 0;
+ }
+ fpregs->pr_fprs = fprs;
+ }
+ return 1;
+}
+
+/*
+ * sparc_execve() executes a new program after the asm stub has set
+ * things up for us. This should basically do what I want it to.
+ * Arguments live in %i0..%i2 of the trap frame (shifted by one when
+ * %g1 == 0, the "indirect call" convention).  On success the FPU
+ * state is reset and PT_DTRACE cleared.
+ */
+asmlinkage int sparc_execve(struct pt_regs *regs)
+{
+ int error, base = 0;
+ char *filename;
+
+ /* User register window flush is done by entry.S */
+
+ /* Check for indirect call. */
+ if (regs->u_regs[UREG_G1] == 0)
+ base = 1;
+
+ filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename,
+ (char __user * __user *)
+ regs->u_regs[base + UREG_I1],
+ (char __user * __user *)
+ regs->u_regs[base + UREG_I2], regs);
+ putname(filename);
+ if (!error) {
+ /* Fresh image: wipe FPU state and drop the trace flag. */
+ fprs_write(0);
+ current_thread_info()->xfsr[0] = 0;
+ current_thread_info()->fpsaved[0] = 0;
+ regs->tstate &= ~TSTATE_PEF;
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+out:
+ return error;
+}
+
+/* Report where a sleeping task is blocked: walk up to 16 frames of its
+ * kernel stack (bounds-checked against the thread_info area) and
+ * return the first return address outside the scheduler.  Returns 0
+ * for the current/running task or if nothing is found.
+ */
+unsigned long get_wchan(struct task_struct *task)
+{
+ unsigned long pc, fp, bias = 0;
+ unsigned long thread_info_base;
+ struct reg_window *rw;
+ unsigned long ret = 0;
+ int count = 0;
+
+ if (!task || task == current ||
+ task->state == TASK_RUNNING)
+ goto out;
+
+ thread_info_base = (unsigned long) task->thread_info;
+ bias = STACK_BIAS;
+ fp = task->thread_info->ksp + bias;
+
+ do {
+ /* Bogus frame pointer? */
+ if (fp < (thread_info_base + sizeof(struct thread_info)) ||
+ fp >= (thread_info_base + THREAD_SIZE))
+ break;
+ rw = (struct reg_window *) fp;
+ pc = rw->ins[7];
+ if (!in_sched_functions(pc)) {
+ ret = pc;
+ goto out;
+ }
+ fp = rw->ins[6] + bias;
+ } while (++count < 16);
+
+out:
+ return ret;
+}
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
new file mode 100644
index 00000000000..1722dc51b0d
--- /dev/null
+++ b/arch/sparc64/kernel/ptrace.c
@@ -0,0 +1,646 @@
+/* ptrace.c: Sparc process tracing support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
+ * and David Mosberger.
+ *
+ * Added Linux support -miguel (weird, eh?, the original code was meant
+ * to emulate SunOS).
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/security.h>
+
+#include <asm/asi.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/psrcompat.h>
+#include <asm/visasm.h>
+#include <asm/spitfire.h>
+
+/* Returning from ptrace is a bit tricky because the syscall return
+ * low level code assumes any value returned which is negative and
+ * is a valid errno will mean setting the condition codes to indicate
+ * an error return. This doesn't work, so we have this hook.
+ */
+/* Put a positive errno in %i0, set the carry bits to flag the error,
+ * and step the trap PC past the syscall.
+ */
+static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
+{
+ regs->u_regs[UREG_I0] = error;
+ regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+}
+
+/* Success counterpart of pt_error_return(): value in %i0, carry bits
+ * cleared, trap PC advanced.
+ */
+static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
+{
+ regs->u_regs[UREG_I0] = value;
+ regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+}
+
+/* Linux-style peek return: store the value at the user-supplied addr
+ * (32- or 64-bit slot depending on the tracer's ABI) and return 0 in
+ * %i0 with carry cleared.  A faulting store becomes an EFAULT error.
+ */
+static inline void
+pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
+{
+ if (test_thread_flag(TIF_32BIT)) {
+ if (put_user(value, (unsigned int __user *) addr)) {
+ pt_error_return(regs, EFAULT);
+ return;
+ }
+ } else {
+ if (put_user(value, (long __user *) addr)) {
+ pt_error_return(regs, EFAULT);
+ return;
+ }
+ }
+ regs->u_regs[UREG_I0] = 0;
+ regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+}
+
+/* Dispatch a successful peek result by personality: SunOS returns the
+ * value in the register, Linux stores it through addr.
+ */
+static void
+pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
+{
+ if (current->personality == PER_SUNOS)
+ pt_succ_return (regs, val);
+ else
+ pt_succ_return_linux (regs, val, addr);
+}
+
+/* #define ALLOW_INIT_TRACING */
+/* #define DEBUG_PTRACE */
+
+#ifdef DEBUG_PTRACE
+/* Human-readable names for ptrace request codes 0..25, used only by
+ * the DEBUG_PTRACE tracing printk in do_ptrace().
+ */
+char *pt_rq [] = {
+ /* 0 */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
+ /* 4 */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
+ /* 8 */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
+ /* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
+ /* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
+ /* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
+ /* 24 */ "SYSCALL", ""
+};
+#endif
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* nothing to do — no per-task single-step state is kept here */
+}
+
+asmlinkage void do_ptrace(struct pt_regs *regs)
+{
+ int request = regs->u_regs[UREG_I0];
+ pid_t pid = regs->u_regs[UREG_I1];
+ unsigned long addr = regs->u_regs[UREG_I2];
+ unsigned long data = regs->u_regs[UREG_I3];
+ unsigned long addr2 = regs->u_regs[UREG_I4];
+ struct task_struct *child;
+ int ret;
+
+ if (test_thread_flag(TIF_32BIT)) {
+ addr &= 0xffffffffUL;
+ data &= 0xffffffffUL;
+ addr2 &= 0xffffffffUL;
+ }
+ lock_kernel();
+#ifdef DEBUG_PTRACE
+ {
+ char *s;
+
+ if ((request >= 0) && (request <= 24))
+ s = pt_rq [request];
+ else
+ s = "unknown";
+
+ if (request == PTRACE_POKEDATA && data == 0x91d02001){
+ printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
+ pid, addr, addr2);
+ } else
+ printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
+ s, request, pid, addr, data, addr2);
+ }
+#endif
+ if (request == PTRACE_TRACEME) {
+ int ret;
+
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED) {
+ pt_error_return(regs, EPERM);
+ goto out;
+ }
+ ret = security_ptrace(current->parent, current);
+ if (ret) {
+ pt_error_return(regs, -ret);
+ goto out;
+ }
+
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ pt_succ_return(regs, 0);
+ goto out;
+ }
+#ifndef ALLOW_INIT_TRACING
+ if (pid == 1) {
+ /* Can't dork with init. */
+ pt_error_return(regs, EPERM);
+ goto out;
+ }
+#endif
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+
+ if (!child) {
+ pt_error_return(regs, ESRCH);
+ goto out;
+ }
+
+ if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
+ || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
+ if (ptrace_attach(child)) {
+ pt_error_return(regs, EPERM);
+ goto out_tsk;
+ }
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0) {
+ pt_error_return(regs, -ret);
+ goto out_tsk;
+ }
+
+ if (!(test_thread_flag(TIF_32BIT)) &&
+ ((request == PTRACE_READDATA64) ||
+ (request == PTRACE_WRITEDATA64) ||
+ (request == PTRACE_READTEXT64) ||
+ (request == PTRACE_WRITETEXT64) ||
+ (request == PTRACE_PEEKTEXT64) ||
+ (request == PTRACE_POKETEXT64) ||
+ (request == PTRACE_PEEKDATA64) ||
+ (request == PTRACE_POKEDATA64))) {
+ addr = regs->u_regs[UREG_G2];
+ addr2 = regs->u_regs[UREG_G3];
+ request -= 30; /* wheee... */
+ }
+
+ switch(request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp64;
+ unsigned int tmp32;
+ int res, copied;
+
+ res = -EIO;
+ if (test_thread_flag(TIF_32BIT)) {
+ copied = access_process_vm(child, addr,
+ &tmp32, sizeof(tmp32), 0);
+ tmp64 = (unsigned long) tmp32;
+ if (copied == sizeof(tmp32))
+ res = 0;
+ } else {
+ copied = access_process_vm(child, addr,
+ &tmp64, sizeof(tmp64), 0);
+ if (copied == sizeof(tmp64))
+ res = 0;
+ }
+ if (res < 0)
+ pt_error_return(regs, -res);
+ else
+ pt_os_succ_return(regs, tmp64, (void __user *) data);
+ goto flush_and_out;
+ }
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA: {
+ unsigned long tmp64;
+ unsigned int tmp32;
+ int copied, res = -EIO;
+
+ if (test_thread_flag(TIF_32BIT)) {
+ tmp32 = data;
+ copied = access_process_vm(child, addr,
+ &tmp32, sizeof(tmp32), 1);
+ if (copied == sizeof(tmp32))
+ res = 0;
+ } else {
+ tmp64 = data;
+ copied = access_process_vm(child, addr,
+ &tmp64, sizeof(tmp64), 1);
+ if (copied == sizeof(tmp64))
+ res = 0;
+ }
+ if (res < 0)
+ pt_error_return(regs, -res);
+ else
+ pt_succ_return(regs, res);
+ goto flush_and_out;
+ }
+
+ case PTRACE_GETREGS: {
+ struct pt_regs32 __user *pregs =
+ (struct pt_regs32 __user *) addr;
+ struct pt_regs *cregs = child->thread_info->kregs;
+ int rval;
+
+ if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
+ __put_user(cregs->tpc, (&pregs->pc)) ||
+ __put_user(cregs->tnpc, (&pregs->npc)) ||
+ __put_user(cregs->y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ for (rval = 1; rval < 16; rval++)
+ if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ pt_succ_return(regs, 0);
+#ifdef DEBUG_PTRACE
+ printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+#endif
+ goto out_tsk;
+ }
+
+ case PTRACE_GETREGS64: {
+ struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
+ struct pt_regs *cregs = child->thread_info->kregs;
+ unsigned long tpc = cregs->tpc;
+ int rval;
+
+ if ((child->thread_info->flags & _TIF_32BIT) != 0)
+ tpc &= 0xffffffff;
+ if (__put_user(cregs->tstate, (&pregs->tstate)) ||
+ __put_user(tpc, (&pregs->tpc)) ||
+ __put_user(cregs->tnpc, (&pregs->tnpc)) ||
+ __put_user(cregs->y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ for (rval = 1; rval < 16; rval++)
+ if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ pt_succ_return(regs, 0);
+#ifdef DEBUG_PTRACE
+ printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+#endif
+ goto out_tsk;
+ }
+
+ case PTRACE_SETREGS: {
+ struct pt_regs32 __user *pregs =
+ (struct pt_regs32 __user *) addr;
+ struct pt_regs *cregs = child->thread_info->kregs;
+ unsigned int psr, pc, npc, y;
+ int i;
+
+ /* Must be careful, tracing process can only set certain
+ * bits in the psr.
+ */
+ if (__get_user(psr, (&pregs->psr)) ||
+ __get_user(pc, (&pregs->pc)) ||
+ __get_user(npc, (&pregs->npc)) ||
+ __get_user(y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ cregs->tstate &= ~(TSTATE_ICC);
+ cregs->tstate |= psr_to_tstate_icc(psr);
+ if (!((pc | npc) & 3)) {
+ cregs->tpc = pc;
+ cregs->tnpc = npc;
+ }
+ cregs->y = y;
+ for (i = 1; i < 16; i++) {
+ if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ }
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ case PTRACE_SETREGS64: {
+ struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
+ struct pt_regs *cregs = child->thread_info->kregs;
+ unsigned long tstate, tpc, tnpc, y;
+ int i;
+
+ /* Must be careful, tracing process can only set certain
+ * bits in the psr.
+ */
+ if (__get_user(tstate, (&pregs->tstate)) ||
+ __get_user(tpc, (&pregs->tpc)) ||
+ __get_user(tnpc, (&pregs->tnpc)) ||
+ __get_user(y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ if ((child->thread_info->flags & _TIF_32BIT) != 0) {
+ tpc &= 0xffffffff;
+ tnpc &= 0xffffffff;
+ }
+ tstate &= (TSTATE_ICC | TSTATE_XCC);
+ cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
+ cregs->tstate |= tstate;
+ if (!((tpc | tnpc) & 3)) {
+ cregs->tpc = tpc;
+ cregs->tnpc = tnpc;
+ }
+ cregs->y = y;
+ for (i = 1; i < 16; i++) {
+ if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ }
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ case PTRACE_GETFPREGS: {
+ struct fps {
+ unsigned int regs[32];
+ unsigned int fsr;
+ unsigned int flags;
+ unsigned int extra;
+ unsigned int fpqd;
+ struct fq {
+ unsigned int insnaddr;
+ unsigned int insn;
+ } fpq[16];
+ };
+ struct fps __user *fps = (struct fps __user *) addr;
+ unsigned long *fpregs = child->thread_info->fpregs;
+
+ if (copy_to_user(&fps->regs[0], fpregs,
+ (32 * sizeof(unsigned int))) ||
+ __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
+ __put_user(0, (&fps->fpqd)) ||
+ __put_user(0, (&fps->flags)) ||
+ __put_user(0, (&fps->extra)) ||
+ clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ case PTRACE_GETFPREGS64: {
+ struct fps {
+ unsigned int regs[64];
+ unsigned long fsr;
+ };
+ struct fps __user *fps = (struct fps __user *) addr;
+ unsigned long *fpregs = child->thread_info->fpregs;
+
+ if (copy_to_user(&fps->regs[0], fpregs,
+ (64 * sizeof(unsigned int))) ||
+ __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ case PTRACE_SETFPREGS: {
+ struct fps {
+ unsigned int regs[32];
+ unsigned int fsr;
+ unsigned int flags;
+ unsigned int extra;
+ unsigned int fpqd;
+ struct fq {
+ unsigned int insnaddr;
+ unsigned int insn;
+ } fpq[16];
+ };
+ struct fps __user *fps = (struct fps __user *) addr;
+ unsigned long *fpregs = child->thread_info->fpregs;
+ unsigned fsr;
+
+ if (copy_from_user(fpregs, &fps->regs[0],
+ (32 * sizeof(unsigned int))) ||
+ __get_user(fsr, (&fps->fsr))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
+ child->thread_info->xfsr[0] |= fsr;
+ if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
+ child->thread_info->gsr[0] = 0;
+ child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ case PTRACE_SETFPREGS64: {
+ struct fps {
+ unsigned int regs[64];
+ unsigned long fsr;
+ };
+ struct fps __user *fps = (struct fps __user *) addr;
+ unsigned long *fpregs = child->thread_info->fpregs;
+
+ if (copy_from_user(fpregs, &fps->regs[0],
+ (64 * sizeof(unsigned int))) ||
+ __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+ pt_error_return(regs, EFAULT);
+ goto out_tsk;
+ }
+ if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
+ child->thread_info->gsr[0] = 0;
+ child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ case PTRACE_READTEXT:
+ case PTRACE_READDATA: {
+ int res = ptrace_readdata(child, addr,
+ (char __user *)addr2, data);
+ if (res == data) {
+ pt_succ_return(regs, 0);
+ goto flush_and_out;
+ }
+ if (res >= 0)
+ res = -EIO;
+ pt_error_return(regs, -res);
+ goto flush_and_out;
+ }
+
+ case PTRACE_WRITETEXT:
+ case PTRACE_WRITEDATA: {
+ int res = ptrace_writedata(child, (char __user *) addr2,
+ addr, data);
+ if (res == data) {
+ pt_succ_return(regs, 0);
+ goto flush_and_out;
+ }
+ if (res >= 0)
+ res = -EIO;
+ pt_error_return(regs, -res);
+ goto flush_and_out;
+ }
+ case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
+ addr = 1;
+
+ case PTRACE_CONT: { /* restart after signal. */
+ if (data > _NSIG) {
+ pt_error_return(regs, EIO);
+ goto out_tsk;
+ }
+ if (addr != 1) {
+ unsigned long pc_mask = ~0UL;
+
+ if ((child->thread_info->flags & _TIF_32BIT) != 0)
+ pc_mask = 0xffffffff;
+
+ if (addr & 3) {
+ pt_error_return(regs, EINVAL);
+ goto out_tsk;
+ }
+#ifdef DEBUG_PTRACE
+ printk ("Original: %016lx %016lx\n",
+ child->thread_info->kregs->tpc,
+ child->thread_info->kregs->tnpc);
+ printk ("Continuing with %016lx %016lx\n", addr, addr+4);
+#endif
+ child->thread_info->kregs->tpc = (addr & pc_mask);
+ child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
+ }
+
+ if (request == PTRACE_SYSCALL) {
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ } else {
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ }
+
+ child->exit_code = data;
+#ifdef DEBUG_PTRACE
+ printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
+ child->pid, child->exit_code,
+ child->thread_info->kregs->tpc,
+ child->thread_info->kregs->tnpc);
+
+#endif
+ wake_up_process(child);
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+/*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ if (child->exit_state == EXIT_ZOMBIE) { /* already dead */
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+ child->exit_code = SIGKILL;
+ wake_up_process(child);
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ case PTRACE_SUNDETACH: { /* detach a process that was attached. */
+ int error = ptrace_detach(child, data);
+ if (error) {
+ pt_error_return(regs, EIO);
+ goto out_tsk;
+ }
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+
+ /* PTRACE_DUMPCORE unsupported... */
+
+ default: {
+ int err = ptrace_request(child, request, addr, data);
+ if (err)
+ pt_error_return(regs, -err);
+ else
+ pt_succ_return(regs, 0);
+ goto out_tsk;
+ }
+ }
+flush_and_out:
+ {
+ unsigned long va;
+
+ if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ for (va = 0; va < (1 << 16); va += (1 << 5))
+ spitfire_put_dcache_tag(va, 0x0);
+ /* No need to mess with I-cache on Cheetah. */
+ } else {
+ for (va = 0; va < L1DCACHE_SIZE; va += 32)
+ spitfire_put_dcache_tag(va, 0x0);
+ if (request == PTRACE_PEEKTEXT ||
+ request == PTRACE_POKETEXT ||
+ request == PTRACE_READTEXT ||
+ request == PTRACE_WRITETEXT) {
+ for (va = 0; va < (PAGE_SIZE << 1); va += 32)
+ spitfire_put_icache_tag(va, 0x0);
+ __asm__ __volatile__("flush %g6");
+ }
+ }
+ }
+out_tsk:
+ if (child)
+ put_task_struct(child);
+out:
+ unlock_kernel();
+}
+
+/* Invoked from the syscall trap path (asmlinkage) when syscall tracing
+ * may be active.  If TIF_SYSCALL_TRACE is set and the task is actually
+ * ptraced, stop and notify the tracer with SIGTRAP (ORed with 0x80 when
+ * the tracer requested PT_TRACESYSGOOD), then deliver any signal the
+ * tracer injected via current->exit_code.
+ */
+asmlinkage void syscall_trace(void)
+{
+#ifdef DEBUG_PTRACE
+	printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
+#endif
+	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+		return;
+	if (!(current->ptrace & PT_PTRACED))
+		return;
+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+				 ? 0x80 : 0));
+
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+#ifdef DEBUG_PTRACE
+	printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
+		current->pid, current->exit_code);
+#endif
+	if (current->exit_code) {
+		/* Tracer asked us to (re)deliver a signal on resume. */
+		send_sig (current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
new file mode 100644
index 00000000000..0696ed4b9d6
--- /dev/null
+++ b/arch/sparc64/kernel/rtrap.S
@@ -0,0 +1,362 @@
+/* $Id: rtrap.S,v 1.61 2002/02/09 19:49:31 davem Exp $
+ * rtrap.S: Preparing for return from trap on Sparc V9.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+#include <asm/visasm.h>
+#include <asm/processor.h>
+
+#define RTRAP_PSTATE (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define RTRAP_PSTATE_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
+#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
+ /* Register %l6 keeps track of whether we are returning
+ * from a system call or not. It is cleared if we call
+ * do_notify_resume, and it must not be otherwise modified
+ * until we fully commit to returning to userspace.
+ */
+
+	.text
+	.align		32
+/* Out-of-line slow paths for the rtrap sequence below.  Each
+ * __handle_* routine re-enables interrupts (RTRAP_PSTATE) around its
+ * C call, disables them again (RTRAP_PSTATE_IRQOFF), and branches back
+ * to the matching __handle_*_continue label in the main path.
+ */
+__handle_softirq:
+	call		do_softirq
+	 nop
+	ba,a,pt		%xcc, __handle_softirq_continue
+	 nop
+__handle_preemption:
+	call		schedule
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	ba,pt		%xcc, __handle_preemption_continue
+	 wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+
+__handle_user_windows:
+	call		fault_in_user_windows
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	/* Redo sched+sig checks */
+	ldx		[%g6 + TI_FLAGS], %l0
+	andcc		%l0, _TIF_NEED_RESCHED, %g0
+
+	be,pt		%xcc, 1f
+	 nop
+	call		schedule
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	ldx		[%g6 + TI_FLAGS], %l0
+
+1:	andcc		%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+	be,pt		%xcc, __handle_user_windows_continue
+	 nop
+	clr		%o0
+	mov		%l5, %o2
+	mov		%l6, %o3
+	add		%sp, PTREGS_OFF, %o1
+	mov		%l0, %o4
+
+	call		do_notify_resume
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	clr		%l6
+	/* Signal delivery can modify pt_regs tstate, so we must
+	 * reload it.
+	 */
+	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+	sethi		%hi(0xf << 20), %l4
+	and		%l1, %l4, %l4
+	ba,pt		%xcc, __handle_user_windows_continue
+
+	 andn		%l1, %l4, %l1
+__handle_perfctrs:
+	call		update_perfctrs
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	ldub		[%g6 + TI_WSAVED], %o2
+	brz,pt		%o2, 1f
+	 nop
+	/* Redo userwin+sched+sig checks */
+	call		fault_in_user_windows
+
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	ldx		[%g6 + TI_FLAGS], %l0
+	andcc		%l0, _TIF_NEED_RESCHED, %g0
+	be,pt		%xcc, 1f
+
+	 nop
+	call		schedule
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	ldx		[%g6 + TI_FLAGS], %l0
+1:	andcc		%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+
+	be,pt		%xcc, __handle_perfctrs_continue
+	 sethi		%hi(TSTATE_PEF), %o0
+	clr		%o0
+	mov		%l5, %o2
+	mov		%l6, %o3
+	add		%sp, PTREGS_OFF, %o1
+	mov		%l0, %o4
+	call		do_notify_resume
+
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	clr		%l6
+	/* Signal delivery can modify pt_regs tstate, so we must
+	 * reload it.
+	 */
+	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+	sethi		%hi(0xf << 20), %l4
+	and		%l1, %l4, %l4
+	andn		%l1, %l4, %l1
+	ba,pt		%xcc, __handle_perfctrs_continue
+
+	 sethi		%hi(TSTATE_PEF), %o0
+__handle_userfpu:
+	rd		%fprs, %l5
+	andcc		%l5, FPRS_FEF, %g0
+	sethi		%hi(TSTATE_PEF), %o0
+	be,a,pn		%icc, __handle_userfpu_continue
+	 andn		%l1, %o0, %l1
+	ba,a,pt		%xcc, __handle_userfpu_continue
+
+__handle_signal:
+	clr		%o0
+	mov		%l5, %o2
+	mov		%l6, %o3
+	add		%sp, PTREGS_OFF, %o1
+	mov		%l0, %o4
+	call		do_notify_resume
+	 wrpr		%g0, RTRAP_PSTATE, %pstate
+	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	clr		%l6
+
+	/* Signal delivery can modify pt_regs tstate, so we must
+	 * reload it.
+	 */
+	ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+	sethi		%hi(0xf << 20), %l4
+	and		%l1, %l4, %l4
+	ba,pt		%xcc, __handle_signal_continue
+	 andn		%l1, %l4, %l1
+
+	.align			64
+	.globl			rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
+/* Main return-from-trap path.  %l1 holds pt_regs tstate, %l3 the
+ * TSTATE_PRIV test result (nonzero => returning to kernel), %l4 the
+ * PIL field extracted from tstate, %l6 the in-syscall flag.
+ */
+rtrap_irq:
+rtrap_clr_l6:	clr			%l6
+rtrap:
+	ldub			[%g6 + TI_CPU], %l0
+	sethi			%hi(irq_stat), %l2	! &softirq_active
+	or			%l2, %lo(irq_stat), %l2	! &softirq_active
+irqsz_patchme:	sllx			%l0, 0, %l0
+	lduw			[%l2 + %l0], %l1	! softirq_pending
+	cmp			%l1, 0
+
+	/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
+	bne,pn			%icc, __handle_softirq
+	 ldx			[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+__handle_softirq_continue:
+rtrap_xcall:
+	sethi			%hi(0xf << 20), %l4
+	andcc			%l1, TSTATE_PRIV, %l3
+	and			%l1, %l4, %l4
+	bne,pn			%icc, to_kernel
+	 andn			%l1, %l4, %l1
+
+	/* We must hold IRQs off and atomically test schedule+signal
+	 * state, then hold them off all the way back to userspace.
+	 * If we are returning to kernel, none of this matters.
+	 *
+	 * If we do not do this, there is a window where we would do
+	 * the tests, later the signal/resched event arrives but we do
+	 * not process it since we are still in kernel mode.  It would
+	 * take until the next local IRQ before the signal/resched
+	 * event would be handled.
+	 *
+	 * This also means that if we have to deal with performance
+	 * counters or user windows, we have to redo all of these
+	 * sched+signal checks with IRQs disabled.
+	 */
+to_user:	wrpr		%g0, RTRAP_PSTATE_IRQOFF, %pstate
+		wrpr		0, %pil
+__handle_preemption_continue:
+	ldx			[%g6 + TI_FLAGS], %l0
+	sethi			%hi(_TIF_USER_WORK_MASK), %o0
+	or			%o0, %lo(_TIF_USER_WORK_MASK), %o0
+	andcc			%l0, %o0, %g0
+	sethi			%hi(TSTATE_PEF), %o0
+	be,pt			%xcc, user_nowork
+	 andcc			%l1, %o0, %g0
+	andcc			%l0, _TIF_NEED_RESCHED, %g0
+	bne,pn			%xcc, __handle_preemption
+	 andcc			%l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+	bne,pn			%xcc, __handle_signal
+__handle_signal_continue:
+	ldub			[%g6 + TI_WSAVED], %o2
+	brnz,pn			%o2, __handle_user_windows
+	 nop
+__handle_user_windows_continue:
+	ldx			[%g6 + TI_FLAGS], %l5
+	andcc			%l5, _TIF_PERFCTR, %g0
+	sethi			%hi(TSTATE_PEF), %o0
+	bne,pn			%xcc, __handle_perfctrs
+__handle_perfctrs_continue:
+	 andcc			%l1, %o0, %g0
+
+	/* This fpdepth clear is necessary for non-syscall rtraps only */
+user_nowork:
+	bne,pn			%xcc, __handle_userfpu
+	 stb			%g0, [%g6 + TI_FPDEPTH]
+__handle_userfpu_continue:
+
+/* Restore the full register frame from pt_regs and return via retry. */
+rt_continue:	ldx		[%sp + PTREGS_OFF + PT_V9_G1], %g1
+		ldx		[%sp + PTREGS_OFF + PT_V9_G2], %g2
+
+		ldx		[%sp + PTREGS_OFF + PT_V9_G3], %g3
+		ldx		[%sp + PTREGS_OFF + PT_V9_G4], %g4
+		ldx		[%sp + PTREGS_OFF + PT_V9_G5], %g5
+		mov		TSB_REG, %g6
+		brnz,a,pn	%l3, 1f
+		 ldxa		[%g6] ASI_IMMU, %g5
+1:		ldx		[%sp + PTREGS_OFF + PT_V9_G6], %g6
+		ldx		[%sp + PTREGS_OFF + PT_V9_G7], %g7
+		wrpr		%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
+		ldx		[%sp + PTREGS_OFF + PT_V9_I0], %i0
+		ldx		[%sp + PTREGS_OFF + PT_V9_I1], %i1
+
+		ldx		[%sp + PTREGS_OFF + PT_V9_I2], %i2
+		ldx		[%sp + PTREGS_OFF + PT_V9_I3], %i3
+		ldx		[%sp + PTREGS_OFF + PT_V9_I4], %i4
+		ldx		[%sp + PTREGS_OFF + PT_V9_I5], %i5
+		ldx		[%sp + PTREGS_OFF + PT_V9_I6], %i6
+		ldx		[%sp + PTREGS_OFF + PT_V9_I7], %i7
+		ldx		[%sp + PTREGS_OFF + PT_V9_TPC], %l2
+		ldx		[%sp + PTREGS_OFF + PT_V9_TNPC], %o2
+
+		ld		[%sp + PTREGS_OFF + PT_V9_Y], %o3
+		wr		%o3, %g0, %y
+		srl		%l4, 20, %l4
+		wrpr		%l4, 0x0, %pil
+		wrpr		%g0, 0x1, %tl
+		wrpr		%l1, %g0, %tstate
+		wrpr		%l2, %g0, %tpc
+		wrpr		%o2, %g0, %tnpc
+
+		brnz,pn		%l3, kern_rtt
+		 mov		PRIMARY_CONTEXT, %l7
+		ldxa		[%l7 + %l7] ASI_DMMU, %l0
+/* Patched at boot on Cheetah+ (see cheetah_plus_patch_rtrap below). */
+cplus_rtrap_insn_1:
+		sethi		%hi(0), %l1
+		sllx		%l1, 32, %l1
+		or		%l0, %l1, %l0
+		stxa		%l0, [%l7] ASI_DMMU
+		flush		%g6
+		rdpr		%wstate, %l1
+		rdpr		%otherwin, %l2
+		srl		%l1, 3, %l1
+
+		wrpr		%l2, %g0, %canrestore
+		wrpr		%l1, %g0, %wstate
+		wrpr		%g0, %g0, %otherwin
+		restore
+		rdpr		%canrestore, %g1
+		wrpr		%g1, 0x0, %cleanwin
+		retry
+		nop
+
+kern_rtt:	restore
+		retry
+/* Return-to-kernel path: optional kernel preemption, then restore any
+ * saved FPU state recorded in TI_FPDEPTH before rejoining rt_continue.
+ */
+to_kernel:
+#ifdef CONFIG_PREEMPT
+	ldsw			[%g6 + TI_PRE_COUNT], %l5
+	brnz			%l5, kern_fpucheck
+	 ldx			[%g6 + TI_FLAGS], %l5
+	andcc			%l5, _TIF_NEED_RESCHED, %g0
+	be,pt			%xcc, kern_fpucheck
+	 srl			%l4, 20, %l5
+	cmp			%l5, 0
+	bne,pn			%xcc, kern_fpucheck
+	 sethi			%hi(PREEMPT_ACTIVE), %l6
+	stw			%l6, [%g6 + TI_PRE_COUNT]
+	call			schedule
+	 nop
+	ba,pt			%xcc, rtrap
+	 stw			%g0, [%g6 + TI_PRE_COUNT]
+#endif
+kern_fpucheck:	ldub		[%g6 + TI_FPDEPTH], %l5
+		brz,pt		%l5, rt_continue
+		 srl		%l5, 1, %o0
+		add		%g6, TI_FPSAVED, %l6
+		ldub		[%l6 + %o0], %l2
+		sub		%l5, 2, %l5
+
+		add		%g6, TI_GSR, %o1
+		andcc		%l2, (FPRS_FEF|FPRS_DU), %g0
+		be,pt		%icc, 2f
+		 and		%l2, FPRS_DL, %l6
+		andcc		%l2, FPRS_FEF, %g0
+		be,pn		%icc, 5f
+		 sll		%o0, 3, %o5
+		rd		%fprs, %g1
+
+		wr		%g1, FPRS_FEF, %fprs
+		ldx		[%o1 + %o5], %g1
+		add		%g6, TI_XFSR, %o1
+		membar		#StoreLoad | #LoadLoad
+		sll		%o0, 8, %o2
+		add		%g6, TI_FPREGS, %o3
+		brz,pn		%l6, 1f
+		 add		%g6, TI_FPREGS+0x40, %o4
+
+		/* Reload lower half (%f0-%f31) via block loads. */
+		ldda		[%o3 + %o2] ASI_BLK_P, %f0
+		ldda		[%o4 + %o2] ASI_BLK_P, %f16
+1:		andcc		%l2, FPRS_DU, %g0
+		be,pn		%icc, 1f
+		 wr		%g1, 0, %gsr
+		add		%o2, 0x80, %o2
+		/* Reload upper half (%f32-%f63). */
+		ldda		[%o3 + %o2] ASI_BLK_P, %f32
+		ldda		[%o4 + %o2] ASI_BLK_P, %f48
+
+1:		membar		#Sync
+		ldx		[%o1 + %o5], %fsr
+2:		stb		%l5, [%g6 + TI_FPDEPTH]
+		ba,pt		%xcc, rt_continue
+		 nop
+5:		wr		%g0, FPRS_FEF, %fprs
+		membar		#StoreLoad | #LoadLoad
+		sll		%o0, 8, %o2
+
+		add		%g6, TI_FPREGS+0x80, %o3
+		add		%g6, TI_FPREGS+0xc0, %o4
+		ldda		[%o3 + %o2] ASI_BLK_P, %f32
+		ldda		[%o4 + %o2] ASI_BLK_P, %f48
+		membar		#Sync
+		wr		%g0, FPRS_DU, %fprs
+		ba,pt		%xcc, rt_continue
+		 stb		%l5, [%g6 + TI_FPDEPTH]
+
+/* Replacement instruction stored into cplus_rtrap_insn_1 at boot on
+ * Cheetah+ CPUs by cheetah_plus_patch_rtrap below.
+ */
+cplus_rinsn_1:
+	sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %l1
+
+	.globl			cheetah_plus_patch_rtrap
+cheetah_plus_patch_rtrap:
+	/* We configure the dTLB512_0 for 4MB pages and the
+	 * dTLB512_1 for 8K pages when in context zero.
+	 */
+	sethi			%hi(cplus_rinsn_1), %o0
+	sethi			%hi(cplus_rtrap_insn_1), %o2
+	lduw			[%o0 + %lo(cplus_rinsn_1)], %o1
+	or			%o2, %lo(cplus_rtrap_insn_1), %o2
+	stw			%o1, [%o2]
+	flush			%o2
+
+	retl
+	 nop
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
new file mode 100644
index 00000000000..14d9c3a21b9
--- /dev/null
+++ b/arch/sparc64/kernel/sbus.c
@@ -0,0 +1,1243 @@
+/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
+ * sbus.c: UltraSparc SBUS controller support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/page.h>
+#include <asm/sbus.h>
+#include <asm/io.h>
+#include <asm/upa.h>
+#include <asm/cache.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/starfire.h>
+
+#include "iommu_common.h"
+
+/* These should be allocated on an SMP_CACHE_BYTES
+ * aligned boundary for optimal performance.
+ *
+ * On SYSIO, using an 8K page size we have 1GB of SBUS
+ * DMA space mapped. We divide this space into equally
+ * sized clusters. We allocate a DMA mapping from the
+ * cluster that matches the order of the allocation, or
+ * if the order is greater than the number of clusters,
+ * we try to allocate from the last cluster.
+ */
+
+#define NCLUSTERS 8UL
+#define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL)
+#define CLUSTER_SIZE (ONE_GIG / NCLUSTERS)
+#define CLUSTER_MASK (CLUSTER_SIZE - 1)
+#define CLUSTER_NPAGES (CLUSTER_SIZE >> IO_PAGE_SHIFT)
+#define MAP_BASE ((u32)0xc0000000)
+
+struct sbus_iommu {
+/*0x00*/spinlock_t lock;
+
+/*0x08*/iopte_t *page_table;
+/*0x10*/unsigned long strbuf_regs;
+/*0x18*/unsigned long iommu_regs;
+/*0x20*/unsigned long sbus_control_reg;
+
+/*0x28*/volatile unsigned long strbuf_flushflag;
+
+ /* If NCLUSTERS is ever decresed to 4 or lower,
+ * you must increase the size of the type of
+ * these counters. You have been duly warned. -DaveM
+ */
+/*0x30*/struct {
+ u16 next;
+ u16 flush;
+ } alloc_info[NCLUSTERS];
+
+ /* The lowest used consistent mapping entry. Since
+ * we allocate consistent maps out of cluster 0 this
+ * is relative to the beginning of closter 0.
+ */
+/*0x50*/u32 lowest_consistent_map;
+};
+
+/* Offsets from iommu_regs */
+#define SYSIO_IOMMUREG_BASE 0x2400UL
+#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
+#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
+#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
+#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
+#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
+#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
+#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
+#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
+
+#define IOMMU_DRAM_VALID (1UL << 30UL)
+
+/* Invalidate the entire IOMMU TLB by zeroing all 16 tag diagnostic
+ * slots, then read the SBUS control register to force completion.
+ * Since everything is now flushed, reset each cluster's flush point
+ * to its current allocation pointer.
+ * NOTE(review): callers appear to hold iommu->lock — verify.
+ */
+static void __iommu_flushall(struct sbus_iommu *iommu)
+{
+	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+	int entry;
+
+	for (entry = 0; entry < 16; entry++) {
+		upa_writeq(0, tag);
+		tag += 8UL;
+	}
+	upa_readq(iommu->sbus_control_reg);
+
+	for (entry = 0; entry < NCLUSTERS; entry++) {
+		iommu->alloc_info[entry].flush =
+			iommu->alloc_info[entry].next;
+	}
+}
+
+/* Flush 'npages' IOMMU TLB entries starting at DVMA address 'base',
+ * one page per write to the FLUSH register (highest page first), then
+ * read the control register to make the flushes globally visible.
+ */
+static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	while (npages--)
+		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+			   iommu->iommu_regs + IOMMU_FLUSH);
+	upa_readq(iommu->sbus_control_reg);
+}
+
+/* Offsets from strbuf_regs */
+#define SYSIO_STRBUFREG_BASE 0x2800UL
+#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
+#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
+#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
+#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
+#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
+#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
+#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
+
+#define STRBUF_TAG_VALID 0x02UL
+
+/* Flush the streaming buffer for 'npages' pages starting at 'base':
+ * queue one PFLUSH per page, then request a flush-sync that writes a
+ * nonzero value to strbuf_flushflag, and busy-wait until the hardware
+ * DMA of that flag lands.
+ */
+static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	iommu->strbuf_flushflag = 0UL;
+	while (npages--)
+		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+			   iommu->strbuf_regs + STRBUF_PFLUSH);
+
+	/* Whoopee cushion! */
+	upa_writeq(__pa(&iommu->strbuf_flushflag),
+		   iommu->strbuf_regs + STRBUF_FSYNC);
+	upa_readq(iommu->sbus_control_reg);
+	while (iommu->strbuf_flushflag == 0UL)
+		membar("#LoadLoad");
+}
+
+/* Allocate 'npages' worth of contiguous IOPTEs from the streaming
+ * cluster whose order matches the request; requests larger than the
+ * biggest cluster take 'nent' multiple entries from the last cluster.
+ * Scans circularly from the cluster's saved 'next' pointer, flushing
+ * the IOMMU when the scan crosses the recorded flush point.  Returns
+ * the first IOPTE of the run, or NULL (with a KERN_EMERG log) if the
+ * scan wraps all the way around without finding space.
+ * NOTE(review): callers appear to hold iommu->lock — verify.
+ */
+static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
+{
+	iopte_t *iopte, *limit, *first, *cluster;
+	unsigned long cnum, ent, nent, flush_point, found;
+
+	cnum = 0;
+	nent = 1;
+	while ((1UL << cnum) < npages)
+		cnum++;
+	if(cnum >= NCLUSTERS) {
+		nent = 1UL << (cnum - NCLUSTERS);
+		cnum = NCLUSTERS - 1;
+	}
+	iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
+
+	/* Cluster 0 also serves consistent mappings from its top end;
+	 * never scan past the lowest consistent entry.
+	 */
+	if (cnum == 0)
+		limit = (iommu->page_table +
+			 iommu->lowest_consistent_map);
+	else
+		limit = (iopte + CLUSTER_NPAGES);
+
+	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
+	flush_point = iommu->alloc_info[cnum].flush;
+
+	first = iopte;
+	cluster = NULL;
+	found = 0;
+	for (;;) {
+		if (iopte_val(*iopte) == 0UL) {
+			found++;
+			if (!cluster)
+				cluster = iopte;
+		} else {
+			/* Used cluster in the way */
+			cluster = NULL;
+			found = 0;
+		}
+
+		if (found == nent)
+			break;
+
+		iopte += (1 << cnum);
+		ent++;
+		if (iopte >= limit) {
+			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
+			ent = 0;
+
+			/* Multiple cluster allocations must not wrap */
+			cluster = NULL;
+			found = 0;
+		}
+		if (ent == flush_point)
+			__iommu_flushall(iommu);
+		if (iopte == first)
+			goto bad;
+	}
+
+	/* ent/iopte points to the last cluster entry we're going to use,
+	 * so save our place for the next allocation.
+	 */
+	if ((iopte + (1 << cnum)) >= limit)
+		ent = 0;
+	else
+		ent = ent + 1;
+	iommu->alloc_info[cnum].next = ent;
+	if (ent == flush_point)
+		__iommu_flushall(iommu);
+
+	/* I've got your streaming cluster right here buddy boy... */
+	return cluster;
+
+bad:
+	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
+	       npages);
+	return NULL;
+}
+
+/* Release the IOPTEs of a streaming mapping at DVMA 'base' covering
+ * 'npages' pages, clearing one entry per cluster stride.  If the freed
+ * slot lies between the cluster's allocation pointer and its flush
+ * point, pull the flush point back so the entry is flushed before any
+ * reuse.
+ * NOTE(review): callers appear to hold iommu->lock — verify.
+ */
+static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	unsigned long cnum, ent, nent;
+	iopte_t *iopte;
+
+	cnum = 0;
+	nent = 1;
+	while ((1UL << cnum) < npages)
+		cnum++;
+	if(cnum >= NCLUSTERS) {
+		nent = 1UL << (cnum - NCLUSTERS);
+		cnum = NCLUSTERS - 1;
+	}
+	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
+	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
+	do {
+		iopte_val(*iopte) = 0UL;
+		iopte += 1 << cnum;
+	} while(--nent);
+
+	/* If the global flush might not have caught this entry,
+	 * adjust the flush point such that we will flush before
+	 * ever trying to reuse it.
+	 */
+#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
+	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
+		iommu->alloc_info[cnum].flush = ent;
+#undef between
+}
+
+/* We allocate consistent mappings from the end of cluster zero. */
+/* We allocate consistent mappings from the end of cluster zero. */
+/* Scan cluster 0 downward from its top for 'npages' contiguous invalid
+ * IOPTEs.  On success return a pointer to the first (lowest) IOPTE of
+ * the run and lower lowest_consistent_map if the run extends below it;
+ * return NULL when no run fits.
+ * NOTE(review): callers appear to hold iommu->lock — verify.
+ */
+static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
+{
+	iopte_t *iopte;
+
+	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
+	while (iopte > iommu->page_table) {
+		iopte--;
+		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
+			unsigned long tmp = npages;
+
+			while (--tmp) {
+				iopte--;
+				if (iopte_val(*iopte) & IOPTE_VALID)
+					break;
+			}
+			/* tmp reached zero only if all npages slots were free. */
+			if (tmp == 0) {
+				u32 entry = (iopte - iommu->page_table);
+
+				if (entry < iommu->lowest_consistent_map)
+					iommu->lowest_consistent_map = entry;
+				return iopte;
+			}
+		}
+	}
+	return NULL;
+}
+
+/* Clear the 'npages' IOPTEs of a consistent mapping at DVMA 'base'.
+ * If the mapping being freed is the current lowest consistent entry,
+ * walk upward past any already-free entries to recompute
+ * lowest_consistent_map (which bounds streaming allocation in
+ * cluster 0).
+ * NOTE(review): callers appear to hold iommu->lock — verify.
+ */
+static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
+
+	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
+		iopte_t *walk = iopte + npages;
+		iopte_t *limit;
+
+		limit = iommu->page_table + CLUSTER_NPAGES;
+		while (walk < limit) {
+			if (iopte_val(*walk) != 0UL)
+				break;
+			walk++;
+		}
+		iommu->lowest_consistent_map =
+			(walk - iommu->page_table);
+	}
+
+	while (npages--)
+		*iopte++ = __iopte(0UL);
+}
+
+/* Allocate a zeroed, DMA-consistent buffer of at least 'size' bytes
+ * for 'sdev'.  Returns the kernel virtual address and stores the bus
+ * address in *dvma_addr; returns NULL on bad arguments, allocations
+ * of 2^10 pages or more, page-allocation failure, or IOPTE exhaustion.
+ */
+void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
+{
+	unsigned long order, first_page, flags;
+	struct sbus_iommu *iommu;
+	iopte_t *iopte;
+	void *ret;
+	int npages;
+
+	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
+		return NULL;
+
+	size = IO_PAGE_ALIGN(size);
+	order = get_order(size);
+	if (order >= 10)
+		return NULL;
+	first_page = __get_free_pages(GFP_KERNEL, order);
+	if (first_page == 0UL)
+		return NULL;
+	memset((char *)first_page, 0, PAGE_SIZE << order);
+
+	iommu = sdev->bus->iommu;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
+	if (iopte == NULL) {
+		spin_unlock_irqrestore(&iommu->lock, flags);
+		free_pages(first_page, order);
+		return NULL;
+	}
+
+	/* Ok, we're committed at this point. */
+	*dvma_addr = MAP_BASE +	((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+	ret = (void *) first_page;
+	npages = size >> IO_PAGE_SHIFT;
+	while (npages--) {
+		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
+				   (__pa(first_page) & IOPTE_PAGE));
+		first_page += IO_PAGE_SIZE;
+	}
+	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return ret;
+}
+
+/* Free a buffer obtained from sbus_alloc_consistent(): release its
+ * IOPTEs, flush the IOMMU TLB, and return the pages.  The order < 10
+ * guard mirrors the allocator's refusal of order >= 10 requests.
+ */
+void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
+{
+	unsigned long order, npages;
+	struct sbus_iommu *iommu;
+
+	if (size <= 0 || sdev == NULL || cpu == NULL)
+		return;
+
+	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
+	iommu = sdev->bus->iommu;
+
+	spin_lock_irq(&iommu->lock);
+	free_consistent_cluster(iommu, dvma, npages);
+	iommu_flush(iommu, dvma, npages);
+	spin_unlock_irq(&iommu->lock);
+
+	order = get_order(size);
+	if (order < 10)
+		free_pages((unsigned long)cpu, order);
+}
+
+/* Map a single CPU buffer for streaming SBUS DMA in direction 'dir'.
+ * Returns the bus address (cluster base ORed with the intra-page
+ * offset of 'ptr').  BUG()s on SBUS_DMA_NONE or when no streaming
+ * cluster space is available.
+ */
+dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	unsigned long npages, pbase, flags;
+	iopte_t *iopte;
+	u32 dma_base, offset;
+	unsigned long iopte_bits;
+
+	if (dir == SBUS_DMA_NONE)
+		BUG();
+
+	/* Round the request out to whole IO pages and keep the offset. */
+	pbase = (unsigned long) ptr;
+	offset = (u32) (pbase & ~IO_PAGE_MASK);
+	size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
+	pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	npages = size >> IO_PAGE_SHIFT;
+	iopte = alloc_streaming_cluster(iommu, npages);
+	if (iopte == NULL)
+		goto bad;
+	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+	npages = size >> IO_PAGE_SHIFT;
+	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
+	if (dir != SBUS_DMA_TODEVICE)
+		iopte_bits |= IOPTE_WRITE;
+	while (npages--) {
+		*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
+		pbase += IO_PAGE_SIZE;
+	}
+	npages = size >> IO_PAGE_SHIFT;
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return (dma_base | offset);
+
+bad:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+	BUG();
+	return 0;
+}
+
+/* Tear down a sbus_map_single() mapping: release the streaming-cluster
+ * IOPTEs and flush the streaming buffer for every covered page.
+ */
+void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	u32 dma_base = dma_addr & IO_PAGE_MASK;
+	unsigned long flags;
+
+	size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+#define SG_ENT_PHYS_ADDRESS(SG) \
+ (__pa(page_address((SG)->page)) + (SG)->offset)
+
+/* Populate IOPTEs for a coalesced scatterlist: 'nused' DMA segments
+ * (already assigned dma_address/dma_length by prepare_sg) are backed
+ * by mappings built from the 'nelems' physical entries in 'sg',
+ * walking both lists in lock-step and splitting at IO-page crossings.
+ * 'iopte_bits' supplies the VALID/STBUF/CACHE/WRITE flags.
+ */
+static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
+{
+	struct scatterlist *dma_sg = sg;
+	struct scatterlist *sg_end = sg + nelems;
+	int i;
+
+	for (i = 0; i < nused; i++) {
+		unsigned long pteval = ~0UL;
+		u32 dma_npages;
+
+		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
+			      dma_sg->dma_length +
+			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
+		do {
+			unsigned long offset;
+			signed int len;
+
+			/* If we are here, we know we have at least one
+			 * more page to map.  So walk forward until we
+			 * hit a page crossing, and begin creating new
+			 * mappings from that spot.
+			 */
+			for (;;) {
+				unsigned long tmp;
+
+				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
+				len = sg->length;
+				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
+					pteval = tmp & IO_PAGE_MASK;
+					offset = tmp & (IO_PAGE_SIZE - 1UL);
+					break;
+				}
+				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
+					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
+					offset = 0UL;
+					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
+					break;
+				}
+				sg++;
+			}
+
+			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
+			while (len > 0) {
+				*iopte++ = __iopte(pteval);
+				pteval += IO_PAGE_SIZE;
+				len -= (IO_PAGE_SIZE - offset);
+				offset = 0;
+				dma_npages--;
+			}
+
+			pteval = (pteval & IOPTE_PAGE) + len;
+			sg++;
+
+			/* Skip over any tail mappings we've fully mapped,
+			 * adjusting pteval along the way.  Stop when we
+			 * detect a page crossing event.
+			 */
+			while (sg < sg_end &&
+			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
+			       ((pteval ^
+				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
+				pteval += sg->length;
+				sg++;
+			}
+			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
+				pteval = ~0UL;
+		} while (dma_npages != 0);
+		dma_sg++;
+	}
+}
+
+/* Map a scatterlist for streaming SBUS DMA.  Single-entry lists take
+ * the sbus_map_single() fast path.  Otherwise prepare_sg() coalesces
+ * the list, a streaming cluster is allocated, DVMA addresses are
+ * rebased onto it, and fill_sg() writes the IOPTEs.  Returns the
+ * number of DMA segments actually used; BUG()s on SBUS_DMA_NONE or
+ * cluster exhaustion.
+ */
+int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	unsigned long flags, npages;
+	iopte_t *iopte;
+	u32 dma_base;
+	struct scatterlist *sgtmp;
+	int used;
+	unsigned long iopte_bits;
+
+	if (dir == SBUS_DMA_NONE)
+		BUG();
+
+	/* Fast path single entry scatterlists. */
+	if (nents == 1) {
+		sg->dma_address =
+			sbus_map_single(sdev,
+					(page_address(sg->page) + sg->offset),
+					sg->length, dir);
+		sg->dma_length = sg->length;
+		return 1;
+	}
+
+	npages = prepare_sg(sg, nents);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	iopte = alloc_streaming_cluster(iommu, npages);
+	if (iopte == NULL)
+		goto bad;
+	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
+
+	/* Normalize DVMA addresses. */
+	sgtmp = sg;
+	used = nents;
+
+	while (used && sgtmp->dma_length) {
+		sgtmp->dma_address += dma_base;
+		sgtmp++;
+		used--;
+	}
+	used = nents - used;
+
+	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
+	if (dir != SBUS_DMA_TODEVICE)
+		iopte_bits |= IOPTE_WRITE;
+
+	fill_sg(iopte, sg, used, nents, iopte_bits);
+#ifdef VERIFY_SG
+	verify_sglist(sg, nents, iopte, npages);
+#endif
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	return used;
+
+bad:
+	spin_unlock_irqrestore(&iommu->lock, flags);
+	BUG();
+	return 0;
+}
+
+/* Tear down a mapping previously established by sbus_map_sg():
+ * release the streaming cluster and flush the streaming buffer for
+ * the covered DVMA range.
+ */
+void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+{
+	unsigned long size, flags;
+	struct sbus_iommu *iommu;
+	u32 dvma_base;
+	int i;
+
+	/* Fast path single entry scatterlists. */
+	if (nents == 1) {
+		sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
+		return;
+	}
+
+	/* Find the last entry actually used by the mapping; a zero
+	 * dma_length terminates the used portion of the list.
+	 */
+	dvma_base = sg[0].dma_address & IO_PAGE_MASK;
+	for (i = 0; i < nents; i++) {
+		if (sg[i].dma_length == 0)
+			break;
+	}
+	i--;
+	/* Total page-aligned span from the first to the last used entry. */
+	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;
+
+	iommu = sdev->bus->iommu;
+	spin_lock_irqsave(&iommu->lock, flags);
+	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Make a single streaming mapping coherent for CPU access by flushing
+ * the streaming buffer over the page-aligned range covering
+ * [base, base + size).  The direction argument is unused here.
+ */
+void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	unsigned long flags;
+
+	/* Round the range out to whole IOMMU pages. */
+	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Intentionally a no-op: handing a buffer back to the device needs no
+ * streaming-buffer work on this hardware (flushing is only performed
+ * on the CPU-visibility side, see sbus_dma_sync_single_for_cpu).
+ */
+void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
+{
+}
+
+/* Make a streaming scatterlist mapping coherent for CPU access:
+ * compute the full DVMA span of the used entries and flush the
+ * streaming buffer over it.  The direction argument is unused here.
+ */
+void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	unsigned long flags, size;
+	u32 base;
+	int i;
+
+	/* A zero dma_length terminates the used portion of the list. */
+	base = sg[0].dma_address & IO_PAGE_MASK;
+	for (i = 0; i < nents; i++) {
+		if (sg[i].dma_length == 0)
+			break;
+	}
+	i--;
+	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Intentionally a no-op, for the same reason as
+ * sbus_dma_sync_single_for_device(): no streaming-buffer work is
+ * needed when handing buffers back to the device.
+ */
+void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
+{
+}
+
+/* Enable 64-bit DVMA mode for the given device. */
+void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
+{
+	struct sbus_iommu *iommu = sdev->bus->iommu;
+	int slot = sdev->slot;
+	unsigned long cfg_reg;
+	u64 val;
+
+	/* Each SBUS slot has its own configuration register at a fixed
+	 * offset from the control register block; slots other than
+	 * 0-3 and 13-15 have no register, so silently do nothing.
+	 */
+	cfg_reg = iommu->sbus_control_reg;
+	switch (slot) {
+	case 0:
+		cfg_reg += 0x20UL;
+		break;
+	case 1:
+		cfg_reg += 0x28UL;
+		break;
+	case 2:
+		cfg_reg += 0x30UL;
+		break;
+	case 3:
+		cfg_reg += 0x38UL;
+		break;
+	case 13:
+		cfg_reg += 0x40UL;
+		break;
+	case 14:
+		cfg_reg += 0x48UL;
+		break;
+	case 15:
+		cfg_reg += 0x50UL;
+		break;
+
+	default:
+		return;
+	};
+
+	val = upa_readq(cfg_reg);
+	if (val & (1UL << 14UL)) {
+		/* Extended transfer mode already enabled. */
+		return;
+	}
+
+	/* Bit 14 enables extended (64-bit) transfer mode. */
+	val |= (1UL << 14UL);
+
+	/* Bits 1-4 enable the 8/16/32/64-byte burst sizes respectively. */
+	if (bursts & DMA_BURST8)
+		val |= (1UL << 1UL);
+	if (bursts & DMA_BURST16)
+		val |= (1UL << 2UL);
+	if (bursts & DMA_BURST32)
+		val |= (1UL << 3UL);
+	if (bursts & DMA_BURST64)
+		val |= (1UL << 4UL);
+	upa_writeq(val, cfg_reg);
+}
+
+/* SBUS SYSIO INO number to Sparc PIL level.  Indexed by INO; a zero
+ * entry marks an invalid ("bogon") INO, which sbus_build_irq() treats
+ * as a fatal translation error.
+ */
+static unsigned char sysio_ino_to_pil[] = {
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 0 */
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 1 */
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 2 */
+	0, 4, 4, 7, 5, 7, 8, 9,		/* SBUS slot 3 */
+	4, /* Onboard SCSI */
+	5, /* Onboard Ethernet */
+/*XXX*/	8, /* Onboard BPP */
+	0, /* Bogon */
+	13, /* Audio */
+/*XXX*/15, /* PowerFail */
+	0, /* Bogon */
+	0, /* Bogon */
+	12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
+	11, /* Floppy */
+	0, /* Spare Hardware (bogon for now) */
+	0, /* Keyboard (bogon for now) */
+	0, /* Mouse (bogon for now) */
+	0, /* Serial (bogon for now) */
+	0, 0, /* Bogon, Bogon */
+	10, /* Timer 0 */
+	11, /* Timer 1 */
+	0, 0, /* Bogon, Bogon */
+	15, /* Uncorrectable SBUS Error */
+	15, /* Correctable SBUS Error */
+	15, /* SBUS Error */
+/*XXX*/ 0, /* Power Management (bogon for now) */
+};
+
+/* INO number to IMAP register offset for SYSIO external IRQ's.
+ * This should conform to both Sunfire/Wildfire server and Fusion
+ * desktop designs.
+ */
+#define SYSIO_IMAP_SLOT0 0x2c04UL
+#define SYSIO_IMAP_SLOT1 0x2c0cUL
+#define SYSIO_IMAP_SLOT2 0x2c14UL
+#define SYSIO_IMAP_SLOT3 0x2c1cUL
+#define SYSIO_IMAP_SCSI 0x3004UL
+#define SYSIO_IMAP_ETH 0x300cUL
+#define SYSIO_IMAP_BPP 0x3014UL
+#define SYSIO_IMAP_AUDIO 0x301cUL
+#define SYSIO_IMAP_PFAIL 0x3024UL
+#define SYSIO_IMAP_KMS 0x302cUL
+#define SYSIO_IMAP_FLPY 0x3034UL
+#define SYSIO_IMAP_SHW 0x303cUL
+#define SYSIO_IMAP_KBD 0x3044UL
+#define SYSIO_IMAP_MS 0x304cUL
+#define SYSIO_IMAP_SER 0x3054UL
+#define SYSIO_IMAP_TIM0 0x3064UL
+#define SYSIO_IMAP_TIM1 0x306cUL
+#define SYSIO_IMAP_UE 0x3074UL
+#define SYSIO_IMAP_CE 0x307cUL
+#define SYSIO_IMAP_SBERR 0x3084UL
+#define SYSIO_IMAP_PMGMT 0x308cUL
+#define SYSIO_IMAP_GFX 0x3094UL
+#define SYSIO_IMAP_EUPA 0x309cUL
+
+/* INO number to IMAP register offset.  Indexed by INO; a "bogon"
+ * entry (all ones) marks an INO with no IMAP register, which
+ * sbus_build_irq() treats as fatal.
+ */
+#define bogon     ((unsigned long) -1)
+static unsigned long sysio_irq_offsets[] = {
+	/* SBUS Slot 0 --> 3, level 1 --> 7 */
+	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
+	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
+	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
+	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
+	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
+	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
+	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
+	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
+
+	/* Onboard devices (not relevant/used on SunFire). */
+	SYSIO_IMAP_SCSI,
+	SYSIO_IMAP_ETH,
+	SYSIO_IMAP_BPP,
+	bogon,
+	SYSIO_IMAP_AUDIO,
+	SYSIO_IMAP_PFAIL,
+	bogon,
+	bogon,
+	SYSIO_IMAP_KMS,
+	SYSIO_IMAP_FLPY,
+	SYSIO_IMAP_SHW,
+	SYSIO_IMAP_KBD,
+	SYSIO_IMAP_MS,
+	SYSIO_IMAP_SER,
+	bogon,
+	bogon,
+	SYSIO_IMAP_TIM0,
+	SYSIO_IMAP_TIM1,
+	bogon,
+	bogon,
+	SYSIO_IMAP_UE,
+	SYSIO_IMAP_CE,
+	SYSIO_IMAP_SBERR,
+	SYSIO_IMAP_PMGMT,
+};
+
+#undef bogon
+
+/* Number of valid INO slots in the table above. */
+#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
+
+/* Convert Interrupt Mapping register pointer to associated
+ * Interrupt Clear register pointer, SYSIO specific version.
+ */
+#define SYSIO_ICLR_UNUSED0 0x3400UL
+#define SYSIO_ICLR_SLOT0 0x340cUL
+#define SYSIO_ICLR_SLOT1 0x344cUL
+#define SYSIO_ICLR_SLOT2 0x348cUL
+#define SYSIO_ICLR_SLOT3 0x34ccUL
+/* Translate an Interrupt Mapping (IMAP) register offset into the
+ * matching Interrupt Clear (ICLR) register offset.  On SYSIO the
+ * ICLR bank lives at a fixed displacement from the IMAP bank, so a
+ * single constant-offset addition suffices.
+ */
+static unsigned long sysio_imap_to_iclr(unsigned long imap)
+{
+	return imap + (SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0);
+}
+
+/* Translate a SYSIO interrupt number (INO) into a Linux IRQ.
+ * Looks up the PIL and IMAP offset for the INO, derives the matching
+ * ICLR register, and hands everything to build_irq().  Invalid INOs
+ * are fatal (panic/prom_halt).
+ */
+unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
+{
+	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned long imap, iclr;
+	int pil, sbus_level = 0;
+
+	pil = sysio_ino_to_pil[ino];
+	if (!pil) {
+		printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
+		panic("Bad SYSIO IRQ translations...");
+	}
+
+	if (PIL_RESERVED(pil))
+		BUG();
+
+	imap = sysio_irq_offsets[ino];
+	if (imap == ((unsigned long)-1)) {
+		prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
+			    ino, pil);
+		prom_halt();
+	}
+	imap += reg_base;
+
+	/* SYSIO inconsistency.  For external SLOTS, we have to select
+	 * the right ICLR register based upon the lower SBUS irq level
+	 * bits.
+	 */
+	if (ino >= 0x20) {
+		iclr = sysio_imap_to_iclr(imap);
+	} else {
+		/* INO encodes slot in bits 3-4 and level in bits 0-2. */
+		int sbus_slot = (ino & 0x18)>>3;
+
+		sbus_level = ino & 0x7;
+
+		switch(sbus_slot) {
+		case 0:
+			iclr = reg_base + SYSIO_ICLR_SLOT0;
+			break;
+		case 1:
+			iclr = reg_base + SYSIO_ICLR_SLOT1;
+			break;
+		case 2:
+			iclr = reg_base + SYSIO_ICLR_SLOT2;
+			break;
+		default:
+		case 3:
+			iclr = reg_base + SYSIO_ICLR_SLOT3;
+			break;
+		};
+
+		/* One 8-byte ICLR register per level within the slot. */
+		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
+	}
+	return build_irq(pil, sbus_level, iclr, imap);
+}
+
+/* Error interrupt handling. */
+#define SYSIO_UE_AFSR 0x0030UL
+#define SYSIO_UE_AFAR 0x0038UL
+#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
+#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
+#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
+#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
+#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
+#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
+#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
+#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
+#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
+#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
+#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
+/* Uncorrectable ECC error interrupt: latch AFSR/AFAR, acknowledge the
+ * primary and secondary error status bits by writing them back, and
+ * log a full decode of the fault.
+ */
+static irqreturn_t sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct sbus_bus *sbus = dev_id;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned long afsr_reg, afar_reg;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	afsr_reg = reg_base + SYSIO_UE_AFSR;
+	afar_reg = reg_base + SYSIO_UE_AFAR;
+
+	/* Latch error status. */
+	afsr = upa_readq(afsr_reg);
+	afar = upa_readq(afar_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
+		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
+	upa_writeq(error_bits, afsr_reg);
+
+	/* Log the error. */
+	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
+	       sbus->portid,
+	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
+		  "DVMA Read" :
+		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
+		   "DVMA Write" : "???")))));
+	/* Shifts below extract the DOFF/SIZE/MID fields per the bit
+	 * masks defined above.
+	 */
+	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
+	       sbus->portid,
+	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
+	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
+	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
+	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
+	reported = 0;
+	if (afsr & SYSIO_UEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SYSIO_UEAFSR_SDRD) {
+		reported++;
+		printk("(DVMA Read)");
+	}
+	if (afsr & SYSIO_UEAFSR_SDWR) {
+		reported++;
+		printk("(DVMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	return IRQ_HANDLED;
+}
+
+#define SYSIO_CE_AFSR 0x0040UL
+#define SYSIO_CE_AFAR 0x0048UL
+#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
+#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
+#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
+#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
+#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
+#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
+#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
+#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
+#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
+#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
+#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
+#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
+/* Correctable ECC error interrupt: same structure as the UE handler --
+ * latch AFSR/AFAR, acknowledge the error bits, and log a full decode
+ * (including the ECC syndrome).
+ */
+static irqreturn_t sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct sbus_bus *sbus = dev_id;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned long afsr_reg, afar_reg;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	afsr_reg = reg_base + SYSIO_CE_AFSR;
+	afar_reg = reg_base + SYSIO_CE_AFAR;
+
+	/* Latch error status. */
+	afsr = upa_readq(afsr_reg);
+	afar = upa_readq(afar_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
+		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
+	upa_writeq(error_bits, afsr_reg);
+
+	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
+	       sbus->portid,
+	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
+		 "PIO" :
+		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
+		  "DVMA Read" :
+		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
+		   "DVMA Write" : "???")))));
+
+	/* XXX Use syndrome and afar to print out module string just like
+	 * XXX UDB CE trap handler does... -DaveM
+	 */
+	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
+	       sbus->portid,
+	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
+	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
+	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
+	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
+	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+
+	printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
+	reported = 0;
+	if (afsr & SYSIO_CEAFSR_SPIO) {
+		reported++;
+		printk("(PIO)");
+	}
+	if (afsr & SYSIO_CEAFSR_SDRD) {
+		reported++;
+		printk("(DVMA Read)");
+	}
+	if (afsr & SYSIO_CEAFSR_SDWR) {
+		reported++;
+		printk("(DVMA Write)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	return IRQ_HANDLED;
+}
+
+#define SYSIO_SBUS_AFSR 0x2010UL
+#define SYSIO_SBUS_AFAR 0x2018UL
+#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
+#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
+#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
+#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
+#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
+#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
+#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
+#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
+#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
+#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
+#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
+#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
+/* SBUS error interrupt (late PIO error / timeout / error ack):
+ * latch AFSR/AFAR, acknowledge the error bits, and log a decode.
+ */
+static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct sbus_bus *sbus = dev_id;
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long afsr_reg, afar_reg, reg_base;
+	unsigned long afsr, afar, error_bits;
+	int reported;
+
+	reg_base = iommu->sbus_control_reg - 0x2000UL;
+	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
+	afar_reg = reg_base + SYSIO_SBUS_AFAR;
+
+	/* Latch error status. */
+	afsr = upa_readq(afsr_reg);
+	afar = upa_readq(afar_reg);
+
+	/* Clear primary/secondary error status bits. */
+	error_bits = afsr &
+		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
+		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
+	upa_writeq(error_bits, afsr_reg);
+
+	/* Log the error. */
+	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
+	       sbus->portid,
+	       (((error_bits & SYSIO_SBAFSR_PLE) ?
+		 "Late PIO Error" :
+		 ((error_bits & SYSIO_SBAFSR_PTO) ?
+		  "Time Out" :
+		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
+		   "Error Ack" : "???")))),
+	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
+	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
+	       sbus->portid,
+	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
+	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
+	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+	printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
+	reported = 0;
+	if (afsr & SYSIO_SBAFSR_SLE) {
+		reported++;
+		printk("(Late PIO Error)");
+	}
+	if (afsr & SYSIO_SBAFSR_STO) {
+		reported++;
+		printk("(Time Out)");
+	}
+	if (afsr & SYSIO_SBAFSR_SBERR) {
+		reported++;
+		printk("(Error Ack)");
+	}
+	if (!reported)
+		printk("(none)");
+	printk("]\n");
+
+	/* XXX check iommu/strbuf for further error status XXX */
+
+	return IRQ_HANDLED;
+}
+
+#define ECC_CONTROL 0x0020UL
+#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
+#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
+#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
+
+#define SYSIO_UE_INO 0x34
+#define SYSIO_CE_INO 0x35
+#define SYSIO_SBUSERR_INO 0x36
+
+/* Register the UE/CE/SBUS-error interrupt handlers for this bus and
+ * then enable ECC checking plus the error interrupts in hardware.
+ * Any registration failure is fatal at boot (prom_halt).
+ */
+static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
+{
+	struct sbus_iommu *iommu = sbus->iommu;
+	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+	unsigned int irq;
+	u64 control;
+
+	irq = sbus_build_irq(sbus, SYSIO_UE_INO);
+	if (request_irq(irq, sysio_ue_handler,
+			SA_SHIRQ, "SYSIO UE", sbus) < 0) {
+		prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
+			    sbus->portid);
+		prom_halt();
+	}
+
+	irq = sbus_build_irq(sbus, SYSIO_CE_INO);
+	if (request_irq(irq, sysio_ce_handler,
+			SA_SHIRQ, "SYSIO CE", sbus) < 0) {
+		prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
+			    sbus->portid);
+		prom_halt();
+	}
+
+	irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
+	if (request_irq(irq, sysio_sbus_error_handler,
+			SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
+		prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
+			    sbus->portid);
+		prom_halt();
+	}
+
+	/* Now turn the error interrupts on and also enable ECC checking. */
+	upa_writeq((SYSIO_ECNTRL_ECCEN |
+		    SYSIO_ECNTRL_UEEN  |
+		    SYSIO_ECNTRL_CEEN),
+		   reg_base + ECC_CONTROL);
+
+	control = upa_readq(iommu->sbus_control_reg);
+	control |= 0x100UL; /* SBUS Error Interrupt Enable */
+	upa_writeq(control, iommu->sbus_control_reg);
+}
+
+/* Boot time initialization.  Allocates and wires up the per-bus IOMMU
+ * software state, programs the IOMMU TSB and streaming buffer, cleans
+ * the hardware caches via diagnostic accesses, enables DVMA, and
+ * finally installs the error interrupt handlers.  All failures here
+ * are fatal (prom_halt).
+ */
+void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
+{
+	struct linux_prom64_registers rprop;
+	struct sbus_iommu *iommu;
+	unsigned long regs, tsb_base;
+	u64 control;
+	int err, i;
+
+	/* NOTE(review): reads "upa-portid" from sbus->prom_node while
+	 * the rest of this function uses the prom_node argument --
+	 * presumably they are the same node; confirm against callers.
+	 */
+	sbus->portid = prom_getintdefault(sbus->prom_node,
+					  "upa-portid", -1);
+
+	err = prom_getproperty(prom_node, "reg",
+			       (char *)&rprop, sizeof(rprop));
+	if (err < 0) {
+		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
+		prom_halt();
+	}
+	regs = rprop.phys_addr;
+
+	/* Over-allocate so the pointer can be rounded up to a cache
+	 * line below.  The unaligned base pointer is not kept: this
+	 * object lives for the life of the system and is never freed.
+	 */
+	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
+	if (iommu == NULL) {
+		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
+		prom_halt();
+	}
+
+	/* Align on E$ line boundary. */
+	iommu = (struct sbus_iommu *)
+		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
+		 ~(SMP_CACHE_BYTES - 1UL));
+
+	memset(iommu, 0, sizeof(*iommu));
+
+	/* We start with no consistent mappings. */
+	iommu->lowest_consistent_map = CLUSTER_NPAGES;
+
+	for (i = 0; i < NCLUSTERS; i++) {
+		iommu->alloc_info[i].flush = 0;
+		iommu->alloc_info[i].next = 0;
+	}
+
+	/* Setup spinlock. */
+	spin_lock_init(&iommu->lock);
+
+	/* Init register offsets. */
+	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
+	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
+
+	/* The SYSIO SBUS control register is used for dummy reads
+	 * in order to ensure write completion.
+	 */
+	iommu->sbus_control_reg = regs + 0x2000UL;
+
+	/* Link into SYSIO software state. */
+	sbus->iommu = iommu;
+
+	printk("SYSIO: UPA portID %x, at %016lx\n",
+	       sbus->portid, regs);
+
+	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
+	/* NOTE(review): the value read here is immediately overwritten
+	 * by the constant below -- the read may serve only as a dummy
+	 * access; confirm intent.
+	 */
+	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
+	control = ((7UL << 16UL)	|
+		   (0UL << 2UL)		|
+		   (1UL << 1UL)		|
+		   (1UL << 0UL));
+
+	/* Using the above configuration we need 1MB iommu page
+	 * table (128K ioptes * 8 bytes per iopte).  This is
+	 * page order 7 on UltraSparc.
+	 */
+	tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
+	if (tsb_base == 0UL) {
+		prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
+		prom_halt();
+	}
+
+	iommu->page_table = (iopte_t *) tsb_base;
+	memset(iommu->page_table, 0, IO_TSB_SIZE);
+
+	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
+
+	/* Clean out any cruft in the IOMMU using
+	 * diagnostic accesses.
+	 */
+	for (i = 0; i < 16; i++) {
+		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
+		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+
+		dram += (unsigned long)i * 8UL;
+		tag += (unsigned long)i * 8UL;
+		upa_writeq(0, dram);
+		upa_writeq(0, tag);
+	}
+	/* Dummy read to flush the diagnostic writes. */
+	upa_readq(iommu->sbus_control_reg);
+
+	/* Give the TSB to SYSIO. */
+	upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);
+
+	/* Setup streaming buffer, DE=1 SB_EN=1 */
+	control = (1UL << 1UL) | (1UL << 0UL);
+	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
+
+	/* Clear out the tags using diagnostics. */
+	for (i = 0; i < 16; i++) {
+		unsigned long ptag, ltag;
+
+		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
+		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
+		ptag += (unsigned long)i * 8UL;
+		ltag += (unsigned long)i * 8UL;
+
+		upa_writeq(0UL, ptag);
+		upa_writeq(0UL, ltag);
+	}
+
+	/* Enable DVMA arbitration for all devices/slots. */
+	control = upa_readq(iommu->sbus_control_reg);
+	control |= 0x3fUL;
+	upa_writeq(control, iommu->sbus_control_reg);
+
+	/* Now some Xfire specific grot... */
+	if (this_is_starfire)
+		sbus->starfire_cookie = starfire_hookup(sbus->portid);
+	else
+		sbus->starfire_cookie = NULL;
+
+	sysio_register_error_handlers(sbus);
+}
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
new file mode 100644
index 00000000000..63496c43fe1
--- /dev/null
+++ b/arch/sparc64/kernel/semaphore.c
@@ -0,0 +1,251 @@
+/* $Id: semaphore.c,v 1.9 2001/11/18 00:12:56 davem Exp $
+ * semaphore.c: Sparc64 semaphore implementation.
+ *
+ * This is basically the PPC semaphore scheme ported to use
+ * the sparc64 atomic instructions, so see the PPC code for
+ * credits.
+ */
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+/*
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
+ *
+ *	old_count = sem->count;
+ *	tmp = MAX(old_count, 0) + incr;
+ *	sem->count = tmp;
+ *	return old_count;
+ *
+ * Implemented as a CAS retry loop; the trailing membar orders the
+ * successful update before subsequent loads and stores.
+ */
+static __inline__ int __sem_update_count(struct semaphore *sem, int incr)
+{
+	int old_count, tmp;
+
+	__asm__ __volatile__("\n"
+"	! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
+"1:	ldsw	[%3], %0\n"
+"	mov	%0, %1\n"
+"	cmp	%0, 0\n"
+"	movl	%%icc, 0, %1\n"
+"	add	%1, %4, %1\n"
+"	cas	[%3], %0, %1\n"
+"	cmp	%0, %1\n"
+"	bne,pn	%%icc, 1b\n"
+"	 membar #StoreLoad | #StoreStore\n"
+	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+	: "r" (&sem->count), "r" (incr), "m" (sem->count)
+	: "cc");
+
+	return old_count;
+}
+
+/* Slow path of up(): account the release in sem->count (clamping a
+ * negative count toward zero) and wake one sleeper.
+ */
+static void __up(struct semaphore *sem)
+{
+	__sem_update_count(sem, 1);
+	wake_up(&sem->wait);
+}
+
+/* Release the semaphore.  Fast path is a lock-free CAS increment;
+ * only when the old count was negative (i.e. sleepers exist) does it
+ * branch to the out-of-line stub that calls __up().
+ */
+void up(struct semaphore *sem)
+{
+	/* This atomically does:
+	 *	old_val = sem->count;
+	 *	new_val = sem->count + 1;
+	 *	sem->count = new_val;
+	 *	if (old_val < 0)
+	 *		__up(sem);
+	 *
+	 * The (old_val < 0) test is equivalent to
+	 * the more straightforward (new_val <= 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! up sem(%0)\n"
+"	membar	#StoreLoad | #LoadLoad\n"
+"1:	lduw	[%0], %%g1\n"
+"	add	%%g1, 1, %%g7\n"
+"	cas	[%0], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 addcc	%%g7, 1, %%g0\n"
+"	ble,pn	%%icc, 3f\n"
+"	 membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+"	.subsection 2\n"
+"3:	mov	%0, %%g1\n"
+"	save	%%sp, -160, %%sp\n"
+"	call	%1\n"
+"	 mov	%%g1, %%o0\n"
+"	ba,pt	%%xcc, 2b\n"
+"	 restore\n"
+"	.previous\n"
+	: : "r" (sem), "i" (__up)
+	: "g1", "g2", "g3", "g7", "memory", "cc");
+}
+
+/* Slow path of down(): sleep uninterruptibly on the wait queue until
+ * the count can be decremented, then wake the next waiter in case
+ * more resources became available while we slept.
+ */
+static void __sched __down(struct semaphore * sem)
+{
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	tsk->state = TASK_UNINTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (__sem_update_count(sem, -1) <= 0) {
+		schedule();
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	}
+	remove_wait_queue(&sem->wait, &wait);
+	tsk->state = TASK_RUNNING;
+
+	wake_up(&sem->wait);
+}
+
+/* Acquire the semaphore, sleeping uninterruptibly if necessary.
+ * Fast path is a lock-free CAS decrement; only a contended semaphore
+ * branches to the out-of-line stub that calls __down().
+ */
+void __sched down(struct semaphore *sem)
+{
+	might_sleep();
+	/* This atomically does:
+	 *	old_val = sem->count;
+	 *	new_val = sem->count - 1;
+	 *	sem->count = new_val;
+	 *	if (old_val < 1)
+	 *		__down(sem);
+	 *
+	 * The (old_val < 1) test is equivalent to
+	 * the more straightforward (new_val < 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! down sem(%0)\n"
+"1:	lduw	[%0], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	cas	[%0], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 cmp	%%g7, 1\n"
+"	bl,pn	%%icc, 3f\n"
+"	 membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+"	.subsection 2\n"
+"3:	mov	%0, %%g1\n"
+"	save	%%sp, -160, %%sp\n"
+"	call	%1\n"
+"	 mov	%%g1, %%o0\n"
+"	ba,pt	%%xcc, 2b\n"
+"	 restore\n"
+"	.previous\n"
+	: : "r" (sem), "i" (__down)
+	: "g1", "g2", "g3", "g7", "memory", "cc");
+}
+
+/* Try to acquire the semaphore without sleeping.
+ * Returns 0 on success, 1 if the semaphore could not be taken.
+ */
+int down_trylock(struct semaphore *sem)
+{
+	int ret;
+
+	/* This atomically does:
+	 *	old_val = sem->count;
+	 *	new_val = sem->count - 1;
+	 *	if (old_val < 1) {
+	 *		ret = 1;
+	 *	} else {
+	 *		sem->count = new_val;
+	 *		ret = 0;
+	 *	}
+	 *
+	 * The (old_val < 1) test is equivalent to
+	 * the more straightforward (new_val < 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! down_trylock sem(%1) ret(%0)\n"
+"1:	lduw	[%1], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	cmp	%%g1, 1\n"
+"	bl,pn	%%icc, 2f\n"
+"	 mov	1, %0\n"
+"	cas	[%1], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 mov	0, %0\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+	: "=&r" (ret)
+	: "r" (sem)
+	: "g1", "g7", "memory", "cc");
+
+	return ret;
+}
+
+/* Slow path of down_interruptible(): sleep on the wait queue until
+ * the count can be taken, or a signal arrives.  Returns 0 on success
+ * or -EINTR if interrupted.
+ */
+static int __sched __down_interruptible(struct semaphore * sem)
+{
+	int retval = 0;
+	struct task_struct *tsk = current;
+	DECLARE_WAITQUEUE(wait, tsk);
+
+	tsk->state = TASK_INTERRUPTIBLE;
+	add_wait_queue_exclusive(&sem->wait, &wait);
+
+	while (__sem_update_count(sem, -1) <= 0) {
+		if (signal_pending(current)) {
+			/* incr==0 clamps a negative count back toward
+			 * zero, undoing our failed decrement so other
+			 * waiters are not lost.
+			 */
+			__sem_update_count(sem, 0);
+			retval = -EINTR;
+			break;
+		}
+		schedule();
+		tsk->state = TASK_INTERRUPTIBLE;
+	}
+	tsk->state = TASK_RUNNING;
+	remove_wait_queue(&sem->wait, &wait);
+	wake_up(&sem->wait);
+	return retval;
+}
+
+/* Acquire the semaphore, sleeping interruptibly if necessary.
+ * Returns 0 on success or -EINTR if a signal interrupted the wait.
+ * Fast path is a lock-free CAS decrement; contention branches to the
+ * out-of-line stub calling __down_interruptible().
+ */
+int __sched down_interruptible(struct semaphore *sem)
+{
+	int ret = 0;
+	
+	might_sleep();
+	/* This atomically does:
+	 *	old_val = sem->count;
+	 *	new_val = sem->count - 1;
+	 *	sem->count = new_val;
+	 *	if (old_val < 1)
+	 *		ret = __down_interruptible(sem);
+	 *
+	 * The (old_val < 1) test is equivalent to
+	 * the more straightforward (new_val < 0),
+	 * but it is easier to test the former because
+	 * of how the CAS instruction works.
+	 */
+
+	__asm__ __volatile__("\n"
+"	! down_interruptible sem(%2) ret(%0)\n"
+"1:	lduw	[%2], %%g1\n"
+"	sub	%%g1, 1, %%g7\n"
+"	cas	[%2], %%g1, %%g7\n"
+"	cmp	%%g1, %%g7\n"
+"	bne,pn	%%icc, 1b\n"
+"	 cmp	%%g7, 1\n"
+"	bl,pn	%%icc, 3f\n"
+"	 membar	#StoreLoad | #StoreStore\n"
+"2:\n"
+"	.subsection 2\n"
+"3:	mov	%2, %%g1\n"
+"	save	%%sp, -160, %%sp\n"
+"	call	%3\n"
+"	 mov	%%g1, %%o0\n"
+"	ba,pt	%%xcc, 2b\n"
+"	 restore\n"
+"	.previous\n"
+	: "=r" (ret)
+	: "0" (ret), "r" (sem), "i" (__down_interruptible)
+	: "g1", "g2", "g3", "g7", "memory", "cc");
+	return ret;
+}
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
new file mode 100644
index 00000000000..12c3d84b746
--- /dev/null
+++ b/arch/sparc64/kernel/setup.c
@@ -0,0 +1,731 @@
+/* $Id: setup.c,v 1.72 2002/02/09 19:49:30 davem Exp $
+ * linux/arch/sparc64/kernel/setup.c
+ *
+ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <asm/smp.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/inet.h>
+#include <linux/console.h>
+#include <linux/root_dev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/initrd.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/idprom.h>
+#include <asm/head.h>
+#include <asm/starfire.h>
+#include <asm/mmu_context.h>
+#include <asm/timer.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_IP_PNP
+#include <net/ipconfig.h>
+#endif
+
+/* Default console geometry reported to the framebuffer/VT layers.
+ * There is no BIOS on sparc64, so these are static values rather
+ * than anything probed from firmware.
+ */
+struct screen_info screen_info = {
+	0, 0,			/* orig-x, orig-y */
+	0,			/* unused */
+	0,			/* orig-video-page */
+	0,			/* orig-video-mode */
+	128,			/* orig-video-cols */
+	0, 0, 0,		/* unused, ega_bx, unused */
+	54,			/* orig-video-lines */
+	0,			/* orig-video-isVGA */
+	16			/* orig-video-points */
+};
+
+/* Typing sync at the prom prompt calls the function pointed to by
+ * the sync callback which I set to the following function.
+ * This should sync all filesystems and return, for now it just
+ * prints out pretty messages and returns.
+ */
+
+void (*prom_palette)(int);
+void (*prom_keyboard)(void);
+
+/* console->write hook that funnels kernel output straight to the
+ * OBP firmware console via prom_write().
+ */
+static void
+prom_console_write(struct console *con, const char *s, unsigned n)
+{
+	prom_write(s, n);
+}
+
+/* Console object temporarily installed while OBP owns the machine
+ * (see prom_callback below), so printk output remains visible.
+ */
+static struct console prom_console = {
+	.name =		"prom",
+	.write =	prom_console_write,
+	.flags =	CON_CONSDEV | CON_ENABLED,
+	.index =	-1,
+};
+
+/* Boolean values as OBP's Forth interpreter represents them. */
+#define PROM_TRUE	-1
+#define PROM_FALSE	0
+
+/* Callback invoked by OBP when the operator runs one of the Forth
+ * words hooked up in register_prom_callbacks() ("sync",
+ * "va>tte-data", ".soft1", ".soft2").  args[] is OBP's argument
+ * array: args[0] is the command-name string, args[1] the input
+ * argument count, args[2] receives the result count, and results are
+ * stored starting at args[args[1] + 3].  Pretty sick eh?
+ */
+int prom_callback(long *args)
+{
+	struct console *cons, *saved_console = NULL;
+	unsigned long flags;
+	char *cmd;
+	extern spinlock_t prom_entry_lock;
+
+	if (!args)
+		return -1;
+	if (!(cmd = (char *)args[0]))
+		return -1;
+
+	/*
+	 * The callback can be invoked on the cpu that first dropped
+	 * into prom_cmdline after taking the serial interrupt, or on
+	 * a slave processor that was smp_captured() if the
+	 * administrator has done a switch-cpu inside obp. In either
+	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
+	 */
+	irq_exit();
+
+	/* XXX Revisit the locking here someday. This is a debugging
+	 * XXX feature so it isnt all that critical.  -DaveM
+	 */
+	local_irq_save(flags);
+
+	spin_unlock(&prom_entry_lock);
+	/* Unhook every registered console (saving them on a local
+	 * list) and install the PROM console so any printk output
+	 * goes through firmware while OBP is in control.
+	 */
+	cons = console_drivers;
+	while (cons) {
+		unregister_console(cons);
+		cons->flags &= ~(CON_PRINTBUFFER);
+		cons->next = saved_console;
+		saved_console = cons;
+		cons = console_drivers;
+	}
+	register_console(&prom_console);
+	if (!strcmp(cmd, "sync")) {
+		prom_printf("PROM `%s' command...\n", cmd);
+		show_free_areas();
+		/* pid 0 (idle/swapper) cannot block in sys_sync(). */
+		if (current->pid != 0) {
+			local_irq_enable();
+			sys_sync();
+			local_irq_disable();
+		}
+		args[2] = 0;
+		args[args[1] + 3] = -1;
+		prom_printf("Returning to PROM\n");
+	} else if (!strcmp(cmd, "va>tte-data")) {
+		/* Translate a (context, virtual address) pair into the
+		 * TTE (TLB entry data) OBP should use for it.
+		 */
+		unsigned long ctx, va;
+		unsigned long tte = 0;
+		long res = PROM_FALSE;
+
+		ctx = args[3];
+		va = args[4];
+		if (ctx) {
+			/*
+			 * Find process owning ctx, lookup mapping.
+			 */
+			struct task_struct *p;
+			struct mm_struct *mm = NULL;
+			pgd_t *pgdp;
+			pud_t *pudp;
+			pmd_t *pmdp;
+			pte_t *ptep;
+
+			for_each_process(p) {
+				mm = p->mm;
+				if (CTX_NRBITS(mm->context) == ctx)
+					break;
+			}
+			if (!mm ||
+			    CTX_NRBITS(mm->context) != ctx)
+				goto done;
+
+			pgdp = pgd_offset(mm, va);
+			if (pgd_none(*pgdp))
+				goto done;
+			pudp = pud_offset(pgdp, va);
+			if (pud_none(*pudp))
+				goto done;
+			pmdp = pmd_offset(pudp, va);
+			if (pmd_none(*pmdp))
+				goto done;
+
+			/* Preemption implicitly disabled by virtue of
+			 * being called from inside OBP.
+			 */
+			ptep = pte_offset_map(pmdp, va);
+			if (pte_present(*ptep)) {
+				tte = pte_val(*ptep);
+				res = PROM_TRUE;
+			}
+			pte_unmap(ptep);
+			goto done;
+		}
+
+		/* ctx == 0: kernel address.  First 4MB of KERNBASE is
+		 * covered by the locked-down TLB entry.
+		 */
+		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
+			unsigned long kernel_pctx = 0;
+
+			if (tlb_type == cheetah_plus)
+				kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
+						CTX_CHEETAH_PLUS_CTX0);
+
+			/* Spitfire Errata #32 workaround */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (kernel_pctx),
+					       "r" (PRIMARY_CONTEXT),
+					       "i" (ASI_DMMU));
+
+			/*
+			 * Locked down tlb entry.
+			 */
+
+			if (tlb_type == spitfire)
+				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
+			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
+				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
+
+			res = PROM_TRUE;
+			goto done;
+		}
+
+		if (va < PGDIR_SIZE) {
+			/*
+			 * vmalloc or prom_inherited mapping.
+			 */
+			pgd_t *pgdp;
+			pud_t *pudp;
+			pmd_t *pmdp;
+			pte_t *ptep;
+			int error;
+
+			if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
+				tte = prom_virt_to_phys(va, &error);
+				if (!error)
+					res = PROM_TRUE;
+				goto done;
+			}
+			pgdp = pgd_offset_k(va);
+			if (pgd_none(*pgdp))
+				goto done;
+			pudp = pud_offset(pgdp, va);
+			if (pud_none(*pudp))
+				goto done;
+			pmdp = pmd_offset(pudp, va);
+			if (pmd_none(*pmdp))
+				goto done;
+
+			/* Preemption implicitly disabled by virtue of
+			 * being called from inside OBP.
+			 */
+			ptep = pte_offset_kernel(pmdp, va);
+			if (pte_present(*ptep)) {
+				tte = pte_val(*ptep);
+				res = PROM_TRUE;
+			}
+			goto done;
+		}
+
+		if (va < PAGE_OFFSET) {
+			/*
+			 * No mappings here.
+			 */
+			goto done;
+		}
+
+		if (va & (1UL << 40)) {
+			/*
+			 * I/O page.
+			 */
+
+			tte = (__pa(va) & _PAGE_PADDR) |
+			      _PAGE_VALID | _PAGE_SZ4MB |
+			      _PAGE_E | _PAGE_P | _PAGE_W;
+			res = PROM_TRUE;
+			goto done;
+		}
+
+		/*
+		 * Normal page.
+		 */
+		tte = (__pa(va) & _PAGE_PADDR) |
+		      _PAGE_VALID | _PAGE_SZ4MB |
+		      _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
+		res = PROM_TRUE;
+
+	done:
+		if (res == PROM_TRUE) {
+			args[2] = 3;
+			args[args[1] + 3] = 0;
+			args[args[1] + 4] = res;
+			args[args[1] + 5] = tte;
+		} else {
+			args[2] = 2;
+			args[args[1] + 3] = 0;
+			args[args[1] + 4] = res;
+		}
+	} else if (!strcmp(cmd, ".soft1")) {
+		/* Pretty-print the software bits of a TTE value.
+		 * NOTE(review): _PAGE_READ maps to "W" and _PAGE_WRITE
+		 * to "R" here, which looks swapped — confirm against
+		 * the intended OBP output before "fixing".
+		 */
+		unsigned long tte;
+
+		tte = args[3];
+		prom_printf("%lx:\"%s%s%s%s%s\" ",
+			    (tte & _PAGE_SOFT) >> 7,
+			    tte & _PAGE_MODIFIED ? "M" : "-",
+			    tte & _PAGE_ACCESSED ? "A" : "-",
+			    tte & _PAGE_READ ? "W" : "-",
+			    tte & _PAGE_WRITE ? "R" : "-",
+			    tte & _PAGE_PRESENT ? "P" : "-");
+
+		args[2] = 2;
+		args[args[1] + 3] = 0;
+		args[args[1] + 4] = PROM_TRUE;
+	} else if (!strcmp(cmd, ".soft2")) {
+		/* Print the second software-bit field of a TTE. */
+		unsigned long tte;
+
+		tte = args[3];
+		prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
+
+		args[2] = 2;
+		args[args[1] + 3] = 0;
+		args[args[1] + 4] = PROM_TRUE;
+	} else {
+		prom_printf("unknown PROM `%s' command...\n", cmd);
+	}
+	/* Put the console list back the way we found it. */
+	unregister_console(&prom_console);
+	while (saved_console) {
+		cons = saved_console;
+		saved_console = cons->next;
+		register_console(cons);
+	}
+	spin_lock(&prom_entry_lock);
+	local_irq_restore(flags);
+
+	/*
+	 * Restore in-interrupt status for a resume from obp.
+	 */
+	irq_enter();
+	return 0;
+}
+
+/* Bitmask of boot-time switches parsed by process_switch(). */
+unsigned int boot_flags = 0;
+#define BOOTME_DEBUG  0x1	/* -d: debug mode, halt into OBP on interrupt */
+#define BOOTME_SINGLE 0x2	/* -s: single-user boot */
+
+/* Exported for mm/init.c:paging_init. */
+unsigned long cmdline_memory_size = 0;
+
+/* Secondary console selected by the "-p" boot switch: mirrors the
+ * printk buffer to the PROM console for early debugging.
+ */
+static struct console prom_debug_console = {
+	.name =		"debug",
+	.write =	prom_console_write,
+	.flags =	CON_PRINTBUFFER,
+	.index =	-1,
+};
+
+/* XXX Implement this at some point... */
+/* Placeholder hook for dropping into a kernel debugger; currently a
+ * no-op.
+ */
+void kernel_enter_debugger(void)
+{
+}
+
+/* Handle an OBP "system interrupted" event.  When the machine was
+ * booted with the debug flag (-d), announce the event and halt into
+ * the PROM; the call then returns 1 to tell the caller it was
+ * consumed.  Without the debug flag nothing happens and 0 is
+ * returned.
+ */
+int obp_system_intr(void)
+{
+	if (!(boot_flags & BOOTME_DEBUG))
+		return 0;
+
+	printk("OBP: system interrupted\n");
+	prom_halt();
+	return 1;
+}
+
+/*
+ * Process kernel command line switches that are specific to the
+ * SPARC or that require special low-level processing.
+ */
+/* Handle a single-character boot switch (the letters following a '-'
+ * on the kernel command line).
+ */
+static void __init process_switch(char c)
+{
+	switch (c) {
+	case 'd':
+		/* Debug boot: see obp_system_intr(). */
+		boot_flags |= BOOTME_DEBUG;
+		break;
+	case 's':
+		/* Single-user boot. */
+		boot_flags |= BOOTME_SINGLE;
+		break;
+	case 'h':
+		/* Halt immediately, dropping into the PROM. */
+		prom_printf("boot_flags_init: Halt!\n");
+		prom_halt();
+		break;
+	case 'p':
+		/* Use PROM debug console. */
+		register_console(&prom_debug_console);
+		break;
+	default:
+		printk("Unknown boot switch (-%c)\n", c);
+		break;
+	}
+}
+
+/* Parse a "console=..." command-line argument.  COMMANDS points at
+ * the start of the "console=" token; serial_console is set to 0 for
+ * a non-serial console, or to (port number + 1) for a serial one.
+ */
+static void __init process_console(char *commands)
+{
+	serial_console = 0;
+	commands += 8;	/* skip past "console=" */
+	/* Linux-style serial */
+	if (!strncmp(commands, "ttyS", 4))
+		serial_console = simple_strtoul(commands + 4, NULL, 10) + 1;
+	else if (!strncmp(commands, "tty", 3)) {
+		char c = *(commands + 3);
+		/* Solaris-style serial */
+		if (c == 'a' || c == 'b') {
+			serial_console = c - 'a' + 1;
+			prom_printf ("Using /dev/tty%c as console.\n", c);
+		}
+		/* else Linux-style fbcon, not serial */
+	}
+#if defined(CONFIG_PROM_CONSOLE)
+	if (!strncmp(commands, "prom", 4)) {
+		char *p;
+
+		/* Blank out the "console=prom" token (note: commands
+		 * was advanced by 8 above, so commands - 8 points back
+		 * at the "console=" prefix) so later parsing ignores it.
+		 */
+		for (p = commands - 8; *p && *p != ' '; p++)
+			*p = ' ';
+		conswitchp = &prom_con;
+	}
+#endif
+}
+
+/* Walk the boot command line, handling '-x' switch clusters,
+ * "console=" and "mem=" arguments, and skipping everything else.
+ */
+static void __init boot_flags_init(char *commands)
+{
+	while (*commands) {
+		/* Move to the start of the next "argument". */
+		while (*commands && *commands == ' ')
+			commands++;
+
+		/* Process any command switches, otherwise skip it. */
+		if (*commands == '\0')
+			break;
+		if (*commands == '-') {
+			commands++;
+			while (*commands && *commands != ' ')
+				process_switch(*commands++);
+			continue;
+		}
+		if (!strncmp(commands, "console=", 8)) {
+			process_console(commands);
+		} else if (!strncmp(commands, "mem=", 4)) {
+			/*
+			 * "mem=XXX[kKmM]" overrides the PROM-reported
+			 * memory size.
+			 */
+			cmdline_memory_size = simple_strtoul(commands + 4,
+							     &commands, 0);
+			if (*commands == 'K' || *commands == 'k') {
+				cmdline_memory_size <<= 10;
+				commands++;
+			} else if (*commands=='M' || *commands=='m') {
+				cmdline_memory_size <<= 20;
+				commands++;
+			}
+		}
+		/* Skip the remainder of this argument. */
+		while (*commands && *commands != ' ')
+			commands++;
+	}
+}
+
+extern int prom_probe_memory(void);
+extern unsigned long start, end;
+extern void panic_setup(char *, int *);
+
+extern unsigned short root_flags;
+extern unsigned short root_dev;
+extern unsigned short ram_flags;
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+extern int root_mountflags;
+
+char reboot_command[COMMAND_LINE_SIZE];
+
+static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+
+/* Install prom_callback() as OBP's callback and define Forth words
+ * that route "va>tte-data", ".soft1" and ".soft2" through it (the
+ * leading number in each definition is the input argument count the
+ * callback expects).
+ */
+void register_prom_callbacks(void)
+{
+	prom_setcallback(prom_callback);
+	prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
+		   "' linux-va>tte-data to va>tte-data");
+	prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
+		   "' linux-.soft1 to .soft1");
+	prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
+		   "' linux-.soft2 to .soft2");
+}
+
+extern void paging_init(void);
+
+/* Main sparc64 architecture setup, called from start_kernel().
+ * Grabs the boot arguments from the PROM, probes physical memory,
+ * determines the kernel's physical load address from the locked TLB
+ * entry, wires up ramdisk/IP-autoconfig parameters, and finally
+ * calls paging_init().
+ */
+void __init setup_arch(char **cmdline_p)
+{
+	unsigned long highest_paddr;
+	int i;
+
+	/* Initialize PROM console and command line. */
+	*cmdline_p = prom_getbootargs();
+	strcpy(saved_command_line, *cmdline_p);
+
+	printk("ARCH: SUN4U\n");
+
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#elif defined(CONFIG_PROM_CONSOLE)
+	conswitchp = &prom_con;
+#endif
+
+#ifdef CONFIG_SMP
+	/* Patch the shift used to index irq_stat[] in assembly; the
+	 * element stride must be one or two cache lines.
+	 */
+	i = (unsigned long)&irq_stat[1] - (unsigned long)&irq_stat[0];
+	if ((i == SMP_CACHE_BYTES) || (i == (2 * SMP_CACHE_BYTES))) {
+		extern unsigned int irqsz_patchme[1];
+		irqsz_patchme[0] |= ((i == SMP_CACHE_BYTES) ? SMP_CACHE_BYTES_SHIFT : \
+				     SMP_CACHE_BYTES_SHIFT + 1);
+		flushi((long)&irqsz_patchme[0]);
+	} else {
+		prom_printf("Unexpected size of irq_stat[] elements\n");
+		prom_halt();
+	}
+#endif
+	/* Work out if we are starfire early on */
+	check_if_starfire();
+
+	boot_flags_init(*cmdline_p);
+
+	idprom_init();
+	(void) prom_probe_memory();
+
+	/* In paging_init() we tip off this value to see if we need
+	 * to change init_mm.pgd to point to the real alias mapping.
+	 */
+	phys_base = 0xffffffffffffffffUL;
+	highest_paddr = 0UL;
+	/* Scan the PROM-reported memory banks for the lowest and
+	 * highest physical addresses.
+	 */
+	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+		unsigned long top;
+
+		if (sp_banks[i].base_addr < phys_base)
+			phys_base = sp_banks[i].base_addr;
+		top = sp_banks[i].base_addr +
+			sp_banks[i].num_bytes;
+		if (highest_paddr < top)
+			highest_paddr = top;
+	}
+	pfn_base = phys_base >> PAGE_SHIFT;
+
+	/* Recover the kernel's physical base from the locked ITLB
+	 * entry the bootstrap installed; the read is CPU-specific.
+	 */
+	switch (tlb_type) {
+	default:
+	case spitfire:
+		kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
+		kern_base &= _PAGE_PADDR_SF;
+		break;
+
+	case cheetah:
+	case cheetah_plus:
+		kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
+		kern_base &= _PAGE_PADDR;
+		break;
+	};
+
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+
+	/* Root filesystem and initial ramdisk parameters come from
+	 * variables patched into the image by the boot loader.
+	 */
+	if (!root_flags)
+		root_mountflags &= ~MS_RDONLY;
+	ROOT_DEV = old_decode_dev(root_dev);
+#ifdef CONFIG_BLK_DEV_INITRD
+	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
+	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
+	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
+#endif
+
+	init_task.thread_info->kregs = &fake_swapper_regs;
+
+#ifdef CONFIG_IP_PNP
+	/* Seed IP autoconfiguration from OBP's /chosen properties
+	 * when the user did not specify addresses on the command line.
+	 */
+	if (!ic_set_manually) {
+		int chosen = prom_finddevice ("/chosen");
+		u32 cl, sv, gw;
+
+		cl = prom_getintdefault (chosen, "client-ip", 0);
+		sv = prom_getintdefault (chosen, "server-ip", 0);
+		gw = prom_getintdefault (chosen, "gateway-ip", 0);
+		if (cl && sv) {
+			ic_myaddr = cl;
+			ic_servaddr = sv;
+			if (gw)
+				ic_gateway = gw;
+#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
+			/* Addresses known; no BOOTP/RARP needed. */
+			ic_proto_enabled = 0;
+#endif
+		}
+	}
+#endif
+
+	paging_init();
+}
+
+/* Late-init console selection: when the user gave no "console="
+ * argument, ask the PROM which input/output devices it is using and
+ * pick the matching Linux console.  Halts into the PROM if the
+ * input/output pairing is inconsistent.
+ */
+static int __init set_preferred_console(void)
+{
+	int idev, odev;
+
+	/* The user has requested a console so this is already set up. */
+	if (serial_console >= 0)
+		return -EBUSY;
+
+	idev = prom_query_input_device();
+	odev = prom_query_output_device();
+	if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
+		serial_console = 0;
+	} else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
+		serial_console = 1;
+	} else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
+		serial_console = 2;
+	} else {
+		prom_printf("Inconsistent console: "
+			    "input %d, output %d\n",
+			    idev, odev);
+		prom_halt();
+	}
+
+	/* serial_console is 1-based; ttyS minor is serial_console-1. */
+	if (serial_console)
+		return add_preferred_console("ttyS", serial_console - 1, NULL);
+
+	return -ENODEV;
+}
+console_initcall(set_preferred_console);
+
+/* BUFFER is PAGE_SIZE bytes long. */
+
+extern char *sparc_cpu_type;
+extern char *sparc_fpu_type;
+
+extern void smp_info(struct seq_file *);
+extern void smp_bogo(struct seq_file *);
+extern void mmu_info(struct seq_file *);
+
+/* seq_file "show" hook for /proc/cpuinfo: prints CPU/FPU types, PROM
+ * version, and per-CPU bogomips/clock data (inline on UP, via
+ * smp_bogo()/smp_info() on SMP).
+ */
+static int show_cpuinfo(struct seq_file *m, void *__unused)
+{
+	seq_printf(m, 
+		   "cpu\t\t: %s\n"
+		   "fpu\t\t: %s\n"
+		   "promlib\t\t: Version 3 Revision %d\n"
+		   "prom\t\t: %d.%d.%d\n"
+		   "type\t\t: sun4u\n"
+		   "ncpus probed\t: %ld\n"
+		   "ncpus active\t: %ld\n"
+#ifndef CONFIG_SMP
+		   "Cpu0Bogo\t: %lu.%02lu\n"
+		   "Cpu0ClkTck\t: %016lx\n"
+#endif
+		   ,
+		   sparc_cpu_type,
+		   sparc_fpu_type,
+		   prom_rev,
+		   prom_prev >> 16,
+		   (prom_prev >> 8) & 0xff,
+		   prom_prev & 0xff,
+		   (long)num_possible_cpus(),
+		   (long)num_online_cpus()
+#ifndef CONFIG_SMP
+		   , cpu_data(0).udelay_val/(500000/HZ),
+		   (cpu_data(0).udelay_val/(5000/HZ)) % 100,
+		   cpu_data(0).clock_tick
+#endif
+		);
+#ifdef CONFIG_SMP
+	smp_bogo(m);
+#endif
+	mmu_info(m);
+#ifdef CONFIG_SMP
+	smp_info(m);
+#endif
+	return 0;
+}
+
+/* seq_file "start" hook: /proc/cpuinfo exposes exactly one record,
+ * so only position 0 yields an iterator token.  seq_file merely
+ * requires a non-NULL, non-IS_ERR cookie, so this function's own
+ * address is as good as anything.
+ */
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	if (*pos != 0)
+		return NULL;
+	return &c_start;
+}
+
+/* seq_file "next" hook: bump the position and defer to c_start(),
+ * which returns NULL for any position past 0, ending the iteration.
+ */
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return c_start(m, pos);
+}
+
+/* seq_file "stop" hook: nothing to clean up. */
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+/* seq_file operations backing /proc/cpuinfo. */
+struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_cpuinfo,
+};
+
+extern int stop_a_enabled;
+
+/* Handle the Stop-A / break key sequence: if enabled, flush user
+ * register windows and drop into the OBP command prompt.
+ */
+void sun_do_break(void)
+{
+	if (!stop_a_enabled)
+		return;
+
+	prom_printf("\n");
+	flush_user_windows();
+
+	prom_cmdline();
+}
+
+int serial_console = -1;
+int stop_a_enabled = 1;
+
+/* Register a sysfs cpu device for every possible CPU.  Returns 0 if
+ * at least one registration structure was allocated, -ENOMEM if every
+ * allocation failed (note: per-CPU allocation failures other than the
+ * last are otherwise silently skipped).
+ */
+static int __init topology_init(void)
+{
+	int i, err;
+
+	err = -ENOMEM;
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_possible(i)) {
+			struct cpu *p = kmalloc(sizeof(*p), GFP_KERNEL);
+
+			if (p) {
+				memset(p, 0, sizeof(*p));
+				register_cpu(p, i, NULL);
+				err = 0;
+			}
+		}
+	}
+
+	return err;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
new file mode 100644
index 00000000000..b27934671c3
--- /dev/null
+++ b/arch/sparc64/kernel/signal.c
@@ -0,0 +1,688 @@
+/* $Id: signal.c,v 1.60 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#ifdef CONFIG_SPARC32_COMPAT
+#include <linux/compat.h> /* for compat_old_sigset_t */
+#endif
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/smp_lock.h>
+#include <linux/binfmts.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/svr4.h>
+#include <asm/pgtable.h>
+#include <asm/fpumacro.h>
+#include <asm/uctx.h>
+#include <asm/siginfo.h>
+#include <asm/visasm.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+static int do_signal(sigset_t *oldset, struct pt_regs * regs,
+ unsigned long orig_o0, int ret_from_syscall);
+
+/* {set, get}context() needed for 64-bit SparcLinux userland. */
+/* {set, get}context() needed for 64-bit SparcLinux userland. */
+/* setcontext() syscall body: restore the register state, signal mask
+ * and (optionally) FPU state described by the user-supplied ucontext
+ * in %i0.  %i1 non-zero requests the signal mask restore.  Any
+ * access/validity failure raises SIGSEGV.
+ */
+asmlinkage void sparc64_set_context(struct pt_regs *regs)
+{
+	struct ucontext __user *ucp = (struct ucontext __user *)
+		regs->u_regs[UREG_I0];
+	mc_gregset_t __user *grp;
+	unsigned long pc, npc, tstate;
+	unsigned long fp, i7;
+	unsigned char fenab;
+	int err;
+
+	flush_user_windows();
+	/* Refuse if user windows are still buffered in the kernel, or
+	 * the ucontext pointer is misaligned or not accessible.
+	 */
+	if (get_thread_wsaved()					||
+	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
+	    (!__access_ok(ucp, sizeof(*ucp))))
+		goto do_sigsegv;
+	grp  = &ucp->uc_mcontext.mc_gregs;
+	err  = __get_user(pc, &((*grp)[MC_PC]));
+	err |= __get_user(npc, &((*grp)[MC_NPC]));
+	/* PC/NPC must be word aligned. */
+	if (err || ((pc | npc) & 3))
+		goto do_sigsegv;
+	if (regs->u_regs[UREG_I1]) {
+		sigset_t set;
+
+		if (_NSIG_WORDS == 1) {
+			if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
+				goto do_sigsegv;
+		} else {
+			if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
+				goto do_sigsegv;
+		}
+		/* Never allow SIGKILL/SIGSTOP to be blocked. */
+		sigdelsetmask(&set, ~_BLOCKABLE);
+		spin_lock_irq(&current->sighand->siglock);
+		current->blocked = set;
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+	err |= __get_user(regs->y, &((*grp)[MC_Y]));
+	err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
+	/* User may only change condition codes and %asi in %tstate. */
+	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
+	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
+	err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
+	err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
+	err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
+	err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
+	err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
+	err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
+	err |= __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
+	err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
+	err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
+	err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
+	err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
+	err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
+	err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
+	err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
+	err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
+
+	/* Write the saved frame pointer and return address into the
+	 * user's register window save area on the stack.
+	 */
+	err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
+	err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
+	err |= __put_user(fp,
+	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
+	err |= __put_user(i7,
+	      (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
+
+	err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
+	if (fenab) {
+		/* Restore FPU register file, FSR and GSR from the
+		 * ucontext, honoring the saved FPRS DL/DU bits.
+		 */
+		unsigned long *fpregs = current_thread_info()->fpregs;
+		unsigned long fprs;
+
+		fprs_write(0);
+		err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
+		if (fprs & FPRS_DL)
+			err |= copy_from_user(fpregs,
+					      &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
+					      (sizeof(unsigned int) * 32));
+		if (fprs & FPRS_DU)
+			err |= copy_from_user(fpregs+16,
+			 ((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
+			 (sizeof(unsigned int) * 32));
+		err |= __get_user(current_thread_info()->xfsr[0],
+				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
+		err |= __get_user(current_thread_info()->gsr[0],
+				  &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
+		regs->tstate &= ~TSTATE_PEF;
+	}
+	if (err)
+		goto do_sigsegv;
+
+	return;
+do_sigsegv:
+	force_sig(SIGSEGV, current);
+}
+
+/* getcontext() syscall body: capture the current register state,
+ * signal mask and (when enabled) FPU state into the user-supplied
+ * ucontext in %i0.  The trap PC is advanced first so a later
+ * setcontext() resumes after the getcontext() call.  Failures raise
+ * SIGSEGV.
+ */
+asmlinkage void sparc64_get_context(struct pt_regs *regs)
+{
+	struct ucontext __user *ucp = (struct ucontext __user *)
+		regs->u_regs[UREG_I0];
+	mc_gregset_t __user *grp;
+	mcontext_t __user *mcp;
+	unsigned long fp, i7;
+	unsigned char fenab;
+	int err;
+
+	synchronize_user_stack();
+	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
+		goto do_sigsegv;
+
+#if 1
+	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
+#else
+	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
+#endif
+
+	mcp = &ucp->uc_mcontext;
+	grp = &mcp->mc_gregs;
+
+	/* Skip over the trap instruction, first. */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc   = (regs->tnpc & 0xffffffff);
+		regs->tnpc  = (regs->tnpc + 4) & 0xffffffff;
+	} else {
+		regs->tpc   = regs->tnpc;
+		regs->tnpc += 4;
+	}
+	err = 0;
+	if (_NSIG_WORDS == 1)
+		err |= __put_user(current->blocked.sig[0],
+				  (unsigned long __user *)&ucp->uc_sigmask);
+	else
+		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
+				      sizeof(sigset_t));
+
+	/* Save the general registers into the mcontext gregset. */
+	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
+	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
+	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
+	err |= __put_user(regs->y, &((*grp)[MC_Y]));
+	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
+	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
+	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
+	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
+	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
+	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
+	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
+	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
+	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
+	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
+	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
+	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
+	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
+	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
+	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
+
+	/* Pull the saved frame pointer and return address out of the
+	 * user's register window save area on the stack.
+	 */
+	err |= __get_user(fp,
+		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
+	err |= __get_user(i7,
+		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
+	err |= __put_user(fp, &(mcp->mc_fp));
+	err |= __put_user(i7, &(mcp->mc_i7));
+
+	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
+	if (fenab) {
+		/* Dead code while the "#if 1" above forces fenab = 0,
+		 * but kept so FPU state saving can be re-enabled.
+		 */
+		unsigned long *fpregs = current_thread_info()->fpregs;
+		unsigned long fprs;
+		
+		fprs = current_thread_info()->fpsaved[0];
+		if (fprs & FPRS_DL)
+			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
+					    (sizeof(unsigned int) * 32));
+		if (fprs & FPRS_DU)
+			err |= copy_to_user(
+                          ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
+			  (sizeof(unsigned int) * 32));
+		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
+		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
+		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
+	}
+	if (err)
+		goto do_sigsegv;
+
+	return;
+do_sigsegv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Layout of the 64-bit rt signal frame pushed onto the user stack by
+ * setup_rt_frame() and consumed by do_rt_sigreturn().
+ */
+struct rt_signal_frame {
+	struct sparc_stackf	ss;		/* register window save area */
+	siginfo_t		info;
+	struct pt_regs		regs;
+	__siginfo_fpu_t __user	*fpu_save;	/* NULL, or points at fpu_state */
+	stack_t			stack;		/* sigaltstack state */
+	sigset_t		mask;		/* blocked mask to restore */
+	__siginfo_fpu_t		fpu_state;	/* present only when FPU in use */
+};
+
+/* Align macros */
+/* Frame size rounded up to an 8-byte boundary. */
+#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ * This is really tricky on the Sparc, watch out...
+ */
+asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
+{
+	sigset_t saveset;
+
+#ifdef CONFIG_SPARC32_COMPAT
+	/* 32-bit tasks use the narrower compat_old_sigset_t layout. */
+	if (test_thread_flag(TIF_32BIT)) {
+		extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
+							  struct pt_regs *);
+		_sigpause32_common(set, regs);
+		return;
+	}
+#endif
+	/* Install the temporary mask (SIGKILL/SIGSTOP never blockable),
+	 * remembering the old one to hand to the signal handler setup.
+	 */
+	set &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	siginitset(&current->blocked, set);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	
+	/* Advance past the trap instruction so the syscall does not
+	 * re-execute on return.
+	 */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc = (regs->tnpc & 0xffffffff);
+		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
+	} else {
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal(&saveset, regs, 0, 0))
+			return;
+	}
+}
+
+/* sigpause(2) entry: mask comes in as an explicit syscall argument. */
+asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
+{
+	_sigpause_common(set, regs);
+}
+
+/* sigsuspend(2) entry: mask is taken from the user's %o0 register. */
+asmlinkage void do_sigsuspend(struct pt_regs *regs)
+{
+	_sigpause_common(regs->u_regs[UREG_I0], regs);
+}
+
+/* rt_sigsuspend(2): temporarily install the user-supplied mask and
+ * sleep until a signal is delivered; the interrupted-syscall return
+ * value (-EINTR plus carry flags) is patched directly into regs.
+ */
+asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
+{
+	sigset_t oldset, set;
+        
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t)) {
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EINVAL;
+		return;
+	}
+	if (copy_from_user(&set, uset, sizeof(set))) {
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EFAULT;
+		return;
+	}
+
+	/* SIGKILL/SIGSTOP can never be blocked. */
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	oldset = current->blocked;
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	
+	/* Advance past the trap instruction so the syscall does not
+	 * re-execute on return.
+	 */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc = (regs->tnpc & 0xffffffff);
+		regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
+	} else {
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal(&oldset, regs, 0, 0))
+			return;
+	}
+}
+
+/* Restore the task's FPU state (register file halves selected by the
+ * saved FPRS DL/DU bits, plus FSR and GSR) from the signal frame's
+ * __siginfo_fpu_t.  Clears TSTATE_PEF so the FPU is lazily re-enabled
+ * on next use.  Returns non-zero on any user-copy fault.
+ */
+static inline int
+restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = current_thread_info()->fpregs;
+	unsigned long fprs;
+	int err;
+
+	err = __get_user(fprs, &fpu->si_fprs);
+	fprs_write(0);
+	regs->tstate &= ~TSTATE_PEF;
+	if (fprs & FPRS_DL)
+		err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
+		       	       (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
+		       	       (sizeof(unsigned int) * 32));
+	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	current_thread_info()->fpsaved[0] |= fprs;
+	return err;
+}
+
+/* rt_sigreturn(2): unwind the rt signal frame that setup_rt_frame()
+ * pushed — restore registers, FPU state, sigaltstack and the blocked
+ * signal mask.  A corrupt or misaligned frame raises SIGSEGV.
+ */
+void do_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_signal_frame __user *sf;
+	unsigned long tpc, tnpc, tstate;
+	__siginfo_fpu_t __user *fpu_save;
+	mm_segment_t old_fs;
+	sigset_t set;
+	stack_t st;
+	int err;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	synchronize_user_stack ();
+	sf = (struct rt_signal_frame __user *)
+		(regs->u_regs [UREG_FP] + STACK_BIAS);
+
+	/* 1. Make sure we are not getting garbage from the user */
+	if (((unsigned long) sf) & 3)
+		goto segv;
+
+	err = get_user(tpc, &sf->regs.tpc);
+	err |= __get_user(tnpc, &sf->regs.tnpc);
+	if (test_thread_flag(TIF_32BIT)) {
+		tpc &= 0xffffffff;
+		tnpc &= 0xffffffff;
+	}
+	/* Both must be word aligned; folding the check into err keeps
+	 * a single failure path.
+	 */
+	err |= ((tpc | tnpc) & 3);
+
+	/* 2. Restore the state */
+	err |= __get_user(regs->y, &sf->regs.y);
+	err |= __get_user(tstate, &sf->regs.tstate);
+	err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));
+
+	/* User can only change condition codes and %asi in %tstate. */
+	regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
+	regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
+
+	err |= __get_user(fpu_save, &sf->fpu_save);
+	/* fpu_save is only a present/absent flag; the state itself
+	 * always lives at sf->fpu_state within the frame.
+	 */
+	if (fpu_save)
+		err |= restore_fpu_state(regs, &sf->fpu_state);
+
+	err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
+	err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t));
+	
+	if (err)
+		goto segv;
+
+	regs->tpc = tpc;
+	regs->tnpc = tnpc;
+
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
+	set_fs(old_fs);
+
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return;
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Checks if the fp is valid: a signal frame pointer must be 8-byte
+ * aligned.  Returns non-zero when the pointer must not be used.
+ */
+static int invalid_frame_pointer(void __user *fp, int fplen)
+{
+	return (((unsigned long) fp) & 7) != 0;
+}
+
+/* Save the task's FPU state into the signal frame's __siginfo_fpu_t.
+ * The register values are read from the save area just past the
+ * pt_regs block (regs+1); which halves are copied is selected by the
+ * task's saved FPRS DL/DU bits.  Returns non-zero on user-copy fault.
+ */
+static inline int
+save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = (unsigned long *)(regs+1);
+	unsigned long fprs;
+	int err = 0;
+	
+	fprs = current_thread_info()->fpsaved[0];
+	if (fprs & FPRS_DL)
+		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
+				    (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
+				    (sizeof(unsigned int) * 32));
+	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	err |= __put_user(fprs, &fpu->si_fprs);
+
+	return err;
+}
+
+/* Compute where on the user stack the signal frame of FRAMESIZE bytes
+ * should be placed, honoring SA_ONSTACK (sigaltstack) when requested.
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
+{
+	unsigned long sp;
+
+	sp = regs->u_regs[UREG_FP] + STACK_BIAS;
+
+	/* This is the X/Open sanctioned signal stack switching.  */
+	if (ka->sa.sa_flags & SA_ONSTACK) {
+		/* Switch only if not already on the alternate stack and
+		 * its top is 8-byte aligned.  NOTE(review): the second
+		 * condition requires the alignment check to be *false*
+		 * (i.e. top NOT misaligned reads as !(... & 7)) — the
+		 * intent appears to be "top is aligned"; confirm.
+		 */
+		if (!on_sig_stack(sp) &&
+		    !((current->sas_ss_sp + current->sas_ss_size) & 7))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+	return (void __user *)(sp - framesize);
+}
+
+/* Build an rt signal frame on the user stack and redirect the
+ * thread to the handler.  Any fault while writing the frame kills
+ * the task (SIGSEGV); a bad frame pointer or pending register
+ * windows we cannot flush kills it with SIGILL.
+ */
+static inline void
+setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+	       int signo, sigset_t *oldset, siginfo_t *info)
+{
+	struct rt_signal_frame32 __user *sf;
+	int sigframe_size, err;
+
+	/* 1. Make sure everything is clean */
+	synchronize_user_stack();
+	save_and_clear_fpu();
+
+	/* Frame is smaller when there is no live FPU state to save. */
+	sigframe_size = RT_ALIGNEDSZ;
+	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
+		sigframe_size -= sizeof(__siginfo_fpu_t);
+
+	sf = (struct rt_signal_frame32 __user *)
+		get_sigframe(ka, regs, sigframe_size);
+
+	if (invalid_frame_pointer (sf, sigframe_size))
+		goto sigill;
+
+	/* Windows must all be on the user stack at this point. */
+	if (get_thread_wsaved() != 0)
+		goto sigill;
+
+	/* 2. Save the current process state */
+	err = copy_to_user(&sf->regs, regs, sizeof (*regs));
+
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
+		err |= save_fpu_state(regs, &sf->fpu_state);
+		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
+	} else {
+		/* NULL fpu_save tells sigreturn there is no FPU state. */
+		err |= __put_user(0, &sf->fpu_save);
+	}
+
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
+
+	err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));
+
+	/* Duplicate the caller's register window into the frame so
+	 * the handler gets a valid save area below it.
+	 */
+	err |= copy_in_user((u64 __user *)sf,
+			    (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
+			    sizeof(struct reg_window));
+
+	if (info)
+		err |= copy_siginfo_to_user(&sf->info, info);
+	else {
+		/* No siginfo supplied (handler lacks SA_SIGINFO); fill
+		 * in the minimum so the frame is self-consistent.
+		 */
+		err |= __put_user(signo, &sf->info.si_signo);
+		err |= __put_user(SI_NOINFO, &sf->info.si_code);
+	}
+	if (err)
+		goto sigsegv;
+
+	/* 3. signal handler back-trampoline and parameters */
+	regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
+	regs->u_regs[UREG_I0] = signo;
+	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+
+	/* The sigcontext is passed in this way because of how it
+	 * is defined in GLIBC's /usr/include/bits/sigcontext.h
+	 * for sparc64.  It includes the 128 bytes of siginfo_t.
+	 */
+	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
+
+	/* 4. signal handler */
+	regs->tpc = (unsigned long) ka->sa.sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	/* 5. return to kernel instructions: userland must supply a
+	 * restorer trampoline on sparc64 (no on-stack trampoline).
+	 */
+	regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
+	return;
+
+sigill:
+	do_exit(SIGILL);
+sigsegv:
+	force_sigsegv(signo, current);
+}
+
+/* Deliver one signal: push the frame, then update the blocked set
+ * (handler's sa_mask plus the signal itself unless SA_NOMASK).
+ */
+static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
+				 siginfo_t *info,
+				 sigset_t *oldset, struct pt_regs *regs)
+{
+	/* Only pass siginfo through when the handler asked for it. */
+	setup_rt_frame(ka, regs, signr, oldset,
+		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
+	if (!(ka->sa.sa_flags & SA_NOMASK)) {
+		spin_lock_irq(&current->sighand->siglock);
+		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+		sigaddset(&current->blocked,signr);
+		recalc_sigpending();
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
+/* Decide how an interrupted system call behaves when a handler
+ * runs.  Either fail it with EINTR (carry flags signal the error
+ * to userland per the sparc syscall ABI) or rewind %tpc/%tnpc by
+ * one instruction so the trap is re-executed on return.
+ */
+static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
+				   struct sigaction *sa)
+{
+	switch (regs->u_regs[UREG_I0]) {
+	case ERESTART_RESTARTBLOCK:
+	case ERESTARTNOHAND:
+	no_system_call_restart:
+		regs->u_regs[UREG_I0] = EINTR;
+		regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
+		break;
+	case ERESTARTSYS:
+		/* ERESTARTSYS restarts only if the handler allows it. */
+		if (!(sa->sa_flags & SA_RESTART))
+			goto no_system_call_restart;
+	/* fallthrough */
+	case ERESTARTNOINTR:
+		regs->u_regs[UREG_I0] = orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+}
+
+/* Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle.  Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Dequeue and deliver one pending signal; returns 1 if a handler
+ * frame was set up, 0 otherwise.  When no handler ran, pending
+ * syscall-restart bookkeeping is applied here instead.
+ */
+static int do_signal(sigset_t *oldset, struct pt_regs * regs,
+		     unsigned long orig_i0, int restart_syscall)
+{
+	siginfo_t info;
+	struct signal_deliver_cookie cookie;
+	struct k_sigaction ka;
+	int signr;
+
+	/* The cookie lets ptrace_signal_deliver() replay the syscall
+	 * state if the debugger cancels delivery.
+	 */
+	cookie.restart_syscall = restart_syscall;
+	cookie.orig_i0 = orig_i0;
+
+	if (!oldset)
+		oldset = &current->blocked;
+
+#ifdef CONFIG_SPARC32_COMPAT
+	/* 32-bit tasks take the compat delivery path entirely. */
+	if (test_thread_flag(TIF_32BIT)) {
+		extern int do_signal32(sigset_t *, struct pt_regs *,
+				       unsigned long, int);
+		return do_signal32(oldset, regs, orig_i0,
+				   cookie.restart_syscall);
+	}
+#endif
+
+	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
+	if (signr > 0) {
+		if (cookie.restart_syscall)
+			syscall_restart(orig_i0, regs, &ka.sa);
+		handle_signal(signr, &ka, &info, oldset, regs);
+		return 1;
+	}
+	/* No handler ran: restart the interrupted syscall ourselves. */
+	if (cookie.restart_syscall &&
+	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
+	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+		/* replay the system call when we are done */
+		regs->u_regs[UREG_I0] = cookie.orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	if (cookie.restart_syscall &&
+	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
+		/* Route through sys_restart_syscall on re-entry. */
+		regs->u_regs[UREG_G1] = __NR_restart_syscall;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	return 0;
+}
+
+/* Called on the way back to user mode; dispatch pending signal
+ * work when the TIF flag says there is any.
+ */
+void do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
+		      unsigned long orig_i0, int restart_syscall,
+		      unsigned long thread_info_flags)
+{
+	if (!(thread_info_flags & _TIF_SIGPENDING))
+		return;
+
+	do_signal(oldset, regs, orig_i0, restart_syscall);
+}
+
+/* Hook invoked from the generic signal code while a debugger is
+ * deciding a signal's fate.  Applies the deferred syscall-restart
+ * state recorded in the cookie exactly once.
+ */
+void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
+{
+	struct signal_deliver_cookie *cp = cookie;
+
+	if (cp->restart_syscall &&
+	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
+	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+		/* replay the system call when we are done */
+		regs->u_regs[UREG_I0] = cp->orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+		/* Clear so a second call cannot rewind again. */
+		cp->restart_syscall = 0;
+	}
+	if (cp->restart_syscall &&
+	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
+		regs->u_regs[UREG_G1] = __NR_restart_syscall;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+		cp->restart_syscall = 0;
+	}
+}
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
new file mode 100644
index 00000000000..859255cf676
--- /dev/null
+++ b/arch/sparc64/kernel/signal32.c
@@ -0,0 +1,1469 @@
+/* $Id: signal32.c,v 1.74 2002/02/09 19:49:30 davem Exp $
+ * arch/sparc64/kernel/signal32.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/smp_lock.h>
+#include <linux/binfmts.h>
+#include <linux/compat.h>
+#include <linux/bitops.h>
+
+#include <asm/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/svr4.h>
+#include <asm/pgtable.h>
+#include <asm/psrcompat.h>
+#include <asm/fpumacro.h>
+#include <asm/visasm.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+int do_signal32(sigset_t *oldset, struct pt_regs *regs,
+ unsigned long orig_o0, int ret_from_syscall);
+
+/* Signal frames: the original one (compatible with SunOS):
+ *
+ * Set up a signal frame... Make the stack look the way SunOS
+ * expects it to look which is basically:
+ *
+ * ---------------------------------- <-- %sp at signal time
+ * Struct sigcontext
+ * Signal address
+ * Ptr to sigcontext area above
+ * Signal code
+ * The signal number itself
+ * One register window
+ * ---------------------------------- <-- New %sp
+ */
+struct signal_sframe32 {
+	struct reg_window32 sig_window;
+	int sig_num;
+	int sig_code;
+	/* struct sigcontext32 * */ u32 sig_scptr;
+	int sig_address;
+	struct sigcontext32 sig_context;
+	/* Remaining words of the 32-bit sigmask beyond sigc_mask. */
+	unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
+};
+
+/* This magic should be in g_upper[0] for all upper parts
+ * to be valid.
+ *
+ * Holds the upper 32 bits of the 64-bit globals/outs plus the ASI
+ * for v8plus (32-bit userland on 64-bit CPU) frames.
+ */
+#define SIGINFO_EXTRA_V8PLUS_MAGIC	0x130e269
+typedef struct {
+	unsigned int g_upper[8];
+	unsigned int o_upper[8];
+	unsigned int asi;
+} siginfo_extra_v8plus_t;
+
+/*
+ * And the new one, intended to be used for Linux applications only
+ * (we have enough in there to work with clone).
+ * All the interesting bits are in the info field.
+ */
+struct new_signal_frame32 {
+	struct sparc_stackf32	ss;
+	__siginfo32_t		info;
+	/* __siginfo_fpu32_t * */ u32 fpu_save;
+	/* On-stack sigreturn trampoline, used when no ka_restorer. */
+	unsigned int		insns[2];
+	unsigned int		extramask[_COMPAT_NSIG_WORDS - 1];
+	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
+	/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
+	siginfo_extra_v8plus_t	v8plus;
+	__siginfo_fpu_t		fpu_state;
+};
+
+/* 32-bit layout of siginfo_t as seen by compat userland; mirrors
+ * the generic siginfo_t but with 32-bit pointers/longs.
+ */
+typedef struct compat_siginfo{
+	int	si_signo;
+	int	si_errno;
+	int	si_code;
+
+	union {
+		int _pad[SI_PAD_SIZE32];
+
+		/* kill() */
+		struct {
+			compat_pid_t	_pid;		/* sender's pid */
+			unsigned int	_uid;		/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			timer_t		_tid;		/* timer id */
+			int		_overrun;	/* overrun count */
+			compat_sigval_t	_sigval;	/* same as below */
+			int		_sys_private;	/* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			compat_pid_t	_pid;		/* sender's pid */
+			unsigned int	_uid;		/* sender's uid */
+			compat_sigval_t	_sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			compat_pid_t	_pid;		/* which child */
+			unsigned int	_uid;		/* sender's uid */
+			int		_status;	/* exit code */
+			compat_clock_t	_utime;
+			compat_clock_t	_stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
+		struct {
+			u32	_addr; /* faulting insn/memory ref. */
+			int	_trapno;
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			int	_band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int	_fd;
+		} _sigpoll;
+	} _sifields;
+}compat_siginfo_t;
+
+/* 32-bit rt signal frame: siginfo + full register image + mask
+ * + altstack state, followed by optional v8plus/FPU extensions.
+ */
+struct rt_signal_frame32 {
+	struct sparc_stackf32	ss;
+	compat_siginfo_t	info;
+	struct pt_regs32	regs;
+	compat_sigset_t		mask;
+	/* __siginfo_fpu32_t * */ u32 fpu_save;
+	/* On-stack sigreturn trampoline, used when no ka_restorer. */
+	unsigned int		insns[2];
+	stack_t32		stack;
+	unsigned int		extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
+	/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
+	siginfo_extra_v8plus_t	v8plus;
+	__siginfo_fpu_t		fpu_state;
+};
+
+/* Align macros: frame sizes rounded up to an 8-byte boundary. */
+#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe32) + 7) & (~7)))
+#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame32) + 7) & (~7)))
+#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
+
+/* Convert a 64-bit siginfo_t into the 32-bit compat layout and
+ * copy it to userland.  Returns nonzero on fault.
+ */
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+{
+	int err;
+
+	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	/* If you change siginfo_t structure, please be sure
+	   this code is fixed accordingly.
+	   It should never copy any pad contained in the structure
+	   to avoid security leaks, but must copy the generic
+	   3 ints plus the relevant union member.
+	   This routine must convert siginfo from 64bit to 32bit as well
+	   at the same time. */
+	err = __put_user(from->si_signo, &to->si_signo);
+	err |= __put_user(from->si_errno, &to->si_errno);
+	err |= __put_user((short)from->si_code, &to->si_code);
+	if (from->si_code < 0)
+		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
+	else {
+		switch (from->si_code >> 16) {
+		case __SI_TIMER >> 16:
+			err |= __put_user(from->si_tid, &to->si_tid);
+			err |= __put_user(from->si_overrun, &to->si_overrun);
+			err |= __put_user(from->si_int, &to->si_int);
+			break;
+		case __SI_CHLD >> 16:
+			err |= __put_user(from->si_utime, &to->si_utime);
+			err |= __put_user(from->si_stime, &to->si_stime);
+			err |= __put_user(from->si_status, &to->si_status);
+			/* fallthrough: SIGCHLD also carries pid/uid,
+			 * which the default case below copies.
+			 */
+		default:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			break;
+		case __SI_FAULT >> 16:
+		case __SI_POLL >> 16:
+			err |= __put_user(from->si_trapno, &to->si_trapno);
+			err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
+			break;
+		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
+		case __SI_MESGQ >> 16:
+			err |= __put_user(from->si_pid, &to->si_pid);
+			err |= __put_user(from->si_uid, &to->si_uid);
+			err |= __put_user(from->si_int, &to->si_int);
+			break;
+		}
+	}
+	return err;
+}
+
+/* CAUTION: This is just a very minimalist implementation for the
+ * sake of compat_sys_rt_sigqueueinfo().  It copies the three
+ * header ints plus the raw pad area with no 32->64 field
+ * conversion; callers must only rely on the fields that overlap.
+ */
+int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+{
+	/* 'from' is only read here, so the correct check is for
+	 * read access, not write access.
+	 */
+	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	if (copy_from_user(to, from, 3*sizeof(int)) ||
+	    copy_from_user(to->_sifields._pad, from->_sifields._pad,
+			   SI_PAD_SIZE))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ * This is really tricky on the Sparc, watch out...
+ *
+ * Sleeps until do_signal32() delivers a handler, at which point
+ * the interrupted state already reports -EINTR to userland.
+ */
+asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs)
+{
+	sigset_t saveset;
+
+	/* SIGKILL/SIGSTOP can never be blocked. */
+	set &= _BLOCKABLE;
+	spin_lock_irq(&current->sighand->siglock);
+	saveset = current->blocked;
+	siginitset(&current->blocked, set);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	/* Advance past the trap instruction before sleeping. */
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal32(&saveset, regs, 0, 0))
+			return;
+	}
+}
+
+/* Compat rt_sigsuspend: install the user-supplied mask and sleep
+ * until a handled signal arrives.  Errors are reported through the
+ * sparc carry-flag convention (positive errno + TSTATE_ICARRY).
+ */
+asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs)
+{
+	sigset_t oldset, set;
+	compat_sigset_t set32;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) {
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EINVAL;
+		return;
+	}
+	if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) {
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EFAULT;
+		return;
+	}
+	/* Fold pairs of 32-bit words into 64-bit sigset words; the
+	 * cases deliberately fall through from the highest word down.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
+	case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
+	case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
+	case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	oldset = current->blocked;
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	/* Advance past the trap instruction before sleeping. */
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* Condition codes and return value where set here for sigpause,
+	 * and so got used by setup_frame, which again causes sigreturn()
+	 * to return -EINTR.
+	 */
+	while (1) {
+		current->state = TASK_INTERRUPTIBLE;
+		schedule();
+		/*
+		 * Return -EINTR and set condition code here,
+		 * so the interrupted system call actually returns
+		 * these.
+		 */
+		regs->tstate |= TSTATE_ICARRY;
+		regs->u_regs[UREG_I0] = EINTR;
+		if (do_signal32(&oldset, regs, 0, 0))
+			return;
+	}
+}
+
+/* Reload FPU state from a compat signal frame into the thread's
+ * save area; the registers reach the hardware lazily on the next
+ * FPU trap (TSTATE_PEF is cleared here to force that).
+ * Returns nonzero if any user-space load faulted.
+ */
+static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = current_thread_info()->fpregs;
+	unsigned long fprs;
+	int err;
+
+	err = __get_user(fprs, &fpu->si_fprs);
+	fprs_write(0);
+	regs->tstate &= ~TSTATE_PEF;
+	if (fprs & FPRS_DL)
+		err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
+	err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	current_thread_info()->fpsaved[0] |= fprs;
+	return err;
+}
+
+/* Return from a "new style" compat signal frame: restore pc/npc,
+ * the register image, optional v8plus upper halves and ASI,
+ * optional FPU state, and the saved signal mask.  Any fault or
+ * inconsistency raises SIGSEGV.
+ */
+void do_new_sigreturn32(struct pt_regs *regs)
+{
+	struct new_signal_frame32 __user *sf;
+	unsigned int psr;
+	unsigned pc, npc, fpu_save;
+	sigset_t set;
+	unsigned seta[_COMPAT_NSIG_WORDS];
+	int err, i;
+
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sf = (struct new_signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+	/* 1. Make sure we are not getting garbage from the user */
+	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+	    (((unsigned long) sf) & 3))
+		goto segv;
+
+	/* Check the fetches: on fault pc/npc would otherwise be
+	 * used uninitialized below.
+	 */
+	if (get_user(pc, &sf->info.si_regs.pc) ||
+	    __get_user(npc, &sf->info.si_regs.npc))
+		goto segv;
+
+	if ((pc | npc) & 3)
+		goto segv;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+
+	/* 2. Restore the state */
+	err = __get_user(regs->y, &sf->info.si_regs.y);
+	err |= __get_user(psr, &sf->info.si_regs.psr);
+
+	for (i = UREG_G1; i <= UREG_I7; i++)
+		err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
+	if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
+		/* v8plus frame: restore the upper 32 bits of the
+		 * globals/outs and the saved ASI, gated on the magic.
+		 */
+		err |= __get_user(i, &sf->v8plus.g_upper[0]);
+		if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
+			unsigned long asi;
+
+			for (i = UREG_G1; i <= UREG_I7; i++)
+				err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
+			err |= __get_user(asi, &sf->v8plus.asi);
+			regs->tstate &= ~TSTATE_ASI;
+			regs->tstate |= ((asi & 0xffUL) << 24UL);
+		}
+	}
+
+	/* User can only change condition codes in %tstate. */
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+
+	err |= __get_user(fpu_save, &sf->fpu_save);
+	if (fpu_save)
+		err |= restore_fpu_state32(regs, &sf->fpu_state);
+	err |= __get_user(seta[0], &sf->info.si_mask);
+	err |= copy_from_user(seta+1, &sf->extramask,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+	if (err)
+		goto segv;
+	/* Fold the 32-bit mask words into the 64-bit sigset;
+	 * cases deliberately fall through.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
+	case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
+	case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
+	case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return;
+
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Return from an old SunOS-style compat signal frame.  Dispatches
+ * to do_new_sigreturn32() when the task uses new-style signals.
+ */
+asmlinkage void do_sigreturn32(struct pt_regs *regs)
+{
+	struct sigcontext32 __user *scptr;
+	unsigned int pc, npc, psr;
+	sigset_t set;
+	unsigned int seta[_COMPAT_NSIG_WORDS];
+	int err;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	synchronize_user_stack();
+	if (test_thread_flag(TIF_NEWSIGNALS)) {
+		do_new_sigreturn32(regs);
+		return;
+	}
+
+	/* Old-style frames pass the sigcontext pointer in %o0. */
+	scptr = (struct sigcontext32 __user *)
+		(regs->u_regs[UREG_I0] & 0x00000000ffffffffUL);
+	/* Check sanity of the user arg. */
+	if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext32)) ||
+	    (((unsigned long) scptr) & 3))
+		goto segv;
+
+	err = __get_user(pc, &scptr->sigc_pc);
+	err |= __get_user(npc, &scptr->sigc_npc);
+
+	if ((pc | npc) & 3)
+		goto segv; /* Nice try. */
+
+	err |= __get_user(seta[0], &scptr->sigc_mask);
+	/* Note that scptr + 1 points to extramask */
+	err |= copy_from_user(seta+1, scptr + 1,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+	if (err)
+		goto segv;
+	/* Fold 32-bit mask words into the 64-bit sigset; the cases
+	 * deliberately fall through from the highest word down.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
+	case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
+	case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
+	case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+	err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
+	err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
+	err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);
+
+	/* User can only change condition codes in %tstate. */
+	err |= __get_user(psr, &scptr->sigc_psr);
+	if (err)
+		goto segv;
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+	return;
+
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Return from a compat rt signal frame: restore the register
+ * image, optional v8plus/FPU state, the altstack settings and the
+ * saved signal mask.  Any fault raises SIGSEGV.
+ */
+asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+{
+	struct rt_signal_frame32 __user *sf;
+	unsigned int psr, pc, npc, fpu_save, u_ss_sp;
+	mm_segment_t old_fs;
+	sigset_t set;
+	compat_sigset_t seta;
+	stack_t st;
+	int err, i;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+	synchronize_user_stack();
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+	/* 1. Make sure we are not getting garbage from the user */
+	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+	    (((unsigned long) sf) & 3))
+		goto segv;
+
+	/* Check the fetches: on fault pc/npc would otherwise be
+	 * used uninitialized below.
+	 */
+	if (get_user(pc, &sf->regs.pc) ||
+	    __get_user(npc, &sf->regs.npc))
+		goto segv;
+
+	if ((pc | npc) & 3)
+		goto segv;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+	regs->tpc = pc;
+	regs->tnpc = npc;
+
+	/* 2. Restore the state */
+	err = __get_user(regs->y, &sf->regs.y);
+	err |= __get_user(psr, &sf->regs.psr);
+
+	for (i = UREG_G1; i <= UREG_I7; i++)
+		err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]);
+	if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
+		/* v8plus frame: restore the upper 32 bits of the
+		 * globals/outs and the saved ASI, gated on the magic.
+		 */
+		err |= __get_user(i, &sf->v8plus.g_upper[0]);
+		if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
+			unsigned long asi;
+
+			for (i = UREG_G1; i <= UREG_I7; i++)
+				err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
+			err |= __get_user(asi, &sf->v8plus.asi);
+			regs->tstate &= ~TSTATE_ASI;
+			regs->tstate |= ((asi & 0xffUL) << 24UL);
+		}
+	}
+
+	/* User can only change condition codes in %tstate. */
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+
+	err |= __get_user(fpu_save, &sf->fpu_save);
+	if (fpu_save)
+		err |= restore_fpu_state32(regs, &sf->fpu_state);
+	err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
+	err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
+	st.ss_sp = compat_ptr(u_ss_sp);
+	err |= __get_user(st.ss_flags, &sf->stack.ss_flags);
+	err |= __get_user(st.ss_size, &sf->stack.ss_size);
+	if (err)
+		goto segv;
+
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors.  The kernel-space stack_t forces
+	   the temporary KERNEL_DS window. */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
+	set_fs(old_fs);
+
+	/* Fold the 32-bit mask words into the 64-bit sigset;
+	 * cases deliberately fall through.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
+	case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
+	case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
+	case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
+	}
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	return;
+segv:
+	force_sig(SIGSEGV, current);
+}
+
+/* Validate a compat signal frame pointer: it must be 8-byte
+ * aligned and the whole frame must fit below the 4GB boundary of
+ * the 32-bit address space.  Returns nonzero when unusable.
+ */
+static int invalid_frame_pointer(void __user *fp, int fplen)
+{
+	unsigned long addr = (unsigned long) fp;
+
+	if (addr & 7)
+		return 1;
+	if (addr > 0x100000000ULL - fplen)
+		return 1;
+	return 0;
+}
+
+/* Compute where the new compat signal frame goes on the user
+ * stack, honoring SA_ONSTACK.  The frame pointer is first clamped
+ * to 32 bits since compat tasks have 32-bit stack pointers.
+ */
+static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
+{
+	unsigned long sp;
+
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sp = regs->u_regs[UREG_FP];
+
+	/* This is the X/Open sanctioned signal stack switching. */
+	if (sa->sa_flags & SA_ONSTACK) {
+		if (!on_sig_stack(sp) && !((current->sas_ss_sp + current->sas_ss_size) & 7))
+			sp = current->sas_ss_sp + current->sas_ss_size;
+	}
+	return (void __user *)(sp - framesize);
+}
+
+/* Build an old SunOS-style compat signal frame and redirect the
+ * task to the handler.  The siginfo code/address are translated
+ * into the legacy SUBSIG_* encodings SunOS binaries expect.
+ */
+static void
+setup_frame32(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
+{
+	struct signal_sframe32 __user *sframep;
+	struct sigcontext32 __user *sc;
+	unsigned int seta[_COMPAT_NSIG_WORDS];
+	int err = 0;
+	void __user *sig_address;
+	int sig_code;
+	unsigned long pc = regs->tpc;
+	unsigned long npc = regs->tnpc;
+	unsigned int psr;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		pc &= 0xffffffff;
+		npc &= 0xffffffff;
+	}
+
+	synchronize_user_stack();
+	save_and_clear_fpu();
+
+	sframep = (struct signal_sframe32 __user *)
+		get_sigframe(sa, regs, SF_ALIGNEDSZ);
+	if (invalid_frame_pointer(sframep, sizeof(*sframep))){
+		/* Don't change signal code and address, so that
+		 * post mortem debuggers can have a look.
+		 */
+		do_exit(SIGILL);
+	}
+
+	sc = &sframep->sig_context;
+
+	/* We've already made sure frame pointer isn't in kernel space... */
+	err  = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
+			  &sc->sigc_onstack);
+
+	/* Split the 64-bit sigset into 32-bit words; cases fall
+	 * through deliberately from the highest word down.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: seta[7] = (oldset->sig[3] >> 32);
+	        seta[6] = oldset->sig[3];
+	case 3: seta[5] = (oldset->sig[2] >> 32);
+	        seta[4] = oldset->sig[2];
+	case 2: seta[3] = (oldset->sig[1] >> 32);
+	        seta[2] = oldset->sig[1];
+	case 1: seta[1] = (oldset->sig[0] >> 32);
+	        seta[0] = oldset->sig[0];
+	}
+	err |= __put_user(seta[0], &sc->sigc_mask);
+	err |= __copy_to_user(sframep->extramask, seta + 1,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+	err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
+	err |= __put_user(pc, &sc->sigc_pc);
+	err |= __put_user(npc, &sc->sigc_npc);
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &sc->sigc_psr);
+	err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
+	err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
+	err |= __put_user(get_thread_wsaved(), &sc->sigc_oswins);
+
+	/* Duplicate the caller's register window into the frame. */
+	err |= copy_in_user((u32 __user *)sframep,
+			    (u32 __user *)(regs->u_regs[UREG_FP]),
+			    sizeof(struct reg_window32));
+
+	set_thread_wsaved(0); /* So process is allowed to execute. */
+	err |= __put_user(signr, &sframep->sig_num);
+	sig_address = NULL;
+	sig_code = 0;
+	/* Map the kernel siginfo codes onto SunOS SUBSIG_* values. */
+	if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
+		sig_address = info->si_addr;
+		switch (signr) {
+		case SIGSEGV:
+			switch (info->si_code) {
+			case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
+			default: sig_code = SUBSIG_PROTECTION; break;
+			}
+			break;
+		case SIGILL:
+			switch (info->si_code) {
+			case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
+			case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
+			case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
+			default: sig_code = SUBSIG_STACK; break;
+			}
+			break;
+		case SIGFPE:
+			switch (info->si_code) {
+			case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
+			case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
+			case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
+			case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
+			case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
+			case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
+			case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
+			default: sig_code = SUBSIG_FPERROR; break;
+			}
+			break;
+		case SIGBUS:
+			switch (info->si_code) {
+			case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
+			case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
+			default: sig_code = SUBSIG_BUSTIMEOUT; break;
+			}
+			break;
+		case SIGEMT:
+			switch (info->si_code) {
+			case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
+			}
+			break;
+		case SIGSYS:
+			if (info->si_code == (__SI_FAULT|0x100)) {
+				/* See sys_sunos32.c */
+				sig_code = info->si_trapno;
+				break;
+			}
+			/* fallthrough: unrecognized SIGSYS codes get no
+			 * address, like every other unknown signal.
+			 */
+		default:
+			sig_address = NULL;
+		}
+	}
+	err |= __put_user(ptr_to_compat(sig_address), &sframep->sig_address);
+	err |= __put_user(sig_code, &sframep->sig_code);
+	err |= __put_user(ptr_to_compat(sc), &sframep->sig_scptr);
+	if (err)
+		goto sigsegv;
+
+	regs->u_regs[UREG_FP] = (unsigned long) sframep;
+	regs->tpc = (unsigned long) sa->sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	return;
+
+sigsegv:
+	force_sigsegv(signr, current);
+}
+
+
+/* Dump the current FPU state into a compat signal frame, reading
+ * from the thread's in-kernel FPU save area.  Returns nonzero if
+ * any user-space store faulted.
+ */
+static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+{
+	unsigned long *fpregs = current_thread_info()->fpregs;
+	unsigned long fprs;
+	int err = 0;
+
+	fprs = current_thread_info()->fpsaved[0];
+	/* Lower and upper FP register halves are saved independently,
+	 * guarded by the FPRS dirty bits.
+	 */
+	if (fprs & FPRS_DL)
+		err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
+				    (sizeof(unsigned int) * 32));
+	if (fprs & FPRS_DU)
+		err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
+				    (sizeof(unsigned int) * 32));
+	err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
+	err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
+	err |= __put_user(fprs, &fpu->si_fprs);
+
+	return err;
+}
+
+/* Build a "new style" compat signal frame (including the v8plus
+ * upper-register extension) and redirect the task to the handler.
+ * If userland supplied no restorer, a two-instruction sigreturn
+ * trampoline is written into the frame and its I-cache line is
+ * flushed by hand.
+ */
+static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+			      int signo, sigset_t *oldset)
+{
+	struct new_signal_frame32 __user *sf;
+	int sigframe_size;
+	u32 psr;
+	int i, err;
+	unsigned int seta[_COMPAT_NSIG_WORDS];
+
+	/* 1. Make sure everything is clean */
+	synchronize_user_stack();
+	save_and_clear_fpu();
+
+	/* Frame is smaller when there is no live FPU state to save. */
+	sigframe_size = NF_ALIGNEDSZ;
+	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
+		sigframe_size -= sizeof(__siginfo_fpu_t);
+
+	sf = (struct new_signal_frame32 __user *)
+		get_sigframe(&ka->sa, regs, sigframe_size);
+
+	if (invalid_frame_pointer(sf, sigframe_size))
+		goto sigill;
+
+	/* Windows must all be on the user stack at this point. */
+	if (get_thread_wsaved() != 0)
+		goto sigill;
+
+	/* 2. Save the current process state */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err  = put_user(regs->tpc, &sf->info.si_regs.pc);
+	err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
+	err |= __put_user(regs->y, &sf->info.si_regs.y);
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &sf->info.si_regs.psr);
+	for (i = 0; i < 16; i++)
+		err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
+	/* v8plus extension: upper 32 bits of each register + ASI. */
+	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
+	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
+	for (i = 1; i < 16; i++)
+		err |= __put_user(((u32 *)regs->u_regs)[2*i],
+				  &sf->v8plus.g_upper[i]);
+	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
+			  &sf->v8plus.asi);
+
+	if (psr & PSR_EF) {
+		err |= save_fpu_state32(regs, &sf->fpu_state);
+		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
+	} else {
+		/* NULL fpu_save tells sigreturn there is no FPU state. */
+		err |= __put_user(0, &sf->fpu_save);
+	}
+
+	/* Split the 64-bit sigset into 32-bit words; cases fall
+	 * through deliberately from the highest word down.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: seta[7] = (oldset->sig[3] >> 32);
+		seta[6] = oldset->sig[3];
+	case 3: seta[5] = (oldset->sig[2] >> 32);
+		seta[4] = oldset->sig[2];
+	case 2: seta[3] = (oldset->sig[1] >> 32);
+		seta[2] = oldset->sig[1];
+	case 1: seta[1] = (oldset->sig[0] >> 32);
+		seta[0] = oldset->sig[0];
+	}
+	err |= __put_user(seta[0], &sf->info.si_mask);
+	err |= __copy_to_user(sf->extramask, seta + 1,
+			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
+
+	/* Duplicate the caller's register window into the frame. */
+	err |= copy_in_user((u32 __user *)sf,
+			    (u32 __user *)(regs->u_regs[UREG_FP]),
+			    sizeof(struct reg_window32));
+
+	if (err)
+		goto sigsegv;
+
+	/* 3. signal handler back-trampoline and parameters */
+	regs->u_regs[UREG_FP] = (unsigned long) sf;
+	regs->u_regs[UREG_I0] = signo;
+	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
+
+	/* 4. signal handler */
+	regs->tpc = (unsigned long) ka->sa.sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* 5. return to kernel instructions */
+	if (ka->ka_restorer) {
+		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
+	} else {
+		/* Flush instruction space. */
+		unsigned long address = ((unsigned long)&(sf->insns[0]));
+		pgd_t *pgdp = pgd_offset(current->mm, address);
+		pud_t *pudp = pud_offset(pgdp, address);
+		pmd_t *pmdp = pmd_offset(pudp, address);
+		pte_t *ptep;
+
+		/* %i7 points two instructions before the trampoline so
+		 * the handler's "ret; restore" lands on insns[0].
+		 */
+		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
+
+		err  = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
+		err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
+		if (err)
+			goto sigsegv;
+
+		/* Walk the page tables by hand to flush the I-cache
+		 * line holding the freshly written trampoline.
+		 */
+		preempt_disable();
+		ptep = pte_offset_map(pmdp, address);
+		if (pte_present(*ptep)) {
+			unsigned long page = (unsigned long)
+				page_address(pte_page(*ptep));
+
+			__asm__ __volatile__(
+			"	membar	#StoreStore\n"
+			"	flush	%0 + %1"
+			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
+			: "memory");
+		}
+		pte_unmap(ptep);
+		preempt_enable();
+	}
+	return;
+
+sigill:
+	do_exit(SIGILL);
+sigsegv:
+	force_sigsegv(signo, current);
+}
+
+/* Setup a Solaris stack frame */
+/* Build an SVR4 (Solaris-style) signal frame on the compat 32-bit user
+ * stack and redirect the task to the handler.  The frame carries the
+ * siginfo, a ucontext (PC/nPC, PSR, %y, g1-g7, o0-o7, signal mask,
+ * sigaltstack) and the saved window file.
+ * NOTE(review): the pc/npc parameters are never used in this body; the
+ * handler entry point is taken from sa->sa_handler instead.
+ */
+static void
+setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
+		   struct pt_regs *regs, int signr, sigset_t *oldset)
+{
+	svr4_signal_frame_t __user *sfp;
+	svr4_gregset_t __user *gr;
+	svr4_siginfo_t __user *si;
+	svr4_mcontext_t __user *mc;
+	svr4_gwindows_t __user *gw;
+	svr4_ucontext_t __user *uc;
+	svr4_sigset_t setv;
+	unsigned int psr;
+	int i, err;
+
+	/* Flush register windows to the user stack and snapshot the FPU
+	 * so the frame reflects the interrupted user state.
+	 */
+	synchronize_user_stack();
+	save_and_clear_fpu();
+
+	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
+	sfp = (svr4_signal_frame_t __user *)
+		get_sigframe(sa, regs,
+			     sizeof(struct reg_window32) + SVR4_SF_ALIGNED);
+
+	if (invalid_frame_pointer(sfp, sizeof(*sfp)))
+		do_exit(SIGILL);
+
+	/* Start with a clean frame pointer and fill it */
+	/* clear_user() also validates the range; err accumulates from here. */
+	err = clear_user(sfp, sizeof(*sfp));
+
+	/* Setup convenience variables */
+	si = &sfp->si;
+	uc = &sfp->uc;
+	gw = &sfp->gw;
+	mc = &uc->mcontext;
+	gr = &mc->greg;
+
+	/* FIXME: where am I supposed to put this?
+	 * sc->sigc_onstack = old_status;
+	 * anyways, it does not look like it is used for anything at all.
+	 */
+	/* Split each 64-bit sigset word into two 32-bit SVR4 sigbits. */
+	setv.sigbits[0] = oldset->sig[0];
+	setv.sigbits[1] = (oldset->sig[0] >> 32);
+	if (_NSIG_WORDS >= 2) {
+		setv.sigbits[2] = oldset->sig[1];
+		setv.sigbits[3] = (oldset->sig[1] >> 32);
+		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
+	} else
+		err |= __copy_to_user(&uc->sigmask, &setv,
+				      2 * sizeof(unsigned int));
+
+	/* Store registers */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err |= __put_user(regs->tpc, &((*gr)[SVR4_PC]));
+	err |= __put_user(regs->tnpc, &((*gr)[SVR4_NPC]));
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &((*gr)[SVR4_PSR]));
+	err |= __put_user(regs->y, &((*gr)[SVR4_Y]));
+
+	/* Copy g[1..7] and o[0..7] registers */
+	for (i = 0; i < 7; i++)
+		err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+	for (i = 0; i < 8; i++)
+		err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
+	err |= __put_user(current->sas_ss_size, &uc->stack.size);
+
+	/* Save the currently window file: */
+
+	/* 1. Link sfp->uc->gwins to our windows */
+	err |= __put_user(ptr_to_compat(gw), &mc->gwin);
+
+	/* 2. Number of windows to restore at setcontext (): */
+	err |= __put_user(get_thread_wsaved(), &gw->count);
+
+	/* 3. We just pay attention to the gw->count field on setcontext */
+	set_thread_wsaved(0); /* So process is allowed to execute. */
+
+	/* Setup the signal information. Solaris expects a bunch of
+	 * information to be passed to the signal handler, we don't provide
+	 * that much currently, should use siginfo.
+	 */
+	err |= __put_user(signr, &si->siginfo.signo);
+	err |= __put_user(SVR4_SINOINFO, &si->siginfo.code);
+	if (err)
+		goto sigsegv;
+
+	/* Re-aim user state: stack at the new frame, pc at the handler. */
+	regs->u_regs[UREG_FP] = (unsigned long) sfp;
+	regs->tpc = (unsigned long) sa->sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* Arguments passed to signal handler */
+	if (regs->u_regs[14]){
+		struct reg_window32 __user *rw = (struct reg_window32 __user *)
+			(regs->u_regs[14] & 0x00000000ffffffffUL);
+
+		err |= __put_user(signr, &rw->ins[0]);
+		err |= __put_user((u64)si, &rw->ins[1]);
+		err |= __put_user((u64)uc, &rw->ins[2]);
+		err |= __put_user((u64)sfp, &rw->ins[6]);	/* frame pointer */
+		if (err)
+			goto sigsegv;
+
+		regs->u_regs[UREG_I0] = signr;
+		regs->u_regs[UREG_I1] = (u32)(u64) si;
+		regs->u_regs[UREG_I2] = (u32)(u64) uc;
+	}
+	return;
+
+sigsegv:
+	force_sigsegv(signr, current);
+}
+
+/* Solaris getcontext(): fill *uc with the caller's current context in
+ * SVR4 layout (PC/nPC, PSR, %y, g1-g7, o0-o7, blocked signal mask,
+ * sigaltstack).  Returns 0 on success, -EFAULT on any user-copy fault.
+ */
+asmlinkage int
+svr4_getcontext(svr4_ucontext_t __user *uc, struct pt_regs *regs)
+{
+	svr4_gregset_t __user *gr;
+	svr4_mcontext_t __user *mc;
+	svr4_sigset_t setv;
+	int i, err;
+	u32 psr;
+
+	synchronize_user_stack();
+	save_and_clear_fpu();
+
+	/* Refuse if any window could not be flushed to the user stack. */
+	if (get_thread_wsaved())
+		do_exit(SIGSEGV);
+
+	/* clear_user() also validates the range; err accumulates from here. */
+	err = clear_user(uc, sizeof(*uc));
+
+	/* Setup convenience variables */
+	mc = &uc->mcontext;
+	gr = &mc->greg;
+
+	/* Split each 64-bit sigset word into two 32-bit SVR4 sigbits. */
+	setv.sigbits[0] = current->blocked.sig[0];
+	setv.sigbits[1] = (current->blocked.sig[0] >> 32);
+	if (_NSIG_WORDS >= 2) {
+		setv.sigbits[2] = current->blocked.sig[1];
+		setv.sigbits[3] = (current->blocked.sig[1] >> 32);
+		err |= __copy_to_user(&uc->sigmask, &setv, sizeof(svr4_sigset_t));
+	} else
+		err |= __copy_to_user(&uc->sigmask, &setv, 2 * sizeof(unsigned));
+
+	/* Store registers */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err |= __put_user(regs->tpc, &uc->mcontext.greg[SVR4_PC]);
+	err |= __put_user(regs->tnpc, &uc->mcontext.greg[SVR4_NPC]);
+
+	/* PSR_EF mirrors whether this task currently owns FPU state. */
+	psr = tstate_to_psr(regs->tstate) & ~PSR_EF;
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &uc->mcontext.greg[SVR4_PSR]);
+
+	err |= __put_user(regs->y, &uc->mcontext.greg[SVR4_Y]);
+
+	/* Copy g[1..7] and o[0..7] registers */
+	for (i = 0; i < 7; i++)
+		err |= __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+	for (i = 0; i < 8; i++)
+		err |= __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &uc->stack.sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &uc->stack.flags);
+	err |= __put_user(current->sas_ss_size, &uc->stack.size);
+
+	/* The register file is not saved
+	 * we have already stuffed all of it with sync_user_stack
+	 */
+	return (err ? -EFAULT : 0);
+}
+
+
+/* Set the context for a svr4 application, this is Solaris way to sigreturn */
+/* Restores PC/nPC, condition codes, %y, g1-g7, o0-o7, the signal mask
+ * and the sigaltstack from a user-supplied svr4_ucontext_t.  Returns
+ * -EINTR on success (so the syscall return path does not clobber the
+ * restored registers) or -EFAULT on a bad/misaligned context.
+ */
+asmlinkage int svr4_setcontext(svr4_ucontext_t __user *c, struct pt_regs *regs)
+{
+	svr4_gregset_t __user *gr;
+	mm_segment_t old_fs;
+	u32 pc, npc, psr, u_ss_sp;
+	sigset_t set;
+	svr4_sigset_t setv;
+	int i, err;
+	stack_t st;
+
+	/* Fixme: restore windows, or is this already taken care of in
+	 * svr4_setup_frame when sync_user_windows is done?
+	 */
+	flush_user_windows();
+
+	if (get_thread_wsaved())
+		goto sigsegv;
+
+	if (((unsigned long) c) & 3){
+		printk("Unaligned structure passed\n");
+		goto sigsegv;
+	}
+
+	if (!__access_ok(c, sizeof(*c))) {
+		/* Miguel, add nice debugging msg _here_. ;-) */
+		goto sigsegv;
+	}
+
+	/* Check for valid PC and nPC */
+	gr = &c->mcontext.greg;
+	err = __get_user(pc, &((*gr)[SVR4_PC]));
+	err |= __get_user(npc, &((*gr)[SVR4_NPC]));
+	if ((pc | npc) & 3)
+		goto sigsegv;
+
+	/* Retrieve information from passed ucontext */
+	/* note that nPC is ored a 1, this is used to inform entry.S */
+	/* that we don't want it to mess with our PC and nPC */
+
+	/* Recombine the 32-bit SVR4 sigbits into 64-bit sigset words. */
+	err |= copy_from_user(&setv, &c->sigmask, sizeof(svr4_sigset_t));
+	set.sig[0] = setv.sigbits[0] | (((long)setv.sigbits[1]) << 32);
+	if (_NSIG_WORDS >= 2)
+		set.sig[1] = setv.sigbits[2] | (((long)setv.sigbits[3]) << 32);
+
+	err |= __get_user(u_ss_sp, &c->stack.sp);
+	st.ss_sp = compat_ptr(u_ss_sp);
+	err |= __get_user(st.ss_flags, &c->stack.flags);
+	err |= __get_user(st.ss_size, &c->stack.size);
+	if (err)
+		goto sigsegv;
+
+	/* It is more difficult to avoid calling this function than to
+	   call it and ignore errors. */
+	/* st lives on the kernel stack, so widen the fs limit around the
+	 * do_sigaltstack() call that expects a user pointer.
+	 */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	do_sigaltstack((stack_t __user *) &st, NULL, regs->u_regs[UREG_I6]);
+	set_fs(old_fs);
+
+	/* SIGKILL/SIGSTOP can never be masked. */
+	sigdelsetmask(&set, ~_BLOCKABLE);
+	spin_lock_irq(&current->sighand->siglock);
+	current->blocked = set;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+	regs->tpc = pc;
+	regs->tnpc = npc | 1;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err |= __get_user(regs->y, &((*gr)[SVR4_Y]));
+	err |= __get_user(psr, &((*gr)[SVR4_PSR]));
+	/* Only the condition codes are taken from the user PSR. */
+	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
+	regs->tstate |= psr_to_tstate_icc(psr);
+
+	/* Restore g[1..7] and o[0..7] registers */
+	for (i = 0; i < 7; i++)
+		err |= __get_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+	for (i = 0; i < 8; i++)
+		err |= __get_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+	if (err)
+		goto sigsegv;
+
+	return -EINTR;
+sigsegv:
+	return -EFAULT;
+}
+
+/* Build a compat (32-bit) RT signal frame for an SA_SIGINFO handler and
+ * redirect the task to it.  The frame holds the register state, v8plus
+ * upper-half registers, optional FPU state, the full siginfo, the
+ * sigaltstack, the saved mask and a two-instruction sigreturn
+ * trampoline used when no ka_restorer was registered.
+ */
+static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+			     unsigned long signr, sigset_t *oldset,
+			     siginfo_t *info)
+{
+	struct rt_signal_frame32 __user *sf;
+	int sigframe_size;
+	u32 psr;
+	int i, err;
+	compat_sigset_t seta;
+
+	/* 1. Make sure everything is clean */
+	synchronize_user_stack();
+	save_and_clear_fpu();
+
+	/* Frame shrinks when there is no live FPU state to save. */
+	sigframe_size = RT_ALIGNEDSZ;
+	if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
+		sigframe_size -= sizeof(__siginfo_fpu_t);
+
+	sf = (struct rt_signal_frame32 __user *)
+		get_sigframe(&ka->sa, regs, sigframe_size);
+
+	if (invalid_frame_pointer(sf, sigframe_size))
+		goto sigill;
+
+	if (get_thread_wsaved() != 0)
+		goto sigill;
+
+	/* 2. Save the current process state */
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	err = put_user(regs->tpc, &sf->regs.pc);
+	err |= __put_user(regs->tnpc, &sf->regs.npc);
+	err |= __put_user(regs->y, &sf->regs.y);
+	psr = tstate_to_psr(regs->tstate);
+	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
+		psr |= PSR_EF;
+	err |= __put_user(psr, &sf->regs.psr);
+	for (i = 0; i < 16; i++)
+		err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
+	/* v8plus extras: upper 32 bits of the globals plus the ASI. */
+	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
+	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
+	for (i = 1; i < 16; i++)
+		err |= __put_user(((u32 *)regs->u_regs)[2*i],
+				  &sf->v8plus.g_upper[i]);
+	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
+			  &sf->v8plus.asi);
+
+	if (psr & PSR_EF) {
+		err |= save_fpu_state32(regs, &sf->fpu_state);
+		err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
+	} else {
+		err |= __put_user(0, &sf->fpu_save);
+	}
+
+	/* Update the siginfo structure. */
+	err |= copy_siginfo_to_user32(&sf->info, info);
+
+	/* Setup sigaltstack */
+	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
+	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
+	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
+
+	/* Split each 64-bit sigset word into two compat words; the switch
+	 * falls through so higher words are only written when they exist.
+	 */
+	switch (_NSIG_WORDS) {
+	case 4: seta.sig[7] = (oldset->sig[3] >> 32);
+		seta.sig[6] = oldset->sig[3];
+	case 3: seta.sig[5] = (oldset->sig[2] >> 32);
+		seta.sig[4] = oldset->sig[2];
+	case 2: seta.sig[3] = (oldset->sig[1] >> 32);
+		seta.sig[2] = oldset->sig[1];
+	case 1: seta.sig[1] = (oldset->sig[0] >> 32);
+		seta.sig[0] = oldset->sig[0];
+	}
+	err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
+
+	/* Copy the caller's register window below the new frame. */
+	err |= copy_in_user((u32 __user *)sf,
+			    (u32 __user *)(regs->u_regs[UREG_FP]),
+			    sizeof(struct reg_window32));
+	if (err)
+		goto sigsegv;
+
+	/* 3. signal handler back-trampoline and parameters */
+	regs->u_regs[UREG_FP] = (unsigned long) sf;
+	regs->u_regs[UREG_I0] = signr;
+	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;
+
+	/* 4. signal handler */
+	regs->tpc = (unsigned long) ka->sa.sa_handler;
+	regs->tnpc = (regs->tpc + 4);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+
+	/* 5. return to kernel instructions */
+	if (ka->ka_restorer)
+		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
+	else {
+		/* Flush instruction space. */
+		unsigned long address = ((unsigned long)&(sf->insns[0]));
+		pgd_t *pgdp = pgd_offset(current->mm, address);
+		pud_t *pudp = pud_offset(pgdp, address);
+		pmd_t *pmdp = pmd_offset(pudp, address);
+		pte_t *ptep;
+
+		/* %i7 + 8 after the handler's "ret" lands on insns[0]. */
+		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
+
+		/* mov __NR_rt_sigreturn, %g1 */
+		err |= __put_user(0x82102065, &sf->insns[0]);
+
+		/* t 0x10 */
+		err |= __put_user(0x91d02010, &sf->insns[1]);
+		if (err)
+			goto sigsegv;
+
+		/* Flush the just-written trampoline out of the D-cache so
+		 * the I-fetch sees it; only if the page is present.
+		 */
+		preempt_disable();
+		ptep = pte_offset_map(pmdp, address);
+		if (pte_present(*ptep)) {
+			unsigned long page = (unsigned long)
+				page_address(pte_page(*ptep));
+
+			__asm__ __volatile__(
+			"	membar	#StoreStore\n"
+			"	flush	%0 + %1"
+			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
+			: "memory");
+		}
+		pte_unmap(ptep);
+		preempt_enable();
+	}
+	return;
+
+sigill:
+	do_exit(SIGILL);
+sigsegv:
+	force_sigsegv(signr, current);
+}
+
+/* Deliver one signal to a compat task: build the frame variant the
+ * handler expects (SVR4, RT/siginfo, "new", or legacy), then unless
+ * SA_NOMASK is set, block sa_mask plus the delivered signal itself.
+ */
+static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
+				   siginfo_t *info,
+				   sigset_t *oldset, struct pt_regs *regs,
+				   int svr4_signal)
+{
+	/* Pick the stack-frame flavour. */
+	if (svr4_signal) {
+		setup_svr4_frame32(&ka->sa, regs->tpc, regs->tnpc,
+				   regs, signr, oldset);
+	} else if (ka->sa.sa_flags & SA_SIGINFO) {
+		setup_rt_frame32(ka, regs, signr, oldset, info);
+	} else if (test_thread_flag(TIF_NEWSIGNALS)) {
+		new_setup_frame32(ka, regs, signr, oldset);
+	} else {
+		setup_frame32(&ka->sa, regs, signr, oldset, info);
+	}
+
+	if (ka->sa.sa_flags & SA_NOMASK)
+		return;
+
+	spin_lock_irq(&current->sighand->siglock);
+	sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
+	sigaddset(&current->blocked, signr);
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+}
+
+/* Fix up an interrupted system call before handler delivery: either
+ * fail it with EINTR or rewind pc/npc so it is re-issued, honoring
+ * SA_RESTART.  On sparc a syscall error is a positive code in %o0
+ * plus the icc carry bit.
+ */
+static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
+				     struct sigaction *sa)
+{
+	switch (regs->u_regs[UREG_I0]) {
+	case ERESTART_RESTARTBLOCK:
+	case ERESTARTNOHAND:
+	no_system_call_restart:
+		/* Fail with EINTR; carry set marks it as an error. */
+		regs->u_regs[UREG_I0] = EINTR;
+		regs->tstate |= TSTATE_ICARRY;
+		break;
+	case ERESTARTSYS:
+		if (!(sa->sa_flags & SA_RESTART))
+			goto no_system_call_restart;
+		/* fallthrough */
+	case ERESTARTNOINTR:
+		/* Restore %o0 and back up over the trap instruction. */
+		regs->u_regs[UREG_I0] = orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+}
+
+/* Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+/* Main compat signal-delivery loop entry.  Returns 1 if a signal was
+ * delivered, 0 otherwise.  When no signal is delivered but the syscall
+ * was interrupted, arranges for it to be replayed on return.
+ */
+int do_signal32(sigset_t *oldset, struct pt_regs * regs,
+		unsigned long orig_i0, int restart_syscall)
+{
+	siginfo_t info;
+	struct signal_deliver_cookie cookie;
+	struct k_sigaction ka;
+	int signr;
+	int svr4_signal = current->personality == PER_SVR4;
+
+	cookie.restart_syscall = restart_syscall;
+	cookie.orig_i0 = orig_i0;
+
+	signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
+	if (signr > 0) {
+		if (cookie.restart_syscall)
+			syscall_restart32(orig_i0, regs, &ka.sa);
+		handle_signal32(signr, &ka, &info, oldset,
+				regs, svr4_signal);
+		return 1;
+	}
+	/* No handler ran: transparently restart the interrupted syscall. */
+	if (cookie.restart_syscall &&
+	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
+	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+		/* replay the system call when we are done */
+		regs->u_regs[UREG_I0] = cookie.orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	/* ERESTART_RESTARTBLOCK replays via sys_restart_syscall in %g1. */
+	if (cookie.restart_syscall &&
+	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
+		regs->u_regs[UREG_G1] = __NR_restart_syscall;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	return 0;
+}
+
+/* Compat layout of the old BSD-style struct sigstack. */
+struct sigstack32 {
+	u32 the_stack;	/* compat pointer to the stack's high end (see do_sys32_sigstack) */
+	int cur_status;	/* non-zero when currently executing on the signal stack */
+};
+
+/* Compat sys_sigstack: old BSD interface emulated on top of the
+ * sigaltstack state.  Returns 0, -EFAULT on bad pointers, or -EPERM
+ * when trying to change stacks while running on the signal stack.
+ */
+asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
+{
+	struct sigstack32 __user *ssptr =
+		(struct sigstack32 __user *)((unsigned long)(u_ssptr));
+	struct sigstack32 __user *ossptr =
+		(struct sigstack32 __user *)((unsigned long)(u_ossptr));
+	int ret = -EFAULT;
+
+	/* First see if old state is wanted. */
+	if (ossptr) {
+		/* Report the high end of the region, per the old API. */
+		if (put_user(current->sas_ss_sp + current->sas_ss_size,
+			     &ossptr->the_stack) ||
+		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
+			goto out;
+	}
+
+	/* Now see if we want to update the new state. */
+	if (ssptr) {
+		u32 ss_sp;
+
+		if (get_user(ss_sp, &ssptr->the_stack))
+			goto out;
+
+		/* If the current stack was set with sigaltstack, don't
+		 * swap stacks while we are on it.
+		 */
+		ret = -EPERM;
+		if (current->sas_ss_sp && on_sig_stack(sp))
+			goto out;
+
+		/* Since we don't know the extent of the stack, and we don't
+		 * track onstack-ness, but rather calculate it, we must
+		 * presume a size. Ho hum this interface is lossy.
+		 */
+		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
+		current->sas_ss_size = SIGSTKSZ;
+	}
+
+	ret = 0;
+out:
+	return ret;
+}
+
+/* Compat sigaltstack: translate the 32-bit stack_t in and out around
+ * the native do_sigaltstack().  Returns its result, or -EFAULT on a
+ * user-copy fault.
+ */
+asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp)
+{
+	stack_t uss, uoss;
+	u32 u_ss_sp = 0;
+	int ret;
+	mm_segment_t old_fs;
+	stack_t32 __user *uss32 = compat_ptr(ussa);
+	stack_t32 __user *uoss32 = compat_ptr(uossa);
+
+	if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) ||
+		    __get_user(uss.ss_flags, &uss32->ss_flags) ||
+		    __get_user(uss.ss_size, &uss32->ss_size)))
+		return -EFAULT;
+	uss.ss_sp = compat_ptr(u_ss_sp);
+	/* uss/uoss live on the kernel stack, so widen the fs limit around
+	 * the native call that expects user pointers.
+	 */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL,
+			     uossa ? (stack_t __user *) &uoss : NULL, sp);
+	set_fs(old_fs);
+	if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) ||
+		    __put_user(uoss.ss_flags, &uoss32->ss_flags) ||
+		    __put_user(uoss.ss_size, &uoss32->ss_size)))
+		return -EFAULT;
+	return ret;
+}
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
new file mode 100644
index 00000000000..6dff06a44e7
--- /dev/null
+++ b/arch/sparc64/kernel/smp.c
@@ -0,0 +1,1244 @@
+/* smp.c: Sparc64 SMP support.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/threads.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/cache.h>
+#include <linux/jiffies.h>
+#include <linux/profile.h>
+#include <linux/bootmem.h>
+
+#include <asm/head.h>
+#include <asm/ptrace.h>
+#include <asm/atomic.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/cpudata.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/timer.h>
+#include <asm/starfire.h>
+#include <asm/tlb.h>
+
+extern int linux_num_cpus;
+extern void calibrate_delay(void);
+
+/* Please don't make this stuff initdata!!! --DaveM */
+static unsigned char boot_cpu_id;
+
+cpumask_t cpu_online_map = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+static cpumask_t smp_commenced_mask;
+static cpumask_t cpu_callout_map;
+
+/* /proc/cpuinfo helper: print an "online" state line per online cpu. */
+void smp_info(struct seq_file *m)
+{
+	int cpu;
+
+	seq_printf(m, "State:\n");
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (!cpu_online(cpu))
+			continue;
+		seq_printf(m, "CPU%d:\t\tonline\n", cpu);
+	}
+}
+
+/* /proc/cpuinfo helper: print BogoMIPS and clock-tick per online cpu. */
+void smp_bogo(struct seq_file *m)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (!cpu_online(cpu))
+			continue;
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n"
+			   "Cpu%dClkTck\t: %016lx\n",
+			   cpu, cpu_data(cpu).udelay_val / (500000/HZ),
+			   (cpu_data(cpu).udelay_val / (5000/HZ)) % 100,
+			   cpu, cpu_data(cpu).clock_tick);
+	}
+}
+
+/* Record per-cpu boot-time data: delay-loop calibration, the PROM's
+ * clock frequency, and reset the page-table cache bookkeeping.
+ */
+void __init smp_store_cpu_info(int id)
+{
+	int cpu_node;
+
+	/* multiplier and counter set by
+	   smp_setup_percpu_timer()  */
+	cpu_data(id).udelay_val = loops_per_jiffy;
+
+	/* Ask the PROM node for this cpu's clock frequency. */
+	cpu_find_by_mid(id, &cpu_node);
+	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
+						     "clock-frequency", 0);
+
+	/* Start with empty per-cpu pte/pgd caches. */
+	cpu_data(id).pgcache_size = 0;
+	cpu_data(id).pte_cache[0] = NULL;
+	cpu_data(id).pte_cache[1] = NULL;
+	cpu_data(id).pgd_cache = NULL;
+	cpu_data(id).idle_volume = 1;
+}
+
+static void smp_setup_percpu_timer(void);
+
+static volatile unsigned long callin_flag = 0;
+
+extern void inherit_locked_prom_mappings(int save_p);
+
+/* Load this cpu's per-cpu offset into %g5 and store it at TSB_REG in
+ * the IMMU via stxa — presumably so trap-level code can recover the
+ * per-cpu base; NOTE(review): verify against the trap handlers.
+ */
+static inline void cpu_setup_percpu_base(unsigned long cpu_id)
+{
+	__asm__ __volatile__("mov	%0, %%g5\n\t"
+			     "stxa	%0, [%1] %2\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (__per_cpu_offset(cpu_id)),
+			       "r" (TSB_REG), "i" (ASI_IMMU));
+}
+
+/* Per-cpu bring-up path run by a freshly started secondary cpu:
+ * inherit PROM mappings, set up per-cpu state and the local timer,
+ * calibrate, signal the master via callin_flag, then spin until the
+ * master adds us to smp_commenced_mask before going online.
+ */
+void __init smp_callin(void)
+{
+	int cpuid = hard_smp_processor_id();
+
+	inherit_locked_prom_mappings(0);
+
+	__flush_tlb_all();
+
+	cpu_setup_percpu_base(cpuid);
+
+	smp_setup_percpu_timer();
+
+	local_irq_enable();
+
+	calibrate_delay();
+	smp_store_cpu_info(cpuid);
+	/* Tell smp_boot_one_cpu() we made it. */
+	callin_flag = 1;
+	__asm__ __volatile__("membar #Sync\n\t"
+			     "flush  %%g6" : : : "memory");
+
+	/* Clear this or we will die instantly when we
+	 * schedule back to this idler...
+	 */
+	clear_thread_flag(TIF_NEWCHILD);
+
+	/* Attach to the address space of init_task. */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+
+	/* Wait for the master's go-ahead. */
+	while (!cpu_isset(cpuid, smp_commenced_mask))
+		membar("#LoadLoad");
+
+	cpu_set(cpuid, cpu_online_map);
+}
+
+/* Called if a cpu's idle loop ever returns — must never happen. */
+void cpu_panic(void)
+{
+	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
+	panic("SMP bolixed\n");
+}
+
+static unsigned long current_tick_offset;
+
+/* This tick register synchronization scheme is taken entirely from
+ * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
+ *
+ * The only change I've made is to rework it so that the master
+ * initiates the synchonization instead of the slave. -DaveM
+ */
+
+#define MASTER 0
+#define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
+
+#define NUM_ROUNDS 64 /* magic value */
+#define NUM_ITERS 5 /* likewise */
+
+static DEFINE_SPINLOCK(itc_sync_lock);
+static unsigned long go[SLAVE + 1];
+
+#define DEBUG_TICK_SYNC 0
+
+/* Master side of one tick-sync round: ping the slave NUM_ITERS times
+ * through the go[] handshake, keep the iteration with the smallest
+ * round-trip, and return the estimated offset of the slave's tick
+ * from the midpoint of our best t0/t1 window.  *rt gets the best
+ * round-trip time, *master the slave's timestamp relative to t0.
+ */
+static inline long get_delta (long *rt, long *master)
+{
+	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
+	unsigned long tcenter, t0, t1, tm;
+	unsigned long i;
+
+	for (i = 0; i < NUM_ITERS; i++) {
+		t0 = tick_ops->get_tick();
+		go[MASTER] = 1;			/* ping */
+		membar("#StoreLoad");
+		while (!(tm = go[SLAVE]))	/* wait for slave's tick */
+			membar("#LoadLoad");
+		go[SLAVE] = 0;
+		membar("#StoreStore");
+		t1 = tick_ops->get_tick();
+
+		if (t1 - t0 < best_t1 - best_t0)
+			best_t0 = t0, best_t1 = t1, best_tm = tm;
+	}
+
+	*rt = best_t1 - best_t0;
+	*master = best_tm - best_t0;
+
+	/* average best_t0 and best_t1 without overflow: */
+	tcenter = (best_t0/2 + best_t1/2);
+	if (best_t0 % 2 + best_t1 % 2 == 2)
+		tcenter++;
+	return tcenter - best_tm;
+}
+
+/* Slave side of tick-register synchronization: after handshaking with
+ * the master through go[], run NUM_ROUNDS measurement rounds and nudge
+ * the local tick register until the measured delta reaches zero.
+ * Scheme borrowed from the ia64 port (see comment above).
+ */
+void smp_synchronize_tick_client(void)
+{
+	long i, delta, adj, adjust_latency = 0, done = 0;
+	unsigned long flags, rt, master_time_stamp;
+#if DEBUG_TICK_SYNC
+	struct {
+		long rt;	/* roundtrip time */
+		long master;	/* master's timestamp */
+		long diff;	/* difference between midpoint and master's timestamp */
+		long lat;	/* estimate of itc adjustment latency */
+	} t[NUM_ROUNDS];
+#endif
+
+	/* Tell the master we are ready, then wait for it to start. */
+	go[MASTER] = 1;
+
+	while (go[MASTER])
+		membar("#LoadLoad");
+
+	local_irq_save(flags);
+	{
+		for (i = 0; i < NUM_ROUNDS; i++) {
+			delta = get_delta(&rt, &master_time_stamp);
+			if (delta == 0)
+				done = 1;	/* let's lock on to this... */
+
+			if (!done) {
+				/* Damp the correction with a running
+				 * latency estimate after the first round.
+				 */
+				if (i > 0) {
+					adjust_latency += -delta;
+					adj = -delta + adjust_latency/4;
+				} else
+					adj = -delta;
+
+				tick_ops->add_tick(adj, current_tick_offset);
+			}
+#if DEBUG_TICK_SYNC
+			t[i].rt = rt;
+			t[i].master = master_time_stamp;
+			t[i].diff = delta;
+			t[i].lat = adjust_latency/4;
+#endif
+		}
+	}
+	local_irq_restore(flags);
+
+#if DEBUG_TICK_SYNC
+	for (i = 0; i < NUM_ROUNDS; i++)
+		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
+		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
+#endif
+
+	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles,"
+	       " maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
+}
+
+static void smp_start_sync_tick_client(int cpu);
+
+/* Master side of tick synchronization for one slave cpu: kick the
+ * slave into smp_synchronize_tick_client(), handshake, then feed it
+ * our tick value NUM_ROUNDS*NUM_ITERS times through go[SLAVE].
+ */
+static void smp_synchronize_one_tick(int cpu)
+{
+	unsigned long flags, i;
+
+	go[MASTER] = 0;
+
+	smp_start_sync_tick_client(cpu);
+
+	/* wait for client to be ready */
+	while (!go[MASTER])
+		membar("#LoadLoad");
+
+	/* now let the client proceed into his loop */
+	go[MASTER] = 0;
+	membar("#StoreLoad");
+
+	spin_lock_irqsave(&itc_sync_lock, flags);
+	{
+		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
+			while (!go[MASTER])	/* slave pings via go[MASTER] */
+				membar("#LoadLoad");
+			go[MASTER] = 0;
+			membar("#StoreStore");
+			go[SLAVE] = tick_ops->get_tick();
+			membar("#StoreLoad");
+		}
+	}
+	spin_unlock_irqrestore(&itc_sync_lock, flags);
+}
+
+extern unsigned long sparc64_cpu_startup;
+
+/* The OBP cpu startup callback truncates the 3rd arg cookie to
+ * 32-bits (I think) so to be safe we have it read the pointer
+ * contained here so we work on >4GB machines. -DaveM
+ */
+static struct thread_info *cpu_new_thread = NULL;
+
+/* Boot one secondary cpu: fork its idle task, hand its entry point and
+ * the cpu_new_thread cookie to the PROM, then poll callin_flag for up
+ * to ~0.5s waiting for smp_callin() to acknowledge.  Returns 0 on
+ * success, -ENODEV if the cpu never calls in.
+ * NOTE(review): fork_idle()'s return is not checked for an error
+ * pointer before p->thread_info is dereferenced — confirm intent.
+ */
+static int __devinit smp_boot_one_cpu(unsigned int cpu)
+{
+	unsigned long entry =
+		(unsigned long)(&sparc64_cpu_startup);
+	unsigned long cookie =
+		(unsigned long)(&cpu_new_thread);
+	struct task_struct *p;
+	int timeout, ret, cpu_node;
+
+	p = fork_idle(cpu);
+	callin_flag = 0;
+	cpu_new_thread = p->thread_info;
+	cpu_set(cpu, cpu_callout_map);
+
+	cpu_find_by_mid(cpu, &cpu_node);
+	prom_startcpu(cpu_node, entry, cookie);
+
+	/* 5,000,000 * 100us polls ≈ 500s upper bound; usually instant. */
+	for (timeout = 0; timeout < 5000000; timeout++) {
+		if (callin_flag)
+			break;
+		udelay(100);
+	}
+	if (callin_flag) {
+		ret = 0;
+	} else {
+		printk("Processor %d is stuck.\n", cpu);
+		cpu_clear(cpu, cpu_callout_map);
+		ret = -ENODEV;
+	}
+	cpu_new_thread = NULL;
+
+	return ret;
+}
+
+/* Deliver one 3-word cross-call mondo to a single cpu on Spitfire:
+ * write the data and dispatch via ASI_INTR_W with interrupts disabled,
+ * then poll ASI_INTR_DISPATCH_STAT; on NACK, back off and retry.
+ */
+static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
+{
+	u64 result, target;
+	int stuck, tmp;
+
+	if (this_is_starfire) {
+		/* map to real upaid */
+		cpu = (((cpu & 0x3c) << 1) |
+			((cpu & 0x40) >> 4) |
+			(cpu & 0x3));
+	}
+
+	target = (cpu << 14) | 0x70;
+again:
+	/* Ok, this is the real Spitfire Errata #54.
+	 * One must read back from a UDB internal register
+	 * after writes to the UDB interrupt dispatch, but
+	 * before the membar Sync for that write.
+	 * So we use the high UDB control register (ASI 0x7f,
+	 * ADDR 0x20) for the dummy read. -DaveM
+	 */
+	tmp = 0x40;
+	__asm__ __volatile__(
+	"wrpr	%1, %2, %%pstate\n\t"
+	"stxa	%4, [%0] %3\n\t"
+	"stxa	%5, [%0+%8] %3\n\t"
+	"add	%0, %8, %0\n\t"
+	"stxa	%6, [%0+%8] %3\n\t"
+	"membar	#Sync\n\t"
+	"stxa	%%g0, [%7] %3\n\t"
+	"membar	#Sync\n\t"
+	"mov	0x20, %%g1\n\t"
+	"ldxa	[%%g1] 0x7f, %%g0\n\t"
+	"membar	#Sync"
+	: "=r" (tmp)
+	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
+	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
+	  "r" (0x10), "0" (tmp)
+	: "g1");
+
+	/* NOTE: PSTATE_IE is still clear. */
+	stuck = 100000;
+	do {
+		__asm__ __volatile__("ldxa [%%g0] %1, %0"
+			: "=r" (result)
+			: "i" (ASI_INTR_DISPATCH_STAT));
+		if (result == 0) {
+			/* Dispatch accepted; restore %pstate and done. */
+			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+					     : : "r" (pstate));
+			return;
+		}
+		stuck -= 1;
+		if (stuck == 0)
+			break;
+	} while (result & 0x1);
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+			     : : "r" (pstate));
+	if (stuck == 0) {
+		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+		       smp_processor_id(), result);
+	} else {
+		/* NACKed: short delay then retry the dispatch. */
+		udelay(2);
+		goto again;
+	}
+}
+
+/* Spitfire can only dispatch to one cpu at a time, so deliver the
+ * mondo serially to every cpu in the mask.  %pstate is sampled once
+ * up front; the helper toggles PSTATE_IE around each dispatch and
+ * restores this saved value.
+ */
+static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+	u64 saved_pstate;
+	int cpu;
+
+	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (saved_pstate));
+
+	for_each_cpu_mask(cpu, mask)
+		spitfire_xcall_helper(data0, data1, data2, saved_pstate, cpu);
+}
+
+/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
+ * packet, but we have no use for that.  However we do take advantage of
+ * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
+ */
+/* Dispatch the mondo to every cpu in the mask at once, then poll the
+ * dispatch status; cpus that NACKed are retried, cpus whose busy bit
+ * never clears are reported and skipped.
+ */
+static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+{
+	u64 pstate, ver;
+	int nack_busy_id, is_jalapeno;
+
+	if (cpus_empty(mask))
+		return;
+
+	/* Unfortunately, someone at Sun had the brilliant idea to make the
+	 * busy/nack fields hard-coded by ITID number for this Ultra-III
+	 * derivative processor.
+	 */
+	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+	is_jalapeno = ((ver >> 32) == 0x003e0016);
+
+	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+
+retry:
+	/* Disable interrupts for the dispatch/poll sequence. */
+	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
+			     : : "r" (pstate), "i" (PSTATE_IE));
+
+	/* Setup the dispatch data registers. */
+	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
+			     "stxa	%1, [%4] %6\n\t"
+			     "stxa	%2, [%5] %6\n\t"
+			     "membar	#Sync\n\t"
+			     : /* no outputs */
+			     : "r" (data0), "r" (data1), "r" (data2),
+			       "r" (0x40), "r" (0x50), "r" (0x60),
+			       "i" (ASI_INTR_W));
+
+	nack_busy_id = 0;
+	{
+		int i;
+
+		/* One dispatch write per target; non-Jalapeno encodes the
+		 * busy/nack slot index in bits 24+ of the target address.
+		 */
+		for_each_cpu_mask(i, mask) {
+			u64 target = (i << 14) | 0x70;
+
+			if (!is_jalapeno)
+				target |= (nack_busy_id << 24);
+			__asm__ __volatile__(
+				"stxa	%%g0, [%0] %1\n\t"
+				"membar	#Sync\n\t"
+				: /* no outputs */
+				: "r" (target), "i" (ASI_INTR_W));
+			nack_busy_id++;
+		}
+	}
+
+	/* Now, poll for completion. */
+	{
+		u64 dispatch_stat;
+		long stuck;
+
+		stuck = 100000 * nack_busy_id;
+		do {
+			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
+					     : "=r" (dispatch_stat)
+					     : "i" (ASI_INTR_DISPATCH_STAT));
+			if (dispatch_stat == 0UL) {
+				/* All targets accepted; restore %pstate. */
+				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+						     : : "r" (pstate));
+				return;
+			}
+			if (!--stuck)
+				break;
+		} while (dispatch_stat & 0x5555555555555555UL);
+
+		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+				     : : "r" (pstate));
+
+		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
+			/* Busy bits will not clear, continue instead
+			 * of freezing up on this cpu.
+			 */
+			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
+			       smp_processor_id(), dispatch_stat);
+		} else {
+			int i, this_busy_nack = 0;
+
+			/* Delay some random time with interrupts enabled
+			 * to prevent deadlock.
+			 */
+			udelay(2 * nack_busy_id);
+
+			/* Clear out the mask bits for cpus which did not
+			 * NACK us.
+			 */
+			for_each_cpu_mask(i, mask) {
+				u64 check_mask;
+
+				if (is_jalapeno)
+					check_mask = (0x2UL << (2*i));
+				else
+					check_mask = (0x2UL <<
+						      this_busy_nack);
+				if ((dispatch_stat & check_mask) == 0)
+					cpu_clear(i, mask);
+				this_busy_nack += 2;
+			}
+
+			goto retry;
+		}
+	}
+}
+
+/* Send cross call to all processors mentioned in MASK
+ * except self.
+ *
+ * data0 packs the mmu context in the high 32 bits and the handler
+ * address in the low 32 bits; get_cpu()/put_cpu() pin us so "self"
+ * stays well-defined for the duration of the dispatch.
+ */
+static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
+{
+	int this_cpu = get_cpu();
+	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
+
+	cpus_and(mask, mask, cpu_online_map);
+	cpu_clear(this_cpu, mask);
+
+	if (tlb_type == spitfire)
+		spitfire_xcall_deliver(data0, data1, data2, mask);
+	else
+		cheetah_xcall_deliver(data0, data1, data2, mask);
+
+	/* NOTE: Caller runs local copy on master. */
+	put_cpu();
+}
+
+extern unsigned long xcall_sync_tick;
+
+/* Kick exactly one cpu into smp_synchronize_tick_client(). */
+static void smp_start_sync_tick_client(int cpu)
+{
+	cpumask_t target = cpumask_of_cpu(cpu);
+
+	smp_cross_call_masked(&xcall_sync_tick, 0, 0, 0, target);
+}
+
+/* Send cross call to all processors except self. */
+#define smp_cross_call(func, ctx, data1, data2) \
+ smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
+
+/* Descriptor for one smp_call_function() invocation, published to the
+ * cross-call recipients through the call_data pointer.
+ */
+struct call_data_struct {
+	void (*func) (void *info);	/* function each cpu runs */
+	void *info;			/* opaque argument passed to func */
+	atomic_t finished;		/* cpus that have acked (see wait) */
+	int wait;			/* if set, cpus run func before acking */
+};
+
+static DEFINE_SPINLOCK(call_lock);
+static struct call_data_struct *call_data;
+
+extern unsigned long xcall_call_function;
+
+/*
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+/* Run func(info) on every other online cpu via cross call.  call_lock
+ * serializes concurrent callers and protects call_data.
+ * NOTE(review): returns 0 even on the timeout path — callers cannot
+ * distinguish success from "remote cpus not responding".
+ */
+int smp_call_function(void (*func)(void *info), void *info,
+		      int nonatomic, int wait)
+{
+	struct call_data_struct data;
+	int cpus = num_online_cpus() - 1;
+	long timeout;
+
+	if (!cpus)
+		return 0;
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.finished, 0);
+	data.wait = wait;
+
+	spin_lock(&call_lock);
+
+	call_data = &data;
+
+	smp_cross_call(&xcall_call_function, 0, 0, 0);
+
+	/*
+	 * Wait for other cpus to complete function or at
+	 * least snap the call data.
+	 */
+	timeout = 1000000;
+	while (atomic_read(&data.finished) != cpus) {
+		if (--timeout <= 0)
+			goto out_timeout;
+		barrier();
+		udelay(1);
+	}
+
+	spin_unlock(&call_lock);
+
+	return 0;
+
+out_timeout:
+	spin_unlock(&call_lock);
+	printk("XCALL: Remote cpus not responding, ncpus=%ld finished=%ld\n",
+	       (long) num_online_cpus() - 1L,
+	       (long) atomic_read(&data.finished));
+	return 0;
+}
+
+/* Cross-call receiver for smp_call_function(): run the published
+ * func(info).  The wait flag decides whether the ack (finished++)
+ * happens after the call (initiator waits for completion) or before
+ * it (initiator only needs the data snapped).
+ */
+void smp_call_function_client(int irq, struct pt_regs *regs)
+{
+	void (*func) (void *info) = call_data->func;
+	void *info = call_data->info;
+
+	clear_softint(1 << irq);
+	if (call_data->wait) {
+		/* let initiator proceed only after completion */
+		func(info);
+		atomic_inc(&call_data->finished);
+	} else {
+		/* let initiator proceed after getting data */
+		atomic_inc(&call_data->finished);
+		func(info);
+	}
+}
+
+extern unsigned long xcall_flush_tlb_mm;
+extern unsigned long xcall_flush_tlb_pending;
+extern unsigned long xcall_flush_tlb_kernel_range;
+extern unsigned long xcall_flush_tlb_all_spitfire;
+extern unsigned long xcall_flush_tlb_all_cheetah;
+extern unsigned long xcall_report_regs;
+extern unsigned long xcall_receive_signal;
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+extern unsigned long xcall_flush_dcache_page_cheetah;
+#endif
+extern unsigned long xcall_flush_dcache_page_spitfire;
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+extern atomic_t dcpage_flushes;
+extern atomic_t dcpage_flushes_xcall;
+#endif
+
+/* Flush @page from the local cpu's D-cache (aliasing configs), or do
+ * the spitfire I-cache flush for mapped pages (non-aliasing configs).
+ * The second argument to __flush_dcache_page presumably requests the
+ * extra I-cache flush, mirroring the #else branch -- TODO confirm.
+ */
+static __inline__ void __local_flush_dcache_page(struct page *page)
+{
+#ifdef DCACHE_ALIASING_POSSIBLE
+	__flush_dcache_page(page_address(page),
+			    ((tlb_type == spitfire) &&
+			     page_mapping(page) != NULL));
+#else
+	if (page_mapping(page) != NULL &&
+	    tlb_type == spitfire)
+		__flush_icache_page(__pa(page_address(page)));
+#endif
+}
+
+/* Flush the D-cache copy of @page on cpu @cpu: locally when @cpu is
+ * the calling cpu, otherwise via a single-target cross call using the
+ * chip-appropriate xcall handler.  Offline cpus are silently skipped.
+ */
+void smp_flush_dcache_page_impl(struct page *page, int cpu)
+{
+	cpumask_t mask = cpumask_of_cpu(cpu);
+	int this_cpu = get_cpu();	/* pins us to this cpu until put_cpu() */
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes);
+#endif
+	if (cpu == this_cpu) {
+		__local_flush_dcache_page(page);
+	} else if (cpu_online(cpu)) {
+		void *pg_addr = page_address(page);
+		u64 data0;
+
+		if (tlb_type == spitfire) {
+			data0 =
+				((u64)&xcall_flush_dcache_page_spitfire);
+			/* Bit 32 flags a mapped page -- presumably the
+			 * handler then flushes the I-cache too; TODO
+			 * confirm against the xcall assembly.
+			 */
+			if (page_mapping(page) != NULL)
+				data0 |= ((u64)1 << 32);
+			spitfire_xcall_deliver(data0,
+					       __pa(pg_addr),
+					       (u64) pg_addr,
+					       mask);
+		} else {
+#ifdef DCACHE_ALIASING_POSSIBLE
+			data0 =
+				((u64)&xcall_flush_dcache_page_cheetah);
+			cheetah_xcall_deliver(data0,
+					      __pa(pg_addr),
+					      0, mask);
+#endif
+		}
+#ifdef CONFIG_DEBUG_DCFLUSH
+		atomic_inc(&dcpage_flushes_xcall);
+#endif
+	}
+
+	put_cpu();
+}
+
+/* Flush @page from the D-cache on all online cpus: one masked cross
+ * call to everyone else, then the local flush.  @mm is unused here.
+ */
+void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+{
+	void *pg_addr = page_address(page);
+	cpumask_t mask = cpu_online_map;
+	u64 data0;
+	int this_cpu = get_cpu();
+
+	/* We always flush ourselves separately, below. */
+	cpu_clear(this_cpu, mask);
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes);
+#endif
+	if (cpus_empty(mask))
+		goto flush_self;
+	if (tlb_type == spitfire) {
+		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
+		/* Bit 32 marks a mapped page (see smp_flush_dcache_page_impl). */
+		if (page_mapping(page) != NULL)
+			data0 |= ((u64)1 << 32);
+		spitfire_xcall_deliver(data0,
+				       __pa(pg_addr),
+				       (u64) pg_addr,
+				       mask);
+	} else {
+#ifdef DCACHE_ALIASING_POSSIBLE
+		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
+		cheetah_xcall_deliver(data0,
+				      __pa(pg_addr),
+				      0, mask);
+#endif
+	}
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes_xcall);
+#endif
+ flush_self:
+	__local_flush_dcache_page(page);
+
+	put_cpu();
+}
+
+/* Poke @cpu with xcall_receive_signal; the handler just clears the
+ * softint and lets rtrap notice pending work.  No-op for offline cpus.
+ */
+void smp_receive_signal(int cpu)
+{
+	cpumask_t mask = cpumask_of_cpu(cpu);
+
+	if (cpu_online(cpu)) {
+		/* Only the low 32 bits of the handler address are sent;
+		 * presumably the dispatch path sign/zero-extends into
+		 * the kernel text range -- TODO confirm.
+		 */
+		u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);
+
+		if (tlb_type == spitfire)
+			spitfire_xcall_deliver(data0, 0, 0, mask);
+		else
+			cheetah_xcall_deliver(data0, 0, 0, mask);
+	}
+}
+
+/* Target-side handler for smp_receive_signal(). */
+void smp_receive_signal_client(int irq, struct pt_regs *regs)
+{
+	/* Just return, rtrap takes care of the rest. */
+	clear_softint(1 << irq);
+}
+
+/* Cross call every other cpu into xcall_report_regs (debug aid). */
+void smp_report_regs(void)
+{
+	smp_cross_call(&xcall_report_regs, 0, 0, 0);
+}
+
+/* Flush the entire TLB on every cpu: remote cpus first via the
+ * chip-specific xcall, then locally.
+ */
+void smp_flush_tlb_all(void)
+{
+	if (tlb_type == spitfire)
+		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
+	else
+		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
+	__flush_tlb_all();
+}
+
+/* We know that the window frames of the user have been flushed
+ * to the stack before we get here because all callers of us
+ * are flush_tlb_*() routines, and these run after flush_cache_*()
+ * which performs the flushw.
+ *
+ * The SMP TLB coherency scheme we use works as follows:
+ *
+ * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
+ * space has (potentially) executed on, this is the heuristic
+ * we use to avoid doing cross calls.
+ *
+ * Also, for flushing from kswapd and also for clones, we
+ * use cpu_vm_mask as the list of cpus to make run the TLB.
+ *
+ * 2) TLB context numbers are shared globally across all processors
+ * in the system, this allows us to play several games to avoid
+ * cross calls.
+ *
+ * One invariant is that when a cpu switches to a process, and
+ * that processes tsk->active_mm->cpu_vm_mask does not have the
+ * current cpu's bit set, that tlb context is flushed locally.
+ *
+ * If the address space is non-shared (ie. mm->count == 1) we avoid
+ * cross calls when we want to flush the currently running process's
+ * tlb state. This is done by clearing all cpu bits except the current
+ * processor's in current->active_mm->cpu_vm_mask and performing the
+ * flush locally only. This will force any subsequent cpus which run
+ * this task to flush the context from the local tlb if the process
+ * migrates to another cpu (again).
+ *
+ * 3) For shared address spaces (threads) and swapping we bite the
+ * bullet for most cases and perform the cross call (but only to
+ * the cpus listed in cpu_vm_mask).
+ *
+ * The performance gain from "optimizing" away the cross call for threads is
+ * questionable (in theory the big win for threads is the massive sharing of
+ * address space state across processors).
+ */
+/* Flush all TLB entries for @mm's hardware context on the cpus in
+ * mm->cpu_vm_mask, then locally.  A single-user mm skips the cross
+ * call entirely and shrinks cpu_vm_mask to just this cpu (see the
+ * coherency scheme described above).
+ */
+void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+	/*
+	 * This code is called from two places, dup_mmap and exit_mmap. In the
+	 * former case, we really need a flush. In the later case, the callers
+	 * are single threaded exec_mmap (really need a flush), multithreaded
+	 * exec_mmap case (do not need to flush, since the caller gets a new
+	 * context via activate_mm), and all other callers of mmput() whence
+	 * the flush can be optimized since the associated threads are dead and
+	 * the mm is being torn down (__exit_mm and other mmput callers) or the
+	 * owning thread is dissociating itself from the mm. The
+	 * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
+	 * for single thread exec and dup_mmap cases. An alternate check might
+	 * have been (current->mm != mm).
+	 * Kanoj Sarcar
+	 */
+	if (atomic_read(&mm->mm_users) == 0)
+		return;
+
+	{
+		u32 ctx = CTX_HWBITS(mm->context);
+		int cpu = get_cpu();
+
+		if (atomic_read(&mm->mm_users) == 1) {
+			mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+			goto local_flush_and_out;
+		}
+
+		smp_cross_call_masked(&xcall_flush_tlb_mm,
+				      ctx, 0, 0,
+				      mm->cpu_vm_mask);
+
+	local_flush_and_out:
+		__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+
+		put_cpu();
+	}
+}
+
+/* Flush @nr individual page translations (@vaddrs) for @mm's context:
+ * cross call to mm->cpu_vm_mask, then flush locally.  The single-user
+ * active-mm case avoids the cross call, as in smp_flush_tlb_mm().
+ */
+void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
+{
+	u32 ctx = CTX_HWBITS(mm->context);
+	int cpu = get_cpu();
+
+	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+		goto local_flush_and_out;
+	} else {
+		/* This optimization is not valid. Normally
+		 * we will be holding the page_table_lock, but
+		 * there is an exception which is copy_page_range()
+		 * when forking. The lock is held during the individual
+		 * page table updates in the parent, but not at the
+		 * top level, which is where we are invoked.
+		 */
+		if (0) {	/* intentionally disabled, see comment above */
+			cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
+
+			/* By virtue of running under the mm->page_table_lock,
+			 * and mmu_context.h:switch_mm doing the same, the
+			 * following operation is safe.
+			 */
+			if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
+				goto local_flush_and_out;
+		}
+	}
+
+	smp_cross_call_masked(&xcall_flush_tlb_pending,
+			      ctx, nr, (unsigned long) vaddrs,
+			      mm->cpu_vm_mask);
+
+local_flush_and_out:
+	__flush_tlb_pending(ctx, nr, vaddrs);
+
+	put_cpu();
+}
+
+/* Flush the kernel TLB entries covering [start, end) on all cpus.
+ * The range is widened to page boundaries; an empty range is a no-op.
+ */
+void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	start &= PAGE_MASK;
+	end = PAGE_ALIGN(end);
+
+	if (start == end)
+		return;
+
+	smp_cross_call(&xcall_flush_tlb_kernel_range, 0, start, end);
+	__flush_tlb_kernel_range(start, end);
+}
+
+/* CPU capture. */
+/* #define CAPTURE_DEBUG */
+extern unsigned long xcall_capture;
+
+static atomic_t smp_capture_depth = ATOMIC_INIT(0);
+static atomic_t smp_capture_registry = ATOMIC_INIT(0);
+static unsigned long penguins_are_doing_time;
+
+/* Corral all other online cpus into smp_penguin_jailcell and spin
+ * there until smp_release().  Nestable: only the outermost capture
+ * (depth 1) raises the flag and sends the cross call, then waits for
+ * every cpu to check into smp_capture_registry.
+ */
+void smp_capture(void)
+{
+	int result = atomic_add_ret(1, &smp_capture_depth);
+
+	if (result == 1) {
+		int ncpus = num_online_cpus();
+
+#ifdef CAPTURE_DEBUG
+		printk("CPU[%d]: Sending penguins to jail...",
+		       smp_processor_id());
+#endif
+		penguins_are_doing_time = 1;
+		/* Make the flag visible before anyone registers. */
+		membar("#StoreStore | #LoadStore");
+		atomic_inc(&smp_capture_registry);	/* count ourselves */
+		smp_cross_call(&xcall_capture, 0, 0, 0);
+		while (atomic_read(&smp_capture_registry) != ncpus)
+			membar("#LoadLoad");
+#ifdef CAPTURE_DEBUG
+		printk("done\n");
+#endif
+	}
+}
+
+/* Undo one level of smp_capture(); the final release drops the flag
+ * the captive cpus are spinning on and deregisters ourselves.
+ */
+void smp_release(void)
+{
+	if (atomic_dec_and_test(&smp_capture_depth)) {
+#ifdef CAPTURE_DEBUG
+		printk("CPU[%d]: Giving pardon to "
+		       "imprisoned penguins\n",
+		       smp_processor_id());
+#endif
+		penguins_are_doing_time = 0;
+		/* Flag store must be visible before the registry drop. */
+		membar("#StoreStore | #StoreLoad");
+		atomic_dec(&smp_capture_registry);
+	}
+}
+
+/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
+ * can service tlb flush xcalls...
+ */
+extern void prom_world(int);
+extern void save_alternate_globals(unsigned long *);
+extern void restore_alternate_globals(unsigned long *);
+/* Softint handler for xcall_capture: check in with the capturer, spin
+ * in PROM context until smp_release(), then restore our state.
+ */
+void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+{
+	unsigned long global_save[24];	/* scratch for alternate globals */
+
+	clear_softint(1 << irq);
+
+	preempt_disable();
+
+	/* Spill register windows, save the alternate globals, and
+	 * switch into PROM context (prom_world(1)) before registering.
+	 */
+	__asm__ __volatile__("flushw");
+	save_alternate_globals(global_save);
+	prom_world(1);
+	atomic_inc(&smp_capture_registry);
+	membar("#StoreLoad | #StoreStore");
+	while (penguins_are_doing_time)
+		membar("#LoadLoad");
+	restore_alternate_globals(global_save);
+	atomic_dec(&smp_capture_registry);
+	prom_world(0);
+
+	preempt_enable();
+}
+
+extern unsigned long xcall_promstop;
+
+/* Cross call every other cpu into xcall_promstop -- presumably to
+ * park them before handing the machine to the PROM; confirm callers.
+ */
+void smp_promstop_others(void)
+{
+	smp_cross_call(&xcall_promstop, 0, 0, 0);
+}
+
+#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
+#define prof_counter(__cpu) cpu_data(__cpu).counter
+
+/* Per-cpu tick interrupt.  If the pending softint is not the tick
+ * source, hand off to handler_irq().  Otherwise run profiling and
+ * process accounting (the boot cpu also drives the global timer tick)
+ * and re-arm the compare register, looping in case the new compare
+ * value was already passed by the time we wrote it.
+ */
+void smp_percpu_timer_interrupt(struct pt_regs *regs)
+{
+	unsigned long compare, tick, pstate;
+	int cpu = smp_processor_id();
+	int user = user_mode(regs);
+
+	/*
+	 * Check for level 14 softint.
+	 */
+	{
+		unsigned long tick_mask = tick_ops->softint_mask;
+
+		if (!(get_softint() & tick_mask)) {
+			extern void handler_irq(int, struct pt_regs *);
+
+			handler_irq(14, regs);
+			return;
+		}
+		clear_softint(tick_mask);
+	}
+
+	do {
+		profile_tick(CPU_PROFILING, regs);
+		if (!--prof_counter(cpu)) {
+			irq_enter();
+
+			if (cpu == boot_cpu_id) {
+				kstat_this_cpu.irqs[0]++;
+				timer_tick_interrupt(regs);
+			}
+
+			update_process_times(user);
+
+			irq_exit();
+
+			prof_counter(cpu) = prof_multiplier(cpu);
+		}
+
+		/* Guarantee that the following sequences execute
+		 * uninterrupted (disable interrupts via PSTATE_IE).
+		 */
+		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
+				     "wrpr %0, %1, %%pstate"
+				     : "=r" (pstate)
+				     : "i" (PSTATE_IE));
+
+		compare = tick_ops->add_compare(current_tick_offset);
+		tick = tick_ops->get_tick();
+
+		/* Restore PSTATE_IE. */
+		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+				     : /* no outputs */
+				     : "r" (pstate));
+	} while (time_after_eq(tick, compare));
+}
+
+/* Initialize the calling cpu's tick timer: reset its profiling
+ * counters and program the tick compare register, with interrupts
+ * held off around the hardware init.
+ */
+static void __init smp_setup_percpu_timer(void)
+{
+	int cpu = smp_processor_id();
+	unsigned long pstate;
+
+	prof_counter(cpu) = prof_multiplier(cpu) = 1;
+
+	/* Guarantee that the following sequences execute
+	 * uninterrupted.
+	 */
+	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
+			     "wrpr %0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+
+	tick_ops->init_tick(current_tick_offset);
+
+	/* Restore PSTATE_IE. */
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+			     : /* no outputs */
+			     : "r" (pstate));
+}
+
+/* Early boot tick setup: record the boot cpu's id, adopt the timer
+ * tick offset, mark the boot cpu online and seed its profile counters.
+ */
+void __init smp_tick_init(void)
+{
+	boot_cpu_id = hard_smp_processor_id();
+	current_tick_offset = timer_tick_offset;
+
+	cpu_set(boot_cpu_id, cpu_online_map);
+	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
+}
+
+/* /proc/profile writes can call this, don't __init it please. */
+static DEFINE_SPINLOCK(prof_setup_lock);
+
+/* Set the profiling tick multiplier for every cpu.  Rejects a zero
+ * multiplier or one that would shrink the per-tick offset below 1000.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	unsigned long flags;
+	int i;
+
+	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
+		return -EINVAL;
+
+	spin_lock_irqsave(&prof_setup_lock, flags);
+	for (i = 0; i < NR_CPUS; i++)
+		prof_multiplier(i) = multiplier;
+	current_tick_offset = (timer_tick_offset / multiplier);
+	spin_unlock_irqrestore(&prof_setup_lock, flags);
+
+	return 0;
+}
+
+/* Build phys_cpu_present_map from the PROM's cpu list, then trim it
+ * (never removing the boot cpu) until at most max_cpus remain.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int instance, mid;
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, NULL, &mid)) {
+		/* NOTE(review): this admits cpus by mid < max_cpus, not
+		 * by count; the trimming pass below enforces the count.
+		 */
+		if (mid < max_cpus)
+			cpu_set(mid, phys_cpu_present_map);
+		instance++;
+	}
+
+	if (num_possible_cpus() > max_cpus) {
+		instance = 0;
+		while (!cpu_find_by_instance(instance, NULL, &mid)) {
+			if (mid != boot_cpu_id) {
+				cpu_clear(mid, phys_cpu_present_map);
+				if (num_possible_cpus() <= max_cpus)
+					break;
+			}
+			instance++;
+		}
+	}
+
+	smp_store_cpu_info(boot_cpu_id);
+}
+
+/* Validate the boot cpu's hardware id, record it in thread_info, and
+ * mark the boot cpu online and physically present.
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+	if (hard_smp_processor_id() >= NR_CPUS) {
+		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
+		prom_halt();
+	}
+
+	current_thread_info()->cpu = hard_smp_processor_id();
+
+	cpu_set(smp_processor_id(), cpu_online_map);
+	cpu_set(smp_processor_id(), phys_cpu_present_map);
+}
+
+/* Boot @cpu, admit it to smp_commenced_mask, wait for it to appear in
+ * cpu_online_map, and synchronize its %tick with ours.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+	int ret = smp_boot_one_cpu(cpu);
+
+	if (!ret) {
+		cpu_set(cpu, smp_commenced_mask);
+		/* NOTE(review): this spins forever if the cpu never
+		 * comes online, which makes the -ENODEV branch below
+		 * unreachable dead code.
+		 */
+		while (!cpu_isset(cpu, cpu_online_map))
+			mb();
+		if (!cpu_isset(cpu, cpu_online_map)) {
+			ret = -ENODEV;
+		} else {
+			smp_synchronize_one_tick(cpu);
+		}
+	}
+	return ret;
+}
+
+/* End-of-bringup report: sum the online cpus' udelay calibrations and
+ * print the aggregate BogoMIPS figure.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	unsigned long total_bogo = 0;
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		if (cpu_online(cpu))
+			total_bogo += cpu_data(cpu).udelay_val;
+
+	printk("Total of %ld processors activated "
+	       "(%lu.%02lu BogoMIPS).\n",
+	       (long) num_online_cpus(),
+	       total_bogo / (500000 / HZ),
+	       (total_bogo / (5000 / HZ)) % 100);
+}
+
+/* This needn't do anything as we do not sleep the cpu
+ * inside of the idler task, so an interrupt is not needed
+ * to get a clean fast response.
+ *
+ * XXX Reverify this assumption... -DaveM
+ *
+ * Addendum: We do want it to do something for the signal
+ *           delivery case, we detect that by just seeing
+ *           if we are trying to send this to an idler or not.
+ */
+void smp_send_reschedule(int cpu)
+{
+	/* Only a cpu sitting in the idle loop (idle_volume == 0... the
+	 * inverse: non-idle targets are skipped) needs the poke.
+	 */
+	if (cpu_data(cpu).idle_volume == 0)
+		smp_receive_signal(cpu);
+}
+
+/* This is a nop because we capture all other cpus
+ * anyways when making the PROM active (see smp_capture()).
+ */
+void smp_send_stop(void)
+{
+}
+
+unsigned long __per_cpu_base;
+unsigned long __per_cpu_shift;
+
+EXPORT_SYMBOL(__per_cpu_base);
+EXPORT_SYMBOL(__per_cpu_shift);
+
+/* Allocate the per-cpu data areas and record __per_cpu_base/shift so
+ * trap code can locate each cpu's copy.  The per-cpu stride is the
+ * smallest power of two covering .data.percpu (and PERCPU_ENOUGH_ROOM
+ * when modules are enabled).
+ */
+void __init setup_per_cpu_areas(void)
+{
+	unsigned long goal, size, i;
+	char *ptr;
+	/* Created by linker magic */
+	extern char __per_cpu_start[], __per_cpu_end[];
+
+	/* Copy section for each CPU (we discard the original) */
+	goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
+
+#ifdef CONFIG_MODULES
+	if (goal < PERCPU_ENOUGH_ROOM)
+		goal = PERCPU_ENOUGH_ROOM;
+#endif
+	/* Round the stride up to a power of two; record it as a shift. */
+	__per_cpu_shift = 0;
+	for (size = 1UL; size < goal; size <<= 1UL)
+		__per_cpu_shift++;
+
+	/* Make sure the resulting __per_cpu_base value
+	 * will fit in the 43-bit sign extended IMMU
+	 * TSB register.
+	 */
+	ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
+			      (unsigned long) __per_cpu_start);
+
+	__per_cpu_base = ptr - __per_cpu_start;
+
+	if ((__per_cpu_shift < PAGE_SHIFT) ||
+	    (__per_cpu_base & ~PAGE_MASK) ||
+	    (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
+		prom_printf("PER_CPU: Invalid layout, "
+			    "ptr[%p] shift[%lx] base[%lx]\n",
+			    ptr, __per_cpu_shift, __per_cpu_base);
+		prom_halt();
+	}
+
+	for (i = 0; i < NR_CPUS; i++, ptr += size)
+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+	/* Finally, load in the boot cpu's base value.
+	 * We abuse the IMMU TSB register for trap handler
+	 * entry and exit loading of %g5.  That is why it
+	 * has to be page aligned.
+	 */
+	cpu_setup_percpu_base(hard_smp_processor_id());
+}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
new file mode 100644
index 00000000000..cad5a112280
--- /dev/null
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -0,0 +1,432 @@
+/* $Id: sparc64_ksyms.c,v 1.121 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+/* Tell string.h we don't want memcpy etc. as cpp defines */
+#define EXPORT_SYMTAB_STROPS
+#define PROMLIB_INTERNAL
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/fs_struct.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/syscalls.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <net/compat.h>
+
+#include <asm/oplib.h>
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/auxio.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/idprom.h>
+#include <asm/svr4.h>
+#include <asm/elf.h>
+#include <asm/head.h>
+#include <asm/smp.h>
+#include <asm/mostek.h>
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/fpumacro.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#ifdef CONFIG_SBUS
+#include <asm/sbus.h>
+#include <asm/dma.h>
+#endif
+#ifdef CONFIG_PCI
+#include <asm/ebus.h>
+#include <asm/isa.h>
+#endif
+#include <asm/a.out.h>
+#include <asm/ns87303.h>
+#include <asm/timer.h>
+#include <asm/cpudata.h>
+#include <asm/rwsem.h>
+
+struct poll {
+ int fd;
+ short events;
+ short revents;
+};
+
+extern void die_if_kernel(char *str, struct pt_regs *regs);
+extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+void _sigpause_common (unsigned int set, struct pt_regs *);
+extern void *__bzero(void *, size_t);
+extern void *__memscan_zero(void *, size_t);
+extern void *__memscan_generic(void *, int, size_t);
+extern int __memcmp(const void *, const void *, __kernel_size_t);
+extern __kernel_size_t strlen(const char *);
+extern void linux_sparc_syscall(void);
+extern void rtrap(void);
+extern void show_regs(struct pt_regs *);
+extern void solaris_syscall(void);
+extern void syscall_trace(void);
+extern u32 sunos_sys_table[], sys_call_table32[];
+extern void tl0_solaris(void);
+extern void sys_sigsuspend(void);
+extern int svr4_getcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
+extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
+extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
+extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
+extern long sparc32_open(const char __user * filename, int flags, int mode);
+extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long offset, unsigned long size, pgprot_t prot, int space);
+extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
+extern void (*prom_palette)(int);
+
+extern int __ashrdi3(int, int);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
+extern void _do_spin_lock (spinlock_t *lock, char *str);
+extern void _do_spin_unlock (spinlock_t *lock);
+extern int _spin_trylock (spinlock_t *lock);
+extern void _do_read_lock(rwlock_t *rw, char *str);
+extern void _do_read_unlock(rwlock_t *rw, char *str);
+extern void _do_write_lock(rwlock_t *rw, char *str);
+extern void _do_write_unlock(rwlock_t *rw);
+extern int _do_write_trylock(rwlock_t *rw, char *str);
+#endif
+
+extern unsigned long phys_base;
+extern unsigned long pfn_base;
+
+extern unsigned int sys_call_table[];
+
+extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *);
+extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *, unsigned long *);
+
+/* Per-CPU information table */
+EXPORT_PER_CPU_SYMBOL(__cpu_data);
+
+/* used by various drivers */
+#ifdef CONFIG_SMP
+#ifndef CONFIG_DEBUG_SPINLOCK
+/* Out of line rw-locking implementation. */
+EXPORT_SYMBOL(__read_lock);
+EXPORT_SYMBOL(__read_unlock);
+EXPORT_SYMBOL(__write_lock);
+EXPORT_SYMBOL(__write_unlock);
+EXPORT_SYMBOL(__write_trylock);
+/* Out of line spin-locking implementation. */
+EXPORT_SYMBOL(_raw_spin_lock);
+EXPORT_SYMBOL(_raw_spin_lock_flags);
+#endif
+
+/* Hard IRQ locking */
+EXPORT_SYMBOL(synchronize_irq);
+
+#if defined(CONFIG_MCOUNT)
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/* CPU online map and active count. */
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(phys_cpu_present_map);
+
+/* Spinlock debugging library, optional. */
+#ifdef CONFIG_DEBUG_SPINLOCK
+EXPORT_SYMBOL(_do_spin_lock);
+EXPORT_SYMBOL(_do_spin_unlock);
+EXPORT_SYMBOL(_spin_trylock);
+EXPORT_SYMBOL(_do_read_lock);
+EXPORT_SYMBOL(_do_read_unlock);
+EXPORT_SYMBOL(_do_write_lock);
+EXPORT_SYMBOL(_do_write_unlock);
+EXPORT_SYMBOL(_do_write_trylock);
+#endif
+
+EXPORT_SYMBOL(smp_call_function);
+#endif /* CONFIG_SMP */
+
+EXPORT_SYMBOL(sparc64_get_clock_tick);
+
+/* semaphores */
+EXPORT_SYMBOL(down);
+EXPORT_SYMBOL(down_trylock);
+EXPORT_SYMBOL(down_interruptible);
+EXPORT_SYMBOL(up);
+
+/* RW semaphores */
+EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
+EXPORT_SYMBOL(__up_read);
+EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
+
+/* Atomic counter implementation. */
+EXPORT_SYMBOL(atomic_add);
+EXPORT_SYMBOL(atomic_add_ret);
+EXPORT_SYMBOL(atomic_sub);
+EXPORT_SYMBOL(atomic_sub_ret);
+EXPORT_SYMBOL(atomic64_add);
+EXPORT_SYMBOL(atomic64_add_ret);
+EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_ret);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif
+
+/* Atomic bit operations. */
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
+
+/* Bit searching */
+EXPORT_SYMBOL(find_next_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
+EXPORT_SYMBOL(find_next_zero_le_bit);
+
+EXPORT_SYMBOL(ivector_table);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+
+EXPORT_SYMBOL(__flushw_user);
+
+EXPORT_SYMBOL(tlb_type);
+EXPORT_SYMBOL(get_fb_unmapped_area);
+EXPORT_SYMBOL(flush_icache_range);
+
+EXPORT_SYMBOL(flush_dcache_page);
+#ifdef DCACHE_ALIASING_POSSIBLE
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
+
+EXPORT_SYMBOL(mostek_lock);
+EXPORT_SYMBOL(mstk48t02_regs);
+EXPORT_SYMBOL(request_fast_irq);
+#ifdef CONFIG_SUN_AUXIO
+EXPORT_SYMBOL(auxio_set_led);
+EXPORT_SYMBOL(auxio_set_lte);
+#endif
+#ifdef CONFIG_SBUS
+EXPORT_SYMBOL(sbus_root);
+EXPORT_SYMBOL(dma_chain);
+EXPORT_SYMBOL(sbus_set_sbus64);
+EXPORT_SYMBOL(sbus_alloc_consistent);
+EXPORT_SYMBOL(sbus_free_consistent);
+EXPORT_SYMBOL(sbus_map_single);
+EXPORT_SYMBOL(sbus_unmap_single);
+EXPORT_SYMBOL(sbus_map_sg);
+EXPORT_SYMBOL(sbus_unmap_sg);
+EXPORT_SYMBOL(sbus_dma_sync_single_for_cpu);
+EXPORT_SYMBOL(sbus_dma_sync_single_for_device);
+EXPORT_SYMBOL(sbus_dma_sync_sg_for_cpu);
+EXPORT_SYMBOL(sbus_dma_sync_sg_for_device);
+#endif
+EXPORT_SYMBOL(outsb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(outsl);
+EXPORT_SYMBOL(insb);
+EXPORT_SYMBOL(insw);
+EXPORT_SYMBOL(insl);
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(ebus_chain);
+EXPORT_SYMBOL(isa_chain);
+EXPORT_SYMBOL(pci_memspace_mask);
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_map_single);
+EXPORT_SYMBOL(pci_unmap_single);
+EXPORT_SYMBOL(pci_map_sg);
+EXPORT_SYMBOL(pci_unmap_sg);
+EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
+EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
+EXPORT_SYMBOL(pci_dma_supported);
+#endif
+
+/* I/O device mmaping on Sparc64. */
+EXPORT_SYMBOL(io_remap_page_range);
+EXPORT_SYMBOL(io_remap_pfn_range);
+
+/* Solaris/SunOS binary compatibility */
+EXPORT_SYMBOL(_sigpause_common);
+EXPORT_SYMBOL(verify_compat_iovec);
+
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(__pte_alloc_one_kernel);
+#ifndef CONFIG_SMP
+EXPORT_SYMBOL(pgt_quicklists);
+#endif
+EXPORT_SYMBOL(put_fs_struct);
+
+/* math-emu wants this */
+EXPORT_SYMBOL(die_if_kernel);
+
+/* Kernel thread creation. */
+EXPORT_SYMBOL(kernel_thread);
+
+/* prom symbols */
+EXPORT_SYMBOL(idprom);
+EXPORT_SYMBOL(prom_root_node);
+EXPORT_SYMBOL(prom_getchild);
+EXPORT_SYMBOL(prom_getsibling);
+EXPORT_SYMBOL(prom_searchsiblings);
+EXPORT_SYMBOL(prom_firstprop);
+EXPORT_SYMBOL(prom_nextprop);
+EXPORT_SYMBOL(prom_getproplen);
+EXPORT_SYMBOL(prom_getproperty);
+EXPORT_SYMBOL(prom_node_has_property);
+EXPORT_SYMBOL(prom_setprop);
+EXPORT_SYMBOL(saved_command_line);
+EXPORT_SYMBOL(prom_getname);
+EXPORT_SYMBOL(prom_finddevice);
+EXPORT_SYMBOL(prom_feval);
+EXPORT_SYMBOL(prom_getbool);
+EXPORT_SYMBOL(prom_getstring);
+EXPORT_SYMBOL(prom_getint);
+EXPORT_SYMBOL(prom_getintdefault);
+EXPORT_SYMBOL(__prom_getchild);
+EXPORT_SYMBOL(__prom_getsibling);
+
+/* sparc library symbols */
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(__strlen_user);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+#ifdef CONFIG_SOLARIS_EMUL_MODULE
+EXPORT_SYMBOL(linux_sparc_syscall);
+EXPORT_SYMBOL(rtrap);
+EXPORT_SYMBOL(show_regs);
+EXPORT_SYMBOL(solaris_syscall);
+EXPORT_SYMBOL(syscall_trace);
+EXPORT_SYMBOL(sunos_sys_table);
+EXPORT_SYMBOL(sys_call_table32);
+EXPORT_SYMBOL(tl0_solaris);
+EXPORT_SYMBOL(sys_sigsuspend);
+EXPORT_SYMBOL(sys_getppid);
+EXPORT_SYMBOL(sys_getpid);
+EXPORT_SYMBOL(sys_geteuid);
+EXPORT_SYMBOL(sys_getuid);
+EXPORT_SYMBOL(sys_getegid);
+EXPORT_SYMBOL(sys_getgid);
+EXPORT_SYMBOL(svr4_getcontext);
+EXPORT_SYMBOL(svr4_setcontext);
+EXPORT_SYMBOL(compat_sys_ioctl);
+EXPORT_SYMBOL(sparc32_open);
+EXPORT_SYMBOL(sys_close);
+#endif
+
+/* Special internal versions of library functions. */
+EXPORT_SYMBOL(_clear_page);
+EXPORT_SYMBOL(clear_user_page);
+EXPORT_SYMBOL(copy_user_page);
+EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__memscan_zero);
+EXPORT_SYMBOL(__memscan_generic);
+EXPORT_SYMBOL(__memcmp);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(memchr);
+
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(ip_fast_csum);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(___copy_to_user);
+EXPORT_SYMBOL(___copy_from_user);
+EXPORT_SYMBOL(___copy_in_user);
+EXPORT_SYMBOL(copy_to_user_fixup);
+EXPORT_SYMBOL(copy_from_user_fixup);
+EXPORT_SYMBOL(copy_in_user_fixup);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__bzero_noasi);
+
+/* Various address conversion macros use this. */
+EXPORT_SYMBOL(phys_base);
+EXPORT_SYMBOL(pfn_base);
+EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
+EXPORT_SYMBOL(page_to_pfn);
+EXPORT_SYMBOL(pfn_to_page);
+
+/* No version information on this, heavily used in inline asm,
+ * and will always be 'void __ret_efault(void)'.
+ */
+EXPORT_SYMBOL(__ret_efault);
+
+/* No version information on these, as gcc produces such symbols. */
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(strncmp);
+
+/* Delay routines. */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__ndelay);
+EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(__delay);
+
+void VISenter(void);
+/* RAID code needs this */
+EXPORT_SYMBOL(VISenter);
+
+/* for input/keybdev */
+EXPORT_SYMBOL(sun_do_break);
+EXPORT_SYMBOL(serial_console);
+EXPORT_SYMBOL(stop_a_enabled);
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+EXPORT_SYMBOL(do_BUG);
+#endif
+
+/* for ns87303 */
+EXPORT_SYMBOL(ns87303_lock);
+
+/* for solaris compat module */
+EXPORT_SYMBOL_GPL(sys_call_table);
+
+EXPORT_SYMBOL(tick_ops);
+
+EXPORT_SYMBOL(xor_vis_2);
+EXPORT_SYMBOL(xor_vis_3);
+EXPORT_SYMBOL(xor_vis_4);
+EXPORT_SYMBOL(xor_vis_5);
+
+EXPORT_SYMBOL(prom_palette);
diff --git a/arch/sparc64/kernel/starfire.c b/arch/sparc64/kernel/starfire.c
new file mode 100644
index 00000000000..ae859d40771
--- /dev/null
+++ b/arch/sparc64/kernel/starfire.c
@@ -0,0 +1,123 @@
+/* $Id: starfire.c,v 1.10 2001/04/14 21:13:45 davem Exp $
+ * starfire.c: Starfire/E10000 support.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+#include <asm/oplib.h>
+#include <asm/smp.h>
+#include <asm/upa.h>
+#include <asm/starfire.h>
+
+/*
+ * A few places around the kernel check this to see if
+ * they need to call us to do things in a Starfire specific
+ * way.
+ */
+int this_is_starfire = 0;
+
+/* Probe the PROM for the /ssp-serial node and latch the result in
+ * this_is_starfire (0 and -1 are prom_finddevice failure values).
+ */
+void check_if_starfire(void)
+{
+	int ssnode = prom_finddevice("/ssp-serial");
+	if (ssnode != 0 && ssnode != -1)
+		this_is_starfire = 1;
+}
+
+/* Per-cpu Starfire setup hook; intentionally empty for now. */
+void starfire_cpu_setup(void)
+{
+	/* Currently, nothing to do. */
+}
+
+/* Read this cpu's id from a fixed UPA register -- presumably the
+ * Starfire port-id register; TODO confirm the address's meaning.
+ */
+int starfire_hard_smp_processor_id(void)
+{
+	return upa_readl(0x1fff40000d0UL);
+}
+
+/*
+ * Each Starfire board has 32 registers which perform translation
+ * and delivery of traditional interrupt packets into the extended
+ * Starfire hardware format. Essentially UPAID's now have 2 more
+ * bits than in all previous Sun5 systems.
+ */
+struct starfire_irqinfo {
+	unsigned long imap_slots[32];	/* imap claiming each translation slot */
+	unsigned long tregs[32];	/* phys addrs of the 32 translation regs */
+	struct starfire_irqinfo *next;	/* link in the sflist chain */
+	int upaid, hwmid;		/* board UPA id and derived hw module id */
+};
+
+static struct starfire_irqinfo *sflist = NULL;
+
+/* Beam me up Scott(McNeil)y... */
+/* Register a Starfire board by UPA id: allocate its irqinfo record,
+ * derive the hardware module id, compute the physical addresses of
+ * its 32 interrupt translation registers, and chain it onto sflist.
+ * Halts via the PROM on allocation failure.
+ */
+void *starfire_hookup(int upaid)
+{
+	struct starfire_irqinfo *p;
+	unsigned long treg_base, hwmid, i;
+
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (!p) {
+		prom_printf("starfire_hookup: No memory, this is insane.\n");
+		prom_halt();
+	}
+	treg_base = 0x100fc000000UL;
+	/* Scramble the UPA id bits into the hardware module id
+	 * (same mapping as starfire_translate below).
+	 */
+	hwmid = ((upaid & 0x3c) << 1) |
+		((upaid & 0x40) >> 4) |
+		(upaid & 0x3);
+	p->hwmid = hwmid;
+	treg_base += (hwmid << 33UL);
+	treg_base += 0x200UL;
+	for (i = 0; i < 32; i++) {
+		p->imap_slots[i] = 0UL;
+		p->tregs[i] = treg_base + (i * 0x10UL);
+		/* Lets play it safe and not overwrite existing mappings */
+		if (upa_readl(p->tregs[i]) != 0)
+			p->imap_slots[i] = 0xdeadbeaf;	/* sentinel: slot in use */
+	}
+	p->upaid = upaid;
+	p->next = sflist;
+	sflist = p;
+
+	return (void *) p;
+}
+
+/* Program a translation slot on @imap's owning board so interrupts go
+ * to @upaid, and return the slot index used.  Reuses the slot already
+ * holding @imap, otherwise takes the first free one; panics when the
+ * board is unknown or all 32 slots are taken.
+ */
+unsigned int starfire_translate(unsigned long imap,
+				unsigned int upaid)
+{
+	struct starfire_irqinfo *p;
+	unsigned int bus_hwmid;
+	unsigned int i;
+
+	/* The hw module id is encoded in bits 33..39 of the imap address. */
+	bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
+	for (p = sflist; p != NULL; p = p->next)
+		if (p->hwmid == bus_hwmid)
+			break;
+	if (p == NULL) {
+		prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
+			    ((unsigned long)imap));
+		prom_halt();
+	}
+	for (i = 0; i < 32; i++) {
+		if (p->imap_slots[i] == imap ||
+		    p->imap_slots[i] == 0UL)
+			break;
+	}
+	if (i == 32) {
+		printk("starfire_translate: Are you kidding me?\n");
+		panic("Lucy in the sky....");
+	}
+	p->imap_slots[i] = imap;
+
+	/* map to real upaid */
+	upaid = (((upaid & 0x3c) << 1) |
+		 ((upaid & 0x40) >> 4) |
+		 (upaid & 0x3));
+
+	upa_writel(upaid, p->tregs[i]);
+
+	return i;
+}
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
new file mode 100644
index 00000000000..87c1aeb0222
--- /dev/null
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -0,0 +1,275 @@
+/* $Id: sunos_ioctl32.c,v 1.11 2000/07/30 23:12:24 davem Exp $
+ * sunos_ioctl32.c: SunOS ioctl compatibility on sparc64.
+ *
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/termios.h>
+#include <linux/ioctl.h>
+#include <linux/route.h>
+#include <linux/sockios.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+#include <linux/compat.h>
+#include <asm/kbio.h>
+
+#define SUNOS_NR_OPEN 256
+
+/* 32-bit userland layout of struct rtentry: pointers shrunk to u32. */
+struct rtentry32 {
+	u32   		rt_pad1;
+	struct sockaddr rt_dst;         /* target address               */
+	struct sockaddr rt_gateway;     /* gateway addr (RTF_GATEWAY)   */
+	struct sockaddr rt_genmask;     /* target network mask (IP)     */
+	unsigned short	rt_flags;
+	short		rt_pad2;
+	u32		rt_pad3;
+	unsigned char	rt_tos;
+	unsigned char	rt_class;
+	short		rt_pad4;
+	short		rt_metric;      /* +1 for binary compatibility! */
+	/* char * */ u32 rt_dev;        /* forcing the device at add    */
+	u32		rt_mtu;         /* per route MTU/Window         */
+	u32		rt_window;      /* Window clamping              */
+	unsigned short  rt_irtt;        /* Initial RTT                  */
+
+};
+
+/* 32-bit userland layout of struct ifmap: mem_start/mem_end shrunk to u32. */
+struct ifmap32 {
+	u32 mem_start;
+	u32 mem_end;
+	unsigned short base_addr;
+	unsigned char irq;
+	unsigned char dma;
+	unsigned char port;
+};
+
+/* 32-bit userland layout of struct ifreq (ifru_data is a compat pointer). */
+struct ifreq32 {
+#define IFHWADDRLEN     6
+#define IFNAMSIZ        16
+        union {
+                char    ifrn_name[IFNAMSIZ];	/* if name, e.g. "en0" */
+        } ifr_ifrn;
+        union {
+                struct  sockaddr ifru_addr;
+                struct  sockaddr ifru_dstaddr;
+                struct  sockaddr ifru_broadaddr;
+                struct  sockaddr ifru_netmask;
+                struct  sockaddr ifru_hwaddr;
+                short   ifru_flags;
+                int     ifru_ivalue;
+                int     ifru_mtu;
+                struct  ifmap32 ifru_map;
+                char    ifru_slave[IFNAMSIZ];   /* Just fits the size */
+                compat_caddr_t ifru_data;
+        } ifr_ifru;
+};
+
+/* 32-bit userland layout of struct ifconf: buffer pointer shrunk to compat_caddr_t. */
+struct ifconf32 {
+        int     ifc_len;                        /* size of buffer       */
+        compat_caddr_t  ifcbuf;
+};
+
+extern asmlinkage int compat_sys_ioctl(unsigned int, unsigned int, u32);
+
+/*
+ * SunOS ioctl(2) emulation entry point for 32-bit SunOS binaries.
+ *
+ * SunOS ioctl numbers are built from the same _IOx('i'/'r'/'t', nr, type)
+ * scheme but with 32-bit struct layouts, so each recognized request is
+ * translated to the corresponding Linux SIOC*/TIOC* number and forwarded
+ * to compat_sys_ioctl() (or sys_ioctl() where layouts already match).
+ * Anything unrecognized falls through to compat_sys_ioctl() with EINVAL
+ * rewritten to EOPNOTSUPP, which is what SunOS programs expect.
+ */
+asmlinkage int sunos_ioctl (int fd, u32 cmd, u32 arg)
+{
+	int ret = -EBADF;
+
+	if(fd >= SUNOS_NR_OPEN)
+		goto out;
+	/* NOTE(review): fcheck() takes no reference on the file; this relies
+	 * on the BKL-era semantics of this code base — verify before reuse.
+	 */
+	if(!fcheck(fd))
+		goto out;
+
+	if(cmd == TIOCSETD) {
+		mm_segment_t old_fs = get_fs();
+		int __user *p;
+		int ntty = N_TTY;
+		int tmp;
+
+		p = (int __user *) (unsigned long) arg;
+		ret = -EFAULT;
+		if(get_user(tmp, p))
+			goto out;
+		/* SunOS line discipline 2 maps to Linux N_TTY; pass the
+		 * kernel-space value through with set_fs(KERNEL_DS).
+		 */
+		if(tmp == 2) {
+			set_fs(KERNEL_DS);
+			ret = sys_ioctl(fd, cmd, (unsigned long) &ntty);
+			set_fs(old_fs);
+			ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
+			goto out;
+		}
+	}
+	if(cmd == TIOCNOTTY) {
+		/* SunOS TIOCNOTTY detaches the tty; starting a new session
+		 * has the same effect here.
+		 */
+		ret = sys_setsid();
+		goto out;
+	}
+	switch(cmd) {
+	case _IOW('r', 10, struct rtentry32):
+		ret = compat_sys_ioctl(fd, SIOCADDRT, arg);
+		goto out;
+	case _IOW('r', 11, struct rtentry32):
+		ret = compat_sys_ioctl(fd, SIOCDELRT, arg);
+		goto out;
+
+	case _IOW('i', 12, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFADDR, arg);
+		goto out;
+	case _IOWR('i', 13, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFADDR, arg);
+		goto out;
+	case _IOW('i', 14, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFDSTADDR, arg);
+		goto out;
+	case _IOWR('i', 15, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFDSTADDR, arg);
+		goto out;
+	case _IOW('i', 16, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFFLAGS, arg);
+		goto out;
+	case _IOWR('i', 17, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFFLAGS, arg);
+		goto out;
+	case _IOW('i', 18, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFMEM, arg);
+		goto out;
+	case _IOWR('i', 19, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFMEM, arg);
+		goto out;
+
+	case _IOWR('i', 20, struct ifconf32):
+		ret = compat_sys_ioctl(fd, SIOCGIFCONF, arg);
+		goto out;
+
+	case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
+		ret = sys_ioctl(fd, SIOCSIFMTU, arg);
+		goto out;
+	case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
+		ret = sys_ioctl(fd, SIOCGIFMTU, arg);
+		goto out;
+
+	case _IOWR('i', 23, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFBRDADDR, arg);
+		goto out;
+	case _IOW('i', 24, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFBRDADDR, arg);
+		goto out;
+	case _IOWR('i', 25, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFNETMASK, arg);
+		goto out;
+	case _IOW('i', 26, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFNETMASK, arg);
+		goto out;
+	case _IOWR('i', 27, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCGIFMETRIC, arg);
+		goto out;
+	case _IOW('i', 28, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCSIFMETRIC, arg);
+		goto out;
+
+	case _IOW('i', 30, struct arpreq):
+		ret = compat_sys_ioctl(fd, SIOCSARP, arg);
+		goto out;
+	case _IOWR('i', 31, struct arpreq):
+		ret = compat_sys_ioctl(fd, SIOCGARP, arg);
+		goto out;
+	case _IOW('i', 32, struct arpreq):
+		ret = compat_sys_ioctl(fd, SIOCDARP, arg);
+		goto out;
+
+	/* SunOS-specific interface ioctls with no Linux equivalent. */
+	case _IOW('i', 40, struct ifreq32): /* SIOCUPPER */
+	case _IOW('i', 41, struct ifreq32): /* SIOCLOWER */
+	case _IOW('i', 44, struct ifreq32): /* SIOCSETSYNC */
+	case _IOW('i', 45, struct ifreq32): /* SIOCGETSYNC */
+	case _IOW('i', 46, struct ifreq32): /* SIOCSSDSTATS */
+	case _IOW('i', 47, struct ifreq32): /* SIOCSSESTATS */
+	case _IOW('i', 48, struct ifreq32): /* SIOCSPROMISC */
+		ret = -EOPNOTSUPP;
+		goto out;
+
+	case _IOW('i', 49, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCADDMULTI, arg);
+		goto out;
+	case _IOW('i', 50, struct ifreq32):
+		ret = compat_sys_ioctl(fd, SIOCDELMULTI, arg);
+		goto out;
+
+	/* FDDI interface ioctls, unsupported. */
+
+	case _IOW('i', 51, struct ifreq32): /* SIOCFDRESET */
+	case _IOW('i', 52, struct ifreq32): /* SIOCFDSLEEP */
+	case _IOW('i', 53, struct ifreq32): /* SIOCSTRTFMWAR */
+	case _IOW('i', 54, struct ifreq32): /* SIOCLDNSTRTFW */
+	case _IOW('i', 55, struct ifreq32): /* SIOCGETFDSTAT */
+	case _IOW('i', 56, struct ifreq32): /* SIOCFDNMIINT */
+	case _IOW('i', 57, struct ifreq32): /* SIOCFDEXUSER */
+	case _IOW('i', 58, struct ifreq32): /* SIOCFDGNETMAP */
+	case _IOW('i', 59, struct ifreq32): /* SIOCFDGIOCTL */
+		printk("FDDI ioctl, returning EOPNOTSUPP\n");
+		ret = -EOPNOTSUPP;
+		goto out;
+
+	case _IOW('t', 125, int):
+		/* More stupid tty sunos ioctls, just
+		 * say it worked.
+		 */
+		ret = 0;
+		goto out;
+
+	/* Non posix grp */
+	case _IOW('t', 118, int): {
+		/* SunOS TIOCSPGRP: forward to Linux TIOCSPGRP but mimic
+		 * SunOS error conventions (a -1 result or ENOTTY => EIO,
+		 * restoring the caller's original value).
+		 */
+		int oldval, newval, __user *ptr;
+
+		cmd = TIOCSPGRP;
+		ptr = (int __user *) (unsigned long) arg;
+		ret = -EFAULT;
+		if(get_user(oldval, ptr))
+			goto out;
+		ret = compat_sys_ioctl(fd, cmd, arg);
+		__get_user(newval, ptr);
+		if(newval == -1) {
+			__put_user(oldval, ptr);
+			ret = -EIO;
+		}
+		if(ret == -ENOTTY)
+			ret = -EIO;
+		goto out;
+	}
+
+	case _IOR('t', 119, int): {
+		/* SunOS TIOCGPGRP: same -1/ENOTTY => EIO translation. */
+		int oldval, newval, __user *ptr;
+
+		cmd = TIOCGPGRP;
+		ptr = (int __user *) (unsigned long) arg;
+		ret = -EFAULT;
+		if(get_user(oldval, ptr))
+			goto out;
+		ret = compat_sys_ioctl(fd, cmd, arg);
+		__get_user(newval, ptr);
+		if(newval == -1) {
+			__put_user(oldval, ptr);
+			ret = -EIO;
+		}
+		if(ret == -ENOTTY)
+			ret = -EIO;
+		goto out;
+	}
+	};
+
+	/* Unrecognized: let the generic compat layer have a go. */
+	ret = compat_sys_ioctl(fd, cmd, arg);
+	/* so stupid... */
+	ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
+out:
+	return ret;
+}
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
new file mode 100644
index 00000000000..5a95e98c531
--- /dev/null
+++ b/arch/sparc64/kernel/sys32.S
@@ -0,0 +1,327 @@
+/* $Id: sys32.S,v 1.12 2000/03/24 04:17:37 davem Exp $
+ * sys32.S: I-cache tricks for 32-bit compatibility layer simple
+ * conversions.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <linux/config.h>
+#include <asm/errno.h>
+
+/* NOTE: call as jump breaks return stack, we have to avoid that */
+
+ .text
+
+/*
+ * SIGNn(STUB, SYSCALL, REG...): emit a 32-byte aligned stub named STUB
+ * that sign-extends the listed 32-bit argument registers ("sra REG, 0,
+ * REG") and tail-jumps to SYSCALL; the last sra executes in the jmpl
+ * delay slot.  Used below to adapt 32-bit compat syscall arguments.
+ */
+#define SIGN1(STUB,SYSCALL,REG1) \
+	.align 32; \
+	.globl STUB; \
+STUB:	sethi %hi(SYSCALL), %g1; \
+	jmpl %g1 + %lo(SYSCALL), %g0; \
+	sra REG1, 0, REG1
+
+#define SIGN2(STUB,SYSCALL,REG1,REG2) \
+	.align 32; \
+	.globl STUB; \
+STUB:	sethi %hi(SYSCALL), %g1; \
+	sra REG1, 0, REG1; \
+	jmpl %g1 + %lo(SYSCALL), %g0; \
+	sra REG2, 0, REG2
+
+#define SIGN3(STUB,SYSCALL,REG1,REG2,REG3) \
+	.align 32; \
+	.globl STUB; \
+STUB:	sra REG1, 0, REG1; \
+	sethi %hi(SYSCALL), %g1; \
+	sra REG2, 0, REG2; \
+	jmpl %g1 + %lo(SYSCALL), %g0; \
+	sra REG3, 0, REG3
+
+#define SIGN4(STUB,SYSCALL,REG1,REG2,REG3,REG4) \
+	.align 32; \
+	.globl STUB; \
+STUB:	sra REG1, 0, REG1; \
+	sethi %hi(SYSCALL), %g1; \
+	sra REG2, 0, REG2; \
+	sra REG3, 0, REG3; \
+	jmpl %g1 + %lo(SYSCALL), %g0; \
+	sra REG4, 0, REG4
+
+SIGN1(sys32_exit, sparc_exit, %o0)
+SIGN1(sys32_exit_group, sys_exit_group, %o0)
+SIGN1(sys32_wait4, compat_sys_wait4, %o2)
+SIGN1(sys32_creat, sys_creat, %o1)
+SIGN1(sys32_mknod, sys_mknod, %o1)
+SIGN1(sys32_perfctr, sys_perfctr, %o0)
+SIGN1(sys32_umount, sys_umount, %o1)
+SIGN1(sys32_signal, sys_signal, %o0)
+SIGN1(sys32_access, sys_access, %o1)
+SIGN1(sys32_msync, sys_msync, %o2)
+SIGN2(sys32_reboot, sys_reboot, %o0, %o1)
+SIGN1(sys32_setitimer, compat_sys_setitimer, %o0)
+SIGN1(sys32_getitimer, compat_sys_getitimer, %o0)
+SIGN1(sys32_sethostname, sys_sethostname, %o1)
+SIGN1(sys32_swapon, sys_swapon, %o1)
+SIGN1(sys32_sigaction, compat_sys_sigaction, %o0)
+SIGN1(sys32_rt_sigaction, compat_sys_rt_sigaction, %o0)
+SIGN1(sys32_sigprocmask, compat_sys_sigprocmask, %o0)
+SIGN1(sys32_rt_sigprocmask, compat_sys_rt_sigprocmask, %o0)
+SIGN2(sys32_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo, %o0, %o1)
+SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
+SIGN1(sys32_setxattr, sys_setxattr, %o4)
+SIGN1(sys32_lsetxattr, sys_lsetxattr, %o4)
+SIGN1(sys32_fsetxattr, sys_fsetxattr, %o4)
+SIGN1(sys32_fgetxattr, sys_fgetxattr, %o0)
+SIGN1(sys32_flistxattr, sys_flistxattr, %o0)
+SIGN1(sys32_fremovexattr, sys_fremovexattr, %o0)
+SIGN2(sys32_tkill, sys_tkill, %o0, %o1)
+SIGN1(sys32_epoll_create, sys_epoll_create, %o0)
+SIGN3(sys32_epoll_ctl, sys_epoll_ctl, %o0, %o1, %o2)
+SIGN3(sys32_epoll_wait, sys_epoll_wait, %o0, %o2, %o3)
+SIGN1(sys32_readahead, compat_sys_readahead, %o0)
+SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
+SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
+SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
+SIGN1(sys32_mlockall, sys_mlockall, %o0)
+SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
+SIGN1(sys32_clock_settime, compat_sys_clock_settime, %o1)
+SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
+SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
+SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
+SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
+SIGN1(sys32_select, compat_sys_select, %o0)
+SIGN1(sys32_mkdir, sys_mkdir, %o1)
+SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
+SIGN3(sys32_ipc, compat_sys_ipc, %o1, %o2, %o3)
+SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
+SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
+SIGN1(sys32_prctl, sys_prctl, %o0)
+SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0)
+SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2)
+SIGN1(sys32_getgroups, sys_getgroups, %o0)
+SIGN1(sys32_getpgid, sys_getpgid, %o0)
+SIGN2(sys32_getpriority, sys_getpriority, %o0, %o1)
+SIGN1(sys32_getsid, sys_getsid, %o0)
+SIGN2(sys32_kill, sys_kill, %o0, %o1)
+SIGN1(sys32_nice, sys_nice, %o0)
+SIGN1(sys32_lseek, sys_lseek, %o1)
+SIGN2(sys32_open, sparc32_open, %o1, %o2)
+SIGN1(sys32_readlink, sys_readlink, %o2)
+SIGN1(sys32_sched_get_priority_max, sys_sched_get_priority_max, %o0)
+SIGN1(sys32_sched_get_priority_min, sys_sched_get_priority_min, %o0)
+SIGN1(sys32_sched_getparam, sys_sched_getparam, %o0)
+SIGN1(sys32_sched_getscheduler, sys_sched_getscheduler, %o0)
+SIGN1(sys32_sched_setparam, sys_sched_setparam, %o0)
+SIGN2(sys32_sched_setscheduler, sys_sched_setscheduler, %o0, %o1)
+SIGN1(sys32_getdomainname, sys_getdomainname, %o1)
+SIGN1(sys32_setdomainname, sys_setdomainname, %o1)
+SIGN1(sys32_setgroups, sys_setgroups, %o0)
+SIGN2(sys32_setpgid, sys_setpgid, %o0, %o1)
+SIGN3(sys32_setpriority, sys_setpriority, %o0, %o1, %o2)
+SIGN1(sys32_ssetmask, sys_ssetmask, %o0)
+SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
+SIGN1(sys32_umask, sys_umask, %o0)
+SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
+SIGN1(sys32_sendto, sys_sendto, %o0)
+SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
+SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
+SIGN2(sys32_connect, sys_connect, %o0, %o2)
+SIGN2(sys32_bind, sys_bind, %o0, %o2)
+SIGN2(sys32_listen, sys_listen, %o0, %o1)
+SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
+SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1)
+SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2)
+SIGN1(sys32_getpeername, sys_getpeername, %o0)
+SIGN1(sys32_getsockname, sys_getsockname, %o0)
+
+	/* mmap2 takes its offset in 4096-byte pages: convert to a byte
+	 * offset (<< 12) in the delay slot and tail-call sys_mmap.
+	 */
+	.globl sys32_mmap2
+sys32_mmap2:
+	sethi	%hi(sys_mmap), %g1
+	jmpl	%g1 + %lo(sys_mmap), %g0
+	 sllx	%o5, 12, %o5
+
+ .align 32
+ .globl sys32_socketcall
+sys32_socketcall: /* %o0=call, %o1=args */
+ cmp %o0, 1
+ bl,pn %xcc, do_einval
+ cmp %o0, 17
+ bg,pn %xcc, do_einval
+ sub %o0, 1, %o0
+ sllx %o0, 5, %o0
+ sethi %hi(__socketcall_table_begin), %g2
+ or %g2, %lo(__socketcall_table_begin), %g2
+ jmpl %g2 + %o0, %g0
+ nop
+
+ /* Each entry is exactly 32 bytes. */
+ .align 32
+__socketcall_table_begin:
+do_sys_socket: /* sys_socket(int, int, int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_socket), %g1
+ ldswa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(sys_socket), %g0
+ ldswa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_bind), %g1
+ ldswa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(sys_bind), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_connect), %g1
+ ldswa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(sys_connect), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+do_sys_listen: /* sys_listen(int, int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_listen), %g1
+ jmpl %g1 + %lo(sys_listen), %g0
+ ldswa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+ nop
+do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_accept), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(sys_accept), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_getsockname), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(sys_getsockname), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_getpeername), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(sys_getpeername), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_socketpair), %g1
+ ldswa [%o1 + 0x8] %asi, %o2
+ lduwa [%o1 + 0xc] %asi, %o3
+ jmpl %g1 + %lo(sys_socketpair), %g0
+ ldswa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_send), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ lduwa [%o1 + 0xc] %asi, %o3
+ jmpl %g1 + %lo(sys_send), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_recv), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ lduwa [%o1 + 0xc] %asi, %o3
+ jmpl %g1 + %lo(sys_recv), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_sendto), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ lduwa [%o1 + 0xc] %asi, %o3
+ lduwa [%o1 + 0x10] %asi, %o4
+ ldswa [%o1 + 0x14] %asi, %o5
+ jmpl %g1 + %lo(sys_sendto), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_recvfrom), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ lduwa [%o1 + 0xc] %asi, %o3
+ lduwa [%o1 + 0x10] %asi, %o4
+ lduwa [%o1 + 0x14] %asi, %o5
+ jmpl %g1 + %lo(sys_recvfrom), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+do_sys_shutdown: /* sys_shutdown(int, int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(sys_shutdown), %g1
+ jmpl %g1 + %lo(sys_shutdown), %g0
+ ldswa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+ nop
+do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(compat_sys_setsockopt), %g1
+ ldswa [%o1 + 0x8] %asi, %o2
+ lduwa [%o1 + 0xc] %asi, %o3
+ ldswa [%o1 + 0x10] %asi, %o4
+ jmpl %g1 + %lo(compat_sys_setsockopt), %g0
+ ldswa [%o1 + 0x4] %asi, %o1
+ nop
+do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(compat_sys_getsockopt), %g1
+ ldswa [%o1 + 0x8] %asi, %o2
+ lduwa [%o1 + 0xc] %asi, %o3
+ lduwa [%o1 + 0x10] %asi, %o4
+ jmpl %g1 + %lo(compat_sys_getsockopt), %g0
+ ldswa [%o1 + 0x4] %asi, %o1
+ nop
+do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(compat_sys_sendmsg), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(compat_sys_sendmsg), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
+ ldswa [%o1 + 0x0] %asi, %o0
+ sethi %hi(compat_sys_recvmsg), %g1
+ lduwa [%o1 + 0x8] %asi, %o2
+ jmpl %g1 + %lo(compat_sys_recvmsg), %g0
+ lduwa [%o1 + 0x4] %asi, %o1
+ nop
+ nop
+ nop
+__socketcall_table_end:
+
+do_einval:
+ retl
+ mov -EINVAL, %o0
+do_efault:
+ retl
+ mov -EFAULT, %o0
+
+ .section __ex_table
+ .align 4
+ .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault
+ .previous
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
new file mode 100644
index 00000000000..0077f02f4b3
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -0,0 +1,723 @@
+/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
+ * linux/arch/sparc64/kernel/sys_sparc.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/sparc
+ * platform.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/utsname.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/ipc.h>
+#include <linux/personality.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+#include <asm/utrap.h>
+#include <asm/perfctr.h>
+
+/* #define DEBUG_UNIMP_SYSCALL */
+
+/* XXX Make this per-binary type, this way we can detect the type of
+ * XXX a binary. Every Sparc executable calls this very early on.
+ */
+/* Return the MMU page size to userland (see XXX note above). */
+asmlinkage unsigned long sys_getpagesize(void)
+{
+	return PAGE_SIZE;
+}
+
+#define COLOUR_ALIGN(addr,pgoff) \
+ ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
+ (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+/*
+ * Find a free virtual range of 'len' bytes for a new mapping.
+ *
+ * Shared or file-backed mappings are colour-aligned to SHMLBA (via
+ * COLOUR_ALIGN above) so aliases land in the same cache colour; MAP_FIXED
+ * requests are only validated against that constraint.  32-bit tasks are
+ * limited to a 0xf0000000 task size.  The search starts at the cached
+ * free_area_cache hint and falls back to TASK_UNMAPPED_BASE once.
+ */
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct * vma;
+	unsigned long task_size = TASK_SIZE;
+	unsigned long start_addr;
+	int do_color_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = 0xf0000000UL;
+	if (len > task_size || len > -PAGE_OFFSET)
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+
+	/* First honour the caller's address hint if the range is free. */
+	if (addr) {
+		if (do_color_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	start_addr = addr = mm->free_area_cache;
+
+	task_size -= len;
+
+full_search:
+	if (do_color_align)
+		addr = COLOUR_ALIGN(addr, pgoff);
+	else
+		addr = PAGE_ALIGN(addr);
+
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point:  (!vma || addr < vma->vm_end). */
+		/* Presumably stepping over the sparc64 VA hole: if the
+		 * mapping would cross -PAGE_OFFSET, restart the search
+		 * above PAGE_OFFSET — TODO confirm against head.S layout.
+		 */
+		if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
+			addr = PAGE_OFFSET;
+			vma = find_vma(mm, PAGE_OFFSET);
+		}
+		if (task_size < addr) {
+			/* Exhausted from the hint onward: retry once from
+			 * TASK_UNMAPPED_BASE before giving up.
+			 */
+			if (start_addr != TASK_UNMAPPED_BASE) {
+				start_addr = addr = TASK_UNMAPPED_BASE;
+				goto full_search;
+			}
+			return -ENOMEM;
+		}
+		if (!vma || addr + len <= vma->vm_start) {
+			/*
+			 * Remember the place where we stopped the search:
+			 */
+			mm->free_area_cache = addr + len;
+			return addr;
+		}
+		addr = vma->vm_end;
+		if (do_color_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+	}
+}
+
+/* Try to align mapping such that we align it as much as possible. */
+unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ unsigned long align_goal, addr = -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+ /* Ok, don't mess with it. */
+ return get_unmapped_area(NULL, addr, len, pgoff, flags);
+ }
+ flags &= ~MAP_SHARED;
+
+ align_goal = PAGE_SIZE;
+ if (len >= (4UL * 1024 * 1024))
+ align_goal = (4UL * 1024 * 1024);
+ else if (len >= (512UL * 1024))
+ align_goal = (512UL * 1024);
+ else if (len >= (64UL * 1024))
+ align_goal = (64UL * 1024);
+
+ do {
+ addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+ if (!(addr & ~PAGE_MASK)) {
+ addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
+ break;
+ }
+
+ if (align_goal == (4UL * 1024 * 1024))
+ align_goal = (512UL * 1024);
+ else if (align_goal == (512UL * 1024))
+ align_goal = (64UL * 1024);
+ else
+ align_goal = PAGE_SIZE;
+ } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
+
+ /* Mapping is smaller than 64K or larger areas could not
+ * be obtained.
+ */
+ if (addr & ~PAGE_MASK)
+ addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
+
+ return addr;
+}
+
+/*
+ * sparc64 brk(2) wrapper: reject break values a 32-bit task could not
+ * address, and refuse to move the break across the PAGE_OFFSET boundary;
+ * both cases return the current (unchanged) break like sys_brk does.
+ */
+asmlinkage unsigned long sparc_brk(unsigned long brk)
+{
+	/* People could try to be nasty and use ta 0x6d in 32bit programs */
+	if (test_thread_flag(TIF_32BIT) &&
+	    brk >= 0xf0000000UL)
+		return current->mm->brk;
+
+	if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
+		return current->mm->brk;
+	return sys_brk(brk);
+}
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ *
+ * Sparc convention: the read fd is the syscall return value and the
+ * write fd is handed back in the second return register (%i1, stored
+ * via u_regs[UREG_I1]).
+ */
+asmlinkage long sparc_pipe(struct pt_regs *regs)
+{
+	int fd[2];
+	int error;
+
+	error = do_pipe(fd);
+	if (error)
+		goto out;
+	regs->u_regs[UREG_I1] = fd[1];
+	error = fd[0];
+out:
+	return error;
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+
+/*
+ * De-multiplex the SysV IPC calls into the individual sem/msg/shm
+ * syscalls.  The call numbers are grouped: <= SEMCTL are semaphore ops,
+ * <= MSGCTL message-queue ops, <= SHMCTL shared-memory ops.  All *CTL
+ * variants force IPC_64 since this ABI starts fresh (no old-struct
+ * compatibility needed, per the comment below).
+ */
+asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
+			unsigned long third, void __user *ptr, long fifth)
+{
+	int err;
+
+	/* No need for backward compatibility. We can start fresh... */
+	if (call <= SEMCTL) {
+		switch (call) {
+		case SEMOP:
+			/* SEMOP is SEMTIMEDOP without a timeout. */
+			err = sys_semtimedop(first, ptr,
+					     (unsigned)second, NULL);
+			goto out;
+		case SEMTIMEDOP:
+			err = sys_semtimedop(first, ptr, (unsigned)second,
+				(const struct timespec __user *) fifth);
+			goto out;
+		case SEMGET:
+			err = sys_semget(first, (int)second, (int)third);
+			goto out;
+		case SEMCTL: {
+			/* 'ptr' points at a userland union semun. */
+			union semun fourth;
+			err = -EINVAL;
+			if (!ptr)
+				goto out;
+			err = -EFAULT;
+			if (get_user(fourth.__pad,
+				     (void __user * __user *) ptr))
+				goto out;
+			err = sys_semctl(first, (int)second | IPC_64,
+					 (int)third, fourth);
+			goto out;
+		}
+		default:
+			err = -ENOSYS;
+			goto out;
+		};
+	}
+	if (call <= MSGCTL) {
+		switch (call) {
+		case MSGSND:
+			err = sys_msgsnd(first, ptr, (size_t)second,
+					 (int)third);
+			goto out;
+		case MSGRCV:
+			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
+					 (int)third);
+			goto out;
+		case MSGGET:
+			err = sys_msgget((key_t)first, (int)second);
+			goto out;
+		case MSGCTL:
+			err = sys_msgctl(first, (int)second | IPC_64, ptr);
+			goto out;
+		default:
+			err = -ENOSYS;
+			goto out;
+		};
+	}
+	if (call <= SHMCTL) {
+		switch (call) {
+		case SHMAT: {
+			/* Attach, then write the resulting address back to
+			 * the user pointer passed in 'third'.
+			 */
+			ulong raddr;
+			err = do_shmat(first, ptr, (int)second, &raddr);
+			if (!err) {
+				if (put_user(raddr,
+					     (ulong __user *) third))
+					err = -EFAULT;
+			}
+			goto out;
+		}
+		case SHMDT:
+			err = sys_shmdt(ptr);
+			goto out;
+		case SHMGET:
+			err = sys_shmget(first, (size_t)second, (int)third);
+			goto out;
+		case SHMCTL:
+			err = sys_shmctl(first, (int)second | IPC_64, ptr);
+			goto out;
+		default:
+			err = -ENOSYS;
+			goto out;
+		};
+	} else {
+		err = -ENOSYS;
+	}
+out:
+	return err;
+}
+
+/*
+ * uname(2) wrapper: for PER_LINUX32 tasks report the machine as "sparc"
+ * (rather than the native 64-bit name) so 32-bit userland sees itself.
+ */
+asmlinkage long sparc64_newuname(struct new_utsname __user *name)
+{
+	int ret = sys_newuname(name);
+
+	if (current->personality == PER_LINUX32 && !ret) {
+		ret = (copy_to_user(name->machine, "sparc\0\0", 8)
+		       ? -EFAULT : 0);
+	}
+	return ret;
+}
+
+/*
+ * personality(2) wrapper: a PER_LINUX32 task asking for PER_LINUX stays
+ * PER_LINUX32 internally, and PER_LINUX32 is reported back to it as
+ * PER_LINUX, so 32-bit tasks can never escape their compat personality.
+ */
+asmlinkage long sparc64_personality(unsigned long personality)
+{
+	int ret;
+
+	if (current->personality == PER_LINUX32 &&
+	    personality == PER_LINUX)
+		personality = PER_LINUX32;
+	ret = sys_personality(personality);
+	if (ret == PER_LINUX32)
+		ret = PER_LINUX;
+
+	return ret;
+}
+
+/* Linux version of mmap */
+/*
+ * Validate the address range against the task's addressable space
+ * (0xf0000000 cap for 32-bit tasks; the PAGE_OFFSET/-PAGE_OFFSET window
+ * for 64-bit tasks) and hand off to do_mmap() under mmap_sem.
+ */
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags, unsigned long fd,
+	unsigned long off)
+{
+	struct file * file = NULL;
+	unsigned long retval = -EBADF;
+
+	if (!(flags & MAP_ANONYMOUS)) {
+		file = fget(fd);
+		if (!file)
+			goto out;
+	}
+	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+	len = PAGE_ALIGN(len);
+	retval = -EINVAL;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		if (len > 0xf0000000UL ||
+		    ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
+			goto out_putf;
+	} else {
+		if (len > -PAGE_OFFSET ||
+		    ((flags & MAP_FIXED) &&
+		     addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+			goto out_putf;
+	}
+
+	down_write(&current->mm->mmap_sem);
+	retval = do_mmap(file, addr, len, prot, flags, off);
+	up_write(&current->mm->mmap_sem);
+
+out_putf:
+	if (file)
+		fput(file);
+out:
+	return retval;
+}
+
+/*
+ * munmap(2) wrapper: reject ranges that exceed or cross into the
+ * [-PAGE_OFFSET, PAGE_OFFSET) forbidden window before unmapping.
+ */
+asmlinkage long sys64_munmap(unsigned long addr, size_t len)
+{
+	long ret;
+
+	if (len > -PAGE_OFFSET ||
+	    (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
+		return -EINVAL;
+	down_write(&current->mm->mmap_sem);
+	ret = do_munmap(current->mm, addr, len);
+	up_write(&current->mm->mmap_sem);
+	return ret;
+}
+
+extern unsigned long do_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+
+/*
+ * mremap(2) wrapper for 64-bit tasks (32-bit tasks get -EINVAL and use
+ * their own compat path).  Rejects old/new ranges that cross the
+ * forbidden address window; when a non-MAYMOVE-safe grow would cross
+ * it, finds a fresh target area itself and converts the request into a
+ * MREMAP_FIXED move before calling do_mremap().
+ */
+asmlinkage unsigned long sys64_mremap(unsigned long addr,
+	unsigned long old_len, unsigned long new_len,
+	unsigned long flags, unsigned long new_addr)
+{
+	struct vm_area_struct *vma;
+	unsigned long ret = -EINVAL;
+	if (test_thread_flag(TIF_32BIT))
+		goto out;
+	if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
+		goto out;
+	if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
+		goto out;
+	down_write(&current->mm->mmap_sem);
+	if (flags & MREMAP_FIXED) {
+		if (new_addr < PAGE_OFFSET &&
+		    new_addr + new_len > -PAGE_OFFSET)
+			goto out_sem;
+	} else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
+		/* Growing in place would cross the forbidden window:
+		 * pick a new area (preserving MAP_SHARED for shared vmas)
+		 * and turn this into a fixed move.
+		 */
+		unsigned long map_flags = 0;
+		struct file *file = NULL;
+
+		ret = -ENOMEM;
+		if (!(flags & MREMAP_MAYMOVE))
+			goto out_sem;
+
+		vma = find_vma(current->mm, addr);
+		if (vma) {
+			if (vma->vm_flags & VM_SHARED)
+				map_flags |= MAP_SHARED;
+			file = vma->vm_file;
+		}
+
+		/* MREMAP_FIXED checked above. */
+		new_addr = get_unmapped_area(file, addr, new_len,
+					     vma ? vma->vm_pgoff : 0,
+					     map_flags);
+		ret = new_addr;
+		if (new_addr & ~PAGE_MASK)
+			goto out_sem;
+		flags |= MREMAP_FIXED;
+	}
+	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+out_sem:
+	up_write(&current->mm->mmap_sem);
+out:
+	return ret;
+}
+
+/* we come to here via sys_nis_syscall so it can setup the regs argument */
+/*
+ * Catch-all for unimplemented SPARC syscalls: log the syscall number
+ * (in %g1, u_regs[1]) at most five times per boot, then go silent.
+ */
+asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
+{
+	static int count;
+
+	/* Don't make the system unusable, if someone goes stuck */
+	if (count++ > 5)
+		return -ENOSYS;
+
+	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
+#ifdef DEBUG_UNIMP_SYSCALL
+	show_regs (regs);
+#endif
+
+	return -ENOSYS;
+}
+
+/* #define DEBUG_SPARC_BREAKPOINT */
+
+/*
+ * Software breakpoint trap handler: truncate the PC/nPC to 32 bits for
+ * compat tasks and deliver SIGTRAP with TRAP_BRKPT siginfo pointing at
+ * the trapping PC.
+ */
+asmlinkage void sparc_breakpoint(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+#ifdef DEBUG_SPARC_BREAKPOINT
+        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
+#endif
+	info.si_signo = SIGTRAP;
+	info.si_errno = 0;
+	info.si_code = TRAP_BRKPT;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = 0;
+	force_sig_info(SIGTRAP, &info, current);
+#ifdef DEBUG_SPARC_BREAKPOINT
+	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
+#endif
+}
+
+extern void check_pending(int signum);
+
+/*
+ * Copy the NIS domain name to userland, at most 'len' bytes, holding
+ * uts_sem for a consistent snapshot.
+ *
+ * NOTE(review): when len < nlen the copy is truncated without a NUL
+ * terminator — historical behavior callers apparently tolerate.
+ */
+asmlinkage long sys_getdomainname(char __user *name, int len)
+{
+        int nlen;
+	int err = -EFAULT;
+
+ 	down_read(&uts_sem);
+ 	
+	nlen = strlen(system_utsname.domainname) + 1;
+
+        if (nlen < len)
+                len = nlen;
+	if (len > __NEW_UTS_LEN)
+		goto done;
+	if (copy_to_user(name, system_utsname.domainname, len))
+		goto done;
+	err = 0;
+done:
+	up_read(&uts_sem);
+	return err;
+}
+
+/*
+ * Trap entry for Solaris syscalls when the solaris emulation module is
+ * not loaded: step the PC past the trap instruction, nag (at most five
+ * times) about the missing module, and kill the task with SIGSEGV.
+ */
+asmlinkage long solaris_syscall(struct pt_regs *regs)
+{
+	static int count;
+
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	if (++count <= 5) {
+		printk ("For Solaris binary emulation you need solaris module loaded\n");
+		show_regs (regs);
+	}
+	send_sig(SIGSEGV, current, 1);
+
+	return -ENOSYS;
+}
+
+#ifndef CONFIG_SUNOS_EMUL
+/*
+ * Stub used when SunOS emulation is compiled out: advance the PC past
+ * the trap, warn (at most twenty times), and kill the task with SIGSEGV.
+ */
+asmlinkage long sunos_syscall(struct pt_regs *regs)
+{
+	static int count;
+
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	if (++count <= 20)
+		printk ("SunOS binary emulation not compiled in\n");
+	force_sig(SIGSEGV, current);
+
+	return -ENOSYS;
+}
+#endif
+
+/*
+ * Install or query a user trap handler (SPARC V9 utraps).
+ *
+ * The per-thread utraps array is lazily allocated; utraps[0] is a
+ * reference count so the table can be shared, and a copy-on-write copy
+ * is made before modifying a shared table.  Passing UTH_NOCHANGE as the
+ * new handler turns this into a pure query of the old handler(s).
+ * Deferred-trap handlers (new_d/old_d) are not supported: old_d always
+ * reads back NULL.
+ */
+asmlinkage long sys_utrap_install(utrap_entry_t type,
+				  utrap_handler_t new_p,
+				  utrap_handler_t new_d,
+				  utrap_handler_t __user *old_p,
+				  utrap_handler_t __user *old_d)
+{
+	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
+		return -EINVAL;
+	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
+		/* Query only: report the current handler without changing it. */
+		if (old_p) {
+			if (!current_thread_info()->utraps) {
+				if (put_user(NULL, old_p))
+					return -EFAULT;
+			} else {
+				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
+					return -EFAULT;
+			}
+		}
+		if (old_d) {
+			if (put_user(NULL, old_d))
+				return -EFAULT;
+		}
+		return 0;
+	}
+	if (!current_thread_info()->utraps) {
+		/* First handler for this thread: allocate a fresh table
+		 * with refcount 1 in slot 0 and all handlers cleared.
+		 */
+		current_thread_info()->utraps =
+			kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
+		if (!current_thread_info()->utraps)
+			return -ENOMEM;
+		current_thread_info()->utraps[0] = 1;
+		memset(current_thread_info()->utraps+1, 0,
+		       UT_TRAP_INSTRUCTION_31*sizeof(long));
+	} else {
+		/* Table shared with another thread and we are actually
+		 * changing an entry: copy-on-write a private table first.
+		 */
+		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
+		    current_thread_info()->utraps[0] > 1) {
+			long *p = current_thread_info()->utraps;
+
+			current_thread_info()->utraps =
+				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
+					GFP_KERNEL);
+			if (!current_thread_info()->utraps) {
+				current_thread_info()->utraps = p;
+				return -ENOMEM;
+			}
+			p[0]--;
+			current_thread_info()->utraps[0] = 1;
+			memcpy(current_thread_info()->utraps+1, p+1,
+			       UT_TRAP_INSTRUCTION_31*sizeof(long));
+		}
+	}
+	if (old_p) {
+		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
+			return -EFAULT;
+	}
+	if (old_d) {
+		if (put_user(NULL, old_d))
+			return -EFAULT;
+	}
+	current_thread_info()->utraps[type] = (long)new_p;
+
+	return 0;
+}
+
+/*
+ * Set the SPARC V9 memory model (0-2, presumably TSO/PSO/RMO — see the
+ * V9 spec) in the TSTATE.MM field (bits 14-15) of the saved state.
+ */
+long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
+{
+	if (model >= 3)
+		return -EINVAL;
+	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
+	return 0;
+}
+
+/*
+ * sparc64 rt_sigaction: same as the generic syscall plus an extra
+ * 'restorer' argument stashed in k_sigaction.ka_restorer for signal
+ * return trampolining.
+ */
+asmlinkage long sys_rt_sigaction(int sig,
+				 const struct sigaction __user *act,
+				 struct sigaction __user *oact,
+				 void __user *restorer,
+				 size_t sigsetsize)
+{
+	struct k_sigaction new_ka, old_ka;
+	int ret;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (act) {
+		new_ka.ka_restorer = restorer;
+		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
+			return -EFAULT;
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+/* Invoked by rtrap code to update performance counters in
+ * user space.
+ *
+ * Folds the current %pic halves into the per-thread accumulators and
+ * mirrors them to the user-visible counter words, then clears %pic.
+ * The __put_user results are deliberately ignored (best effort on the
+ * return-to-user path).
+ */
+asmlinkage void update_perfctrs(void)
+{
+	unsigned long pic, tmp;
+
+	read_pic(pic);
+	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
+	__put_user(tmp, current_thread_info()->user_cntd0);
+	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
+	__put_user(tmp, current_thread_info()->user_cntd1);
+	reset_pic();
+}
+
+/*
+ * Control the UltraSPARC performance counters for the current thread.
+ * PERFCTR_ON arms counting (TIF_PERFCTR) with user-space mirror words;
+ * all other opcodes require counting to already be armed and return
+ * -EINVAL otherwise.
+ */
+asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
+{
+	int err = 0;
+
+	switch(opcode) {
+	case PERFCTR_ON:
+		/* arg0/arg1: user addresses for the two counter mirrors,
+		 * arg2: PCR value to program.
+		 */
+		current_thread_info()->pcr_reg = arg2;
+		current_thread_info()->user_cntd0 = (u64 __user *) arg0;
+		current_thread_info()->user_cntd1 = (u64 __user *) arg1;
+		current_thread_info()->kernel_cntd0 =
+			current_thread_info()->kernel_cntd1 = 0;
+		write_pcr(arg2);
+		reset_pic();
+		set_thread_flag(TIF_PERFCTR);
+		break;
+
+	case PERFCTR_OFF:
+		err = -EINVAL;
+		if (test_thread_flag(TIF_PERFCTR)) {
+			current_thread_info()->user_cntd0 =
+				current_thread_info()->user_cntd1 = NULL;
+			current_thread_info()->pcr_reg = 0;
+			write_pcr(0);
+			clear_thread_flag(TIF_PERFCTR);
+			err = 0;
+		}
+		break;
+
+	case PERFCTR_READ: {
+		/* Accumulate %pic into the kernel counters and push the
+		 * totals out to the user mirror words.
+		 */
+		unsigned long pic, tmp;
+
+		if (!test_thread_flag(TIF_PERFCTR)) {
+			err = -EINVAL;
+			break;
+		}
+		read_pic(pic);
+		tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
+		err |= __put_user(tmp, current_thread_info()->user_cntd0);
+		tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
+		err |= __put_user(tmp, current_thread_info()->user_cntd1);
+		reset_pic();
+		break;
+	}
+
+	case PERFCTR_CLRPIC:
+		if (!test_thread_flag(TIF_PERFCTR)) {
+			err = -EINVAL;
+			break;
+		}
+		current_thread_info()->kernel_cntd0 =
+			current_thread_info()->kernel_cntd1 = 0;
+		reset_pic();
+		break;
+
+	case PERFCTR_SETPCR: {
+		/* Reprogram the PCR from *arg0 and restart counting. */
+		u64 __user *user_pcr = (u64 __user *)arg0;
+
+		if (!test_thread_flag(TIF_PERFCTR)) {
+			err = -EINVAL;
+			break;
+		}
+		err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
+		write_pcr(current_thread_info()->pcr_reg);
+		current_thread_info()->kernel_cntd0 =
+			current_thread_info()->kernel_cntd1 = 0;
+		reset_pic();
+		break;
+	}
+
+	case PERFCTR_GETPCR: {
+		u64 __user *user_pcr = (u64 __user *)arg0;
+
+		if (!test_thread_flag(TIF_PERFCTR)) {
+			err = -EINVAL;
+			break;
+		}
+		err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
+		break;
+	}
+
+	default:
+		err = -EINVAL;
+		break;
+	};
+	return err;
+}
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
new file mode 100644
index 00000000000..567c91c77b2
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -0,0 +1,1118 @@
+/* $Id: sys_sparc32.c,v 1.184 2002/02/09 19:49:31 davem Exp $
+ * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/quota.h>
+#include <linux/module.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/filter.h>
+#include <linux/highmem.h>
+#include <linux/highuid.h>
+#include <linux/mman.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/icmpv6.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/binfmts.h>
+#include <linux/dnotify.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+#include <linux/vfs.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/ptrace.h>
+#include <linux/highuid.h>
+
+#include <asm/types.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+#include <asm/fpumacro.h>
+#include <asm/semaphore.h>
+#include <asm/mmu_context.h>
+
+/* Legacy 16-bit uid/gid syscall entry points for 32-bit tasks.
+ * Each widens its u16 ids via low2high{uid,gid}() (which maps the
+ * old 0xffff "no change" sentinel to -1) and calls the native
+ * 64-bit syscall implementation.
+ */
+asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
+{
+ return sys_chown(filename, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
+{
+ return sys_lchown(filename, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
+{
+ return sys_fchown(fd, low2highuid(user), low2highgid(group));
+}
+
+asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
+{
+ return sys_setregid(low2highgid(rgid), low2highgid(egid));
+}
+
+asmlinkage long sys32_setgid16(u16 gid)
+{
+ return sys_setgid((gid_t)gid);
+}
+
+asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
+{
+ return sys_setreuid(low2highuid(ruid), low2highuid(euid));
+}
+
+asmlinkage long sys32_setuid16(u16 uid)
+{
+ return sys_setuid((uid_t)uid);
+}
+
+asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
+{
+ return sys_setresuid(low2highuid(ruid), low2highuid(euid),
+ low2highuid(suid));
+}
+
+/* Write the current real/effective/saved uids, narrowed to 16 bits,
+ * to the three user pointers; the short-circuit chain stops at the
+ * first faulting put_user() and returns its error.
+ */
+asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
+{
+ int retval;
+
+ if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
+ !(retval = put_user(high2lowuid(current->euid), euid)))
+ retval = put_user(high2lowuid(current->suid), suid);
+
+ return retval;
+}
+
+asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
+{
+ return sys_setresgid(low2highgid(rgid), low2highgid(egid),
+ low2highgid(sgid));
+}
+
+/* gid counterpart of sys32_getresuid16(). */
+asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
+{
+ int retval;
+
+ if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
+ !(retval = put_user(high2lowgid(current->egid), egid)))
+ retval = put_user(high2lowgid(current->sgid), sgid);
+
+ return retval;
+}
+
+asmlinkage long sys32_setfsuid16(u16 uid)
+{
+ return sys_setfsuid((uid_t)uid);
+}
+
+asmlinkage long sys32_setfsgid16(u16 gid)
+{
+ return sys_setfsgid((gid_t)gid);
+}
+
+/* Copy a kernel group_info out to userspace as an array of 16-bit
+ * gids, one element at a time.  Returns 0 or -EFAULT.  Gids above
+ * 16 bits are silently truncated by the (u16) cast.
+ */
+static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
+{
+ int i;
+ u16 group;
+
+ for (i = 0; i < group_info->ngroups; i++) {
+ group = (u16)GROUP_AT(group_info, i);
+ if (put_user(group, grouplist+i))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* Inverse of groups16_to_user(): read ngroups 16-bit gids from
+ * userspace into group_info.  Returns 0 or -EFAULT.
+ */
+static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
+{
+ int i;
+ u16 group;
+
+ for (i = 0; i < group_info->ngroups; i++) {
+ if (get_user(group, grouplist+i))
+ return -EFAULT;
+ GROUP_AT(group_info, i) = (gid_t)group;
+ }
+
+ return 0;
+}
+
+/* 16-bit getgroups(): gidsetsize == 0 means "just report the count".
+ * The task's group_info is pinned with get_group_info() while we copy
+ * it out, and released on every exit path via the out label.
+ */
+asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
+{
+ int i;
+
+ if (gidsetsize < 0)
+ return -EINVAL;
+
+ get_group_info(current->group_info);
+ i = current->group_info->ngroups;
+ if (gidsetsize) {
+ if (i > gidsetsize) {
+ i = -EINVAL;
+ goto out;
+ }
+ if (groups16_to_user(grouplist, current->group_info)) {
+ i = -EFAULT;
+ goto out;
+ }
+ }
+out:
+ put_group_info(current->group_info);
+ return i;
+}
+
+/* 16-bit setgroups(): requires CAP_SETGID.  Builds a fresh group_info
+ * from the user's 16-bit list and installs it with set_current_groups();
+ * our allocation reference is dropped in all cases.
+ */
+asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
+{
+ struct group_info *group_info;
+ int retval;
+
+ if (!capable(CAP_SETGID))
+ return -EPERM;
+ if ((unsigned)gidsetsize > NGROUPS_MAX)
+ return -EINVAL;
+
+ group_info = groups_alloc(gidsetsize);
+ if (!group_info)
+ return -ENOMEM;
+ retval = groups16_from_user(group_info, grouplist);
+ if (retval) {
+ put_group_info(group_info);
+ return retval;
+ }
+
+ retval = set_current_groups(group_info);
+ put_group_info(group_info);
+
+ return retval;
+}
+
+/* 16-bit id getters: return the current credential narrowed with
+ * high2low{uid,gid}() (ids that do not fit map to the overflow id).
+ */
+asmlinkage long sys32_getuid16(void)
+{
+ return high2lowuid(current->uid);
+}
+
+asmlinkage long sys32_geteuid16(void)
+{
+ return high2lowuid(current->euid);
+}
+
+asmlinkage long sys32_getgid16(void)
+{
+ return high2lowgid(current->gid);
+}
+
+asmlinkage long sys32_getegid16(void)
+{
+ return high2lowgid(current->egid);
+}
+
+/* 32-bit timeval and related flotsam. */
+
+/* Read a 32-bit timeval from userspace into a native one.  Returns
+ * nonzero on any fault: the access_ok() check and the two __get_user()
+ * results are OR-combined rather than short-circuited.
+ */
+static long get_tv32(struct timeval *o, struct compat_timeval __user *i)
+{
+ return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+ (__get_user(o->tv_sec, &i->tv_sec) |
+ __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+/* Write a native timeval to a 32-bit user timeval; nonzero on fault. */
+static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
+{
+ return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+ (__put_user(i->tv_sec, &o->tv_sec) |
+ __put_user(i->tv_usec, &o->tv_usec)));
+}
+
+#ifdef CONFIG_SYSVIPC
+/* 32-bit System V IPC multiplexer.  The high 16 bits of 'call' carry
+ * an interface version (old backward-compat hack); the low 16 bits
+ * select the operation.  Ids and keys arrive zero-extended in u32s,
+ * so each case sign-extends them back with an (int) cast before
+ * calling the native or compat handler.
+ */
+asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
+{
+ int version;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ switch (call) {
+ case SEMTIMEDOP:
+ if (fifth)
+ /* sign extend semid */
+ return compat_sys_semtimedop((int)first,
+ compat_ptr(ptr), second,
+ compat_ptr(fifth));
+ /* else fall through for normal semop() */
+ case SEMOP:
+ /* struct sembuf is the same on 32 and 64bit :)) */
+ /* sign extend semid */
+ return sys_semtimedop((int)first, compat_ptr(ptr), second,
+ NULL);
+ case SEMGET:
+ /* sign extend key, nsems */
+ return sys_semget((int)first, (int)second, third);
+ case SEMCTL:
+ /* sign extend semid, semnum */
+ return compat_sys_semctl((int)first, (int)second, third,
+ compat_ptr(ptr));
+
+ case MSGSND:
+ /* sign extend msqid */
+ return compat_sys_msgsnd((int)first, (int)second, third,
+ compat_ptr(ptr));
+ case MSGRCV:
+ /* sign extend msqid, msgtyp */
+ return compat_sys_msgrcv((int)first, second, (int)fifth,
+ third, version, compat_ptr(ptr));
+ case MSGGET:
+ /* sign extend key */
+ return sys_msgget((int)first, second);
+ case MSGCTL:
+ /* sign extend msqid */
+ return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
+
+ case SHMAT:
+ /* sign extend shmid */
+ return compat_sys_shmat((int)first, second, third, version,
+ compat_ptr(ptr));
+ case SHMDT:
+ return sys_shmdt(compat_ptr(ptr));
+ case SHMGET:
+ /* sign extend key_t */
+ return sys_shmget((int)first, second, third);
+ case SHMCTL:
+ /* sign extend shmid */
+ return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
+
+ default:
+ return -ENOSYS;
+ };
+
+ /* Not reached: every case above returns. */
+ return -ENOSYS;
+}
+#endif
+
+/* 64-bit truncate for 32-bit callers: the length arrives split into
+ * two registers.  A negative high word would produce a negative
+ * length after assembly, so reject it up front.
+ */
+asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
+{
+ if ((int)high < 0)
+ return -EINVAL;
+ else
+ return sys_truncate(path, (high << 32) | low);
+}
+
+/* Same as sys32_truncate64() but operating on an open fd. */
+asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+{
+ if ((int)high < 0)
+ return -EINVAL;
+ else
+ return sys_ftruncate(fd, (high << 32) | low);
+}
+
+/* Convert a kernel kstat into the 32-bit compat_stat layout.
+ * Sizes beyond MAX_NON_LFS or device numbers that do not fit the
+ * old 16-bit encoding yield -EOVERFLOW rather than silent
+ * truncation.  put_user/__put_user results are OR-accumulated, so
+ * any fault makes the return nonzero.
+ */
+int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
+{
+ int err;
+
+ if (stat->size > MAX_NON_LFS || !old_valid_dev(stat->dev) ||
+ !old_valid_dev(stat->rdev))
+ return -EOVERFLOW;
+
+ err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
+ err |= put_user(stat->ino, &statbuf->st_ino);
+ err |= put_user(stat->mode, &statbuf->st_mode);
+ err |= put_user(stat->nlink, &statbuf->st_nlink);
+ err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
+ err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
+ err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
+ err |= put_user(stat->size, &statbuf->st_size);
+ err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
+ err |= put_user(0, &statbuf->__unused1);
+ err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
+ err |= put_user(0, &statbuf->__unused2);
+ err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
+ err |= put_user(0, &statbuf->__unused3);
+ err |= put_user(stat->blksize, &statbuf->st_blksize);
+ err |= put_user(stat->blocks, &statbuf->st_blocks);
+ err |= put_user(0, &statbuf->__unused4[0]);
+ err |= put_user(0, &statbuf->__unused4[1]);
+
+ return err;
+}
+
+/* sysfs(2) takes only integers, so the compat wrapper is a direct
+ * pass-through (u32 args zero-extend into the native parameters).
+ */
+asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
+{
+ return sys_sysfs(option, arg1, arg2);
+}
+
+/* 32-bit layout of struct sysinfo as seen by compat userspace. */
+struct sysinfo32 {
+ s32 uptime;
+ u32 loads[3];
+ u32 totalram;
+ u32 freeram;
+ u32 sharedram;
+ u32 bufferram;
+ u32 totalswap;
+ u32 freeswap;
+ unsigned short procs;
+ unsigned short pad;
+ u32 totalhigh;
+ u32 freehigh;
+ u32 mem_unit;
+ char _f[20-2*sizeof(int)-sizeof(int)];
+};
+
+/* Compat sysinfo(2): run the native syscall against a kernel buffer
+ * (the set_fs(KERNEL_DS) window lets sys_sysinfo write to &s), then
+ * translate into the 32-bit layout.
+ */
+asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
+{
+ struct sysinfo s;
+ int ret, err;
+ int bitcount = 0;
+ mm_segment_t old_fs = get_fs ();
+
+ set_fs(KERNEL_DS);
+ ret = sys_sysinfo((struct sysinfo __user *) &s);
+ set_fs(old_fs);
+ /* Check to see if any memory value is too large for 32-bit and
+ * scale down if needed.
+ */
+ if ((s.totalram >> 32) || (s.totalswap >> 32)) {
+ /* Grow mem_unit until everything fits in 32 bits,
+ * shrinking the page counts by the same factor. */
+ while (s.mem_unit < PAGE_SIZE) {
+ s.mem_unit <<= 1;
+ bitcount++;
+ }
+ s.totalram >>= bitcount;
+ s.freeram >>= bitcount;
+ s.sharedram >>= bitcount;
+ s.bufferram >>= bitcount;
+ s.totalswap >>= bitcount;
+ s.freeswap >>= bitcount;
+ s.totalhigh >>= bitcount;
+ s.freehigh >>= bitcount;
+ }
+
+ err = put_user (s.uptime, &info->uptime);
+ err |= __put_user (s.loads[0], &info->loads[0]);
+ err |= __put_user (s.loads[1], &info->loads[1]);
+ err |= __put_user (s.loads[2], &info->loads[2]);
+ err |= __put_user (s.totalram, &info->totalram);
+ err |= __put_user (s.freeram, &info->freeram);
+ err |= __put_user (s.sharedram, &info->sharedram);
+ err |= __put_user (s.bufferram, &info->bufferram);
+ err |= __put_user (s.totalswap, &info->totalswap);
+ err |= __put_user (s.freeswap, &info->freeswap);
+ err |= __put_user (s.procs, &info->procs);
+ err |= __put_user (s.totalhigh, &info->totalhigh);
+ err |= __put_user (s.freehigh, &info->freehigh);
+ err |= __put_user (s.mem_unit, &info->mem_unit);
+ if (err)
+ return -EFAULT;
+ return ret;
+}
+
+/* Compat sched_rr_get_interval(): let the native syscall fill a
+ * kernel timespec via the set_fs(KERNEL_DS) window, then copy it
+ * out in 32-bit form with put_compat_timespec().
+ */
+asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
+{
+ struct timespec t;
+ int ret;
+ mm_segment_t old_fs = get_fs ();
+
+ set_fs (KERNEL_DS);
+ ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
+ set_fs (old_fs);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+ return ret;
+}
+
+/* Compat rt_sigprocmask(): repack the 32-bit sigset (two u32 words
+ * per native 64-bit word) into a native sigset_t, run the native
+ * syscall through a set_fs(KERNEL_DS) window, and unpack the old
+ * mask back.  The switch cases deliberately fall through so all
+ * words up to _NSIG_WORDS are converted.
+ */
+asmlinkage long compat_sys_rt_sigprocmask(int how,
+ compat_sigset_t __user *set,
+ compat_sigset_t __user *oset,
+ compat_size_t sigsetsize)
+{
+ sigset_t s;
+ compat_sigset_t s32;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ if (set) {
+ if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
+ return -EFAULT;
+ switch (_NSIG_WORDS) {
+ case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
+ case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
+ case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
+ case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+ }
+ }
+ set_fs (KERNEL_DS);
+ ret = sys_rt_sigprocmask(how,
+ set ? (sigset_t __user *) &s : NULL,
+ oset ? (sigset_t __user *) &s : NULL,
+ sigsetsize);
+ set_fs (old_fs);
+ if (ret) return ret;
+ if (oset) {
+ switch (_NSIG_WORDS) {
+ case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+ case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+ case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+ case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+ }
+ if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/* Compat rt_sigpending(): same sigset word-splitting as above, in the
+ * kernel-to-user direction only.
+ */
+asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
+ compat_size_t sigsetsize)
+{
+ sigset_t s;
+ compat_sigset_t s32;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+ ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
+ set_fs (old_fs);
+ if (!ret) {
+ switch (_NSIG_WORDS) {
+ case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
+ case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
+ case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
+ case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
+ }
+ if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+/* Compat rt_sigqueueinfo(): translate the 32-bit siginfo, then queue
+ * it via the native syscall through a set_fs(KERNEL_DS) window.
+ */
+asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
+ struct compat_siginfo __user *uinfo)
+{
+ siginfo_t info;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ if (copy_siginfo_from_user32(&info, uinfo))
+ return -EFAULT;
+
+ set_fs (KERNEL_DS);
+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
+ set_fs (old_fs);
+ return ret;
+}
+
+/* Old-style compat sigaction().  A negative signal number is the
+ * sparc ABI's way of requesting "new style" signal semantics: it
+ * sets TIF_NEWSIGNALS and is then negated back.  Handler/restorer
+ * pointers are 32-bit and widened with compat_ptr().
+ */
+asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act,
+ struct old_sigaction32 __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (sig < 0) {
+ set_thread_flag(TIF_NEWSIGNALS);
+ sig = -sig;
+ }
+
+ if (act) {
+ compat_old_sigset_t mask;
+ u32 u_handler, u_restorer;
+
+ ret = get_user(u_handler, &act->sa_handler);
+ new_ka.sa.sa_handler = compat_ptr(u_handler);
+ ret |= __get_user(u_restorer, &act->sa_restorer);
+ new_ka.sa.sa_restorer = compat_ptr(u_restorer);
+ ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ ret |= __get_user(mask, &act->sa_mask);
+ if (ret)
+ return ret;
+ new_ka.ka_restorer = NULL;
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
+ ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
+ ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ /* Old-style interface only carries the first mask word. */
+ ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+/* Compat rt_sigaction().  'restorer' is the sparc-specific trampoline
+ * pointer passed in a register by the 32-bit ABI.  The sigset switches
+ * use the same deliberate fallthrough word-packing as
+ * compat_sys_rt_sigprocmask().
+ */
+asmlinkage long compat_sys_rt_sigaction(int sig,
+ struct sigaction32 __user *act,
+ struct sigaction32 __user *oact,
+ void __user *restorer,
+ compat_size_t sigsetsize)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+ compat_sigset_t set32;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(compat_sigset_t))
+ return -EINVAL;
+
+ /* All tasks which use RT signals (effectively) use
+ * new style signals.
+ */
+ set_thread_flag(TIF_NEWSIGNALS);
+
+ if (act) {
+ u32 u_handler, u_restorer;
+
+ new_ka.ka_restorer = restorer;
+ ret = get_user(u_handler, &act->sa_handler);
+ new_ka.sa.sa_handler = compat_ptr(u_handler);
+ ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
+ switch (_NSIG_WORDS) {
+ case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
+ case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
+ case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
+ case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
+ }
+ ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ ret |= __get_user(u_restorer, &act->sa_restorer);
+ new_ka.sa.sa_restorer = compat_ptr(u_restorer);
+ if (ret)
+ return -EFAULT;
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ switch (_NSIG_WORDS) {
+ case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3];
+ case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2];
+ case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1];
+ case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0];
+ }
+ ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
+ ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
+ ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
+ if (ret)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+/*
+ * sparc32_execve() executes a new program after the asm stub has set
+ * things up for us. This should basically do what I want it to.
+ */
+asmlinkage long sparc32_execve(struct pt_regs *regs)
+{
+ int error, base = 0;
+ char *filename;
+
+ /* User register window flush is done by entry.S */
+
+ /* Check for indirect call. */
+ if ((u32)regs->u_regs[UREG_G1] == 0)
+ base = 1;
+
+ /* Arguments live in the input registers; 'base' skips one slot
+ * for the indirect-call convention. */
+ filename = getname(compat_ptr(regs->u_regs[base + UREG_I0]));
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+
+ error = compat_do_execve(filename,
+ compat_ptr(regs->u_regs[base + UREG_I1]),
+ compat_ptr(regs->u_regs[base + UREG_I2]), regs);
+
+ putname(filename);
+
+ if (!error) {
+ /* On success, reset the FPU state for the fresh image and
+ * clear the ptrace single-step bookkeeping. */
+ fprs_write(0);
+ current_thread_info()->xfsr[0] = 0;
+ current_thread_info()->fpsaved[0] = 0;
+ regs->tstate &= ~TSTATE_PEF;
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+out:
+ return error;
+}
+
+#ifdef CONFIG_MODULES
+
+/* Module syscalls need no argument translation beyond the implicit
+ * zero-extension of len; pass straight through.
+ */
+asmlinkage long sys32_init_module(void __user *umod, u32 len,
+ const char __user *uargs)
+{
+ return sys_init_module(umod, len, uargs);
+}
+
+asmlinkage long sys32_delete_module(const char __user *name_user,
+ unsigned int flags)
+{
+ return sys_delete_module(name_user, flags);
+}
+
+#else /* CONFIG_MODULES */
+
+/* Stubs when the kernel is built without module support. */
+asmlinkage long sys32_init_module(const char __user *name_user,
+ struct module __user *mod_user)
+{
+ return -ENOSYS;
+}
+
+asmlinkage long sys32_delete_module(const char __user *name_user)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_MODULES */
+
+/* Translations due to time_t size differences. Which affects all
+   sorts of things, like timeval and itimerval. */
+
+extern struct timezone sys_tz;
+
+/* Compat gettimeofday(): fills whichever of tv/tz the caller passed.
+ * The timezone struct has identical layout on 32 and 64 bit, so it is
+ * copied directly.
+ */
+asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz)
+{
+ if (tv) {
+ struct timeval ktv;
+ do_gettimeofday(&ktv);
+ if (put_tv32(tv, &ktv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/* Read a 32-bit timeval from userspace into a native timespec,
+ * converting microseconds to nanoseconds.
+ */
+static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
+{
+ long usec;
+
+ if (!access_ok(VERIFY_READ, i, sizeof(*i)))
+ return -EFAULT;
+ if (__get_user(o->tv_sec, &i->tv_sec))
+ return -EFAULT;
+ if (__get_user(usec, &i->tv_usec))
+ return -EFAULT;
+ o->tv_nsec = usec * 1000;
+ return 0;
+}
+
+/* Compat settimeofday(): either argument may be NULL; pass through
+ * to do_sys_settimeofday() after conversion.
+ */
+asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz)
+{
+ struct timespec kts;
+ struct timezone ktz;
+
+ if (tv) {
+ if (get_ts32(&kts, tv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_from_user(&ktz, tz, sizeof(ktz)))
+ return -EFAULT;
+ }
+
+ return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
+/* Compat utimes(): convert the optional pair of 32-bit timevals
+ * (access, modification) and call do_utimes().
+ */
+asmlinkage long sys32_utimes(char __user *filename,
+ struct compat_timeval __user *tvs)
+{
+ struct timeval ktvs[2];
+
+ if (tvs) {
+ if (get_tv32(&ktvs[0], tvs) ||
+ get_tv32(&ktvs[1], 1+tvs))
+ return -EFAULT;
+ }
+
+ return do_utimes(filename, (tvs ? &ktvs[0] : NULL));
+}
+
+/* These are here just in case some old sparc32 binary calls it. */
+asmlinkage long sys32_pause(void)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
+
+/* 64-bit file offsets arrive from 32-bit userspace split across two
+ * registers (hi/lo); these wrappers reassemble them and call the
+ * native syscall.
+ */
+asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
+ char __user *ubuf,
+ compat_size_t count,
+ unsigned long poshi,
+ unsigned long poslo)
+{
+ return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo);
+}
+
+asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
+ char __user *ubuf,
+ compat_size_t count,
+ unsigned long poshi,
+ unsigned long poslo)
+{
+ return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo);
+}
+
+asmlinkage long compat_sys_readahead(int fd,
+ unsigned long offhi,
+ unsigned long offlo,
+ compat_size_t count)
+{
+ return sys_readahead(fd, (offhi << 32) | offlo, count);
+}
+
+long compat_sys_fadvise64(int fd,
+ unsigned long offhi,
+ unsigned long offlo,
+ compat_size_t len, int advice)
+{
+ return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
+}
+
+/* fadvise64_64 additionally splits the length across two registers. */
+long compat_sys_fadvise64_64(int fd,
+ unsigned long offhi, unsigned long offlo,
+ unsigned long lenhi, unsigned long lenlo,
+ int advice)
+{
+ return sys_fadvise64_64(fd,
+ (offhi << 32) | offlo,
+ (lenhi << 32) | lenlo,
+ advice);
+}
+
+/* Compat sendfile(): the optional offset is a 32-bit off_t in user
+ * memory; bounce it through a kernel off_t using the set_fs(KERNEL_DS)
+ * window and write the updated value back afterwards.
+ */
+asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
+ compat_off_t __user *offset,
+ compat_size_t count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+ off_t of;
+
+ if (offset && get_user(of, offset))
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+ ret = sys_sendfile(out_fd, in_fd,
+ offset ? (off_t __user *) &of : NULL,
+ count);
+ set_fs(old_fs);
+
+ if (offset && put_user(of, offset))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Same bouncing scheme for sendfile64() with a 64-bit loff_t offset. */
+asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
+ compat_loff_t __user *offset,
+ compat_size_t count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+ loff_t lof;
+
+ if (offset && get_user(lof, offset))
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+ ret = sys_sendfile64(out_fd, in_fd,
+ offset ? (loff_t __user *) &lof : NULL,
+ count);
+ set_fs(old_fs);
+
+ if (offset && put_user(lof, offset))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Handle adjtimex compatibility. */
+
+/* 32-bit layout of struct timex; the anonymous :32 bitfields are the
+ * reserved padding words at the end of the userspace structure.
+ */
+struct timex32 {
+ u32 modes;
+ s32 offset, freq, maxerror, esterror;
+ s32 status, constant, precision, tolerance;
+ struct compat_timeval time;
+ s32 tick;
+ s32 ppsfreq, jitter, shift, stabil;
+ s32 jitcnt, calcnt, errcnt, stbcnt;
+ s32 :32; s32 :32; s32 :32; s32 :32;
+ s32 :32; s32 :32; s32 :32; s32 :32;
+ s32 :32; s32 :32; s32 :32; s32 :32;
+};
+
+extern int do_adjtimex(struct timex *);
+
+/* Compat adjtimex(): copy the 32-bit timex in field by field (the
+ * first get_user/put_user doubles as the access check for the rest),
+ * run do_adjtimex(), then copy the possibly-updated fields back out.
+ */
+asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
+{
+ struct timex txc;
+ int ret;
+
+ memset(&txc, 0, sizeof(struct timex));
+
+ if (get_user(txc.modes, &utp->modes) ||
+ __get_user(txc.offset, &utp->offset) ||
+ __get_user(txc.freq, &utp->freq) ||
+ __get_user(txc.maxerror, &utp->maxerror) ||
+ __get_user(txc.esterror, &utp->esterror) ||
+ __get_user(txc.status, &utp->status) ||
+ __get_user(txc.constant, &utp->constant) ||
+ __get_user(txc.precision, &utp->precision) ||
+ __get_user(txc.tolerance, &utp->tolerance) ||
+ __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+ __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+ __get_user(txc.tick, &utp->tick) ||
+ __get_user(txc.ppsfreq, &utp->ppsfreq) ||
+ __get_user(txc.jitter, &utp->jitter) ||
+ __get_user(txc.shift, &utp->shift) ||
+ __get_user(txc.stabil, &utp->stabil) ||
+ __get_user(txc.jitcnt, &utp->jitcnt) ||
+ __get_user(txc.calcnt, &utp->calcnt) ||
+ __get_user(txc.errcnt, &utp->errcnt) ||
+ __get_user(txc.stbcnt, &utp->stbcnt))
+ return -EFAULT;
+
+ ret = do_adjtimex(&txc);
+
+ if (put_user(txc.modes, &utp->modes) ||
+ __put_user(txc.offset, &utp->offset) ||
+ __put_user(txc.freq, &utp->freq) ||
+ __put_user(txc.maxerror, &utp->maxerror) ||
+ __put_user(txc.esterror, &utp->esterror) ||
+ __put_user(txc.status, &utp->status) ||
+ __put_user(txc.constant, &utp->constant) ||
+ __put_user(txc.precision, &utp->precision) ||
+ __put_user(txc.tolerance, &utp->tolerance) ||
+ __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
+ __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
+ __put_user(txc.tick, &utp->tick) ||
+ __put_user(txc.ppsfreq, &utp->ppsfreq) ||
+ __put_user(txc.jitter, &utp->jitter) ||
+ __put_user(txc.shift, &utp->shift) ||
+ __put_user(txc.stabil, &utp->stabil) ||
+ __put_user(txc.jitcnt, &utp->jitcnt) ||
+ __put_user(txc.calcnt, &utp->calcnt) ||
+ __put_user(txc.errcnt, &utp->errcnt) ||
+ __put_user(txc.stbcnt, &utp->stbcnt))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+/* This is just a version for 32-bit applications which does
+ * not force O_LARGEFILE on.
+ */
+
+asmlinkage long sparc32_open(const char __user *filename,
+ int flags, int mode)
+{
+ char * tmp;
+ int fd, error;
+
+ tmp = getname(filename);
+ fd = PTR_ERR(tmp);
+ if (!IS_ERR(tmp)) {
+ fd = get_unused_fd();
+ if (fd >= 0) {
+ struct file * f = filp_open(tmp, flags, mode);
+ error = PTR_ERR(f);
+ if (IS_ERR(f))
+ goto out_error;
+ fd_install(fd, f);
+ }
+ /* Reached directly when get_unused_fd() failed (fd holds
+ * the error), and via out_error after releasing the fd. */
+out:
+ putname(tmp);
+ }
+ return fd;
+
+out_error:
+ /* filp_open() failed: give the fd slot back and return the
+ * filp_open() error, jumping back inside the if for putname(). */
+ put_unused_fd(fd);
+ fd = error;
+ goto out;
+}
+
+extern unsigned long do_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+
+/* Compat mremap(): enforce that both the old and new mappings stay
+ * below 0xf0000000 (the top of the 32-bit task's usable address
+ * space on sparc64).  If the region cannot grow in place and
+ * MREMAP_MAYMOVE is allowed, pick a new area ourselves with
+ * get_unmapped_area() and force MREMAP_FIXED so do_mremap() uses it.
+ * mmap_sem is held for writing across the placement and the move.
+ */
+asmlinkage unsigned long sys32_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, u32 __new_addr)
+{
+ struct vm_area_struct *vma;
+ unsigned long ret = -EINVAL;
+ unsigned long new_addr = __new_addr;
+
+ if (old_len > 0xf0000000UL || new_len > 0xf0000000UL)
+ goto out;
+ if (addr > 0xf0000000UL - old_len)
+ goto out;
+ down_write(&current->mm->mmap_sem);
+ if (flags & MREMAP_FIXED) {
+ if (new_addr > 0xf0000000UL - new_len)
+ goto out_sem;
+ } else if (addr > 0xf0000000UL - new_len) {
+ unsigned long map_flags = 0;
+ struct file *file = NULL;
+
+ ret = -ENOMEM;
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out_sem;
+
+ vma = find_vma(current->mm, addr);
+ if (vma) {
+ if (vma->vm_flags & VM_SHARED)
+ map_flags |= MAP_SHARED;
+ file = vma->vm_file;
+ }
+
+ /* MREMAP_FIXED checked above. */
+ new_addr = get_unmapped_area(file, addr, new_len,
+ vma ? vma->vm_pgoff : 0,
+ map_flags);
+ ret = new_addr;
+ if (new_addr & ~PAGE_MASK)
+ goto out_sem;
+ flags |= MREMAP_FIXED;
+ }
+ ret = do_mremap(addr, old_len, new_len, flags, new_addr);
+out_sem:
+ up_write(&current->mm->mmap_sem);
+out:
+ return ret;
+}
+
+/* 32-bit layout of the sysctl argument block; all pointers are u32. */
+struct __sysctl_args32 {
+ u32 name;
+ int nlen;
+ u32 oldval;
+ u32 oldlenp;
+ u32 newval;
+ u32 newlen;
+ u32 __unused[4];
+};
+
+asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
+{
+#ifndef CONFIG_SYSCTL
+ return -ENOSYS;
+#else
+ struct __sysctl_args32 tmp;
+ int error;
+ size_t oldlen, __user *oldlenp = NULL;
+ /* 8-byte-aligned scratch space inside the caller's own
+ * args->__unused[], used to hold a native 64-bit size_t. */
+ unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL;
+
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ return -EFAULT;
+
+ if (tmp.oldval && tmp.oldlenp) {
+ /* Duh, this is ugly and might not work if sysctl_args
+ is in read-only memory, but do_sysctl does indirectly
+ a lot of uaccess in both directions and we'd have to
+ basically copy the whole sysctl.c here, and
+ glibc's __sysctl uses rw memory for the structure
+ anyway. */
+ if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) ||
+ put_user(oldlen, (size_t __user *)addr))
+ return -EFAULT;
+ oldlenp = (size_t __user *)addr;
+ }
+
+ lock_kernel();
+ error = do_sysctl((int __user *)(unsigned long) tmp.name,
+ tmp.nlen,
+ (void __user *)(unsigned long) tmp.oldval,
+ oldlenp,
+ (void __user *)(unsigned long) tmp.newval,
+ tmp.newlen);
+ unlock_kernel();
+ if (oldlenp) {
+ if (!error) {
+ /* Copy the (possibly updated) length back into the
+ * caller's 32-bit oldlenp. */
+ if (get_user(oldlen, (size_t __user *)addr) ||
+ put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp))
+ error = -EFAULT;
+ }
+ /* Restore the __unused words we borrowed as scratch. */
+ if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
+ error = -EFAULT;
+ }
+ return error;
+#endif
+}
+
+/* Reassemble the 64-bit dcookie from its two 32-bit register halves. */
+long sys32_lookup_dcookie(unsigned long cookie_high,
+ unsigned long cookie_low,
+ char __user *buf, size_t len)
+{
+ return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
+ buf, len);
+}
+
+extern asmlinkage long
+sys_timer_create(clockid_t which_clock,
+ struct sigevent __user *timer_event_spec,
+ timer_t __user *created_timer_id);
+
+/* Compat timer_create(): translate the 32-bit sigevent, then call the
+ * native syscall with kernel-resident sigevent/timer_t buffers through
+ * a set_fs(KERNEL_DS) window.  timer_id is checked writable up front
+ * so the final __put_user() is safe.
+ */
+long
+sys32_timer_create(u32 clock, struct compat_sigevent __user *se32,
+ timer_t __user *timer_id)
+{
+ struct sigevent se;
+ mm_segment_t oldfs;
+ timer_t t;
+ long err;
+
+ if (se32 == NULL)
+ return sys_timer_create(clock, NULL, timer_id);
+
+ if (get_compat_sigevent(&se, se32))
+ return -EFAULT;
+
+ if (!access_ok(VERIFY_WRITE,timer_id,sizeof(timer_t)))
+ return -EFAULT;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_timer_create(clock,
+ (struct sigevent __user *) &se,
+ (timer_t __user *) &t);
+ set_fs(oldfs);
+
+ if (!err)
+ err = __put_user (t, timer_id);
+
+ return err;
+}
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
new file mode 100644
index 00000000000..d0592ed54ea
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -0,0 +1,1343 @@
+/* $Id: sys_sunos32.c,v 1.64 2002/02/09 19:49:31 davem Exp $
+ * sys_sunos32.c: SunOS binary compatibility layer on sparc64.
+ *
+ * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/compat.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/resource.h>
+#include <linux/ipc.h>
+#include <linux/shm.h>
+#include <linux/msg.h>
+#include <linux/sem.h>
+#include <linux/signal.h>
+#include <linux/uio.h>
+#include <linux/utsname.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pconf.h>
+#include <asm/idprom.h> /* for gethostid() */
+#include <asm/unistd.h>
+#include <asm/system.h>
+
+/* For the nfs mount emulation */
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_mount.h>
+
+/* for sunos_select */
+#include <linux/time.h>
+#include <linux/personality.h>
+
+/* For SOCKET_I */
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <net/compat.h>
+
+#define SUNOS_NR_OPEN 256
+
+asmlinkage u32 sunos_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
+{
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
+ if (flags & MAP_NORESERVE) {
+ static int cnt;
+ if (cnt++ < 10)
+ printk("%s: unimplemented SunOS MAP_NORESERVE mmap() flag\n",
+ current->comm);
+ flags &= ~MAP_NORESERVE;
+ }
+ retval = -EBADF;
+ if (!(flags & MAP_ANONYMOUS)) {
+ struct inode * inode;
+ if (fd >= SUNOS_NR_OPEN)
+ goto out;
+ file = fget(fd);
+ if (!file)
+ goto out;
+ inode = file->f_dentry->d_inode;
+ if (imajor(inode) == MEM_MAJOR && iminor(inode) == 5) {
+ flags |= MAP_ANONYMOUS;
+ fput(file);
+ file = NULL;
+ }
+ }
+
+ retval = -EINVAL;
+ if (!(flags & MAP_FIXED))
+ addr = 0;
+ else if (len > 0xf0000000 || addr > 0xf0000000 - len)
+ goto out_putf;
+ ret_type = flags & _MAP_NEW;
+ flags &= ~_MAP_NEW;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down_write(&current->mm->mmap_sem);
+ retval = do_mmap(file,
+ (unsigned long) addr, (unsigned long) len,
+ (unsigned long) prot, (unsigned long) flags,
+ (unsigned long) off);
+ up_write(&current->mm->mmap_sem);
+ if (!ret_type)
+ retval = ((retval < 0xf0000000) ? 0 : retval);
+out_putf:
+ if (file)
+ fput(file);
+out:
+ return (u32) retval;
+}
+
+asmlinkage int sunos_mctl(u32 addr, u32 len, int function, u32 arg)
+{
+ return 0;
+}
+
+asmlinkage int sunos_brk(u32 baddr)
+{
+ int freepages, retval = -ENOMEM;
+ unsigned long rlim;
+ unsigned long newbrk, oldbrk, brk = (unsigned long) baddr;
+
+ down_write(&current->mm->mmap_sem);
+ if (brk < current->mm->end_code)
+ goto out;
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(current->mm->brk);
+ retval = 0;
+ if (oldbrk == newbrk) {
+ current->mm->brk = brk;
+ goto out;
+ }
+ /* Always allow shrinking brk. */
+ if (brk <= current->mm->brk) {
+ current->mm->brk = brk;
+ do_munmap(current->mm, newbrk, oldbrk-newbrk);
+ goto out;
+ }
+ /* Check against rlimit and stack.. */
+ retval = -ENOMEM;
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
+ if (brk - current->mm->end_code > rlim)
+ goto out;
+ /* Check against existing mmap mappings. */
+ if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
+ goto out;
+ /* stupid algorithm to decide if we have enough memory: while
+ * simple, it hopefully works in most obvious cases.. Easy to
+ * fool it, but this should catch most mistakes.
+ */
+ freepages = get_page_cache_size();
+ freepages >>= 1;
+ freepages += nr_free_pages();
+ freepages += nr_swap_pages;
+ freepages -= num_physpages >> 4;
+ freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
+ if (freepages < 0)
+ goto out;
+ /* Ok, we have probably got enough memory - let it rip. */
+ current->mm->brk = brk;
+ do_brk(oldbrk, newbrk-oldbrk);
+ retval = 0;
+out:
+ up_write(&current->mm->mmap_sem);
+ return retval;
+}
+
+asmlinkage u32 sunos_sbrk(int increment)
+{
+ int error, oldbrk;
+
+ /* This should do it hopefully... */
+ oldbrk = (int)current->mm->brk;
+ error = sunos_brk(((int) current->mm->brk) + increment);
+ if (!error)
+ error = oldbrk;
+ return error;
+}
+
+asmlinkage u32 sunos_sstk(int increment)
+{
+ printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
+ current->comm, increment);
+
+ return (u32)-1;
+}
+
+/* Give hints to the kernel as to what paging strategy to use...
+ * Completely bogus, don't remind me.
+ */
+#define VA_NORMAL 0 /* Normal vm usage expected */
+#define VA_ABNORMAL 1 /* Abnormal/random vm usage probable */
+#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
+#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
+static char *vstrings[] = {
+ "VA_NORMAL",
+ "VA_ABNORMAL",
+ "VA_SEQUENTIAL",
+ "VA_INVALIDATE",
+};
+
+asmlinkage void sunos_vadvise(u32 strategy)
+{
+ static int count;
+
+ /* I wanna see who uses this... */
+ if (count++ < 5)
+ printk("%s: Advises us to use %s paging strategy\n",
+ current->comm,
+ strategy <= 3 ? vstrings[strategy] : "BOGUS");
+}
+
+/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
+ * resource limit and is for backwards compatibility with older sunos
+ * revs.
+ */
+asmlinkage int sunos_getdtablesize(void)
+{
+ return SUNOS_NR_OPEN;
+}
+
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage u32 sunos_sigblock(u32 blk_mask)
+{
+ u32 old;
+
+ spin_lock_irq(&current->sighand->siglock);
+ old = (u32) current->blocked.sig[0];
+ current->blocked.sig[0] |= (blk_mask & _BLOCKABLE);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ return old;
+}
+
+asmlinkage u32 sunos_sigsetmask(u32 newmask)
+{
+ u32 retval;
+
+ spin_lock_irq(&current->sighand->siglock);
+ retval = (u32) current->blocked.sig[0];
+ current->blocked.sig[0] = (newmask & _BLOCKABLE);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ return retval;
+}
+
+/* SunOS getdents is very similar to the newer Linux (iBCS2 compliant) */
+/* getdents system call, the format of the structure just has a different */
+/* layout (d_off+d_ino instead of d_ino+d_off) */
+struct sunos_dirent {
+ s32 d_off;
+ u32 d_ino;
+ u16 d_reclen;
+ u16 d_namlen;
+ char d_name[1];
+};
+
+struct sunos_dirent_callback {
+ struct sunos_dirent __user *curr;
+ struct sunos_dirent __user *previous;
+ int count;
+ int error;
+};
+
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(s32)-1) & ~(sizeof(s32)-1))
+
+static int sunos_filldir(void * __buf, const char * name, int namlen,
+ loff_t offset, ino_t ino, unsigned int d_type)
+{
+ struct sunos_dirent __user *dirent;
+ struct sunos_dirent_callback * buf = (struct sunos_dirent_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->curr;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ if (copy_to_user(dirent->d_name, name, namlen))
+ return -EFAULT;
+ put_user(0, dirent->d_name + namlen);
+ dirent = (void __user *) dirent + reclen;
+ buf->curr = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage int sunos_getdents(unsigned int fd, void __user *dirent, int cnt)
+{
+ struct file * file;
+ struct sunos_dirent __user *lastdirent;
+ struct sunos_dirent_callback buf;
+ int error = -EBADF;
+
+ if (fd >= SUNOS_NR_OPEN)
+ goto out;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ error = -EINVAL;
+ if (cnt < (sizeof(struct sunos_dirent) + 255))
+ goto out_putf;
+
+ buf.curr = (struct sunos_dirent __user *) dirent;
+ buf.previous = NULL;
+ buf.count = cnt;
+ buf.error = 0;
+
+ error = vfs_readdir(file, sunos_filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+
+ lastdirent = buf.previous;
+ error = buf.error;
+ if (lastdirent) {
+ put_user(file->f_pos, &lastdirent->d_off);
+ error = cnt - buf.count;
+ }
+
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+/* Old sunos getdirentries, severely broken compatibility stuff here. */
+struct sunos_direntry {
+ u32 d_ino;
+ u16 d_reclen;
+ u16 d_namlen;
+ char d_name[1];
+};
+
+struct sunos_direntry_callback {
+ struct sunos_direntry __user *curr;
+ struct sunos_direntry __user *previous;
+ int count;
+ int error;
+};
+
+static int sunos_filldirentry(void * __buf, const char * name, int namlen,
+ loff_t offset, ino_t ino, unsigned int d_type)
+{
+ struct sunos_direntry __user *dirent;
+ struct sunos_direntry_callback * buf =
+ (struct sunos_direntry_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ dirent = buf->curr;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ if (copy_to_user(dirent->d_name, name, namlen))
+ return -EFAULT;
+ put_user(0, dirent->d_name + namlen);
+ dirent = (void __user *) dirent + reclen;
+ buf->curr = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage int sunos_getdirentries(unsigned int fd,
+ void __user *dirent,
+ int cnt,
+ unsigned int __user *basep)
+{
+ struct file * file;
+ struct sunos_direntry __user *lastdirent;
+ int error = -EBADF;
+ struct sunos_direntry_callback buf;
+
+ if (fd >= SUNOS_NR_OPEN)
+ goto out;
+
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ error = -EINVAL;
+ if (cnt < (sizeof(struct sunos_direntry) + 255))
+ goto out_putf;
+
+ buf.curr = (struct sunos_direntry __user *) dirent;
+ buf.previous = NULL;
+ buf.count = cnt;
+ buf.error = 0;
+
+ error = vfs_readdir(file, sunos_filldirentry, &buf);
+ if (error < 0)
+ goto out_putf;
+
+ lastdirent = buf.previous;
+ error = buf.error;
+ if (lastdirent) {
+ put_user(file->f_pos, basep);
+ error = cnt - buf.count;
+ }
+
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+struct sunos_utsname {
+ char sname[9];
+ char nname[9];
+ char nnext[56];
+ char rel[9];
+ char ver[9];
+ char mach[9];
+};
+
+asmlinkage int sunos_uname(struct sunos_utsname __user *name)
+{
+ int ret;
+
+ down_read(&uts_sem);
+ ret = copy_to_user(&name->sname[0], &system_utsname.sysname[0],
+ sizeof(name->sname) - 1);
+ ret |= copy_to_user(&name->nname[0], &system_utsname.nodename[0],
+ sizeof(name->nname) - 1);
+ ret |= put_user('\0', &name->nname[8]);
+ ret |= copy_to_user(&name->rel[0], &system_utsname.release[0],
+ sizeof(name->rel) - 1);
+ ret |= copy_to_user(&name->ver[0], &system_utsname.version[0],
+ sizeof(name->ver) - 1);
+ ret |= copy_to_user(&name->mach[0], &system_utsname.machine[0],
+ sizeof(name->mach) - 1);
+ up_read(&uts_sem);
+ return (ret ? -EFAULT : 0);
+}
+
+asmlinkage int sunos_nosys(void)
+{
+ struct pt_regs *regs;
+ siginfo_t info;
+ static int cnt;
+
+ regs = current_thread_info()->kregs;
+ if (test_thread_flag(TIF_32BIT)) {
+ regs->tpc &= 0xffffffff;
+ regs->tnpc &= 0xffffffff;
+ }
+ info.si_signo = SIGSYS;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT|0x100;
+ info.si_addr = (void __user *)regs->tpc;
+ info.si_trapno = regs->u_regs[UREG_G1];
+ send_sig_info(SIGSYS, &info, current);
+ if (cnt++ < 4) {
+ printk("Process makes ni_syscall number %d, register dump:\n",
+ (int) regs->u_regs[UREG_G1]);
+ show_regs(regs);
+ }
+ return -ENOSYS;
+}
+
+/* This is not a real and complete implementation yet, just to keep
+ * the easy SunOS binaries happy.
+ */
+asmlinkage int sunos_fpathconf(int fd, int name)
+{
+ int ret;
+
+ switch(name) {
+ case _PCONF_LINK:
+ ret = LINK_MAX;
+ break;
+ case _PCONF_CANON:
+ ret = MAX_CANON;
+ break;
+ case _PCONF_INPUT:
+ ret = MAX_INPUT;
+ break;
+ case _PCONF_NAME:
+ ret = NAME_MAX;
+ break;
+ case _PCONF_PATH:
+ ret = PATH_MAX;
+ break;
+ case _PCONF_PIPE:
+ ret = PIPE_BUF;
+ break;
+ case _PCONF_CHRESTRICT: /* XXX Investigate XXX */
+ ret = 1;
+ break;
+ case _PCONF_NOTRUNC: /* XXX Investigate XXX */
+ case _PCONF_VDISABLE:
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+asmlinkage int sunos_pathconf(u32 u_path, int name)
+{
+ int ret;
+
+ ret = sunos_fpathconf(0, name); /* XXX cheese XXX */
+ return ret;
+}
+
+asmlinkage int sunos_select(int width, u32 inp, u32 outp, u32 exp, u32 tvp_x)
+{
+ int ret;
+
+ /* SunOS binaries expect that select won't change the tvp contents */
+ ret = compat_sys_select(width, compat_ptr(inp), compat_ptr(outp),
+ compat_ptr(exp), compat_ptr(tvp_x));
+ if (ret == -EINTR && tvp_x) {
+ struct compat_timeval __user *tvp = compat_ptr(tvp_x);
+ time_t sec, usec;
+
+ __get_user(sec, &tvp->tv_sec);
+ __get_user(usec, &tvp->tv_usec);
+ if (sec == 0 && usec == 0)
+ ret = 0;
+ }
+ return ret;
+}
+
+asmlinkage void sunos_nop(void)
+{
+ return;
+}
+
+#if 0 /* This code doesn't translate user pointers correctly,
+ * disable for now. -DaveM
+ */
+
+/* XXXXXXXXXX SunOS mount/umount. XXXXXXXXXXX */
+#define SMNT_RDONLY 1
+#define SMNT_NOSUID 2
+#define SMNT_NEWTYPE 4
+#define SMNT_GRPID 8
+#define SMNT_REMOUNT 16
+#define SMNT_NOSUB 32
+#define SMNT_MULTI 64
+#define SMNT_SYS5 128
+
+struct sunos_fh_t {
+ char fh_data [NFS_FHSIZE];
+};
+
+struct sunos_nfs_mount_args {
+ struct sockaddr_in *addr; /* file server address */
+ struct nfs_fh *fh; /* File handle to be mounted */
+ int flags; /* flags */
+ int wsize; /* write size in bytes */
+ int rsize; /* read size in bytes */
+ int timeo; /* initial timeout in .1 secs */
+ int retrans; /* times to retry send */
+ char *hostname; /* server's hostname */
+ int acregmin; /* attr cache file min secs */
+ int acregmax; /* attr cache file max secs */
+ int acdirmin; /* attr cache dir min secs */
+ int acdirmax; /* attr cache dir max secs */
+ char *netname; /* server's netname */
+};
+
+
+/* Bind the socket on a local reserved port and connect it to the
+ * remote server. This on Linux/i386 is done by the mount program,
+ * not by the kernel.
+ */
+/* XXXXXXXXXXXXXXXXXXXX */
+static int
+sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
+{
+ struct sockaddr_in local;
+ struct sockaddr_in server;
+ int try_port;
+ int ret;
+ struct socket *socket;
+ struct inode *inode;
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return 0;
+
+ inode = file->f_dentry->d_inode;
+
+ socket = SOCKET_I(inode);
+ local.sin_family = AF_INET;
+ local.sin_addr.s_addr = INADDR_ANY;
+
+ /* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
+ try_port = 1024;
+ do {
+ local.sin_port = htons (--try_port);
+ ret = socket->ops->bind(socket, (struct sockaddr*)&local,
+ sizeof(local));
+ } while (ret && try_port > (1024 / 2));
+
+ if (ret) {
+ fput(file);
+ return 0;
+ }
+
+ server.sin_family = AF_INET;
+ server.sin_addr = addr->sin_addr;
+ server.sin_port = NFS_PORT;
+
+ /* Call sys_connect */
+ ret = socket->ops->connect (socket, (struct sockaddr *) &server,
+ sizeof (server), file->f_flags);
+ fput(file);
+ if (ret < 0)
+ return 0;
+ return 1;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+static int get_default (int value, int def_value)
+{
+ if (value)
+ return value;
+ else
+ return def_value;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+static int sunos_nfs_mount(char *dir_name, int linux_flags, void __user *data)
+{
+ int server_fd, err;
+ char *the_name, *mount_page;
+ struct nfs_mount_data linux_nfs_mount;
+ struct sunos_nfs_mount_args sunos_mount;
+
+ /* Ok, here comes the fun part: Linux's nfs mount needs a
+ * socket connection to the server, but SunOS mount does not
+ * require this, so we use the information on the destination
+ * address to create a socket and bind it to a reserved
+ * port on this system
+ */
+ if (copy_from_user(&sunos_mount, data, sizeof(sunos_mount)))
+ return -EFAULT;
+
+ server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (server_fd < 0)
+ return -ENXIO;
+
+ if (copy_from_user(&linux_nfs_mount.addr, sunos_mount.addr,
+ sizeof(*sunos_mount.addr)) ||
+ copy_from_user(&linux_nfs_mount.root, sunos_mount.fh,
+ sizeof(*sunos_mount.fh))) {
+ sys_close (server_fd);
+ return -EFAULT;
+ }
+
+ if (!sunos_nfs_get_server_fd (server_fd, &linux_nfs_mount.addr)){
+ sys_close (server_fd);
+ return -ENXIO;
+ }
+
+ /* Now, bind it to a locally reserved port */
+ linux_nfs_mount.version = NFS_MOUNT_VERSION;
+ linux_nfs_mount.flags = sunos_mount.flags;
+ linux_nfs_mount.fd = server_fd;
+
+ linux_nfs_mount.rsize = get_default (sunos_mount.rsize, 8192);
+ linux_nfs_mount.wsize = get_default (sunos_mount.wsize, 8192);
+ linux_nfs_mount.timeo = get_default (sunos_mount.timeo, 10);
+ linux_nfs_mount.retrans = sunos_mount.retrans;
+
+ linux_nfs_mount.acregmin = sunos_mount.acregmin;
+ linux_nfs_mount.acregmax = sunos_mount.acregmax;
+ linux_nfs_mount.acdirmin = sunos_mount.acdirmin;
+ linux_nfs_mount.acdirmax = sunos_mount.acdirmax;
+
+ the_name = getname(sunos_mount.hostname);
+ if (IS_ERR(the_name))
+ return PTR_ERR(the_name);
+
+ strlcpy(linux_nfs_mount.hostname, the_name,
+ sizeof(linux_nfs_mount.hostname));
+ putname (the_name);
+
+ mount_page = (char *) get_zeroed_page(GFP_KERNEL);
+ if (!mount_page)
+ return -ENOMEM;
+
+ memcpy(mount_page, &linux_nfs_mount, sizeof(linux_nfs_mount));
+
+ err = do_mount("", dir_name, "nfs", linux_flags, mount_page);
+
+ free_page((unsigned long) mount_page);
+ return err;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+asmlinkage int
+sunos_mount(char *type, char *dir, int flags, void *data)
+{
+ int linux_flags = 0;
+ int ret = -EINVAL;
+ char *dev_fname = 0;
+ char *dir_page, *type_page;
+
+ if (!capable (CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* We don't handle the integer fs type */
+ if ((flags & SMNT_NEWTYPE) == 0)
+ goto out;
+
+ /* Do not allow for those flags we don't support */
+ if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
+ goto out;
+
+ if (flags & SMNT_REMOUNT)
+ linux_flags |= MS_REMOUNT;
+ if (flags & SMNT_RDONLY)
+ linux_flags |= MS_RDONLY;
+ if (flags & SMNT_NOSUID)
+ linux_flags |= MS_NOSUID;
+
+ dir_page = getname(dir);
+ ret = PTR_ERR(dir_page);
+ if (IS_ERR(dir_page))
+ goto out;
+
+ type_page = getname(type);
+ ret = PTR_ERR(type_page);
+ if (IS_ERR(type_page))
+ goto out1;
+
+ if (strcmp(type_page, "ext2") == 0) {
+ dev_fname = getname(data);
+ } else if (strcmp(type_page, "iso9660") == 0) {
+ dev_fname = getname(data);
+ } else if (strcmp(type_page, "minix") == 0) {
+ dev_fname = getname(data);
+ } else if (strcmp(type_page, "nfs") == 0) {
+ ret = sunos_nfs_mount (dir_page, flags, data);
+ goto out2;
+ } else if (strcmp(type_page, "ufs") == 0) {
+ printk("Warning: UFS filesystem mounts unsupported.\n");
+ ret = -ENODEV;
+ goto out2;
+ } else if (strcmp(type_page, "proc")) {
+ ret = -ENODEV;
+ goto out2;
+ }
+ ret = PTR_ERR(dev_fname);
+ if (IS_ERR(dev_fname))
+ goto out2;
+ lock_kernel();
+ ret = do_mount(dev_fname, dir_page, type_page, linux_flags, NULL);
+ unlock_kernel();
+ if (dev_fname)
+ putname(dev_fname);
+out2:
+ putname(type_page);
+out1:
+ putname(dir_page);
+out:
+ return ret;
+}
+#endif
+
+asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
+{
+ int ret;
+
+ /* So stupid... */
+ if ((!pid || pid == current->pid) &&
+ !pgid) {
+ sys_setsid();
+ ret = 0;
+ } else {
+ ret = sys_setpgid(pid, pgid);
+ }
+ return ret;
+}
+
+/* So stupid... */
+extern long compat_sys_wait4(compat_pid_t, compat_uint_t __user *, int,
+ struct compat_rusage __user *);
+
+asmlinkage int sunos_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options, struct compat_rusage __user *ru)
+{
+ int ret;
+
+ ret = compat_sys_wait4((pid ? pid : ((compat_pid_t)-1)),
+ stat_addr, options, ru);
+ return ret;
+}
+
+extern int kill_pg(int, int, int);
+asmlinkage int sunos_killpg(int pgrp, int sig)
+{
+ return kill_pg(pgrp, sig, 0);
+}
+
+asmlinkage int sunos_audit(void)
+{
+ printk ("sys_audit\n");
+ return -1;
+}
+
+asmlinkage u32 sunos_gethostid(void)
+{
+ u32 ret;
+
+ ret = (((u32)idprom->id_machtype << 24) | ((u32)idprom->id_sernum));
+
+ return ret;
+}
+
+/* sysconf options, for SunOS compatibility */
+#define _SC_ARG_MAX 1
+#define _SC_CHILD_MAX 2
+#define _SC_CLK_TCK 3
+#define _SC_NGROUPS_MAX 4
+#define _SC_OPEN_MAX 5
+#define _SC_JOB_CONTROL 6
+#define _SC_SAVED_IDS 7
+#define _SC_VERSION 8
+
+asmlinkage s32 sunos_sysconf (int name)
+{
+ s32 ret;
+
+ switch (name){
+ case _SC_ARG_MAX:
+ ret = ARG_MAX;
+ break;
+ case _SC_CHILD_MAX:
+ ret = CHILD_MAX;
+ break;
+ case _SC_CLK_TCK:
+ ret = HZ;
+ break;
+ case _SC_NGROUPS_MAX:
+ ret = NGROUPS_MAX;
+ break;
+ case _SC_OPEN_MAX:
+ ret = OPEN_MAX;
+ break;
+ case _SC_JOB_CONTROL:
+ ret = 1; /* yes, we do support job control */
+ break;
+ case _SC_SAVED_IDS:
+ ret = 1; /* yes, we do support saved uids */
+ break;
+ case _SC_VERSION:
+ /* mhm, POSIX_VERSION is in /usr/include/unistd.h
+ * should it go on /usr/include/linux?
+ */
+ ret = 199009;
+ break;
+ default:
+ ret = -1;
+ break;
+ };
+ return ret;
+}
+
+asmlinkage int sunos_semsys(int op, u32 arg1, u32 arg2, u32 arg3, void __user *ptr)
+{
+ union semun arg4;
+ int ret;
+
+ switch (op) {
+ case 0:
+ /* Most arguments match on a 1:1 basis but cmd doesn't */
+ switch(arg3) {
+ case 4:
+ arg3=GETPID; break;
+ case 5:
+ arg3=GETVAL; break;
+ case 6:
+ arg3=GETALL; break;
+ case 3:
+ arg3=GETNCNT; break;
+ case 7:
+ arg3=GETZCNT; break;
+ case 8:
+ arg3=SETVAL; break;
+ case 9:
+ arg3=SETALL; break;
+ }
+ /* sys_semctl(): */
+ /* value to modify semaphore to */
+ arg4.__pad = ptr;
+ ret = sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4);
+ break;
+ case 1:
+ /* sys_semget(): */
+ ret = sys_semget((key_t)arg1, (int)arg2, (int)arg3);
+ break;
+ case 2:
+ /* sys_semop(): */
+ ret = sys_semop((int)arg1, (struct sembuf __user *)(unsigned long)arg2,
+ (unsigned int) arg3);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ };
+ return ret;
+}
+
+struct msgbuf32 {
+ s32 mtype;
+ char mtext[1];
+};
+
+struct ipc_perm32
+{
+ key_t key;
+ compat_uid_t uid;
+ compat_gid_t gid;
+ compat_uid_t cuid;
+ compat_gid_t cgid;
+ compat_mode_t mode;
+ unsigned short seq;
+};
+
+struct msqid_ds32
+{
+ struct ipc_perm32 msg_perm;
+ u32 msg_first;
+ u32 msg_last;
+ compat_time_t msg_stime;
+ compat_time_t msg_rtime;
+ compat_time_t msg_ctime;
+ u32 wwait;
+ u32 rwait;
+ unsigned short msg_cbytes;
+ unsigned short msg_qnum;
+ unsigned short msg_qbytes;
+ compat_ipc_pid_t msg_lspid;
+ compat_ipc_pid_t msg_lrpid;
+};
+
+static inline int sunos_msqid_get(struct msqid_ds32 __user *user,
+ struct msqid_ds *kern)
+{
+ if (get_user(kern->msg_perm.key, &user->msg_perm.key) ||
+ __get_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
+ __get_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
+ __get_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
+ __get_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
+ __get_user(kern->msg_stime, &user->msg_stime) ||
+ __get_user(kern->msg_rtime, &user->msg_rtime) ||
+ __get_user(kern->msg_ctime, &user->msg_ctime) ||
+ __get_user(kern->msg_ctime, &user->msg_cbytes) ||
+ __get_user(kern->msg_ctime, &user->msg_qnum) ||
+ __get_user(kern->msg_ctime, &user->msg_qbytes) ||
+ __get_user(kern->msg_ctime, &user->msg_lspid) ||
+ __get_user(kern->msg_ctime, &user->msg_lrpid))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int sunos_msqid_put(struct msqid_ds32 __user *user,
+ struct msqid_ds *kern)
+{
+ if (put_user(kern->msg_perm.key, &user->msg_perm.key) ||
+ __put_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
+ __put_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
+ __put_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
+ __put_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
+ __put_user(kern->msg_stime, &user->msg_stime) ||
+ __put_user(kern->msg_rtime, &user->msg_rtime) ||
+ __put_user(kern->msg_ctime, &user->msg_ctime) ||
+ __put_user(kern->msg_ctime, &user->msg_cbytes) ||
+ __put_user(kern->msg_ctime, &user->msg_qnum) ||
+ __put_user(kern->msg_ctime, &user->msg_qbytes) ||
+ __put_user(kern->msg_ctime, &user->msg_lspid) ||
+ __put_user(kern->msg_ctime, &user->msg_lrpid))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int sunos_msgbuf_get(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
+{
+ if (get_user(kern->mtype, &user->mtype) ||
+ __copy_from_user(kern->mtext, &user->mtext, len))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int sunos_msgbuf_put(struct msgbuf32 __user *user, struct msgbuf *kern, int len)
+{
+ if (put_user(kern->mtype, &user->mtype) ||
+ __copy_to_user(user->mtext, kern->mtext, len))
+ return -EFAULT;
+ return 0;
+}
+
+asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ struct sparc_stackf32 __user *sp;
+ struct msqid_ds kds;
+ struct msgbuf *kmbuf;
+ mm_segment_t old_fs = get_fs();
+ u32 arg5;
+ int rval;
+
+ switch(op) {
+ case 0:
+ rval = sys_msgget((key_t)arg1, (int)arg2);
+ break;
+ case 1:
+ if (!sunos_msqid_get((struct msqid_ds32 __user *)(unsigned long)arg3, &kds)) {
+ set_fs(KERNEL_DS);
+ rval = sys_msgctl((int)arg1, (int)arg2,
+ (struct msqid_ds __user *)(unsigned long)arg3);
+ set_fs(old_fs);
+ if (!rval)
+ rval = sunos_msqid_put((struct msqid_ds32 __user *)(unsigned long)arg3,
+ &kds);
+ } else
+ rval = -EFAULT;
+ break;
+ case 2:
+ rval = -EFAULT;
+ kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
+ GFP_KERNEL);
+ if (!kmbuf)
+ break;
+ sp = (struct sparc_stackf32 __user *)
+ (current_thread_info()->kregs->u_regs[UREG_FP] & 0xffffffffUL);
+ if (get_user(arg5, &sp->xxargs[0])) {
+ rval = -EFAULT;
+ kfree(kmbuf);
+ break;
+ }
+ set_fs(KERNEL_DS);
+ rval = sys_msgrcv((int)arg1, (struct msgbuf __user *) kmbuf,
+ (size_t)arg3,
+ (long)arg4, (int)arg5);
+ set_fs(old_fs);
+ if (!rval)
+ rval = sunos_msgbuf_put((struct msgbuf32 __user *)(unsigned long)arg2,
+ kmbuf, arg3);
+ kfree(kmbuf);
+ break;
+ case 3:
+ rval = -EFAULT;
+ kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
+ GFP_KERNEL);
+ if (!kmbuf || sunos_msgbuf_get((struct msgbuf32 __user *)(unsigned long)arg2,
+ kmbuf, arg3))
+ break;
+ set_fs(KERNEL_DS);
+ rval = sys_msgsnd((int)arg1, (struct msgbuf __user *) kmbuf,
+ (size_t)arg3, (int)arg4);
+ set_fs(old_fs);
+ kfree(kmbuf);
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ return rval;
+}
+
+struct shmid_ds32 {
+ struct ipc_perm32 shm_perm;
+ int shm_segsz;
+ compat_time_t shm_atime;
+ compat_time_t shm_dtime;
+ compat_time_t shm_ctime;
+ compat_ipc_pid_t shm_cpid;
+ compat_ipc_pid_t shm_lpid;
+ unsigned short shm_nattch;
+};
+
+static inline int sunos_shmid_get(struct shmid_ds32 __user *user,
+ struct shmid_ds *kern)
+{
+ if (get_user(kern->shm_perm.key, &user->shm_perm.key) ||
+ __get_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
+ __get_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
+ __get_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
+ __get_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
+ __get_user(kern->shm_segsz, &user->shm_segsz) ||
+ __get_user(kern->shm_atime, &user->shm_atime) ||
+ __get_user(kern->shm_dtime, &user->shm_dtime) ||
+ __get_user(kern->shm_ctime, &user->shm_ctime) ||
+ __get_user(kern->shm_cpid, &user->shm_cpid) ||
+ __get_user(kern->shm_lpid, &user->shm_lpid) ||
+ __get_user(kern->shm_nattch, &user->shm_nattch))
+ return -EFAULT;
+ return 0;
+}
+
+static inline int sunos_shmid_put(struct shmid_ds32 __user *user,
+ struct shmid_ds *kern)
+{
+ if (put_user(kern->shm_perm.key, &user->shm_perm.key) ||
+ __put_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
+ __put_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
+ __put_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
+ __put_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
+ __put_user(kern->shm_segsz, &user->shm_segsz) ||
+ __put_user(kern->shm_atime, &user->shm_atime) ||
+ __put_user(kern->shm_dtime, &user->shm_dtime) ||
+ __put_user(kern->shm_ctime, &user->shm_ctime) ||
+ __put_user(kern->shm_cpid, &user->shm_cpid) ||
+ __put_user(kern->shm_lpid, &user->shm_lpid) ||
+ __put_user(kern->shm_nattch, &user->shm_nattch))
+ return -EFAULT;
+ return 0;
+}
+
+asmlinkage int sunos_shmsys(int op, u32 arg1, u32 arg2, u32 arg3)
+{
+ struct shmid_ds ksds;
+ unsigned long raddr;
+ mm_segment_t old_fs = get_fs();
+ int rval;
+
+ switch(op) {
+ case 0:
+ /* do_shmat(): attach a shared memory area */
+ rval = do_shmat((int)arg1,(char __user *)(unsigned long)arg2,(int)arg3,&raddr);
+ if (!rval)
+ rval = (int) raddr;
+ break;
+ case 1:
+ /* sys_shmctl(): modify shared memory area attr. */
+ if (!sunos_shmid_get((struct shmid_ds32 __user *)(unsigned long)arg3, &ksds)) {
+ set_fs(KERNEL_DS);
+ rval = sys_shmctl((int) arg1,(int) arg2,
+ (struct shmid_ds __user *) &ksds);
+ set_fs(old_fs);
+ if (!rval)
+ rval = sunos_shmid_put((struct shmid_ds32 __user *)(unsigned long)arg3,
+ &ksds);
+ } else
+ rval = -EFAULT;
+ break;
+ case 2:
+ /* sys_shmdt(): detach a shared memory area */
+ rval = sys_shmdt((char __user *)(unsigned long)arg1);
+ break;
+ case 3:
+ /* sys_shmget(): get a shared memory area */
+ rval = sys_shmget((key_t)arg1,(int)arg2,(int)arg3);
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ };
+ return rval;
+}
+
+extern asmlinkage long sparc32_open(const char __user * filename, int flags, int mode);
+
+asmlinkage int sunos_open(u32 fname, int flags, int mode)
+{
+ const char __user *filename = compat_ptr(fname);
+
+ return sparc32_open(filename, flags, mode);
+}
+
+#define SUNOS_EWOULDBLOCK 35
+
+/* see the sunos man page read(2v) for an explanation
+ of this garbage. We use O_NDELAY to mark
+ file descriptors that have been set non-blocking
+ using 4.2BSD style calls. (tridge) */
+
+static inline int check_nonblock(int ret, int fd)
+{
+ if (ret == -EAGAIN) {
+ struct file * file = fget(fd);
+ if (file) {
+ if (file->f_flags & O_NDELAY)
+ ret = -SUNOS_EWOULDBLOCK;
+ fput(file);
+ }
+ }
+ return ret;
+}
+
+asmlinkage int sunos_read(unsigned int fd, char __user *buf, u32 count)
+{
+ int ret;
+
+ ret = check_nonblock(sys_read(fd, buf, count), fd);
+ return ret;
+}
+
+asmlinkage int sunos_readv(u32 fd, void __user *vector, s32 count)
+{
+ int ret;
+
+ ret = check_nonblock(compat_sys_readv(fd, vector, count), fd);
+ return ret;
+}
+
+asmlinkage int sunos_write(unsigned int fd, char __user *buf, u32 count)
+{
+ int ret;
+
+ ret = check_nonblock(sys_write(fd, buf, count), fd);
+ return ret;
+}
+
+asmlinkage int sunos_writev(u32 fd, void __user *vector, s32 count)
+{
+ int ret;
+
+ ret = check_nonblock(compat_sys_writev(fd, vector, count), fd);
+ return ret;
+}
+
+asmlinkage int sunos_recv(u32 __fd, void __user *ubuf, int size, unsigned flags)
+{
+ int ret, fd = (int) __fd;
+
+ ret = check_nonblock(sys_recv(fd, ubuf, size, flags), fd);
+ return ret;
+}
+
+asmlinkage int sunos_send(u32 __fd, void __user *buff, int len, unsigned flags)
+{
+ int ret, fd = (int) __fd;
+
+ ret = check_nonblock(sys_send(fd, buff, len, flags), fd);
+ return ret;
+}
+
+asmlinkage int sunos_accept(u32 __fd, struct sockaddr __user *sa, int __user *addrlen)
+{
+ int ret, fd = (int) __fd;
+
+ while (1) {
+ ret = check_nonblock(sys_accept(fd, sa, addrlen), fd);
+ if (ret != -ENETUNREACH && ret != -EHOSTUNREACH)
+ break;
+ }
+ return ret;
+}
+
+#define SUNOS_SV_INTERRUPT 2
+
+asmlinkage int sunos_sigaction (int sig,
+ struct old_sigaction32 __user *act,
+ struct old_sigaction32 __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ compat_old_sigset_t mask;
+ u32 u_handler;
+
+ if (get_user(u_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+ return -EFAULT;
+ new_ka.sa.sa_handler = compat_ptr(u_handler);
+ __get_user(mask, &act->sa_mask);
+ new_ka.sa.sa_restorer = NULL;
+ new_ka.ka_restorer = NULL;
+ siginitset(&new_ka.sa.sa_mask, mask);
+ new_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ old_ka.sa.sa_flags ^= SUNOS_SV_INTERRUPT;
+ if (put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int sunos_setsockopt(u32 __fd, u32 __level, u32 __optname,
+ char __user *optval, u32 __optlen)
+{
+ int fd = (int) __fd;
+ int level = (int) __level;
+ int optname = (int) __optname;
+ int optlen = (int) __optlen;
+ int tr_opt = optname;
+ int ret;
+
+ if (level == SOL_IP) {
+ /* Multicast socketopts (ttl, membership) */
+ if (tr_opt >=2 && tr_opt <= 6)
+ tr_opt += 30;
+ }
+ ret = sys_setsockopt(fd, level, tr_opt,
+ optval, optlen);
+ return ret;
+}
+
+asmlinkage int sunos_getsockopt(u32 __fd, u32 __level, u32 __optname,
+ char __user *optval, int __user *optlen)
+{
+ int fd = (int) __fd;
+ int level = (int) __level;
+ int optname = (int) __optname;
+ int tr_opt = optname;
+ int ret;
+
+ if (level == SOL_IP) {
+ /* Multicast socketopts (ttl, membership) */
+ if (tr_opt >=2 && tr_opt <= 6)
+ tr_opt += 30;
+ }
+ ret = compat_sys_getsockopt(fd, level, tr_opt,
+ optval, optlen);
+ return ret;
+}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
new file mode 100644
index 00000000000..48170f77fff
--- /dev/null
+++ b/arch/sparc64/kernel/systbls.S
@@ -0,0 +1,251 @@
+/* $Id: systbls.S,v 1.81 2002/02/08 03:57:14 davem Exp $
+ * systbls.S: System call entry point tables for OS compatibility.
+ * The native Linux system call table lives here also.
+ *
+ * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <linux/config.h>
+
+ .text
+ .align 4
+
+#ifdef CONFIG_COMPAT
+ /* First, the 32-bit Linux native syscall table. */
+
+ /* One .word entry per syscall number, in strictly increasing
+  * order (the bracketed numeric markers label every tenth slot).
+  * sys32_ and compat_sys_ entries translate 32-bit user arguments;
+  * sys_nis_syscall / sys_ni_syscall fill unimplemented slots.
+  * Slot positions are ABI -- never reorder, only append.
+  */
+ .globl sys_call_table32
+sys_call_table32:
+/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
+/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
+/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys32_chown16, sys32_mknod
+/*15*/ .word sys_chmod, sys32_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
+/*20*/ .word sys_getpid, sys_capget, sys_capset, sys32_setuid16, sys32_getuid16
+/*25*/ .word compat_sys_time, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
+/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
+ .word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
+/*40*/ .word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
+ .word sys32_umount, sys32_setgid16, sys32_getgid16, sys32_signal, sys32_geteuid16
+/*50*/ .word sys32_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
+ .word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve
+/*60*/ .word sys32_umask, sys_chroot, compat_sys_newfstat, sys_fstat64, sys_getpagesize
+ .word sys32_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid
+/*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
+ .word sys_madvise, sys_vhangup, sys32_truncate64, sys_mincore, sys32_getgroups16
+/*80*/ .word sys32_setgroups16, sys_getpgrp, sys32_setgroups, sys32_setitimer, sys32_ftruncate64
+ .word sys32_swapon, sys32_getitimer, sys_setuid, sys32_sethostname, sys_setgid
+/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
+ .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending
+ .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid
+/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
+ .word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
+/*120*/ .word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod
+ .word sys_nis_syscall, sys32_setreuid16, sys32_setregid16, sys_rename, sys_truncate
+/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
+ .word sys_nis_syscall, sys32_mkdir, sys_rmdir, sys32_utimes, sys_stat64
+/*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
+ .word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write
+/*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
+ .word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
+/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
+ .word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
+/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
+ .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
+/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
+ .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sparc64_newuname
+/*190*/ .word sys32_init_module, sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl
+ .word sys32_epoll_wait, sys_nis_syscall, sys_getppid, sys32_sigaction, sys_sgetmask
+/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
+ .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64
+/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo
+ .word sys32_ipc, sys32_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
+/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid
+ .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16
+/*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64
+ .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
+/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
+ .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
+/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+ .word sys_ni_syscall, sys32_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
+/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
+ .word sys_timer_delete, sys32_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
+/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
+ .word sys_mq_timedsend, sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
+/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
+
+#endif /* CONFIG_COMPAT */
+
+ /* Now the 64-bit native Linux syscall table. */
+
+ .align 4
+ /* 64-bit native table. Both symbols name the same table; slot N
+  * is syscall N and positions are ABI -- never reorder.
+  */
+ .globl sys_call_table64, sys_call_table
+sys_call_table64:
+sys_call_table:
+/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
+/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
+/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
+/*15*/ .word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
+/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
+/*25*/ .word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
+/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
+ .word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
+/*40*/ .word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall
+ .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
+/*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
+ .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
+/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_nis_syscall, sys_getpagesize
+ .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
+/*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys64_munmap, sys_mprotect
+ .word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
+/*80*/ .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
+ .word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
+/*90*/ .word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
+ .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
+/*100*/ .word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
+ .word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
+/*110*/ .word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg
+ .word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
+/*120*/ .word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
+ .word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
+/*130*/ .word sys_ftruncate, sys_flock, sys_nis_syscall, sys_sendto, sys_shutdown
+ .word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_nis_syscall
+/*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
+ .word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
+/*150*/ .word sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
+ .word sys_nis_syscall, sys_ni_syscall, sys_statfs, sys_fstatfs, sys_oldumount
+/*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
+ .word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
+/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
+ .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
+/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
+ .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sparc64_newuname
+/*190*/ .word sys_init_module, sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
+ .word sys_epoll_wait, sys_nis_syscall, sys_getppid, sys_nis_syscall, sys_sgetmask
+/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
+ .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
+/*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
+ .word sys_ipc, sys_nis_syscall, sys_clone, sys_nis_syscall, sys_adjtimex
+/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
+ .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
+/*230*/ .word sys_select, sys_nis_syscall, sys_nis_syscall, sys_stime, sys_statfs64
+ .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
+/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
+ .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
+/*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+ .word sys_ni_syscall, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
+/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
+ .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
+/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
+ .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
+/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl
+
+#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
+ defined(CONFIG_SOLARIS_EMUL_MODULE)
+ /* Now the 32-bit SunOS syscall table. */
+
+ /* Indexed by SunOS 4.x syscall number, three entries per line
+  * (numeric markers label slots 0/50/100/...). sunos_nosys fills
+  * every syscall SunOS defined but Linux does not emulate.
+  */
+ .align 4
+ .globl sunos_sys_table
+sunos_sys_table:
+/*0*/ .word sunos_indir, sys32_exit, sys_fork
+ .word sunos_read, sunos_write, sunos_open
+ .word sys_close, sunos_wait4, sys_creat
+ .word sys_link, sys_unlink, sunos_execv
+ .word sys_chdir, sunos_nosys, sys32_mknod
+ .word sys_chmod, sys32_lchown16, sunos_brk
+ .word sunos_nosys, sys32_lseek, sunos_getpid
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_getuid, sunos_nosys, sys_ptrace
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sys_access, sunos_nosys, sunos_nosys
+ .word sys_sync, sys_kill, compat_sys_newstat
+ .word sunos_nosys, compat_sys_newlstat, sys_dup
+ .word sys_pipe, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_getgid
+ .word sunos_nosys, sunos_nosys
+/*50*/ .word sunos_nosys, sys_acct, sunos_nosys
+ .word sunos_mctl, sunos_ioctl, sys_reboot
+ .word sunos_nosys, sys_symlink, sys_readlink
+ .word sys32_execve, sys_umask, sys_chroot
+ .word compat_sys_newfstat, sunos_nosys, sys_getpagesize
+ .word sys_msync, sys_vfork, sunos_nosys
+ .word sunos_nosys, sunos_sbrk, sunos_sstk
+ .word sunos_mmap, sunos_vadvise, sys_munmap
+ .word sys_mprotect, sys_madvise, sys_vhangup
+ .word sunos_nosys, sys_mincore, sys32_getgroups16
+ .word sys32_setgroups16, sys_getpgrp, sunos_setpgrp
+ .word compat_sys_setitimer, sunos_nosys, sys_swapon
+ .word compat_sys_getitimer, sys_gethostname, sys_sethostname
+ .word sunos_getdtablesize, sys_dup2, sunos_nop
+ .word compat_sys_fcntl, sunos_select, sunos_nop
+ .word sys_fsync, sys32_setpriority, sys32_socket
+ .word sys32_connect, sunos_accept
+/*100*/ .word sys_getpriority, sunos_send, sunos_recv
+ .word sunos_nosys, sys32_bind, sunos_setsockopt
+ .word sys32_listen, sunos_nosys, sunos_sigaction
+ .word sunos_sigblock, sunos_sigsetmask, sys_sigpause
+ .word sys32_sigstack, sys32_recvmsg, sys32_sendmsg
+ .word sunos_nosys, sys32_gettimeofday, compat_sys_getrusage
+ .word sunos_getsockopt, sunos_nosys, sunos_readv
+ .word sunos_writev, sys32_settimeofday, sys32_fchown16
+ .word sys_fchmod, sys32_recvfrom, sys32_setreuid16
+ .word sys32_setregid16, sys_rename, sys_truncate
+ .word sys_ftruncate, sys_flock, sunos_nosys
+ .word sys32_sendto, sys32_shutdown, sys32_socketpair
+ .word sys_mkdir, sys_rmdir, sys32_utimes
+ .word sys32_sigreturn, sunos_nosys, sys32_getpeername
+ .word sunos_gethostid, sunos_nosys, compat_sys_getrlimit
+ .word compat_sys_setrlimit, sunos_killpg, sunos_nosys
+ .word sunos_nosys, sunos_nosys
+/*150*/ .word sys32_getsockname, sunos_nosys, sunos_nosys
+ .word sys_poll, sunos_nosys, sunos_nosys
+ .word sunos_getdirentries, compat_sys_statfs, compat_sys_fstatfs
+ .word sys_oldumount, sunos_nosys, sunos_nosys
+ .word sys_getdomainname, sys_setdomainname
+ .word sunos_nosys, sys_quotactl, sunos_nosys
+ .word sunos_nosys, sys_ustat, sunos_semsys
+ .word sunos_nosys, sunos_shmsys, sunos_audit
+ .word sunos_nosys, sunos_getdents, sys_setsid
+ .word sys_fchdir, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, compat_sys_sigpending, sunos_nosys
+ .word sys_setpgid, sunos_pathconf, sunos_fpathconf
+ .word sunos_sysconf, sunos_uname, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+/*200*/ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys
+/*250*/ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys
+#endif
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
new file mode 100644
index 00000000000..6a717d4d2bc
--- /dev/null
+++ b/arch/sparc64/kernel/time.c
@@ -0,0 +1,1195 @@
+/* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
+ * time.c: UltraSparc timer and TOD clock support.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ *
+ * Based largely on code which is:
+ *
+ * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/mc146818rtc.h>
+#include <linux/delay.h>
+#include <linux/profile.h>
+#include <linux/bcd.h>
+#include <linux/jiffies.h>
+#include <linux/cpufreq.h>
+#include <linux/percpu.h>
+#include <linux/profile.h>
+
+#include <asm/oplib.h>
+#include <asm/mostek.h>
+#include <asm/timer.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/sbus.h>
+#include <asm/fhc.h>
+#include <asm/pbm.h>
+#include <asm/ebus.h>
+#include <asm/isa.h>
+#include <asm/starfire.h>
+#include <asm/smp.h>
+#include <asm/sections.h>
+#include <asm/cpudata.h>
+
+DEFINE_SPINLOCK(mostek_lock);
+DEFINE_SPINLOCK(rtc_lock);
+unsigned long mstk48t02_regs = 0UL;
+#ifdef CONFIG_PCI
+unsigned long ds1287_regs = 0UL;
+#endif
+
+extern unsigned long wall_jiffies;
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+static unsigned long mstk48t08_regs = 0UL;
+static unsigned long mstk48t59_regs = 0UL;
+
+static int set_rtc_mmss(unsigned long);
+
+/* Placeholder tick source used before the real one is selected;
+ * always reports tick 0.
+ */
+static __init unsigned long dummy_get_tick(void)
+{
+ return 0;
+}
+
+static __initdata struct sparc64_tick_ops dummy_tick_ops = {
+ .get_tick = dummy_get_tick,
+};
+
+/* Active tick source; later pointed at tick/stick/hbtick operations. */
+struct sparc64_tick_ops *tick_ops = &dummy_tick_ops;
+
+#define TICK_PRIV_BIT (1UL << 63)
+
+#ifdef CONFIG_SMP
+/* For profiling: if the sampled PC lies inside the lock primitives,
+ * attribute the sample to the lock's caller (saved return address)
+ * rather than the lock code itself.
+ */
+unsigned long profile_pc(struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
+
+ if (in_lock_functions(pc))
+ return regs->u_regs[UREG_RETPC];
+ return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+static void tick_disable_protection(void)
+{
+ /* Set things up so user can access tick register for profiling
+ * purposes. Also workaround BB_ERRATA_1 by doing a dummy
+ * read back of %tick after writing it.
+ */
+ __asm__ __volatile__(
+ " ba,pt %%xcc, 1f\n"
+ " nop\n"
+ " .align 64\n"
+ "1: rd %%tick, %%g2\n"
+ " add %%g2, 6, %%g2\n"
+ " andn %%g2, %0, %%g2\n"
+ " wrpr %%g2, 0, %%tick\n"
+ " rdpr %%tick, %%g0"
+ : /* no outputs */
+ : "r" (TICK_PRIV_BIT)
+ : "g2");
+}
+
+/* Arm %tick_cmpr to fire `offset' cycles from the current %tick.
+ * The wr is placed on a 64-byte aligned boundary and followed by a
+ * dummy read back (Blackbird %tick_cmpr workaround, see
+ * tick_add_compare below).
+ */
+static void tick_init_tick(unsigned long offset)
+{
+ tick_disable_protection();
+
+ __asm__ __volatile__(
+ " rd %%tick, %%g1\n"
+ " andn %%g1, %1, %%g1\n"
+ " ba,pt %%xcc, 1f\n"
+ " add %%g1, %0, %%g1\n"
+ " .align 64\n"
+ "1: wr %%g1, 0x0, %%tick_cmpr\n"
+ " rd %%tick_cmpr, %%g0"
+ : /* no outputs */
+ : "r" (offset), "r" (TICK_PRIV_BIT)
+ : "g1");
+}
+
+/* Read %tick with the privileged bit masked off. */
+static unsigned long tick_get_tick(void)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("rd %%tick, %0\n\t"
+ "mov %0, %0"
+ : "=r" (ret));
+
+ return ret & ~TICK_PRIV_BIT;
+}
+
+/* Read back the current %tick_cmpr value. */
+static unsigned long tick_get_compare(void)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
+ "mov %0, %0"
+ : "=r" (ret));
+
+ return ret;
+}
+
+/* Advance %tick_cmpr by `adj' and return the new compare value. */
+static unsigned long tick_add_compare(unsigned long adj)
+{
+ unsigned long new_compare;
+
+ /* Workaround for Spitfire Errata (#54 I think??), I discovered
+ * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
+ * number 103640.
+ *
+ * On Blackbird writes to %tick_cmpr can fail, the
+ * workaround seems to be to execute the wr instruction
+ * at the start of an I-cache line, and perform a dummy
+ * read back from %tick_cmpr right after writing to it. -DaveM
+ */
+ __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
+ "ba,pt %%xcc, 1f\n\t"
+ " add %0, %1, %0\n\t"
+ ".align 64\n"
+ "1:\n\t"
+ "wr %0, 0, %%tick_cmpr\n\t"
+ "rd %%tick_cmpr, %%g0"
+ : "=&r" (new_compare)
+ : "r" (adj));
+
+ return new_compare;
+}
+
+/* Advance %tick by `adj' and re-arm %tick_cmpr at the (unprivileged)
+ * new tick plus `offset'; returns the new %tick value.
+ */
+static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
+{
+ unsigned long new_tick, tmp;
+
+ /* Also need to handle Blackbird bug here too. */
+ __asm__ __volatile__("rd %%tick, %0\n\t"
+ "add %0, %2, %0\n\t"
+ "wrpr %0, 0, %%tick\n\t"
+ "andn %0, %4, %1\n\t"
+ "ba,pt %%xcc, 1f\n\t"
+ " add %1, %3, %1\n\t"
+ ".align 64\n"
+ "1:\n\t"
+ "wr %1, 0, %%tick_cmpr\n\t"
+ "rd %%tick_cmpr, %%g0"
+ : "=&r" (new_tick), "=&r" (tmp)
+ : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+
+ return new_tick;
+}
+
+/* Timer ops backed by the %tick / %tick_cmpr registers. */
+static struct sparc64_tick_ops tick_operations = {
+ .init_tick = tick_init_tick,
+ .get_tick = tick_get_tick,
+ .get_compare = tick_get_compare,
+ .add_tick = tick_add_tick,
+ .add_compare = tick_add_compare,
+ .softint_mask = 1UL << 0,
+};
+
+/* STICK variants: the system tick lives in %asr24, its compare
+ * register in %asr25.
+ */
+static void stick_init_tick(unsigned long offset)
+{
+ tick_disable_protection();
+
+ /* Let the user get at STICK too. */
+ __asm__ __volatile__(
+ " rd %%asr24, %%g2\n"
+ " andn %%g2, %0, %%g2\n"
+ " wr %%g2, 0, %%asr24"
+ : /* no outputs */
+ : "r" (TICK_PRIV_BIT)
+ : "g1", "g2");
+
+ __asm__ __volatile__(
+ " rd %%asr24, %%g1\n"
+ " andn %%g1, %1, %%g1\n"
+ " add %%g1, %0, %%g1\n"
+ " wr %%g1, 0x0, %%asr25"
+ : /* no outputs */
+ : "r" (offset), "r" (TICK_PRIV_BIT)
+ : "g1");
+}
+
+/* Read STICK (%asr24) with the privileged bit masked off. */
+static unsigned long stick_get_tick(void)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("rd %%asr24, %0"
+ : "=r" (ret));
+
+ return ret & ~TICK_PRIV_BIT;
+}
+
+/* Read the STICK compare register (%asr25). */
+static unsigned long stick_get_compare(void)
+{
+ unsigned long ret;
+
+ __asm__ __volatile__("rd %%asr25, %0"
+ : "=r" (ret));
+
+ return ret;
+}
+
+/* Advance STICK by `adj' and re-arm the compare at the new
+ * (unprivileged) value plus `offset'; returns the new STICK value.
+ */
+static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
+{
+ unsigned long new_tick, tmp;
+
+ __asm__ __volatile__("rd %%asr24, %0\n\t"
+ "add %0, %2, %0\n\t"
+ "wr %0, 0, %%asr24\n\t"
+ "andn %0, %4, %1\n\t"
+ "add %1, %3, %1\n\t"
+ "wr %1, 0, %%asr25"
+ : "=&r" (new_tick), "=&r" (tmp)
+ : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+
+ return new_tick;
+}
+
+/* Advance the STICK compare by `adj'; returns the new compare value. */
+static unsigned long stick_add_compare(unsigned long adj)
+{
+ unsigned long new_compare;
+
+ __asm__ __volatile__("rd %%asr25, %0\n\t"
+ "add %0, %1, %0\n\t"
+ "wr %0, 0, %%asr25"
+ : "=&r" (new_compare)
+ : "r" (adj));
+
+ return new_compare;
+}
+
+/* Timer ops backed by STICK (%asr24/%asr25); softint bit 16. */
+static struct sparc64_tick_ops stick_operations = {
+ .init_tick = stick_init_tick,
+ .get_tick = stick_get_tick,
+ .get_compare = stick_get_compare,
+ .add_tick = stick_add_tick,
+ .add_compare = stick_add_compare,
+ .softint_mask = 1UL << 16,
+};
+
+/* On Hummingbird the STICK/STICK_CMPR register is implemented
+ * in I/O space. There are two 64-bit registers each, the
+ * first holds the low 32-bits of the value and the second holds
+ * the high 32-bits.
+ *
+ * Since STICK is constantly updating, we have to access it carefully.
+ *
+ * The sequence we use to read is:
+ * 1) read low
+ * 2) read high
+ * 3) read low again, if it rolled over increment high by 1
+ *
+ * Writing STICK safely is also tricky:
+ * 1) write low to zero
+ * 2) write high
+ * 3) write low
+ */
+#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
+#define HBIRD_STICK_ADDR 0x1fe0000f070UL
+
+/* Assemble the 64-bit Hummingbird STICK value from its two 32-bit
+ * I/O registers, using the low/high/low-reread rollover sequence
+ * described in the block comment above.
+ */
+static unsigned long __hbird_read_stick(void)
+{
+ unsigned long ret, tmp1, tmp2, tmp3;
+ unsigned long addr = HBIRD_STICK_ADDR;
+
+ __asm__ __volatile__("ldxa [%1] %5, %2\n\t"
+ "add %1, 0x8, %1\n\t"
+ "ldxa [%1] %5, %3\n\t"
+ "sub %1, 0x8, %1\n\t"
+ "ldxa [%1] %5, %4\n\t"
+ "cmp %4, %2\n\t"
+ "blu,a,pn %%xcc, 1f\n\t"
+ " add %3, 1, %3\n"
+ "1:\n\t"
+ "sllx %3, 32, %3\n\t"
+ "or %3, %4, %0\n\t"
+ : "=&r" (ret), "=&r" (addr),
+ "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
+ : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));
+
+ return ret;
+}
+
+/* Read the 64-bit STICK compare value (low word, then high word).
+ * No rollover dance needed: the compare register does not advance
+ * on its own.
+ */
+static unsigned long __hbird_read_compare(void)
+{
+ unsigned long low, high;
+ unsigned long addr = HBIRD_STICKCMP_ADDR;
+
+ __asm__ __volatile__("ldxa [%2] %3, %0\n\t"
+ "add %2, 0x8, %2\n\t"
+ "ldxa [%2] %3, %1"
+ : "=&r" (low), "=&r" (high), "=&r" (addr)
+ : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
+
+ return (high << 32UL) | low;
+}
+
+/* Write STICK using the safe ordering from the block comment above:
+ * zero the low word, write the high word, then write the low word.
+ */
+static void __hbird_write_stick(unsigned long val)
+{
+ unsigned long low = (val & 0xffffffffUL);
+ unsigned long high = (val >> 32UL);
+ unsigned long addr = HBIRD_STICK_ADDR;
+
+ __asm__ __volatile__("stxa %%g0, [%0] %4\n\t"
+ "add %0, 0x8, %0\n\t"
+ "stxa %3, [%0] %4\n\t"
+ "sub %0, 0x8, %0\n\t"
+ "stxa %2, [%0] %4"
+ : "=&r" (addr)
+ : "0" (addr), "r" (low), "r" (high),
+ "i" (ASI_PHYS_BYPASS_EC_E));
+}
+
+/* Write the STICK compare: high word first, then the low word. */
+static void __hbird_write_compare(unsigned long val)
+{
+ unsigned long low = (val & 0xffffffffUL);
+ unsigned long high = (val >> 32UL);
+ unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;
+
+ __asm__ __volatile__("stxa %3, [%0] %4\n\t"
+ "sub %0, 0x8, %0\n\t"
+ "stxa %2, [%0] %4"
+ : "=&r" (addr)
+ : "0" (addr), "r" (low), "r" (high),
+ "i" (ASI_PHYS_BYPASS_EC_E));
+}
+
+/* Hummingbird STICK variants built on the __hbird_* I/O accessors. */
+static void hbtick_init_tick(unsigned long offset)
+{
+ unsigned long val;
+
+ tick_disable_protection();
+
+ /* XXX This seems to be necessary to 'jumpstart' Hummingbird
+ * XXX into actually sending STICK interrupts. I think because
+ * XXX of how we store %tick_cmpr in head.S this somehow resets the
+ * XXX {TICK + STICK} interrupt mux. -DaveM
+ */
+ __hbird_write_stick(__hbird_read_stick());
+
+ val = __hbird_read_stick() & ~TICK_PRIV_BIT;
+ __hbird_write_compare(val + offset);
+}
+
+/* Current STICK value with the privileged bit masked off. */
+static unsigned long hbtick_get_tick(void)
+{
+ return __hbird_read_stick() & ~TICK_PRIV_BIT;
+}
+
+static unsigned long hbtick_get_compare(void)
+{
+ return __hbird_read_compare();
+}
+
+/* Advance STICK by `adj', re-arm the compare at the new value plus
+ * `offset', and return the new (unprivileged) STICK value.
+ */
+static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
+{
+ unsigned long val;
+
+ val = __hbird_read_stick() + adj;
+ __hbird_write_stick(val);
+
+ val &= ~TICK_PRIV_BIT;
+ __hbird_write_compare(val + offset);
+
+ return val;
+}
+
+/* Advance the compare register by `adj'; returns the value written. */
+static unsigned long hbtick_add_compare(unsigned long adj)
+{
+ unsigned long val = __hbird_read_compare() + adj;
+
+ val &= ~TICK_PRIV_BIT;
+ __hbird_write_compare(val);
+
+ return val;
+}
+
+/* Timer ops for Hummingbird's memory-mapped STICK; softint bit 0. */
+static struct sparc64_tick_ops hbtick_operations = {
+ .init_tick = hbtick_init_tick,
+ .get_tick = hbtick_get_tick,
+ .get_compare = hbtick_get_compare,
+ .add_tick = hbtick_add_tick,
+ .add_compare = hbtick_add_compare,
+ .softint_mask = 1UL << 0,
+};
+
+/* timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ *
+ * NOTE: On SUN5 systems the ticker interrupt comes in using 2
+ * interrupts, one at level14 and one with softint bit 0.
+ */
+unsigned long timer_tick_offset;
+unsigned long timer_tick_compare;
+
+static unsigned long timer_ticks_per_nsec_quotient;
+
+#define TICK_SIZE (tick_nsec / 1000)
+
+/* Write the system time back to the hardware clock roughly every 11
+ * minutes (660s), but only when NTP reports the clock synchronized
+ * (STA_UNSYNC clear) and we are close enough to a second boundary
+ * for set_rtc_mmss() to land accurately.
+ */
+static inline void timer_check_rtc(void)
+{
+ /* last time the cmos clock got updated */
+ static long last_rtc_update;
+
+ /* Determine when to update the Mostek clock. */
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
+ (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
+ if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ /* Write failed: back the stamp off by 600s so the
+ * next attempt happens 60s from now.
+ */
+ last_rtc_update = xtime.tv_sec - 600;
+ /* do it again in 60 s */
+ }
+}
+
+/* Timer interrupt handler: advances jiffies via do_timer(), re-arms
+ * the compare register, and loops in case the freshly armed compare
+ * value has already been passed by the tick counter (missed ticks).
+ */
+static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ unsigned long ticks, pstate;
+
+ /* xtime is updated under the xtime_lock seqlock. */
+ write_seqlock(&xtime_lock);
+
+ do {
+#ifndef CONFIG_SMP
+ profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(regs));
+#endif
+ do_timer(regs);
+
+ /* Guarantee that the following sequences execute
+ * uninterrupted.
+ */
+ __asm__ __volatile__("rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
+ : "=r" (pstate)
+ : "i" (PSTATE_IE));
+
+ timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
+ ticks = tick_ops->get_tick();
+
+ /* Restore PSTATE_IE. */
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+ : /* no outputs */
+ : "r" (pstate));
+ } while (time_after_eq(ticks, timer_tick_compare));
+
+ timer_check_rtc();
+
+ write_sequnlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SMP
+/* SMP timer tick: advances jiffies and keeps timer_tick_compare in
+ * step without writing the hardware compare register (per the
+ * inline comment below).
+ */
+void timer_tick_interrupt(struct pt_regs *regs)
+{
+ write_seqlock(&xtime_lock);
+
+ do_timer(regs);
+
+ /*
+ * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
+ */
+ timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;
+
+ timer_check_rtc();
+
+ write_sequnlock(&xtime_lock);
+}
+#endif
+
+/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
+static void __init kick_start_clock(void)
+{
+ unsigned long regs = mstk48t02_regs;
+ u8 sec, tmp;
+ int i, count;
+
+ prom_printf("CLOCK: Clock was stopped. Kick start ");
+
+ spin_lock_irq(&mostek_lock);
+
+ /* Turn on the kick start bit to start the oscillator.
+ * Writes to the time registers are gated by MSTK_CREG_WRITE,
+ * set before and cleared after each update sequence.
+ */
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp |= MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+ tmp = mostek_read(regs + MOSTEK_SEC);
+ tmp &= ~MSTK_STOP;
+ mostek_write(regs + MOSTEK_SEC, tmp);
+ tmp = mostek_read(regs + MOSTEK_HOUR);
+ tmp |= MSTK_KICK_START;
+ mostek_write(regs + MOSTEK_HOUR, tmp);
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp &= ~MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
+
+ /* Delay to allow the clock oscillator to start: busy-wait until
+ * the seconds register has ticked over three times.
+ */
+ sec = MSTK_REG_SEC(regs);
+ for (i = 0; i < 3; i++) {
+ while (sec == MSTK_REG_SEC(regs))
+ for (count = 0; count < 100000; count++)
+ /* nothing */ ;
+ prom_printf(".");
+ sec = MSTK_REG_SEC(regs);
+ }
+ prom_printf("\n");
+
+ spin_lock_irq(&mostek_lock);
+
+ /* Turn off kick start and set a "valid" time and date
+ * (1996-08-01, a Friday, 00:00:00).
+ */
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp |= MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+ tmp = mostek_read(regs + MOSTEK_HOUR);
+ tmp &= ~MSTK_KICK_START;
+ mostek_write(regs + MOSTEK_HOUR, tmp);
+ MSTK_SET_REG_SEC(regs,0);
+ MSTK_SET_REG_MIN(regs,0);
+ MSTK_SET_REG_HOUR(regs,0);
+ MSTK_SET_REG_DOW(regs,5);
+ MSTK_SET_REG_DOM(regs,1);
+ MSTK_SET_REG_MONTH(regs,8);
+ MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp &= ~MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
+
+ /* Ensure the kick start bit is off. If it isn't, turn it off. */
+ while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
+ prom_printf("CLOCK: Kick start still on!\n");
+
+ spin_lock_irq(&mostek_lock);
+
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp |= MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ tmp = mostek_read(regs + MOSTEK_HOUR);
+ tmp &= ~MSTK_KICK_START;
+ mostek_write(regs + MOSTEK_HOUR, tmp);
+
+ tmp = mostek_read(regs + MOSTEK_CREG);
+ tmp &= ~MSTK_CREG_WRITE;
+ mostek_write(regs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
+ }
+
+ prom_printf("CLOCK: Kick start procedure successful.\n");
+}
+
+/* Return nonzero if the clock chip battery is low. */
+static int __init has_low_battery(void)
+{
+ unsigned long regs = mstk48t02_regs;
+ u8 data1, data2;
+
+ spin_lock_irq(&mostek_lock);
+
+ data1 = mostek_read(regs + MOSTEK_EEPROM); /* Read some data. */
+ mostek_write(regs + MOSTEK_EEPROM, ~data1); /* Write back the complement. */
+ data2 = mostek_read(regs + MOSTEK_EEPROM); /* Read back the complement. */
+ mostek_write(regs + MOSTEK_EEPROM, data1); /* Restore original value. */
+
+ spin_unlock_irq(&mostek_lock);
+
+ /* If the complement did not stick, the chip blocked the write --
+ * taken as the low-battery indication.
+ */
+ return (data1 == data2); /* Was the write blocked? */
+}
+
+/* Read the time of day from the real time clock chip and set the system time. */
+static void __init set_system_time(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ unsigned long mregs = mstk48t02_regs;
+#ifdef CONFIG_PCI
+ unsigned long dregs = ds1287_regs;
+#else
+ unsigned long dregs = 0UL;
+#endif
+ u8 tmp;
+
+ if (!mregs && !dregs) {
+ prom_printf("Something wrong, clock regs not mapped yet.\n");
+ prom_halt();
+ }
+
+ if (mregs) {
+ /* NOTE: mostek_lock is taken here and only dropped at the
+ * bottom of the function, after MSTK_CREG_READ is cleared.
+ */
+ spin_lock_irq(&mostek_lock);
+
+ /* Traditional Mostek chip. Setting MSTK_CREG_READ latches
+ * the time registers so they can be read consistently.
+ */
+ tmp = mostek_read(mregs + MOSTEK_CREG);
+ tmp |= MSTK_CREG_READ;
+ mostek_write(mregs + MOSTEK_CREG, tmp);
+
+ sec = MSTK_REG_SEC(mregs);
+ min = MSTK_REG_MIN(mregs);
+ hour = MSTK_REG_HOUR(mregs);
+ day = MSTK_REG_DOM(mregs);
+ mon = MSTK_REG_MONTH(mregs);
+ year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
+ } else {
+ int i;
+
+ /* Dallas 12887 RTC chip. */
+
+ /* Stolen from arch/i386/kernel/time.c, see there for
+ * credits and descriptive comments.
+ */
+ for (i = 0; i < 1000000; i++) {
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+ break;
+ udelay(10);
+ }
+ for (i = 0; i < 1000000; i++) {
+ if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ break;
+ udelay(10);
+ }
+ /* Re-read until the seconds value is stable, i.e. no update
+ * happened mid-read.
+ */
+ do {
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
+ } while (sec != CMOS_READ(RTC_SECONDS));
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
+ }
+ /* Two-digit year windowing: 00..69 -> 2000..2069. */
+ if ((year += 1900) < 1970)
+ year += 100;
+ }
+
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ if (mregs) {
+ /* Unlatch the time registers and release the lock taken above. */
+ tmp = mostek_read(mregs + MOSTEK_CREG);
+ tmp &= ~MSTK_CREG_READ;
+ mostek_write(mregs + MOSTEK_CREG, tmp);
+
+ spin_unlock_irq(&mostek_lock);
+ }
+}
+
+/* Locate the TOD clock chip on one of the boot-time busses
+ * (FHC Central, then EBUS, then ISA bridges, then SBUS), record its
+ * register base in mstk48t02_regs/mstk48t08_regs/mstk48t59_regs or
+ * ds1287_regs, then set the system time from it.
+ *
+ * Runs once; subsequent calls return immediately.  Starfire has no
+ * TOD chip and obtains the time via an OBP "unix-gettod" service.
+ */
+void __init clock_probe(void)
+{
+	struct linux_prom_registers clk_reg[2];
+	char model[128];
+	int node, busnd = -1, err;
+	unsigned long flags;
+	struct linux_central *cbus;
+#ifdef CONFIG_PCI
+	struct linux_ebus *ebus = NULL;
+	struct sparc_isa_bridge *isa_br = NULL;
+#endif
+	static int invoked;
+
+	if (invoked)
+		return;
+	invoked = 1;
+
+
+	if (this_is_starfire) {
+		/* davem suggests we keep this within the 4M locked kernel image */
+		static char obp_gettod[256];
+		static u32 unix_tod;
+
+		sprintf(obp_gettod, "h# %08x unix-gettod",
+			(unsigned int) (long) &unix_tod);
+		prom_feval(obp_gettod);
+		xtime.tv_sec = unix_tod;
+		xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+		set_normalized_timespec(&wall_to_monotonic,
+					-xtime.tv_sec, -xtime.tv_nsec);
+		return;
+	}
+
+	local_irq_save(flags);
+
+	cbus = central_bus;
+	if (cbus != NULL)
+		busnd = central_bus->child->prom_node;
+
+	/* Check FHC Central then EBUSs then ISA bridges then SBUSs.
+	 * That way we handle the presence of multiple properly.
+	 *
+	 * As a special case, machines with Central must provide the
+	 * timer chip there.
+	 */
+#ifdef CONFIG_PCI
+	if (ebus_chain != NULL) {
+		ebus = ebus_chain;
+		if (busnd == -1)
+			busnd = ebus->prom_node;
+	}
+	if (isa_chain != NULL) {
+		isa_br = isa_chain;
+		if (busnd == -1)
+			busnd = isa_br->prom_node;
+	}
+#endif
+	if (sbus_root != NULL && busnd == -1)
+		busnd = sbus_root->prom_node;
+
+	if (busnd == -1) {
+		prom_printf("clock_probe: problem, cannot find bus to search.\n");
+		prom_halt();
+	}
+
+	node = prom_getchild(busnd);
+
+	while (1) {
+		if (!node)
+			model[0] = 0;
+		else
+			prom_getstring(node, "model", model, sizeof(model));
+		/* Keep walking until a node with a known clock model shows up. */
+		if (strcmp(model, "mk48t02") &&
+		    strcmp(model, "mk48t08") &&
+		    strcmp(model, "mk48t59") &&
+		    strcmp(model, "m5819") &&
+		    strcmp(model, "m5819p") &&
+		    strcmp(model, "m5823") &&
+		    strcmp(model, "ds1287")) {
+			if (cbus != NULL) {
+				prom_printf("clock_probe: Central bus lacks timer chip.\n");
+				prom_halt();
+			}
+
+			if (node != 0)
+				node = prom_getsibling(node);
+#ifdef CONFIG_PCI
+			while ((node == 0) && ebus != NULL) {
+				ebus = ebus->next;
+				if (ebus != NULL) {
+					busnd = ebus->prom_node;
+					node = prom_getchild(busnd);
+				}
+			}
+			while ((node == 0) && isa_br != NULL) {
+				isa_br = isa_br->next;
+				if (isa_br != NULL) {
+					busnd = isa_br->prom_node;
+					node = prom_getchild(busnd);
+				}
+			}
+#endif
+			if (node == 0) {
+				prom_printf("clock_probe: Cannot find timer chip\n");
+				prom_halt();
+			}
+			continue;
+		}
+
+		err = prom_getproperty(node, "reg", (char *)clk_reg,
+				       sizeof(clk_reg));
+		if(err == -1) {
+			prom_printf("clock_probe: Cannot get Mostek reg property\n");
+			prom_halt();
+		}
+
+		if (cbus != NULL) {
+			apply_fhc_ranges(central_bus->child, clk_reg, 1);
+			apply_central_ranges(central_bus, clk_reg, 1);
+		}
+#ifdef CONFIG_PCI
+		else if (ebus != NULL) {
+			struct linux_ebus_device *edev;
+
+			for_each_ebusdev(edev, ebus)
+				if (edev->prom_node == node)
+					break;
+			if (edev == NULL) {
+				if (isa_chain != NULL)
+					goto try_isa_clock;
+				prom_printf("%s: Mostek not probed by EBUS\n",
+					    __FUNCTION__);
+				prom_halt();
+			}
+
+			if (!strcmp(model, "ds1287") ||
+			    !strcmp(model, "m5819") ||
+			    !strcmp(model, "m5819p") ||
+			    !strcmp(model, "m5823")) {
+				ds1287_regs = edev->resource[0].start;
+			} else {
+				mstk48t59_regs = edev->resource[0].start;
+				mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
+			}
+			break;
+		}
+		else if (isa_br != NULL) {
+			struct sparc_isa_device *isadev;
+
+try_isa_clock:
+			for_each_isadev(isadev, isa_br)
+				if (isadev->prom_node == node)
+					break;
+			if (isadev == NULL) {
+				/* Bug fix: the "%s" conversion was previously
+				 * given no argument, so prom_printf consumed a
+				 * garbage vararg.  Pass __FUNCTION__ just like
+				 * the EBUS branch above.
+				 */
+				prom_printf("%s: Mostek not probed by ISA\n",
+					    __FUNCTION__);
+				prom_halt();
+			}
+			if (!strcmp(model, "ds1287") ||
+			    !strcmp(model, "m5819") ||
+			    !strcmp(model, "m5819p") ||
+			    !strcmp(model, "m5823")) {
+				ds1287_regs = isadev->resource.start;
+			} else {
+				mstk48t59_regs = isadev->resource.start;
+				mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
+			}
+			break;
+		}
+#endif
+		else {
+			if (sbus_root->num_sbus_ranges) {
+				int nranges = sbus_root->num_sbus_ranges;
+				int rngc;
+
+				for (rngc = 0; rngc < nranges; rngc++)
+					if (clk_reg[0].which_io ==
+					    sbus_root->sbus_ranges[rngc].ot_child_space)
+						break;
+				if (rngc == nranges) {
+					prom_printf("clock_probe: Cannot find ranges for "
+						    "clock regs.\n");
+					prom_halt();
+				}
+				clk_reg[0].which_io =
+					sbus_root->sbus_ranges[rngc].ot_parent_space;
+				clk_reg[0].phys_addr +=
+					sbus_root->sbus_ranges[rngc].ot_parent_base;
+			}
+		}
+
+		/* Decode "mk48tNN" from the model string: chars 5 and 6
+		 * are the two digits of the part number.
+		 */
+		if(model[5] == '0' && model[6] == '2') {
+			mstk48t02_regs = (((u64)clk_reg[0].phys_addr) |
+					  (((u64)clk_reg[0].which_io)<<32UL));
+		} else if(model[5] == '0' && model[6] == '8') {
+			mstk48t08_regs = (((u64)clk_reg[0].phys_addr) |
+					  (((u64)clk_reg[0].which_io)<<32UL));
+			mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
+		} else {
+			mstk48t59_regs = (((u64)clk_reg[0].phys_addr) |
+					  (((u64)clk_reg[0].which_io)<<32UL));
+			mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
+		}
+		break;
+	}
+
+	if (mstk48t02_regs != 0UL) {
+		/* Report a low battery voltage condition. */
+		if (has_low_battery())
+			prom_printf("NVRAM: Low battery voltage!\n");
+
+		/* Kick start the clock if it is completely stopped. */
+		if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
+			kick_start_clock();
+	}
+
+	set_system_time();
+
+	local_irq_restore(flags);
+}
+
+/* This gets the master TICK_INT timer going: select the tick_ops
+ * implementation for the CPU type, read the tick frequency from the
+ * PROM, and derive timer_tick_offset.  Returns the tick frequency.
+ */
+static unsigned long sparc64_init_timers(void)
+{
+	unsigned long clock;
+	int node;
+#ifdef CONFIG_SMP
+	extern void smp_tick_init(void);
+#endif
+
+	if (tlb_type == spitfire) {
+		unsigned long ver, manuf, impl;
+
+		/* Read the version register to identify the exact chip. */
+		__asm__ __volatile__ ("rdpr %%ver, %0"
+				      : "=&r" (ver));
+		manuf = ((ver >> 48) & 0xffff);
+		impl = ((ver >> 32) & 0xffff);
+		if (manuf == 0x17 && impl == 0x13) {
+			/* Hummingbird, aka Ultra-IIe */
+			tick_ops = &hbtick_operations;
+			node = prom_root_node;
+			clock = prom_getint(node, "stick-frequency");
+		} else {
+			tick_ops = &tick_operations;
+			cpu_find_by_instance(0, &node, NULL);
+			clock = prom_getint(node, "clock-frequency");
+		}
+	} else {
+		/* Cheetah and later use the system tick (STICK). */
+		tick_ops = &stick_operations;
+		node = prom_root_node;
+		clock = prom_getint(node, "stick-frequency");
+	}
+	timer_tick_offset = clock / HZ;	/* ticks per jiffy */
+
+#ifdef CONFIG_SMP
+	smp_tick_init();
+#endif
+
+	return clock;
+}
+
+/* Register the timer interrupt handler and program the first tick
+ * compare value, then enable local interrupts.  PSTATE_IE is cleared
+ * around init_tick() so the setup sequence cannot be interrupted.
+ */
+static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_regs *))
+{
+	unsigned long pstate;
+	int err;
+
+	/* Register IRQ handler. */
+	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
+			  "timer", NULL);
+
+	if (err) {
+		prom_printf("Serious problem, cannot register TICK_INT\n");
+		prom_halt();
+	}
+
+	/* Guarantee that the following sequences execute
+	 * uninterrupted.
+	 */
+	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
+			     "wrpr	%0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+
+	tick_ops->init_tick(timer_tick_offset);
+
+	/* Restore PSTATE_IE. */
+	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
+			     : /* no outputs */
+			     : "r" (pstate));
+
+	local_irq_enable();
+}
+
+/* Per-cpu reference values captured the first time cpufreq changes
+ * the clock; used to rescale udelay_val and clock_tick afterwards.
+ */
+struct freq_table {
+	unsigned long udelay_val_ref;	/* udelay_val at ref_freq */
+	unsigned long clock_tick_ref;	/* clock_tick at ref_freq */
+	unsigned int ref_freq;		/* frequency (0 = not yet captured) */
+};
+static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 };
+
+/* Return the reference clock tick for @cpu if cpufreq has recorded
+ * one, else the live value from cpu_data.
+ */
+unsigned long sparc64_get_clock_tick(unsigned int cpu)
+{
+	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
+
+	if (ft->clock_tick_ref)
+		return ft->clock_tick_ref;
+	return cpu_data(cpu).clock_tick;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+/* cpufreq transition callback: rescale the cpu's udelay_val and
+ * clock_tick to the new frequency.  Reference values are latched on
+ * first use.  Scaling is applied before a speed-up and after a
+ * slow-down (and on resume) so delays are never too short.
+ */
+static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+				    void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	unsigned int cpu = freq->cpu;
+	struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
+
+	if (!ft->ref_freq) {
+		/* First notification: capture the reference values. */
+		ft->ref_freq = freq->old;
+		ft->udelay_val_ref = cpu_data(cpu).udelay_val;
+		ft->clock_tick_ref = cpu_data(cpu).clock_tick;
+	}
+	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+	    (val == CPUFREQ_RESUMECHANGE)) {
+		cpu_data(cpu).udelay_val =
+			cpufreq_scale(ft->udelay_val_ref,
+				      ft->ref_freq,
+				      freq->new);
+		cpu_data(cpu).clock_tick =
+			cpufreq_scale(ft->clock_tick_ref,
+				      ft->ref_freq,
+				      freq->new);
+	}
+
+	return 0;
+}
+
+static struct notifier_block sparc64_cpufreq_notifier_block = {
+	.notifier_call = sparc64_cpufreq_notifier
+};
+
+#endif /* CONFIG_CPU_FREQ */
+
+/* Time interpolator backed by the CPU tick counter; the frequency
+ * field is filled in by time_init() before registration.
+ */
+static struct time_interpolator sparc64_cpu_interpolator = {
+	.source		=	TIME_SOURCE_CPU,
+	.shift		=	16,
+	.mask		=	0xffffffffffffffffLL
+};
+
+/* The quotient formula is taken from the IA64 port. */
+#define SPARC64_NSEC_PER_CYC_SHIFT	30UL
+/* Boot-time timer setup: probe the tick frequency, register the
+ * time interpolator, start the tick timer, and precompute the
+ * fixed-point ticks->nsec quotient used by sched_clock().
+ */
+void __init time_init(void)
+{
+	unsigned long clock = sparc64_init_timers();
+
+	sparc64_cpu_interpolator.frequency = clock;
+	register_time_interpolator(&sparc64_cpu_interpolator);
+
+	/* Now that the interpolator is registered, it is
+	 * safe to start the timer ticking.
+	 */
+	sparc64_start_timers(timer_interrupt);
+
+	/* Rounded fixed-point (nsec << SHIFT) / ticks quotient. */
+	timer_ticks_per_nsec_quotient =
+		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
+		  (clock / 2)) / clock);
+
+#ifdef CONFIG_CPU_FREQ
+	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
+				  CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+}
+
+/* Scheduler clock: current tick count converted to nanoseconds via
+ * the fixed-point quotient computed in time_init().
+ */
+unsigned long long sched_clock(void)
+{
+	unsigned long ticks = tick_ops->get_tick();
+
+	return (ticks * timer_ticks_per_nsec_quotient)
+		>> SPARC64_NSEC_PER_CYC_SHIFT;
+}
+
+/* Write the minutes and seconds of @nowtime back to the RTC (Mostek
+ * if present, else the Dallas/CMOS chip).  Only mm:ss are touched so
+ * unknown time zones in the hour field are left alone; returns 0 on
+ * success, -1 if there is no chip or the drift exceeds 15 minutes.
+ */
+static int set_rtc_mmss(unsigned long nowtime)
+{
+	int real_seconds, real_minutes, chip_minutes;
+	unsigned long mregs = mstk48t02_regs;
+#ifdef CONFIG_PCI
+	unsigned long dregs = ds1287_regs;
+#else
+	unsigned long dregs = 0UL;
+#endif
+	unsigned long flags;
+	u8 tmp;
+
+	/*
+	 * Not having a register set can lead to trouble.
+	 * Also starfire doesn't have a tod clock.
+	 */
+	if (!mregs && !dregs)
+		return -1;
+
+	if (mregs) {
+		spin_lock_irqsave(&mostek_lock, flags);
+
+		/* Read the current RTC minutes. */
+		tmp = mostek_read(mregs + MOSTEK_CREG);
+		tmp |= MSTK_CREG_READ;
+		mostek_write(mregs + MOSTEK_CREG, tmp);
+
+		chip_minutes = MSTK_REG_MIN(mregs);
+
+		tmp = mostek_read(mregs + MOSTEK_CREG);
+		tmp &= ~MSTK_CREG_READ;
+		mostek_write(mregs + MOSTEK_CREG, tmp);
+
+		/*
+		 * since we're only adjusting minutes and seconds,
+		 * don't interfere with hour overflow. This avoids
+		 * messing with unknown time zones but requires your
+		 * RTC not to be off by more than 15 minutes
+		 */
+		real_seconds = nowtime % 60;
+		real_minutes = nowtime / 60;
+		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
+			real_minutes += 30;	/* correct for half hour time zone */
+		real_minutes %= 60;
+
+		if (abs(real_minutes - chip_minutes) < 30) {
+			/* Latch WRITE, update mm:ss, unlatch. */
+			tmp = mostek_read(mregs + MOSTEK_CREG);
+			tmp |= MSTK_CREG_WRITE;
+			mostek_write(mregs + MOSTEK_CREG, tmp);
+
+			MSTK_SET_REG_SEC(mregs,real_seconds);
+			MSTK_SET_REG_MIN(mregs,real_minutes);
+
+			tmp = mostek_read(mregs + MOSTEK_CREG);
+			tmp &= ~MSTK_CREG_WRITE;
+			mostek_write(mregs + MOSTEK_CREG, tmp);
+
+			spin_unlock_irqrestore(&mostek_lock, flags);
+
+			return 0;
+		} else {
+			spin_unlock_irqrestore(&mostek_lock, flags);
+
+			return -1;
+		}
+	} else {
+		int retval = 0;
+		unsigned char save_control, save_freq_select;
+
+		/* Stolen from arch/i386/kernel/time.c, see there for
+		 * credits and descriptive comments.
+		 */
+		spin_lock_irqsave(&rtc_lock, flags);
+		save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
+		CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+
+		save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
+		CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+		chip_minutes = CMOS_READ(RTC_MINUTES);
+		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+			BCD_TO_BIN(chip_minutes);
+		real_seconds = nowtime % 60;
+		real_minutes = nowtime / 60;
+		if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
+			real_minutes += 30;
+		real_minutes %= 60;
+
+		if (abs(real_minutes - chip_minutes) < 30) {
+			if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+				BIN_TO_BCD(real_seconds);
+				BIN_TO_BCD(real_minutes);
+			}
+			CMOS_WRITE(real_seconds,RTC_SECONDS);
+			CMOS_WRITE(real_minutes,RTC_MINUTES);
+		} else {
+			printk(KERN_WARNING
+			       "set_rtc_mmss: can't update from %d to %d\n",
+			       chip_minutes, real_minutes);
+			retval = -1;
+		}
+
+		/* Restore control registers regardless of outcome. */
+		CMOS_WRITE(save_control, RTC_CONTROL);
+		CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+		spin_unlock_irqrestore(&rtc_lock, flags);
+
+		return retval;
+	}
+}
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
new file mode 100644
index 00000000000..2c8f9344b4e
--- /dev/null
+++ b/arch/sparc64/kernel/trampoline.S
@@ -0,0 +1,368 @@
+/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
+ * trampoline.S: Jump start slave processors on sparc64.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/lsu.h>
+#include <asm/dcr.h>
+#include <asm/dcu.h>
+#include <asm/pstate.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/spitfire.h>
+#include <asm/processor.h>
+#include <asm/thread_info.h>
+#include <asm/mmu.h>
+
+	.data
+	.align	8
+call_method:
+	.asciz	"call-method"
+	.align	8
+itlb_load:
+	.asciz	"SUNW,itlb-load"
+	.align	8
+dtlb_load:
+	.asciz	"SUNW,dtlb-load"
+
+	.text
+	.align		8
+	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
+	/* Slave CPU entry point.  On entry %o0 carries the argument
+	 * passed by the master (used below to locate the thread info).
+	 * Dispatch on CPU type, set up caches/MMU extensions, lock the
+	 * kernel image into the i/d TLBs via OBP, then fall into the
+	 * kernel proper (smp_callin -> cpu_idle).
+	 */
+sparc64_cpu_startup:
+	flushw
+
+	BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_startup)
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_startup)
+
+	ba,pt	%xcc, spitfire_startup
+	 nop
+
+cheetah_plus_startup:
+	/* Preserve OBP chosen DCU and DCR register settings. */
+	ba,pt	%xcc, cheetah_generic_startup
+	 nop
+
+cheetah_startup:
+	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
+	wr	%g1, %asr18
+
+	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
+	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
+	sllx	%g5, 32, %g5
+	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
+	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
+	membar	#Sync
+
+cheetah_generic_startup:
+	/* Zero out all the TSB extension registers. */
+	mov	TSB_EXTENSION_P, %g3
+	stxa	%g0, [%g3] ASI_DMMU
+	stxa	%g0, [%g3] ASI_IMMU
+	membar	#Sync
+
+	mov	TSB_EXTENSION_S, %g3
+	stxa	%g0, [%g3] ASI_DMMU
+	membar	#Sync
+
+	mov	TSB_EXTENSION_N, %g3
+	stxa	%g0, [%g3] ASI_DMMU
+	stxa	%g0, [%g3] ASI_IMMU
+	membar	#Sync
+
+	/* Disable STICK_INT interrupts. */
+	sethi		%hi(0x80000000), %g5
+	sllx		%g5, 32, %g5
+	wr		%g5, %asr25
+
+	ba,pt		%xcc, startup_continue
+	 nop
+
+spitfire_startup:
+	mov		(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
+	stxa		%g1, [%g0] ASI_LSU_CONTROL
+	membar		#Sync
+
+startup_continue:
+	wrpr		%g0, 15, %pil
+
+	sethi		%hi(0x80000000), %g2
+	sllx		%g2, 32, %g2
+	wr		%g2, 0, %tick_cmpr
+
+	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
+	 * We lock 2 consecutive entries if we are 'bigkernel'.
+	 */
+	mov		%o0, %l0
+
+	/* Serialize against other CPUs calling into the PROM. */
+	sethi		%hi(prom_entry_lock), %g2
+1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
+	brnz,pn		%g1, 1b
+	 membar		#StoreLoad | #StoreStore
+
+	sethi		%hi(p1275buf), %g2
+	or		%g2, %lo(p1275buf), %g2
+	ldx		[%g2 + 0x10], %l2
+	mov		%sp, %l1
+	add		%l2, -(192 + 128), %sp
+	flushw
+
+	/* Build the OBP "call-method SUNW,itlb-load" argument array
+	 * on the stack and invoke the PROM cif handler.
+	 */
+	sethi		%hi(call_method), %g2
+	or		%g2, %lo(call_method), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x00]
+	mov		5, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x08]
+	mov		1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x10]
+	sethi		%hi(itlb_load), %g2
+	or		%g2, %lo(itlb_load), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x18]
+	sethi		%hi(mmu_ihandle_cache), %g2
+	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x20]
+	sethi		%hi(KERNBASE), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]
+	sethi		%hi(kern_locked_tte_data), %g2
+	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]
+
+	mov		15, %g2
+	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+
+	mov		63, %g2
+1:
+	stx		%g2, [%sp + 2047 + 128 + 0x38]
+	sethi		%hi(p1275buf), %g2
+	or		%g2, %lo(p1275buf), %g2
+	ldx		[%g2 + 0x08], %o1
+	call		%o1
+	 add		%sp, (2047 + 128), %o0
+
+	/* Second 4MB ITLB entry only for 'bigkernel'. */
+	sethi		%hi(bigkernel), %g2
+	lduw		[%g2 + %lo(bigkernel)], %g2
+	cmp		%g2, 0
+	be,pt		%icc, do_dtlb
+	 nop
+
+	sethi		%hi(call_method), %g2
+	or		%g2, %lo(call_method), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x00]
+	mov		5, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x08]
+	mov		1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x10]
+	sethi		%hi(itlb_load), %g2
+	or		%g2, %lo(itlb_load), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x18]
+	sethi		%hi(mmu_ihandle_cache), %g2
+	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x20]
+	sethi		%hi(KERNBASE + 0x400000), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]
+	sethi		%hi(kern_locked_tte_data), %g2
+	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
+	sethi		%hi(0x400000), %g1
+	add		%g2, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]
+
+	mov		14, %g2
+	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+
+	mov		62, %g2
+1:
+	stx		%g2, [%sp + 2047 + 128 + 0x38]
+	sethi		%hi(p1275buf), %g2
+	or		%g2, %lo(p1275buf), %g2
+	ldx		[%g2 + 0x08], %o1
+	call		%o1
+	 add		%sp, (2047 + 128), %o0
+
+do_dtlb:
+	/* Same dance for the DTLB lock-in. */
+	sethi		%hi(call_method), %g2
+	or		%g2, %lo(call_method), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x00]
+	mov		5, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x08]
+	mov		1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x10]
+	sethi		%hi(dtlb_load), %g2
+	or		%g2, %lo(dtlb_load), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x18]
+	sethi		%hi(mmu_ihandle_cache), %g2
+	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x20]
+	sethi		%hi(KERNBASE), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]
+	sethi		%hi(kern_locked_tte_data), %g2
+	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]
+
+	mov		15, %g2
+	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+
+	mov		63, %g2
+1:
+
+	stx		%g2, [%sp + 2047 + 128 + 0x38]
+	sethi		%hi(p1275buf), %g2
+	or		%g2, %lo(p1275buf), %g2
+	ldx		[%g2 + 0x08], %o1
+	call		%o1
+	 add		%sp, (2047 + 128), %o0
+
+	sethi		%hi(bigkernel), %g2
+	lduw		[%g2 + %lo(bigkernel)], %g2
+	cmp		%g2, 0
+	be,pt		%icc, do_unlock
+	 nop
+
+	sethi		%hi(call_method), %g2
+	or		%g2, %lo(call_method), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x00]
+	mov		5, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x08]
+	mov		1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x10]
+	sethi		%hi(dtlb_load), %g2
+	or		%g2, %lo(dtlb_load), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x18]
+	sethi		%hi(mmu_ihandle_cache), %g2
+	lduw		[%g2 + %lo(mmu_ihandle_cache)], %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x20]
+	sethi		%hi(KERNBASE + 0x400000), %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]
+	sethi		%hi(kern_locked_tte_data), %g2
+	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
+	sethi		%hi(0x400000), %g1
+	add		%g2, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]
+
+	mov		14, %g2
+	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+
+	mov		62, %g2
+1:
+
+	stx		%g2, [%sp + 2047 + 128 + 0x38]
+	sethi		%hi(p1275buf), %g2
+	or		%g2, %lo(p1275buf), %g2
+	ldx		[%g2 + 0x08], %o1
+	call		%o1
+	 add		%sp, (2047 + 128), %o0
+
+do_unlock:
+	/* Release the PROM entry lock and restore our own stack. */
+	sethi		%hi(prom_entry_lock), %g2
+	stb		%g0, [%g2 + %lo(prom_entry_lock)]
+	membar		#StoreStore | #StoreLoad
+
+	mov		%l1, %sp
+	flushw
+
+	mov		%l0, %o0
+
+	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
+	wr		%g0, 0, %fprs
+
+	/* XXX Buggy PROM... */
+	srl		%o0, 0, %o0
+	ldx		[%o0], %g6
+
+	wr		%g0, ASI_P, %asi
+
+	mov		PRIMARY_CONTEXT, %g7
+	stxa		%g0, [%g7] ASI_DMMU
+	membar		#Sync
+	mov		SECONDARY_CONTEXT, %g7
+	stxa		%g0, [%g7] ASI_DMMU
+	membar		#Sync
+
+	/* Point %sp at the top of this cpu's kernel stack. */
+	mov		1, %g5
+	sllx		%g5, THREAD_SHIFT, %g5
+	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+	add		%g6, %g5, %sp
+	mov		0, %fp
+
+	wrpr		%g0, 0, %wstate
+	wrpr		%g0, 0, %tl
+
+	/* Setup the trap globals, then we can resurface. */
+	rdpr		%pstate, %o1
+	mov		%g6, %o2
+	wrpr		%o1, PSTATE_AG, %pstate
+	sethi		%hi(sparc64_ttable_tl0), %g5
+	wrpr		%g5, %tba
+	mov		%o2, %g6
+
+	wrpr		%o1, PSTATE_MG, %pstate
+#define KERN_HIGHBITS		((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
+#define KERN_LOWBITS		(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+
+	mov		TSB_REG, %g1
+	stxa		%g0, [%g1] ASI_DMMU
+	membar		#Sync
+	mov		TLB_SFSR, %g1
+	sethi		%uhi(KERN_HIGHBITS), %g2
+	or		%g2, %ulo(KERN_HIGHBITS), %g2
+	sllx		%g2, 32, %g2
+	or		%g2, KERN_LOWBITS, %g2
+
+	BRANCH_IF_ANY_CHEETAH(g3,g7,9f)
+
+	ba,pt		%xcc, 1f
+	 nop
+
+9:
+	sethi		%uhi(VPTE_BASE_CHEETAH), %g3
+	or		%g3, %ulo(VPTE_BASE_CHEETAH), %g3
+	ba,pt		%xcc, 2f
+	 sllx		%g3, 32, %g3
+1:
+	sethi		%uhi(VPTE_BASE_SPITFIRE), %g3
+	or		%g3, %ulo(VPTE_BASE_SPITFIRE), %g3
+	sllx		%g3, 32, %g3
+
+2:
+	clr	%g7
+#undef KERN_HIGHBITS
+#undef KERN_LOWBITS
+
+	wrpr		%o1, 0x0, %pstate
+	ldx		[%g6 + TI_TASK], %g4
+
+	wrpr		%g0, 0, %wstate
+
+	call		init_irqwork_curcpu
+	 nop
+
+	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
+	ba,pt		%xcc, 2f
+	 nop
+
+1:	/* Start using proper page size encodings in ctx register.  */
+	sethi		%uhi(CTX_CHEETAH_PLUS_NUC), %g3
+	mov		PRIMARY_CONTEXT, %g1
+	sllx		%g3, 32, %g3
+	sethi		%hi(CTX_CHEETAH_PLUS_CTX0), %g2
+	or		%g3, %g2, %g3
+	stxa		%g3, [%g1] ASI_DMMU
+	membar		#Sync
+
+2:
+	rdpr		%pstate, %o1
+	or		%o1, PSTATE_IE, %o1
+	wrpr		%o1, 0, %pstate
+
+	call		prom_set_trap_table
+	 sethi		%hi(sparc64_ttable_tl0), %o0
+
+	call		smp_callin
+	 nop
+	call		cpu_idle
+	 mov		0, %o0
+	call		cpu_panic
+	 nop
+1:	b,a,pt		%xcc, 1b
+
+	.align		8
+sparc64_cpu_startup_end:
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
new file mode 100644
index 00000000000..56b203a2af6
--- /dev/null
+++ b/arch/sparc64/kernel/traps.c
@@ -0,0 +1,2118 @@
+/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
+ * arch/sparc64/kernel/traps.c
+ *
+ * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
+ */
+
+/*
+ * I like traps on v9, :))))
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/sched.h> /* for jiffies */
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <asm/fpumacro.h>
+#include <asm/lsu.h>
+#include <asm/dcu.h>
+#include <asm/estate.h>
+#include <asm/chafsr.h>
+#include <asm/psrcompat.h>
+#include <asm/processor.h>
+#include <asm/timer.h>
+#include <asm/kdebug.h>
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+
+/* Chain of callbacks notified on traps/die events; additions are
+ * serialized by die_notifier_lock.
+ */
+struct notifier_block *sparc64die_chain;
+static DEFINE_SPINLOCK(die_notifier_lock);
+
+/* Add @nb to the sparc64 die notifier chain.  Returns the result of
+ * notifier_chain_register().
+ */
+int register_die_notifier(struct notifier_block *nb)
+{
+	int err = 0;
+	unsigned long flags;
+	spin_lock_irqsave(&die_notifier_lock, flags);
+	err = notifier_chain_register(&sparc64die_chain, nb);
+	spin_unlock_irqrestore(&die_notifier_lock, flags);
+	return err;
+}
+
+/* When an irrecoverable trap occurs at tl > 0, the trap entry
+ * code logs the trap state registers at every level in the trap
+ * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
+ * is as follows:
+ */
+struct tl1_traplog {
+	struct {
+		unsigned long tstate;	/* saved trap state */
+		unsigned long tpc;	/* trap PC */
+		unsigned long tnpc;	/* trap next-PC */
+		unsigned long tt;	/* trap type */
+	} trapstack[4];			/* one entry per trap level */
+	unsigned long tl;		/* trap level at which the error hit */
+};
+
+/* Print the tl>0 trap stack recorded by the trap entry code (see the
+ * tl1_traplog layout comment above).  Fixes: the message previously
+ * read "track stack" instead of "trap stack", and the header line
+ * lacked the KERN_CRIT level that the per-entry lines already use.
+ */
+static void dump_tl1_traplog(struct tl1_traplog *p)
+{
+	int i;
+
+	printk(KERN_CRIT
+	       "TRAPLOG: Error at trap level 0x%lx, dumping trap stack.\n",
+	       p->tl);
+	for (i = 0; i < 4; i++) {
+		printk(KERN_CRIT
+		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
+		       "TNPC[%016lx] TT[%lx]\n",
+		       i + 1,
+		       p->trapstack[i].tstate, p->trapstack[i].tpc,
+		       p->trapstack[i].tnpc, p->trapstack[i].tt);
+	}
+}
+
+/* Entry point for the debug-call trap: just run the die notifier
+ * chain with DIE_CALL.
+ */
+void do_call_debug(struct pt_regs *regs)
+{ 
+	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT); 
+}
+
+/* Handler for unexpected traps.  lvl < 0x100 are hardware trap slots
+ * and are always fatal; software traps (lvl >= 0x100) kill the kernel
+ * if taken in privileged mode, otherwise the task gets SIGILL with
+ * ILL_ILLTRP and the software trap number in si_trapno.
+ */
+void bad_trap(struct pt_regs *regs, long lvl)
+{
+	char buffer[32];
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "bad trap", regs,
+		       0, lvl, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	if (lvl < 0x100) {
+		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
+		die_if_kernel(buffer, regs);
+	}
+
+	lvl -= 0x100;
+	if (regs->tstate & TSTATE_PRIV) {
+		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+		die_if_kernel(buffer, regs);
+	}
+	if (test_thread_flag(TIF_32BIT)) {
+		/* 32-bit tasks only see the low 32 bits of the PCs. */
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_ILLTRP;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = lvl;
+	force_sig_info(SIGILL, &info, current);
+}
+
+/* Unexpected trap taken at trap level > 0: dump the recorded trap
+ * stack (stored just past pt_regs) and die.
+ */
+void bad_trap_tl1(struct pt_regs *regs, long lvl)
+{
+	char buffer[32];
+	
+	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+		       0, lvl, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+	die_if_kernel (buffer, regs);
+}
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+/* Report the file/line of a BUG() before the trap machinery kills
+ * the task; bust_spinlocks(1) makes sure the message gets out.
+ */
+void do_BUG(const char *file, int line)
+{
+	bust_spinlocks(1);
+	printk("kernel BUG at %s:%d!\n", file, line);
+}
+#endif
+
+/* Instruction access exception (trap type 0x8).  Fatal in kernel
+ * mode; user mode gets SIGSEGV/SEGV_MAPERR at the faulting PC.
+ */
+void instruction_access_exception(struct pt_regs *regs,
+				  unsigned long sfsr, unsigned long sfar)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "instruction access exception", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV) {
+		printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
+		       sfsr, sfar);
+		die_if_kernel("Iax", regs);
+	}
+	if (test_thread_flag(TIF_32BIT)) {
+		/* 32-bit tasks only see the low 32 bits of the PCs. */
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGSEGV;
+	info.si_errno = 0;
+	info.si_code = SEGV_MAPERR;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = 0;
+	force_sig_info(SIGSEGV, &info, current);
+}
+
+/* Same exception taken at trap level > 0: dump the trap stack first,
+ * then handle it like the tl0 case.
+ */
+void instruction_access_exception_tl1(struct pt_regs *regs,
+				      unsigned long sfsr, unsigned long sfar)
+{
+	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	instruction_access_exception(regs, sfsr, sfar);
+}
+
+/* Data access exception (trap type 0x30).  Kernel-mode faults inside
+ * uaccess regions are redirected to their exception-table fixup;
+ * other kernel faults die; user faults get SIGSEGV at the fault
+ * address (SFAR).
+ */
+void data_access_exception(struct pt_regs *regs,
+			   unsigned long sfsr, unsigned long sfar)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "data access exception", regs,
+		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV) {
+		/* Test if this comes from uaccess places. */
+		unsigned long fixup;
+		unsigned long g2 = regs->u_regs[UREG_G2];
+
+		if ((fixup = search_extables_range(regs->tpc, &g2))) {
+			/* Ouch, somebody is trying ugly VM hole tricks on us... */
+#ifdef DEBUG_EXCEPTIONS
+			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
+			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
+			       "g2<%016lx>\n", regs->tpc, fixup, g2);
+#endif
+			/* Resume at the fixup handler. */
+			regs->tpc = fixup;
+			regs->tnpc = regs->tpc + 4;
+			regs->u_regs[UREG_G2] = g2;
+			return;
+		}
+		/* No fixup entry: unhandled kernel fault, fatal. */
+		printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
+		       sfsr, sfar);
+		die_if_kernel("Dax", regs);
+	}
+
+	info.si_signo = SIGSEGV;
+	info.si_errno = 0;
+	info.si_code = SEGV_MAPERR;
+	info.si_addr = (void __user *)sfar;
+	info.si_trapno = 0;
+	force_sig_info(SIGSEGV, &info, current);
+}
+
+#ifdef CONFIG_PCI
+/* This is really pathetic... */
+extern volatile int pci_poke_in_progress;
+extern volatile int pci_poke_cpu;
+extern volatile int pci_poke_faulted;
+#endif
+
+/* When access exceptions happen, we must do this.
+ *
+ * Zero every i/d cache tag (both ways, hence PAGE_SIZE << 1, one tag
+ * per 32-byte line) and then turn the L1 caches back on in the LSU
+ * control register.  Spitfire only; BUG() otherwise.
+ */
+static void spitfire_clean_and_reenable_l1_caches(void)
+{
+	unsigned long va;
+
+	if (tlb_type != spitfire)
+		BUG();
+
+	/* Clean 'em. */
+	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
+		spitfire_put_icache_tag(va, 0x0);
+		spitfire_put_dcache_tag(va, 0x0);
+	}
+
+	/* Re-enable in LSU. */
+	__asm__ __volatile__("flush %%g6\n\t"
+			     "membar #Sync\n\t"
+			     "stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
+				    LSU_CONTROL_IM | LSU_CONTROL_DM),
+			     "i" (ASI_LSU_CONTROL)
+			     : "memory");
+}
+
+/* Instruction access error: scrub and re-enable the L1 caches, run
+ * the notifier chain, then raise SIGBUS/BUS_OBJERR on the task.
+ */
+void do_iae(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	spitfire_clean_and_reenable_l1_caches();
+
+	if (notify_die(DIE_TRAP, "instruction access exception", regs,
+		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_OBJERR;
+	info.si_addr = (void *)0;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
+/* Data access error.  If a PCI config-space poke is in flight on this
+ * cpu, the fault is expected: flag it and skip past the faulting
+ * instruction.  Otherwise scrub the caches and raise SIGBUS.
+ */
+void do_dae(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+#ifdef CONFIG_PCI
+	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+		spitfire_clean_and_reenable_l1_caches();
+
+		pci_poke_faulted = 1;
+
+		/* On cheetah/cheetah+ the reported TPC is one instruction
+		 * earlier than on spitfire, so advance it here before the
+		 * common skip below.
+		 */
+		if (tlb_type == cheetah || tlb_type == cheetah_plus)
+			regs->tpc += 4;
+
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+	spitfire_clean_and_reenable_l1_caches();
+
+	if (notify_die(DIE_TRAP, "data access exception", regs,
+		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_OBJERR;
+	info.si_addr = (void *)0;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
+/* ECC syndrome decode table, indexed by the 8-bit syndrome from the
+ * UDB error status; cee_log() below feeds the decoded value to
+ * prom_getunumber() to identify the failing memory module.
+ * NOTE(review): the exact meaning of the 0x48-0x4c codes is not
+ * derivable from this file — presumably multi-bit/invalid syndromes;
+ * confirm against the UltraSPARC error-handling documentation.
+ */
+static char ecc_syndrome_table[] = {
+	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
+	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
+	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
+	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
+	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
+	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
+	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
+	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
+	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
+	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
+	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
+	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
+	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
+	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
+	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
+	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
+	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
+	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
+	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
+	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
+	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
+	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
+	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
+	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
+	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
+	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
+	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
+	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
+	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
+	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
+	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
+	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
+};
+
+/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
+ * in the following format. The AFAR is left as is, with
+ * reserved bits cleared, and is a raw 40-bit physical
+ * address.
+ */
+/* UDB-High status: a 10-bit field (UE, CE, 8-bit E-syndrome)
+ * packed starting at bit 43 of ce_status.
+ */
+#define CE_STATUS_UDBH_UE (1UL << (43 + 9))
+#define CE_STATUS_UDBH_CE (1UL << (43 + 8))
+#define CE_STATUS_UDBH_ESYNDR (0xffUL << 43)
+#define CE_STATUS_UDBH_SHIFT 43
+/* UDB-Low status: same 10-bit layout, packed at bit 33. */
+#define CE_STATUS_UDBL_UE (1UL << (33 + 9))
+#define CE_STATUS_UDBL_CE (1UL << (33 + 8))
+#define CE_STATUS_UDBL_ESYNDR (0xffUL << 33)
+#define CE_STATUS_UDBL_SHIFT 33
+/* The raw AFSR occupies the low 33 bits of ce_status. */
+#define CE_STATUS_AFSR_MASK (0x1ffffffffUL)
+#define CE_STATUS_AFSR_ME (1UL << 32)
+#define CE_STATUS_AFSR_PRIV (1UL << 31)
+#define CE_STATUS_AFSR_ISAP (1UL << 30)
+#define CE_STATUS_AFSR_ETP (1UL << 29)
+#define CE_STATUS_AFSR_IVUE (1UL << 28)
+#define CE_STATUS_AFSR_TO (1UL << 27)
+#define CE_STATUS_AFSR_BERR (1UL << 26)
+#define CE_STATUS_AFSR_LDP (1UL << 25)
+#define CE_STATUS_AFSR_CP (1UL << 24)
+#define CE_STATUS_AFSR_WP (1UL << 23)
+#define CE_STATUS_AFSR_EDP (1UL << 22)
+#define CE_STATUS_AFSR_UE (1UL << 21)
+#define CE_STATUS_AFSR_CE (1UL << 20)
+#define CE_STATUS_AFSR_ETS (0xfUL << 16)
+#define CE_STATUS_AFSR_ETS_SHIFT 16
+#define CE_STATUS_AFSR_PSYND (0xffffUL << 0)
+#define CE_STATUS_AFSR_PSYND_SHIFT 0
+
+/* Layout of Ecache TAG Parity Syndrome of AFSR */
+#define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */
+#define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */
+#define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */
+#define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */
+
+/* Printed in place of a memory module name when the PROM cannot
+ * map a syndrome/AFAR pair to a unum (see cee_log below).
+ */
+static char *syndrome_unknown = "<Unknown>";
+
+/* Log a correctable ECC error trap.  ce_status is the packed
+ * AFSR/UDBH/UDBL word built by cee_trap in entry.S (see the
+ * CE_STATUS_* definitions above); afar is the raw physical
+ * address of the error.  For each UDB half that reports a CE
+ * (bit 8 of its 10-bit status), translate the 8-bit syndrome
+ * through ecc_syndrome_table and ask the PROM which memory
+ * module the AFAR maps to.
+ */
+asmlinkage void cee_log(unsigned long ce_status,
+			unsigned long afar,
+			struct pt_regs *regs)
+{
+	char memmod_str[64];
+	char *p;
+	unsigned short scode, udb_reg;
+
+	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
+	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
+	       smp_processor_id(),
+	       (ce_status & CE_STATUS_AFSR_MASK),
+	       afar,
+	       ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
+	       ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
+
+	/* UDB-Low half: bit 8 set means a CE was captured there. */
+	udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
+	if (udb_reg & (1 << 8)) {
+		scode = ecc_syndrome_table[udb_reg & 0xff];
+		if (prom_getunumber(scode, afar,
+				    memmod_str, sizeof(memmod_str)) == -1)
+			p = syndrome_unknown;
+		else
+			p = memmod_str;
+		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
+		       "Memory Module \"%s\"\n",
+		       smp_processor_id(), scode, p);
+	}
+
+	/* UDB-High half, identical treatment. */
+	udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
+	if (udb_reg & (1 << 8)) {
+		scode = ecc_syndrome_table[udb_reg & 0xff];
+		if (prom_getunumber(scode, afar,
+				    memmod_str, sizeof(memmod_str)) == -1)
+			p = syndrome_unknown;
+		else
+			p = memmod_str;
+		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
+		       "Memory Module \"%s\"\n",
+		       smp_processor_id(), scode, p);
+	}
+}
+
+/* Cheetah error trap handling. */
+/* Parameters of the physically contiguous region used for
+ * displacement-flushing the E-cache; discovered and set once
+ * by cheetah_ecache_flush_init().
+ */
+static unsigned long ecache_flush_physbase;
+static unsigned long ecache_flush_linesize;
+static unsigned long ecache_flush_size;
+
+/* WARNING: The error trap handlers in assembly know the precise
+ * layout of the following structure.
+ *
+ * C-level handlers below use this information to log the error
+ * and then determine how to recover (if possible).
+ */
+struct cheetah_err_info {
+/*0x00*/u64 afsr;
+/*0x08*/u64 afar;
+
+	/* D-cache state */
+/*0x10*/u64 dcache_data[4];	/* The actual data */
+/*0x30*/u64 dcache_index;	/* D-cache index */
+/*0x38*/u64 dcache_tag;		/* D-cache tag/valid */
+/*0x40*/u64 dcache_utag;	/* D-cache microtag */
+/*0x48*/u64 dcache_stag;	/* D-cache snooptag */
+
+	/* I-cache state */
+/*0x50*/u64 icache_data[8];	/* The actual insns + predecode */
+/*0x90*/u64 icache_index;	/* I-cache index */
+/*0x98*/u64 icache_tag;		/* I-cache phys tag */
+/*0xa0*/u64 icache_utag;	/* I-cache microtag */
+/*0xa8*/u64 icache_stag;	/* I-cache snooptag */
+/*0xb0*/u64 icache_upper;	/* I-cache upper-tag */
+/*0xb8*/u64 icache_lower;	/* I-cache lower-tag */
+
+	/* E-cache state */
+/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
+/*0xe0*/u64 ecache_index;	/* E-cache index */
+/*0xe8*/u64 ecache_tag;		/* E-cache tag/state */
+
+	/* Pad out to 0x100 bytes total (per the offsets above) so
+	 * entries can be indexed with a simple shift.
+	 */
+/*0xf0*/u64 __pad[32 - 30];
+};
+/* Sentinel stored in .afsr marking a log slot as free for reuse. */
+#define CHAFSR_INVALID ((u64)-1L)
+
+/* This table is ordered in priority of errors and matches the
+ * AFAR overwrite policy as well.
+ */
+
+/* One AFSR error bit (or bit group) paired with its human-readable
+ * name.  The tables below are terminated by a zero mask entry.
+ */
+struct afsr_error_table {
+	unsigned long mask;
+	const char *name;
+};
+
+static const char CHAFSR_PERR_msg[] =
+ "System interface protocol error";
+static const char CHAFSR_IERR_msg[] =
+ "Internal processor error";
+static const char CHAFSR_ISAP_msg[] =
+ "System request parity error on incoming addresss";
+static const char CHAFSR_UCU_msg[] =
+ "Uncorrectable E-cache ECC error for ifetch/data";
+static const char CHAFSR_UCC_msg[] =
+ "SW Correctable E-cache ECC error for ifetch/data";
+static const char CHAFSR_UE_msg[] =
+ "Uncorrectable system bus data ECC error for read";
+static const char CHAFSR_EDU_msg[] =
+ "Uncorrectable E-cache ECC error for stmerge/blkld";
+static const char CHAFSR_EMU_msg[] =
+ "Uncorrectable system bus MTAG error";
+static const char CHAFSR_WDU_msg[] =
+ "Uncorrectable E-cache ECC error for writeback";
+static const char CHAFSR_CPU_msg[] =
+ "Uncorrectable ECC error for copyout";
+static const char CHAFSR_CE_msg[] =
+ "HW corrected system bus data ECC error for read";
+static const char CHAFSR_EDC_msg[] =
+ "HW corrected E-cache ECC error for stmerge/blkld";
+static const char CHAFSR_EMC_msg[] =
+ "HW corrected system bus MTAG ECC error";
+static const char CHAFSR_WDC_msg[] =
+ "HW corrected E-cache ECC error for writeback";
+static const char CHAFSR_CPC_msg[] =
+ "HW corrected ECC error for copyout";
+static const char CHAFSR_TO_msg[] =
+ "Unmapped error from system bus";
+static const char CHAFSR_BERR_msg[] =
+ "Bus error response from system bus";
+static const char CHAFSR_IVC_msg[] =
+ "HW corrected system bus data ECC error for ivec read";
+static const char CHAFSR_IVU_msg[] =
+ "Uncorrectable system bus data ECC error for ivec read";
+static struct afsr_error_table __cheetah_error_table[] = {
+ { CHAFSR_PERR, CHAFSR_PERR_msg },
+ { CHAFSR_IERR, CHAFSR_IERR_msg },
+ { CHAFSR_ISAP, CHAFSR_ISAP_msg },
+ { CHAFSR_UCU, CHAFSR_UCU_msg },
+ { CHAFSR_UCC, CHAFSR_UCC_msg },
+ { CHAFSR_UE, CHAFSR_UE_msg },
+ { CHAFSR_EDU, CHAFSR_EDU_msg },
+ { CHAFSR_EMU, CHAFSR_EMU_msg },
+ { CHAFSR_WDU, CHAFSR_WDU_msg },
+ { CHAFSR_CPU, CHAFSR_CPU_msg },
+ { CHAFSR_CE, CHAFSR_CE_msg },
+ { CHAFSR_EDC, CHAFSR_EDC_msg },
+ { CHAFSR_EMC, CHAFSR_EMC_msg },
+ { CHAFSR_WDC, CHAFSR_WDC_msg },
+ { CHAFSR_CPC, CHAFSR_CPC_msg },
+ { CHAFSR_TO, CHAFSR_TO_msg },
+ { CHAFSR_BERR, CHAFSR_BERR_msg },
+ /* These two do not update the AFAR. */
+ { CHAFSR_IVC, CHAFSR_IVC_msg },
+ { CHAFSR_IVU, CHAFSR_IVU_msg },
+ { 0, NULL },
+};
+/* Additional error bits present on Cheetah+ cpus, and the
+ * priority-ordered decode table that includes them.
+ */
+static const char CHPAFSR_DTO_msg[] =
+	"System bus unmapped error for prefetch/storequeue-read";
+static const char CHPAFSR_DBERR_msg[] =
+	"System bus error for prefetch/storequeue-read";
+static const char CHPAFSR_THCE_msg[] =
+	"Hardware corrected E-cache Tag ECC error";
+static const char CHPAFSR_TSCE_msg[] =
+	"SW handled correctable E-cache Tag ECC error";
+static const char CHPAFSR_TUE_msg[] =
+	"Uncorrectable E-cache Tag ECC error";
+static const char CHPAFSR_DUE_msg[] =
+	"System bus uncorrectable data ECC error due to prefetch/store-fill";
+static struct afsr_error_table __cheetah_plus_error_table[] = {
+	{ CHAFSR_PERR, CHAFSR_PERR_msg },
+	{ CHAFSR_IERR, CHAFSR_IERR_msg },
+	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
+	{ CHAFSR_UCU, CHAFSR_UCU_msg },
+	{ CHAFSR_UCC, CHAFSR_UCC_msg },
+	{ CHAFSR_UE, CHAFSR_UE_msg },
+	{ CHAFSR_EDU, CHAFSR_EDU_msg },
+	{ CHAFSR_EMU, CHAFSR_EMU_msg },
+	{ CHAFSR_WDU, CHAFSR_WDU_msg },
+	{ CHAFSR_CPU, CHAFSR_CPU_msg },
+	{ CHAFSR_CE, CHAFSR_CE_msg },
+	{ CHAFSR_EDC, CHAFSR_EDC_msg },
+	{ CHAFSR_EMC, CHAFSR_EMC_msg },
+	{ CHAFSR_WDC, CHAFSR_WDC_msg },
+	{ CHAFSR_CPC, CHAFSR_CPC_msg },
+	{ CHAFSR_TO, CHAFSR_TO_msg },
+	{ CHAFSR_BERR, CHAFSR_BERR_msg },
+	{ CHPAFSR_DTO, CHPAFSR_DTO_msg },
+	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
+	{ CHPAFSR_THCE, CHPAFSR_THCE_msg },
+	{ CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
+	{ CHPAFSR_TUE, CHPAFSR_TUE_msg },
+	{ CHPAFSR_DUE, CHPAFSR_DUE_msg },
+	/* These two do not update the AFAR. */
+	{ CHAFSR_IVC, CHAFSR_IVC_msg },
+	{ CHAFSR_IVU, CHAFSR_IVU_msg },
+	{ 0, NULL },
+};
+/* Jalapeno (JBUS) specific error bits and decode table. */
+static const char JPAFSR_JETO_msg[] =
+	"System interface protocol error, hw timeout caused";
+static const char JPAFSR_SCE_msg[] =
+	"Parity error on system snoop results";
+static const char JPAFSR_JEIC_msg[] =
+	"System interface protocol error, illegal command detected";
+static const char JPAFSR_JEIT_msg[] =
+	"System interface protocol error, illegal ADTYPE detected";
+static const char JPAFSR_OM_msg[] =
+	"Out of range memory error has occurred";
+static const char JPAFSR_ETP_msg[] =
+	"Parity error on L2 cache tag SRAM";
+static const char JPAFSR_UMS_msg[] =
+	"Error due to unsupported store";
+static const char JPAFSR_RUE_msg[] =
+	"Uncorrectable ECC error from remote cache/memory";
+static const char JPAFSR_RCE_msg[] =
+	"Correctable ECC error from remote cache/memory";
+static const char JPAFSR_BP_msg[] =
+	"JBUS parity error on returned read data";
+static const char JPAFSR_WBP_msg[] =
+	"JBUS parity error on data for writeback or block store";
+static const char JPAFSR_FRC_msg[] =
+	"Foreign read to DRAM incurring correctable ECC error";
+static const char JPAFSR_FRU_msg[] =
+	"Foreign read to DRAM incurring uncorrectable ECC error";
+static struct afsr_error_table __jalapeno_error_table[] = {
+	{ JPAFSR_JETO, JPAFSR_JETO_msg },
+	{ JPAFSR_SCE, JPAFSR_SCE_msg },
+	{ JPAFSR_JEIC, JPAFSR_JEIC_msg },
+	{ JPAFSR_JEIT, JPAFSR_JEIT_msg },
+	{ CHAFSR_PERR, CHAFSR_PERR_msg },
+	{ CHAFSR_IERR, CHAFSR_IERR_msg },
+	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
+	{ CHAFSR_UCU, CHAFSR_UCU_msg },
+	{ CHAFSR_UCC, CHAFSR_UCC_msg },
+	{ CHAFSR_UE, CHAFSR_UE_msg },
+	{ CHAFSR_EDU, CHAFSR_EDU_msg },
+	{ JPAFSR_OM, JPAFSR_OM_msg },
+	{ CHAFSR_WDU, CHAFSR_WDU_msg },
+	{ CHAFSR_CPU, CHAFSR_CPU_msg },
+	{ CHAFSR_CE, CHAFSR_CE_msg },
+	{ CHAFSR_EDC, CHAFSR_EDC_msg },
+	{ JPAFSR_ETP, JPAFSR_ETP_msg },
+	{ CHAFSR_WDC, CHAFSR_WDC_msg },
+	{ CHAFSR_CPC, CHAFSR_CPC_msg },
+	{ CHAFSR_TO, CHAFSR_TO_msg },
+	{ CHAFSR_BERR, CHAFSR_BERR_msg },
+	{ JPAFSR_UMS, JPAFSR_UMS_msg },
+	{ JPAFSR_RUE, JPAFSR_RUE_msg },
+	{ JPAFSR_RCE, JPAFSR_RCE_msg },
+	{ JPAFSR_BP, JPAFSR_BP_msg },
+	{ JPAFSR_WBP, JPAFSR_WBP_msg },
+	{ JPAFSR_FRC, JPAFSR_FRC_msg },
+	{ JPAFSR_FRU, JPAFSR_FRU_msg },
+	/* This one does not update the AFAR.  NOTE(review): unlike
+	 * the cheetah tables there is no CHAFSR_IVC entry here —
+	 * presumably Jalapeno reports ivec CEs differently; confirm.
+	 */
+	{ CHAFSR_IVU, CHAFSR_IVU_msg },
+	{ 0, NULL },
+};
+/* Decode table and valid-error-bit mask for the cpu model we are
+ * running on; chosen once in cheetah_ecache_flush_init().
+ */
+static struct afsr_error_table *cheetah_error_table;
+static unsigned long cheetah_afsr_errors;
+
+/* This is allocated at boot time based upon the largest hardware
+ * cpu ID in the system. We allocate two entries per cpu, one for
+ * TL==0 logging and one for TL >= 1 logging.
+ */
+struct cheetah_err_info *cheetah_error_log;
+
+/* Return this cpu's error log slot for the given afsr: slot
+ * 2*cpu holds TL==0 errors, 2*cpu+1 holds TL>=1 errors (selected
+ * via the CHAFSR_TL1 bit).  Returns NULL if the scoreboard has
+ * not been allocated yet (very early boot).
+ */
+static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
+{
+	struct cheetah_err_info *p;
+	int cpu = smp_processor_id();
+
+	if (!cheetah_error_log)
+		return NULL;
+
+	p = cheetah_error_log + (cpu * 2);
+	if ((afsr & CHAFSR_TL1) != 0UL)
+		p++;
+
+	return p;
+}
+
+extern unsigned int tl0_icpe[], tl1_icpe[];
+extern unsigned int tl0_dcpe[], tl1_dcpe[];
+extern unsigned int tl0_fecc[], tl1_fecc[];
+extern unsigned int tl0_cee[], tl1_cee[];
+extern unsigned int tl0_iae[], tl1_iae[];
+extern unsigned int tl0_dae[], tl1_dae[];
+extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
+extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
+extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
+extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
+extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
+
+void __init cheetah_ecache_flush_init(void)
+{
+ unsigned long largest_size, smallest_linesize, order, ver;
+ int node, i, instance;
+
+ /* Scan all cpu device tree nodes, note two values:
+ * 1) largest E-cache size
+ * 2) smallest E-cache line size
+ */
+ largest_size = 0UL;
+ smallest_linesize = ~0UL;
+
+ instance = 0;
+ while (!cpu_find_by_instance(instance, &node, NULL)) {
+ unsigned long val;
+
+ val = prom_getintdefault(node, "ecache-size",
+ (2 * 1024 * 1024));
+ if (val > largest_size)
+ largest_size = val;
+ val = prom_getintdefault(node, "ecache-line-size", 64);
+ if (val < smallest_linesize)
+ smallest_linesize = val;
+ instance++;
+ }
+
+ if (largest_size == 0UL || smallest_linesize == ~0UL) {
+ prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
+ "parameters.\n");
+ prom_halt();
+ }
+
+ ecache_flush_size = (2 * largest_size);
+ ecache_flush_linesize = smallest_linesize;
+
+ /* Discover a physically contiguous chunk of physical
+ * memory in 'sp_banks' of size ecache_flush_size calculated
+ * above. Store the physical base of this area at
+ * ecache_flush_physbase.
+ */
+ for (node = 0; ; node++) {
+ if (sp_banks[node].num_bytes == 0)
+ break;
+ if (sp_banks[node].num_bytes >= ecache_flush_size) {
+ ecache_flush_physbase = sp_banks[node].base_addr;
+ break;
+ }
+ }
+
+ /* Note: Zero would be a valid value of ecache_flush_physbase so
+ * don't use that as the success test. :-)
+ */
+ if (sp_banks[node].num_bytes == 0) {
+ prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
+ "contiguous physical memory.\n", ecache_flush_size);
+ prom_halt();
+ }
+
+ /* Now allocate error trap reporting scoreboard. */
+ node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
+ for (order = 0; order < MAX_ORDER; order++) {
+ if ((PAGE_SIZE << order) >= node)
+ break;
+ }
+ cheetah_error_log = (struct cheetah_err_info *)
+ __get_free_pages(GFP_KERNEL, order);
+ if (!cheetah_error_log) {
+ prom_printf("cheetah_ecache_flush_init: Failed to allocate "
+ "error logging scoreboard (%d bytes).\n", node);
+ prom_halt();
+ }
+ memset(cheetah_error_log, 0, PAGE_SIZE << order);
+
+ /* Mark all AFSRs as invalid so that the trap handler will
+ * log new new information there.
+ */
+ for (i = 0; i < 2 * NR_CPUS; i++)
+ cheetah_error_log[i].afsr = CHAFSR_INVALID;
+
+ __asm__ ("rdpr %%ver, %0" : "=r" (ver));
+ if ((ver >> 32) == 0x003e0016) {
+ cheetah_error_table = &__jalapeno_error_table[0];
+ cheetah_afsr_errors = JPAFSR_ERRORS;
+ } else if ((ver >> 32) == 0x003e0015) {
+ cheetah_error_table = &__cheetah_plus_error_table[0];
+ cheetah_afsr_errors = CHPAFSR_ERRORS;
+ } else {
+ cheetah_error_table = &__cheetah_error_table[0];
+ cheetah_afsr_errors = CHAFSR_ERRORS;
+ }
+
+ /* Now patch trap tables. */
+ memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
+ memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
+ memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
+ memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
+ memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
+ memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
+ memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
+ memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
+ if (tlb_type == cheetah_plus) {
+ memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
+ memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
+ memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
+ memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
+ }
+ flushi(PAGE_OFFSET);
+}
+
+/* Displacement-flush the entire E-cache by reading twice its size
+ * of physical memory from the flush area via ASI_PHYS_USE_EC.
+ * The ldxa sits in the branch delay slot of the countdown loop.
+ */
+static void cheetah_flush_ecache(void)
+{
+	unsigned long flush_base = ecache_flush_physbase;
+	unsigned long flush_linesize = ecache_flush_linesize;
+	unsigned long flush_size = ecache_flush_size;
+
+	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
+			     "   bne,pt	%%xcc, 1b\n\t"
+			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
+			     : "=&r" (flush_size)
+			     : "0" (flush_size), "r" (flush_base),
+			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
+}
+
+/* Displacement-flush the single E-cache line that may hold
+ * physaddr, by reading the two flush-area addresses that index
+ * to the same line (the flush area is twice the E-cache size,
+ * so the address and its +size/2 alias cover both ways).
+ */
+static void cheetah_flush_ecache_line(unsigned long physaddr)
+{
+	unsigned long alias;
+
+	physaddr &= ~(8UL - 1UL);
+	physaddr = (ecache_flush_physbase +
+		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
+	alias = physaddr + (ecache_flush_size >> 1UL);
+	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
+			     "ldxa [%1] %2, %%g0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (physaddr), "r" (alias),
+			       "i" (ASI_PHYS_USE_EC));
+}
+
+/* Unfortunately, the diagnostic access to the I-cache tags we need to
+ * use to clear the thing interferes with I-cache coherency transactions.
+ *
+ * So we must only flush the I-cache when it is disabled.
+ */
+static void __cheetah_flush_icache(void)
+{
+	unsigned long i;
+
+	/* Clear the valid bits in all the tags.  The (2 << 3) field
+	 * in the address selects which tag component the diagnostic
+	 * ASI_IC_TAG store hits; NOTE(review): confirm the exact
+	 * encoding against the Cheetah PRM.
+	 */
+	for (i = 0; i < (1 << 15); i += (1 << 5)) {
+		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
+	}
+}
+
+/* Flush the I-cache safely: disable it via the DCU control
+ * register (see the comment above __cheetah_flush_icache), clear
+ * all tags, then restore the original DCU value.
+ */
+static void cheetah_flush_icache(void)
+{
+	unsigned long dcu_save;
+
+	/* Save current DCU, disable I-cache. */
+	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
+			     "or %0, %2, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : "=r" (dcu_save)
+			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
+			     : "g1");
+
+	__cheetah_flush_icache();
+
+	/* Restore DCU register */
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
+}
+
+/* Invalidate the entire D-cache by zeroing every tag through the
+ * diagnostic ASI_DCACHE_TAG, stepping a 32-byte line at a time.
+ */
+static void cheetah_flush_dcache(void)
+{
+	unsigned long i;
+
+	for (i = 0; i < (1 << 16); i += (1 << 5)) {
+		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "r" (i), "i" (ASI_DCACHE_TAG));
+	}
+}
+
+/* In order to make the even parity correct we must do two things.
+ * First, we clear DC_data_parity and set DC_utag to an appropriate value.
+ * Next, we clear out all 32-bytes of data for that line. Data of
+ * all-zero + tag parity value of zero == correct parity.
+ */
+static void cheetah_plus_zap_dcache_parity(void)
+{
+	for (i = 0; i < (1 << 16); i += (1 << 5)) {
+		unsigned long tag = (i >> 14);
+		unsigned long j;
+
+		/* Write the utag for this line. */
+		__asm__ __volatile__("membar	#Sync\n\t"
+				     "stxa	%0, [%1] %2\n\t"
+				     "membar	#Sync"
+				     : /* no outputs */
+				     : "r" (tag), "r" (i),
+				       "i" (ASI_DCACHE_UTAG));
+		/* Zero all 32 bytes of data, 8 bytes per store. */
+		for (j = i; j < i + (1 << 5); j += (1 << 3))
+			__asm__ __volatile__("membar	#Sync\n\t"
+					     "stxa	%%g0, [%0] %1\n\t"
+					     "membar	#Sync"
+					     : /* no outputs */
+					     : "r" (j), "i" (ASI_DCACHE_DATA));
+	}
+}
+
+/* Conversion tables used to frob Cheetah AFSR syndrome values into
+ * something palatable to the memory controller driver get_unumber
+ * routine.
+ */
+/* Special non-data-bit codes returned by the tables below; values
+ * >= 128 so they cannot collide with a real data bit number.
+ */
+#define MT0	137
+#define MT1	138
+#define MT2	139
+#define NONE	254
+#define MTC0	140
+#define MTC1	141
+#define MTC2	142
+#define MTC3	143
+#define C0	128
+#define C1	129
+#define C2	130
+#define C3	131
+#define C4	132
+#define C5	133
+#define C6	134
+#define C7	135
+#define C8	136
+#define M2	144
+#define M3	145
+#define M4	146
+#define M	147
+/* E-syndrome (8-bit) -> failing data bit / check bit / multi-bit code. */
+static unsigned char cheetah_ecc_syntab[] = {
+/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
+/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
+/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
+/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
+/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
+/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
+/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
+/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
+/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
+/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
+/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
+/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
+/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
+/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
+/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
+/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
+/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
+/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
+/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
+/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
+/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
+/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
+/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
+/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
+/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
+/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
+/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
+/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
+/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
+/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
+/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
+/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
+};
+/* 4-bit M-syndrome -> failing MTAG bit / check bit code. */
+static unsigned char cheetah_mtag_syntab[] = {
+	NONE, MTC0,
+	MTC1, NONE,
+	MTC2, NONE,
+	NONE, MT0,
+	MTC3, NONE,
+	NONE, MT1,
+	NONE, MT2,
+	NONE, NONE
+};
+
+/* Return the highest priority error conditon mentioned. */
+static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
+{
+ unsigned long tmp = 0;
+ int i;
+
+ for (i = 0; cheetah_error_table[i].mask; i++) {
+ if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
+ return tmp;
+ }
+ return tmp;
+}
+
+static const char *cheetah_get_string(unsigned long bit)
+{
+ int i;
+
+ for (i = 0; cheetah_error_table[i].mask; i++) {
+ if ((bit & cheetah_error_table[i].mask) != 0UL)
+ return cheetah_error_table[i].name;
+ }
+ return "???";
+}
+
+extern int chmc_getunumber(int, unsigned long, char *, int);
+
+/* Print a full report of one error trap: AFSR/AFAR, trap state,
+ * highest-priority error, syndrome-to-memory-module decode when
+ * the error type carries a valid syndrome, the D/I/E-cache
+ * snapshots captured by the trap handler, and any further error
+ * bits beyond the highest-priority one.  Messages are emitted at
+ * KERN_WARNING when 'recoverable', else KERN_CRIT.
+ */
+static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
+			       unsigned long afsr, unsigned long afar, int recoverable)
+{
+	unsigned long hipri;
+	char unum[256];
+
+	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       afsr, afar,
+	       (afsr & CHAFSR_TL1) ? 1 : 0);
+	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       regs->tpc, regs->tnpc, regs->tstate);
+	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
+	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
+	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
+	hipri = cheetah_get_hipri(afsr);
+	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       hipri, cheetah_get_string(hipri));
+
+	/* Try to get unumber if relevant. */
+#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
+			 CHAFSR_CPC | CHAFSR_CPU | \
+			 CHAFSR_UE  | CHAFSR_CE  | \
+			 CHAFSR_EDC | CHAFSR_EDU | \
+			 CHAFSR_UCC | CHAFSR_UCU | \
+			 CHAFSR_WDU | CHAFSR_WDC)
+#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
+	if (afsr & ESYND_ERRORS) {
+		int syndrome;
+		int ret;
+
+		/* Data ECC error: decode via the E-syndrome table. */
+		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
+		syndrome = cheetah_ecc_syntab[syndrome];
+		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
+		if (ret != -1)
+			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
+			       (recoverable ? KERN_WARNING : KERN_CRIT),
+			       smp_processor_id(), unum);
+	} else if (afsr & MSYND_ERRORS) {
+		int syndrome;
+		int ret;
+
+		/* MTAG ECC error: decode via the M-syndrome table. */
+		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
+		syndrome = cheetah_mtag_syntab[syndrome];
+		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
+		if (ret != -1)
+			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
+			       (recoverable ? KERN_WARNING : KERN_CRIT),
+			       smp_processor_id(), unum);
+	}
+
+	/* Now dump the cache snapshots. */
+	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (int) info->dcache_index,
+	       info->dcache_tag,
+	       info->dcache_utag,
+	       info->dcache_stag);
+	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->dcache_data[0],
+	       info->dcache_data[1],
+	       info->dcache_data[2],
+	       info->dcache_data[3]);
+	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
+	       "u[%016lx] l[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (int) info->icache_index,
+	       info->icache_tag,
+	       info->icache_utag,
+	       info->icache_stag,
+	       info->icache_upper,
+	       info->icache_lower);
+	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->icache_data[0],
+	       info->icache_data[1],
+	       info->icache_data[2],
+	       info->icache_data[3]);
+	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->icache_data[4],
+	       info->icache_data[5],
+	       info->icache_data[6],
+	       info->icache_data[7]);
+	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       (int) info->ecache_index, info->ecache_tag);
+	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
+	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+	       info->ecache_data[0],
+	       info->ecache_data[1],
+	       info->ecache_data[2],
+	       info->ecache_data[3]);
+
+	/* Report any remaining error bits, highest priority first. */
+	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
+	while (afsr != 0UL) {
+		unsigned long bit = cheetah_get_hipri(afsr);
+
+		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
+		       (recoverable ? KERN_WARNING : KERN_CRIT),
+		       bit, cheetah_get_string(bit));
+
+		afsr &= ~bit;
+	}
+
+	if (!recoverable)
+		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
+}
+
+/* Re-read the AFSR; if any known error bits are set, optionally
+ * record AFSR/AFAR into *logp (logp may be NULL) and return 1,
+ * else return 0.  In either case the AFSR value read is written
+ * back to clear the sticky error bits.
+ */
+static int cheetah_recheck_errors(struct cheetah_err_info *logp)
+{
+	unsigned long afsr, afar;
+	int ret = 0;
+
+	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
+			     : "=r" (afsr)
+			     : "i" (ASI_AFSR));
+	if ((afsr & cheetah_afsr_errors) != 0) {
+		if (logp != NULL) {
+			/* AFAR is only meaningful when an error is latched. */
+			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
+					     : "=r" (afar)
+					     : "i" (ASI_AFAR));
+			logp->afsr = afsr;
+			logp->afar = afar;
+		}
+		ret = 1;
+	}
+	/* Writing the bits back clears them (write-one-to-clear). */
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync\n\t"
+			     : : "r" (afsr), "i" (ASI_AFSR));
+
+	return ret;
+}
+
+/* C-level handler for Fast-ECC error traps.  The assembly trap
+ * handler has already captured AFSR/AFAR and the cache snapshots
+ * into our per-cpu log slot and disabled the caches.  Flush the
+ * E/I/D caches, re-enable caches and error reporting, decide
+ * whether the error is recoverable, log it, and panic if not.
+ */
+void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
+{
+	struct cheetah_err_info local_snapshot, *p;
+	int recoverable;
+
+	/* Flush E-cache */
+	cheetah_flush_ecache();
+
+	p = cheetah_get_error_log(afsr);
+	if (!p) {
+		/* Error before the log scoreboard exists is fatal. */
+		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
+			    afsr, afar);
+		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
+		prom_halt();
+	}
+
+	/* Grab snapshot of logged error. */
+	memcpy(&local_snapshot, p, sizeof(local_snapshot));
+
+	/* If the current trap snapshot does not match what the
+	 * trap handler passed along into our args, big trouble.
+	 * In such a case, mark the local copy as invalid.
+	 *
+	 * Else, it matches and we mark the afsr in the non-local
+	 * copy as invalid so we may log new error traps there.
+	 */
+	if (p->afsr != afsr || p->afar != afar)
+		local_snapshot.afsr = CHAFSR_INVALID;
+	else
+		p->afsr = CHAFSR_INVALID;
+
+	cheetah_flush_icache();
+	cheetah_flush_dcache();
+
+	/* Re-enable I-cache/D-cache */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_DCU_CONTROL_REG),
+			       "i" (DCU_DC | DCU_IC)
+			     : "g1");
+
+	/* Re-enable error reporting */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_ESTATE_ERROR_EN),
+			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
+			     : "g1");
+
+	/* Decide if we can continue after handling this trap and
+	 * logging the error.
+	 */
+	recoverable = 1;
+	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
+		recoverable = 0;
+
+	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
+	 * error was logged while we had error reporting traps disabled.
+	 */
+	if (cheetah_recheck_errors(&local_snapshot)) {
+		unsigned long new_afsr = local_snapshot.afsr;
+
+		/* If we got a new asynchronous error, die... */
+		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
+				CHAFSR_WDU | CHAFSR_CPU |
+				CHAFSR_IVU | CHAFSR_UE |
+				CHAFSR_BERR | CHAFSR_TO))
+			recoverable = 0;
+	}
+
+	/* Log errors. */
+	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
+
+	if (!recoverable)
+		panic("Irrecoverable Fast-ECC error trap.\n");
+
+	/* Flush E-cache to kick the error trap handlers out. */
+	cheetah_flush_ecache();
+}
+
+/* Try to fix a correctable error by pushing the line out from
+ * the E-cache.  Recheck error reporting registers to see if the
+ * problem is intermittent.
+ *
+ * Returns 0 if no new error was observed after the scrub
+ * (intermittent problem), 1 if the second displacement attempt
+ * cleared it, and 2 if the error persisted after both attempts.
+ */
+static int cheetah_fix_ce(unsigned long physaddr)
+{
+	unsigned long orig_estate;
+	unsigned long alias1, alias2;
+	int ret;
+
+	/* Make sure correctable error traps are disabled. */
+	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
+			     "andn	%0, %1, %%g1\n\t"
+			     "stxa	%%g1, [%%g0] %2\n\t"
+			     "membar	#Sync"
+			     : "=&r" (orig_estate)
+			     : "i" (ESTATE_ERROR_CEEN),
+			       "i" (ASI_ESTATE_ERROR_EN)
+			     : "g1");
+
+	/* We calculate alias addresses that will force the
+	 * cache line in question out of the E-cache.  Then
+	 * we bring it back in with an atomic instruction so
+	 * that we get it in some modified/exclusive state,
+	 * then we displace it again to try and get proper ECC
+	 * pushed back into the system.
+	 */
+	physaddr &= ~(8UL - 1UL);
+	alias1 = (ecache_flush_physbase +
+		  (physaddr & ((ecache_flush_size >> 1) - 1)));
+	alias2 = alias1 + (ecache_flush_size >> 1);
+	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
+			     "ldxa	[%1] %3, %%g0\n\t"
+			     "casxa	[%2] %3, %%g0, %%g0\n\t"
+			     "membar	#StoreLoad | #StoreStore\n\t"
+			     "ldxa	[%0] %3, %%g0\n\t"
+			     "ldxa	[%1] %3, %%g0\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (alias1), "r" (alias2),
+			       "r" (physaddr), "i" (ASI_PHYS_USE_EC))
+
+	/* Did that trigger another error? */
+	if (cheetah_recheck_errors(NULL)) {
+		/* Try one more time. */
+		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
+				     "membar #Sync"
+				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
+		if (cheetah_recheck_errors(NULL))
+			ret = 2;
+		else
+			ret = 1;
+	} else {
+		/* No new error, intermittent problem. */
+		ret = 0;
+	}
+
+	/* Restore error enables. */
+	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
+			     "membar	#Sync"
+			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
+
+	return ret;
+}
+
+/* Return non-zero if PADDR falls inside one of the physical
+ * memory banks described by sp_banks[] (terminated by a bank
+ * with num_bytes == 0), else zero.
+ */
+static int cheetah_check_main_memory(unsigned long paddr)
+{
+	int bank = 0;
+
+	while (sp_banks[bank].num_bytes != 0) {
+		unsigned long start = sp_banks[bank].base_addr;
+		unsigned long limit = start + sp_banks[bank].num_bytes;
+
+		if (paddr >= start && paddr < limit)
+			return 1;
+		bank++;
+	}
+	return 0;
+}
+
+/* C-level handler for HW-corrected ECC error traps (CEE).  The
+ * assembly trap handler captured the error state and disabled
+ * only the I-cache.  Scrub the offending memory line if the CE
+ * hit main memory, flush whatever the error type requires,
+ * re-enable the I-cache and CE reporting, then log the error.
+ */
+void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
+{
+	struct cheetah_err_info local_snapshot, *p;
+	int recoverable, is_memory;
+
+	p = cheetah_get_error_log(afsr);
+	if (!p) {
+		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
+			    afsr, afar);
+		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
+		prom_halt();
+	}
+
+	/* Grab snapshot of logged error. */
+	memcpy(&local_snapshot, p, sizeof(local_snapshot));
+
+	/* If the current trap snapshot does not match what the
+	 * trap handler passed along into our args, big trouble.
+	 * In such a case, mark the local copy as invalid.
+	 *
+	 * Else, it matches and we mark the afsr in the non-local
+	 * copy as invalid so we may log new error traps there.
+	 */
+	if (p->afsr != afsr || p->afar != afar)
+		local_snapshot.afsr = CHAFSR_INVALID;
+	else
+		p->afsr = CHAFSR_INVALID;
+
+	is_memory = cheetah_check_main_memory(afar);
+
+	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
+		/* XXX Might want to log the results of this operation
+		 * XXX somewhere... -DaveM
+		 */
+		cheetah_fix_ce(afar);
+	}
+
+	{
+		int flush_all, flush_line;
+
+		/* If the only pending error is a HW-corrected E-cache
+		 * error (EDC or CPC), flushing just that line suffices;
+		 * with anything else pending, flush the whole E-cache.
+		 */
+		flush_all = flush_line = 0;
+		if ((afsr & CHAFSR_EDC) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		} else if ((afsr & CHAFSR_CPC) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		}
+
+		/* Trap handler only disabled I-cache, flush it. */
+		cheetah_flush_icache();
+
+		/* Re-enable I-cache */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_DCU_CONTROL_REG),
+				     "i" (DCU_IC)
+				     : "g1");
+
+		if (flush_all)
+			cheetah_flush_ecache();
+		else if (flush_line)
+			cheetah_flush_ecache_line(afar);
+	}
+
+	/* Re-enable error reporting */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_ESTATE_ERROR_EN),
+			       "i" (ESTATE_ERROR_CEEN)
+			     : "g1");
+
+	/* Decide if we can continue after handling this trap and
+	 * logging the error.
+	 */
+	recoverable = 1;
+	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
+		recoverable = 0;
+
+	/* Re-check AFSR/AFAR */
+	(void) cheetah_recheck_errors(&local_snapshot);
+
+	/* Log errors. */
+	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
+
+	if (!recoverable)
+		panic("Irrecoverable Correctable-ECC error trap.\n");
+}
+
+/* Deferred (asynchronous) error trap handler.  The faulting access
+ * may have completed long before the trap was delivered, so precise
+ * recovery is only possible in narrow cases: the fault must have hit
+ * main memory, and we must have trapped from user mode or from a
+ * kernel uaccess covered by an exception table entry.  Everything
+ * else panics.
+ */
+void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
+{
+	struct cheetah_err_info local_snapshot, *p;
+	int recoverable, is_memory;
+
+#ifdef CONFIG_PCI
+	/* Check for the special PCI poke sequence. */
+	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+		/* A deliberate config-space probe faulted: restore the
+		 * caches and error reporting, flag the failure for the
+		 * poke code, and skip the faulting instruction.
+		 */
+		cheetah_flush_icache();
+		cheetah_flush_dcache();
+
+		/* Re-enable I-cache/D-cache */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_DCU_CONTROL_REG),
+				     "i" (DCU_DC | DCU_IC)
+				     : "g1");
+
+		/* Re-enable error reporting */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_ESTATE_ERROR_EN),
+				     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
+				     : "g1");
+
+		(void) cheetah_recheck_errors(NULL);
+
+		pci_poke_faulted = 1;
+		regs->tpc += 4;
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+
+	p = cheetah_get_error_log(afsr);
+	if (!p) {
+		/* Trapped before the error log was set up; all we can
+		 * do is report via the PROM and halt.
+		 */
+		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
+			    afsr, afar);
+		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
+			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
+		prom_halt();
+	}
+
+	/* Grab snapshot of logged error. */
+	memcpy(&local_snapshot, p, sizeof(local_snapshot));
+
+	/* If the current trap snapshot does not match what the
+	 * trap handler passed along into our args, big trouble.
+	 * In such a case, mark the local copy as invalid.
+	 *
+	 * Else, it matches and we mark the afsr in the non-local
+	 * copy as invalid so we may log new error traps there.
+	 */
+	if (p->afsr != afsr || p->afar != afar)
+		local_snapshot.afsr = CHAFSR_INVALID;
+	else
+		p->afsr = CHAFSR_INVALID;
+
+	is_memory = cheetah_check_main_memory(afar);
+
+	{
+		int flush_all, flush_line;
+
+		/* As in the CEE handler: if EDU (or BERR) is the only
+		 * error bit set, only the E-cache line at AFAR need be
+		 * flushed; otherwise flush the whole E-cache.
+		 */
+		flush_all = flush_line = 0;
+		if ((afsr & CHAFSR_EDU) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		} else if ((afsr & CHAFSR_BERR) != 0UL) {
+			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
+				flush_line = 1;
+			else
+				flush_all = 1;
+		}
+
+		cheetah_flush_icache();
+		cheetah_flush_dcache();
+
+		/* Re-enable I/D caches */
+		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+				     "or %%g1, %1, %%g1\n\t"
+				     "stxa %%g1, [%%g0] %0\n\t"
+				     "membar #Sync"
+				     : /* no outputs */
+				     : "i" (ASI_DCU_CONTROL_REG),
+				     "i" (DCU_IC | DCU_DC)
+				     : "g1");
+
+		if (flush_all)
+			cheetah_flush_ecache();
+		else if (flush_line)
+			cheetah_flush_ecache_line(afar);
+	}
+
+	/* Re-enable error reporting */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_ESTATE_ERROR_EN),
+			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
+			     : "g1");
+
+	/* Decide if we can continue after handling this trap and
+	 * logging the error.
+	 */
+	recoverable = 1;
+	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
+		recoverable = 0;
+
+	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
+	 * error was logged while we had error reporting traps disabled.
+	 */
+	if (cheetah_recheck_errors(&local_snapshot)) {
+		unsigned long new_afsr = local_snapshot.afsr;
+
+		/* If we got a new asynchronous error, die... */
+		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
+				CHAFSR_WDU | CHAFSR_CPU |
+				CHAFSR_IVU | CHAFSR_UE |
+				CHAFSR_BERR | CHAFSR_TO))
+			recoverable = 0;
+	}
+
+	/* Log errors. */
+	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
+
+	/* "Recoverable" here means we try to yank the page from ever
+	 * being newly used again.  This depends upon a few things:
+	 * 1) Must be main memory, and AFAR must be valid.
+	 * 2) If we trapped from user, OK.
+	 * 3) Else, if we trapped from kernel we must find exception
+	 *    table entry (ie. we have to have been accessing user
+	 *    space).
+	 *
+	 * If AFAR is not in main memory, or we trapped from kernel
+	 * and cannot find an exception table entry, it is unacceptable
+	 * to try and continue.
+	 */
+	if (recoverable && is_memory) {
+		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
+			/* OK, usermode access. */
+			recoverable = 1;
+		} else {
+			unsigned long g2 = regs->u_regs[UREG_G2];
+			unsigned long fixup = search_extables_range(regs->tpc, &g2);
+
+			if (fixup != 0UL) {
+				/* OK, kernel access to userspace. */
+				recoverable = 1;
+
+			} else {
+				/* BAD, privileged state is corrupted. */
+				recoverable = 0;
+			}
+
+			if (recoverable) {
+				/* Pin the bad page by leaking a reference
+				 * so it is never handed out again.
+				 */
+				if (pfn_valid(afar >> PAGE_SHIFT))
+					get_page(pfn_to_page(afar >> PAGE_SHIFT));
+				else
+					recoverable = 0;
+
+				/* Only perform fixup if we still have a
+				 * recoverable condition.
+				 */
+				if (recoverable) {
+					regs->tpc = fixup;
+					regs->tnpc = regs->tpc + 4;
+					regs->u_regs[UREG_G2] = g2;
+				}
+			}
+		}
+	} else {
+		recoverable = 0;
+	}
+
+	if (!recoverable)
+		panic("Irrecoverable deferred error trap.\n");
+}
+
+/* Handle a D/I cache parity error trap.  TYPE is encoded as:
+ *
+ * Bit0: 0=dcache,1=icache
+ * Bit1: 0=recoverable,1=unrecoverable
+ *
+ * The hardware has disabled both the I-cache and D-cache in
+ * the %dcr register.
+ */
+void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+{
+	/* Clean out the cache that took the parity hit.  Note the
+	 * D-cache flush below runs in both cases.
+	 */
+	if (type & 0x1)
+		__cheetah_flush_icache();
+	else
+		cheetah_plus_zap_dcache_parity();
+	cheetah_flush_dcache();
+
+	/* Re-enable I-cache/D-cache */
+	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
+			     "or %%g1, %1, %%g1\n\t"
+			     "stxa %%g1, [%%g0] %0\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "i" (ASI_DCU_CONTROL_REG),
+			     "i" (DCU_DC | DCU_IC)
+			     : "g1");
+
+	/* Unrecoverable: report and panic. */
+	if (type & 0x2) {
+		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
+		       smp_processor_id(),
+		       (type & 0x1) ? 'I' : 'D',
+		       regs->tpc);
+		panic("Irrecoverable Cheetah+ parity error.");
+	}
+
+	/* Recoverable: just warn. */
+	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
+	       smp_processor_id(),
+	       (type & 0x1) ? 'I' : 'D',
+	       regs->tpc);
+}
+
+/* Common tail for FPU exception traps.  From kernel mode we simply
+ * skip the faulting instruction; from user mode we decode the FSR
+ * and deliver SIGFPE with an appropriate si_code.
+ */
+void do_fpe_common(struct pt_regs *regs)
+{
+	if (regs->tstate & TSTATE_PRIV) {
+		/* Kernel-mode FPE: advance past the instruction. */
+		regs->tpc = regs->tnpc;
+		regs->tnpc += 4;
+	} else {
+		unsigned long fsr = current_thread_info()->xfsr[0];
+		siginfo_t info;
+
+		if (test_thread_flag(TIF_32BIT)) {
+			regs->tpc &= 0xffffffff;
+			regs->tnpc &= 0xffffffff;
+		}
+		info.si_signo = SIGFPE;
+		info.si_errno = 0;
+		info.si_addr = (void __user *)regs->tpc;
+		info.si_trapno = 0;
+		info.si_code = __SI_FAULT;
+		/* fsr bits 16:14 are the ftt field; value 1 appears to be
+		 * the IEEE-754 exception case (confirm against SPARC V9
+		 * FSR spec), in which the cexc bits select the si_code.
+		 */
+		if ((fsr & 0x1c000) == (1 << 14)) {
+			if (fsr & 0x10)
+				info.si_code = FPE_FLTINV;
+			else if (fsr & 0x08)
+				info.si_code = FPE_FLTOVF;
+			else if (fsr & 0x04)
+				info.si_code = FPE_FLTUND;
+			else if (fsr & 0x02)
+				info.si_code = FPE_FLTDIV;
+			else if (fsr & 0x01)
+				info.si_code = FPE_FLTRES;
+		}
+		force_sig_info(SIGFPE, &info, current);
+	}
+}
+
+/* fp_exception_ieee_754 trap (0x21): give debugger hooks a chance,
+ * then fall into the common FPE signal path.
+ */
+void do_fpieee(struct pt_regs *regs)
+{
+	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
+		       0, 0x24, SIGFPE) == NOTIFY_STOP)
+		return;
+
+	do_fpe_common(regs);
+}
+
+extern int do_mathemu(struct pt_regs *, struct fpustate *);
+
+/* fp_exception_other trap: attempt software emulation of unfinished/
+ * unimplemented FPops before signalling the process.
+ */
+void do_fpother(struct pt_regs *regs)
+{
+	struct fpustate *f = FPUSTATE;
+	int ret = 0;
+
+	if (notify_die(DIE_TRAP, "fpu exception other", regs,
+		       0, 0x25, SIGFPE) == NOTIFY_STOP)
+		return;
+
+	/* ftt field (fsr bits 16:14) selects the emulation path. */
+	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
+	case (2 << 14): /* unfinished_FPop */
+	case (3 << 14): /* unimplemented_FPop */
+		ret = do_mathemu(regs, f);
+		break;
+	}
+	/* Non-zero means the emulator fully handled the instruction. */
+	if (ret)
+		return;
+	do_fpe_common(regs);
+}
+
+/* Tagged arithmetic overflow trap: fatal in kernel mode, otherwise
+ * delivers SIGEMT/EMT_TAGOVF to the process.
+ */
+void do_tof(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
+		       0, 0x26, SIGEMT) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV)
+		die_if_kernel("Penguin overflow trap from kernel mode", regs);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGEMT;
+	info.si_errno = 0;
+	info.si_code = EMT_TAGOVF;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = 0;
+	force_sig_info(SIGEMT, &info, current);
+}
+
+/* Integer divide-by-zero trap: fatal in kernel mode, otherwise
+ * delivers SIGFPE/FPE_INTDIV to the process.
+ */
+void do_div0(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "integer division by zero", regs,
+		       0, 0x28, SIGFPE) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV)
+		die_if_kernel("TL0: Kernel divide by zero.", regs);
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGFPE;
+	info.si_errno = 0;
+	info.si_code = FPE_INTDIV;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = 0;
+	force_sig_info(SIGFPE, &info, current);
+}
+
+/* Dump the 3 instructions before and 5 after PC (kernel addresses),
+ * bracketing the faulting word as <xxxxxxxx>.  Bails out silently on
+ * a misaligned PC.
+ */
+void instruction_dump (unsigned int *pc)
+{
+	int i;
+
+	if ((((unsigned long) pc) & 3))
+		return;
+
+	printk("Instruction DUMP:");
+	for (i = -3; i < 6; i++)
+		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
+	printk("\n");
+}
+
+/* Dump the 3 instructions before and 5 after the user PC, bracketing
+ * the faulting word as <xxxxxxxx> (buf[3] corresponds to PC since we
+ * copy from pc - 3).  Bails out silently on a misaligned PC or a
+ * failed copy from userspace.
+ */
+static void user_instruction_dump (unsigned int __user *pc)
+{
+	int i;
+	unsigned int buf[9];
+
+	if ((((unsigned long) pc) & 3))
+		return;
+
+	if (copy_from_user(buf, pc - 3, sizeof(buf)))
+		return;
+
+	printk("Instruction DUMP:");
+	/* Mark only the faulting word (i == 3) with brackets, matching
+	 * instruction_dump() above.  The previous conditionals were
+	 * inverted and bracketed every word *except* the faulting one.
+	 */
+	for (i = 0; i < 9; i++)
+		printk("%c%08x%c",i==3?'<':' ',buf[i],i==3?'>':' ');
+	printk("\n");
+}
+
+/* Print a call trace by walking saved register windows on TSK's
+ * kernel stack starting from frame pointer _KSP.  The walk stops at
+ * the first frame pointer outside the task's thread area or after 16
+ * frames.
+ */
+void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+{
+	unsigned long pc, fp, thread_base, ksp;
+	struct thread_info *tp = tsk->thread_info;
+	struct reg_window *rw;
+	int count = 0;
+
+	ksp = (unsigned long) _ksp;
+
+	/* Flush our own register windows to the stack so the walk
+	 * sees current values.
+	 */
+	if (tp == current_thread_info())
+		flushw_all();
+
+	/* sparc64 frame pointers carry the stack bias. */
+	fp = ksp + STACK_BIAS;
+	thread_base = (unsigned long) tp;
+
+	printk("Call Trace:");
+#ifdef CONFIG_KALLSYMS
+	printk("\n");
+#endif
+	do {
+		/* Bogus frame pointer? */
+		if (fp < (thread_base + sizeof(struct thread_info)) ||
+		    fp >= (thread_base + THREAD_SIZE))
+			break;
+		rw = (struct reg_window *)fp;
+		pc = rw->ins[7];	/* %i7: caller's return address */
+		printk(" [%016lx] ", pc);
+		print_symbol("%s\n", pc);
+		fp = rw->ins[6] + STACK_BIAS;	/* %i6: caller's frame */
+	} while (++count < 16);
+#ifndef CONFIG_KALLSYMS
+	printk("\n");
+#endif
+}
+
+/* Dump the current task's kernel stack trace, starting from our own
+ * frame pointer (%fp).
+ */
+void dump_stack(void)
+{
+	unsigned long *ksp;
+
+	__asm__ __volatile__("mov %%fp, %0"
+			     : "=r" (ksp));
+	show_stack(current, ksp);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+/* Return 1 if RW is a plausible, 8-byte-aligned register window
+ * inside TASK's kernel thread area; 0 otherwise.  Addresses below
+ * PAGE_OFFSET are only tolerated for init_task.
+ */
+static inline int is_kernel_stack(struct task_struct *task,
+				  struct reg_window *rw)
+{
+	unsigned long rw_addr = (unsigned long) rw;
+	unsigned long thread_base, thread_end;
+
+	if (rw_addr < PAGE_OFFSET) {
+		if (task != &init_task)
+			return 0;
+	}
+
+	thread_base = (unsigned long) task->thread_info;
+	thread_end = thread_base + sizeof(union thread_union);
+	if (rw_addr >= thread_base &&
+	    rw_addr < thread_end &&
+	    !(rw_addr & 0x7UL))
+		return 1;
+
+	return 0;
+}
+
+/* Step one frame up the kernel stack: follow the saved frame pointer
+ * (%i6) in RW, re-applying the sparc64 stack bias.  Returns NULL at
+ * the end of the chain.
+ */
+static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+{
+	unsigned long prev_fp = rw->ins[6];
+
+	return prev_fp ? (struct reg_window *) (prev_fp + STACK_BIAS) : NULL;
+}
+
+/* Oops path: print register state and a back trace, then terminate
+ * the current context.  Despite the name this is called from both
+ * kernel and user trap paths; the exit signal differs (SIGKILL for
+ * privileged state, SIGSEGV otherwise).
+ */
+void die_if_kernel(char *str, struct pt_regs *regs)
+{
+	static int die_counter;
+	extern void __show_regs(struct pt_regs * regs);
+	extern void smp_report_regs(void);
+	int count = 0;
+
+	/* Amuse the user. */
+	printk(
+"              \\|/ ____ \\|/\n"
+"              \"@'/ .. \\`@\"\n"
+"              /_| \\__/ |_\\\n"
+"                 \\__U_/\n");
+
+	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
+	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
+	/* Push all register windows to the stack so the trace below
+	 * reads committed values.
+	 */
+	__asm__ __volatile__("flushw");
+	__show_regs(regs);
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *rw = (struct reg_window *)
+			(regs->u_regs[UREG_FP] + STACK_BIAS);
+
+		/* Stop the back trace when we hit userland or we
+		 * find some badly aligned kernel stack.  Bounded to
+		 * 30 frames.
+		 */
+		while (rw &&
+		       count++ < 30&&
+		       is_kernel_stack(current, rw)) {
+			printk("Caller[%016lx]", rw->ins[7]);
+			print_symbol(": %s", rw->ins[7]);
+			printk("\n");
+
+			rw = kernel_stack_up(rw);
+		}
+		instruction_dump ((unsigned int *) regs->tpc);
+	} else {
+		if (test_thread_flag(TIF_32BIT)) {
+			regs->tpc &= 0xffffffff;
+			regs->tnpc &= 0xffffffff;
+		}
+		user_instruction_dump ((unsigned int __user *) regs->tpc);
+	}
+#ifdef CONFIG_SMP
+	smp_report_regs();
+#endif
+
+	if (regs->tstate & TSTATE_PRIV)
+		do_exit(SIGKILL);
+	do_exit(SIGSEGV);
+}
+
+extern int handle_popc(u32 insn, struct pt_regs *regs);
+extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+
+/* Illegal instruction trap: fatal in kernel mode; in user mode we
+ * first try to emulate POPC and quad load/store instructions that
+ * the CPU does not implement, and only signal SIGILL if emulation
+ * does not apply.
+ */
+void do_illegal_instruction(struct pt_regs *regs)
+{
+	unsigned long pc = regs->tpc;
+	unsigned long tstate = regs->tstate;
+	u32 insn;
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "illegal instruction", regs,
+		       0, 0x10, SIGILL) == NOTIFY_STOP)
+		return;
+
+	if (tstate & TSTATE_PRIV)
+		die_if_kernel("Kernel illegal instruction", regs);
+	if (test_thread_flag(TIF_32BIT))
+		pc = (u32)pc;
+	/* get_user() returns -EFAULT on failure; on success, try the
+	 * opcode-pattern emulators before signalling.
+	 */
+	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
+		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
+			if (handle_popc(insn, regs))
+				return;
+		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
+			if (handle_ldf_stq(insn, regs))
+				return;
+		}
+	}
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_ILLOPC;
+	info.si_addr = (void __user *)pc;
+	info.si_trapno = 0;
+	force_sig_info(SIGILL, &info, current);
+}
+
+/* Memory-address-not-aligned trap.  SFAR/SFSR are the synchronous
+ * fault address/status registers.  Kernel-mode accesses are handed
+ * to the unaligned-access fixup code; user-mode accesses get
+ * SIGBUS/BUS_ADRALN.
+ */
+void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
+		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
+		return;
+
+	if (regs->tstate & TSTATE_PRIV) {
+		extern void kernel_unaligned_trap(struct pt_regs *regs,
+						  unsigned int insn,
+						  unsigned long sfar,
+						  unsigned long sfsr);
+
+		/* tpc is a kernel address here, safe to dereference
+		 * for the faulting instruction word.
+		 */
+		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
+				      sfar, sfsr);
+		return;
+	}
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_ADRALN;
+	info.si_addr = (void __user *)sfar;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
+/* Privileged-opcode trap from user mode: deliver SIGILL/ILL_PRVOPC. */
+void do_privop(struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	if (notify_die(DIE_TRAP, "privileged operation", regs,
+		       0, 0x11, SIGILL) == NOTIFY_STOP)
+		return;
+
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_PRVOPC;
+	info.si_addr = (void __user *)regs->tpc;
+	info.si_trapno = 0;
+	force_sig_info(SIGILL, &info, current);
+}
+
+/* Privileged-action trap: same user-visible handling as do_privop. */
+void do_privact(struct pt_regs *regs)
+{
+	do_privop(regs);
+}
+
+/* Trap level 1 stuff or other traps we should never see... */
+/* Each of these stubs dumps the TL1 trap log (pt_regs is immediately
+ * followed on the stack by the tl1_traplog the trap entry saved) and
+ * then oopses via die_if_kernel().
+ */
+void do_cee(struct pt_regs *regs)
+{
+	die_if_kernel("TL0: Cache Error Exception", regs);
+}
+
+void do_cee_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Cache Error Exception", regs);
+}
+
+void do_dae_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Data Access Exception", regs);
+}
+
+void do_iae_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Instruction Access Exception", regs);
+}
+
+void do_div0_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: DIV0 Exception", regs);
+}
+
+void do_fpdis_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: FPU Disabled", regs);
+}
+
+void do_fpieee_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: FPU IEEE Exception", regs);
+}
+
+void do_fpother_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: FPU Other Exception", regs);
+}
+
+void do_ill_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Illegal Instruction Exception", regs);
+}
+
+void do_irq_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: IRQ Exception", regs);
+}
+
+void do_lddfmna_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: LDDF Exception", regs);
+}
+
+void do_stdfmna_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: STDF Exception", regs);
+}
+
+void do_paw(struct pt_regs *regs)
+{
+	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
+}
+
+void do_paw_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
+}
+
+void do_vaw(struct pt_regs *regs)
+{
+	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
+}
+
+void do_vaw_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
+}
+
+void do_tof_tl1(struct pt_regs *regs)
+{
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	die_if_kernel("TL1: Tag Overflow Exception", regs);
+}
+
+/* SunOS-compatibility "get %psr" software trap: return a synthesized
+ * 32-bit PSR image (from TSTATE) in %o0 and step past the trap
+ * instruction.
+ */
+void do_getpsr(struct pt_regs *regs)
+{
+	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+}
+
+extern void thread_info_offsets_are_bolixed_dave(void);
+
+/* Only invoked on boot processor. */
+void __init trap_init(void)
+{
+	/* Compile time sanity check.  The TI_* constants are assembly
+	 * offsets; all comparisons are constant-foldable, so when they
+	 * all hold the call below is optimized away — otherwise the
+	 * undefined reference surfaces the mismatch at build time
+	 * (traditional link-time assert trick; confirm it still folds
+	 * on the toolchain in use).
+	 */
+	if (TI_TASK != offsetof(struct thread_info, task) ||
+	    TI_FLAGS != offsetof(struct thread_info, flags) ||
+	    TI_CPU != offsetof(struct thread_info, cpu) ||
+	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
+	    TI_KSP != offsetof(struct thread_info, ksp) ||
+	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
+	    TI_KREGS != offsetof(struct thread_info, kregs) ||
+	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
+	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
+	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
+	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
+	    TI_GSR != offsetof(struct thread_info, gsr) ||
+	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
+	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
+	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
+	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
+	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
+	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
+	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
+	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
+	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
+	    (TI_FPREGS & (64 - 1)))
+		thread_info_offsets_are_bolixed_dave();
+
+	/* Attach to the address space of init_task.  On SMP we
+	 * do this in smp.c:smp_callin for other cpus.
+	 */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+}
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
new file mode 100644
index 00000000000..491bb3681f9
--- /dev/null
+++ b/arch/sparc64/kernel/ttable.S
@@ -0,0 +1,280 @@
+/* $Id: ttable.S,v 1.38 2002/02/09 19:49:30 davem Exp $
+ * ttable.S: Sparc V9 Trap Table(s) with SpitFire/Cheetah extensions.
+ *
+ * Copyright (C) 1996, 2001 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+
+ .globl sparc64_ttable_tl0, sparc64_ttable_tl1
+ .globl tl0_icpe, tl1_icpe
+ .globl tl0_dcpe, tl1_dcpe
+ .globl tl0_fecc, tl1_fecc
+ .globl tl0_cee, tl1_cee
+ .globl tl0_iae, tl1_iae
+ .globl tl0_dae, tl1_dae
+
+sparc64_ttable_tl0:
+tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
+tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
+tl0_iax: membar #Sync
+ TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
+tl0_resv009: BTRAP(0x9)
+tl0_iae: TRAP(do_iae)
+tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
+tl0_ill: membar #Sync
+ TRAP_7INSNS(do_illegal_instruction)
+tl0_privop: TRAP(do_privop)
+tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
+tl0_resv018: BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d)
+tl0_resv01e: BTRAP(0x1e) BTRAP(0x1f)
+tl0_fpdis: TRAP_NOSAVE(do_fpdis)
+tl0_fpieee: TRAP_SAVEFPU(do_fpieee)
+tl0_fpother: TRAP_NOSAVE(do_fpother_check_fitos)
+tl0_tof: TRAP(do_tof)
+tl0_cwin: CLEAN_WINDOW
+tl0_div0: TRAP(do_div0)
+tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
+tl0_resv02f: BTRAP(0x2f)
+tl0_dax: TRAP_NOSAVE(__do_data_access_exception)
+tl0_resv031: BTRAP(0x31)
+tl0_dae: TRAP(do_dae)
+tl0_resv033: BTRAP(0x33)
+tl0_mna: TRAP_NOSAVE(do_mna)
+tl0_lddfmna: TRAP_NOSAVE(do_lddfmna)
+tl0_stdfmna: TRAP_NOSAVE(do_stdfmna)
+tl0_privact: TRAP_NOSAVE(__do_privact)
+tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
+tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
+#ifdef CONFIG_SMP
+tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
+tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
+tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
+#else
+tl0_irq1: BTRAP(0x41)
+tl0_irq2: BTRAP(0x42)
+tl0_irq3: BTRAP(0x43)
+#endif
+tl0_irq4: TRAP_IRQ(handler_irq, 4)
+tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6)
+tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8)
+tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10)
+tl0_irq11: TRAP_IRQ(handler_irq, 11) TRAP_IRQ(handler_irq, 12)
+tl0_irq13: TRAP_IRQ(handler_irq, 13)
+#ifndef CONFIG_SMP
+tl0_irq14: TRAP_IRQ(handler_irq, 14)
+#else
+tl0_irq14: TICK_SMP_IRQ
+#endif
+tl0_irq15: TRAP_IRQ(handler_irq, 15)
+tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
+tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
+tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
+tl0_ivec: TRAP_IVEC
+tl0_paw: TRAP(do_paw)
+tl0_vaw: TRAP(do_vaw)
+tl0_cee: TRAP_NOSAVE(cee_trap)
+tl0_iamiss:
+#include "itlb_base.S"
+tl0_damiss:
+#include "dtlb_base.S"
+tl0_daprot:
+#include "dtlb_prot.S"
+tl0_fecc: BTRAP(0x70) /* Fast-ECC on Cheetah */
+tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */
+tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */
+tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
+tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
+tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
+tl0_s0n: SPILL_0_NORMAL
+tl0_s1n: SPILL_1_NORMAL
+tl0_s2n: SPILL_2_NORMAL
+tl0_s3n: SPILL_3_NORMAL
+tl0_s4n: SPILL_4_NORMAL
+tl0_s5n: SPILL_5_NORMAL
+tl0_s6n: SPILL_6_NORMAL
+tl0_s7n: SPILL_7_NORMAL
+tl0_s0o: SPILL_0_OTHER
+tl0_s1o: SPILL_1_OTHER
+tl0_s2o: SPILL_2_OTHER
+tl0_s3o: SPILL_3_OTHER
+tl0_s4o: SPILL_4_OTHER
+tl0_s5o: SPILL_5_OTHER
+tl0_s6o: SPILL_6_OTHER
+tl0_s7o: SPILL_7_OTHER
+tl0_f0n: FILL_0_NORMAL
+tl0_f1n: FILL_1_NORMAL
+tl0_f2n: FILL_2_NORMAL
+tl0_f3n: FILL_3_NORMAL
+tl0_f4n: FILL_4_NORMAL
+tl0_f5n: FILL_5_NORMAL
+tl0_f6n: FILL_6_NORMAL
+tl0_f7n: FILL_7_NORMAL
+tl0_f0o: FILL_0_OTHER
+tl0_f1o: FILL_1_OTHER
+tl0_f2o: FILL_2_OTHER
+tl0_f3o: FILL_3_OTHER
+tl0_f4o: FILL_4_OTHER
+tl0_f5o: FILL_5_OTHER
+tl0_f6o: FILL_6_OTHER
+tl0_f7o: FILL_7_OTHER
+tl0_sunos: SUNOS_SYSCALL_TRAP
+tl0_bkpt: BREAKPOINT_TRAP
+tl0_divz: TRAP(do_div0)
+tl0_flushw: FLUSH_WINDOW_TRAP
+tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
+ .globl tl0_solaris
+tl0_solaris: SOLARIS_SYSCALL_TRAP
+tl0_netbsd: NETBSD_SYSCALL_TRAP
+tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
+tl0_resv10f: BTRAP(0x10f)
+tl0_linux32: LINUX_32BIT_SYSCALL_TRAP
+tl0_oldlinux64: LINUX_64BIT_SYSCALL_TRAP
+tl0_resv112: TRAP_UTRAP(UT_TRAP_INSTRUCTION_18,0x112) TRAP_UTRAP(UT_TRAP_INSTRUCTION_19,0x113)
+tl0_resv114: TRAP_UTRAP(UT_TRAP_INSTRUCTION_20,0x114) TRAP_UTRAP(UT_TRAP_INSTRUCTION_21,0x115)
+tl0_resv116: TRAP_UTRAP(UT_TRAP_INSTRUCTION_22,0x116) TRAP_UTRAP(UT_TRAP_INSTRUCTION_23,0x117)
+tl0_resv118: TRAP_UTRAP(UT_TRAP_INSTRUCTION_24,0x118) TRAP_UTRAP(UT_TRAP_INSTRUCTION_25,0x119)
+tl0_resv11a: TRAP_UTRAP(UT_TRAP_INSTRUCTION_26,0x11a) TRAP_UTRAP(UT_TRAP_INSTRUCTION_27,0x11b)
+tl0_resv11c: TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUCTION_29,0x11d)
+tl0_resv11e: TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f)
+tl0_getcc: GETCC_TRAP
+tl0_setcc: SETCC_TRAP
+tl0_getpsr: TRAP(do_getpsr)
+tl0_resv123: BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126)
+tl0_solindir: INDIRECT_SOLARIS_SYSCALL(156)
+tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c)
+tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131)
+tl0_resv132: BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136)
+tl0_resv137: BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b)
+tl0_resv13c: BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140)
+tl0_resv141: BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145)
+tl0_resv146: BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a)
+tl0_resv14b: BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f)
+tl0_resv150: BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154)
+tl0_resv155: BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159)
+tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e)
+tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163)
+tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
+tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
+tl0_linux64: LINUX_64BIT_SYSCALL_TRAP
+tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context)
+tl0_resv170: KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) BTRAP(0x172)
+tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
+tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
+tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
+#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
+tl0_resv180: BTRAPS(0x180) BTRAPS(0x188)
+tl0_resv190: BTRAPS(0x190) BTRAPS(0x198)
+tl0_resv1a0: BTRAPS(0x1a0) BTRAPS(0x1a8)
+tl0_resv1b0: BTRAPS(0x1b0) BTRAPS(0x1b8)
+tl0_resv1c0: BTRAPS(0x1c0) BTRAPS(0x1c8)
+tl0_resv1d0: BTRAPS(0x1d0) BTRAPS(0x1d8)
+tl0_resv1e0: BTRAPS(0x1e0) BTRAPS(0x1e8)
+tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
+
+sparc64_ttable_tl1:
+tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
+tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
+tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1)
+tl1_resv009: BTRAPTL1(0x9)
+tl1_iae: TRAPTL1(do_iae_tl1)
+tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
+tl1_ill: TRAPTL1(do_ill_tl1)
+tl1_privop: BTRAPTL1(0x11)
+tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
+tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19)
+tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d)
+tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f)
+tl1_fpdis: TRAP_NOSAVE(do_fpdis)
+tl1_fpieee: TRAPTL1(do_fpieee_tl1)
+tl1_fpother: TRAPTL1(do_fpother_tl1)
+tl1_tof: TRAPTL1(do_tof_tl1)
+tl1_cwin: CLEAN_WINDOW
+tl1_div0: TRAPTL1(do_div0_tl1)
+tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
+tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
+tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1)
+tl1_resv031: BTRAPTL1(0x31)
+tl1_dae: TRAPTL1(do_dae_tl1)
+tl1_resv033: BTRAPTL1(0x33)
+tl1_mna: TRAP_NOSAVE(do_mna)
+tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
+tl1_stdfmna: TRAPTL1(do_stdfmna_tl1)
+tl1_privact: BTRAPTL1(0x37)
+tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b)
+tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f)
+tl1_resv040: BTRAPTL1(0x40)
+tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3)
+tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6)
+tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9)
+tl1_irq10: TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11)
+tl1_irq12: TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13)
+tl1_irq14: TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15)
+tl1_resv050: BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53)
+tl1_resv054: BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57)
+tl1_resv058: BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b)
+tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
+tl1_ivec: TRAP_IVEC
+tl1_paw: TRAPTL1(do_paw_tl1)
+tl1_vaw: TRAPTL1(do_vaw_tl1)
+
+ /* The grotty trick to save %g1 into current->thread.cee_stuff
+ * is because when we take this trap we could be interrupting trap
+ * code already using the trap alternate global registers.
+ *
+ * We cross our fingers and pray that this store/load does
+ * not cause yet another CEE trap.
+ */
+tl1_cee: membar #Sync
+ stx %g1, [%g6 + TI_CEE_STUFF]
+ ldxa [%g0] ASI_AFSR, %g1
+ membar #Sync
+ stxa %g1, [%g0] ASI_AFSR
+ membar #Sync
+ ldx [%g6 + TI_CEE_STUFF], %g1
+ retry
+
+tl1_iamiss: BTRAPTL1(0x64) BTRAPTL1(0x65) BTRAPTL1(0x66) BTRAPTL1(0x67)
+tl1_damiss:
+#include "dtlb_backend.S"
+tl1_daprot:
+#include "dtlb_prot.S"
+tl1_fecc: BTRAPTL1(0x70) /* Fast-ECC on Cheetah */
+tl1_dcpe: BTRAPTL1(0x71) /* D-cache Parity Error on Cheetah+ */
+tl1_icpe: BTRAPTL1(0x72) /* I-cache Parity Error on Cheetah+ */
+tl1_resv073: BTRAPTL1(0x73)
+tl1_resv074: BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77)
+tl1_resv078: BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b)
+tl1_resv07c: BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f)
+tl1_s0n: SPILL_0_NORMAL
+tl1_s1n: SPILL_1_NORMAL
+tl1_s2n: SPILL_2_NORMAL
+tl1_s3n: SPILL_3_NORMAL
+tl1_s4n: SPILL_4_NORMAL
+tl1_s5n: SPILL_5_NORMAL
+tl1_s6n: SPILL_6_NORMAL
+tl1_s7n: SPILL_7_NORMAL
+tl1_s0o: SPILL_0_OTHER
+tl1_s1o: SPILL_1_OTHER
+tl1_s2o: SPILL_2_OTHER
+tl1_s3o: SPILL_3_OTHER
+tl1_s4o: SPILL_4_OTHER
+tl1_s5o: SPILL_5_OTHER
+tl1_s6o: SPILL_6_OTHER
+tl1_s7o: SPILL_7_OTHER
+tl1_f0n: FILL_0_NORMAL
+tl1_f1n: FILL_1_NORMAL
+tl1_f2n: FILL_2_NORMAL
+tl1_f3n: FILL_3_NORMAL
+tl1_f4n: FILL_4_NORMAL
+tl1_f5n: FILL_5_NORMAL
+tl1_f6n: FILL_6_NORMAL
+tl1_f7n: FILL_7_NORMAL
+tl1_f0o: FILL_0_OTHER
+tl1_f1o: FILL_1_OTHER
+tl1_f2o: FILL_2_OTHER
+tl1_f3o: FILL_3_OTHER
+tl1_f4o: FILL_4_OTHER
+tl1_f5o: FILL_5_OTHER
+tl1_f6o: FILL_6_OTHER
+tl1_f7o: FILL_7_OTHER
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
new file mode 100644
index 00000000000..4372bf32ecf
--- /dev/null
+++ b/arch/sparc64/kernel/unaligned.c
@@ -0,0 +1,729 @@
+/* $Id: unaligned.c,v 1.24 2002/02/09 19:49:31 davem Exp $
+ * unaligned.c: Unaligned load/store trap handling with special
+ * cases for the kernel to do them more quickly.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <asm/asi.h>
+#include <asm/ptrace.h>
+#include <asm/pstate.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/bitops.h>
+#include <asm/fpumacro.h>
+
+/* #define DEBUG_MNA */
+
+enum direction {
+ load, /* ld, ldd, ldh, ldsh */
+ store, /* st, std, sth, stsh */
+ both, /* Swap, ldstub, cas, ... */
+ fpld,
+ fpst,
+ invalid,
+};
+
+#ifdef DEBUG_MNA
+static char *dirstrings[] = {
+ "load", "store", "both", "fpload", "fpstore", "invalid"
+};
+#endif
+
+/* Classify a trapped memory-access instruction as a load, a store,
+ * or an atomic (swap-style) operation that both reads and writes.
+ */
+static inline enum direction decode_direction(unsigned int insn)
+{
+	unsigned long store_bit = (insn >> 21) & 1;
+
+	if (!store_bit)
+		return load;
+
+	/* On the store side, op3 == 0xf encodes the swap family. */
+	if (((insn >> 19) & 0xf) == 15)
+		return both;
+
+	return store;
+}
+
+/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
+static inline int decode_access_size(unsigned int insn)
+{
+	unsigned int tmp;
+
+	/* op3 field (bits 19..22) selects the access width. */
+	tmp = ((insn >> 19) & 0xf);
+	if (tmp == 11 || tmp == 14) /* ldx/stx */
+		return 8;
+	tmp &= 3;
+	if (!tmp)
+		return 4;
+	else if (tmp == 3)
+		return 16;	/* ldd/std - Although it is actually 8 */
+	else if (tmp == 2)
+		return 2;
+	else {
+		/* Byte accesses can never be misaligned; getting here
+		 * means the trap itself is bogus.
+		 */
+		printk("Impossible unaligned trap. insn=%08x\n", insn);
+		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+
+		/* GCC should never warn that control reaches the end
+		 * of this function without returning a value because
+		 * die_if_kernel() is marked with attribute 'noreturn'.
+		 * Alas, some versions do...
+		 */
+
+		return 0;
+	}
+}
+
+/* Pick the ASI the access should use: for alternate-space forms
+ * either the %asi register (kept in tstate bits 24-31) or the
+ * immediate ASI field, otherwise the default primary ASI.
+ */
+static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
+{
+	if (insn & 0x800000) {
+		if (insn & 0x2000)
+			return (unsigned char)(regs->tstate >> 24);	/* %asi */
+		else
+			return (unsigned char)(insn >> 5);	/* imm_asi */
+	} else
+		return ASI_P;
+}
+
+/* 0x400000 = signed, 0 = unsigned */
+static inline int decode_signedness(unsigned int insn)
+{
+	return (insn & 0x400000);
+}
+
+/* If any register involved lives in a register window (%l/%i, i.e.
+ * reg >= 16), flush the windows to the stack so the saved copies we
+ * read/write below are current.
+ */
+static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
+				       unsigned int rd, int from_kernel)
+{
+	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
+		if (from_kernel != 0)
+			__asm__ __volatile__("flushw");
+		else
+			flushw_user();
+	}
+}
+
+/* Sign-extend the 13-bit immediate field of an instruction word
+ * (long is 64 bits here, hence the 51-bit shifts).
+ */
+static inline long sign_extend_imm13(long imm)
+{
+	return imm << 51 >> 51;
+}
+
+/* Read register %reg of the trapped context.  %g/%o registers come
+ * straight from pt_regs (%g0 reads as zero); %l/%i registers are
+ * fetched from the register window saved on the stack -- kernel
+ * stack, 32-bit user frame, or 64-bit user frame as appropriate.
+ * NOTE(review): the user-space get_user() results are not checked
+ * for faults here; a failed read leaves `value' unchanged.
+ */
+static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
+{
+	unsigned long value;
+
+	if (reg < 16)
+		return (!reg ? 0 : regs->u_regs[reg]);
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *win;
+		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+		value = win->locals[reg - 16];
+	} else if (test_thread_flag(TIF_32BIT)) {
+		struct reg_window32 __user *win32;
+		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+		get_user(value, &win32->locals[reg - 16]);
+	} else {
+		struct reg_window __user *win;
+		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+		get_user(value, &win->locals[reg - 16]);
+	}
+	return value;
+}
+
+/* Return the address holding register %reg's value for the trapped
+ * context: the pt_regs slot for %g/%o, or the saved window slot on
+ * the stack for %l/%i (kernel, 32-bit user, or 64-bit user frame).
+ */
+static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
+{
+	if (reg < 16)
+		return &regs->u_regs[reg];
+	if (regs->tstate & TSTATE_PRIV) {
+		struct reg_window *win;
+		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+		return &win->locals[reg - 16];
+	} else if (test_thread_flag(TIF_32BIT)) {
+		struct reg_window32 *win32;
+		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+		return (unsigned long *)&win32->locals[reg - 16];
+	} else {
+		struct reg_window *win;
+		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+		return &win->locals[reg - 16];
+	}
+}
+
+/* Compute rs1 + simm13 (i-bit set) or rs1 + rs2 for the trapped
+ * instruction, flushing register windows first so fetch_reg() sees
+ * current values for any window registers involved.
+ */
+unsigned long compute_effective_address(struct pt_regs *regs,
+					unsigned int insn, unsigned int rd)
+{
+	unsigned int rs1 = (insn >> 14) & 0x1f;
+	unsigned int rs2 = insn & 0x1f;
+	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+
+	if (insn & 0x2000) {
+		maybe_flush_windows(rs1, 0, rd, from_kernel);
+		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+	} else {
+		maybe_flush_windows(rs1, rs2, rd, from_kernel);
+		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+	}
+}
+
+/* This is just to make gcc think die_if_kernel does return... */
+static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
+{
+	die_if_kernel(str, regs);
+}
+
+/* Emulate an unaligned integer load of `size' bytes (2, 4, 8, or 16
+ * for the ldd case) from `saddr' using ASI `asi', byte by byte with
+ * lduba, and store the assembled (optionally sign-extended) value
+ * through `dest_reg'.  Every lduba has an __ex_table entry pointing
+ * at `errh'; %asi is restored to ASI_AIUS before returning.
+ */
+#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
+__asm__ __volatile__ ( \
+	"wr	%4, 0, %%asi\n\t" \
+	"cmp	%1, 8\n\t" \
+	"bge,pn	%%icc, 9f\n\t" \
+	" cmp	%1, 4\n\t" \
+	"be,pt	%%icc, 6f\n" \
+"4:\t"	" lduba	[%2] %%asi, %%l1\n" \
+"5:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t" \
+	"sll	%%l1, 8, %%l1\n\t" \
+	"brz,pt	%3, 3f\n\t" \
+	" add	%%l1, %%l2, %%l1\n\t" \
+	"sllx	%%l1, 48, %%l1\n\t" \
+	"srax	%%l1, 48, %%l1\n" \
+"3:\t"	"ba,pt	%%xcc, 0f\n\t" \
+	" stx	%%l1, [%0]\n" \
+"6:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t" \
+	"sll	%%l1, 24, %%l1\n" \
+"7:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t" \
+	"sll	%%l2, 16, %%l2\n" \
+"8:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t" \
+	"sll	%%g7, 8, %%g7\n\t" \
+	"or	%%l1, %%l2, %%l1\n\t" \
+	"or	%%g7, %%g1, %%g7\n\t" \
+	"or	%%l1, %%g7, %%l1\n\t" \
+	"brnz,a,pt	%3, 3f\n\t" \
+	" sra	%%l1, 0, %%l1\n" \
+"3:\t"	"ba,pt	%%xcc, 0f\n\t" \
+	" stx	%%l1, [%0]\n" \
+"9:\t"	"lduba	[%2] %%asi, %%l1\n" \
+"10:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t" \
+	"sllx	%%l1, 56, %%l1\n" \
+"11:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t" \
+	"sllx	%%l2, 48, %%l2\n" \
+"12:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t" \
+	"sllx	%%g7, 40, %%g7\n\t" \
+	"sllx	%%g1, 32, %%g1\n\t" \
+	"or	%%l1, %%l2, %%l1\n\t" \
+	"or	%%g7, %%g1, %%g7\n" \
+"13:\t"	"lduba	[%2 + 4] %%asi, %%l2\n\t" \
+	"or	%%l1, %%g7, %%g7\n" \
+"14:\t"	"lduba	[%2 + 5] %%asi, %%g1\n\t" \
+	"sllx	%%l2, 24, %%l2\n" \
+"15:\t"	"lduba	[%2 + 6] %%asi, %%l1\n\t" \
+	"sllx	%%g1, 16, %%g1\n\t" \
+	"or	%%g7, %%l2, %%g7\n" \
+"16:\t"	"lduba	[%2 + 7] %%asi, %%l2\n\t" \
+	"sllx	%%l1, 8, %%l1\n\t" \
+	"or	%%g7, %%g1, %%g7\n\t" \
+	"or	%%l1, %%l2, %%l1\n\t" \
+	"or	%%g7, %%l1, %%g7\n\t" \
+	"cmp	%1, 8\n\t" \
+	"be,a,pt %%icc, 0f\n\t" \
+	" stx	%%g7, [%0]\n\t" \
+	"srlx	%%g7, 32, %%l1\n\t" \
+	"sra	%%g7, 0, %%g7\n\t" \
+	"stx	%%l1, [%0]\n\t" \
+	"stx	%%g7, [%0 + 8]\n" \
+"0:\n\t" \
+	"wr	%%g0, %5, %%asi\n\n\t" \
+	".section __ex_table\n\t" \
+	".word	4b, " #errh "\n\t" \
+	".word	5b, " #errh "\n\t" \
+	".word	6b, " #errh "\n\t" \
+	".word	7b, " #errh "\n\t" \
+	".word	8b, " #errh "\n\t" \
+	".word	9b, " #errh "\n\t" \
+	".word	10b, " #errh "\n\t" \
+	".word	11b, " #errh "\n\t" \
+	".word	12b, " #errh "\n\t" \
+	".word	13b, " #errh "\n\t" \
+	".word	14b, " #errh "\n\t" \
+	".word	15b, " #errh "\n\t" \
+	".word	16b, " #errh "\n\n\t" \
+	".previous\n\t" \
+	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \
+	    "r" (asi), "i" (ASI_AIUS) \
+	: "l1", "l2", "g7", "g1", "cc"); \
+})
+
+/* Emulate an unaligned integer store: write the `size'-byte value at
+ * *src_val to `dst_addr' one byte at a time (stba) via ASI `asi'.
+ * Faulting stba's are routed to `errh' via __ex_table; %asi is
+ * restored to ASI_AIUS on exit.
+ */
+#define store_common(dst_addr, size, src_val, asi, errh) ({ \
+__asm__ __volatile__ ( \
+	"wr	%3, 0, %%asi\n\t" \
+	"ldx	[%2], %%l1\n" \
+	"cmp	%1, 2\n\t" \
+	"be,pn	%%icc, 2f\n\t" \
+	" cmp	%1, 4\n\t" \
+	"be,pt	%%icc, 1f\n\t" \
+	" srlx	%%l1, 24, %%l2\n\t" \
+	"srlx	%%l1, 56, %%g1\n\t" \
+	"srlx	%%l1, 48, %%g7\n" \
+"4:\t"	"stba	%%g1, [%0] %%asi\n\t" \
+	"srlx	%%l1, 40, %%g1\n" \
+"5:\t"	"stba	%%g7, [%0 + 1] %%asi\n\t" \
+	"srlx	%%l1, 32, %%g7\n" \
+"6:\t"	"stba	%%g1, [%0 + 2] %%asi\n" \
+"7:\t"	"stba	%%g7, [%0 + 3] %%asi\n\t" \
+	"srlx	%%l1, 16, %%g1\n" \
+"8:\t"	"stba	%%l2, [%0 + 4] %%asi\n\t" \
+	"srlx	%%l1, 8, %%g7\n" \
+"9:\t"	"stba	%%g1, [%0 + 5] %%asi\n" \
+"10:\t"	"stba	%%g7, [%0 + 6] %%asi\n\t" \
+	"ba,pt	%%xcc, 0f\n" \
+"11:\t"	" stba	%%l1, [%0 + 7] %%asi\n" \
+"1:\t"	"srl	%%l1, 16, %%g7\n" \
+"12:\t"	"stba	%%l2, [%0] %%asi\n\t" \
+	"srl	%%l1, 8, %%l2\n" \
+"13:\t"	"stba	%%g7, [%0 + 1] %%asi\n" \
+"14:\t"	"stba	%%l2, [%0 + 2] %%asi\n\t" \
+	"ba,pt	%%xcc, 0f\n" \
+"15:\t"	" stba	%%l1, [%0 + 3] %%asi\n" \
+"2:\t"	"srl	%%l1, 8, %%l2\n" \
+"16:\t"	"stba	%%l2, [%0] %%asi\n" \
+"17:\t"	"stba	%%l1, [%0 + 1] %%asi\n" \
+"0:\n\t" \
+	"wr	%%g0, %4, %%asi\n\n\t" \
+	".section __ex_table\n\t" \
+	".word	4b, " #errh "\n\t" \
+	".word	5b, " #errh "\n\t" \
+	".word	6b, " #errh "\n\t" \
+	".word	7b, " #errh "\n\t" \
+	".word	8b, " #errh "\n\t" \
+	".word	9b, " #errh "\n\t" \
+	".word	10b, " #errh "\n\t" \
+	".word	11b, " #errh "\n\t" \
+	".word	12b, " #errh "\n\t" \
+	".word	13b, " #errh "\n\t" \
+	".word	14b, " #errh "\n\t" \
+	".word	15b, " #errh "\n\t" \
+	".word	16b, " #errh "\n\t" \
+	".word	17b, " #errh "\n\n\t" \
+	".previous\n\t" \
+	: : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
+	: "l1", "l2", "g7", "g1", "cc"); \
+})
+
+/* Fetch the source register value -- pairing reg and reg+1 into one
+ * 64-bit quantity for the 16-byte (std) case -- and hand it to
+ * store_common().
+ */
+#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
+	unsigned long zero = 0; \
+	unsigned long *src_val = &zero; \
+	\
+	if (size == 16) { \
+		size = 8; \
+		zero = (((long)(reg_num ? \
+		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
+			(unsigned)fetch_reg(reg_num + 1, regs); \
+	} else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
+	store_common(dst_addr, size, src_val, asi, errh); \
+})
+
+extern void smp_capture(void);
+extern void smp_release(void);
+
+/* Emulate an unaligned atomic (swap-style) access as a load followed
+ * by a store, with all other cpus captured and local irqs disabled.
+ *
+ * NOTE(review): this macro is only referenced from the "#if 0"
+ * (unsupported) branch below and would not even expand as written --
+ * it passes do_integer_load() five arguments (the macro takes six;
+ * `asi' is missing) and store_common() four (it takes five).  Fix
+ * the argument lists before ever enabling it.
+ */
+#define do_atomic(srcdest_reg, mem, errh) ({ \
+	unsigned long flags, tmp; \
+	\
+	smp_capture(); \
+	local_irq_save(flags); \
+	tmp = *srcdest_reg; \
+	do_integer_load(srcdest_reg, 4, mem, 0, errh); \
+	store_common(mem, 4, &tmp, errh); \
+	local_irq_restore(flags); \
+	smp_release(); \
+})
+
+/* Step the trap PC past the emulated instruction; 32-bit tasks keep
+ * their PCs masked to 32 bits.
+ */
+static inline void advance(struct pt_regs *regs)
+{
+	regs->tpc = regs->tnpc;
+	regs->tnpc += 4;
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+}
+
+/* Bit 24 distinguishes the floating point load/store encodings. */
+static inline int floating_point_load_or_store_p(unsigned int insn)
+{
+	return (insn >> 24) & 1;
+}
+
+/* The kernel emulation path below only handles integer accesses. */
+static inline int ok_for_kernel(unsigned int insn)
+{
+	return !floating_point_load_or_store_p(insn);
+}
+
+/* Reached (via the asm stub in kernel_unaligned_trap) when the
+ * byte-wise emulation itself faults.  Look up an exception-table
+ * fixup for regs->tpc and resume there; with no fixup, report the
+ * bad address and die.  The asm alias keeps the symbol name stable
+ * for the call emitted from inline assembly.
+ */
+void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
+
+void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+{
+	unsigned long g2 = regs->u_regs [UREG_G2];
+	unsigned long fixup = search_extables_range(regs->tpc, &g2);
+
+	if (!fixup) {
+		unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+		if (address < PAGE_SIZE) {
+			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
+		} else
+			printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
+		printk(KERN_ALERT " at virtual address %016lx\n",address);
+		printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n",
+			(current->mm ? CTX_HWBITS(current->mm->context) :
+			CTX_HWBITS(current->active_mm->context)));
+		printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n",
+			(current->mm ? (unsigned long) current->mm->pgd :
+			(unsigned long) current->active_mm->pgd));
+	        die_if_kernel("Oops", regs);
+		/* Not reached */
+	}
+	regs->tpc = fixup;
+	regs->tnpc = regs->tpc + 4;
+	regs->u_regs [UREG_G2] = g2;
+
+	/* Reset %asi in the saved state to ASI_AIUS for the fixup path. */
+	regs->tstate &= ~TSTATE_ASI;
+	regs->tstate |= (ASI_AIUS << 24UL);
+}
+
+/* Kernel-mode unaligned access trap entry: decode the faulting
+ * instruction and emulate it with byte-wise accesses.  Floating
+ * point and atomic forms are unsupported and panic.  The embedded
+ * asm defines the kernel_unaligned_trap_fault label that the
+ * emulation macros list as their __ex_table target; it forwards
+ * control to kernel_mna_trap_fault().
+ */
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
+{
+	enum direction dir = decode_direction(insn);
+	int size = decode_access_size(insn);
+
+	if (!ok_for_kernel(insn) || dir == both) {
+		printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
+		       regs->tpc);
+		unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);
+
+		__asm__ __volatile__ ("\n"
+"kernel_unaligned_trap_fault:\n\t"
+		"mov	%0, %%o0\n\t"
+		"call	kernel_mna_trap_fault\n\t"
+		" mov	%1, %%o1\n\t"
+		:
+		: "r" (regs), "r" (insn)
+		: "o0", "o1", "o2", "o3", "o4", "o5", "o7",
+		  "g1", "g2", "g3", "g4", "g7", "cc");
+	} else {
+		unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
+
+#ifdef DEBUG_MNA
+		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
+		       regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
+#endif
+		switch (dir) {
+		case load:
+			do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+					size, (unsigned long *) addr,
+					decode_signedness(insn), decode_asi(insn, regs),
+					kernel_unaligned_trap_fault);
+			break;
+
+		case store:
+			do_integer_store(((insn>>25)&0x1f), size,
+					 (unsigned long *) addr, regs,
+					 decode_asi(insn, regs),
+					 kernel_unaligned_trap_fault);
+			break;
+#if 0 /* unsupported */
+		case both:
+			do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
+				  (unsigned long *) addr,
+				  kernel_unaligned_trap_fault);
+			break;
+#endif
+		default:
+			panic("Impossible kernel unaligned trap.");
+			/* Not reached... */
+		}
+		advance(regs);
+	}
+}
+
+/* Population count of each 4-bit value, for nibble-at-a-time POPC. */
+static char popc_helper[] = {
+0, 1, 1, 2, 1, 2, 2, 3,
+1, 2, 2, 3, 2, 3, 3, 4,
+};
+
+/* Emulate the POPC instruction: count the set bits of the register
+ * or sign-extended-immediate operand, 4 bits at a time via the table
+ * above, and write the count to rd (pt_regs slot, or the saved
+ * window slot for %l/%i in 32- or 64-bit user frames).  %g0 as rd is
+ * discarded.  Always returns 1 (trap handled).
+ */
+int handle_popc(u32 insn, struct pt_regs *regs)
+{
+	u64 value;
+	int ret, i, rd = ((insn >> 25) & 0x1f);
+	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+	                        
+	if (insn & 0x2000) {
+		maybe_flush_windows(0, 0, rd, from_kernel);
+		value = sign_extend_imm13(insn);
+	} else {
+		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
+		value = fetch_reg(insn & 0x1f, regs);
+	}
+	for (ret = 0, i = 0; i < 16; i++) {
+		ret += popc_helper[value & 0xf];
+		value >>= 4;
+	}
+	if (rd < 16) {
+		if (rd)
+			regs->u_regs[rd] = ret;
+	} else {
+		if (test_thread_flag(TIF_32BIT)) {
+			struct reg_window32 __user *win32;
+			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+			put_user(ret, &win32->locals[rd - 16]);
+		} else {
+			struct reg_window __user *win;
+			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
+			put_user(ret, &win->locals[rd - 16]);
+		}
+	}
+	advance(regs);
+	return 1;
+}
+
+extern void do_fpother(struct pt_regs *regs);
+extern void do_privact(struct pt_regs *regs);
+extern void data_access_exception(struct pt_regs *regs,
+				  unsigned long sfsr,
+				  unsigned long sfar);
+
+/* Emulate trapped floating point loads/stores (LDF/LDDF/LDQF and
+ * STQ), honoring the instruction's ASI -- including little-endian
+ * and no-fault variants -- and keeping the software-saved FPU state
+ * (->fpsaved, ->xfsr, FPUSTATE regs) consistent.  Returns 0 only for
+ * a misaligned fp register number (after raising fp-other via
+ * do_fpother()); otherwise 1, with bad ASIs/addresses signalled
+ * through do_privact()/data_access_exception().
+ */
+int handle_ldf_stq(u32 insn, struct pt_regs *regs)
+{
+	unsigned long addr = compute_effective_address(regs, insn, 0);
+	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+	struct fpustate *f = FPUSTATE;
+	int asi = decode_asi(insn, regs);
+	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+
+	save_and_clear_fpu();
+	current_thread_info()->xfsr[0] &= ~0x1c000;
+	if (freg & 3) {
+		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+		do_fpother(regs);
+		return 0;
+	}
+	if (insn & 0x200000) {
+		/* STQ */
+		u64 first = 0, second = 0;
+		
+		if (current_thread_info()->fpsaved[0] & flag) {
+			first = *(u64 *)&f->regs[freg];
+			second = *(u64 *)&f->regs[freg+2];
+		}
+		if (asi < 0x80) {
+			do_privact(regs);
+			return 1;
+		}
+		switch (asi) {
+		case ASI_P:
+		case ASI_S: break;
+		case ASI_PL:
+		case ASI_SL: 
+			{
+				/* Need to convert endians */
+				u64 tmp = __swab64p(&first);
+
+				first = __swab64p(&second);
+				second = tmp;
+				break;
+			}
+		default:
+			data_access_exception(regs, 0, addr);
+			return 1;
+		}
+		if (put_user (first >> 32, (u32 __user *)addr) ||
+		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
+		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
+		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
+		    	data_access_exception(regs, 0, addr);
+		    	return 1;
+		}
+	} else {
+		/* LDF, LDDF, LDQF */
+		u32 data[4] __attribute__ ((aligned(8)));
+		int size, i;
+		int err;
+
+		if (asi < 0x80) {
+			do_privact(regs);
+			return 1;
+		} else if (asi > ASI_SNFL) {
+			data_access_exception(regs, 0, addr);
+			return 1;
+		}
+		switch (insn & 0x180000) {
+		case 0x000000: size = 1; break;
+		case 0x100000: size = 4; break;
+		default: size = 2; break;
+		}
+		for (i = 0; i < size; i++)
+			data[i] = 0;
+		
+		err = get_user (data[0], (u32 __user *) addr);
+		if (!err) {
+			for (i = 1; i < size; i++)
+				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
+		}
+		/* No-fault ASIs (bit 1) read as zero instead of faulting. */
+		if (err && !(asi & 0x2 /* NF */)) {
+			data_access_exception(regs, 0, addr);
+			return 1;
+		}
+		if (asi & 0x8) /* Little */ {
+			u64 tmp;
+
+			switch (size) {
+			case 1: data[0] = le32_to_cpup(data + 0); break;
+			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
+				break;
+			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
+				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
+				*(u64 *)(data + 2) = tmp;
+				break;
+			}
+		}
+		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
+			current_thread_info()->fpsaved[0] = FPRS_FEF;
+			current_thread_info()->gsr[0] = 0;
+		}
+		if (!(current_thread_info()->fpsaved[0] & flag)) {
+			if (freg < 32)
+				memset(f->regs, 0, 32*sizeof(u32));
+			else
+				memset(f->regs+32, 0, 32*sizeof(u32));
+		}
+		memcpy(f->regs + freg, data, size * 4);
+		current_thread_info()->fpsaved[0] |= flag;
+	}
+	advance(regs);
+	return 1;
+}
+
+/* Emulate a faulting no-fault (ASI_*NF) load: such loads read zero
+ * instead of trapping, so write 0 to rd -- and to rd+1 for the
+ * double-word forms ((insn & 0x780000) == 0x180000) -- either in
+ * pt_regs or in the user's saved register window.
+ */
+void handle_ld_nf(u32 insn, struct pt_regs *regs)
+{
+	int rd = ((insn >> 25) & 0x1f);
+	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+	unsigned long *reg;
+	                        
+	maybe_flush_windows(0, 0, rd, from_kernel);
+	reg = fetch_reg_addr(rd, regs);
+	if (from_kernel || rd < 16) {
+		reg[0] = 0;
+		if ((insn & 0x780000) == 0x180000)
+			reg[1] = 0;
+	} else if (test_thread_flag(TIF_32BIT)) {
+		put_user(0, (int __user *) reg);
+		if ((insn & 0x780000) == 0x180000)
+			put_user(0, ((int __user *) reg) + 1);
+	} else {
+		put_user(0, (unsigned long __user *) reg);
+		if ((insn & 0x780000) == 0x180000)
+			put_user(0, (unsigned long __user *) reg + 1);
+	}
+	advance(regs);
+}
+
+/* Emulate a misaligned LDDF from user space: re-fetch the insn from
+ * tpc, read the 8-byte datum as two 32-bit user loads from sfar
+ * (zero on fault for no-fault ASIs, byte-swapped for little-endian
+ * ASIs) and deposit it into the software-saved FPU register file.
+ * Kernel-mode traps of this kind are fatal.
+ */
+void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
+{
+	unsigned long pc = regs->tpc;
+	unsigned long tstate = regs->tstate;
+	u32 insn;
+	u32 first, second;
+	u64 value;
+	u8 asi, freg;
+	int flag;
+	struct fpustate *f = FPUSTATE;
+
+	if (tstate & TSTATE_PRIV)
+		die_if_kernel("lddfmna from kernel", regs);
+	if (test_thread_flag(TIF_32BIT))
+		pc = (u32)pc;
+	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
+		/* The trapping ASI is recorded in sfsr bits 16-23. */
+		asi = sfsr >> 16;
+		if ((asi > ASI_SNFL) ||
+		    (asi < ASI_P))
+			goto daex;
+		if (get_user(first, (u32 __user *)sfar) ||
+		     get_user(second, (u32 __user *)(sfar + 4))) {
+			if (asi & 0x2) /* NF */ {
+				first = 0; second = 0;
+			} else
+				goto daex;
+		}
+		save_and_clear_fpu();
+		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+		value = (((u64)first) << 32) | second;
+		if (asi & 0x8) /* Little */
+			value = __swab64p(&value);
+		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
+			current_thread_info()->fpsaved[0] = FPRS_FEF;
+			current_thread_info()->gsr[0] = 0;
+		}
+		if (!(current_thread_info()->fpsaved[0] & flag)) {
+			if (freg < 32)
+				memset(f->regs, 0, 32*sizeof(u32));
+			else
+				memset(f->regs+32, 0, 32*sizeof(u32));
+		}
+		*(u64 *)(f->regs + freg) = value;
+		current_thread_info()->fpsaved[0] |= flag;
+	} else {
+daex:		data_access_exception(regs, sfsr, sfar);
+		return;
+	}
+	advance(regs);
+	return;
+}
+
+/* Emulate a misaligned STDF from user space: re-fetch the insn from
+ * tpc, pull the 8-byte source from the software-saved FPU register
+ * file (zero if that half was never saved), byte-swap for the
+ * little-endian ASIs, and store it as two 32-bit user writes to
+ * sfar.  Kernel-mode traps of this kind are fatal.
+ */
+void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
+{
+	unsigned long pc = regs->tpc;
+	unsigned long tstate = regs->tstate;
+	u32 insn;
+	u64 value;
+	u8 asi, freg;
+	int flag;
+	struct fpustate *f = FPUSTATE;
+
+	if (tstate & TSTATE_PRIV)
+		die_if_kernel("stdfmna from kernel", regs);
+	if (test_thread_flag(TIF_32BIT))
+		pc = (u32)pc;
+	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
+		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+		asi = sfsr >> 16;
+		value = 0;
+		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+		if ((asi > ASI_SNFL) ||
+		    (asi < ASI_P))
+			goto daex;
+		save_and_clear_fpu();
+		if (current_thread_info()->fpsaved[0] & flag)
+			value = *(u64 *)&f->regs[freg];
+		switch (asi) {
+		case ASI_P:
+		case ASI_S: break;
+		case ASI_PL:
+		case ASI_SL: 
+			value = __swab64p(&value); break;
+		default: goto daex;
+		}
+		if (put_user (value >> 32, (u32 __user *) sfar) ||
+		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
+			goto daex;
+	} else {
+daex:		data_access_exception(regs, sfsr, sfar);
+		return;
+	}
+	advance(regs);
+	return;
+}
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
new file mode 100644
index 00000000000..7aae0a18aab
--- /dev/null
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -0,0 +1,400 @@
+/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <asm/asi.h>
+#include <asm/timer.h>
+
+static struct cpufreq_driver *cpufreq_us2e_driver;
+
+struct us2e_freq_percpu_info {
+ struct cpufreq_frequency_table table[6];
+};
+
+/* Indexed by cpu number. */
+static struct us2e_freq_percpu_info *us2e_freq_table;
+
+#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL
+#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL
+
+/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled
+ * in the ESTAR mode control register.
+ */
+#define ESTAR_MODE_DIV_1 0x0000000000000000UL
+#define ESTAR_MODE_DIV_2 0x0000000000000001UL
+#define ESTAR_MODE_DIV_4 0x0000000000000003UL
+#define ESTAR_MODE_DIV_6 0x0000000000000002UL
+#define ESTAR_MODE_DIV_8 0x0000000000000004UL
+#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL
+
+#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL
+#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL
+#define MCTRL0_REFR_COUNT_SHIFT 8
+#define MCTRL0_REFR_INTERVAL 7800
+#define MCTRL0_REFR_CLKS_P_CNT 64
+
+/* Read a Hummingbird configuration register via a physical-address
+ * bypass load (ASI_PHYS_BYPASS_EC_E).
+ */
+static unsigned long read_hbreg(unsigned long addr)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa	[%1] %2, %0"
+			     : "=&r" (ret)
+			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
+	return ret;
+}
+
+/* Write a Hummingbird configuration register; writes to the ESTAR
+ * mode register additionally wait for the PLL to relock.
+ */
+static void write_hbreg(unsigned long addr, unsigned long val)
+{
+	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
+			     : "memory");
+	if (addr == HBIRD_ESTAR_MODE_ADDR) {
+		/* Need to wait 16 clock cycles for the PLL to lock.  */
+		udelay(1);
+	}
+}
+
+/* Enable or disable memory self-refresh in MEM_CNTL0; the read-back
+ * forces the write to complete before we proceed.
+ */
+static void self_refresh_ctl(int enable)
+{
+	unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+	if (enable)
+		mctrl |= MCTRL0_SREFRESH_ENAB;
+	else
+		mctrl &= ~MCTRL0_SREFRESH_ENAB;
+	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+	(void) read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+}
+
+/* Recompute the memory refresh count in MEM_CNTL0 for the new cpu
+ * clock divisor.  When slowing down with self-refresh still off,
+ * wait long enough for both the old and new refresh counts to drain
+ * before the caller changes the clock.
+ */
+static void frob_mem_refresh(int cpu_slowing_down,
+			     unsigned long clock_tick,
+			     unsigned long old_divisor, unsigned long divisor)
+{
+	unsigned long old_refr_count, refr_count, mctrl;
+
+
+	refr_count  = (clock_tick * MCTRL0_REFR_INTERVAL);
+	refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL);
+
+	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+	old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK)
+		>> MCTRL0_REFR_COUNT_SHIFT;
+
+	mctrl &= ~MCTRL0_REFR_COUNT_MASK;
+	mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT;
+	write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl);
+	mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR);
+
+	if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) {
+		unsigned long usecs;
+
+		/* We have to wait for both refresh counts (old
+		 * and new) to go to zero.
+		 */
+		usecs = (MCTRL0_REFR_CLKS_P_CNT *
+			 (refr_count + old_refr_count) *
+			 1000000UL *
+			 old_divisor) / clock_tick;
+		udelay(usecs + 1UL);
+	}
+}
+
+/* Switch the cpu clock divider, ordering the memory-refresh and
+ * self-refresh updates around the ESTAR mode write as the IIe
+ * manual's state transition diagram requires.  Transitions between
+ * divisor 1 and divisors > 2 are done in two hops through divisor 2
+ * (recursing once).  Runs with local irqs disabled.
+ */
+static void us2e_transition(unsigned long estar, unsigned long new_bits,
+			    unsigned long clock_tick,
+			    unsigned long old_divisor, unsigned long divisor)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	estar &= ~ESTAR_MODE_DIV_MASK;
+
+	/* This is based upon the state transition diagram in the IIe manual.  */
+	if (old_divisor == 2 && divisor == 1) {
+		self_refresh_ctl(0);
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+	} else if (old_divisor == 1 && divisor == 2) {
+		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+		self_refresh_ctl(1);
+	} else if (old_divisor == 1 && divisor > 2) {
+		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+				1, 2);
+		us2e_transition(estar, new_bits, clock_tick,
+				2, divisor);
+	} else if (old_divisor > 2 && divisor == 1) {
+		us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick,
+				old_divisor, 2);
+		us2e_transition(estar, new_bits, clock_tick,
+				2, divisor);
+	} else if (old_divisor < divisor) {
+		frob_mem_refresh(0, clock_tick, old_divisor, divisor);
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+	} else if (old_divisor > divisor) {
+		write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits);
+		frob_mem_refresh(1, clock_tick, old_divisor, divisor);
+	} else {
+		BUG();
+	}
+
+	local_irq_restore(flags);
+}
+
+/* Map a cpufreq table index (0-4) to the corresponding ESTAR mode
+ * register divider bits.  Any other index is a driver bug.
+ */
+static unsigned long index_to_estar_mode(unsigned int index)
+{
+	switch (index) {
+	case 0:
+		return ESTAR_MODE_DIV_1;
+
+	case 1:
+		return ESTAR_MODE_DIV_2;
+
+	case 2:
+		return ESTAR_MODE_DIV_4;
+
+	case 3:
+		return ESTAR_MODE_DIV_6;
+
+	case 4:
+		return ESTAR_MODE_DIV_8;
+
+	default:
+		BUG();
+	}
+
+	/* Not reached: BUG() never returns.  This keeps compilers that
+	 * do not know that from warning about (and generating) a fall
+	 * off the end of a non-void function.
+	 */
+	return ESTAR_MODE_DIV_1;
+}
+
+/* Map a cpufreq table index (0-4) to the numeric clock divisor.
+ * Any other index is a driver bug.
+ */
+static unsigned long index_to_divisor(unsigned int index)
+{
+	switch (index) {
+	case 0:
+		return 1;
+
+	case 1:
+		return 2;
+
+	case 2:
+		return 4;
+
+	case 3:
+		return 6;
+
+	case 4:
+		return 8;
+
+	default:
+		BUG();
+	}
+
+	/* Not reached: BUG() never returns.  This keeps compilers that
+	 * do not know that from warning about (and generating) a fall
+	 * off the end of a non-void function.
+	 */
+	return 1;
+}
+
+/* Decode the divider field of an ESTAR mode register value back into
+ * the numeric clock divisor.
+ */
+static unsigned long estar_to_divisor(unsigned long estar)
+{
+	unsigned long ret;
+
+	switch (estar & ESTAR_MODE_DIV_MASK) {
+	case ESTAR_MODE_DIV_1:
+		ret = 1;
+		break;
+	case ESTAR_MODE_DIV_2:
+		ret = 2;
+		break;
+	case ESTAR_MODE_DIV_4:
+		ret = 4;
+		break;
+	case ESTAR_MODE_DIV_6:
+		ret = 6;
+		break;
+	case ESTAR_MODE_DIV_8:
+		ret = 8;
+		break;
+	default:
+		BUG();
+	};
+
+	return ret;
+}
+
+/* Change cpu `cpu' to the divider selected by table index `index'.
+ * Temporarily migrates the current task onto the target cpu (the
+ * Hummingbird registers are per-cpu), notifies cpufreq around the
+ * actual transition, then restores the old cpu affinity.
+ */
+static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
+{
+	unsigned long new_bits, new_freq;
+	unsigned long clock_tick, divisor, old_divisor, estar;
+	cpumask_t cpus_allowed;
+	struct cpufreq_freqs freqs;
+
+	if (!cpu_online(cpu))
+		return;
+
+	cpus_allowed = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+	new_freq = clock_tick = sparc64_get_clock_tick(cpu);
+	new_bits = index_to_estar_mode(index);
+	divisor = index_to_divisor(index);
+	new_freq /= divisor;
+
+	estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+	old_divisor = estar_to_divisor(estar);
+
+	freqs.old = clock_tick / old_divisor;
+	freqs.new = new_freq;
+	freqs.cpu = cpu;
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	if (old_divisor != divisor)
+		us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor);
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	set_cpus_allowed(current, cpus_allowed);
+}
+
+/* cpufreq ->target hook: translate the requested frequency into a
+ * table index via the frequency-table helpers and apply it.
+ */
+static int us2e_freq_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	unsigned int new_index = 0;
+
+	if (cpufreq_frequency_table_target(policy,
+					   &us2e_freq_table[policy->cpu].table[0],
+					   target_freq,
+					   relation,
+					   &new_index))
+		return -EINVAL;
+
+	us2e_set_cpu_divider_index(policy->cpu, new_index);
+
+	return 0;
+}
+
+/* cpufreq ->verify hook: clamp the policy to this cpu's table. */
+static int us2e_freq_verify(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy,
+					      &us2e_freq_table[policy->cpu].table[0]);
+}
+
+/* cpufreq ->init hook: fill this cpu's frequency table with the five
+ * UltraSPARC-IIe dividers (1, 2, 4, 6, 8) plus the terminator, then
+ * hand it to the cpufreq core.
+ *
+ * Fixed: the original wrote the divider-6, divider-8 and terminator
+ * entries all into table[2] and terminated at table[3], so dividers
+ * 6 and 8 were lost and slots 4/5 of the 6-entry table were left
+ * uninitialized.
+ */
+static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+	struct cpufreq_frequency_table *table =
+		&us2e_freq_table[cpu].table[0];
+
+	table[0].index = 0;
+	table[0].frequency = clock_tick / 1;
+	table[1].index = 1;
+	table[1].frequency = clock_tick / 2;
+	table[2].index = 2;
+	table[2].frequency = clock_tick / 4;
+	table[3].index = 3;
+	table[3].frequency = clock_tick / 6;
+	table[4].index = 4;
+	table[4].frequency = clock_tick / 8;
+	table[5].index = 5;
+	table[5].frequency = CPUFREQ_TABLE_END;
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	policy->cpuinfo.transition_latency = 0;
+	policy->cur = clock_tick;
+
+	return cpufreq_frequency_table_cpuinfo(policy, table);
+}
+
+/* cpufreq ->exit hook: put the cpu back to full speed (divider
+ * index 0) if the driver is still registered.
+ */
+static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+	if (cpufreq_us2e_driver != NULL)
+		us2e_set_cpu_divider_index(policy->cpu, 0);
+
+	return 0;
+}
+
+/* Module init: probe %ver for an UltraSPARC-IIe (manufacturer 0x17,
+ * implementation 0x13) and, if present, allocate the driver struct
+ * and per-cpu frequency tables and register with cpufreq.  Both
+ * allocations are released on any failure.
+ *
+ * Fixed: `driver->owner = THIS_MODULE,' used a stray comma operator
+ * instead of a statement terminator.
+ */
+static int __init us2e_freq_init(void)
+{
+	unsigned long manuf, impl, ver;
+	int ret;
+
+	__asm__("rdpr %%ver, %0" : "=r" (ver));
+	manuf = ((ver >> 48) & 0xffff);
+	impl  = ((ver >> 32) & 0xffff);
+
+	if (manuf == 0x17 && impl == 0x13) {
+		struct cpufreq_driver *driver;
+
+		ret = -ENOMEM;
+		driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+		if (!driver)
+			goto err_out;
+		memset(driver, 0, sizeof(*driver));
+
+		us2e_freq_table = kmalloc(
+			(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
+			GFP_KERNEL);
+		if (!us2e_freq_table)
+			goto err_out;
+
+		memset(us2e_freq_table, 0,
+		       (NR_CPUS * sizeof(struct us2e_freq_percpu_info)));
+
+		driver->verify = us2e_freq_verify;
+		driver->target = us2e_freq_target;
+		driver->init = us2e_freq_cpu_init;
+		driver->exit = us2e_freq_cpu_exit;
+		driver->owner = THIS_MODULE;
+		strcpy(driver->name, "UltraSPARC-IIe");
+
+		cpufreq_us2e_driver = driver;
+		ret = cpufreq_register_driver(driver);
+		if (ret)
+			goto err_out;
+
+		return 0;
+
+err_out:
+		if (driver) {
+			kfree(driver);
+			cpufreq_us2e_driver = NULL;
+		}
+		if (us2e_freq_table) {
+			kfree(us2e_freq_table);
+			us2e_freq_table = NULL;
+		}
+		return ret;
+	}
+
+	return -ENODEV;
+}
+
+/* Module teardown: unregister from cpufreq and release the driver
+ * struct and the per-cpu frequency tables.
+ */
+static void __exit us2e_freq_exit(void)
+{
+	struct cpufreq_driver *driver = cpufreq_us2e_driver;
+
+	if (!driver)
+		return;
+
+	cpufreq_unregister_driver(driver);
+	cpufreq_us2e_driver = NULL;
+	kfree(driver);
+
+	kfree(us2e_freq_table);
+	us2e_freq_table = NULL;
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe");
+MODULE_LICENSE("GPL");
+
+module_init(us2e_freq_init);
+module_exit(us2e_freq_exit);
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
new file mode 100644
index 00000000000..18fe54b8aa5
--- /dev/null
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -0,0 +1,255 @@
+/* us3_cpufreq.c: UltraSPARC-III cpu frequency support
+ *
+ * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ *
+ * Many thanks to Dominik Brodowski for fixing up the cpufreq
+ * infrastructure in order to make this driver easier to implement.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpufreq.h>
+#include <linux/threads.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <asm/head.h>
+#include <asm/timer.h>
+
+/* Set once in us3_freq_init() on a supported CPU; also doubles as the
+ * "driver registered" flag tested in the exit paths.
+ */
+static struct cpufreq_driver *cpufreq_us3_driver;
+
+/* One four-entry cpufreq table per CPU: /1, /2, /32 plus terminator. */
+struct us3_freq_percpu_info {
+	struct cpufreq_frequency_table	table[4];
+};
+
+/* Indexed by cpu number. */
+static struct us3_freq_percpu_info *us3_freq_table;
+
+/* UltraSPARC-III has three dividers: 1, 2, and 32.  These are controlled
+ * in the Safari config register.
+ */
+#define SAFARI_CFG_DIV_1	0x0000000000000000UL
+#define SAFARI_CFG_DIV_2	0x0000000040000000UL
+#define SAFARI_CFG_DIV_32	0x0000000080000000UL
+#define SAFARI_CFG_DIV_MASK	0x00000000C0000000UL
+
+/* Read the Safari configuration register: a 64-bit load-alternate
+ * from address 0 in the ASI_SAFARI_CONFIG address space.
+ */
+static unsigned long read_safari_cfg(void)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__("ldxa [%%g0] %1, %0"
+			     : "=&r" (ret)
+			     : "i" (ASI_SAFARI_CONFIG));
+	return ret;
+}
+
+/* Write the Safari configuration register, followed by a membar #Sync
+ * so the store completes before execution continues.
+ */
+static void write_safari_cfg(unsigned long val)
+{
+	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+			     "membar #Sync"
+			     : /* no outputs */
+			     : "r" (val), "i" (ASI_SAFARI_CONFIG)
+			     : "memory");
+}
+
+/* Translate the divider field of @safari_cfg into @cpu's current
+ * frequency, in the same units as sparc64_get_clock_tick().
+ * BUG()s on the reserved divider encoding.
+ */
+static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg)
+{
+	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+	unsigned long ret;
+
+	switch (safari_cfg & SAFARI_CFG_DIV_MASK) {
+	case SAFARI_CFG_DIV_1:
+		ret = clock_tick / 1;
+		break;
+	case SAFARI_CFG_DIV_2:
+		ret = clock_tick / 2;
+		break;
+	case SAFARI_CFG_DIV_32:
+		ret = clock_tick / 32;
+		break;
+	default:
+		BUG();
+	}	/* fix: dropped spurious ';' after the switch block */
+
+	return ret;
+}
+
+/* Program @cpu's Safari clock divider to the setting selected by
+ * @index (0 -> /1, 1 -> /2, 2 -> /32); BUG()s on any other index.
+ *
+ * The Safari config register is only reachable from the target CPU,
+ * so the current task is temporarily pinned to @cpu with
+ * set_cpus_allowed() and its original affinity mask restored at the
+ * end.  cpufreq PRECHANGE/POSTCHANGE notifications bracket the
+ * actual register update.
+ */
+static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
+{
+	unsigned long new_bits, new_freq, reg;
+	cpumask_t cpus_allowed;
+	struct cpufreq_freqs freqs;
+
+	/* Nothing to do for an offline cpu. */
+	if (!cpu_online(cpu))
+		return;
+
+	/* Save affinity and migrate onto the target cpu. */
+	cpus_allowed = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+
+	new_freq = sparc64_get_clock_tick(cpu);
+	switch (index) {
+	case 0:
+		new_bits = SAFARI_CFG_DIV_1;
+		new_freq /= 1;
+		break;
+	case 1:
+		new_bits = SAFARI_CFG_DIV_2;
+		new_freq /= 2;
+		break;
+	case 2:
+		new_bits = SAFARI_CFG_DIV_32;
+		new_freq /= 32;
+		break;
+
+	default:
+		BUG();
+	};
+
+	reg = read_safari_cfg();
+
+	freqs.old = get_current_freq(cpu, reg);
+	freqs.new = new_freq;
+	freqs.cpu = cpu;
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	/* Read-modify-write only the divider field. */
+	reg &= ~SAFARI_CFG_DIV_MASK;
+	reg |= new_bits;
+	write_safari_cfg(reg);
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	/* Restore the caller's original affinity. */
+	set_cpus_allowed(current, cpus_allowed);
+}
+
+/* cpufreq "target" hook: look up the table entry matching
+ * @target_freq under @relation and program the corresponding
+ * clock divider on @policy->cpu.
+ */
+static int us3_freq_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	struct cpufreq_frequency_table *freq_tab;
+	unsigned int idx = 0;
+
+	freq_tab = &us3_freq_table[policy->cpu].table[0];
+	if (cpufreq_frequency_table_target(policy, freq_tab, target_freq,
+					   relation, &idx) != 0)
+		return -EINVAL;
+
+	us3_set_cpu_divider_index(policy->cpu, idx);
+	return 0;
+}
+
+/* cpufreq "verify" hook: clamp the policy limits against this
+ * CPU's frequency table.
+ */
+static int us3_freq_verify(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *freq_tab;
+
+	freq_tab = &us3_freq_table[policy->cpu].table[0];
+	return cpufreq_frequency_table_verify(policy, freq_tab);
+}
+
+/* cpufreq per-CPU setup: build this CPU's four-entry frequency table
+ * (full speed, /2, /32, terminator) from its base clock tick and
+ * hand it to the cpufreq core.
+ */
+static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	unsigned long clock_tick = sparc64_get_clock_tick(cpu);
+	struct cpufreq_frequency_table *table =
+		&us3_freq_table[cpu].table[0];
+
+	table[0].index = 0;
+	table[0].frequency = clock_tick / 1;
+	table[1].index = 1;
+	table[1].frequency = clock_tick / 2;
+	table[2].index = 2;
+	table[2].frequency = clock_tick / 32;
+	table[3].index = 0;
+	table[3].frequency = CPUFREQ_TABLE_END;
+
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	policy->cpuinfo.transition_latency = 0;
+	/* cur is set to full speed; assumes the CPU boots at divider
+	 * /1 -- TODO confirm against firmware behavior.
+	 */
+	policy->cur = clock_tick;
+
+	return cpufreq_frequency_table_cpuinfo(policy, table);
+}
+
+/* cpufreq per-CPU teardown: return the CPU to full speed (divider
+ * index 0), but only when the driver actually registered.
+ */
+static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+{
+	if (!cpufreq_us3_driver)
+		return 0;
+
+	us3_set_cpu_divider_index(policy->cpu, 0);
+	return 0;
+}
+
+/* Module init: probe the CPU model from the privileged %ver register
+ * and, on an UltraSPARC-III (Cheetah) or III+ (Cheetah+), allocate
+ * and register the cpufreq driver plus the per-CPU frequency tables.
+ *
+ * Returns 0 on success, -ENODEV on any other CPU model, -ENOMEM on
+ * allocation failure, or the cpufreq_register_driver() error code.
+ */
+static int __init us3_freq_init(void)
+{
+	unsigned long manuf, impl, ver;
+	int ret;
+
+	__asm__("rdpr %%ver, %0" : "=r" (ver));
+	manuf = ((ver >> 48) & 0xffff);
+	impl = ((ver >> 32) & 0xffff);
+
+	if (manuf == CHEETAH_MANUF &&
+	    (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) {
+		struct cpufreq_driver *driver;
+
+		ret = -ENOMEM;
+		driver = kmalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+		if (!driver)
+			goto err_out;
+		memset(driver, 0, sizeof(*driver));
+
+		us3_freq_table = kmalloc(
+			(NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+			GFP_KERNEL);
+		if (!us3_freq_table)
+			goto err_out;
+
+		memset(us3_freq_table, 0,
+		       (NR_CPUS * sizeof(struct us3_freq_percpu_info)));
+
+		driver->verify = us3_freq_verify;
+		driver->target = us3_freq_target;
+		driver->init = us3_freq_cpu_init;
+		driver->exit = us3_freq_cpu_exit;
+		driver->owner = THIS_MODULE;	/* bugfix: was ',' (comma operator) */
+		strcpy(driver->name, "UltraSPARC-III");
+
+		cpufreq_us3_driver = driver;
+		ret = cpufreq_register_driver(driver);
+		if (ret)
+			goto err_out;
+
+		return 0;
+
+err_out:
+		/* kfree(NULL) is a no-op, so neither call needs a guard. */
+		kfree(driver);
+		cpufreq_us3_driver = NULL;
+		kfree(us3_freq_table);
+		us3_freq_table = NULL;
+		return ret;
+	}
+
+	return -ENODEV;
+}
+
+/* Module unload: unregister from the cpufreq core, then release the
+ * driver struct and per-CPU frequency tables allocated at init time.
+ * Pointers are reset to NULL to guard against stale use.
+ */
+static void __exit us3_freq_exit(void)
+{
+	if (cpufreq_us3_driver) {
+		cpufreq_unregister_driver(cpufreq_us3_driver);
+
+		kfree(cpufreq_us3_driver);
+		cpufreq_us3_driver = NULL;
+		kfree(us3_freq_table);
+		us3_freq_table = NULL;
+	}
+}
+
+MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
+MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III");
+MODULE_LICENSE("GPL");
+
+module_init(us3_freq_init);
+module_exit(us3_freq_exit);
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
new file mode 100644
index 00000000000..382fd6798bb
--- /dev/null
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -0,0 +1,106 @@
+/* ld script to make UltraLinux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
+OUTPUT_ARCH(sparc:v9a)
+ENTRY(_start)
+
+jiffies = jiffies_64;
+SECTIONS
+{
+  /* NOTE(review): fixed low-memory addresses for early page tables --
+   * confirm these match the bootstrap code in head.S.
+   */
+  swapper_pmd_dir = 0x0000000000402000;
+  empty_pg_dir = 0x0000000000403000;
+  . = 0x4000;
+  /* Kernel text is linked at virtual 0x404000; fill gaps with 0. */
+  .text 0x0000000000404000 :
+  {
+    *(.text)
+    SCHED_TEXT
+    LOCK_TEXT
+    *(.gnu.warning)
+  } =0
+  _etext = .;
+  PROVIDE (etext = .);
+
+  RODATA
+
+  .data :
+  {
+    *(.data)
+    CONSTRUCTORS
+  }
+  .data1 : { *(.data1) }
+  . = ALIGN(64);
+  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+  _edata = .;
+  PROVIDE (edata = .);
+  .fixup : { *(.fixup) }
+
+  /* Exception table: __start/__stop symbols bracket the entries. */
+  . = ALIGN(16);
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+
+  /* Init sections between __init_begin and __init_end; presumably
+   * reclaimed after boot -- verify against free_initmem().
+   */
+  . = ALIGN(8192);
+  __init_begin = .;
+  .init.text : {
+	_sinittext = .;
+	*(.init.text)
+	_einittext = .;
+  }
+  .init.data : { *(.init.data) }
+  . = ALIGN(16);
+  __setup_start = .;
+  .init.setup : { *(.init.setup) }
+  __setup_end = .;
+  __initcall_start = .;
+  .initcall.init : {
+	*(.initcall1.init)
+	*(.initcall2.init)
+	*(.initcall3.init)
+	*(.initcall4.init)
+	*(.initcall5.init)
+	*(.initcall6.init)
+	*(.initcall7.init)
+  }
+  __initcall_end = .;
+  __con_initcall_start = .;
+  .con_initcall.init : { *(.con_initcall.init) }
+  __con_initcall_end = .;
+  SECURITY_INIT
+  . = ALIGN(8192);
+  __initramfs_start = .;
+  .init.ramfs : { *(.init.ramfs) }
+  __initramfs_end = .;
+  . = ALIGN(8192);
+  __per_cpu_start = .;
+  .data.percpu : { *(.data.percpu) }
+  __per_cpu_end = .;
+  . = ALIGN(8192);
+  __init_end = .;
+  __bss_start = .;
+  .sbss : { *(.sbss) *(.scommon) }
+  .bss :
+  {
+   *(.dynbss)
+   *(.bss)
+   *(COMMON)
+  }
+  _end = . ;
+  PROVIDE (end = .);
+  /* Stabs debugging sections. */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+  .debug 0 : { *(.debug) }
+  .debug_srcinfo 0 : { *(.debug_srcinfo) }
+  .debug_aranges 0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  .debug_sfnames 0 : { *(.debug_sfnames) }
+  .line 0 : { *(.line) }
+  /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) }
+}
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
new file mode 100644
index 00000000000..dfbc7e0dcf7
--- /dev/null
+++ b/arch/sparc64/kernel/winfixup.S
@@ -0,0 +1,417 @@
+/* $Id: winfixup.S,v 1.30 2002/02/09 19:49:30 davem Exp $
+ *
+ * winfixup.S: Handle cases where user stack pointer is found to be bogus.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/asi.h>
+#include <asm/head.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/spitfire.h>
+#include <asm/thread_info.h>
+
+ .text
+
+	/* set_pcontext: load a primary context value into the
+	 * PRIMARY_CONTEXT DMMU register and flush.  The two sethi
+	 * instructions below start as sethi %hi(0) placeholders; on
+	 * Cheetah+ they are overwritten at boot by
+	 * cheetah_plus_patch_winfixup with the cplus_wfinsn_* words
+	 * further down, which encode the Cheetah+ nucleus/context-0
+	 * constants.
+	 */
+set_pcontext:
+cplus_winfixup_insn_1:
+	sethi	%hi(0), %l1
+	mov	PRIMARY_CONTEXT, %g1
+	sllx	%l1, 32, %l1
+cplus_winfixup_insn_2:
+	sethi	%hi(0), %g2
+	or	%l1, %g2, %l1
+	stxa	%l1, [%g1] ASI_DMMU
+	flush	%g6
+	retl
+	nop
+
+	/* Replacement instruction words copied over the placeholder
+	 * sethis above by cheetah_plus_patch_winfixup.
+	 */
+cplus_wfinsn_1:
+	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %l1
+cplus_wfinsn_2:
+	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
+
+ .align 32
+
+ /* Here are the rules, pay attention.
+ *
+ * The kernel is disallowed from touching user space while
+ * the trap level is greater than zero, except for from within
+ * the window spill/fill handlers. This must be followed
+ * so that we can easily detect the case where we tried to
+ * spill/fill with a bogus (or unmapped) user stack pointer.
+ *
+ * These are layed out in a special way for cache reasons,
+ * don't touch...
+ */
+ .globl fill_fixup, spill_fixup
+fill_fixup:
+ rdpr %tstate, %g1
+ andcc %g1, TSTATE_PRIV, %g0
+ or %g4, FAULT_CODE_WINFIXUP, %g4
+ be,pt %xcc, window_scheisse_from_user_common
+ and %g1, TSTATE_CWP, %g1
+
+ /* This is the extremely complex case, but it does happen from
+ * time to time if things are just right. Essentially the restore
+ * done in rtrap right before going back to user mode, with tl=1
+ * and that levels trap stack registers all setup, took a fill trap,
+ * the user stack was not mapped in the tlb, and tlb miss occurred,
+ * the pte found was not valid, and a simple ref bit watch update
+ * could not satisfy the miss, so we got here.
+ *
+ * We must carefully unwind the state so we get back to tl=0, preserve
+ * all the register values we were going to give to the user. Luckily
+ * most things are where they need to be, we also have the address
+ * which triggered the fault handy as well.
+ *
+ * Also note that we must preserve %l5 and %l6. If the user was
+ * returning from a system call, we must make it look this way
+ * after we process the fill fault on the users stack.
+ *
+ * First, get into the window where the original restore was executed.
+ */
+
+ rdpr %wstate, %g2 ! Grab user mode wstate.
+ wrpr %g1, %cwp ! Get into the right window.
+ sll %g2, 3, %g2 ! NORMAL-->OTHER
+
+ wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
+ wrpr %g2, 0x0, %wstate ! This must be consistent.
+ wrpr %g0, 0x0, %otherwin ! We know this.
+ call set_pcontext ! Change contexts...
+ nop
+ rdpr %pstate, %l1 ! Prepare to change globals.
+ mov %g6, %o7 ! Get current.
+
+ andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
+ stb %g4, [%g6 + TI_FAULT_CODE]
+ stx %g5, [%g6 + TI_FAULT_ADDR]
+ wrpr %g0, 0x0, %tl ! Out of trap levels.
+ wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+ mov %o7, %g6
+ ldx [%g6 + TI_TASK], %g4
+#ifdef CONFIG_SMP
+ mov TSB_REG, %g1
+ ldxa [%g1] ASI_IMMU, %g5
+#endif
+
+ /* This is the same as below, except we handle this a bit special
+ * since we must preserve %l5 and %l6, see comment above.
+ */
+ call do_sparc64_fault
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ nop ! yes, nop is correct
+
+ /* Be very careful about usage of the alternate globals here.
+ * You cannot touch %g4/%g5 as that has the fault information
+ * should this be from usermode. Also be careful for the case
+ * where we get here from the save instruction in etrap.S when
+ * coming from either user or kernel (does not matter which, it
+ * is the same problem in both cases). Essentially this means
+ * do not touch %g7 or %g2 so we handle the two cases fine.
+ */
+spill_fixup:
+ ldx [%g6 + TI_FLAGS], %g1
+ andcc %g1, _TIF_32BIT, %g0
+ ldub [%g6 + TI_WSAVED], %g1
+
+ sll %g1, 3, %g3
+ add %g6, %g3, %g3
+ stx %sp, [%g3 + TI_RWIN_SPTRS]
+ sll %g1, 7, %g3
+ bne,pt %xcc, 1f
+ add %g6, %g3, %g3
+ stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
+ stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
+
+ stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
+ stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
+ stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
+ stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
+ stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
+ stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
+ stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
+ stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
+
+ stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
+ stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
+ stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
+ stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
+ stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
+ b,pt %xcc, 2f
+ stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
+1: stw %l0, [%g3 + TI_REG_WINDOW + 0x00]
+
+ stw %l1, [%g3 + TI_REG_WINDOW + 0x04]
+ stw %l2, [%g3 + TI_REG_WINDOW + 0x08]
+ stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]
+ stw %l4, [%g3 + TI_REG_WINDOW + 0x10]
+ stw %l5, [%g3 + TI_REG_WINDOW + 0x14]
+ stw %l6, [%g3 + TI_REG_WINDOW + 0x18]
+ stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]
+ stw %i0, [%g3 + TI_REG_WINDOW + 0x20]
+
+ stw %i1, [%g3 + TI_REG_WINDOW + 0x24]
+ stw %i2, [%g3 + TI_REG_WINDOW + 0x28]
+ stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]
+ stw %i4, [%g3 + TI_REG_WINDOW + 0x30]
+ stw %i5, [%g3 + TI_REG_WINDOW + 0x34]
+ stw %i6, [%g3 + TI_REG_WINDOW + 0x38]
+ stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]
+2: add %g1, 1, %g1
+
+ stb %g1, [%g6 + TI_WSAVED]
+ rdpr %tstate, %g1
+ andcc %g1, TSTATE_PRIV, %g0
+ saved
+ and %g1, TSTATE_CWP, %g1
+ be,pn %xcc, window_scheisse_from_user_common
+ mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4
+ retry
+
+window_scheisse_from_user_common:
+ stb %g4, [%g6 + TI_FAULT_CODE]
+ stx %g5, [%g6 + TI_FAULT_ADDR]
+ wrpr %g1, %cwp
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call do_sparc64_fault
+ add %sp, PTREGS_OFF, %o0
+ ba,a,pt %xcc, rtrap_clr_l6
+
+ .globl winfix_mna, fill_fixup_mna, spill_fixup_mna
+winfix_mna:
+ andn %g3, 0x7f, %g3
+ add %g3, 0x78, %g3
+ wrpr %g3, %tnpc
+ done
+fill_fixup_mna:
+ rdpr %tstate, %g1
+ andcc %g1, TSTATE_PRIV, %g0
+ be,pt %xcc, window_mna_from_user_common
+ and %g1, TSTATE_CWP, %g1
+
+ /* Please, see fill_fixup commentary about why we must preserve
+ * %l5 and %l6 to preserve absolute correct semantics.
+ */
+ rdpr %wstate, %g2 ! Grab user mode wstate.
+ wrpr %g1, %cwp ! Get into the right window.
+ sll %g2, 3, %g2 ! NORMAL-->OTHER
+ wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
+
+ wrpr %g2, 0x0, %wstate ! This must be consistent.
+ wrpr %g0, 0x0, %otherwin ! We know this.
+ call set_pcontext ! Change contexts...
+ nop
+ rdpr %pstate, %l1 ! Prepare to change globals.
+ mov %g4, %o2 ! Setup args for
+ mov %g5, %o1 ! final call to mem_address_unaligned.
+ andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
+
+ mov %g6, %o7 ! Stash away current.
+ wrpr %g0, 0x0, %tl ! Out of trap levels.
+ wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+ mov %o7, %g6 ! Get current back.
+ ldx [%g6 + TI_TASK], %g4 ! Finish it.
+#ifdef CONFIG_SMP
+ mov TSB_REG, %g1
+ ldxa [%g1] ASI_IMMU, %g5
+#endif
+ call mem_address_unaligned
+ add %sp, PTREGS_OFF, %o0
+
+ b,pt %xcc, rtrap
+ nop ! yes, the nop is correct
+spill_fixup_mna:
+ ldx [%g6 + TI_FLAGS], %g1
+ andcc %g1, _TIF_32BIT, %g0
+ ldub [%g6 + TI_WSAVED], %g1
+ sll %g1, 3, %g3
+ add %g6, %g3, %g3
+ stx %sp, [%g3 + TI_RWIN_SPTRS]
+
+ sll %g1, 7, %g3
+ bne,pt %xcc, 1f
+ add %g6, %g3, %g3
+ stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
+ stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
+ stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
+ stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
+ stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
+
+ stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
+ stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
+ stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
+ stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
+ stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
+ stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
+ stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
+ stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
+
+ stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
+ stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
+ stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
+ b,pt %xcc, 2f
+ add %g1, 1, %g1
+1: std %l0, [%g3 + TI_REG_WINDOW + 0x00]
+ std %l2, [%g3 + TI_REG_WINDOW + 0x08]
+ std %l4, [%g3 + TI_REG_WINDOW + 0x10]
+
+ std %l6, [%g3 + TI_REG_WINDOW + 0x18]
+ std %i0, [%g3 + TI_REG_WINDOW + 0x20]
+ std %i2, [%g3 + TI_REG_WINDOW + 0x28]
+ std %i4, [%g3 + TI_REG_WINDOW + 0x30]
+ std %i6, [%g3 + TI_REG_WINDOW + 0x38]
+ add %g1, 1, %g1
+2: stb %g1, [%g6 + TI_WSAVED]
+ rdpr %tstate, %g1
+
+ andcc %g1, TSTATE_PRIV, %g0
+ saved
+ be,pn %xcc, window_mna_from_user_common
+ and %g1, TSTATE_CWP, %g1
+ retry
+window_mna_from_user_common:
+ wrpr %g1, %cwp
+ sethi %hi(109f), %g7
+ ba,pt %xcc, etrap
+109: or %g7, %lo(109b), %g7
+ mov %l4, %o2
+ mov %l5, %o1
+ call mem_address_unaligned
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ clr %l6
+
+ /* These are only needed for 64-bit mode processes which
+ * put their stack pointer into the VPTE area and there
+ * happens to be a VPTE tlb entry mapped there during
+ * a spill/fill trap to that stack frame.
+ */
+ .globl winfix_dax, fill_fixup_dax, spill_fixup_dax
+winfix_dax:
+ andn %g3, 0x7f, %g3
+ add %g3, 0x74, %g3
+ wrpr %g3, %tnpc
+ done
+fill_fixup_dax:
+ rdpr %tstate, %g1
+ andcc %g1, TSTATE_PRIV, %g0
+ be,pt %xcc, window_dax_from_user_common
+ and %g1, TSTATE_CWP, %g1
+
+ /* Please, see fill_fixup commentary about why we must preserve
+ * %l5 and %l6 to preserve absolute correct semantics.
+ */
+ rdpr %wstate, %g2 ! Grab user mode wstate.
+ wrpr %g1, %cwp ! Get into the right window.
+ sll %g2, 3, %g2 ! NORMAL-->OTHER
+ wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
+
+ wrpr %g2, 0x0, %wstate ! This must be consistent.
+ wrpr %g0, 0x0, %otherwin ! We know this.
+ call set_pcontext ! Change contexts...
+ nop
+ rdpr %pstate, %l1 ! Prepare to change globals.
+ mov %g4, %o1 ! Setup args for
+ mov %g5, %o2 ! final call to data_access_exception.
+ andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
+
+ mov %g6, %o7 ! Stash away current.
+ wrpr %g0, 0x0, %tl ! Out of trap levels.
+ wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+ mov %o7, %g6 ! Get current back.
+ ldx [%g6 + TI_TASK], %g4 ! Finish it.
+#ifdef CONFIG_SMP
+ mov TSB_REG, %g1
+ ldxa [%g1] ASI_IMMU, %g5
+#endif
+ call data_access_exception
+ add %sp, PTREGS_OFF, %o0
+
+ b,pt %xcc, rtrap
+ nop ! yes, the nop is correct
+spill_fixup_dax:
+ ldx [%g6 + TI_FLAGS], %g1
+ andcc %g1, _TIF_32BIT, %g0
+ ldub [%g6 + TI_WSAVED], %g1
+ sll %g1, 3, %g3
+ add %g6, %g3, %g3
+ stx %sp, [%g3 + TI_RWIN_SPTRS]
+
+ sll %g1, 7, %g3
+ bne,pt %xcc, 1f
+ add %g6, %g3, %g3
+ stx %l0, [%g3 + TI_REG_WINDOW + 0x00]
+ stx %l1, [%g3 + TI_REG_WINDOW + 0x08]
+ stx %l2, [%g3 + TI_REG_WINDOW + 0x10]
+ stx %l3, [%g3 + TI_REG_WINDOW + 0x18]
+ stx %l4, [%g3 + TI_REG_WINDOW + 0x20]
+
+ stx %l5, [%g3 + TI_REG_WINDOW + 0x28]
+ stx %l6, [%g3 + TI_REG_WINDOW + 0x30]
+ stx %l7, [%g3 + TI_REG_WINDOW + 0x38]
+ stx %i0, [%g3 + TI_REG_WINDOW + 0x40]
+ stx %i1, [%g3 + TI_REG_WINDOW + 0x48]
+ stx %i2, [%g3 + TI_REG_WINDOW + 0x50]
+ stx %i3, [%g3 + TI_REG_WINDOW + 0x58]
+ stx %i4, [%g3 + TI_REG_WINDOW + 0x60]
+
+ stx %i5, [%g3 + TI_REG_WINDOW + 0x68]
+ stx %i6, [%g3 + TI_REG_WINDOW + 0x70]
+ stx %i7, [%g3 + TI_REG_WINDOW + 0x78]
+ b,pt %xcc, 2f
+ add %g1, 1, %g1
+1: std %l0, [%g3 + TI_REG_WINDOW + 0x00]
+ std %l2, [%g3 + TI_REG_WINDOW + 0x08]
+ std %l4, [%g3 + TI_REG_WINDOW + 0x10]
+
+ std %l6, [%g3 + TI_REG_WINDOW + 0x18]
+ std %i0, [%g3 + TI_REG_WINDOW + 0x20]
+ std %i2, [%g3 + TI_REG_WINDOW + 0x28]
+ std %i4, [%g3 + TI_REG_WINDOW + 0x30]
+ std %i6, [%g3 + TI_REG_WINDOW + 0x38]
+ add %g1, 1, %g1
+2: stb %g1, [%g6 + TI_WSAVED]
+ rdpr %tstate, %g1
+
+ andcc %g1, TSTATE_PRIV, %g0
+ saved
+ be,pn %xcc, window_dax_from_user_common
+ and %g1, TSTATE_CWP, %g1
+ retry
+window_dax_from_user_common:
+ wrpr %g1, %cwp
+ sethi %hi(109f), %g7
+ ba,pt %xcc, etrap
+109: or %g7, %lo(109b), %g7
+ mov %l4, %o1
+ mov %l5, %o2
+ call data_access_exception
+ add %sp, PTREGS_OFF, %o0
+ ba,pt %xcc, rtrap
+ clr %l6
+
+
+	/* Boot-time patching for Cheetah+: copy the instruction words
+	 * stored at cplus_wfinsn_1/_2 over the placeholder sethis at
+	 * cplus_winfixup_insn_1/_2 inside set_pcontext, flushing after
+	 * each store so the patched instructions become visible.
+	 */
+	.globl	cheetah_plus_patch_winfixup
+cheetah_plus_patch_winfixup:
+	sethi	%hi(cplus_wfinsn_1), %o0
+	sethi	%hi(cplus_winfixup_insn_1), %o2
+	lduw	[%o0 + %lo(cplus_wfinsn_1)], %o1
+	or	%o2, %lo(cplus_winfixup_insn_1), %o2
+	stw	%o1, [%o2]
+	flush	%o2
+
+	sethi	%hi(cplus_wfinsn_2), %o0
+	sethi	%hi(cplus_winfixup_insn_2), %o2
+	lduw	[%o0 + %lo(cplus_wfinsn_2)], %o1
+	or	%o2, %lo(cplus_winfixup_insn_2), %o2
+	stw	%o1, [%o2]
+	flush	%o2
+
+	retl
+	nop