authorSteven Whitehouse <swhiteho@redhat.com>2006-09-28 08:29:59 -0400
committerSteven Whitehouse <swhiteho@redhat.com>2006-09-28 08:29:59 -0400
commit185a257f2f73bcd89050ad02da5bedbc28fc43fa (patch)
tree5e32586114534ed3f2165614cba3d578f5d87307 /arch/sh/kernel
parent3f1a9aaeffd8d1cbc5ab9776c45cbd66af1c9699 (diff)
parenta77c64c1a641950626181b4857abb701d8f38ccc (diff)
Merge branch 'master' into gfs2
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/Makefile | 5
-rw-r--r--  arch/sh/kernel/apm.c | 539
-rw-r--r--  arch/sh/kernel/cf-enabler.c | 12
-rw-r--r--  arch/sh/kernel/cpu/Makefile | 1
-rw-r--r--  arch/sh/kernel/cpu/clock.c | 19
-rw-r--r--  arch/sh/kernel/cpu/init.c | 21
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile | 5
-rw-r--r--  arch/sh/kernel/cpu/irq/intc2.c | 6
-rw-r--r--  arch/sh/kernel/cpu/irq/ipr.c | 14
-rw-r--r--  arch/sh/kernel/cpu/irq/maskreg.c | 93
-rw-r--r--  arch/sh/kernel/cpu/irq/pint.c | 8
-rw-r--r--  arch/sh/kernel/cpu/rtc.c | 128
-rw-r--r--  arch/sh/kernel/cpu/sh3/Makefile | 13
-rw-r--r--  arch/sh/kernel/cpu/sh3/clock-sh7706.c | 84
-rw-r--r--  arch/sh/kernel/cpu/sh3/ex.S | 54
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c | 6
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7300.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7705.c | 48
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7708.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7709.c | 53
-rw-r--r--  arch/sh/kernel/cpu/sh3/setup-sh7710.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh4/Makefile | 10
-rw-r--r--  arch/sh/kernel/cpu/sh4/ex.S | 176
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c | 138
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh73180.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7343.c | 43
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7750.c | 48
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7760.c | 53
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7770.c | 53
-rw-r--r--  arch/sh/kernel/cpu/sh4/setup-sh7780.c | 79
-rw-r--r--  arch/sh/kernel/cpu/sh4/sq.c | 543
-rw-r--r--  arch/sh/kernel/early_printk.c | 106
-rw-r--r--  arch/sh/kernel/entry.S | 333
-rw-r--r--  arch/sh/kernel/head.S | 43
-rw-r--r--  arch/sh/kernel/io.c | 67
-rw-r--r--  arch/sh/kernel/irq.c | 168
-rw-r--r--  arch/sh/kernel/kgdb_stub.c | 33
-rw-r--r--  arch/sh/kernel/machine_kexec.c | 6
-rw-r--r--  arch/sh/kernel/pm.c | 88
-rw-r--r--  arch/sh/kernel/process.c | 28
-rw-r--r--  arch/sh/kernel/ptrace.c | 1
-rw-r--r--  arch/sh/kernel/semaphore.c | 2
-rw-r--r--  arch/sh/kernel/setup.c | 117
-rw-r--r--  arch/sh/kernel/sh_ksyms.c | 32
-rw-r--r--  arch/sh/kernel/signal.c | 158
-rw-r--r--  arch/sh/kernel/sys_sh.c | 56
-rw-r--r--  arch/sh/kernel/syscalls.S | 353
-rw-r--r--  arch/sh/kernel/time.c | 68
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c | 24
-rw-r--r--  arch/sh/kernel/traps.c | 184
-rw-r--r--  arch/sh/kernel/vmlinux.lds.S | 17
-rw-r--r--  arch/sh/kernel/vsyscall/Makefile | 36
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-note.S | 25
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-sigreturn.S | 39
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-syscall.S | 10
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S | 42
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c | 150
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.lds.S | 74
59 files changed, 3552 insertions, 1175 deletions
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index f05cd96f886..5da88a43d35 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -6,9 +6,10 @@ extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o setup.o time.o sys_sh.o semaphore.o \
- io.o io_generic.o sh_ksyms.o
+ io.o io_generic.o sh_ksyms.o syscalls.o
obj-y += cpu/ timers/
+obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_CF_ENABLER) += cf-enabler.o
@@ -18,3 +19,5 @@ obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_APM) += apm.o
+obj-$(CONFIG_PM) += pm.o
diff --git a/arch/sh/kernel/apm.c b/arch/sh/kernel/apm.c
new file mode 100644
index 00000000000..871e7d64000
--- /dev/null
+++ b/arch/sh/kernel/apm.c
@@ -0,0 +1,539 @@
+/*
+ * bios-less APM driver for hp680
+ *
+ * Copyright 2005 (c) Andriy Skulysh <askulysh@gmail.com>
+ *
+ * based on ARM APM driver by
+ * Jamey Hicks <jamey@crl.dec.com>
+ *
+ * adapted from the APM BIOS driver for Linux by
+ * Stephen Rothwell (sfr@linuxcare.com)
+ *
+ * APM 1.2 Reference:
+ * Intel Corporation, Microsoft Corporation. Advanced Power Management
+ * (APM) BIOS Interface Specification, Revision 1.2, February 1996.
+ *
+ * [This document is available from Microsoft at:
+ * http://www.microsoft.com/hwdev/busbios/amp_12.htm]
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/apm_bios.h>
+#include <linux/pm.h>
+#include <linux/pm_legacy.h>
+#include <asm/apm.h>
+
+#define MODNAME "apm"
+
+/*
+ * The apm_bios device is one of the misc char devices.
+ * This is its minor number.
+ */
+#define APM_MINOR_DEV 134
+
+/*
+ * Maximum number of events stored
+ */
+#define APM_MAX_EVENTS 16
+
+struct apm_queue {
+ unsigned int event_head;
+ unsigned int event_tail;
+ apm_event_t events[APM_MAX_EVENTS];
+};
+
+/*
+ * The per-file APM data
+ */
+struct apm_user {
+ struct list_head list;
+
+ unsigned int suser: 1;
+ unsigned int writer: 1;
+ unsigned int reader: 1;
+
+ int suspend_result;
+ unsigned int suspend_state;
+#define SUSPEND_NONE 0 /* no suspend pending */
+#define SUSPEND_PENDING 1 /* suspend pending read */
+#define SUSPEND_READ 2 /* suspend read, pending ack */
+#define SUSPEND_ACKED 3 /* suspend acked */
+#define SUSPEND_DONE 4 /* suspend completed */
+
+ struct apm_queue queue;
+};
+
+/*
+ * Local variables
+ */
+static int suspends_pending;
+
+static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
+static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
+
+/*
+ * This is a list of everyone who has opened /dev/apm_bios
+ */
+static DECLARE_RWSEM(user_list_lock);
+static LIST_HEAD(apm_user_list);
+
+/*
+ * kapmd info. kapmd provides us a process context to handle
+ * "APM" events within - specifically necessary if we're going
+ * to be suspending the system.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
+static DECLARE_COMPLETION(kapmd_exit);
+static DEFINE_SPINLOCK(kapmd_queue_lock);
+static struct apm_queue kapmd_queue;
+
+int apm_suspended;
+EXPORT_SYMBOL(apm_suspended);
+
+/* Platform-specific apm_read_proc(). */
+int (*apm_get_info)(char *buf, char **start, off_t fpos, int length);
+EXPORT_SYMBOL(apm_get_info);
+
+/*
+ * APM event queue management.
+ */
+static inline int queue_empty(struct apm_queue *q)
+{
+ return q->event_head == q->event_tail;
+}
+
+static inline apm_event_t queue_get_event(struct apm_queue *q)
+{
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ return q->events[q->event_tail];
+}
+
+static void queue_add_event(struct apm_queue *q, apm_event_t event)
+{
+ q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
+ if (q->event_head == q->event_tail) {
+ static int notified;
+
+ if (notified++ == 0)
+ printk(KERN_ERR "apm: an event queue overflowed\n");
+
+ q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
+ }
+ q->events[q->event_head] = event;
+}
+
+static void queue_event_one_user(struct apm_user *as, apm_event_t event)
+{
+ if (as->suser && as->writer) {
+ switch (event) {
+ case APM_SYS_SUSPEND:
+ case APM_USER_SUSPEND:
+ /*
+ * If this user already has a suspend pending,
+ * don't queue another one.
+ */
+ if (as->suspend_state != SUSPEND_NONE)
+ return;
+
+ as->suspend_state = SUSPEND_PENDING;
+ suspends_pending++;
+ break;
+ }
+ }
+ queue_add_event(&as->queue, event);
+}
+
+static void queue_event(apm_event_t event, struct apm_user *sender)
+{
+ struct apm_user *as;
+
+ down_read(&user_list_lock);
+
+ list_for_each_entry(as, &apm_user_list, list)
+ if (as != sender && as->reader)
+ queue_event_one_user(as, event);
+
+ up_read(&user_list_lock);
+ wake_up_interruptible(&apm_waitqueue);
+}
+
+/**
+ * apm_queue_event - queue an APM event for kapmd
+ * @event: APM event
+ *
+ * Queue an APM event for kapmd to process and ultimately take the
+ * appropriate action. Only a subset of events are handled:
+ * %APM_LOW_BATTERY
+ * %APM_POWER_STATUS_CHANGE
+ * %APM_USER_SUSPEND
+ * %APM_SYS_SUSPEND
+ * %APM_CRITICAL_SUSPEND
+ */
+void apm_queue_event(apm_event_t event)
+{
+ spin_lock_irq(&kapmd_queue_lock);
+ queue_add_event(&kapmd_queue, event);
+ spin_unlock_irq(&kapmd_queue_lock);
+
+ wake_up_interruptible(&kapmd_wait);
+}
+EXPORT_SYMBOL(apm_queue_event);
+
+static void apm_suspend(void)
+{
+ struct apm_user *as;
+ int err;
+
+ apm_suspended = 1;
+ err = pm_suspend(PM_SUSPEND_MEM);
+
+ /*
+ * Anyone on the APM queues will think we're still suspended.
+ * Send a message so everyone knows we're now awake again.
+ */
+ queue_event(APM_NORMAL_RESUME, NULL);
+
+ /*
+ * Finally, wake up anyone who is sleeping on the suspend.
+ */
+ down_read(&user_list_lock);
+ list_for_each_entry(as, &apm_user_list, list) {
+ as->suspend_result = err;
+ as->suspend_state = SUSPEND_DONE;
+ }
+ up_read(&user_list_lock);
+
+ wake_up(&apm_suspend_waitqueue);
+ apm_suspended = 0;
+}
+
+static ssize_t apm_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct apm_user *as = fp->private_data;
+ apm_event_t event;
+ int i = count, ret = 0;
+
+ if (count < sizeof(apm_event_t))
+ return -EINVAL;
+
+ if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));
+
+ while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
+ event = queue_get_event(&as->queue);
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &event, sizeof(event)))
+ break;
+
+ if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
+ as->suspend_state = SUSPEND_READ;
+
+ buf += sizeof(event);
+ i -= sizeof(event);
+ }
+
+ if (i < count)
+ ret = count - i;
+
+ return ret;
+}
+
+static unsigned int apm_poll(struct file *fp, poll_table * wait)
+{
+ struct apm_user *as = fp->private_data;
+
+ poll_wait(fp, &apm_waitqueue, wait);
+ return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
+}
+
+/*
+ * apm_ioctl - handle APM ioctl
+ *
+ * APM_IOC_SUSPEND
+ * This IOCTL is overloaded, and performs two functions. It is used to:
+ * - initiate a suspend
+ * - acknowledge a suspend read from /dev/apm_bios.
+ * Only when everyone who has opened /dev/apm_bios with write permission
+ * has acknowledged does the actual suspend happen.
+ */
+static int
+apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
+{
+ struct apm_user *as = filp->private_data;
+ unsigned long flags;
+ int err = -EINVAL;
+
+ if (!as->suser || !as->writer)
+ return -EPERM;
+
+ switch (cmd) {
+ case APM_IOC_SUSPEND:
+ as->suspend_result = -EINTR;
+
+ if (as->suspend_state == SUSPEND_READ) {
+ /*
+ * If we read a suspend command from /dev/apm_bios,
+ * then the corresponding APM_IOC_SUSPEND ioctl is
+ * interpreted as an acknowledge.
+ */
+ as->suspend_state = SUSPEND_ACKED;
+ suspends_pending--;
+ } else {
+ /*
+ * Otherwise it is a request to suspend the system.
+ * Queue an event for all readers, and expect an
+ * acknowledge from all writers who haven't already
+ * acknowledged.
+ */
+ queue_event(APM_USER_SUSPEND, as);
+ }
+
+ /*
+ * If there are no further acknowledges required, suspend
+ * the system.
+ */
+ if (suspends_pending == 0)
+ apm_suspend();
+
+ /*
+ * Wait for the suspend/resume to complete. If there are
+ * pending acknowledges, we wait here for them.
+ *
+ * Note that we need to ensure that the PM subsystem does
+ * not kick us out of the wait when it suspends the threads.
+ */
+ flags = current->flags;
+ current->flags |= PF_NOFREEZE;
+
+ /*
+ * Note: do not allow a thread which is acking the suspend
+ * to escape until the resume is complete.
+ */
+ if (as->suspend_state == SUSPEND_ACKED)
+ wait_event(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+ else
+ wait_event_interruptible(apm_suspend_waitqueue,
+ as->suspend_state == SUSPEND_DONE);
+
+ current->flags = flags;
+ err = as->suspend_result;
+ as->suspend_state = SUSPEND_NONE;
+ break;
+ }
+
+ return err;
+}
+
+static int apm_release(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as = filp->private_data;
+ filp->private_data = NULL;
+
+ down_write(&user_list_lock);
+ list_del(&as->list);
+ up_write(&user_list_lock);
+
+ /*
+ * We are now unhooked from the chain. As far as new
+ * events are concerned, we no longer exist. However, we
+ * need to balance suspends_pending, which means the
+ * possibility of sleeping.
+ */
+ if (as->suspend_state != SUSPEND_NONE) {
+ suspends_pending -= 1;
+ if (suspends_pending == 0)
+ apm_suspend();
+ }
+
+ kfree(as);
+ return 0;
+}
+
+static int apm_open(struct inode * inode, struct file * filp)
+{
+ struct apm_user *as;
+
+ as = kzalloc(sizeof(*as), GFP_KERNEL);
+ if (as) {
+ /*
+ * XXX - this is a tiny bit broken, when we consider BSD
+ * process accounting. If the device is opened by root, we
+ * instantly flag that we used superuser privs. Who knows,
+ * we might close the device immediately without doing a
+ * privileged operation -- cevans
+ */
+ as->suser = capable(CAP_SYS_ADMIN);
+ as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
+ as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;
+
+ down_write(&user_list_lock);
+ list_add(&as->list, &apm_user_list);
+ up_write(&user_list_lock);
+
+ filp->private_data = as;
+ }
+
+ return as ? 0 : -ENOMEM;
+}
+
+static struct file_operations apm_bios_fops = {
+ .owner = THIS_MODULE,
+ .read = apm_read,
+ .poll = apm_poll,
+ .ioctl = apm_ioctl,
+ .open = apm_open,
+ .release = apm_release,
+};
+
+static struct miscdevice apm_device = {
+ .minor = APM_MINOR_DEV,
+ .name = "apm_bios",
+ .fops = &apm_bios_fops
+};
+
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Arguments, with symbols from linux/apm_bios.h.
+ *
+ * 0) Linux driver version (this will change if format changes)
+ * 1) APM BIOS Version. Usually 1.0, 1.1 or 1.2.
+ * 2) APM flags from APM Installation Check (0x00):
+ * bit 0: APM_16_BIT_SUPPORT
+ * bit 1: APM_32_BIT_SUPPORT
+ * bit 2: APM_IDLE_SLOWS_CLOCK
+ * bit 3: APM_BIOS_DISABLED
+ * bit 4: APM_BIOS_DISENGAGED
+ * 3) AC line status
+ * 0x00: Off-line
+ * 0x01: On-line
+ * 0x02: On backup power (BIOS >= 1.1 only)
+ * 0xff: Unknown
+ * 4) Battery status
+ * 0x00: High
+ * 0x01: Low
+ * 0x02: Critical
+ * 0x03: Charging
+ * 0x04: Selected battery not present (BIOS >= 1.2 only)
+ * 0xff: Unknown
+ * 5) Battery flag
+ * bit 0: High
+ * bit 1: Low
+ * bit 2: Critical
+ * bit 3: Charging
+ * bit 7: No system battery
+ * 0xff: Unknown
+ * 6) Remaining battery life (percentage of charge):
+ * 0-100: valid
+ * -1: Unknown
+ * 7) Remaining battery life (time units):
+ * Number of remaining minutes or seconds
+ * -1: Unknown
+ * 8) min = minutes; sec = seconds
+ */
+static int apm_read_proc(char *buf, char **start, off_t fpos, int length)
+{
+ if (likely(apm_get_info))
+ return apm_get_info(buf, start, fpos, length);
+
+ return -EINVAL;
+}
+#endif
+
+static int kapmd(void *arg)
+{
+ daemonize("kapmd");
+ current->flags |= PF_NOFREEZE;
+
+ do {
+ apm_event_t event;
+
+ wait_event_interruptible(kapmd_wait,
+ !queue_empty(&kapmd_queue) || !pm_active);
+
+ if (!pm_active)
+ break;
+
+ spin_lock_irq(&kapmd_queue_lock);
+ event = 0;
+ if (!queue_empty(&kapmd_queue))
+ event = queue_get_event(&kapmd_queue);
+ spin_unlock_irq(&kapmd_queue_lock);
+
+ switch (event) {
+ case 0:
+ break;
+
+ case APM_LOW_BATTERY:
+ case APM_POWER_STATUS_CHANGE:
+ queue_event(event, NULL);
+ break;
+
+ case APM_USER_SUSPEND:
+ case APM_SYS_SUSPEND:
+ queue_event(event, NULL);
+ if (suspends_pending == 0)
+ apm_suspend();
+ break;
+
+ case APM_CRITICAL_SUSPEND:
+ apm_suspend();
+ break;
+ }
+ } while (1);
+
+ complete_and_exit(&kapmd_exit, 0);
+}
+
+static int __init apm_init(void)
+{
+ int ret;
+
+ pm_active = 1;
+
+ ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
+ if (unlikely(ret < 0)) {
+ pm_active = 0;
+ return ret;
+ }
+
+ create_proc_info_entry("apm", 0, NULL, apm_read_proc);
+
+ ret = misc_register(&apm_device);
+ if (unlikely(ret != 0)) {
+ remove_proc_entry("apm", NULL);
+
+ pm_active = 0;
+ wake_up(&kapmd_wait);
+ wait_for_completion(&kapmd_exit);
+ }
+
+ return ret;
+}
+
+static void __exit apm_exit(void)
+{
+ misc_deregister(&apm_device);
+ remove_proc_entry("apm", NULL);
+
+ pm_active = 0;
+ wake_up(&kapmd_wait);
+ wait_for_completion(&kapmd_exit);
+}
+
+module_init(apm_init);
+module_exit(apm_exit);
+
+MODULE_AUTHOR("Stephen Rothwell, Andriy Skulysh");
+MODULE_DESCRIPTION("Advanced Power Management");
+MODULE_LICENSE("GPL");
diff --git a/arch/sh/kernel/cf-enabler.c b/arch/sh/kernel/cf-enabler.c
index f1f9ab87f0b..3e5fa1e24df 100644
--- a/arch/sh/kernel/cf-enabler.c
+++ b/arch/sh/kernel/cf-enabler.c
@@ -10,7 +10,8 @@
*/
#include <linux/init.h>
-
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -32,8 +33,6 @@
/* SH4 can't access PCMCIA interface through P2 area.
* we must remap it with the appropriate attribute bit of the page set.
* this part is based on Greg Banks' hd64465_ss.c implementation - Masahiro Abe */
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
#if defined(CONFIG_CF_AREA6)
#define slot_no 0
@@ -41,9 +40,6 @@
#define slot_no 1
#endif
-/* defined in mm/ioremap.c */
-extern void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags);
-
/* use this pointer to access to directly connected compact flash io area*/
void *cf_io_base;
@@ -62,7 +58,7 @@ static int __init allocate_cf_area(void)
return -ENOMEM;
}
/* printk("p3_ioremap(paddr=0x%08lx, psize=0x%08lx, prot=0x%08lx)=0x%08lx\n",
- paddrbase, psize, prot.pgprot, cf_io_base);*/
+ paddrbase, psize, prot.pgprot, cf_io_base);*/
/* XXX : do we need attribute and common-memory area also? */
@@ -87,7 +83,7 @@ static int __init cf_init_default(void)
}
#if defined(CONFIG_SH_SOLUTION_ENGINE)
-#include <asm/se/se.h>
+#include <asm/se.h>
/*
* SolutionEngine
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index 59d5b748752..fb5dac06938 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -8,6 +8,5 @@ obj-$(CONFIG_CPU_SH2) += sh2/
obj-$(CONFIG_CPU_SH3) += sh3/
obj-$(CONFIG_CPU_SH4) += sh4/
-obj-$(CONFIG_SH_RTC) += rtc.o
obj-$(CONFIG_UBC_WAKEUP) += ubc.o
obj-$(CONFIG_SH_ADC) += adc.o
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c
index 97fa37f42b8..51ec64cdf34 100644
--- a/arch/sh/kernel/cpu/clock.c
+++ b/arch/sh/kernel/cpu/clock.c
@@ -1,7 +1,7 @@
/*
* arch/sh/kernel/cpu/clock.c - SuperH clock framework
*
- * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2005, 2006 Paul Mundt
*
* This clock framework is derived from the OMAP version by:
*
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
@@ -24,7 +25,7 @@
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
-static DECLARE_MUTEX(clock_list_sem);
+static DEFINE_MUTEX(clock_list_sem);
/*
* Each subtype is expected to define the init routines for these clocks,
@@ -140,21 +141,21 @@ void clk_disable(struct clk *clk)
int clk_register(struct clk *clk)
{
- down(&clock_list_sem);
+ mutex_lock(&clock_list_sem);
list_add(&clk->node, &clock_list);
kref_init(&clk->kref);
- up(&clock_list_sem);
+ mutex_unlock(&clock_list_sem);
return 0;
}
void clk_unregister(struct clk *clk)
{
- down(&clock_list_sem);
+ mutex_lock(&clock_list_sem);
list_del(&clk->node);
- up(&clock_list_sem);
+ mutex_unlock(&clock_list_sem);
}
inline unsigned long clk_get_rate(struct clk *clk)
@@ -198,14 +199,14 @@ struct clk *clk_get(const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
- down(&clock_list_sem);
+ mutex_lock(&clock_list_sem);
list_for_each_entry(p, &clock_list, node) {
if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
clk = p;
break;
}
}
- up(&clock_list_sem);
+ mutex_unlock(&clock_list_sem);
return clk;
}
@@ -225,7 +226,7 @@ int __init clk_init(void)
{
int i, ret = 0;
- BUG_ON(unlikely(!master_clk.rate));
+ BUG_ON(!master_clk.rate);
for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
struct clk *clk = onchip_clocks[i];
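
For reference, the registration/lookup API touched above is consumed roughly as follows. This is a hypothetical sketch, not code from this patch; the clock name "module_clk" and the <asm/clock.h> header path are assumptions:

    /* Hypothetical consumer of the SH clock framework (sketch only). */
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/err.h>
    #include <asm/clock.h>          /* assumed: struct clk and prototypes */

    static int __init example_clk_user(void)
    {
        struct clk *clk = clk_get("module_clk");    /* name is an assumption */

        if (IS_ERR(clk))
            return PTR_ERR(clk);

        clk_enable(clk);
        printk(KERN_INFO "module clock: %lu Hz\n", clk_get_rate(clk));
        clk_disable(clk);

        return 0;
    }

The change in this hunk only serialises the list walked by clk_get() with a mutex instead of a semaphore; the consumer-facing API is unchanged.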
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 868e68b2888..bfb90eb0b7a 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -4,6 +4,7 @@
* CPU init code
*
* Copyright (C) 2002, 2003 Paul Mundt
+ * Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -13,6 +14,7 @@
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/page.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
@@ -51,7 +53,15 @@ static void __init cache_init(void)
ccr = ctrl_inl(CCR);
/*
- * If the cache is already enabled .. flush it.
+ * At this point we don't know whether the cache is enabled or not - a
+ * bootloader may have enabled it. There are at least 2 things that
+ * could be dirty in the cache at this point:
+ * 1. kernel command line set up by boot loader
+ * 2. spilled registers from the prolog of this function
+ * => before re-initialising the cache, we must do a purge of the whole
+ * cache out to memory for safety. As long as nothing is spilled
+ * during the loop to lines that have already been done, this is safe.
+ * - RPC
*/
if (ccr & CCR_CACHE_ENABLE) {
unsigned long ways, waysize, addrstart;
@@ -98,6 +108,8 @@ static void __init cache_init(void)
/* Force EMODE if possible */
if (cpu_data->dcache.ways > 1)
flags |= CCR_CACHE_EMODE;
+ else
+ flags &= ~CCR_CACHE_EMODE;
#endif
#ifdef CONFIG_SH_WRITETHROUGH
@@ -112,6 +124,9 @@ static void __init cache_init(void)
/* Turn on OCRAM -- halve the OC */
flags |= CCR_CACHE_ORA;
cpu_data->dcache.sets >>= 1;
+
+ cpu_data->dcache.way_size = cpu_data->dcache.sets *
+ cpu_data->dcache.linesz;
#endif
ctrl_outl(flags, CCR);
@@ -184,6 +199,10 @@ asmlinkage void __init sh_cpu_init(void)
/* Init the cache */
cache_init();
+ shm_align_mask = max_t(unsigned long,
+ cpu_data->dcache.way_size - 1,
+ PAGE_SIZE - 1);
+
/* Disable the FPU */
if (fpu_disabled) {
printk("FPU Disabled\n");
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index e3cccea15e1..1c034c283f5 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -3,5 +3,6 @@
#
obj-y += ipr.o imask.o
-obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o
-obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o
+obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o
+obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o
+obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o
diff --git a/arch/sh/kernel/cpu/irq/intc2.c b/arch/sh/kernel/cpu/irq/intc2.c
index 30064bf6e15..e30e4b7aa70 100644
--- a/arch/sh/kernel/cpu/irq/intc2.c
+++ b/arch/sh/kernel/cpu/irq/intc2.c
@@ -241,9 +241,9 @@ static struct intc2_init {
/* 110-111 reserved/unused */
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
{ TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2},
-#ifdef CONFIG_SH_RTC
- { RTC_IRQ, 4, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY },
-#endif
+ { 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY },
+ { 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY },
+ { 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY },
{ SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
{ SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
{ SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY },
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c
index 0f545941fb4..f785822cd5d 100644
--- a/arch/sh/kernel/cpu/irq/ipr.c
+++ b/arch/sh/kernel/cpu/irq/ipr.c
@@ -57,31 +57,27 @@ static struct hw_interrupt_type ipr_irq_type = {
static void disable_ipr_irq(unsigned int irq)
{
- unsigned long val, flags;
+ unsigned long val;
unsigned int addr = ipr_data[irq].addr;
unsigned short mask = 0xffff ^ (0x0f << ipr_data[irq].shift);
/* Set the priority in IPR to 0 */
- local_irq_save(flags);
val = ctrl_inw(addr);
val &= mask;
ctrl_outw(val, addr);
- local_irq_restore(flags);
}
static void enable_ipr_irq(unsigned int irq)
{
- unsigned long val, flags;
+ unsigned long val;
unsigned int addr = ipr_data[irq].addr;
int priority = ipr_data[irq].priority;
unsigned short value = (priority << ipr_data[irq].shift);
/* Set priority in IPR back to original value */
- local_irq_save(flags);
val = ctrl_inw(addr);
val |= value;
ctrl_outw(val, addr);
- local_irq_restore(flags);
}
static void mask_and_ack_ipr(unsigned int irq)
@@ -89,6 +85,7 @@ static void mask_and_ack_ipr(unsigned int irq)
disable_ipr_irq(irq);
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
/* This is needed when we use edge triggered setting */
/* XXX: Is it really needed? */
@@ -123,7 +120,7 @@ void __init init_IRQ(void)
#ifndef CONFIG_CPU_SUBTYPE_SH7780
make_ipr_irq(TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY);
make_ipr_irq(TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY);
-#if defined(CONFIG_SH_RTC)
+#ifdef RTC_IRQ
make_ipr_irq(RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY);
#endif
@@ -162,6 +159,7 @@ void __init init_IRQ(void)
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
/*
* Initialize the Interrupt Controller (INTC)
@@ -192,6 +190,8 @@ void __init init_IRQ(void)
/* Perform the machine specific initialisation */
if (sh_mv.mv_init_irq != NULL)
sh_mv.mv_init_irq();
+
+ irq_ctx_init(smp_processor_id());
}
#if !defined(CONFIG_CPU_HAS_PINT_IRQ)
diff --git a/arch/sh/kernel/cpu/irq/maskreg.c b/arch/sh/kernel/cpu/irq/maskreg.c
new file mode 100644
index 00000000000..492db31b3ca
--- /dev/null
+++ b/arch/sh/kernel/cpu/irq/maskreg.c
@@ -0,0 +1,93 @@
+/*
+ * Interrupt handling for Simple external interrupt mask register
+ *
+ * Copyright (C) 2001 A&D Co., Ltd. <http://www.aandd.co.jp>
+ *
+ * This is for machines which have a single 16-bit register
+ * for masking external IRQs individually.
+ * Each bit of the register masks one interrupt.
+ *
+ * This file may be copied or modified under the terms of the GNU
+ * General Public License. See linux/COPYING for more information.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+/* address of external interrupt mask register */
+unsigned long irq_mask_register;
+
+/* forward declaration */
+static unsigned int startup_maskreg_irq(unsigned int irq);
+static void shutdown_maskreg_irq(unsigned int irq);
+static void enable_maskreg_irq(unsigned int irq);
+static void disable_maskreg_irq(unsigned int irq);
+static void mask_and_ack_maskreg(unsigned int);
+static void end_maskreg_irq(unsigned int irq);
+
+/* hw_interrupt_type */
+static struct hw_interrupt_type maskreg_irq_type = {
+ .typename = "Mask Register",
+ .startup = startup_maskreg_irq,
+ .shutdown = shutdown_maskreg_irq,
+ .enable = enable_maskreg_irq,
+ .disable = disable_maskreg_irq,
+ .ack = mask_and_ack_maskreg,
+ .end = end_maskreg_irq
+};
+
+/* actual implementation */
+static unsigned int startup_maskreg_irq(unsigned int irq)
+{
+ enable_maskreg_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static void shutdown_maskreg_irq(unsigned int irq)
+{
+ disable_maskreg_irq(irq);
+}
+
+static void disable_maskreg_irq(unsigned int irq)
+{
+ unsigned short val, mask = 0x01 << irq;
+
+ BUG_ON(!irq_mask_register);
+
+ /* Set "irq"th bit */
+ val = ctrl_inw(irq_mask_register);
+ val |= mask;
+ ctrl_outw(val, irq_mask_register);
+}
+
+static void enable_maskreg_irq(unsigned int irq)
+{
+ unsigned short val, mask = ~(0x01 << irq);
+
+ BUG_ON(!irq_mask_register);
+
+ /* Clear "irq"th bit */
+ val = ctrl_inw(irq_mask_register);
+ val &= mask;
+ ctrl_outw(val, irq_mask_register);
+}
+
+static void mask_and_ack_maskreg(unsigned int irq)
+{
+ disable_maskreg_irq(irq);
+}
+
+static void end_maskreg_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ enable_maskreg_irq(irq);
+}
+
+void make_maskreg_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_desc[irq].handler = &maskreg_irq_type;
+ disable_maskreg_irq(irq);
+}
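
A hypothetical board-setup fragment (sketch only; the register address and IRQ range are placeholders, not taken from this patch) shows how the two hooks exported by this new file are intended to be wired up:

    /* Illustrative sketch only -- hypothetical board code. */
    #include <linux/init.h>

    extern unsigned long irq_mask_register;
    extern void make_maskreg_irq(unsigned int irq);

    #define BOARD_IRQ_MASK_REG  0xb8000000UL    /* placeholder address */

    static void __init board_init_irq(void)
    {
        unsigned int irq;

        /* Point the generic handler at the board's 16-bit mask register. */
        irq_mask_register = BOARD_IRQ_MASK_REG;

        /* Bit n of the register masks IRQ n, so register IRQs 0..15. */
        for (irq = 0; irq < 16; irq++)
            make_maskreg_irq(irq);
    }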
diff --git a/arch/sh/kernel/cpu/irq/pint.c b/arch/sh/kernel/cpu/irq/pint.c
index 80cd8108d36..17f47b373d6 100644
--- a/arch/sh/kernel/cpu/irq/pint.c
+++ b/arch/sh/kernel/cpu/irq/pint.c
@@ -48,26 +48,22 @@ static struct hw_interrupt_type pint_irq_type = {
static void disable_pint_irq(unsigned int irq)
{
- unsigned long val, flags;
+ unsigned long val;
- local_irq_save(flags);
val = ctrl_inw(INTC_INTER);
val &= ~(1 << (irq - PINT_IRQ_BASE));
ctrl_outw(val, INTC_INTER); /* disable PINTn */
portcr_mask &= ~(3 << (irq - PINT_IRQ_BASE)*2);
- local_irq_restore(flags);
}
static void enable_pint_irq(unsigned int irq)
{
- unsigned long val, flags;
+ unsigned long val;
- local_irq_save(flags);
val = ctrl_inw(INTC_INTER);
val |= 1 << (irq - PINT_IRQ_BASE);
ctrl_outw(val, INTC_INTER); /* enable PINTn */
portcr_mask |= 3 << (irq - PINT_IRQ_BASE)*2;
- local_irq_restore(flags);
}
static void mask_and_ack_pint(unsigned int irq)
diff --git a/arch/sh/kernel/cpu/rtc.c b/arch/sh/kernel/cpu/rtc.c
deleted file mode 100644
index 4304cf75cfa..00000000000
--- a/arch/sh/kernel/cpu/rtc.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * linux/arch/sh/kernel/rtc.c -- SH3 / SH4 on-chip RTC support
- *
- * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/time.h>
-#include <linux/bcd.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-
-void sh_rtc_gettimeofday(struct timespec *ts)
-{
- unsigned int sec128, sec, sec2, min, hr, wk, day, mon, yr, yr100, cf_bit;
- unsigned long flags;
-
- again:
- do {
- local_irq_save(flags);
- ctrl_outb(0, RCR1); /* Clear CF-bit */
- sec128 = ctrl_inb(R64CNT);
- sec = ctrl_inb(RSECCNT);
- min = ctrl_inb(RMINCNT);
- hr = ctrl_inb(RHRCNT);
- wk = ctrl_inb(RWKCNT);
- day = ctrl_inb(RDAYCNT);
- mon = ctrl_inb(RMONCNT);
-#if defined(CONFIG_CPU_SH4)
- yr = ctrl_inw(RYRCNT);
- yr100 = (yr >> 8);
- yr &= 0xff;
-#else
- yr = ctrl_inb(RYRCNT);
- yr100 = (yr == 0x99) ? 0x19 : 0x20;
-#endif
- sec2 = ctrl_inb(R64CNT);
- cf_bit = ctrl_inb(RCR1) & RCR1_CF;
- local_irq_restore(flags);
- } while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);
-
- BCD_TO_BIN(yr100);
- BCD_TO_BIN(yr);
- BCD_TO_BIN(mon);
- BCD_TO_BIN(day);
- BCD_TO_BIN(hr);
- BCD_TO_BIN(min);
- BCD_TO_BIN(sec);
-
- if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 ||
- hr > 23 || min > 59 || sec > 59) {
- printk(KERN_ERR
- "SH RTC: invalid value, resetting to 1 Jan 2000\n");
- local_irq_save(flags);
- ctrl_outb(RCR2_RESET, RCR2); /* Reset & Stop */
- ctrl_outb(0, RSECCNT);
- ctrl_outb(0, RMINCNT);
- ctrl_outb(0, RHRCNT);
- ctrl_outb(6, RWKCNT);
- ctrl_outb(1, RDAYCNT);
- ctrl_outb(1, RMONCNT);
-#if defined(CONFIG_CPU_SH4)
- ctrl_outw(0x2000, RYRCNT);
-#else
- ctrl_outb(0, RYRCNT);
-#endif
- ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start */
- goto again;
- }
-
-#if RTC_BIT_INVERTED != 0
- if ((sec128 & RTC_BIT_INVERTED))
- sec--;
-#endif
-
- ts->tv_sec = mktime(yr100 * 100 + yr, mon, day, hr, min, sec);
- ts->tv_nsec = ((sec128 * 1000000) / 128) * 1000;
-}
-
-/*
- * Changed to only care about tv_sec, and not the full timespec struct
- * (i.e. tv_nsec). It can easily be switched to timespec for future cpus
- * that support setting usec or nsec RTC values.
- */
-int sh_rtc_settimeofday(const time_t secs)
-{
- int retval = 0;
- int real_seconds, real_minutes, cmos_minutes;
- unsigned long flags;
-
- local_irq_save(flags);
- ctrl_outb(RCR2_RESET, RCR2); /* Reset pre-scaler & stop RTC */
-
- cmos_minutes = ctrl_inb(RMINCNT);
- BCD_TO_BIN(cmos_minutes);
-
- /*
- * since we're only adjusting minutes and seconds,
- * don't interfere with hour overflow. This avoids
- * messing with unknown time zones but requires your
- * RTC not to be off by more than 15 minutes
- */
- real_seconds = secs % 60;
- real_minutes = secs / 60;
- if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
- real_minutes += 30; /* correct for half hour time zone */
- real_minutes %= 60;
-
- if (abs(real_minutes - cmos_minutes) < 30) {
- BIN_TO_BCD(real_seconds);
- BIN_TO_BCD(real_minutes);
- ctrl_outb(real_seconds, RSECCNT);
- ctrl_outb(real_minutes, RMINCNT);
- } else {
- printk(KERN_WARNING
- "set_rtc_time: can't update from %d to %d\n",
- cmos_minutes, real_minutes);
- retval = -1;
- }
-
- ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start RTC */
- local_irq_restore(flags);
-
- return retval;
-}
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile
index b54dbb9a0c8..58d3815695f 100644
--- a/arch/sh/kernel/cpu/sh3/Makefile
+++ b/arch/sh/kernel/cpu/sh3/Makefile
@@ -4,10 +4,21 @@
obj-y := ex.o probe.o
+# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh7709.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh7709.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh7708.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh7709.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7300) += setup-sh7300.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o
+
+# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH3) := clock-sh3.o
clock-$(CONFIG_CPU_SUBTYPE_SH7300) := clock-sh7300.o
clock-$(CONFIG_CPU_SUBTYPE_SH7705) := clock-sh7705.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7706) := clock-sh7706.o
clock-$(CONFIG_CPU_SUBTYPE_SH7709) := clock-sh7709.o
+clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7300.o
obj-y += $(clock-y)
-
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
new file mode 100644
index 00000000000..0cf96f9833b
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c
@@ -0,0 +1,84 @@
+/*
+ * arch/sh/kernel/cpu/sh3/clock-sh7706.c
+ *
+ * SH7706 support for the clock framework
+ *
+ * Copyright (C) 2006 Takashi YOSHII
+ *
+ * Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c
+ * Copyright (C) 2005 Andriy Skulysh
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/clock.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+
+static int stc_multipliers[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
+static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
+static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
+
+static void master_clk_init(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ clk->rate *= pfc_divisors[idx];
+}
+
+static struct clk_ops sh7706_master_clk_ops = {
+ .init = master_clk_init,
+};
+
+static void module_clk_recalc(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
+
+ clk->rate = clk->parent->rate / pfc_divisors[idx];
+}
+
+static struct clk_ops sh7706_module_clk_ops = {
+ .recalc = module_clk_recalc,
+};
+
+static void bus_clk_recalc(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
+
+ clk->rate = clk->parent->rate / stc_multipliers[idx];
+}
+
+static struct clk_ops sh7706_bus_clk_ops = {
+ .recalc = bus_clk_recalc,
+};
+
+static void cpu_clk_recalc(struct clk *clk)
+{
+ int frqcr = ctrl_inw(FRQCR);
+ int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
+
+ clk->rate = clk->parent->rate / ifc_divisors[idx];
+}
+
+static struct clk_ops sh7706_cpu_clk_ops = {
+ .recalc = cpu_clk_recalc,
+};
+
+static struct clk_ops *sh7706_clk_ops[] = {
+ &sh7706_master_clk_ops,
+ &sh7706_module_clk_ops,
+ &sh7706_bus_clk_ops,
+ &sh7706_cpu_clk_ops,
+};
+
+void __init arch_init_clk_ops(struct clk_ops **ops, int idx)
+{
+ if (idx < ARRAY_SIZE(sh7706_clk_ops))
+ *ops = sh7706_clk_ops[idx];
+}
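
To illustrate the FRQCR bit-slicing used in the recalc routines above, here is a small stand-alone computation (plain C, illustrative only; the sample register value is made up):

    #include <stdio.h>

    /* Mirrors the index computation in module_clk_recalc() above:
     * bit 13 of FRQCR becomes bit 2 of the index, bits 1:0 stay in place. */
    static const int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };

    int main(void)
    {
        unsigned int frqcr = 0x2001;    /* made-up sample value */
        unsigned int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);

        printf("idx = %u, peripheral divisor = %d\n", idx, pfc_divisors[idx]);
        return 0;
    }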
diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S
index cc04e9e239f..44daf44833f 100644
--- a/arch/sh/kernel/cpu/sh3/ex.S
+++ b/arch/sh/kernel/cpu/sh3/ex.S
@@ -84,8 +84,12 @@ ENTRY(interrupt_table)
.long do_IRQ ! rovi
.long do_IRQ
.long do_IRQ /* 5E0 */
-#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
- defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705)
+#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7709) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7706) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7300) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7705) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7710)
.long do_IRQ ! 32 IRQ irq0 /* 600 */
.long do_IRQ ! 33 irq1
.long do_IRQ ! 34 irq2
@@ -147,6 +151,51 @@ ENTRY(interrupt_table)
.long do_IRQ ! 62 PCC pcc0i
.long do_IRQ ! 63 pcc1i /* 9E0 */
#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7710)
+ .long exception_none ! 61 /* 9A0 */
+ .long exception_none ! 62
+ .long exception_none ! 63
+ .long exception_none ! 64 /* A00 */
+ .long exception_none ! 65
+ .long exception_none ! 66
+ .long exception_none ! 67
+ .long exception_none ! 68
+ .long exception_none ! 69
+ .long exception_none ! 70
+ .long exception_none ! 71
+ .long exception_none ! 72 /* B00 */
+ .long exception_none ! 73
+ .long exception_none ! 74
+ .long exception_none ! 75
+ .long do_IRQ ! 76 DMAC2 dei4 /* B80 */
+ .long do_IRQ ! 77 DMAC2 dei5
+ .long exception_none ! 78
+ .long do_IRQ ! 79 IPSEC ipseci /* BE0 */
+ .long do_IRQ ! 80 EDMAC eint0 /* C00 */
+ .long do_IRQ ! 81 EDMAC eint1
+ .long do_IRQ ! 82 EDMAC eint2
+ .long exception_none ! 83 /* C60 */
+ .long exception_none ! 84
+ .long exception_none ! 85
+ .long exception_none ! 86
+ .long exception_none ! 87
+ .long exception_none ! 88 /* D00 */
+ .long exception_none ! 89
+ .long exception_none ! 90
+ .long exception_none ! 91
+ .long exception_none ! 92
+ .long exception_none ! 93
+ .long exception_none ! 94
+ .long exception_none ! 95
+ .long do_IRQ ! 96 SIOF eri0 /* E00 */
+ .long do_IRQ ! 97 txi0
+ .long do_IRQ ! 98 rxi0
+ .long do_IRQ ! 99 cci0
+ .long do_IRQ ! 100 eri1 /* E80 */
+ .long do_IRQ ! 101 txi1
+ .long do_IRQ ! 102 rxi2
+ .long do_IRQ ! 103 cci3
+#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7300)
.long do_IRQ ! 64
.long do_IRQ ! 65
@@ -195,4 +244,3 @@ ENTRY(interrupt_table)
.long do_IRQ ! 108
#endif
#endif
-
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index 5cdc8863860..e6709883629 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -72,6 +72,12 @@ int __init detect_cpu_and_cache_system(void)
cpu_data->dcache.sets = 256;
cpu_data->type = CPU_SH7729;
+#if defined(CONFIG_CPU_SUBTYPE_SH7706)
+ cpu_data->type = CPU_SH7706;
+#endif
+#if defined(CONFIG_CPU_SUBTYPE_SH7710)
+ cpu_data->type = CPU_SH7710;
+#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
cpu_data->type = CPU_SH7705;
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7300.c b/arch/sh/kernel/cpu/sh3/setup-sh7300.c
new file mode 100644
index 00000000000..ab4d204bfba
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7300.c
@@ -0,0 +1,43 @@
+/*
+ * SH7300 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xa4430000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 80, 80, 80, 80 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7300_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7300_devices_setup(void)
+{
+ return platform_add_devices(sh7300_devices,
+ ARRAY_SIZE(sh7300_devices));
+}
+__initcall(sh7300_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
new file mode 100644
index 00000000000..a8e41c5241f
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c
@@ -0,0 +1,48 @@
+/*
+ * SH7705 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+ }, {
+ .mapbase = 0xa4410000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 57, 59, 58 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7705_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7705_devices_setup(void)
+{
+ return platform_add_devices(sh7705_devices,
+ ARRAY_SIZE(sh7705_devices));
+}
+__initcall(sh7705_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7708.c b/arch/sh/kernel/cpu/sh3/setup-sh7708.c
new file mode 100644
index 00000000000..f933723911c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7708.c
@@ -0,0 +1,43 @@
+/*
+ * SH7708 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffffe80,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 23, 24, 25, 0 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7708_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7708_devices_setup(void)
+{
+ return platform_add_devices(sh7708_devices,
+ ARRAY_SIZE(sh7708_devices));
+}
+__initcall(sh7708_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7709.c b/arch/sh/kernel/cpu/sh3/setup-sh7709.c
new file mode 100644
index 00000000000..ff43ef2a1f0
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7709.c
@@ -0,0 +1,53 @@
+/*
+ * SH7707/SH7709 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfffffe80,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 23, 24, 25, 0 },
+ }, {
+ .mapbase = 0xa4000150,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 56, 57, 59, 58 },
+ }, {
+ .mapbase = 0xa4000140,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_IRDA,
+ .irqs = { 52, 53, 55, 54 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7709_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7709_devices_setup(void)
+{
+ return platform_add_devices(sh7709_devices,
+ ARRAY_SIZE(sh7709_devices));
+}
+__initcall(sh7709_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
new file mode 100644
index 00000000000..895f99ee6a9
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c
@@ -0,0 +1,43 @@
+/*
+ * SH7710 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xa4400000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7710_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7710_devices_setup(void)
+{
+ return platform_add_devices(sh7710_devices,
+ ARRAY_SIZE(sh7710_devices));
+}
+__initcall(sh7710_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile
index 3d5cafc71ae..8dbf3895ece 100644
--- a/arch/sh/kernel/cpu/sh4/Makefile
+++ b/arch/sh/kernel/cpu/sh4/Makefile
@@ -7,6 +7,16 @@ obj-y := ex.o probe.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_SH_STORE_QUEUES) += sq.o
+# CPU subtype setup
+obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o
+obj-$(CONFIG_CPU_SUBTYPE_SH73180) += setup-sh73180.o
+obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o
+obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o
+
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH4) := clock-sh4.o
clock-$(CONFIG_CPU_SUBTYPE_SH73180) := clock-sh73180.o
diff --git a/arch/sh/kernel/cpu/sh4/ex.S b/arch/sh/kernel/cpu/sh4/ex.S
index 26a27df0650..7146893a6cc 100644
--- a/arch/sh/kernel/cpu/sh4/ex.S
+++ b/arch/sh/kernel/cpu/sh4/ex.S
@@ -72,6 +72,7 @@ ENTRY(interrupt_table)
.long do_IRQ ! 1110
.long exception_error
! Internal hardware
+#ifndef CONFIG_CPU_SUBTYPE_SH7780
.long do_IRQ ! TMU0 tuni0 /* 400 */
.long do_IRQ ! TMU1 tuni1
.long do_IRQ ! TMU2 tuni2
@@ -122,6 +123,13 @@ ENTRY(interrupt_table)
.long do_IRQ ! 45 dmte5
.long do_IRQ ! 46 dmte6
.long do_IRQ ! 47 dmte7 /* 7E0 */
+#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
+ .long do_IRQ ! 44 IIC1 ali /* 780 */
+ .long do_IRQ ! 45 tacki
+ .long do_IRQ ! 46 waiti
+ .long do_IRQ ! 47 dtei /* 7E0 */
+ .long do_IRQ ! 48 DMAC dei0 /* 800 */
+ .long do_IRQ ! 49 dei1 /* 820 */
#else
.long exception_error ! 44 /* 780 */
.long exception_error ! 45
@@ -131,7 +139,8 @@ ENTRY(interrupt_table)
#if defined(CONFIG_SH_FPU)
.long do_fpu_state_restore ! 48 /* 800 */
.long do_fpu_state_restore ! 49 /* 820 */
-#else
+#elif !defined(CONFIG_CPU_SUBTYPE_SH7343) && \
+ !defined(CONFIG_CPU_SUBTYPE_SH73180)
.long exception_error
.long exception_error
#endif
@@ -224,7 +233,7 @@ ENTRY(interrupt_table)
.long exception_error
.long do_IRQ ! ADC adi
.long do_IRQ ! CMT cmti /* FA0 */
-#elif defined(CONFIG_CPU_SUBTYPE_SH73180)
+#elif defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7343)
.long do_IRQ ! 50 0x840
.long do_IRQ ! 51 0x860
.long do_IRQ ! 52 0x880
@@ -379,5 +388,168 @@ ENTRY(interrupt_table)
.long exception_error ! 141 0x13a0
.long exception_error ! 142 0x13c0
.long exception_error ! 143 0x13e0
+#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
+ .long do_IRQ ! 50 0x840
+ .long do_IRQ ! 51 0x860
+ .long do_IRQ ! 52 0x880
+ .long do_IRQ ! 53 0x8a0
+ .long do_IRQ ! 54 0x8c0
+ .long do_IRQ ! 55 0x8e0
+ .long do_IRQ ! 56 0x900
+ .long do_IRQ ! 57 0x920
+ .long do_IRQ ! 58 0x940
+ .long do_IRQ ! 59 0x960
+ .long do_IRQ ! 60 0x980
+ .long do_IRQ ! 61 0x9a0
+ .long do_IRQ ! 62 0x9c0
+ .long do_IRQ ! 63 0x9e0
+ .long do_IRQ ! 64 0xa00
+ .long do_IRQ ! 65 0xa20
+ .long do_IRQ ! 66 0xa4d
+ .long do_IRQ ! 67 0xa60
+ .long do_IRQ ! 68 0xa80
+ .long do_IRQ ! 69 0xaa0
+ .long do_IRQ ! 70 0xac0
+ .long do_IRQ ! 71 0xae0
+ .long do_IRQ ! 72 0xb00
+ .long do_IRQ ! 73 0xb20
+ .long do_IRQ ! 74 0xb40
+ .long do_IRQ ! 75 0xb60
+ .long do_IRQ ! 76 0xb80
+ .long do_IRQ ! 77 0xba0
+ .long do_IRQ ! 78 0xbc0
+ .long do_IRQ ! 79 0xbe0
+ .long do_IRQ ! 80 0xc00
+ .long do_IRQ ! 81 0xc20
+ .long do_IRQ ! 82 0xc40
+ .long do_IRQ ! 83 0xc60
+ .long do_IRQ ! 84 0xc80
+ .long do_IRQ ! 85 0xca0
+ .long do_IRQ ! 86 0xcc0
+ .long do_IRQ ! 87 0xce0
+ .long do_IRQ ! 88 0xd00
+ .long do_IRQ ! 89 0xd20
+ .long do_IRQ ! 90 0xd40
+ .long do_IRQ ! 91 0xd60
+ .long do_IRQ ! 92 0xd80
+ .long do_IRQ ! 93 0xda0
+ .long do_IRQ ! 94 0xdc0
+ .long do_IRQ ! 95 0xde0
+ .long do_IRQ ! 96 0xe00
+ .long do_IRQ ! 97 0xe20
+ .long do_IRQ ! 98 0xe40
+ .long do_IRQ ! 99 0xe60
+ .long do_IRQ ! 100 0xe80
+ .long do_IRQ ! 101 0xea0
+ .long do_IRQ ! 102 0xec0
+ .long do_IRQ ! 103 0xee0
+ .long do_IRQ ! 104 0xf00
+ .long do_IRQ ! 105 0xf20
+ .long do_IRQ ! 106 0xf40
+ .long do_IRQ ! 107 0xf60
+ .long do_IRQ ! 108 0xf80
+#endif
+#else
+ .long exception_error /* 400 */
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! RTC ati
+ .long do_IRQ ! pri
+ .long do_IRQ ! cui
+ .long exception_error
+ .long exception_error /* 500 */
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! WDT iti /* 560 */
+ .long do_IRQ ! TMU-ch0
+ .long do_IRQ ! TMU-ch1
+ .long do_IRQ ! TMU-ch2
+ .long do_IRQ ! ticpi2 /* 5E0 */
+ .long do_IRQ ! 32 Hitachi UDI /* 600 */
+ .long exception_error
+ .long do_IRQ ! 34 DMAC dmte0
+ .long do_IRQ ! 35 dmte1
+ .long do_IRQ ! 36 dmte2
+ .long do_IRQ ! 37 dmte3
+ .long do_IRQ ! 38 dmae
+ .long exception_error ! 39 /* 6E0 */
+ .long do_IRQ ! 40 SCIF-ch0 eri /* 700 */
+ .long do_IRQ ! 41 rxi
+ .long do_IRQ ! 42 bri
+ .long do_IRQ ! 43 txi
+ .long do_IRQ ! 44 DMAC dmte4 /* 780 */
+ .long do_IRQ ! 45 dmte5
+ .long do_IRQ ! 46 dmte6
+ .long do_IRQ ! 47 dmte7 /* 7E0 */
+#if defined(CONFIG_SH_FPU)
+ .long do_fpu_state_restore ! 48 /* 800 */
+ .long do_fpu_state_restore ! 49 /* 820 */
+#else
+ .long exception_error
+ .long exception_error
+#endif
+ .long exception_error /* 840 */
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! 56 CMT /* 900 */
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! 60 HAC
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! PCI serr /* A00 */
+ .long do_IRQ ! INTA
+ .long do_IRQ ! INTB
+ .long do_IRQ ! INTC
+ .long do_IRQ ! INTD
+ .long do_IRQ ! err
+ .long do_IRQ ! pwd3
+ .long do_IRQ ! pwd2
+ .long do_IRQ ! pwd1 /* B00 */
+ .long do_IRQ ! pwd0
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! SCIF-ch1 eri /* B80 */
+ .long do_IRQ ! rxi
+ .long do_IRQ ! bri
+ .long do_IRQ ! txi
+ .long do_IRQ ! SIOF /* C00 */
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! HSPI /* C80 */
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! MMCIF fatat /* D00 */
+ .long do_IRQ ! tran
+ .long do_IRQ ! err
+ .long do_IRQ ! frdy
+ .long do_IRQ ! DMAC dmint8 /* D80 */
+ .long do_IRQ ! dmint9
+ .long do_IRQ ! dmint10
+ .long do_IRQ ! dmint11
+ .long do_IRQ ! TMU-ch3 /* E00 */
+ .long do_IRQ ! TMU-ch4
+ .long do_IRQ ! TMU-ch5
+ .long exception_error
+ .long do_IRQ ! SSI
+ .long exception_error
+ .long exception_error
+ .long exception_error
+ .long do_IRQ ! FLCTL flste /* F00 */
+ .long do_IRQ ! fltend
+ .long do_IRQ ! fltrq0
+ .long do_IRQ ! fltrq1
+ .long do_IRQ ! GPIO gpioi0 /* F80 */
+ .long do_IRQ ! gpioi1
+ .long do_IRQ ! gpioi2
+ .long do_IRQ ! gpioi3
#endif
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 42427b79697..c294de1e14a 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -3,7 +3,7 @@
*
* CPU Subtype Probing for SH-4.
*
- * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2001 - 2006 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -29,7 +29,7 @@ int __init detect_cpu_and_cache_system(void)
[9] = (1 << 16)
};
- pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffff;
+ pvr = (ctrl_inl(CCN_PVR) >> 8) & 0xffffff;
prr = (ctrl_inl(CCN_PRR) >> 4) & 0xff;
cvr = (ctrl_inl(CCN_CVR));
@@ -38,7 +38,6 @@ int __init detect_cpu_and_cache_system(void)
*/
cpu_data->icache.way_incr = (1 << 13);
cpu_data->icache.entry_shift = 5;
- cpu_data->icache.entry_mask = 0x1fe0;
cpu_data->icache.sets = 256;
cpu_data->icache.ways = 1;
cpu_data->icache.linesz = L1_CACHE_BYTES;
@@ -48,13 +47,29 @@ int __init detect_cpu_and_cache_system(void)
*/
cpu_data->dcache.way_incr = (1 << 14);
cpu_data->dcache.entry_shift = 5;
- cpu_data->dcache.entry_mask = 0x3fe0;
cpu_data->dcache.sets = 512;
cpu_data->dcache.ways = 1;
cpu_data->dcache.linesz = L1_CACHE_BYTES;
- /* Set the FPU flag, virtually all SH-4's have one */
- cpu_data->flags |= CPU_HAS_FPU;
+ /*
+ * Setup some generic flags we can probe
+ * (L2 and DSP detection only work on SH-4A)
+ */
+ if (((pvr >> 16) & 0xff) == 0x10) {
+ if ((cvr & 0x02000000) == 0)
+ cpu_data->flags |= CPU_HAS_L2_CACHE;
+ if ((cvr & 0x10000000) == 0)
+ cpu_data->flags |= CPU_HAS_DSP;
+
+ cpu_data->flags |= CPU_HAS_LLSC;
+ }
+
+ /* FPU detection works for everyone */
+ if ((cvr & 0x20000000) == 1)
+ cpu_data->flags |= CPU_HAS_FPU;
+
+ /* Mask off the upper chip ID */
+ pvr &= 0xffff;
/*
* Probe the underlying processor version/revision and
@@ -63,56 +78,101 @@ int __init detect_cpu_and_cache_system(void)
switch (pvr) {
case 0x205:
cpu_data->type = CPU_SH7750;
- cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
+ cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
+ CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
break;
case 0x206:
cpu_data->type = CPU_SH7750S;
- cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_PERF_COUNTER;
+ cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
+ CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA;
break;
case 0x1100:
cpu_data->type = CPU_SH7751;
+ cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
break;
case 0x2000:
cpu_data->type = CPU_SH73180;
cpu_data->icache.ways = 4;
cpu_data->dcache.ways = 4;
- cpu_data->flags &= ~CPU_HAS_FPU;
+ cpu_data->flags |= CPU_HAS_LLSC;
+ break;
+ case 0x2001:
+ case 0x2004:
+ cpu_data->type = CPU_SH7770;
+ cpu_data->icache.ways = 4;
+ cpu_data->dcache.ways = 4;
+
+ cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_LLSC;
+ break;
+ case 0x2006:
+ case 0x200A:
+ if (prr == 0x61)
+ cpu_data->type = CPU_SH7781;
+ else
+ cpu_data->type = CPU_SH7780;
+
+ cpu_data->icache.ways = 4;
+ cpu_data->dcache.ways = 4;
+
+ cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER |
+ CPU_HAS_LLSC;
+ break;
+ case 0x3000:
+ case 0x3003:
+ cpu_data->type = CPU_SH7343;
+ cpu_data->icache.ways = 4;
+ cpu_data->dcache.ways = 4;
+ cpu_data->flags |= CPU_HAS_LLSC;
break;
case 0x8000:
cpu_data->type = CPU_ST40RA;
+ cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
break;
case 0x8100:
cpu_data->type = CPU_ST40GX1;
+ cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
break;
case 0x700:
cpu_data->type = CPU_SH4_501;
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
-
- /* No FPU on the SH4-500 series.. */
- cpu_data->flags &= ~CPU_HAS_FPU;
+ cpu_data->flags |= CPU_HAS_PTEA;
break;
case 0x600:
cpu_data->type = CPU_SH4_202;
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
+ cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
break;
case 0x500 ... 0x501:
switch (prr) {
- case 0x10: cpu_data->type = CPU_SH7750R; break;
- case 0x11: cpu_data->type = CPU_SH7751R; break;
- case 0x50: cpu_data->type = CPU_SH7760; break;
+ case 0x10:
+ cpu_data->type = CPU_SH7750R;
+ break;
+ case 0x11:
+ cpu_data->type = CPU_SH7751R;
+ break;
+ case 0x50 ... 0x5f:
+ cpu_data->type = CPU_SH7760;
+ break;
}
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
+ cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA;
+
break;
default:
cpu_data->type = CPU_SH_NONE;
break;
}
+#ifdef CONFIG_SH_DIRECT_MAPPED
+ cpu_data->icache.ways = 1;
+ cpu_data->dcache.ways = 1;
+#endif
+
/*
* On anything that's not a direct-mapped cache, look to the CVR
* for I/D-cache specifics.
@@ -121,18 +181,56 @@ int __init detect_cpu_and_cache_system(void)
size = sizes[(cvr >> 20) & 0xf];
cpu_data->icache.way_incr = (size >> 1);
cpu_data->icache.sets = (size >> 6);
- cpu_data->icache.entry_mask =
- (cpu_data->icache.way_incr - (1 << 5));
+
}
+ /* Setup the rest of the I-cache info */
+ cpu_data->icache.entry_mask = cpu_data->icache.way_incr -
+ cpu_data->icache.linesz;
+
+ cpu_data->icache.way_size = cpu_data->icache.sets *
+ cpu_data->icache.linesz;
+
+ /* And the rest of the D-cache */
if (cpu_data->dcache.ways > 1) {
size = sizes[(cvr >> 16) & 0xf];
cpu_data->dcache.way_incr = (size >> 1);
cpu_data->dcache.sets = (size >> 6);
- cpu_data->dcache.entry_mask =
- (cpu_data->dcache.way_incr - (1 << 5));
+ }
+
+ cpu_data->dcache.entry_mask = cpu_data->dcache.way_incr -
+ cpu_data->dcache.linesz;
+
+ cpu_data->dcache.way_size = cpu_data->dcache.sets *
+ cpu_data->dcache.linesz;
+
+ /*
+ * Setup the L2 cache desc
+ *
+ * SH-4A's have an optional PIPT L2.
+ */
+ if (cpu_data->flags & CPU_HAS_L2_CACHE) {
+ /*
+ * Size calculation is much more sensible
+ * than it is for the L1.
+ *
+	 * Sizes are 128KB, 256KB, 512KB, and 1MB.
+ */
+ size = (cvr & 0xf) << 17;
+
+ BUG_ON(!size);
+
+ cpu_data->scache.way_incr = (1 << 16);
+ cpu_data->scache.entry_shift = 5;
+ cpu_data->scache.ways = 4;
+ cpu_data->scache.linesz = L1_CACHE_BYTES;
+ cpu_data->scache.entry_mask =
+ (cpu_data->scache.way_incr - cpu_data->scache.linesz);
+ cpu_data->scache.sets = size /
+ (cpu_data->scache.linesz * cpu_data->scache.ways);
+ cpu_data->scache.way_size =
+ (cpu_data->scache.sets * cpu_data->scache.linesz);
}
return 0;
}
-
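For reference, the L2 arithmetic above works out as follows for a CVR field of 2 (a 256KB L2), assuming the 32-byte L1_CACHE_BYTES line size used throughout this file: size = 2 << 17 = 0x40000 bytes; with 4 ways, sets = 0x40000 / (32 * 4) = 2048; way_size = 2048 * 32 = 0x10000, matching the hard-coded way_incr of 1 << 16; and entry_mask = 0x10000 - 0x20 = 0xffe0. The BUG_ON(!size) catches the case where the CVR reports no L2 even though CPU_HAS_L2_CACHE was set during the flags probe.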
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
new file mode 100644
index 00000000000..6e4e9654135
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c
@@ -0,0 +1,43 @@
+/*
+ * SH4-202 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh4202_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh4202_devices_setup(void)
+{
+ return platform_add_devices(sh4202_devices,
+ ARRAY_SIZE(sh4202_devices));
+}
+__initcall(sh4202_devices_setup);
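Each of the new setup-*.c files below follows the same shape: a plat_sci_port array terminated by an entry with .flags = 0, hung off a single "sh-sci" platform device and registered from an __initcall. As a sketch only (the second port's base address and IRQ numbers here are placeholders, not taken from any datasheet), an additional serial port is described by adding one more element ahead of the sentinel:

static struct plat_sci_port example_sci_ports[] = {
	{
		.mapbase	= 0xffe80000,
		.flags		= UPF_BOOT_AUTOCONF,
		.type		= PORT_SCIF,
		.irqs		= { 40, 41, 43, 42 },
	}, {
		.mapbase	= 0xffe90000,	/* placeholder second SCIF */
		.flags		= UPF_BOOT_AUTOCONF,
		.type		= PORT_SCIF,
		.irqs		= { 44, 45, 47, 46 },	/* placeholder IRQs */
	}, {
		.flags = 0,	/* sentinel: terminates the list */
	}
};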
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh73180.c b/arch/sh/kernel/cpu/sh4/setup-sh73180.c
new file mode 100644
index 00000000000..cc9ea1e2e5d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh73180.c
@@ -0,0 +1,43 @@
+/*
+ * SH73180 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 81, 83, 82 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh73180_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh73180_devices_setup(void)
+{
+ return platform_add_devices(sh73180_devices,
+ ARRAY_SIZE(sh73180_devices));
+}
+__initcall(sh73180_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7343.c b/arch/sh/kernel/cpu/sh4/setup-sh7343.c
new file mode 100644
index 00000000000..91d61cf91ba
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7343.c
@@ -0,0 +1,43 @@
+/*
+ * SH7343 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 80, 81, 83, 82 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7343_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7343_devices_setup(void)
+{
+ return platform_add_devices(sh7343_devices,
+ ARRAY_SIZE(sh7343_devices));
+}
+__initcall(sh7343_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
new file mode 100644
index 00000000000..50812d57c1c
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -0,0 +1,48 @@
+/*
+ * SH7750/SH7751 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCI,
+ .irqs = { 23, 24, 25, 0 },
+ }, {
+ .mapbase = 0xffe80000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7750_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7750_devices_setup(void)
+{
+ return platform_add_devices(sh7750_devices,
+ ARRAY_SIZE(sh7750_devices));
+}
+__initcall(sh7750_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
new file mode 100644
index 00000000000..97f1c9af35d
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c
@@ -0,0 +1,53 @@
+/*
+ * SH7760 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xfe600000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 52, 53, 55, 54 },
+ }, {
+ .mapbase = 0xfe610000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 72, 73, 75, 74 },
+ }, {
+ .mapbase = 0xfe620000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 77, 79, 78 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7760_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7760_devices_setup(void)
+{
+ return platform_add_devices(sh7760_devices,
+ ARRAY_SIZE(sh7760_devices));
+}
+__initcall(sh7760_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7770.c b/arch/sh/kernel/cpu/sh4/setup-sh7770.c
new file mode 100644
index 00000000000..6a04cc5f5ac
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7770.c
@@ -0,0 +1,53 @@
+/*
+ * SH7770 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xff923000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 61, 61, 61, 61 },
+ }, {
+ .mapbase = 0xff924000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 62, 62, 62, 62 },
+ }, {
+ .mapbase = 0xff925000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 63, 63, 63, 63 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7770_devices[] __initdata = {
+ &sci_device,
+};
+
+static int __init sh7770_devices_setup(void)
+{
+ return platform_add_devices(sh7770_devices,
+ ARRAY_SIZE(sh7770_devices));
+}
+__initcall(sh7770_devices_setup);
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
new file mode 100644
index 00000000000..72493f259ed
--- /dev/null
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7780.c
@@ -0,0 +1,79 @@
+/*
+ * SH7780 Setup
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <asm/sci.h>
+
+static struct resource rtc_resources[] = {
+ [0] = {
+ .start = 0xffe80000,
+ .end = 0xffe80000 + 0x58 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ /* Period IRQ */
+ .start = 21,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* Carry IRQ */
+ .start = 22,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ /* Alarm IRQ */
+ .start = 23,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device rtc_device = {
+ .name = "sh-rtc",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resource = rtc_resources,
+};
+
+static struct plat_sci_port sci_platform_data[] = {
+ {
+ .mapbase = 0xffe00000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 40, 41, 43, 42 },
+ }, {
+ .mapbase = 0xffe10000,
+ .flags = UPF_BOOT_AUTOCONF,
+ .type = PORT_SCIF,
+ .irqs = { 76, 77, 79, 78 },
+ }, {
+ .flags = 0,
+ }
+};
+
+static struct platform_device sci_device = {
+ .name = "sh-sci",
+ .id = -1,
+ .dev = {
+ .platform_data = sci_platform_data,
+ },
+};
+
+static struct platform_device *sh7780_devices[] __initdata = {
+ &rtc_device,
+ &sci_device,
+};
+
+static int __init sh7780_devices_setup(void)
+{
+ return platform_add_devices(sh7780_devices,
+ ARRAY_SIZE(sh7780_devices));
+}
+__initcall(sh7780_devices_setup);
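The SH7780 additionally exports its RTC block as an "sh-rtc" platform device carrying one register window and three IRQs. A minimal sketch of the consumer side, using only the generic platform-device accessors rather than anything specific to the sh-rtc driver:

static int example_rtc_probe(struct platform_device *pdev)
{
	/* the 0x58-byte register window declared in rtc_resources[0] */
	struct resource *res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	/* the three IRQs, in the order they were listed above */
	int periodic_irq = platform_get_irq(pdev, 0);	/* 21 */
	int carry_irq    = platform_get_irq(pdev, 1);	/* 22 */
	int alarm_irq    = platform_get_irq(pdev, 2);	/* 23 */

	if (!res)
		return -ENODEV;

	pr_debug("rtc: regs %08lx-%08lx, irqs %d/%d/%d\n",
		 (unsigned long)res->start, (unsigned long)res->end,
		 periodic_irq, carry_irq, alarm_irq);
	return 0;
}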
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index b09805f3ee2..7bcc73f9b8d 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -1,49 +1,52 @@
/*
- * arch/sh/kernel/cpu/sq.c
+ * arch/sh/kernel/cpu/sh4/sq.c
*
* General management API for SH-4 integrated Store Queues
*
- * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
+ * Copyright (C) 2001 - 2006 Paul Mundt
* Copyright (C) 2001, 2002 M. R. Brown
*
- * Some of this code has been adopted directly from the old arch/sh/mm/sq.c
- * hack that was part of the LinuxDC project. For all intents and purposes,
- * this is a completely new interface that really doesn't have much in common
- * with the old zone-based approach at all. In fact, it's only listed here for
- * general completeness.
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/bitmap.h>
+#include <linux/sysdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/proc_fs.h>
-#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
-
+#include <linux/mm.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
#include <asm/cpu/sq.h>
-static LIST_HEAD(sq_mapping_list);
+struct sq_mapping;
+
+struct sq_mapping {
+ const char *name;
+
+ unsigned long sq_addr;
+ unsigned long addr;
+ unsigned int size;
+
+ struct sq_mapping *next;
+};
+
+static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
+static kmem_cache_t *sq_cache;
+static unsigned long *sq_bitmap;
-/**
- * sq_flush - Flush (prefetch) the store queue cache
- * @addr: the store queue address to flush
- *
- * Executes a prefetch instruction on the specified store queue cache,
- * so that the cached data is written to physical memory.
- */
-inline void sq_flush(void *addr)
-{
- __asm__ __volatile__ ("pref @%0" : : "r" (addr) : "memory");
-}
+#define store_queue_barrier() \
+do { \
+ (void)ctrl_inl(P4SEG_STORE_QUE); \
+ ctrl_outl(0, P4SEG_STORE_QUE + 0); \
+ ctrl_outl(0, P4SEG_STORE_QUE + 8); \
+} while (0);
/**
* sq_flush_range - Flush (prefetch) a specific SQ range
@@ -56,152 +59,73 @@ inline void sq_flush(void *addr)
void sq_flush_range(unsigned long start, unsigned int len)
{
volatile unsigned long *sq = (unsigned long *)start;
- unsigned long dummy;
/* Flush the queues */
for (len >>= 5; len--; sq += 8)
- sq_flush((void *)sq);
+ prefetchw((void *)sq);
/* Wait for completion */
- dummy = ctrl_inl(P4SEG_STORE_QUE);
-
- ctrl_outl(0, P4SEG_STORE_QUE + 0);
- ctrl_outl(0, P4SEG_STORE_QUE + 8);
+ store_queue_barrier();
}
-static struct sq_mapping *__sq_alloc_mapping(unsigned long virt, unsigned long phys, unsigned long size, const char *name)
+static inline void sq_mapping_list_add(struct sq_mapping *map)
{
- struct sq_mapping *map;
-
- if (virt + size > SQ_ADDRMAX)
- return ERR_PTR(-ENOSPC);
-
- map = kmalloc(sizeof(struct sq_mapping), GFP_KERNEL);
- if (!map)
- return ERR_PTR(-ENOMEM);
+ struct sq_mapping **p, *tmp;
- INIT_LIST_HEAD(&map->list);
+ spin_lock_irq(&sq_mapping_lock);
- map->sq_addr = virt;
- map->addr = phys;
- map->size = size + 1;
- map->name = name;
+ p = &sq_mapping_list;
+ while ((tmp = *p) != NULL)
+ p = &tmp->next;
- list_add(&map->list, &sq_mapping_list);
+ map->next = tmp;
+ *p = map;
- return map;
+ spin_unlock_irq(&sq_mapping_lock);
}
-static unsigned long __sq_get_next_addr(void)
+static inline void sq_mapping_list_del(struct sq_mapping *map)
{
- if (!list_empty(&sq_mapping_list)) {
- struct list_head *pos, *tmp;
-
- /*
- * Read one off the list head, as it will have the highest
- * mapped allocation. Set the next one up right above it.
- *
- * This is somewhat sub-optimal, as we don't look at
- * gaps between allocations or anything lower then the
- * highest-level allocation.
- *
- * However, in the interest of performance and the general
- * lack of desire to do constant list rebalancing, we don't
- * worry about it.
- */
- list_for_each_safe(pos, tmp, &sq_mapping_list) {
- struct sq_mapping *entry;
-
- entry = list_entry(pos, typeof(*entry), list);
-
- return entry->sq_addr + entry->size;
+ struct sq_mapping **p, *tmp;
+
+ spin_lock_irq(&sq_mapping_lock);
+
+ for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
+ if (tmp == map) {
+ *p = tmp->next;
+ break;
}
- }
- return P4SEG_STORE_QUE;
+ spin_unlock_irq(&sq_mapping_lock);
}
-/**
- * __sq_remap - Perform a translation from the SQ to a phys addr
- * @map: sq mapping containing phys and store queue addresses.
- *
- * Maps the store queue address specified in the mapping to the physical
- * address specified in the mapping.
- */
-static struct sq_mapping *__sq_remap(struct sq_mapping *map)
+static int __sq_remap(struct sq_mapping *map, unsigned long flags)
{
- unsigned long flags, pteh, ptel;
+#if defined(CONFIG_MMU)
struct vm_struct *vma;
- pgprot_t pgprot;
-
- /*
- * Without an MMU (or with it turned off), this is much more
- * straightforward, as we can just load up each queue's QACR with
- * the physical address appropriately masked.
- */
-
- ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
- ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
-#ifdef CONFIG_MMU
- /*
- * With an MMU on the other hand, things are slightly more involved.
- * Namely, we have to have a direct mapping between the SQ addr and
- * the associated physical address in the UTLB by way of setting up
- * a virt<->phys translation by hand. We do this by simply specifying
- * the SQ addr in UTLB.VPN and the associated physical address in
- * UTLB.PPN.
- *
- * Notably, even though this is a special case translation, and some
- * of the configuration bits are meaningless, we're still required
- * to have a valid ASID context in PTEH.
- *
- * We could also probably get by without explicitly setting PTEA, but
- * we do it here just for good measure.
- */
- spin_lock_irqsave(&sq_mapping_lock, flags);
-
- pteh = map->sq_addr;
- ctrl_outl((pteh & MMU_VPN_MASK) | get_asid(), MMU_PTEH);
-
- ptel = map->addr & PAGE_MASK;
- ctrl_outl(((ptel >> 28) & 0xe) | (ptel & 0x1), MMU_PTEA);
-
- pgprot = pgprot_noncached(PAGE_KERNEL);
-
- ptel &= _PAGE_FLAGS_HARDWARE_MASK;
- ptel |= pgprot_val(pgprot);
- ctrl_outl(ptel, MMU_PTEL);
-
- __asm__ __volatile__ ("ldtlb" : : : "memory");
-
- spin_unlock_irqrestore(&sq_mapping_lock, flags);
-
- /*
- * Next, we need to map ourselves in the kernel page table, so that
- * future accesses after a TLB flush will be handled when we take a
- * page fault.
- *
- * Theoretically we could just do this directly and not worry about
- * setting up the translation by hand ahead of time, but for the
- * cases where we want a one-shot SQ mapping followed by a quick
- * writeout before we hit the TLB flush, we do it anyways. This way
- * we at least save ourselves the initial page fault overhead.
- */
vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
if (!vma)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
vma->phys_addr = map->addr;
if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
- map->size, pgprot_val(pgprot))) {
+ map->size, flags)) {
vunmap(vma->addr);
- return NULL;
+ return -EAGAIN;
}
-#endif /* CONFIG_MMU */
+#else
+ /*
+ * Without an MMU (or with it turned off), this is much more
+ * straightforward, as we can just load up each queue's QACR with
+ * the physical address appropriately masked.
+ */
+ ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
+ ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
+#endif
- return map;
+ return 0;
}
/**
@@ -209,42 +133,65 @@ static struct sq_mapping *__sq_remap(struct sq_mapping *map)
* @phys: Physical address of mapping.
* @size: Length of mapping.
* @name: User invoking mapping.
+ * @flags: Protection flags.
*
* Remaps the physical address @phys through the next available store queue
* address of @size length. @name is logged at boot time as well as through
- * the procfs interface.
- *
- * A pre-allocated and filled sq_mapping pointer is returned, and must be
- * cleaned up with a call to sq_unmap() when the user is done with the
- * mapping.
+ * the sysfs interface.
*/
-struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *name)
+unsigned long sq_remap(unsigned long phys, unsigned int size,
+ const char *name, unsigned long flags)
{
struct sq_mapping *map;
- unsigned long virt, end;
+ unsigned long end;
unsigned int psz;
+ int ret, page;
/* Don't allow wraparound or zero size */
end = phys + size - 1;
- if (!size || end < phys)
- return NULL;
+ if (unlikely(!size || end < phys))
+ return -EINVAL;
/* Don't allow anyone to remap normal memory.. */
- if (phys < virt_to_phys(high_memory))
- return NULL;
+ if (unlikely(phys < virt_to_phys(high_memory)))
+ return -EINVAL;
phys &= PAGE_MASK;
+ size = PAGE_ALIGN(end + 1) - phys;
+
+ map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
+ if (unlikely(!map))
+ return -ENOMEM;
+
+ map->addr = phys;
+ map->size = size;
+ map->name = name;
+
+ page = bitmap_find_free_region(sq_bitmap, 0x04000000,
+ get_order(map->size));
+ if (unlikely(page < 0)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
+
+ ret = __sq_remap(map, flags);
+ if (unlikely(ret != 0))
+ goto out;
+
+ psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
+ likely(map->name) ? map->name : "???",
+ psz, psz == 1 ? " " : "s",
+ map->sq_addr, map->addr);
- size = PAGE_ALIGN(end + 1) - phys;
- virt = __sq_get_next_addr();
- psz = (size + (PAGE_SIZE - 1)) / PAGE_SIZE;
- map = __sq_alloc_mapping(virt, phys, size, name);
+ sq_mapping_list_add(map);
- printk("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
- map->name ? map->name : "???",
- psz, psz == 1 ? " " : "s",
- map->sq_addr, map->addr);
+ return map->sq_addr;
- return __sq_remap(map);
+out:
+ kmem_cache_free(sq_cache, map);
+ return ret;
}
/**
@@ -255,188 +202,198 @@ struct sq_mapping *sq_remap(unsigned long phys, unsigned int size, const char *n
* sq_remap(). Also frees up the pte that was previously inserted into
* the kernel page table and discards the UTLB translation.
*/
-void sq_unmap(struct sq_mapping *map)
+void sq_unmap(unsigned long vaddr)
{
- if (map->sq_addr > (unsigned long)high_memory)
- vfree((void *)(map->sq_addr & PAGE_MASK));
+ struct sq_mapping **p, *map;
+ struct vm_struct *vma;
+ int page;
- list_del(&map->list);
- kfree(map);
-}
+ for (p = &sq_mapping_list; (map = *p); p = &map->next)
+ if (map->sq_addr == vaddr)
+ break;
-/**
- * sq_clear - Clear a store queue range
- * @addr: Address to start clearing from.
- * @len: Length to clear.
- *
- * A quick zero-fill implementation for clearing out memory that has been
- * remapped through the store queues.
- */
-void sq_clear(unsigned long addr, unsigned int len)
-{
- int i;
+ if (unlikely(!map)) {
+ printk("%s: bad store queue address 0x%08lx\n",
+ __FUNCTION__, vaddr);
+ return;
+ }
- /* Clear out both queues linearly */
- for (i = 0; i < 8; i++) {
- ctrl_outl(0, addr + i + 0);
- ctrl_outl(0, addr + i + 8);
+ page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
+ bitmap_release_region(sq_bitmap, page, get_order(map->size));
+
+#ifdef CONFIG_MMU
+ vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
+ if (!vma) {
+ printk(KERN_ERR "%s: bad address 0x%08lx\n",
+ __FUNCTION__, map->sq_addr);
+ return;
}
+#endif
+
+ sq_mapping_list_del(map);
- sq_flush_range(addr, len);
+ kmem_cache_free(sq_cache, map);
}
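Since sq_remap() now hands back a store queue virtual address (or a negative errno) instead of a struct sq_mapping pointer, and sq_unmap() takes that address back, in-kernel usage reduces to the sketch below. The physical window and length are placeholders; PAGE_SHARED mirrors what the sysfs interface further down passes, and IS_ERR_VALUE() from <linux/err.h> separates the error values from real P4 addresses:

static void sq_usage_sketch(void)
{
	unsigned long sq;

	sq = sq_remap(0x18000000, 0x2000, "example",	/* placeholder window */
		      pgprot_val(PAGE_SHARED));
	if (IS_ERR_VALUE(sq))		/* -EINVAL, -ENOMEM or -ENOSPC */
		return;

	memset((void *)sq, 0, 0x2000);	/* stores gather in the SQs */
	sq_flush_range(sq, 0x2000);	/* prefetch each 32-byte block out */
	sq_unmap(sq);
}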
-/**
- * sq_vma_unmap - Unmap a VMA range
- * @area: VMA containing range.
- * @addr: Start of range.
- * @len: Length of range.
+/*
+ * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
+ * there is any other easy way to add things on a per-cpu basis without
+ * putting the directory entries somewhere stupid and having to create
+ * links in sysfs by hand back into the per-cpu directories.
*
- * Searches the sq_mapping_list for a mapping matching the sq addr @addr,
- * and subsequently frees up the entry. Further cleanup is done by generic
- * code.
+ * Some day we may want to have an additional abstraction per store
+ * queue, but considering the kobject hell we already have to deal with,
+ * it's simply not worth the trouble.
*/
-static void sq_vma_unmap(struct vm_area_struct *area,
- unsigned long addr, size_t len)
-{
- struct list_head *pos, *tmp;
+static struct kobject *sq_kobject[NR_CPUS];
- list_for_each_safe(pos, tmp, &sq_mapping_list) {
- struct sq_mapping *entry;
+struct sq_sysfs_attr {
+ struct attribute attr;
+ ssize_t (*show)(char *buf);
+ ssize_t (*store)(const char *buf, size_t count);
+};
- entry = list_entry(pos, typeof(*entry), list);
+#define to_sq_sysfs_attr(attr) container_of(attr, struct sq_sysfs_attr, attr)
- if (entry->sq_addr == addr) {
- /*
- * We could probably get away without doing the tlb flush
- * here, as generic code should take care of most of this
- * when unmapping the rest of the VMA range for us. Leave
- * it in for added sanity for the time being..
- */
- __flush_tlb_page(get_asid(), entry->sq_addr & PAGE_MASK);
+static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
- list_del(&entry->list);
- kfree(entry);
+ if (likely(sattr->show))
+ return sattr->show(buf);
- return;
- }
- }
+ return -EIO;
}
-/**
- * sq_vma_sync - Sync a VMA range
- * @area: VMA containing range.
- * @start: Start of range.
- * @len: Length of range.
- * @flags: Additional flags.
- *
- * Synchronizes an sq mapped range by flushing the store queue cache for
- * the duration of the mapping.
- *
- * Used internally for user mappings, which must use msync() to prefetch
- * the store queue cache.
- */
-static int sq_vma_sync(struct vm_area_struct *area,
- unsigned long start, size_t len, unsigned int flags)
+static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
- sq_flush_range(start, len);
+ struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
- return 0;
+ if (likely(sattr->store))
+ return sattr->store(buf, count);
+
+ return -EIO;
}
-static struct vm_operations_struct sq_vma_ops = {
- .unmap = sq_vma_unmap,
- .sync = sq_vma_sync,
-};
+static ssize_t mapping_show(char *buf)
+{
+ struct sq_mapping **list, *entry;
+ char *p = buf;
-/**
- * sq_mmap - mmap() for /dev/cpu/sq
- * @file: unused.
- * @vma: VMA to remap.
- *
- * Remap the specified vma @vma through the store queues, and setup associated
- * information for the new mapping. Also build up the page tables for the new
- * area.
- */
-static int sq_mmap(struct file *file, struct vm_area_struct *vma)
+ for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
+ p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
+ entry->sq_addr, entry->sq_addr + entry->size,
+ entry->addr, entry->name);
+
+ return p - buf;
+}
+
+static ssize_t mapping_store(const char *buf, size_t count)
{
- unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
- unsigned long size = vma->vm_end - vma->vm_start;
- struct sq_mapping *map;
+ unsigned long base = 0, len = 0;
- /*
- * We're not interested in any arbitrary virtual address that has
- * been stuck in the VMA, as we already know what addresses we
- * want. Save off the size, and reposition the VMA to begin at
- * the next available sq address.
- */
- vma->vm_start = __sq_get_next_addr();
- vma->vm_end = vma->vm_start + size;
+ sscanf(buf, "%lx %lx", &base, &len);
+ if (!base)
+ return -EIO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (likely(len)) {
+ int ret = sq_remap(base, len, "Userspace",
+ pgprot_val(PAGE_SHARED));
+ if (ret < 0)
+ return ret;
+ } else
+ sq_unmap(base);
- vma->vm_flags |= VM_IO | VM_RESERVED;
+ return count;
+}
- map = __sq_alloc_mapping(vma->vm_start, offset, size, "Userspace");
+static struct sq_sysfs_attr mapping_attr =
+ __ATTR(mapping, 0644, mapping_show, mapping_store);
- if (io_remap_pfn_range(vma, map->sq_addr, map->addr >> PAGE_SHIFT,
- size, vma->vm_page_prot))
- return -EAGAIN;
+static struct attribute *sq_sysfs_attrs[] = {
+ &mapping_attr.attr,
+ NULL,
+};
- vma->vm_ops = &sq_vma_ops;
+static struct sysfs_ops sq_sysfs_ops = {
+ .show = sq_sysfs_show,
+ .store = sq_sysfs_store,
+};
- return 0;
-}
+static struct kobj_type ktype_percpu_entry = {
+ .sysfs_ops = &sq_sysfs_ops,
+ .default_attrs = sq_sysfs_attrs,
+};
-#ifdef CONFIG_PROC_FS
-static int sq_mapping_read_proc(char *buf, char **start, off_t off,
- int len, int *eof, void *data)
+static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
- struct list_head *pos;
- char *p = buf;
+ unsigned int cpu = sysdev->id;
+ struct kobject *kobj;
- list_for_each_prev(pos, &sq_mapping_list) {
- struct sq_mapping *entry;
+ sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+ if (unlikely(!sq_kobject[cpu]))
+ return -ENOMEM;
- entry = list_entry(pos, typeof(*entry), list);
+ kobj = sq_kobject[cpu];
+ kobj->parent = &sysdev->kobj;
+ kobject_set_name(kobj, "%s", "sq");
+ kobj->ktype = &ktype_percpu_entry;
- p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", entry->sq_addr,
- entry->sq_addr + entry->size - 1, entry->addr,
- entry->name);
- }
-
- return p - buf;
+ return kobject_register(kobj);
}
-#endif
-static struct file_operations sq_fops = {
- .owner = THIS_MODULE,
- .mmap = sq_mmap,
-};
+static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
+{
+ unsigned int cpu = sysdev->id;
+ struct kobject *kobj = sq_kobject[cpu];
-static struct miscdevice sq_dev = {
- .minor = STORE_QUEUE_MINOR,
- .name = "sq",
- .fops = &sq_fops,
+ kobject_unregister(kobj);
+ return 0;
+}
+
+static struct sysdev_driver sq_sysdev_driver = {
+ .add = sq_sysdev_add,
+ .remove = __devexit_p(sq_sysdev_remove),
};
static int __init sq_api_init(void)
{
- int ret;
+ unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
+ unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
+ int ret = -ENOMEM;
+
printk(KERN_NOTICE "sq: Registering store queue API.\n");
- create_proc_read_entry("sq_mapping", 0, 0, sq_mapping_read_proc, 0);
+ sq_cache = kmem_cache_create("store_queue_cache",
+ sizeof(struct sq_mapping), 0, 0,
+ NULL, NULL);
+ if (unlikely(!sq_cache))
+ return ret;
- ret = misc_register(&sq_dev);
- if (ret)
- remove_proc_entry("sq_mapping", NULL);
+	sq_bitmap = kzalloc(size * sizeof(long), GFP_KERNEL);
+ if (unlikely(!sq_bitmap))
+ goto out;
+
+ ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
+ if (unlikely(ret != 0))
+ goto out;
+
+ return 0;
+
+out:
+ kfree(sq_bitmap);
+ kmem_cache_destroy(sq_cache);
return ret;
}
static void __exit sq_api_exit(void)
{
- misc_deregister(&sq_dev);
- remove_proc_entry("sq_mapping", NULL);
+ sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
+ kfree(sq_bitmap);
+ kmem_cache_destroy(sq_cache);
}
module_init(sq_api_init);
@@ -445,11 +402,7 @@ module_exit(sq_api_exit);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(STORE_QUEUE_MINOR);
EXPORT_SYMBOL(sq_remap);
EXPORT_SYMBOL(sq_unmap);
-EXPORT_SYMBOL(sq_clear);
-EXPORT_SYMBOL(sq_flush);
EXPORT_SYMBOL(sq_flush_range);
-
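With the character device and procfs bits gone, mappings are managed through the per-CPU "mapping" attribute registered via the sysdev driver above: reading it lists each active mapping as "sqaddr-end [phys]: name", writing two hex numbers ("<phys> <len>") creates a PAGE_SHARED mapping named "Userspace", and writing a base with a zero length tears it down. Given that the kobject is parented to the CPU sysdev and named "sq", the attribute should surface as something like /sys/devices/system/cpu/cpu0/sq/mapping, though the exact path follows from the sysdev layout rather than anything in this file.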
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
index 1378db375e1..a00022722e9 100644
--- a/arch/sh/kernel/early_printk.c
+++ b/arch/sh/kernel/early_printk.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2002 M. R. Brown
- * Copyright (C) 2004 Paul Mundt
+ * Copyright (C) 2004 - 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -49,7 +49,7 @@ static int __init sh_console_setup(struct console *co, char *options)
return 0;
}
-static struct console early_console = {
+static struct console bios_console = {
.name = "bios",
.write = sh_console_write,
.setup = sh_console_setup,
@@ -59,34 +59,43 @@ static struct console early_console = {
#endif
#ifdef CONFIG_EARLY_SCIF_CONSOLE
+#include <linux/serial_core.h>
+#include "../../../drivers/serial/sh-sci.h"
+
+#ifdef CONFIG_CPU_SH4
#define SCIF_REG 0xffe80000
+#elif defined(CONFIG_CPU_SUBTYPE_SH72060)
+#define SCIF_REG 0xfffe9800
+#else
+#error "Undefined SCIF for this subtype"
+#endif
+
+static struct uart_port scif_port = {
+ .mapbase = SCIF_REG,
+ .membase = (char __iomem *)SCIF_REG,
+};
static void scif_sercon_putc(int c)
{
- while (!(ctrl_inw(SCIF_REG + 0x10) & 0x20)) ;
+	while (((sci_in(&scif_port, SCFDR) & 0x1f00) >> 8) == 16)
+ ;
- ctrl_outb(c, SCIF_REG + 12);
- ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0x9f), SCIF_REG + 0x10);
+ sci_out(&scif_port, SCxTDR, c);
+ sci_in(&scif_port, SCxSR);
+ sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40));
+
+	while ((sci_in(&scif_port, SCxSR) & 0x40) == 0)
+		;
if (c == '\n')
scif_sercon_putc('\r');
}
-static void scif_sercon_flush(void)
-{
- ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0xbf), SCIF_REG + 0x10);
-
- while (!(ctrl_inw(SCIF_REG + 0x10) & 0x40)) ;
-
- ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0xbf), SCIF_REG + 0x10);
-}
-
-static void scif_sercon_write(struct console *con, const char *s, unsigned count)
+static void scif_sercon_write(struct console *con, const char *s,
+ unsigned count)
{
while (count-- > 0)
scif_sercon_putc(*s++);
-
- scif_sercon_flush();
}
static int __init scif_sercon_setup(struct console *con, char *options)
@@ -96,7 +105,7 @@ static int __init scif_sercon_setup(struct console *con, char *options)
return 0;
}
-static struct console early_console = {
+static struct console scif_console = {
.name = "sercon",
.write = scif_sercon_write,
.setup = scif_sercon_setup,
@@ -104,7 +113,7 @@ static struct console early_console = {
.index = -1,
};
-void scif_sercon_init(int baud)
+static void scif_sercon_init(int baud)
{
ctrl_outw(0, SCIF_REG + 8);
ctrl_outw(0, SCIF_REG);
@@ -122,16 +131,61 @@ void scif_sercon_init(int baud)
}
#endif
-void __init enable_early_printk(void)
+/*
+ * Set up a default console; if more than one is compiled in, rely on the
+ * earlyprintk= parsing to give priority.
+ */
+static struct console *early_console =
+#ifdef CONFIG_SH_STANDARD_BIOS
+ &bios_console
+#elif defined(CONFIG_EARLY_SCIF_CONSOLE)
+ &scif_console
+#else
+ NULL
+#endif
+ ;
+
+static int __initdata keep_early;
+
+int __init setup_early_printk(char *opt)
{
-#ifdef CONFIG_EARLY_SCIF_CONSOLE
- scif_sercon_init(115200);
+ char *space;
+ char buf[256];
+
+ strlcpy(buf, opt, sizeof(buf));
+ space = strchr(buf, ' ');
+ if (space)
+ *space = 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+#ifdef CONFIG_SH_STANDARD_BIOS
+ if (!strncmp(buf, "bios", 4))
+ early_console = &bios_console;
+#endif
+#if defined(CONFIG_EARLY_SCIF_CONSOLE)
+ if (!strncmp(buf, "serial", 6)) {
+ early_console = &scif_console;
+
+#ifdef CONFIG_CPU_SH4
+ scif_sercon_init(115200);
+#endif
+ }
#endif
- register_console(&early_console);
+
+ if (likely(early_console))
+ register_console(early_console);
+
+ return 1;
}
+__setup("earlyprintk=", setup_early_printk);
-void disable_early_printk(void)
+void __init disable_early_printk(void)
{
- unregister_console(&early_console);
+ if (!keep_early) {
+ printk("disabling early console\n");
+ unregister_console(early_console);
+ } else
+ printk("keeping early console\n");
}
-
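The practical effect is that the early console is now chosen on the kernel command line rather than registered unconditionally: earlyprintk=bios selects the BIOS console, earlyprintk=serial selects the SCIF console (and also initialises it to 115200 baud on SH-4), and an option containing "keep" before the first space (earlyprintk=serial,keep, for instance) makes disable_early_printk() leave the early console registered once the real console has taken over.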
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
index 7dfd2ba75f7..fe8221855b2 100644
--- a/arch/sh/kernel/entry.S
+++ b/arch/sh/kernel/entry.S
@@ -18,24 +18,6 @@
#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>
-#if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
-#define sys_nfsservctl sys_ni_syscall
-#endif
-
-#if !defined(CONFIG_MMU)
-#define sys_madvise sys_ni_syscall
-#define sys_readahead sys_ni_syscall
-#define sys_mprotect sys_ni_syscall
-#define sys_msync sys_ni_syscall
-#define sys_mlock sys_ni_syscall
-#define sys_munlock sys_ni_syscall
-#define sys_mlockall sys_ni_syscall
-#define sys_munlockall sys_ni_syscall
-#define sys_mremap sys_ni_syscall
-#define sys_mincore sys_ni_syscall
-#define sys_remap_file_pages sys_ni_syscall
-#endif
-
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
! to be jumped is too far, but it causes illegal slot exception.
@@ -326,7 +308,7 @@ ENTRY(exception_error)
.align 2
ret_from_exception:
preempt_stop()
-ret_from_irq:
+ENTRY(ret_from_irq)
!
mov #OFF_SR, r0
mov.l @(r0,r15), r0 ! get status register
@@ -389,11 +371,12 @@ work_pending:
! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched
- tst #_TIF_SIGPENDING, r0
+ tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
work_notifysig:
bt/s restore_all
mov r15, r4
- mov #0, r5
+ mov r12, r5 ! set arg1(save_r0)
+ mov r0, r6
mov.l 2f, r1
mova restore_all, r0
jmp @r1
@@ -431,7 +414,7 @@ work_resched:
.align 2
1: .long schedule
-2: .long do_signal
+2: .long do_notify_resume
.align 2
syscall_exit_work:
@@ -552,6 +535,7 @@ syscall_call:
mov.l @r9, r8
jsr @r8 ! jump to specific syscall handler
nop
+ mov.l @(OFF_R0,r15), r12 ! save r0
mov.l r0, @(OFF_R0,r15) ! save the return value
!
syscall_exit:
@@ -644,7 +628,7 @@ skip_restore:
!
#if defined(CONFIG_KGDB_NMI)
! Clear in_nmi
- mov.l 4f, k0
+ mov.l 6f, k0
mov #0, k1
mov.b k1, @k0
#endif
@@ -722,7 +706,7 @@ interrupt:
!
!
.align 2
-handle_exception:
+ENTRY(handle_exception)
! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
! save all registers onto stack.
!
@@ -732,8 +716,8 @@ handle_exception:
bt/s 1f ! It's a kernel to kernel transition.
mov r15, k0 ! save original stack to k0
/* User space to kernel */
- mov #0x20, k1
- shll8 k1 ! k1 := 8192 (== THREAD_SIZE)
+ mov #(THREAD_SIZE >> 8), k1
+ shll8 k1 ! k1 := THREAD_SIZE
add current, k1
mov k1, r15 ! change to kernel stack
!
@@ -838,300 +822,3 @@ ENTRY(exception_none)
rts
nop
- .data
-ENTRY(sys_call_table)
- .long sys_ni_syscall /* 0 - old "setup()" system call*/
- .long sys_exit
- .long sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_waitpid
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_lchown16
- .long sys_ni_syscall /* old break syscall holder */
- .long sys_stat
- .long sys_lseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_oldumount
- .long sys_setuid16
- .long sys_getuid16
- .long sys_stime /* 25 */
- .long sys_ptrace
- .long sys_alarm
- .long sys_fstat
- .long sys_pause
- .long sys_utime /* 30 */
- .long sys_ni_syscall /* old stty syscall holder */
- .long sys_ni_syscall /* old gtty syscall holder */
- .long sys_access
- .long sys_nice
- .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
- .long sys_sync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_pipe
- .long sys_times
- .long sys_ni_syscall /* old prof syscall holder */
- .long sys_brk /* 45 */
- .long sys_setgid16
- .long sys_getgid16
- .long sys_signal
- .long sys_geteuid16
- .long sys_getegid16 /* 50 */
- .long sys_acct
- .long sys_umount /* recycled never used phys() */
- .long sys_ni_syscall /* old lock syscall holder */
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_ni_syscall /* old mpx syscall holder */
- .long sys_setpgid
- .long sys_ni_syscall /* old ulimit syscall holder */
- .long sys_ni_syscall /* sys_olduname */
- .long sys_umask /* 60 */
- .long sys_chroot
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_sigaction
- .long sys_sgetmask
- .long sys_ssetmask
- .long sys_setreuid16 /* 70 */
- .long sys_setregid16
- .long sys_sigsuspend
- .long sys_sigpending
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_old_getrlimit
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups16 /* 80 */
- .long sys_setgroups16
- .long sys_ni_syscall /* sys_oldselect */
- .long sys_symlink
- .long sys_lstat
- .long sys_readlink /* 85 */
- .long sys_uselib
- .long sys_swapon
- .long sys_reboot
- .long old_readdir
- .long old_mmap /* 90 */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown16 /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_ni_syscall /* old profil syscall holder */
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_ni_syscall /* ioperm */
- .long sys_socketcall
- .long sys_syslog
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_newstat
- .long sys_newlstat
- .long sys_newfstat
- .long sys_uname
- .long sys_ni_syscall /* 110 */ /* iopl */
- .long sys_vhangup
- .long sys_ni_syscall /* idle */
- .long sys_ni_syscall /* vm86old */
- .long sys_wait4
- .long sys_swapoff /* 115 */
- .long sys_sysinfo
- .long sys_ipc
- .long sys_fsync
- .long sys_sigreturn
- .long sys_clone /* 120 */
- .long sys_setdomainname
- .long sys_newuname
- .long sys_ni_syscall /* sys_modify_ldt */
- .long sys_adjtimex
- .long sys_mprotect /* 125 */
- .long sys_sigprocmask
- .long sys_ni_syscall /* old "create_module" */
- .long sys_init_module
- .long sys_delete_module
- .long sys_ni_syscall /* 130: old "get_kernel_syms" */
- .long sys_quotactl
- .long sys_getpgid
- .long sys_fchdir
- .long sys_bdflush
- .long sys_sysfs /* 135 */
- .long sys_personality
- .long sys_ni_syscall /* for afs_syscall */
- .long sys_setfsuid16
- .long sys_setfsgid16
- .long sys_llseek /* 140 */
- .long sys_getdents
- .long sys_select
- .long sys_flock
- .long sys_msync
- .long sys_readv /* 145 */
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl
- .long sys_mlock /* 150 */
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max
- .long sys_sched_get_priority_min /* 160 */
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_mremap
- .long sys_setresuid16
- .long sys_getresuid16 /* 165 */
- .long sys_ni_syscall /* vm86 */
- .long sys_ni_syscall /* old "query_module" */
- .long sys_poll
- .long sys_nfsservctl
- .long sys_setresgid16 /* 170 */
- .long sys_getresgid16
- .long sys_prctl
- .long sys_rt_sigreturn
- .long sys_rt_sigaction
- .long sys_rt_sigprocmask /* 175 */
- .long sys_rt_sigpending
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend
- .long sys_pread_wrapper /* 180 */
- .long sys_pwrite_wrapper
- .long sys_chown16
- .long sys_getcwd
- .long sys_capget
- .long sys_capset /* 185 */
- .long sys_sigaltstack
- .long sys_sendfile
- .long sys_ni_syscall /* streams1 */
- .long sys_ni_syscall /* streams2 */
- .long sys_vfork /* 190 */
- .long sys_getrlimit
- .long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64 /* 195 */
- .long sys_lstat64
- .long sys_fstat64
- .long sys_lchown
- .long sys_getuid
- .long sys_getgid /* 200 */
- .long sys_geteuid
- .long sys_getegid
- .long sys_setreuid
- .long sys_setregid
- .long sys_getgroups /* 205 */
- .long sys_setgroups
- .long sys_fchown
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setresgid /* 210 */
- .long sys_getresgid
- .long sys_chown
- .long sys_setuid
- .long sys_setgid
- .long sys_setfsuid /* 215 */
- .long sys_setfsgid
- .long sys_pivot_root
- .long sys_mincore
- .long sys_madvise
- .long sys_getdents64 /* 220 */
- .long sys_fcntl64
- .long sys_ni_syscall /* reserved for TUX */
- .long sys_ni_syscall /* Reserved for Security */
- .long sys_gettid
- .long sys_readahead /* 225 */
- .long sys_setxattr
- .long sys_lsetxattr
- .long sys_fsetxattr
- .long sys_getxattr
- .long sys_lgetxattr /* 230 */
- .long sys_fgetxattr
- .long sys_listxattr
- .long sys_llistxattr
- .long sys_flistxattr
- .long sys_removexattr /* 235 */
- .long sys_lremovexattr
- .long sys_fremovexattr
- .long sys_tkill
- .long sys_sendfile64
- .long sys_futex /* 240 */
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity
- .long sys_ni_syscall
- .long sys_ni_syscall
- .long sys_io_setup /* 245 */
- .long sys_io_destroy
- .long sys_io_getevents
- .long sys_io_submit
- .long sys_io_cancel
- .long sys_fadvise64 /* 250 */
- .long sys_ni_syscall
- .long sys_exit_group
- .long sys_lookup_dcookie
- .long sys_epoll_create
- .long sys_epoll_ctl /* 255 */
- .long sys_epoll_wait
- .long sys_remap_file_pages
- .long sys_set_tid_address
- .long sys_timer_create
- .long sys_timer_settime /* 260 */
- .long sys_timer_gettime
- .long sys_timer_getoverrun
- .long sys_timer_delete
- .long sys_clock_settime
- .long sys_clock_gettime /* 265 */
- .long sys_clock_getres
- .long sys_clock_nanosleep
- .long sys_statfs64
- .long sys_fstatfs64
- .long sys_tgkill /* 270 */
- .long sys_utimes
- .long sys_fadvise64_64_wrapper
- .long sys_ni_syscall /* Reserved for vserver */
- .long sys_ni_syscall /* Reserved for mbind */
- .long sys_ni_syscall /* 275 - get_mempolicy */
- .long sys_ni_syscall /* set_mempolicy */
- .long sys_mq_open
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive /* 280 */
- .long sys_mq_notify
- .long sys_mq_getsetattr
- .long sys_ni_syscall /* Reserved for kexec */
- .long sys_waitid
- .long sys_add_key /* 285 */
- .long sys_request_key
- .long sys_keyctl
- .long sys_ioprio_set
- .long sys_ioprio_get
- .long sys_inotify_init /* 290 */
- .long sys_inotify_add_watch
- .long sys_inotify_rm_watch
-
-/* End of entry.S */
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index 9b9e6ef626c..f5f53d14f24 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -11,6 +11,18 @@
* Head.S contains the SH exception handlers and startup code.
*/
#include <linux/linkage.h>
+#include <asm/thread_info.h>
+
+#ifdef CONFIG_CPU_SH4A
+#define SYNCO() synco
+
+#define PREFI(label, reg) \
+ mov.l label, reg; \
+ prefi @reg
+#else
+#define SYNCO()
+#define PREFI(label, reg)
+#endif
.section .empty_zero_page, "aw"
ENTRY(empty_zero_page)
@@ -42,18 +54,25 @@ ENTRY(_stext)
! Initialize global interrupt mask
mov #0, r0
ldc r0, r6_bank
+
+ /*
+ * Prefetch if possible to reduce cache miss penalty.
+ *
+ * We do this early on for SH-4A as a micro-optimization,
+ * as later on we will have speculative execution enabled
+ * and this will become less of an issue.
+ */
+ PREFI(5f, r0)
+ PREFI(6f, r0)
+
!
mov.l 2f, r0
mov r0, r15 ! Set initial r15 (stack pointer)
- mov #0x20, r1 !
- shll8 r1 ! r1 = 8192
+ mov #(THREAD_SIZE >> 8), r1
+ shll8 r1 ! r1 = THREAD_SIZE
sub r1, r0 !
ldc r0, r7_bank ! ... and initial thread_info
- !
- ! Additional CPU initialization
- mov.l 6f, r0
- jsr @r0
- nop
+
! Clear BSS area
mov.l 3f, r1
add #4, r1
@@ -62,6 +81,14 @@ ENTRY(_stext)
9: cmp/hs r2, r1
bf/s 9b ! while (r1 < r2)
mov.l r0,@-r2
+
+ ! Additional CPU initialization
+ mov.l 6f, r0
+ jsr @r0
+ nop
+
+ SYNCO() ! Wait for pending instructions..
+
! Start kernel
mov.l 5f, r0
jmp @r0
@@ -69,7 +96,7 @@ ENTRY(_stext)
.balign 4
1: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF
-2: .long stack
+2: .long init_thread_union+THREAD_SIZE
3: .long __bss_start
4: .long _end
5: .long start_kernel
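The THREAD_SIZE substitution here is a behavioural no-op for the default 8KB stacks: THREAD_SIZE >> 8 is 0x20, so "mov #(THREAD_SIZE >> 8), r1" followed by "shll8 r1" yields exactly the 8192 that the old hard-coded "mov #0x20" produced, while now tracking thread_info.h if the stack size ever changes; the same substitution appears in entry.S's handle_exception above. The new "2:" literal, init_thread_union+THREAD_SIZE, is simply the top of the initial task's stack, replacing the old private "stack" symbol.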
diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c
index 71c9fde2fd9..501fe03e371 100644
--- a/arch/sh/kernel/io.c
+++ b/arch/sh/kernel/io.c
@@ -61,6 +61,73 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count)
}
EXPORT_SYMBOL(memset_io);
+void __raw_readsl(unsigned long addr, void *datap, int len)
+{
+ u32 *data;
+
+ for (data = datap; (len != 0) && (((u32)data & 0x1f) != 0); len--)
+ *data++ = ctrl_inl(addr);
+
+ if (likely(len >= (0x20 >> 2))) {
+ int tmp2, tmp3, tmp4, tmp5, tmp6;
+
+ __asm__ __volatile__(
+ "1: \n\t"
+ "mov.l @%7, r0 \n\t"
+ "mov.l @%7, %2 \n\t"
+#ifdef CONFIG_CPU_SH4
+ "movca.l r0, @%0 \n\t"
+#else
+ "mov.l r0, @%0 \n\t"
+#endif
+ "mov.l @%7, %3 \n\t"
+ "mov.l @%7, %4 \n\t"
+ "mov.l @%7, %5 \n\t"
+ "mov.l @%7, %6 \n\t"
+ "mov.l @%7, r7 \n\t"
+ "mov.l @%7, r0 \n\t"
+ "mov.l %2, @(0x04,%0) \n\t"
+ "mov #0x20>>2, %2 \n\t"
+ "mov.l %3, @(0x08,%0) \n\t"
+ "sub %2, %1 \n\t"
+ "mov.l %4, @(0x0c,%0) \n\t"
+ "cmp/hi %1, %2 ! T if 32 > len \n\t"
+ "mov.l %5, @(0x10,%0) \n\t"
+ "mov.l %6, @(0x14,%0) \n\t"
+ "mov.l r7, @(0x18,%0) \n\t"
+ "mov.l r0, @(0x1c,%0) \n\t"
+ "bf.s 1b \n\t"
+ " add #0x20, %0 \n\t"
+ : "=&r" (data), "=&r" (len),
+ "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
+ "=&r" (tmp5), "=&r" (tmp6)
+ : "r"(addr), "0" (data), "1" (len)
+ : "r0", "r7", "t", "memory");
+ }
+
+ for (; len != 0; len--)
+ *data++ = ctrl_inl(addr);
+}
+EXPORT_SYMBOL(__raw_readsl);
+
+void __raw_writesl(unsigned long addr, const void *data, int len)
+{
+ if (likely(len != 0)) {
+ int tmp1;
+
+ __asm__ __volatile__ (
+ "1: \n\t"
+ "mov.l @%0+, %1 \n\t"
+ "dt %3 \n\t"
+ "bf.s 1b \n\t"
+ " mov.l %1, @%4 \n\t"
+ : "=&r" (data), "=&r" (tmp1)
+ : "0" (data), "r" (len), "r"(addr)
+ : "t", "memory");
+ }
+}
+EXPORT_SYMBOL(__raw_writesl);
+
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return sh_mv.mv_ioport_map(port, nr);
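Both routines move len 32-bit words between a fixed register address and a memory buffer, with the read side unrolled into 32-byte chunks once the destination pointer is cache-line aligned. A sketch of a typical caller (the FIFO address is a placeholder, not a real device):

static void fifo_copy_sketch(void)
{
	u32 buf[16];

	/* drain 16 longwords from a memory-mapped FIFO into buf ... */
	__raw_readsl(0xb4000000, buf, ARRAY_SIZE(buf));
	/* ... and push them back out again */
	__raw_writesl(0xb4000000, buf, ARRAY_SIZE(buf));
}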
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index c2e07f7f349..c7ebd6aec95 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -1,5 +1,4 @@
-/* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
- *
+/*
* linux/arch/sh/kernel/irq.c
*
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
@@ -7,13 +6,15 @@
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
*/
-
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/irq.h>
#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>
/*
@@ -60,15 +61,46 @@ unlock:
}
#endif
+#ifdef CONFIG_4KSTACKS
+/*
+ * per-CPU IRQ handling contexts (thread information and stack)
+ */
+union irq_ctx {
+ struct thread_info tinfo;
+ u32 stack[THREAD_SIZE/sizeof(u32)];
+};
+
+static union irq_ctx *hardirq_ctx[NR_CPUS];
+static union irq_ctx *softirq_ctx[NR_CPUS];
+#endif
asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
int irq = r4;
+#ifdef CONFIG_4KSTACKS
+ union irq_ctx *curctx, *irqctx;
+#endif
irq_enter();
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ /* Debugging check for stack overflow: is there less than 1KB free? */
+ {
+ long sp;
+
+ __asm__ __volatile__ ("and r15, %0" :
+ "=r" (sp) : "0" (THREAD_SIZE - 1));
+
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
+ }
+ }
+#endif
+
#ifdef CONFIG_CPU_HAS_INTEVT
__asm__ __volatile__ (
#ifdef CONFIG_CPU_HAS_SR_RB
@@ -87,7 +119,135 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
#endif
irq = irq_demux(irq);
- __do_IRQ(irq, &regs);
+
+#ifdef CONFIG_4KSTACKS
+ curctx = (union irq_ctx *)current_thread_info();
+ irqctx = hardirq_ctx[smp_processor_id()];
+
+ /*
+ * this is where we switch to the IRQ stack. However, if we are
+ * already using the IRQ stack (because we interrupted a hardirq
+ * handler) we can't do that and just have to keep using the
+ * current stack (which is the irq stack already after all)
+ */
+ if (curctx != irqctx) {
+ u32 *isp;
+
+ isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+ irqctx->tinfo.task = curctx->tinfo.task;
+ irqctx->tinfo.previous_sp = current_stack_pointer;
+
+ __asm__ __volatile__ (
+ "mov %0, r4 \n"
+ "mov %1, r5 \n"
+ "mov r15, r9 \n"
+ "jsr @%2 \n"
+			/* switch to the irq stack */
+ " mov %3, r15 \n"
+ /* restore the stack (ring zero) */
+ "mov r9, r15 \n"
+ : /* no outputs */
+ : "r" (irq), "r" (&regs), "r" (__do_IRQ), "r" (isp)
+ /* XXX: A somewhat excessive clobber list? -PFM */
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "t", "pr"
+ );
+ } else
+#endif
+ __do_IRQ(irq, &regs);
+
irq_exit();
+
return 1;
}
+
+#ifdef CONFIG_4KSTACKS
+/*
+ * These should really be __section__(".bss.page_aligned") as well, but
+ * gcc 3.0 and earlier don't handle that correctly.
+ */
+static char softirq_stack[NR_CPUS * THREAD_SIZE]
+ __attribute__((__aligned__(THREAD_SIZE)));
+
+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
+ __attribute__((__aligned__(THREAD_SIZE)));
+
+/*
+ * allocate per-cpu stacks for hardirq and for softirq processing
+ */
+void irq_ctx_init(int cpu)
+{
+ union irq_ctx *irqctx;
+
+ if (hardirq_ctx[cpu])
+ return;
+
+ irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
+ irqctx->tinfo.task = NULL;
+ irqctx->tinfo.exec_domain = NULL;
+ irqctx->tinfo.cpu = cpu;
+ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+ hardirq_ctx[cpu] = irqctx;
+
+ irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
+ irqctx->tinfo.task = NULL;
+ irqctx->tinfo.exec_domain = NULL;
+ irqctx->tinfo.cpu = cpu;
+ irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
+
+ softirq_ctx[cpu] = irqctx;
+
+ printk("CPU %u irqstacks, hard=%p soft=%p\n",
+ cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+}
+
+void irq_ctx_exit(int cpu)
+{
+ hardirq_ctx[cpu] = NULL;
+}
+
+extern asmlinkage void __do_softirq(void);
+
+asmlinkage void do_softirq(void)
+{
+ unsigned long flags;
+ struct thread_info *curctx;
+ union irq_ctx *irqctx;
+ u32 *isp;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ if (local_softirq_pending()) {
+ curctx = current_thread_info();
+ irqctx = softirq_ctx[smp_processor_id()];
+ irqctx->tinfo.task = curctx->task;
+ irqctx->tinfo.previous_sp = current_stack_pointer;
+
+ /* build the stack frame on the softirq stack */
+ isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
+
+ __asm__ __volatile__ (
+ "mov r15, r9 \n"
+ "jsr @%0 \n"
+ /* switch to the softirq stack */
+ " mov %1, r15 \n"
+ /* restore the thread stack */
+ "mov r9, r15 \n"
+ : /* no outputs */
+ : "r" (__do_softirq), "r" (isp)
+ /* XXX: A somewhat excessive clobber list? -PFM */
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+ }
+
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(do_softirq);
+#endif
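In the CONFIG_4KSTACKS case, each CPU gets two dedicated THREAD_SIZE-byte stacks out of the static hardirq_stack/softirq_stack arrays, and the inline assembly in do_IRQ()/do_softirq() only switches onto them when it is not already on an interrupt stack: r15 is parked in r9, pointed at the top of the irq context (isp sits at irqctx + sizeof(*irqctx), so the handler starts with the full stack available), and restored once __do_IRQ()/__do_softirq() returns, with previous_sp left in the irq thread_info for anything that wants to walk back to the interrupted stack.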
diff --git a/arch/sh/kernel/kgdb_stub.c b/arch/sh/kernel/kgdb_stub.c
index 42638b92b51..9c6315f0335 100644
--- a/arch/sh/kernel/kgdb_stub.c
+++ b/arch/sh/kernel/kgdb_stub.c
@@ -101,16 +101,17 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#ifdef CONFIG_SH_KGDB_CONSOLE
+#include <linux/console.h>
+#endif
+
#include <asm/system.h>
#include <asm/current.h>
#include <asm/signal.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/kgdb.h>
-
-#ifdef CONFIG_SH_KGDB_CONSOLE
-#include <linux/console.h>
-#endif
+#include <asm/io.h>
/* Function pointers for linkage */
kgdb_debug_hook_t *kgdb_debug_hook;
@@ -240,7 +241,6 @@ static jmp_buf rem_com_env;
/* Misc static */
static int stepped_address;
static short stepped_opcode;
-static const char hexchars[] = "0123456789abcdef";
static char in_buffer[BUFMAX];
static char out_buffer[OUTBUFMAX];
@@ -253,29 +253,6 @@ typedef unsigned char threadref[8];
#define BUF_THREAD_ID_SIZE 16
#endif
-/* Return addr as a real volatile address */
-static inline unsigned int ctrl_inl(const unsigned long addr)
-{
- return *(volatile unsigned long *) addr;
-}
-
-/* Correctly set *addr using volatile */
-static inline void ctrl_outl(const unsigned int b, unsigned long addr)
-{
- *(volatile unsigned long *) addr = b;
-}
-
-/* Get high hex bits */
-static char highhex(const int x)
-{
- return hexchars[(x >> 4) & 0xf];
-}
-
-/* Get low hex bits */
-static char lowhex(const int x)
-{
- return hexchars[x & 0xf];
-}
/* Convert ch to hex */
static int hex(const char ch)
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 6bcd8d92399..08587cdb64d 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -29,12 +29,6 @@ extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
extern void *gdb_vbr_vector;
-/*
- * Provide a dummy crash_notes definition while crash dump arrives to ppc.
- * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
- */
-void *crash_notes = NULL;
-
void machine_shutdown(void)
{
}
diff --git a/arch/sh/kernel/pm.c b/arch/sh/kernel/pm.c
new file mode 100644
index 00000000000..10ab62c9aed
--- /dev/null
+++ b/arch/sh/kernel/pm.c
@@ -0,0 +1,88 @@
+/*
+ * Generic Power Management Routine
+ *
+ * Copyright (c) 2006 Andriy Skulysh <askulsyh@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License.
+ */
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <asm/freq.h>
+#include <asm/io.h>
+#include <asm/watchdog.h>
+#include <asm/pm.h>
+
+#define INTR_OFFSET 0x600
+
+#define STBCR 0xffffff82
+#define STBCR2 0xffffff88
+
+#define STBCR_STBY 0x80
+#define STBCR_MSTP2 0x04
+
+#define MCR 0xffffff68
+#define RTCNT 0xffffff70
+
+#define MCR_RMODE 2
+#define MCR_RFSH 4
+
+void pm_enter(void)
+{
+ u8 stbcr, csr;
+ u16 frqcr, mcr;
+ u32 vbr_new, vbr_old;
+
+ set_bl_bit();
+
+ /* set wdt */
+ csr = sh_wdt_read_csr();
+ csr &= ~WTCSR_TME;
+ csr |= WTCSR_CKS_4096;
+ sh_wdt_write_csr(csr);
+ csr = sh_wdt_read_csr();
+ sh_wdt_write_cnt(0);
+
+ /* disable PLL1 */
+ frqcr = ctrl_inw(FRQCR);
+ frqcr &= ~(FRQCR_PLLEN | FRQCR_PSTBY);
+ ctrl_outw(frqcr, FRQCR);
+
+ /* enable standby */
+ stbcr = ctrl_inb(STBCR);
+ ctrl_outb(stbcr | STBCR_STBY | STBCR_MSTP2, STBCR);
+
+ /* set self-refresh */
+ mcr = ctrl_inw(MCR);
+ ctrl_outw(mcr & ~MCR_RFSH, MCR);
+
+ /* set interrupt handler */
+ asm volatile("stc vbr, %0" : "=r" (vbr_old));
+ vbr_new = get_zeroed_page(GFP_ATOMIC);
+ udelay(50);
+ memcpy((void*)(vbr_new + INTR_OFFSET),
+ &wakeup_start, &wakeup_end - &wakeup_start);
+ asm volatile("ldc %0, vbr" : : "r" (vbr_new));
+
+ ctrl_outw(0, RTCNT);
+ ctrl_outw(mcr | MCR_RFSH | MCR_RMODE, MCR);
+
+ cpu_sleep();
+
+ asm volatile("ldc %0, vbr" : : "r" (vbr_old));
+
+ free_page(vbr_new);
+
+ /* enable PLL1 */
+ frqcr = ctrl_inw(FRQCR);
+ frqcr |= FRQCR_PSTBY;
+ ctrl_outw(frqcr, FRQCR);
+ udelay(50);
+ frqcr |= FRQCR_PLLEN;
+ ctrl_outw(frqcr, FRQCR);
+
+ ctrl_outb(stbcr, STBCR);
+
+ clear_bl_bit();
+}
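
The pm_enter() sequence above is only the low-level standby path; it is normally reached through the generic PM layer (note the pm_suspend export added to sh_ksyms.c later in this patch). A minimal sketch of requesting that state from board code, assuming CONFIG_PM is enabled, that pm_suspend()/PM_SUSPEND_STANDBY are visible via <linux/pm.h> on this tree, and that the platform's pm_ops route standby to pm_enter():

#include <linux/pm.h>

/* Hypothetical helper: ask the generic PM core for standby; on platforms
 * whose pm_ops wire PM_SUSPEND_STANDBY to the routine above, this ends up
 * in pm_enter(). */
static int example_enter_standby(void)
{
	return pm_suspend(PM_SUSPEND_STANDBY);
}
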
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index f2031314cb2..0b1d5dd7a93 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -81,16 +81,6 @@ void cpu_idle(void)
void machine_restart(char * __unused)
{
-
-#ifdef CONFIG_KEXEC
- struct kimage *image;
- image = xchg(&kexec_image, 0);
- if (image) {
- machine_shutdown();
- machine_kexec(image);
- }
-#endif
-
/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
asm volatile("ldc %0, sr\n\t"
"mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
@@ -263,6 +253,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
+ struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
struct task_struct *tsk = current;
@@ -277,8 +268,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
if (user_mode(regs)) {
childregs->regs[15] = usp;
+ ti->addr_limit = USER_DS;
} else {
childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+ ti->addr_limit = KERNEL_DS;
}
if (clone_flags & CLONE_SETTLS) {
childregs->gbr = childregs->regs[0];
@@ -299,13 +292,15 @@ ubc_set_tracing(int asid, unsigned long pc)
{
ctrl_outl(pc, UBC_BARA);
+#ifdef CONFIG_MMU
/* We don't have any ASID settings for the SH-2! */
if (cpu_data->type != CPU_SH7604)
ctrl_outb(asid, UBC_BASRA);
+#endif
ctrl_outl(0, UBC_BAMRA);
- if (cpu_data->type == CPU_SH7729) {
+ if (cpu_data->type == CPU_SH7729 || cpu_data->type == CPU_SH7710) {
ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
} else {
@@ -344,6 +339,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
}
#endif
+#ifdef CONFIG_MMU
/*
* Restore the kernel mode register
* k7 (r7_bank1)
@@ -351,19 +347,21 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
asm volatile("ldc %0, r7_bank"
: /* no output */
: "r" (task_thread_info(next)));
+#endif
-#ifdef CONFIG_MMU
/* If no tasks are using the UBC, we're done */
if (ubc_usercnt == 0)
/* If no tasks are using the UBC, we're done */;
else if (next->thread.ubc_pc && next->mm) {
- ubc_set_tracing(next->mm->context & MMU_CONTEXT_ASID_MASK,
- next->thread.ubc_pc);
+ int asid = 0;
+#ifdef CONFIG_MMU
+ asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
+#endif
+ ubc_set_tracing(asid, next->thread.ubc_pc);
} else {
ctrl_outw(0, UBC_BBRA);
ctrl_outw(0, UBC_BBRB);
}
-#endif
return prev;
}
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index f7eebbde329..04ca13a041c 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -224,7 +224,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case PTRACE_SETDSPREGS: {
unsigned long dp;
- int i;
ret = -EIO;
dp = ((unsigned long) child) + THREAD_SIZE -
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
index a3c24dcbf01..184119eeae5 100644
--- a/arch/sh/kernel/semaphore.c
+++ b/arch/sh/kernel/semaphore.c
@@ -14,7 +14,7 @@
#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>
-spinlock_t semaphore_wake_lock;
+DEFINE_SPINLOCK(semaphore_wake_lock);
/*
* Semaphores are implemented using a two-way counter:
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index e75189cb1db..5f587332234 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -43,27 +43,14 @@ extern void * __rd_start, * __rd_end;
* The bigger value means no problem.
*/
struct sh_cpuinfo boot_cpu_data = { CPU_SH_NONE, 10000000, };
+#ifdef CONFIG_VT
struct screen_info screen_info;
+#endif
#if defined(CONFIG_SH_UNKNOWN)
struct sh_machine_vector sh_mv;
#endif
-/* We need this to satisfy some external references. */
-struct screen_info screen_info = {
- 0, 25, /* orig-x, orig-y */
- 0, /* unused */
- 0, /* orig-video-page */
- 0, /* orig-video-mode */
- 80, /* orig-video-cols */
- 0,0,0, /* ega_ax, ega_bx, ega_cx */
- 25, /* orig-video-lines */
- 0, /* orig-video-isVGA */
- 16 /* orig-video-points */
-};
-
-extern void platform_setup(void);
-extern char *get_system_type(void);
extern int root_mountflags;
#define MV_NAME_SIZE 32
@@ -90,29 +77,8 @@ static struct sh_machine_vector* __init get_mv_byname(const char* name);
static char command_line[COMMAND_LINE_SIZE] = { 0, };
-struct resource standard_io_resources[] = {
- { "dma1", 0x00, 0x1f },
- { "pic1", 0x20, 0x3f },
- { "timer", 0x40, 0x5f },
- { "keyboard", 0x60, 0x6f },
- { "dma page reg", 0x80, 0x8f },
- { "pic2", 0xa0, 0xbf },
- { "dma2", 0xc0, 0xdf },
- { "fpu", 0xf0, 0xff }
-};
-
-#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
-
-/* System RAM - interrupted by the 640kB-1M hole */
-#define code_resource (ram_resources[3])
-#define data_resource (ram_resources[4])
-static struct resource ram_resources[] = {
- { "System RAM", 0x000000, 0x09ffff, IORESOURCE_BUSY },
- { "System RAM", 0x100000, 0x100000, IORESOURCE_BUSY },
- { "Video RAM area", 0x0a0000, 0x0bffff },
- { "Kernel code", 0x100000, 0 },
- { "Kernel data", 0, 0 }
-};
+static struct resource code_resource = { .name = "Kernel code", };
+static struct resource data_resource = { .name = "Kernel data", };
unsigned long memory_start, memory_end;
@@ -145,6 +111,24 @@ static inline void parse_cmdline (char ** cmdline_p, char mv_name[MV_NAME_SIZE],
memory_end = memory_start + mem_size;
}
}
+
+#ifdef CONFIG_EARLY_PRINTK
+ if (c == ' ' && !memcmp(from, "earlyprintk=", 12)) {
+ char *ep_end;
+
+ if (to != command_line)
+ to--;
+
+ from += 12;
+ ep_end = strchr(from, ' ');
+
+ setup_early_printk(from);
+ printk("early console enabled\n");
+
+ from = ep_end;
+ }
+#endif
+
if (c == ' ' && !memcmp(from, "sh_mv=", 6)) {
char* mv_end;
char* mv_comma;
@@ -237,6 +221,9 @@ static int __init sh_mv_setup(char **cmdline_p)
__set_io_port_base(mv_io_base);
#endif
+ if (!sh_mv.mv_nr_irqs)
+ sh_mv.mv_nr_irqs = NR_IRQS;
+
return 0;
}
@@ -245,11 +232,6 @@ void __init setup_arch(char **cmdline_p)
unsigned long bootmap_size;
unsigned long start_pfn, max_pfn, max_low_pfn;
-#ifdef CONFIG_EARLY_PRINTK
- extern void enable_early_printk(void);
-
- enable_early_printk();
-#endif
#ifdef CONFIG_CMDLINE_BOOL
strcpy(COMMAND_LINE, CONFIG_CMDLINE);
#endif
@@ -368,14 +350,14 @@ void __init setup_arch(char **cmdline_p)
#endif
/* Perform the machine specific initialisation */
- platform_setup();
+ if (likely(sh_mv.mv_setup))
+ sh_mv.mv_setup(cmdline_p);
paging_init();
}
struct sh_machine_vector* __init get_mv_byname(const char* name)
{
- extern int strcasecmp(const char *, const char *);
extern long __machvec_start, __machvec_end;
struct sh_machine_vector *all_vecs =
(struct sh_machine_vector *)&__machvec_start;
@@ -410,25 +392,18 @@ static int __init topology_init(void)
subsys_initcall(topology_init);
static const char *cpu_name[] = {
- [CPU_SH7604] = "SH7604",
- [CPU_SH7705] = "SH7705",
- [CPU_SH7708] = "SH7708",
- [CPU_SH7729] = "SH7729",
- [CPU_SH7300] = "SH7300",
- [CPU_SH7750] = "SH7750",
- [CPU_SH7750S] = "SH7750S",
- [CPU_SH7750R] = "SH7750R",
- [CPU_SH7751] = "SH7751",
- [CPU_SH7751R] = "SH7751R",
- [CPU_SH7760] = "SH7760",
- [CPU_SH73180] = "SH73180",
- [CPU_ST40RA] = "ST40RA",
- [CPU_ST40GX1] = "ST40GX1",
- [CPU_SH4_202] = "SH4-202",
- [CPU_SH4_501] = "SH4-501",
- [CPU_SH7770] = "SH7770",
- [CPU_SH7780] = "SH7780",
- [CPU_SH7781] = "SH7781",
+ [CPU_SH7604] = "SH7604", [CPU_SH7300] = "SH7300",
+ [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
+ [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
+ [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
+ [CPU_SH7729] = "SH7729", [CPU_SH7750] = "SH7750",
+ [CPU_SH7750S] = "SH7750S", [CPU_SH7750R] = "SH7750R",
+ [CPU_SH7751] = "SH7751", [CPU_SH7751R] = "SH7751R",
+ [CPU_SH7760] = "SH7760", [CPU_SH73180] = "SH73180",
+ [CPU_ST40RA] = "ST40RA", [CPU_ST40GX1] = "ST40GX1",
+ [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
+ [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780",
+ [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343",
[CPU_SH_NONE] = "Unknown"
};
@@ -438,8 +413,10 @@ const char *get_cpu_subtype(void)
}
#ifdef CONFIG_PROC_FS
+/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
- "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr", "ptea", NULL
+ "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
+ "ptea", "llsc", "l2", NULL
};
static void show_cpuflags(struct seq_file *m)
@@ -460,7 +437,8 @@ static void show_cpuflags(struct seq_file *m)
seq_printf(m, "\n");
}
-static void show_cacheinfo(struct seq_file *m, const char *type, struct cache_info info)
+static void show_cacheinfo(struct seq_file *m, const char *type,
+ struct cache_info info)
{
unsigned int cache_size;
@@ -493,7 +471,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
* unified cache on the SH-2 and SH-3, as well as the harvard
* style cache on the SH-4.
*/
- if (test_bit(SH_CACHE_COMBINED, &(boot_cpu_data.icache.flags))) {
+ if (boot_cpu_data.icache.flags & SH_CACHE_COMBINED) {
seq_printf(m, "unified\n");
show_cacheinfo(m, "cache", boot_cpu_data.icache);
} else {
@@ -502,6 +480,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
show_cacheinfo(m, "dcache", boot_cpu_data.dcache);
}
+ /* Optional secondary cache */
+ if (boot_cpu_data.flags & CPU_HAS_L2_CACHE)
+ show_cacheinfo(m, "scache", boot_cpu_data.scache);
+
seq_printf(m, "bogomips\t: %lu.%02lu\n",
boot_cpu_data.loops_per_jiffy/(500000/HZ),
(boot_cpu_data.loops_per_jiffy/(5000/HZ)) % 100);
@@ -617,4 +599,3 @@ static int __init kgdb_parse_options(char *options)
}
__setup("kgdb=", kgdb_parse_options);
#endif /* CONFIG_SH_KGDB */
-
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
index 245ed8f945e..d3cbfa2ad4a 100644
--- a/arch/sh/kernel/sh_ksyms.c
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -27,21 +27,11 @@ EXPORT_SYMBOL(sh_mv);
/* platform dependent support */
EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(no_irq_type);
-EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strncat);
/* PCI exports */
#ifdef CONFIG_PCI
@@ -52,13 +42,8 @@ EXPORT_SYMBOL(pci_free_consistent);
/* mem exports */
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memcpy_fromio);
-EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memset_io);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(boot_cpu_data);
@@ -94,7 +79,9 @@ EXPORT_SYMBOL(strcpy);
DECLARE_EXPORT(__movstr_i4_even);
DECLARE_EXPORT(__movstr_i4_odd);
DECLARE_EXPORT(__movstrSI12_i4);
+#endif
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
/* needed by some modules */
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_cache_range);
@@ -102,11 +89,9 @@ EXPORT_SYMBOL(flush_dcache_page);
EXPORT_SYMBOL(__flush_purge_region);
#endif
-#if defined(CONFIG_SH7705_CACHE_32KB)
-EXPORT_SYMBOL(flush_cache_all);
-EXPORT_SYMBOL(flush_cache_range);
-EXPORT_SYMBOL(flush_dcache_page);
-EXPORT_SYMBOL(__flush_purge_region);
+#if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
+ defined(CONFIG_SH7705_CACHE_32KB))
+EXPORT_SYMBOL(clear_user_page);
#endif
EXPORT_SYMBOL(flush_tlb_page);
@@ -116,7 +101,12 @@ EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(synchronize_irq);
#endif
+#ifdef CONFIG_PM
+EXPORT_SYMBOL(pm_suspend);
+#endif
+
EXPORT_SYMBOL(csum_partial);
+#ifdef CONFIG_IPV6
EXPORT_SYMBOL(csum_ipv6_magic);
-EXPORT_SYMBOL(consistent_sync);
+#endif
EXPORT_SYMBOL(clear_page);
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index b475c4d2405..5213f5bc6ce 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -8,7 +8,6 @@
* SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
*/
-
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -21,6 +20,7 @@
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
+#include <linux/elf.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
@@ -29,12 +29,8 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
-#define DEBUG_SIG 0
-
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
-
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
@@ -43,51 +39,17 @@ sys_sigsuspend(old_sigset_t mask,
unsigned long r5, unsigned long r6, unsigned long r7,
struct pt_regs regs)
{
- sigset_t saveset;
-
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
+ current->saved_sigmask = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- regs.regs[0] = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(&regs, &saveset))
- return -EINTR;
- }
-}
-
-asmlinkage int
-sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
- unsigned long r6, unsigned long r7,
- struct pt_regs regs)
-{
- sigset_t saveset, newset;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&newset, unewset, sizeof(newset)))
- return -EFAULT;
- sigdelsetmask(&newset, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- current->blocked = newset;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs.regs[0] = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_signal(&regs, &saveset))
- return -EINTR;
- }
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ return -ERESTARTNOHAND;
}
asmlinkage int
@@ -348,7 +310,12 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
-static void setup_frame(int sig, struct k_sigaction *ka,
+/* These symbols are defined with the addresses in the vsyscall page.
+ See vsyscall-trapa.S. */
+extern void __user __kernel_sigreturn;
+extern void __user __kernel_rt_sigreturn;
+
+static int setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
struct sigframe __user *frame;
@@ -368,15 +335,18 @@ static void setup_frame(int sig, struct k_sigaction *ka,
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
- if (_NSIG_WORDS > 1) {
+ if (_NSIG_WORDS > 1)
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
- }
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
+#ifdef CONFIG_VSYSCALL
+ } else if (likely(current->mm->context.vdso)) {
+ regs->pr = VDSO_SYM(&__kernel_sigreturn);
+#endif
} else {
/* Generate return code (system call to sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
@@ -402,21 +372,22 @@ static void setup_frame(int sig, struct k_sigaction *ka,
set_fs(USER_DS);
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
- current->comm, current->pid, frame, regs->pc, regs->pr);
-#endif
+ pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, current->pid, frame, regs->pc, regs->pr);
flush_cache_sigtramp(regs->pr);
+
if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
- return;
+
+ return 0;
give_sigsegv:
force_sigsegv(sig, current);
+ return -EFAULT;
}
-static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
@@ -452,6 +423,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
already in userspace. */
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
+#ifdef CONFIG_VSYSCALL
+ } else if (likely(current->mm->context.vdso)) {
+ regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
+#endif
} else {
/* Generate return code (system call to rt_sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
@@ -477,28 +452,31 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
set_fs(USER_DS);
-#if DEBUG_SIG
- printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
- current->comm, current->pid, frame, regs->pc, regs->pr);
-#endif
+ pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, current->pid, frame, regs->pc, regs->pr);
flush_cache_sigtramp(regs->pr);
+
if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode))
flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES);
- return;
+
+ return 0;
give_sigsegv:
force_sigsegv(sig, current);
+ return -EFAULT;
}
/*
* OK, we're invoking a handler
*/
-static void
+static int
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
+ int ret;
+
/* Are we from a system call? */
if (regs->tra >= 0) {
/* If so, check system call restarting.. */
@@ -539,19 +517,23 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(sig, ka, info, oldset, regs);
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
else
- setup_frame(sig, ka, oldset, regs);
+ ret = setup_frame(sig, ka, oldset, regs);
if (ka->sa.sa_flags & SA_ONESHOT)
ka->sa.sa_handler = SIG_DFL;
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked,sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ if (ret == 0) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
+ return ret;
}
/*
@@ -563,11 +545,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+static void do_signal(struct pt_regs *regs, unsigned int save_r0)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
+ sigset_t *oldset;
/*
* We want the common case to go fast, which
@@ -576,19 +559,27 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
* if so.
*/
if (!user_mode(regs))
- return 1;
+ return;
if (try_to_freeze())
goto no_signal;
- if (!oldset)
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
- handle_signal(signr, &ka, &info, oldset, regs);
- return 1;
+ if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
}
no_signal:
@@ -597,10 +588,27 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
/* Restart the system call - no handlers present */
if (regs->regs[0] == -ERESTARTNOHAND ||
regs->regs[0] == -ERESTARTSYS ||
- regs->regs[0] == -ERESTARTNOINTR ||
- regs->regs[0] == -ERESTART_RESTARTBLOCK) {
+ regs->regs[0] == -ERESTARTNOINTR) {
+ regs->regs[0] = save_r0;
+ regs->pc -= 2;
+ } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
regs->pc -= 2;
+ regs->regs[3] = __NR_restart_syscall;
}
}
- return 0;
+
+ /* if there's no signal to deliver, we just put the saved sigmask
+ * back */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
+ __u32 thread_info_flags)
+{
+ /* deal with pending signal delivery */
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+ do_signal(regs, save_r0);
}
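
The sigsuspend()/do_signal() rework above follows the TIF_RESTORE_SIGMASK convention: the old mask is parked in current->saved_sigmask, and it is either captured in the signal frame (and restored by sigreturn) or put back explicitly when nothing is delivered. A compressed standalone model of that control flow, illustration only and not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct task {
	unsigned long blocked;		/* current signal mask */
	unsigned long saved_sigmask;	/* parked by sigsuspend() */
	bool restore_sigmask;		/* models TIF_RESTORE_SIGMASK */
};

static void model_sigsuspend(struct task *t, unsigned long mask)
{
	t->saved_sigmask = t->blocked;	/* park the old mask */
	t->blocked = mask;		/* sleep with the temporary mask */
	t->restore_sigmask = true;	/* restoration deferred to do_signal() */
}

static void model_do_signal(struct task *t, bool delivered)
{
	unsigned long *oldset = t->restore_sigmask ? &t->saved_sigmask
						   : &t->blocked;

	if (delivered) {
		/* *oldset goes into the frame; sigreturn restores it later */
		printf("signal frame records %#lx\n", *oldset);
		t->restore_sigmask = false;
	} else if (t->restore_sigmask) {
		/* no signal: put the saved mask back ourselves */
		t->blocked = t->saved_sigmask;
		t->restore_sigmask = false;
	}
}

int main(void)
{
	struct task t = { .blocked = 0x1 };

	model_sigsuspend(&t, 0x0);
	model_do_signal(&t, false);
	printf("blocked restored to %#lx\n", t.blocked);	/* prints 0x1 */
	return 0;
}
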
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 917b2f32f26..b68ff705f06 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -21,7 +21,8 @@
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
-
+#include <linux/module.h>
+#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
@@ -44,11 +45,16 @@ asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
return error;
}
-#if defined(HAVE_ARCH_UNMAPPED_AREA)
+unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+
+EXPORT_SYMBOL(shm_align_mask);
+
/*
- * To avoid cache alias, we map the shard page with same color.
+ * To avoid cache aliases, we map the shared page with same color.
*/
-#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + shm_align_mask) & ~shm_align_mask) + \
+ (((pgoff) << PAGE_SHIFT) & shm_align_mask))
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -56,43 +62,52 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
+ int do_colour_align;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
- if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
+ if ((flags & MAP_SHARED) && (addr & shm_align_mask))
return -EINVAL;
return addr;
}
- if (len > TASK_SIZE)
+ if (unlikely(len > TASK_SIZE))
return -ENOMEM;
+ do_colour_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
if (addr) {
- if (flags & MAP_PRIVATE)
- addr = PAGE_ALIGN(addr);
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
else
- addr = COLOUR_ALIGN(addr);
+ addr = PAGE_ALIGN(addr);
+
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
- if (len <= mm->cached_hole_size) {
+
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
mm->cached_hole_size = 0;
- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ start_addr = addr = TASK_UNMAPPED_BASE;
}
- if (flags & MAP_PRIVATE)
- addr = PAGE_ALIGN(mm->free_area_cache);
- else
- addr = COLOUR_ALIGN(mm->free_area_cache);
- start_addr = addr;
full_search:
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(mm->free_area_cache);
+
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr) {
+ if (unlikely(TASK_SIZE - len < addr)) {
/*
* Start a new search - just in case we missed
* some holes.
@@ -104,7 +119,7 @@ full_search:
}
return -ENOMEM;
}
- if (!vma || addr + len <= vma->vm_start) {
+ if (likely(!vma || addr + len <= vma->vm_start)) {
/*
* Remember the place where we stopped the search:
*/
@@ -115,11 +130,10 @@ full_search:
mm->cached_hole_size = vma->vm_start - addr;
addr = vma->vm_end;
- if (!(flags & MAP_PRIVATE))
- addr = COLOUR_ALIGN(addr);
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
}
}
-#endif
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
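
COLOUR_ALIGN() above rounds the hint up to an shm_align_mask boundary and then adds the cache colour of the file offset, so a shared mapping always lands on an address whose low bits match (pgoff << PAGE_SHIFT) and cannot alias in a virtually indexed cache. A small standalone check of that arithmetic (the 16kB shm_align_mask and the PAGE_SHIFT value below are only example figures):

#include <stdio.h>

#define EX_PAGE_SHIFT	12
static unsigned long shm_align_mask = 0x3fff;	/* e.g. 16kB alias size */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return (((addr + shm_align_mask) & ~shm_align_mask) +
		((pgoff << EX_PAGE_SHIFT) & shm_align_mask));
}

int main(void)
{
	unsigned long a = colour_align(0x20001234, 3);

	/* 0x20007000: rounded up to 0x20004000, plus colour 0x3000 */
	printf("%#lx (colour %#lx)\n", a, a & shm_align_mask);
	return 0;
}
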
diff --git a/arch/sh/kernel/syscalls.S b/arch/sh/kernel/syscalls.S
new file mode 100644
index 00000000000..768334e9507
--- /dev/null
+++ b/arch/sh/kernel/syscalls.S
@@ -0,0 +1,353 @@
+/*
+ * arch/sh/kernel/syscalls.S
+ *
+ * System call table for SuperH
+ *
+ * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
+ * Copyright (C) 2003 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+#if !defined(CONFIG_NFSD) && !defined(CONFIG_NFSD_MODULE)
+#define sys_nfsservctl sys_ni_syscall
+#endif
+
+#if !defined(CONFIG_MMU)
+#define sys_madvise sys_ni_syscall
+#define sys_readahead sys_ni_syscall
+#define sys_mprotect sys_ni_syscall
+#define sys_msync sys_ni_syscall
+#define sys_mlock sys_ni_syscall
+#define sys_munlock sys_ni_syscall
+#define sys_mlockall sys_ni_syscall
+#define sys_munlockall sys_ni_syscall
+#define sys_mremap sys_ni_syscall
+#define sys_mincore sys_ni_syscall
+#define sys_remap_file_pages sys_ni_syscall
+#endif
+
+ .data
+ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call*/
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall /* sys_olduname */
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long sys_ni_syscall /* sys_oldselect */
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_uname
+ .long sys_ni_syscall /* 110 */ /* iopl */
+ .long sys_vhangup
+ .long sys_ni_syscall /* idle */
+ .long sys_ni_syscall /* vm86old */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_ni_syscall /* sys_modify_ldt */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_ni_syscall /* vm86 */
+ .long sys_ni_syscall /* old "query_module" */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread_wrapper /* 180 */
+ .long sys_pwrite_wrapper
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* streams1 */
+ .long sys_ni_syscall /* streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_ni_syscall /* Reserved for Security */
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr /* 230 */
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr /* 235 */
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex /* 240 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_ni_syscall
+ .long sys_ni_syscall
+ .long sys_io_setup /* 245 */
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64 /* 250 */
+ .long sys_ni_syscall
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 255 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 260 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 265 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 270 */
+ .long sys_utimes
+ .long sys_fadvise64_64_wrapper
+ .long sys_ni_syscall /* Reserved for vserver */
+ .long sys_ni_syscall /* Reserved for mbind */
+ .long sys_ni_syscall /* 275 - get_mempolicy */
+ .long sys_ni_syscall /* set_mempolicy */
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 280 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_kexec_load
+ .long sys_waitid
+ .long sys_ni_syscall /* 285 */
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl
+ .long sys_ioprio_set
+ .long sys_ioprio_get /* 290 */
+ .long sys_inotify_init
+ .long sys_inotify_add_watch
+ .long sys_inotify_rm_watch
+ .long sys_migrate_pages
+ .long sys_openat /* 295 */
+ .long sys_mkdirat
+ .long sys_mknodat
+ .long sys_fchownat
+ .long sys_futimesat
+ .long sys_fstatat64 /* 300 */
+ .long sys_unlinkat
+ .long sys_renameat
+ .long sys_linkat
+ .long sys_symlinkat
+ .long sys_readlinkat /* 305 */
+ .long sys_fchmodat
+ .long sys_faccessat
+ .long sys_pselect6
+ .long sys_ppoll
+ .long sys_unshare /* 310 */
+ .long sys_set_robust_list
+ .long sys_get_robust_list
+ .long sys_splice
+ .long sys_sync_file_range
+ .long sys_tee /* 315 */
+ .long sys_vmsplice
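
The table above is indexed by the syscall number userspace loads into r3 before issuing trapa, so the absolute numbers in the comments are the user-visible syscall numbers. A quick userspace spot check on an SH target, with the two numbers taken straight from the table:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* 20 is sys_getpid and 224 is sys_gettid in the table above */
	printf("getpid via 20:  %ld\n", (long)syscall(20));
	printf("gettid via 224: %ld\n", (long)syscall(224));
	return 0;
}
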
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index a1589f85499..149d9713edd 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -3,13 +3,12 @@
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2002, 2003, 2004, 2005 Paul Mundt
+ * Copyright (C) 2002 - 2006 Paul Mundt
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
*
* Some code taken from i386 version.
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -26,15 +25,20 @@ struct sys_timer *sys_timer;
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-/* XXX: Can we initialize this in a routine somewhere? Dreamcast doesn't want
- * these routines anywhere... */
-#ifdef CONFIG_SH_RTC
-void (*rtc_get_time)(struct timespec *) = sh_rtc_gettimeofday;
-int (*rtc_set_time)(const time_t) = sh_rtc_settimeofday;
-#else
-void (*rtc_get_time)(struct timespec *);
-int (*rtc_set_time)(const time_t);
-#endif
+/* Dummy RTC ops */
+static void null_rtc_get_time(struct timespec *tv)
+{
+ tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
+ tv->tv_nsec = 0;
+}
+
+static int null_rtc_set_time(const time_t secs)
+{
+ return 0;
+}
+
+void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
+int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
/*
* Scheduler clock - returns current time in nanosec units.
@@ -70,7 +74,6 @@ void do_gettimeofday(struct timeval *tv)
tv->tv_sec = sec;
tv->tv_usec = usec;
}
-
EXPORT_SYMBOL(do_gettimeofday);
int do_settimeofday(struct timespec *tv)
@@ -103,7 +106,6 @@ int do_settimeofday(struct timespec *tv)
return 0;
}
-
EXPORT_SYMBOL(do_settimeofday);
/* last time the RTC clock got updated */
@@ -135,7 +137,7 @@ void handle_timer_tick(struct pt_regs *regs)
xtime.tv_sec > last_rtc_update + 660 &&
(xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
(xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
- if (rtc_set_time(xtime.tv_sec) == 0)
+ if (rtc_sh_set_time(xtime.tv_sec) == 0)
last_rtc_update = xtime.tv_sec;
else
/* do it again in 60s */
@@ -143,8 +145,33 @@ void handle_timer_tick(struct pt_regs *regs)
}
}
+#ifdef CONFIG_PM
+int timer_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
+
+ sys_timer->ops->stop();
+
+ return 0;
+}
+
+int timer_resume(struct sys_device *dev)
+{
+ struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);
+
+ sys_timer->ops->start();
+
+ return 0;
+}
+#else
+#define timer_suspend NULL
+#define timer_resume NULL
+#endif
+
static struct sysdev_class timer_sysclass = {
set_kset_name("timer"),
+ .suspend = timer_suspend,
+ .resume = timer_resume,
};
static int __init timer_init_sysfs(void)
@@ -156,7 +183,6 @@ static int __init timer_init_sysfs(void)
sys_timer->dev.cls = &timer_sysclass;
return sysdev_register(&sys_timer->dev);
}
-
device_initcall(timer_init_sysfs);
void (*board_time_init)(void);
@@ -168,15 +194,9 @@ void __init time_init(void)
clk_init();
- if (rtc_get_time) {
- rtc_get_time(&xtime);
- } else {
- xtime.tv_sec = mktime(2000, 1, 1, 0, 0, 0);
- xtime.tv_nsec = 0;
- }
-
- set_normalized_timespec(&wall_to_monotonic,
- -xtime.tv_sec, -xtime.tv_nsec);
+ rtc_sh_get_time(&xtime);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
/*
* Find the timer to use as the system timer, it will be
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index d4212add53b..205816fcf0d 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -132,17 +132,17 @@ static unsigned long tmu_timer_get_frequency(void)
ctrl_outl(0xffffffff, TMU0_TCOR);
ctrl_outl(0xffffffff, TMU0_TCNT);
- rtc_get_time(&ts2);
+ rtc_sh_get_time(&ts2);
do {
- rtc_get_time(&ts1);
+ rtc_sh_get_time(&ts1);
} while (ts1.tv_nsec == ts2.tv_nsec && ts1.tv_sec == ts2.tv_sec);
/* actually start the timer */
ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
do {
- rtc_get_time(&ts2);
+ rtc_sh_get_time(&ts2);
} while (ts1.tv_nsec == ts2.tv_nsec && ts1.tv_sec == ts2.tv_sec);
freq = 0xffffffff - ctrl_inl(TMU0_TCNT);
@@ -188,6 +188,18 @@ static struct clk tmu0_clk = {
.ops = &tmu_clk_ops,
};
+static int tmu_timer_start(void)
+{
+ ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
+ return 0;
+}
+
+static int tmu_timer_stop(void)
+{
+ ctrl_outb(0, TMU_TSTR);
+ return 0;
+}
+
static int tmu_timer_init(void)
{
unsigned long interval;
@@ -197,7 +209,7 @@ static int tmu_timer_init(void)
tmu0_clk.parent = clk_get("module_clk");
/* Start TMU0 */
- ctrl_outb(0, TMU_TSTR);
+ tmu_timer_stop();
#if !defined(CONFIG_CPU_SUBTYPE_SH7300) && !defined(CONFIG_CPU_SUBTYPE_SH7760)
ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif
@@ -211,13 +223,15 @@ static int tmu_timer_init(void)
ctrl_outl(interval, TMU0_TCOR);
ctrl_outl(interval, TMU0_TCNT);
- ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
+ tmu_timer_start();
return 0;
}
struct sys_timer_ops tmu_timer_ops = {
.init = tmu_timer_init,
+ .start = tmu_timer_start,
+ .stop = tmu_timer_stop,
.get_frequency = tmu_timer_get_frequency,
.get_offset = tmu_timer_get_offset,
};
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index d9db1180f77..c2c597e0948 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -36,40 +36,15 @@
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs) \
-{ \
- if ((kgdb_debug_hook != (kgdb_debug_hook_t *) NULL) && (!user_mode(regs))) \
- { \
- (*kgdb_debug_hook)(regs); \
- } \
+#define CHK_REMOTE_DEBUG(regs) \
+{ \
+ if (kgdb_debug_hook && !user_mode(regs))\
+ (*kgdb_debug_hook)(regs); \
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif
-#define DO_ERROR(trapnr, signr, str, name, tsk) \
-asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
- unsigned long r6, unsigned long r7, \
- struct pt_regs regs) \
-{ \
- unsigned long error_code; \
- \
- /* Check if it's a DSP instruction */ \
- if (is_dsp_inst(&regs)) { \
- /* Enable DSP mode, and restart instruction. */ \
- regs.sr |= SR_DSP; \
- return; \
- } \
- \
- asm volatile("stc r2_bank, %0": "=r" (error_code)); \
- local_irq_enable(); \
- tsk->thread.error_code = error_code; \
- tsk->thread.trap_no = trapnr; \
- CHK_REMOTE_DEBUG(&regs); \
- force_sig(signr, tsk); \
- die_if_no_fixup(str,&regs,error_code); \
-}
-
#ifdef CONFIG_CPU_SH2
#define TRAP_RESERVED_INST 4
#define TRAP_ILLEGAL_SLOT_INST 6
@@ -86,7 +61,7 @@ asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)
-spinlock_t die_lock;
+DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
@@ -575,8 +550,117 @@ int is_dsp_inst(struct pt_regs *regs)
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
-DO_ERROR(TRAP_RESERVED_INST, SIGILL, "reserved instruction", reserved_inst, current)
-DO_ERROR(TRAP_ILLEGAL_SLOT_INST, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
+extern int do_fpu_inst(unsigned short, struct pt_regs*);
+
+asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ unsigned long error_code;
+ struct task_struct *tsk = current;
+
+#ifdef CONFIG_SH_FPU_EMU
+ unsigned short inst;
+ int err;
+
+ get_user(inst, (unsigned short*)regs.pc);
+
+ err = do_fpu_inst(inst, &regs);
+ if (!err) {
+ regs.pc += 2;
+ return;
+ }
+ /* not a FPU inst. */
+#endif
+
+#ifdef CONFIG_SH_DSP
+ /* Check if it's a DSP instruction */
+ if (is_dsp_inst(&regs)) {
+ /* Enable DSP mode, and restart instruction. */
+ regs.sr |= SR_DSP;
+ return;
+ }
+#endif
+
+ asm volatile("stc r2_bank, %0": "=r" (error_code));
+ local_irq_enable();
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = TRAP_RESERVED_INST;
+ CHK_REMOTE_DEBUG(&regs);
+ force_sig(SIGILL, tsk);
+ die_if_no_fixup("reserved instruction", &regs, error_code);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+static int emulate_branch(unsigned short inst, struct pt_regs* regs)
+{
+ /*
+ * bfs: 8fxx: PC+=d*2+4;
+ * bts: 8dxx: PC+=d*2+4;
+ * bra: axxx: PC+=D*2+4;
+ * bsr: bxxx: PC+=D*2+4 after PR=PC+4;
+ * braf:0x23: PC+=Rn*2+4;
+ * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
+ * jmp: 4x2b: PC=Rn;
+ * jsr: 4x0b: PC=Rn after PR=PC+4;
+ * rts: 000b: PC=PR;
+ */
+ if ((inst & 0xfd00) == 0x8d00) {
+ regs->pc += SH_PC_8BIT_OFFSET(inst);
+ return 0;
+ }
+
+ if ((inst & 0xe000) == 0xa000) {
+ regs->pc += SH_PC_12BIT_OFFSET(inst);
+ return 0;
+ }
+
+ if ((inst & 0xf0df) == 0x0003) {
+ regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
+ return 0;
+ }
+
+ if ((inst & 0xf0df) == 0x400b) {
+ regs->pc = regs->regs[(inst & 0x0f00) >> 8];
+ return 0;
+ }
+
+ if ((inst & 0xffff) == 0x000b) {
+ regs->pc = regs->pr;
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ unsigned long error_code;
+ struct task_struct *tsk = current;
+#ifdef CONFIG_SH_FPU_EMU
+ unsigned short inst;
+
+ get_user(inst, (unsigned short *)regs.pc + 1);
+ if (!do_fpu_inst(inst, &regs)) {
+ get_user(inst, (unsigned short *)regs.pc);
+ if (!emulate_branch(inst, &regs))
+ return;
+ /* fault in branch.*/
+ }
+ /* not a FPU inst. */
+#endif
+
+ asm volatile("stc r2_bank, %0": "=r" (error_code));
+ local_irq_enable();
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = TRAP_RESERVED_INST;
+ CHK_REMOTE_DEBUG(&regs);
+ force_sig(SIGILL, tsk);
+ die_if_no_fixup("illegal slot instruction", &regs, error_code);
+}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
@@ -634,14 +718,16 @@ void __init trap_init(void)
exception_handling_table[TRAP_ILLEGAL_SLOT_INST]
= (void *)do_illegal_slot_inst;
-#ifdef CONFIG_CPU_SH4
- if (!(cpu_data->flags & CPU_HAS_FPU)) {
- /* For SH-4 lacking an FPU, treat floating point instructions
- as reserved. */
- /* entry 64 corresponds to EXPEVT=0x800 */
- exception_handling_table[64] = (void *)do_reserved_inst;
- exception_handling_table[65] = (void *)do_illegal_slot_inst;
- }
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
+ defined(CONFIG_SH_FPU_EMU)
+ /*
+ * For SH-4 lacking an FPU, treat floating point instructions as
+ * reserved. They'll be handled in the math-emu case, or faulted on
+ * otherwise.
+ */
+ /* entry 64 corresponds to EXPEVT=0x800 */
+ exception_handling_table[64] = (void *)do_reserved_inst;
+ exception_handling_table[65] = (void *)do_illegal_slot_inst;
#endif
/* Setup VBR for boot cpu */
@@ -655,20 +741,12 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
unsigned long module_end = VMALLOC_END;
int i = 1;
- if (tsk && !sp) {
+ if (!tsk)
+ tsk = current;
+ if (tsk == current)
+ sp = (unsigned long *)current_stack_pointer;
+ else
sp = (unsigned long *)tsk->thread.sp;
- }
-
- if (!sp) {
- __asm__ __volatile__ (
- "mov r15, %0\n\t"
- "stc r7_bank, %1\n\t"
- : "=r" (module_start),
- "=r" (module_end)
- );
-
- sp = (unsigned long *)module_start;
- }
stack = sp;
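
The branch encodings listed in the emulate_branch() comment earlier in this file are easy to check in isolation. A standalone sketch of the displacement arithmetic for the PC-relative cases; the helpers below are reimplemented purely for illustration and are not the kernel's SH_PC_*_OFFSET macros:

#include <stdio.h>
#include <stdint.h>

static long pc_8bit_offset(uint16_t inst)	/* bf/s, bt/s: 8-bit disp */
{
	return (long)(int8_t)(inst & 0xff) * 2 + 4;
}

static long pc_12bit_offset(uint16_t inst)	/* bra, bsr: 12-bit disp */
{
	int32_t d = inst & 0xfff;

	if (d & 0x800)
		d |= ~0xfff;			/* sign extend */
	return (long)d * 2 + 4;
}

int main(void)
{
	printf("bra  0xa012: PC += %ld\n", pc_12bit_offset(0xa012));	/* 18*2+4 = 40 */
	printf("bt/s 0x8dfe: PC += %ld\n", pc_8bit_offset(0x8dfe));	/* -2*2+4 = 0  */
	return 0;
}
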
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 95fdd9135fc..5eb93091818 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -2,6 +2,7 @@
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka
*/
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
#ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -13,7 +14,7 @@ OUTPUT_ARCH(sh)
ENTRY(_start)
SECTIONS
{
- . = 0x80000000 + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
+ . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
_text = .; /* Text and read-only data */
text = .; /* Text and read-only data */
.empty_zero_page : {
@@ -40,16 +41,16 @@ SECTIONS
*(.data)
/* Align the initial ramdisk image (INITRD) on page boundaries. */
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__rd_start = .;
*(.initrd)
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__rd_end = .;
CONSTRUCTORS
}
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
.data.page_aligned : { *(.data.idt) }
. = ALIGN(32);
@@ -60,12 +61,10 @@ SECTIONS
_edata = .; /* End of data section */
- . = ALIGN(8192); /* init_task */
+ . = ALIGN(THREAD_SIZE); /* init_task */
.data.init_task : { *(.data.init_task) }
- /* stack */
- .stack : { stack = .; _stack = .; }
- . = ALIGN(4096); /* Init code and data */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
_sinittext = .;
.init.text : { *(.init.text) }
@@ -96,7 +95,7 @@ SECTIONS
__machvec_start = .;
.init.machvec : { *(.init.machvec) }
__machvec_end = .;
- . = ALIGN(4096);
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
. = ALIGN(4);
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
new file mode 100644
index 00000000000..4bbce1cfa35
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -0,0 +1,36 @@
+obj-y += vsyscall.o vsyscall-syscall.o
+
+$(obj)/vsyscall-syscall.o: \
+ $(foreach F,trapa,$(obj)/vsyscall-$F.so)
+
+# Teach kbuild about targets
+targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
+targets += vsyscall-note.o vsyscall.lds
+
+# The DSO images are built using a special linker script
+quiet_cmd_syscall = SYSCALL $@
+ cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+export CPPFLAGS_vsyscall.lds += -P -C -Ush
+
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+ $(call ld-option, -Wl$(comma)--hash-style=sysv)
+
+SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
+
+$(obj)/vsyscall-trapa.so: \
+$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+ $(call if_changed,syscall)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+extra-y += vsyscall-syms.o
+$(obj)/built-in.o: $(obj)/vsyscall-syms.o
+$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+
+SYSCFLAGS_vsyscall-syms.o = -r
+$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
+ $(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
+ $(call if_changed,syscall)
diff --git a/arch/sh/kernel/vsyscall/vsyscall-note.S b/arch/sh/kernel/vsyscall/vsyscall-note.S
new file mode 100644
index 00000000000..d4b5be4f3d5
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-note.S
@@ -0,0 +1,25 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+
+#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \
+ .section name, flags; \
+ .balign 4; \
+ .long 1f - 0f; /* name length */ \
+ .long 3f - 2f; /* data length */ \
+ .long type; /* note type */ \
+0: .asciz vendor; /* vendor name */ \
+1: .balign 4; \
+2:
+
+#define ASM_ELF_NOTE_END \
+3: .balign 4; /* pad out section */ \
+ .previous
+
+ ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
+ .long LINUX_VERSION_CODE
+ ASM_ELF_NOTE_END
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
new file mode 100644
index 00000000000..555a64f124c
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -0,0 +1,39 @@
+#include <asm/unistd.h>
+
+ .text
+ .balign 32
+ .globl __kernel_sigreturn
+ .type __kernel_sigreturn,@function
+__kernel_sigreturn:
+.LSTART_sigreturn:
+ mov.w 1f, r3
+ trapa #0x10
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+
+1: .short __NR_sigreturn
+.LEND_sigreturn:
+ .size __kernel_sigreturn,.-.LSTART_sigreturn
+
+ .balign 32
+ .globl __kernel_rt_sigreturn
+ .type __kernel_rt_sigreturn,@function
+__kernel_rt_sigreturn:
+.LSTART_rt_sigreturn:
+ mov.w 1f, r3
+ trapa #0x10
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+ or r0, r0
+
+1: .short __NR_rt_sigreturn
+.LEND_rt_sigreturn:
+ .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+
+ .section .eh_frame,"a",@progbits
+ .previous
diff --git a/arch/sh/kernel/vsyscall/vsyscall-syscall.S b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
new file mode 100644
index 00000000000..c2ac7f0282b
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
@@ -0,0 +1,10 @@
+#include <linux/init.h>
+
+__INITDATA
+
+ .globl vsyscall_trapa_start, vsyscall_trapa_end
+vsyscall_trapa_start:
+ .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
+vsyscall_trapa_end:
+
+__FINIT
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
new file mode 100644
index 00000000000..3b6eb34c43f
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -0,0 +1,42 @@
+ .text
+ .globl __kernel_vsyscall
+ .type __kernel_vsyscall,@function
+__kernel_vsyscall:
+.LSTART_vsyscall:
+ /* XXX: We'll have to do something here once we opt to use the vDSO
+ * page for something other than the signal trampoline.. as well as
+ * fill out .eh_frame -- PFM. */
+.LEND_vsyscall:
+ .size __kernel_vsyscall,.-.LSTART_vsyscall
+ .previous
+
+ .section .eh_frame,"a",@progbits
+.LCIE:
+ .ualong .LCIE_end - .LCIE_start
+.LCIE_start:
+ .ualong 0 /* CIE ID */
+ .byte 0x1 /* Version number */
+ .string "zRS" /* NUL-terminated augmentation string */
+ .uleb128 0x1 /* Code alignment factor */
+ .sleb128 -4 /* Data alignment factor */
+ .byte 0x11 /* Return address register column */
+ /* Augmentation length and data (none) */
+ .byte 0xc /* DW_CFA_def_cfa */
+ .uleb128 0xf /* r15 */
+ .uleb128 0x0 /* offset 0 */
+
+ .align 2
+.LCIE_end:
+
+ .ualong .LFDE_end-.LFDE_start /* Length FDE */
+.LFDE_start:
+ .ualong .LCIE /* CIE pointer */
+ .ualong .LSTART_vsyscall-. /* start address */
+ .ualong .LEND_vsyscall-.LSTART_vsyscall
+ .uleb128 0
+ .align 2
+.LFDE_end:
+ .previous
+
+/* Get the common code for the sigreturn entry points */
+#include "vsyscall-sigreturn.S"
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
new file mode 100644
index 00000000000..075d6cc1a2d
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -0,0 +1,150 @@
+/*
+ * arch/sh/kernel/vsyscall.c
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * vDSO randomization
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/elf.h>
+
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+ vdso_enabled = simple_strtoul(s, NULL, 0);
+ return 1;
+}
+__setup("vdso=", vdso_setup);
+
+/*
+ * These symbols are defined by vsyscall.o to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vsyscall_trapa_start, vsyscall_trapa_end;
+static void *syscall_page;
+
+int __init vsyscall_init(void)
+{
+ syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+
+ /*
+ * XXX: Map this page to a fixmap entry if we get around
+ * to adding the page to ELF core dumps
+ */
+
+ memcpy(syscall_page,
+ &vsyscall_trapa_start,
+ &vsyscall_trapa_end - &vsyscall_trapa_start);
+
+ return 0;
+}
+
+static struct page *syscall_vma_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ unsigned long offset = address - vma->vm_start;
+ struct page *page;
+
+ if (address < vma->vm_start || address > vma->vm_end)
+ return NOPAGE_SIGBUS;
+
+ page = virt_to_page(syscall_page + offset);
+
+ get_page(page);
+
+ return page;
+}
+
+/* Prevent VMA merging */
+static void syscall_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct syscall_vm_ops = {
+ .nopage = syscall_vma_nopage,
+ .close = syscall_vma_close,
+};
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int executable_stack)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto up_fail;
+ }
+
+ vma->vm_start = addr;
+ vma->vm_end = addr + PAGE_SIZE;
+ /* MAYWRITE to allow gdb to COW and set breakpoints */
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+ vma->vm_flags |= mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ vma->vm_ops = &syscall_vm_ops;
+ vma->vm_mm = mm;
+
+ ret = insert_vm_struct(mm, vma);
+ if (unlikely(ret)) {
+ kmem_cache_free(vm_area_cachep, vma);
+ goto up_fail;
+ }
+
+ current->mm->context.vdso = (void *)addr;
+
+ mm->total_vm++;
+up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+ return "[vdso]";
+
+ return NULL;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *task)
+{
+ return NULL;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long address)
+{
+ return 0;
+}
+
+int in_gate_area_no_task(unsigned long address)
+{
+ return 0;
+}
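
Since arch_vma_name() above reports the mapping as "[vdso]", the quickest way to confirm that arch_setup_additional_pages() placed the page in a process is to read its own maps. A minimal userspace check:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vdso]"))
			fputs(line, stdout);	/* prints the vDSO mapping, if any */
	fclose(f);
	return 0;
}
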
diff --git a/arch/sh/kernel/vsyscall/vsyscall.lds.S b/arch/sh/kernel/vsyscall/vsyscall.lds.S
new file mode 100644
index 00000000000..b13c3d439fe
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.lds.S
@@ -0,0 +1,74 @@
+/*
+ * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
+ * object prelinked to its virtual address, and with only one read-only
+ * segment (that fits in one page). This script controls its layout.
+ */
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
+#else
+OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
+#endif
+OUTPUT_ARCH(sh)
+
+/* The ELF entry point can be used to set the AT_SYSINFO value. */
+ENTRY(__kernel_vsyscall);
+
+SECTIONS
+{
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ /* This linker script is used both with -r and with -shared.
+ For the layouts to match, we need to skip more than enough
+ space for the dynamic symbol table et al. If this amount
+ is insufficient, ld -shared will barf. Just increase it here. */
+ . = 0x400;
+
+ .text : { *(.text) } :text =0x90909090
+ .note : { *(.note.*) } :text :note
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+ .dynamic : { *(.dynamic) } :text :dynamic
+ .useless : {
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ } :text
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ LINUX_2.6 {
+ global:
+ __kernel_vsyscall;
+ __kernel_sigreturn;
+ __kernel_rt_sigreturn;
+
+ local: *;
+ };
+}