author     David Woodhouse <dwmw2@shinybook.infradead.org>   2005-05-03 08:14:09 +0100
committer  David Woodhouse <dwmw2@shinybook.infradead.org>   2005-05-03 08:14:09 +0100
commit     27b030d58c8e72fc7a95187a791bd9406e350f02 (patch)
tree       ab3bab7f39a5ce5bab65578a7e08fa4dfdeb198c /arch/arm
parent     79d20b14a0d651f15b0ef9a22b6cf12d284a6d38 (diff)
parent     6628465e33ca694bd8fd5c3cf4eb7ff9177bc694 (diff)
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/common/rtctime.c                   29
-rw-r--r--  arch/arm/configs/ixdp2800_defconfig          2
-rw-r--r--  arch/arm/kernel/entry-armv.S               213
-rw-r--r--  arch/arm/kernel/ptrace.c                     5
-rw-r--r--  arch/arm/kernel/sys_arm.c                   12
-rw-r--r--  arch/arm/kernel/traps.c                     58
-rw-r--r--  arch/arm/mach-integrator/integrator_cp.c    17
-rw-r--r--  arch/arm/mach-integrator/time.c             17
-rw-r--r--  arch/arm/mach-ixp2000/ixdp2800.c           147
-rw-r--r--  arch/arm/mach-ixp2000/pci.c                  8
-rw-r--r--  arch/arm/mach-pxa/generic.c                 25
-rw-r--r--  arch/arm/mm/Kconfig                         14
-rw-r--r--  arch/arm/mm/abort-ev6.S                     16
-rw-r--r--  arch/arm/mm/mm-armv.c                        5
14 files changed, 505 insertions(+), 63 deletions(-)
diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c index c397e71f938..72b03f201eb 100644 --- a/arch/arm/common/rtctime.c +++ b/arch/arm/common/rtctime.c @@ -141,10 +141,10 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc next->tm_sec = alrm->tm_sec; } -static inline void rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) +static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) { memset(tm, 0, sizeof(struct rtc_time)); - ops->read_time(tm); + return ops->read_time(tm); } static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm) @@ -163,8 +163,7 @@ static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) int ret = -EINVAL; if (ops->read_alarm) { memset(alrm, 0, sizeof(struct rtc_wkalrm)); - ops->read_alarm(alrm); - ret = 0; + ret = ops->read_alarm(alrm); } return ret; } @@ -283,7 +282,9 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, break; case RTC_RD_TIME: - rtc_read_time(ops, &tm); + ret = rtc_read_time(ops, &tm); + if (ret) + break; ret = copy_to_user(uarg, &tm, sizeof(tm)); if (ret) ret = -EFAULT; @@ -424,15 +425,15 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo struct rtc_time tm; char *p = page; - rtc_read_time(ops, &tm); - - p += sprintf(p, - "rtc_time\t: %02d:%02d:%02d\n" - "rtc_date\t: %04d-%02d-%02d\n" - "rtc_epoch\t: %04lu\n", - tm.tm_hour, tm.tm_min, tm.tm_sec, - tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, - rtc_epoch); + if (rtc_read_time(ops, &tm) == 0) { + p += sprintf(p, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n" + "rtc_epoch\t: %04lu\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + rtc_epoch); + } if (rtc_read_alarm(ops, &alrm) == 0) { p += sprintf(p, "alrm_time\t: "); diff --git a/arch/arm/configs/ixdp2800_defconfig b/arch/arm/configs/ixdp2800_defconfig index d36f9919296..7be3521f91f 100644 --- a/arch/arm/configs/ixdp2800_defconfig +++ b/arch/arm/configs/ixdp2800_defconfig @@ -133,7 +133,7 @@ CONFIG_ALIGNMENT_TRAP=y # CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="console=ttyS0,9600 root=/dev/nfs ip=bootp mem=64M@0x0 pci=firmware" +CONFIG_CMDLINE="console=ttyS0,9600 root=/dev/nfs ip=bootp mem=64M@0x0" # CONFIG_XIP_KERNEL is not set # diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 2a5c3fe09a9..080df907f24 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -269,6 +269,12 @@ __pabt_svc: add r5, sp, #S_PC ldmia r7, {r2 - r4} @ Get USR pc, cpsr +#if __LINUX_ARM_ARCH__ < 6 + @ make sure our user space atomic helper is aborted + cmp r2, #VIRT_OFFSET + bichs r3, r3, #PSR_Z_BIT +#endif + @ @ We are now ready to fill in the remaining blanks on the stack: @ @@ -499,8 +505,12 @@ ENTRY(__switch_to) mra r4, r5, acc0 stmia ip, {r4, r5} #endif +#ifdef CONFIG_HAS_TLS_REG + mcr p15, 0, r3, c13, c0, 3 @ set TLS register +#else mov r4, #0xffff0fff - str r3, [r4, #-3] @ Set TLS ptr + str r3, [r4, #-15] @ TLS val at 0xffff0ff0 +#endif mcr p15, 0, r6, c3, c0, 0 @ Set domain register #ifdef CONFIG_VFP @ Always disable VFP so we can lazily save/restore the old @@ -519,6 +529,207 @@ ENTRY(__switch_to) ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously __INIT + +/* + * User helpers. + * + * These are segment of kernel provided user code reachable from user space + * at a fixed address in kernel memory. 
This is used to provide user space + * with some operations which require kernel help because of unimplemented + * native feature and/or instructions in many ARM CPUs. The idea is for + * this code to be executed directly in user mode for best efficiency but + * which is too intimate with the kernel counter part to be left to user + * libraries. In fact this code might even differ from one CPU to another + * depending on the available instruction set and restrictions like on + * SMP systems. In other words, the kernel reserves the right to change + * this code as needed without warning. Only the entry points and their + * results are guaranteed to be stable. + * + * Each segment is 32-byte aligned and will be moved to the top of the high + * vector page. New segments (if ever needed) must be added in front of + * existing ones. This mechanism should be used only for things that are + * really small and justified, and not be abused freely. + * + * User space is expected to implement those things inline when optimizing + * for a processor that has the necessary native support, but only if such + * resulting binaries are already to be incompatible with earlier ARM + * processors due to the use of unsupported instructions other than what + * is provided here. In other words don't make binaries unable to run on + * earlier processors just for the sake of not using these kernel helpers + * if your compiled code is not going to use the new instructions for other + * purpose. + */ + + .align 5 + .globl __kuser_helper_start +__kuser_helper_start: + +/* + * Reference prototype: + * + * int __kernel_cmpxchg(int oldval, int newval, int *ptr) + * + * Input: + * + * r0 = oldval + * r1 = newval + * r2 = ptr + * lr = return address + * + * Output: + * + * r0 = returned value (zero or non-zero) + * C flag = set if r0 == 0, clear if r0 != 0 + * + * Clobbered: + * + * r3, ip, flags + * + * Definition and user space usage example: + * + * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr); + * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0) + * + * Atomically store newval in *ptr if *ptr is equal to oldval for user space. + * Return zero if *ptr was changed or non-zero if no exchange happened. + * The C flag is also set if *ptr was changed to allow for assembly + * optimization in the calling code. + * + * For example, a user space atomic_add implementation could look like this: + * + * #define atomic_add(ptr, val) \ + * ({ register unsigned int *__ptr asm("r2") = (ptr); \ + * register unsigned int __result asm("r1"); \ + * asm volatile ( \ + * "1: @ atomic_add\n\t" \ + * "ldr r0, [r2]\n\t" \ + * "mov r3, #0xffff0fff\n\t" \ + * "add lr, pc, #4\n\t" \ + * "add r1, r0, %2\n\t" \ + * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \ + * "bcc 1b" \ + * : "=&r" (__result) \ + * : "r" (__ptr), "rIL" (val) \ + * : "r0","r3","ip","lr","cc","memory" ); \ + * __result; }) + */ + +__kuser_cmpxchg: @ 0xffff0fc0 + +#if __LINUX_ARM_ARCH__ < 6 + +#ifdef CONFIG_SMP /* sanity check */ +#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?" +#endif + + /* + * Theory of operation: + * + * We set the Z flag before loading oldval. If ever an exception + * occurs we can not be sure the loaded value will still be the same + * when the exception returns, therefore the user exception handler + * will clear the Z flag whenever the interrupted user code was + * actually from the kernel address space (see the usr_entry macro). 
+ * + * The post-increment on the str is used to prevent a race with an + * exception happening just after the str instruction which would + * clear the Z flag although the exchange was done. + */ + teq ip, ip @ set Z flag + ldr ip, [r2] @ load current val + add r3, r2, #1 @ prepare store ptr + teqeq ip, r0 @ compare with oldval if still allowed + streq r1, [r3, #-1]! @ store newval if still allowed + subs r0, r2, r3 @ if r2 == r3 the str occured + mov pc, lr + +#else + + ldrex r3, [r2] + subs r3, r3, r0 + strexeq r3, r1, [r2] + rsbs r0, r3, #0 + mov pc, lr + +#endif + + .align 5 + +/* + * Reference prototype: + * + * int __kernel_get_tls(void) + * + * Input: + * + * lr = return address + * + * Output: + * + * r0 = TLS value + * + * Clobbered: + * + * the Z flag might be lost + * + * Definition and user space usage example: + * + * typedef int (__kernel_get_tls_t)(void); + * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0) + * + * Get the TLS value as previously set via the __ARM_NR_set_tls syscall. + * + * This could be used as follows: + * + * #define __kernel_get_tls() \ + * ({ register unsigned int __val asm("r0"); \ + * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \ + * : "=r" (__val) : : "lr","cc" ); \ + * __val; }) + */ + +__kuser_get_tls: @ 0xffff0fe0 + +#ifndef CONFIG_HAS_TLS_REG + +#ifdef CONFIG_SMP /* sanity check */ +#error "CONFIG_SMP without CONFIG_HAS_TLS_REG is wrong" +#endif + + ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 + mov pc, lr + +#else + + mrc p15, 0, r0, c13, c0, 3 @ read TLS register + mov pc, lr + +#endif + + .rep 5 + .word 0 @ pad up to __kuser_helper_version + .endr + +/* + * Reference declaration: + * + * extern unsigned int __kernel_helper_version; + * + * Definition and user space usage example: + * + * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc) + * + * User space may read this to determine the curent number of helpers + * available. + */ + +__kuser_helper_version: @ 0xffff0ffc + .word ((__kuser_helper_end - __kuser_helper_start) >> 5) + + .globl __kuser_helper_end +__kuser_helper_end: + + /* * Vector stubs. 
* diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index efd7a341614..cd99b83f14c 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -19,6 +19,7 @@ #include <linux/user.h> #include <linux/security.h> #include <linux/init.h> +#include <linux/signal.h> #include <asm/uaccess.h> #include <asm/pgtable.h> @@ -693,7 +694,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat case PTRACE_SYSCALL: case PTRACE_CONT: ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); @@ -728,7 +729,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat */ case PTRACE_SINGLESTEP: ret = -EIO; - if ((unsigned long) data > _NSIG) + if (!valid_signal(data)) break; child->ptrace |= PT_SINGLESTEP; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 7ba6342cf93..ef32577da30 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -227,18 +227,6 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third, } } -asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg, - unsigned long __user *addr) -{ - unsigned long ret; - long err; - - err = do_shmat(shmid, shmaddr, shmflg, &ret); - if (err == 0) - err = put_user(ret, addr); - return err; -} - /* Fork a new task - this creates a new program thread. * This is called indirectly via a small wrapper */ diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0078aeb8573..3a001fe5540 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -450,13 +450,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) case NR(set_tls): thread->tp_value = regs->ARM_r0; +#ifdef CONFIG_HAS_TLS_REG + asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); +#else /* - * Our user accessible TLS ptr is located at 0xffff0ffc. - * On SMP read access to this address must raise a fault - * and be emulated from the data abort handler. - * m + * User space must never try to access this directly. + * Expect your app to break eventually if you do so. + * The user helper at 0xffff0fe0 must be used instead. + * (see entry-armv.S for details) */ - *((unsigned long *)0xffff0ffc) = thread->tp_value; + *((unsigned int *)0xffff0ff0) = regs->ARM_r0; +#endif return 0; default: @@ -493,6 +497,41 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) return 0; } +#if defined(CONFIG_CPU_32v6) && !defined(CONFIG_HAS_TLS_REG) + +/* + * We might be running on an ARMv6+ processor which should have the TLS + * register, but for some reason we can't use it and have to emulate it. 
+ */ + +static int get_tp_trap(struct pt_regs *regs, unsigned int instr) +{ + int reg = (instr >> 12) & 15; + if (reg == 15) + return 1; + regs->uregs[reg] = current_thread_info()->tp_value; + regs->ARM_pc += 4; + return 0; +} + +static struct undef_hook arm_mrc_hook = { + .instr_mask = 0x0fff0fff, + .instr_val = 0x0e1d0f70, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = 0, + .fn = get_tp_trap, +}; + +static int __init arm_mrc_hook_init(void) +{ + register_undef_hook(&arm_mrc_hook); + return 0; +} + +late_initcall(arm_mrc_hook_init); + +#endif + void __bad_xchg(volatile void *ptr, int size) { printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", @@ -580,14 +619,17 @@ void __init trap_init(void) { extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; + extern char __kuser_helper_start[], __kuser_helper_end[]; + int kuser_sz = __kuser_helper_end - __kuser_helper_start; /* - * Copy the vectors and stubs (in entry-armv.S) into the - * vector page, mapped at 0xffff0000, and ensure these are - * visible to the instruction stream. + * Copy the vectors, stubs and kuser helpers (in entry-armv.S) + * into the vector page, mapped at 0xffff0000, and ensure these + * are visible to the instruction stream. */ memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start); memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start); + memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz); flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); } diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index 68e15c36e33..3b948e8c275 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c @@ -420,7 +420,22 @@ static struct clcd_panel vga = { */ static void cp_clcd_enable(struct clcd_fb *fb) { - cm_control(CM_CTRL_LCDMUXSEL_MASK, CM_CTRL_LCDMUXSEL_VGA); + u32 val; + + if (fb->fb.var.bits_per_pixel <= 8) + val = CM_CTRL_LCDMUXSEL_VGA_8421BPP; + else if (fb->fb.var.bits_per_pixel <= 16) + val = CM_CTRL_LCDMUXSEL_VGA_16BPP; + else + val = 0; /* no idea for this, don't trust the docs */ + + cm_control(CM_CTRL_LCDMUXSEL_MASK| + CM_CTRL_LCDEN0| + CM_CTRL_LCDEN1| + CM_CTRL_STATIC1| + CM_CTRL_STATIC2| + CM_CTRL_STATIC| + CM_CTRL_n24BITEN, val); } static unsigned long framesize = SZ_1M; diff --git a/arch/arm/mach-integrator/time.c b/arch/arm/mach-integrator/time.c index 20729de2af2..1a844ca139e 100644 --- a/arch/arm/mach-integrator/time.c +++ b/arch/arm/mach-integrator/time.c @@ -40,25 +40,32 @@ static int integrator_set_rtc(void) return 1; } -static void rtc_read_alarm(struct rtc_wkalrm *alrm) +static int rtc_read_alarm(struct rtc_wkalrm *alrm) { rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time); + return 0; } -static int rtc_set_alarm(struct rtc_wkalrm *alrm) +static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) { unsigned long time; int ret; - ret = rtc_tm_to_time(&alrm->time, &time); + /* + * At the moment, we can only deal with non-wildcarded alarm times. 
+ */ + ret = rtc_valid_tm(&alrm->time); + if (ret == 0) + ret = rtc_tm_to_time(&alrm->time, &time); if (ret == 0) writel(time, rtc_base + RTC_MR); return ret; } -static void rtc_read_time(struct rtc_time *tm) +static int rtc_read_time(struct rtc_time *tm) { rtc_time_to_tm(readl(rtc_base + RTC_DR), tm); + return 0; } /* @@ -69,7 +76,7 @@ static void rtc_read_time(struct rtc_time *tm) * edge of the 1Hz clock, we must write the time one second * in advance. */ -static int rtc_set_time(struct rtc_time *tm) +static inline int rtc_set_time(struct rtc_time *tm) { unsigned long time; int ret; diff --git a/arch/arm/mach-ixp2000/ixdp2800.c b/arch/arm/mach-ixp2000/ixdp2800.c index c4683aaff84..aec13c7108a 100644 --- a/arch/arm/mach-ixp2000/ixdp2800.c +++ b/arch/arm/mach-ixp2000/ixdp2800.c @@ -65,19 +65,102 @@ static struct sys_timer ixdp2800_timer = { /************************************************************************* * IXDP2800 PCI *************************************************************************/ +static void __init ixdp2800_slave_disable_pci_master(void) +{ + *IXP2000_PCI_CMDSTAT &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); +} + +static void __init ixdp2800_master_wait_for_slave(void) +{ + volatile u32 *addr; + + printk(KERN_INFO "IXDP2800: waiting for slave NPU to configure " + "its BAR sizes\n"); + + addr = ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, + PCI_BASE_ADDRESS_1); + do { + *addr = 0xffffffff; + cpu_relax(); + } while (*addr != 0xfe000008); + + addr = ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, + PCI_BASE_ADDRESS_2); + do { + *addr = 0xffffffff; + cpu_relax(); + } while (*addr != 0xc0000008); + + /* + * Configure the slave's SDRAM BAR by hand. + */ + *addr = 0x40000008; +} + +static void __init ixdp2800_slave_wait_for_master_enable(void) +{ + printk(KERN_INFO "IXDP2800: waiting for master NPU to enable us\n"); + + while ((*IXP2000_PCI_CMDSTAT & PCI_COMMAND_MASTER) == 0) + cpu_relax(); +} + void __init ixdp2800_pci_preinit(void) { printk("ixdp2x00_pci_preinit called\n"); - *IXP2000_PCI_ADDR_EXT = 0x0000e000; + *IXP2000_PCI_ADDR_EXT = 0x0001e000; + + if (!ixdp2x00_master_npu()) + ixdp2800_slave_disable_pci_master(); - *IXP2000_PCI_DRAM_BASE_ADDR_MASK = (0x40000000 - 1) & ~0xfffff; *IXP2000_PCI_SRAM_BASE_ADDR_MASK = (0x2000000 - 1) & ~0x3ffff; + *IXP2000_PCI_DRAM_BASE_ADDR_MASK = (0x40000000 - 1) & ~0xfffff; ixp2000_pci_preinit(); + + if (ixdp2x00_master_npu()) { + /* + * Wait until the slave set its SRAM/SDRAM BAR sizes + * correctly before we proceed to scan and enumerate + * the bus. + */ + ixdp2800_master_wait_for_slave(); + + /* + * We configure the SDRAM BARs by hand because they + * are 1G and fall outside of the regular allocated + * PCI address space. + */ + *IXP2000_PCI_SDRAM_BAR = 0x00000008; + } else { + /* + * Wait for the master to complete scanning the bus + * and assigning resources before we proceed to scan + * the bus ourselves. Set pci=firmware to honor the + * master's resource assignment. + */ + ixdp2800_slave_wait_for_master_enable(); + pcibios_setup("firmware"); + } } -int ixdp2800_pci_setup(int nr, struct pci_sys_data *sys) +/* + * We assign the SDRAM BARs for the two IXP2800 CPUs by hand, outside + * of the regular PCI window, because there's only 512M of outbound PCI + * memory window on each IXP, while we need 1G for each of the BARs. 
+ */ +static void __devinit ixp2800_pci_fixup(struct pci_dev *dev) +{ + if (machine_is_ixdp2800()) { + dev->resource[2].start = 0; + dev->resource[2].end = 0; + dev->resource[2].flags = 0; + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP2800, ixp2800_pci_fixup); + +static int __init ixdp2800_pci_setup(int nr, struct pci_sys_data *sys) { sys->mem_offset = 0x00000000; @@ -129,22 +212,47 @@ static int __init ixdp2800_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) } else return IRQ_IXP2000_PCIB; /* Slave NIC interrupt */ } -static void ixdp2800_pci_postinit(void) +static void __init ixdp2800_master_enable_slave(void) { - struct pci_dev *dev; + volatile u32 *addr; - if (ixdp2x00_master_npu()) { - dev = pci_find_slot(1, IXDP2800_SLAVE_ENET_DEVFN); - pci_remove_bus_device(dev); - } else { - dev = pci_find_slot(1, IXDP2800_MASTER_ENET_DEVFN); - pci_remove_bus_device(dev); + printk(KERN_INFO "IXDP2800: enabling slave NPU\n"); + + addr = (volatile u32 *)ixp2000_pci_config_addr(0, + IXDP2X00_SLAVE_NPU_DEVFN, + PCI_COMMAND); + + *addr |= PCI_COMMAND_MASTER; +} +static void __init ixdp2800_master_wait_for_slave_bus_scan(void) +{ + volatile u32 *addr; + + printk(KERN_INFO "IXDP2800: waiting for slave to finish bus scan\n"); + + addr = (volatile u32 *)ixp2000_pci_config_addr(0, + IXDP2X00_SLAVE_NPU_DEVFN, + PCI_COMMAND); + while ((*addr & PCI_COMMAND_MEMORY) == 0) + cpu_relax(); +} + +static void __init ixdp2800_slave_signal_bus_scan_completion(void) +{ + printk(KERN_INFO "IXDP2800: bus scan done, signaling master\n"); + *IXP2000_PCI_CMDSTAT |= PCI_COMMAND_MEMORY; +} + +static void __init ixdp2800_pci_postinit(void) +{ + if (!ixdp2x00_master_npu()) { ixdp2x00_slave_pci_postinit(); + ixdp2800_slave_signal_bus_scan_completion(); } } -struct hw_pci ixdp2800_pci __initdata = { +struct __initdata hw_pci ixdp2800_pci __initdata = { .nr_controllers = 1, .setup = ixdp2800_pci_setup, .preinit = ixdp2800_pci_preinit, @@ -155,8 +263,21 @@ struct hw_pci ixdp2800_pci __initdata = { int __init ixdp2800_pci_init(void) { - if (machine_is_ixdp2800()) + if (machine_is_ixdp2800()) { + struct pci_dev *dev; + pci_common_init(&ixdp2800_pci); + if (ixdp2x00_master_npu()) { + dev = pci_find_slot(1, IXDP2800_SLAVE_ENET_DEVFN); + pci_remove_bus_device(dev); + + ixdp2800_master_enable_slave(); + ixdp2800_master_wait_for_slave_bus_scan(); + } else { + dev = pci_find_slot(1, IXDP2800_MASTER_ENET_DEVFN); + pci_remove_bus_device(dev); + } + } return 0; } diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c index 831f8ffb6b6..5ff2f2718c5 100644 --- a/arch/arm/mach-ixp2000/pci.c +++ b/arch/arm/mach-ixp2000/pci.c @@ -37,7 +37,7 @@ static int pci_master_aborts = 0; static int clear_master_aborts(void); -static u32 * +u32 * ixp2000_pci_config_addr(unsigned int bus_nr, unsigned int devfn, int where) { u32 *paddress; @@ -208,15 +208,15 @@ ixp2000_pci_preinit(void) * use our own resource space. 
*/ static struct resource ixp2000_pci_mem_space = { - .start = 0x00000000, + .start = 0xe0000000, .end = 0xffffffff, .flags = IORESOURCE_MEM, .name = "PCI Mem Space" }; static struct resource ixp2000_pci_io_space = { - .start = 0x00000000, - .end = 0xffffffff, + .start = 0x00010000, + .end = 0x0001ffff, .flags = IORESOURCE_IO, .name = "PCI I/O Space" }; diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index b1575b8dc1c..a45aaa115a7 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c @@ -220,6 +220,30 @@ static struct platform_device stuart_device = { .id = 2, }; +static struct resource i2c_resources[] = { + { + .start = 0x40301680, + .end = 0x403016a3, + .flags = IORESOURCE_MEM, + }, { + .start = IRQ_I2C, + .end = IRQ_I2C, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device i2c_device = { + .name = "pxa2xx-i2c", + .id = 0, + .resource = i2c_resources, + .num_resources = ARRAY_SIZE(i2c_resources), +}; + +void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info) +{ + i2c_device.dev.platform_data = info; +} + static struct platform_device *devices[] __initdata = { &pxamci_device, &udc_device, @@ -227,6 +251,7 @@ static struct platform_device *devices[] __initdata = { &ffuart_device, &btuart_device, &stuart_device, + &i2c_device, }; static int __init pxa_init(void) diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 5b670c9ac5e..007766a0644 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -409,3 +409,17 @@ config CPU_BPREDICT_DISABLE depends on CPU_ARM1020 help Say Y here to disable branch prediction. If unsure, say N. + +config HAS_TLS_REG + bool + depends on CPU_32v6 && !CPU_32v5 && !CPU_32v4 && !CPU_32v3 + help + This selects support for the CP15 thread register. + It is defined to be available on ARMv6 or later. However + if the kernel is configured to support multiple CPUs including + a pre-ARMv6 processors, or if a given ARMv6 processor doesn't + implement the thread register for some reason, then access to + this register from user space must be trapped and emulated. + If user space is relying on the __kuser_get_tls code then + there should not be any impact. + diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 38b2cbb89be..8f76f3df7b4 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -1,5 +1,6 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include "abort-macro.S" /* * Function: v6_early_abort * @@ -13,11 +14,26 @@ * : sp = pointer to registers * * Purpose : obtain information about current aborted instruction. + * Note: we read user space. This means we might cause a data + * abort here if the I-TLB and D-TLB aren't seeing the same + * picture. Unfortunately, this does happen. We live with it. */ .align 5 ENTRY(v6_early_abort) mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR +/* + * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR. + * The test below covers all the write situations, including Java bytecodes + */ + bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR + tst r3, #PSR_J_BIT @ Java? + movne pc, lr + do_thumb_abort + ldreq r3, [r2] @ read aborted ARM instruction + do_ldrd_abort + tst r3, #1 << 20 @ L = 0 -> write + orreq r1, r1, #1 << 11 @ yes. 
mov pc, lr diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c index f5a87db8b49..585dfb8e20b 100644 --- a/arch/arm/mm/mm-armv.c +++ b/arch/arm/mm/mm-armv.c @@ -411,9 +411,10 @@ static void __init build_mem_type_table(void) mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4; mem_types[MT_ROM].prot_sect &= ~PMD_BIT4; /* - * Mark cache clean areas read only from SVC mode - * and no access from userspace. + * Mark cache clean areas and XIP ROM read only + * from SVC mode and no access from userspace. */ + mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; } |
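
With the rtctime.c hunks above, the low-level read_time/read_alarm operations now return an error code, and RTC_RD_TIME propagates that error to the caller instead of silently handing back a zeroed time. A minimal user-space sketch of the corresponding check (not part of this commit; it assumes the usual /dev/rtc node and the RTC_RD_TIME ioctl from <linux/rtc.h>):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int read_rtc_time(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc", O_RDONLY);

	if (fd < 0)
		return -1;

	/* with this patch the ioctl fails if ops->read_time() reports an error */
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("RTC_RD_TIME");
		close(fd);
		return -1;
	}

	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}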
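
The __kuser_cmpxchg helper added to entry-armv.S is documented above with an inline-assembly atomic_add; the same ABI (the typedef, the fixed address 0xffff0fc0, and the zero return when the store happened) can also be used from plain C. This is only an illustrative sketch of the documented interface, not code from the commit:

/* ABI taken from the __kuser_cmpxchg comment block: returns zero (and
 * sets the C flag) when *ptr was changed from oldval to newval. */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

static inline void user_atomic_add(int *ptr, int val)
{
	int old;

	do {
		/* re-read and retry until the helper confirms the store */
		old = *ptr;
	} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
}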
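
Likewise, the __kuser_get_tls entry point (0xffff0fe0) and the helper version word (0xffff0ffc, __kernel_helper_version in the reference declaration) can be read through the addresses given in the comments. With the two 32-byte helper segments installed by this patch the version word evaluates to 2 ((__kuser_helper_end - __kuser_helper_start) >> 5). A hedged sketch, not part of the commit:

typedef int (__kernel_get_tls_t)(void);
#define __kernel_get_tls        (*(__kernel_get_tls_t *)0xffff0fe0)
#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)

/* returns the value set earlier via the __ARM_NR_set_tls syscall */
static void *user_get_thread_pointer(void)
{
	return (void *)__kernel_get_tls();
}

/* the version word counts 32-byte helper segments; this patch installs two */
static int kernel_provides_get_tls(void)
{
	return __kernel_helper_version >= 2;
}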
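
Finally, the undef_hook registered in traps.c (instr_mask 0x0fff0fff, instr_val 0x0e1d0f70) emulates the ARMv6 thread-register read on kernels built without CONFIG_HAS_TLS_REG. The trapped instruction is the one an ARMv6-optimised user-space runtime would issue directly; a hypothetical snippet showing that read (not from the commit):

static inline unsigned long read_tls_via_cp15(void)
{
	unsigned long tp;

	/* mrc p15, 0, rN, c13, c0, 3 -- matches the pattern that
	 * get_tp_trap() emulates when the TLS register is unavailable */
	asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
	return tp;
}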