diff options
Diffstat (limited to 'arch')
612 files changed, 7404 insertions, 13120 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 60219bf9419..ca2da8da6e9 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -6,6 +6,7 @@ config ALPHA select HAVE_OPROFILE select HAVE_SYSCALL_WRAPPERS select HAVE_IRQ_WORK + select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS select HAVE_DMA_ATTRS select HAVE_GENERIC_HARDIRQS diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h deleted file mode 100644 index a71c9c1455a..00000000000 --- a/arch/alpha/include/asm/8253pit.h +++ /dev/null @@ -1,3 +0,0 @@ -/* - * 8253/8254 Programmable Interval Timer - */ diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h index 8af56ce346a..445dc42e033 100644 --- a/arch/alpha/include/asm/mmzone.h +++ b/arch/alpha/include/asm/mmzone.h @@ -56,7 +56,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) * Given a kernel address, find the home node of the underlying memory. */ #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) /* * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 376f2213079..326f0a2d56e 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) return -EFAULT; len = namelen; - if (namelen > 32) + if (len > 32) len = 32; down_read(&uts_sem); @@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; - if (len > count) + if ((unsigned long)len > (unsigned long)count) len = count; if (copy_to_user(buf, res, len)) err = -EFAULT; @@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, return 1; case GSI_GET_HWRPB: - if (nbytes < sizeof(*hwrpb)) + if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; @@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, { struct rusage r; long ret, err; + unsigned int status = 0; mm_segment_t old_fs; if (!ur) @@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, old_fs = get_fs(); set_fs (KERNEL_DS); - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); + ret = sys_wait4(pid, (unsigned int __user *) &status, options, + (struct rusage __user *) &r); set_fs (old_fs); if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) return -EFAULT; err = 0; + err |= put_user(status, ustatus); err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 90561c45e7d..8e47709160f 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr, data.period = event->hw.last_period; if (alpha_perf_event_set_period(event, hwc, idx)) { - if (perf_event_overflow(event, 1, &data, regs)) { + if (perf_event_overflow(event, &data, regs)) { /* Interrupts coming too quickly; "throttle" the * counter, i.e., disable it for a little while. 
*/ diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c index 8de1046fe91..f33648e4e8c 100644 --- a/arch/alpha/kernel/sys_ruffian.c +++ b/arch/alpha/kernel/sys_ruffian.c @@ -26,7 +26,6 @@ #include <asm/pgtable.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> -#include <asm/8253pit.h> #include "proto.h" #include "irq_impl.h" diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c index 818e74ed45d..e336694ca04 100644 --- a/arch/alpha/kernel/time.c +++ b/arch/alpha/kernel/time.c @@ -46,7 +46,6 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/hwrpb.h> -#include <asm/8253pit.h> #include <asm/rtc.h> #include <linux/mc146818rtc.h> @@ -91,7 +90,7 @@ DEFINE_PER_CPU(u8, irq_work_pending); #define test_irq_work_pending() __get_cpu_var(irq_work_pending) #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 -void set_irq_work_pending(void) +void arch_irq_work_raise(void) { set_irq_work_pending_flag(); } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9adc278a22a..e04fa9d7637 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -642,6 +642,7 @@ config ARCH_SHMOBILE select NO_IOPORT select SPARSE_IRQ select MULTI_IRQ_HANDLER + select PM_GENERIC_DOMAINS if PM help Support for Renesas's SH-Mobile and R-Mobile ARM platforms. diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index f9da41921c5..940b2017810 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -597,6 +597,8 @@ __common_mmu_cache_on: sub pc, lr, r0, lsr #32 @ properly flush pipeline #endif +#define PROC_ENTRY_SIZE (4*5) + /* * Here follow the relocatable cache support functions for the * various processors. This is a generic hook for locating an @@ -624,7 +626,7 @@ call_cache_fn: adr r12, proc_types ARM( addeq pc, r12, r3 ) @ call cache function THUMB( addeq r12, r3 ) THUMB( moveq pc, r12 ) @ call cache function - add r12, r12, #4*5 + add r12, r12, #PROC_ENTRY_SIZE b 1b /* @@ -691,9 +693,9 @@ proc_types: .word 0x41069260 @ ARM926EJ-S (v5TEJ) .word 0xff0ffff0 - b __arm926ejs_mmu_cache_on - b __armv4_mmu_cache_off - b __armv5tej_mmu_cache_flush + W(b) __arm926ejs_mmu_cache_on + W(b) __armv4_mmu_cache_off + W(b) __armv5tej_mmu_cache_flush .word 0x00007000 @ ARM7 IDs .word 0x0000f000 @@ -794,6 +796,16 @@ proc_types: .size proc_types, . - proc_types + /* + * If you get a "non-constant expression in ".if" statement" + * error from the assembler on this line, check that you have + * not accidentally written a "b" instruction where you should + * have written W(b). + */ + .if (. - proc_types) % PROC_ENTRY_SIZE != 0 + .error "The size of one or more proc_types entries is wrong." + .endif + /* * Turn off the Cache and MMU. ARMv3 does not support * reading the control register, but ARMv4 does. 
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index e5681636626..841df7d21c2 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -255,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, if (buf == 0) { dev_err(dev, "%s: unable to map unsafe buffer %p!\n", __func__, ptr); - return 0; + return ~0; } dev_dbg(dev, diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index 889922ad229..67b5abb6f85 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -157,7 +157,7 @@ CONFIG_LEDS_GPIO=m CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_XFS_FS=m diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig index 47ad3b1a4fe..5a584520db2 100644 --- a/arch/arm/configs/mmp2_defconfig +++ b/arch/arm/configs/mmp2_defconfig @@ -8,6 +8,7 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_ARCH_MMP=y +CONFIG_MACH_BROWNSTONE=y CONFIG_MACH_FLINT=y CONFIG_MACH_MARVELL_JASPER=y CONFIG_HIGH_RES_TIMERS=y @@ -63,10 +64,16 @@ CONFIG_BACKLIGHT_MAX8925=y # CONFIG_USB_SUPPORT is not set CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_MAX8925=y +CONFIG_MMC=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y +CONFIG_MSDOS_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y @@ -81,7 +88,7 @@ CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_PREEMPT is not set CONFIG_DEBUG_INFO=y # CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_DYNAMIC_DEBUG=y +# CONFIG_DYNAMIC_DEBUG is not set CONFIG_DEBUG_USER=y CONFIG_DEBUG_ERRORS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/arm/configs/netx_defconfig b/arch/arm/configs/netx_defconfig index 316af5479d9..9c0ad799398 100644 --- a/arch/arm/configs/netx_defconfig +++ b/arch/arm/configs/netx_defconfig @@ -60,7 +60,7 @@ CONFIG_FB_ARMCLCD=y # CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_INOTIFY=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig index 8b0c717378f..1d01ddd3312 100644 --- a/arch/arm/configs/viper_defconfig +++ b/arch/arm/configs/viper_defconfig @@ -142,7 +142,7 @@ CONFIG_USB_GADGETFS=m CONFIG_USB_FILE_STORAGE=m CONFIG_USB_G_SERIAL=m CONFIG_USB_G_PRINTER=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=m CONFIG_RTC_DRV_SA1100=m CONFIG_EXT2_FS=m diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig index 5b550414364..721832ffe2d 100644 --- a/arch/arm/configs/xcep_defconfig +++ b/arch/arm/configs/xcep_defconfig @@ -73,7 +73,7 @@ CONFIG_SENSORS_MAX6650=m # CONFIG_VGA_CONSOLE is not set # CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SA1100=m CONFIG_DMADEVICES=y # CONFIG_DNOTIFY is not set diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig index 960f65514d8..59577ad3f4e 100644 --- a/arch/arm/configs/zeus_defconfig +++ b/arch/arm/configs/zeus_defconfig @@ -158,7 +158,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=m CONFIG_LEDS_TRIGGER_BACKLIGHT=m CONFIG_LEDS_TRIGGER_GPIO=m CONFIG_LEDS_TRIGGER_DEFAULT_ON=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y 
CONFIG_RTC_DRV_ISL1208=m CONFIG_RTC_DRV_PXA=m CONFIG_EXT2_FS=y diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index bc2d2d75f70..65c3f2474f5 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -13,6 +13,9 @@ * Do not include any C declarations in this file - it is included by * assembler source. */ +#ifndef __ASM_ASSEMBLER_H__ +#define __ASM_ASSEMBLER_H__ + #ifndef __ASSEMBLY__ #error "Only include this from assembly code" #endif @@ -290,3 +293,4 @@ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort .endm +#endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index ec0bbf79c71..2da8547de6d 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -1,3 +1,5 @@ +#include <asm/assembler.h> + /* * Interrupt handling. Preserves r7, r8, r9 */ diff --git a/arch/arm/include/asm/i8253.h b/arch/arm/include/asm/i8253.h deleted file mode 100644 index 70656b69d5c..00000000000 --- a/arch/arm/include/asm/i8253.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASMARM_I8253_H -#define __ASMARM_I8253_H - -/* i8253A PIT registers */ -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 - -#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) - -extern raw_spinlock_t i8253_lock; - -#define outb_pit outb_p -#define inb_pit inb_p - -#endif diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index a701e4226a6..0cdd7b456cb 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -76,6 +76,9 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) unsigned long dt_root; const char *model; + if (!dt_phys) + return NULL; + devtree = phys_to_virt(dt_phys); /* check device tree validity */ diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index e8d88567680..90c62cd51ca 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -435,6 +435,10 @@ __irq_usr: usr_entry kuser_cmpxchg_check +#ifdef CONFIG_IRQSOFF_TRACER + bl trace_hardirqs_off +#endif + get_thread_info tsk #ifdef CONFIG_PREEMPT ldr r8, [tsk, #TI_PREEMPT] @ get preempt count @@ -453,7 +457,7 @@ __irq_usr: #endif mov why, #0 - b ret_to_user + b ret_to_user_from_irq UNWIND(.fnend ) ENDPROC(__irq_usr) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 1e7b04a40a3..b2a27b6b004 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -64,6 +64,7 @@ work_resched: ENTRY(ret_to_user) ret_slow_syscall: disable_irq @ disable interrupts +ENTRY(ret_to_user_from_irq) ldr r1, [tsk, #TI_FLAGS] tst r1, #_TIF_WORK_MASK bne work_pending @@ -75,6 +76,7 @@ no_work_pending: arch_ret_to_user r1, lr restore_user_regs fast = 0, offset = 0 +ENDPROC(ret_to_user_from_irq) ENDPROC(ret_to_user) /* diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index fee7c36349e..016d6a0830a 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, offset -= 0x02000000; offset += sym->st_value - loc; - /* only Thumb addresses allowed (no interworking) */ - if (!(offset & 1) || + /* + * For function symbols, only Thumb addresses are + * allowed (no interworking). 
+ * + * For non-function symbols, the destination + * has no specific ARM/Thumb disposition, so + * the branch is resolved under the assumption + * that interworking is not required. + */ + if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC && + !(offset & 1)) || offset <= (s32)0xff000000 || offset >= (s32)0x01000000) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d53c0abc4dd..2b5b1421596 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event) static void armpmu_enable(struct pmu *pmu) { /* Enable all of the perf events on hardware. */ - int idx; + int idx, enabled = 0; struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (!armpmu) @@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu) continue; armpmu->enable(&event->hw, idx); + enabled = 1; } - armpmu->start(); + if (enabled) + armpmu->start(); } static void armpmu_disable(struct pmu *pmu) diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index f1e8dd94afe..dd7f3b9f4cb 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -173,6 +173,20 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, }; enum armv6mpcore_perf_types { @@ -310,6 +324,20 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, }; static inline unsigned long @@ -479,7 +507,7 @@ armv6pmu_handle_irq(int irq_num, if (!armpmu_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 0, &data, regs)) + if (perf_event_overflow(event, &data, regs)) armpmu->disable(hwc, idx); } diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 4960686afb5..e20ca9cafef 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -255,6 +255,20 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, }; /* @@ -371,6 +385,20 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + 
[C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, }; /* @@ -787,7 +815,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) if (!armpmu_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 0, &data, regs)) + if (perf_event_overflow(event, &data, regs)) armpmu->disable(hwc, idx); } diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 39affbe4fdb..3c4397491d0 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -144,6 +144,20 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, }; #define XSCALE_PMU_ENABLE 0x001 @@ -251,7 +265,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) if (!armpmu_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 0, &data, regs)) + if (perf_event_overflow(event, &data, regs)) armpmu->disable(hwc, idx); } @@ -583,7 +597,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev) if (!armpmu_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 0, &data, regs)) + if (perf_event_overflow(event, &data, regs)) armpmu->disable(hwc, idx); } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 97260060bf2..5c199610719 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx) /* * Handle hitting a HW-breakpoint. */ -static void ptrace_hbptriggered(struct perf_event *bp, int unused, +static void ptrace_hbptriggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { @@ -479,7 +479,8 @@ static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type) attr.bp_type = type; attr.disabled = 1; - return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk); + return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, + tsk); } static int ptrace_gethbpregs(struct task_struct *tsk, long num, diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ed11fb08b05..acbb447ac6b 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup); #endif extern void paging_init(struct machine_desc *desc); +extern void sanity_check_meminfo(void); extern void reboot_setup(char *str); unsigned int processor_id; @@ -900,6 +901,7 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); paging_init(mdesc); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 344e52b16c8..e7f92a4321f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void) smp_store_cpu_info(cpu); /* - * OK, now it's safe to let the boot CPU continue + * OK, now it's safe to let the boot CPU continue. Wait for + * the CPU migration code to notice that the CPU is online + * before we continue. 
*/ set_cpu_online(cpu, true); + while (!cpu_active(cpu)) + cpu_relax(); /* * OK, it's off to the idle thread for us diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 60636f499cb..2c277d40cee 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void) twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, - (twd_timer_rate / 1000000) % 100); + (twd_timer_rate / 10000) % 100); } } diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 40ee7e5045e..5f452f8fde0 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr) unsigned int address, destreg, data, type; unsigned int res = 0; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); if (current->pid != previous_pid) { pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index d52eec268b4..6807cb1e76d 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -139,7 +139,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) fs = get_fs(); set_fs(KERNEL_DS); - for (i = -4; i < 1; i++) { + for (i = -4; i < 1 + !!thumb; i++) { unsigned int val, bad; if (thumb) @@ -563,7 +563,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) if (!pmd_present(*pmd)) goto bad_access; pte = pte_offset_map_lock(mm, pmd, addr, &ptl); - if (!pte_present(*pte) || !pte_dirty(*pte)) { + if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) { pte_unmap_unlock(pte, ptl); goto bad_access; } diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c index 17fae4a42ab..f1013d08bb5 100644 --- a/arch/arm/mach-at91/at91cap9.c +++ b/arch/arm/mach-at91/at91cap9.c @@ -223,15 +223,15 @@ static struct clk *periph_clocks[] __initdata = { }; static struct clk_lookup periph_clocks_lookups[] = { - CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), - CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), + CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), + CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), }; static struct clk_lookup usart_clocks_lookups[] = { diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c index cd850ed6f33..dba0d8d8a4b 100644 --- a/arch/arm/mach-at91/at91cap9_devices.c +++ b/arch/arm/mach-at91/at91cap9_devices.c @@ -1220,7 +1220,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91cap9_set_console_clock(portnr); + at91cap9_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index b228ce9e21a..83a1a3fee55 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ 
b/arch/arm/mach-at91/at91rm9200.c @@ -199,9 +199,9 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk), - CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), + CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk), }; static struct clk_lookup usart_clocks_lookups[] = { diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index a0ba475be04..7227755ffec 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -1135,7 +1135,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91rm9200_set_console_clock(portnr); + at91rm9200_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 1fdeb9058a7..39f81f47b4b 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -1173,7 +1173,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9260_set_console_clock(portnr); + at91sam9260_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 3eb4538fcee..5004bf0a05f 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -1013,7 +1013,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9261_set_console_clock(portnr); + at91sam9261_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index ffe081b77ed..a050f41fc86 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -1395,7 +1395,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9263_set_console_clock(portnr); + at91sam9263_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 2bb6ff9af1c..11e214121b2 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c @@ -217,11 +217,11 @@ static struct clk *periph_clocks[] __initdata = { static struct clk_lookup periph_clocks_lookups[] = { /* One additional fake clock for ohci */ CLKDEV_CON_ID("ohci_clk", &uhphs_clk), - CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk), - CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), - CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), - CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), - CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk), + CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk), + CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), + CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), + CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk), + CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", 
&mmc1_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk), diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 05674865bc2..600bffb01ed 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1550,7 +1550,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9g45_set_console_clock(portnr); + at91sam9g45_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c index 1a40f16b66c..29dff18ed13 100644 --- a/arch/arm/mach-at91/at91sam9rl.c +++ b/arch/arm/mach-at91/at91sam9rl.c @@ -191,8 +191,8 @@ static struct clk *periph_clocks[] __initdata = { }; static struct clk_lookup periph_clocks_lookups[] = { - CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), - CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), + CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), + CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index c296045f2b6..aacb19dc922 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -1168,7 +1168,7 @@ void __init at91_set_serial_console(unsigned portnr) { if (portnr < ATMEL_MAX_UART) { atmel_default_console_device = at91_uarts[portnr]; - at91sam9rl_set_console_clock(portnr); + at91sam9rl_set_console_clock(at91_uarts[portnr]->id); } } diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c index 1904fdf8761..cdb65d48325 100644 --- a/arch/arm/mach-at91/board-cap9adk.c +++ b/arch/arm/mach-at91/board-cap9adk.c @@ -215,7 +215,7 @@ static void __init cap9adk_add_device_nand(void) csa = at91_sys_read(AT91_MATRIX_EBICSA); at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V); - cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit(); + cap9adk_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (cap9adk_nand_data.bus_width_16) cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c index d600dc12322..5c240743c5b 100644 --- a/arch/arm/mach-at91/board-sam9260ek.c +++ b/arch/arm/mach-at91/board-sam9260ek.c @@ -214,7 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index f897f84d43d..b60c22b6e24 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -220,7 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if 
(ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 605b26f40a4..9bbdc92ea19 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c @@ -221,7 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c index 7624cf0d006..1325a50101a 100644 --- a/arch/arm/mach-at91/board-sam9g20ek.c +++ b/arch/arm/mach-at91/board-sam9g20ek.c @@ -198,7 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c index 063c95d0e8f..33eaa135f24 100644 --- a/arch/arm/mach-at91/board-sam9m10g45ek.c +++ b/arch/arm/mach-at91/board-sam9m10g45ek.c @@ -178,7 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { static void __init ek_add_device_nand(void) { - ek_nand_data.bus_width_16 = !board_have_nand_8bit(); + ek_nand_data.bus_width_16 = board_have_nand_16bit(); /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h deleted file mode 100644 index 02182c16a02..00000000000 --- a/arch/arm/mach-at91/include/mach/at91_mci.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * arch/arm/mach-at91/include/mach/at91_mci.h - * - * Copyright (C) 2005 Ivan Kokshaysky - * Copyright (C) SAN People - * - * MultiMedia Card Interface (MCI) registers. - * Based on AT91RM9200 datasheet revision F. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef AT91_MCI_H -#define AT91_MCI_H - -#define AT91_MCI_CR 0x00 /* Control Register */ -#define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */ -#define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */ -#define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */ -#define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */ -#define AT91_MCI_SWRST (1 << 7) /* Software Reset */ - -#define AT91_MCI_MR 0x04 /* Mode Register */ -#define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */ -#define AT91_MCI_PWSDIV (7 << 8) /* Power Saving Divider */ -#define AT91_MCI_RDPROOF (1 << 11) /* Read Proof Enable [SAM926[03] only] */ -#define AT91_MCI_WRPROOF (1 << 12) /* Write Proof Enable [SAM926[03] only] */ -#define AT91_MCI_PDCFBYTE (1 << 13) /* PDC Force Byte Transfer [SAM926[03] only] */ -#define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */ -#define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */ -#define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */ - -#define AT91_MCI_DTOR 0x08 /* Data Timeout Register */ -#define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */ -#define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */ -#define AT91_MCI_DTOMUL_1 (0 << 4) -#define AT91_MCI_DTOMUL_16 (1 << 4) -#define AT91_MCI_DTOMUL_128 (2 << 4) -#define AT91_MCI_DTOMUL_256 (3 << 4) -#define AT91_MCI_DTOMUL_1K (4 << 4) -#define AT91_MCI_DTOMUL_4K (5 << 4) -#define AT91_MCI_DTOMUL_64K (6 << 4) -#define AT91_MCI_DTOMUL_1M (7 << 4) - -#define AT91_MCI_SDCR 0x0c /* SD Card Register */ -#define AT91_MCI_SDCSEL (3 << 0) /* SD Card Selector */ -#define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */ - -#define AT91_MCI_ARGR 0x10 /* Argument Register */ - -#define AT91_MCI_CMDR 0x14 /* Command Register */ -#define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */ -#define AT91_MCI_RSPTYP (3 << 6) /* Response Type */ -#define AT91_MCI_RSPTYP_NONE (0 << 6) -#define AT91_MCI_RSPTYP_48 (1 << 6) -#define AT91_MCI_RSPTYP_136 (2 << 6) -#define AT91_MCI_SPCMD (7 << 8) /* Special Command */ -#define AT91_MCI_SPCMD_NONE (0 << 8) -#define AT91_MCI_SPCMD_INIT (1 << 8) -#define AT91_MCI_SPCMD_SYNC (2 << 8) -#define AT91_MCI_SPCMD_ICMD (4 << 8) -#define AT91_MCI_SPCMD_IRESP (5 << 8) -#define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */ -#define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */ -#define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */ -#define AT91_MCI_TRCMD_NONE (0 << 16) -#define AT91_MCI_TRCMD_START (1 << 16) -#define AT91_MCI_TRCMD_STOP (2 << 16) -#define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */ -#define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */ -#define AT91_MCI_TRTYP_BLOCK (0 << 19) -#define AT91_MCI_TRTYP_MULTIPLE (1 << 19) -#define AT91_MCI_TRTYP_STREAM (2 << 19) -#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19) -#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19) - -#define AT91_MCI_BLKR 0x18 /* Block Register */ -#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */ -#define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */ - -#define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */ -#define AT91_MCR_RDR 0x30 /* Receive Data Register */ -#define AT91_MCR_TDR 0x34 /* Transmit Data Register */ - -#define AT91_MCI_SR 0x40 /* Status Register */ -#define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */ -#define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */ -#define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */ -#define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */ -#define 
AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */ -#define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */ -#define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */ -#define AT91_MCI_ENDTX (1 << 7) /* End fo TX Buffer */ -#define AT91_MCI_SDIOIRQA (1 << 8) /* SDIO Interrupt for Slot A */ -#define AT91_MCI_SDIOIRQB (1 << 9) /* SDIO Interrupt for Slot B */ -#define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */ -#define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */ -#define AT91_MCI_RINDE (1 << 16) /* Response Index Error */ -#define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */ -#define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */ -#define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */ -#define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */ -#define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */ -#define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */ -#define AT91_MCI_OVRE (1 << 30) /* Overrun */ -#define AT91_MCI_UNRE (1 << 31) /* Underrun */ - -#define AT91_MCI_IER 0x44 /* Interrupt Enable Register */ -#define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */ -#define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */ - -#endif diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h index b855ee75f72..8f4866045b4 100644 --- a/arch/arm/mach-at91/include/mach/system_rev.h +++ b/arch/arm/mach-at91/include/mach/system_rev.h @@ -13,13 +13,13 @@ * the 16-31 bit are reserved for at91 generic information * * bit 31: - * 0 => nand 16 bit - * 1 => nand 8 bit + * 0 => nand 8 bit + * 1 => nand 16 bit */ -#define BOARD_HAVE_NAND_8BIT (1 << 31) -static int inline board_have_nand_8bit(void) +#define BOARD_HAVE_NAND_16BIT (1 << 31) +static inline int board_have_nand_16bit(void) { - return system_rev & BOARD_HAVE_NAND_8BIT; + return system_rev & BOARD_HAVE_NAND_16BIT; } #endif /* __ARCH_SYSTEM_REV_H__ */ diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index c67f684ee3e..09a87e61ffc 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -520,7 +520,7 @@ fail: */ if (have_imager()) { label = "HD imager"; - mux |= 1; + mux |= 2; /* externally mux MMC1/ENET/AIC33 to imager */ mux |= BIT(6) | BIT(5) | BIT(3); @@ -540,7 +540,7 @@ fail: resets &= ~BIT(1); if (have_tvp7002()) { - mux |= 2; + mux |= 1; resets &= ~BIT(2); label = "tvp7002 HD"; } else { diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 606a6f27ed6..5f5d7830887 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -20,6 +20,7 @@ #include <linux/spi/spi.h> #include <linux/spi/flash.h> +#include <asm/io.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/common.h> diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 4e66881c7ae..fc4e98ea754 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -494,7 +494,7 @@ static struct platform_device da850_mcasp_device = { .resource = da850_mcasp_resources, }; -struct platform_device davinci_pcm_device = { +static struct platform_device davinci_pcm_device = { .name = "davinci-pcm-audio", .id = -1, }; diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c index 8f4f736aa26..806a2f02b98 100644 --- a/arch/arm/mach-davinci/devices.c +++ b/arch/arm/mach-davinci/devices.c @@ -298,7 +298,7 @@ static void 
davinci_init_wdt(void) /*-------------------------------------------------------------------------*/ -struct platform_device davinci_pcm_device = { +static struct platform_device davinci_pcm_device = { .name = "davinci-pcm-audio", .id = -1, }; diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 1e0f809644b..e00d61e2efb 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -8,6 +8,7 @@ * is licensed "as is" without any warranty of any kind, whether express * or implied. */ +#include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/serial_8250.h> diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c index a0b838894ac..cafbe13a82a 100644 --- a/arch/arm/mach-davinci/gpio.c +++ b/arch/arm/mach-davinci/gpio.c @@ -252,8 +252,12 @@ static struct irq_chip gpio_irqchip = { static void gpio_irq_handler(unsigned irq, struct irq_desc *desc) { - struct davinci_gpio_regs __iomem *g = irq2regs(irq); + struct davinci_gpio_regs __iomem *g; u32 mask = 0xffff; + struct davinci_gpio_controller *d; + + d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc); + g = (struct davinci_gpio_regs __iomem *)d->regs; /* we only care about one bank */ if (irq & 1) @@ -272,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc) if (!status) break; __raw_writel(status, &g->intstat); - if (irq & 1) - status >>= 16; /* now demux them to the right lowlevel handler */ - n = (int)irq_get_handler_data(irq); + n = d->irq_base; + if (irq & 1) { + n += 16; + status >>= 16; + } + while (status) { res = ffs(status); n += res; @@ -422,8 +429,13 @@ static int __init davinci_gpio_irq_setup(void) /* set up all irqs in this bank */ irq_set_chained_handler(bank_irq, gpio_irq_handler); - irq_set_chip_data(bank_irq, (__force void *)g); - irq_set_handler_data(bank_irq, (void *)irq); + + /* + * Each chip handles 32 gpios, and each irq bank consists of 16 + * gpio irqs. Pass the irq bank's corresponding controller to + * the chained irq handler. + */ + irq_set_handler_data(bank_irq, &chips[gpio / 32]); for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { irq_set_chip(irq, &gpio_irqchip); diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c index bfe68ec4e1a..952dc126c39 100644 --- a/arch/arm/mach-davinci/irq.c +++ b/arch/arm/mach-davinci/irq.c @@ -52,8 +52,14 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) struct irq_chip_type *ct; gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); + if (!gc) { + pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", + __func__, irq_start); + return; + } + ct = gc->chip_types; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c index 1bd73a04be2..04c49f7543e 100644 --- a/arch/arm/mach-davinci/pm.c +++ b/arch/arm/mach-davinci/pm.c @@ -17,6 +17,7 @@ #include <asm/cacheflush.h> #include <asm/delay.h> +#include <asm/io.h> #include <mach/da8xx.h> #include <mach/sram.h> diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile index 33ee2c863d1..3cedcf2d39e 100644 --- a/arch/arm/mach-ep93xx/Makefile +++ b/arch/arm/mach-ep93xx/Makefile @@ -1,11 +1,13 @@ # # Makefile for the linux kernel. 
# -obj-y := core.o clock.o dma-m2p.o gpio.o +obj-y := core.o clock.o obj-m := obj-n := obj- := +obj-$(CONFIG_EP93XX_DMA) += dma.o + obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 82079545adc..c60f081e930 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c @@ -174,14 +174,10 @@ struct sys_timer ep93xx_timer = { /************************************************************************* * EP93xx IRQ handling *************************************************************************/ -extern void ep93xx_gpio_init_irq(void); - void __init ep93xx_init_irq(void) { vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0); vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0); - - ep93xx_gpio_init_irq(); } @@ -241,6 +237,24 @@ unsigned int ep93xx_chip_revision(void) } /************************************************************************* + * EP93xx GPIO + *************************************************************************/ +static struct resource ep93xx_gpio_resource[] = { + { + .start = EP93XX_GPIO_PHYS_BASE, + .end = EP93XX_GPIO_PHYS_BASE + 0xcc - 1, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device ep93xx_gpio_device = { + .name = "gpio-ep93xx", + .id = -1, + .num_resources = ARRAY_SIZE(ep93xx_gpio_resource), + .resource = ep93xx_gpio_resource, +}; + +/************************************************************************* * EP93xx peripheral handling *************************************************************************/ #define EP93XX_UART_MCR_OFFSET (0x0100) @@ -251,9 +265,9 @@ static void ep93xx_uart_set_mctrl(struct amba_device *dev, unsigned int mcr; mcr = 0; - if (!(mctrl & TIOCM_RTS)) + if (mctrl & TIOCM_RTS) mcr |= 2; - if (!(mctrl & TIOCM_DTR)) + if (mctrl & TIOCM_DTR) mcr |= 1; __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); @@ -402,11 +416,15 @@ static struct resource ep93xx_eth_resource[] = { } }; +static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32); + static struct platform_device ep93xx_eth_device = { .name = "ep93xx-eth", .id = -1, .dev = { - .platform_data = &ep93xx_eth_data, + .platform_data = &ep93xx_eth_data, + .coherent_dma_mask = DMA_BIT_MASK(32), + .dma_mask = &ep93xx_eth_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_eth_resource), .resource = ep93xx_eth_resource, @@ -488,11 +506,15 @@ static struct resource ep93xx_spi_resources[] = { }, }; +static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32); + static struct platform_device ep93xx_spi_device = { .name = "ep93xx-spi", .id = 0, .dev = { - .platform_data = &ep93xx_spi_master_data, + .platform_data = &ep93xx_spi_master_data, + .coherent_dma_mask = DMA_BIT_MASK(32), + .dma_mask = &ep93xx_spi_dma_mask, }, .num_resources = ARRAY_SIZE(ep93xx_spi_resources), .resource = ep93xx_spi_resources, @@ -866,14 +888,13 @@ void __init ep93xx_register_ac97(void) platform_device_register(&ep93xx_pcm_device); } -extern void ep93xx_gpio_init(void); - void __init ep93xx_init_devices(void) { /* Disallow access to MaverickCrunch initially */ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA); - ep93xx_gpio_init(); + /* Get the GPIO working early, other devices need it */ + platform_device_register(&ep93xx_gpio_device); amba_device_register(&uart1_device, &iomem_resource); amba_device_register(&uart2_device, &iomem_resource); diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c deleted file mode 
100644 index a696d354b1f..00000000000 --- a/arch/arm/mach-ep93xx/dma-m2p.c +++ /dev/null @@ -1,411 +0,0 @@ -/* - * arch/arm/mach-ep93xx/dma-m2p.c - * M2P DMA handling for Cirrus EP93xx chips. - * - * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> - * Copyright (C) 2006 Applied Data Systems - * - * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - */ - -/* - * On the EP93xx chip the following peripherals my be allocated to the 10 - * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). - * - * I2S contains 3 Tx and 3 Rx DMA Channels - * AAC contains 3 Tx and 3 Rx DMA Channels - * UART1 contains 1 Tx and 1 Rx DMA Channels - * UART2 contains 1 Tx and 1 Rx DMA Channels - * UART3 contains 1 Tx and 1 Rx DMA Channels - * IrDA contains 1 Tx and 1 Rx DMA Channels - * - * SSP and IDE use the Memory to Memory (M2M) channels and are not covered - * with this implementation. - */ - -#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/clk.h> -#include <linux/err.h> -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/io.h> - -#include <mach/dma.h> -#include <mach/hardware.h> - -#define M2P_CONTROL 0x00 -#define M2P_CONTROL_STALL_IRQ_EN (1 << 0) -#define M2P_CONTROL_NFB_IRQ_EN (1 << 1) -#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3) -#define M2P_CONTROL_ENABLE (1 << 4) -#define M2P_INTERRUPT 0x04 -#define M2P_INTERRUPT_STALL (1 << 0) -#define M2P_INTERRUPT_NFB (1 << 1) -#define M2P_INTERRUPT_ERROR (1 << 3) -#define M2P_PPALLOC 0x08 -#define M2P_STATUS 0x0c -#define M2P_REMAIN 0x14 -#define M2P_MAXCNT0 0x20 -#define M2P_BASE0 0x24 -#define M2P_MAXCNT1 0x30 -#define M2P_BASE1 0x34 - -#define STATE_IDLE 0 /* Channel is inactive. */ -#define STATE_STALL 1 /* Channel is active, no buffers pending. */ -#define STATE_ON 2 /* Channel is active, one buffer pending. */ -#define STATE_NEXT 3 /* Channel is active, two buffers pending. 
*/ - -struct m2p_channel { - char *name; - void __iomem *base; - int irq; - - struct clk *clk; - spinlock_t lock; - - void *client; - unsigned next_slot:1; - struct ep93xx_dma_buffer *buffer_xfer; - struct ep93xx_dma_buffer *buffer_next; - struct list_head buffers_pending; -}; - -static struct m2p_channel m2p_rx[] = { - {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1}, - {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3}, - {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5}, - {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7}, - {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9}, - {NULL}, -}; - -static struct m2p_channel m2p_tx[] = { - {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0}, - {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2}, - {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4}, - {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6}, - {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8}, - {NULL}, -}; - -static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf) -{ - if (ch->next_slot == 0) { - writel(buf->size, ch->base + M2P_MAXCNT0); - writel(buf->bus_addr, ch->base + M2P_BASE0); - } else { - writel(buf->size, ch->base + M2P_MAXCNT1); - writel(buf->bus_addr, ch->base + M2P_BASE1); - } - ch->next_slot ^= 1; -} - -static void choose_buffer_xfer(struct m2p_channel *ch) -{ - struct ep93xx_dma_buffer *buf; - - ch->buffer_xfer = NULL; - if (!list_empty(&ch->buffers_pending)) { - buf = list_entry(ch->buffers_pending.next, - struct ep93xx_dma_buffer, list); - list_del(&buf->list); - feed_buf(ch, buf); - ch->buffer_xfer = buf; - } -} - -static void choose_buffer_next(struct m2p_channel *ch) -{ - struct ep93xx_dma_buffer *buf; - - ch->buffer_next = NULL; - if (!list_empty(&ch->buffers_pending)) { - buf = list_entry(ch->buffers_pending.next, - struct ep93xx_dma_buffer, list); - list_del(&buf->list); - feed_buf(ch, buf); - ch->buffer_next = buf; - } -} - -static inline void m2p_set_control(struct m2p_channel *ch, u32 v) -{ - /* - * The control register must be read immediately after being written so - * that the internal state machine is correctly updated. See the ep93xx - * users' guide for details. 
- */ - writel(v, ch->base + M2P_CONTROL); - readl(ch->base + M2P_CONTROL); -} - -static inline int m2p_channel_state(struct m2p_channel *ch) -{ - return (readl(ch->base + M2P_STATUS) >> 4) & 0x3; -} - -static irqreturn_t m2p_irq(int irq, void *dev_id) -{ - struct m2p_channel *ch = dev_id; - struct ep93xx_dma_m2p_client *cl; - u32 irq_status, v; - int error = 0; - - cl = ch->client; - - spin_lock(&ch->lock); - irq_status = readl(ch->base + M2P_INTERRUPT); - - if (irq_status & M2P_INTERRUPT_ERROR) { - writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT); - error = 1; - } - - if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) { - spin_unlock(&ch->lock); - return IRQ_NONE; - } - - switch (m2p_channel_state(ch)) { - case STATE_IDLE: - pr_crit("dma interrupt without a dma buffer\n"); - BUG(); - break; - - case STATE_STALL: - cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); - if (ch->buffer_next != NULL) { - cl->buffer_finished(cl->cookie, ch->buffer_next, - 0, error); - } - choose_buffer_xfer(ch); - choose_buffer_next(ch); - if (ch->buffer_xfer != NULL) - cl->buffer_started(cl->cookie, ch->buffer_xfer); - break; - - case STATE_ON: - cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error); - ch->buffer_xfer = ch->buffer_next; - choose_buffer_next(ch); - cl->buffer_started(cl->cookie, ch->buffer_xfer); - break; - - case STATE_NEXT: - pr_crit("dma interrupt while next\n"); - BUG(); - break; - } - - v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN | - M2P_CONTROL_NFB_IRQ_EN); - if (ch->buffer_xfer != NULL) - v |= M2P_CONTROL_STALL_IRQ_EN; - if (ch->buffer_next != NULL) - v |= M2P_CONTROL_NFB_IRQ_EN; - m2p_set_control(ch, v); - - spin_unlock(&ch->lock); - return IRQ_HANDLED; -} - -static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch; - int i; - - if (cl->flags & EP93XX_DMA_M2P_RX) - ch = m2p_rx; - else - ch = m2p_tx; - - for (i = 0; ch[i].base; i++) { - struct ep93xx_dma_m2p_client *client; - - client = ch[i].client; - if (client != NULL) { - int port; - - port = cl->flags & EP93XX_DMA_M2P_PORT_MASK; - if (port == (client->flags & - EP93XX_DMA_M2P_PORT_MASK)) { - pr_warning("DMA channel already used by %s\n", - cl->name ? : "unknown client"); - return ERR_PTR(-EBUSY); - } - } - } - - for (i = 0; ch[i].base; i++) { - if (ch[i].client == NULL) - return ch + i; - } - - pr_warning("No free DMA channel for %s\n", - cl->name ? : "unknown client"); - return ERR_PTR(-ENODEV); -} - -static void channel_enable(struct m2p_channel *ch) -{ - struct ep93xx_dma_m2p_client *cl = ch->client; - u32 v; - - clk_enable(ch->clk); - - v = cl->flags & EP93XX_DMA_M2P_PORT_MASK; - writel(v, ch->base + M2P_PPALLOC); - - v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK; - v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN; - m2p_set_control(ch, v); -} - -static void channel_disable(struct m2p_channel *ch) -{ - u32 v; - - v = readl(ch->base + M2P_CONTROL); - v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN); - m2p_set_control(ch, v); - - while (m2p_channel_state(ch) >= STATE_ON) - cpu_relax(); - - m2p_set_control(ch, 0x0); - - while (m2p_channel_state(ch) == STATE_STALL) - cpu_relax(); - - clk_disable(ch->clk); -} - -int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch; - int err; - - ch = find_free_channel(cl); - if (IS_ERR(ch)) - return PTR_ERR(ch); - - err = request_irq(ch->irq, m2p_irq, 0, cl->name ? 
: "dma-m2p", ch); - if (err) - return err; - - ch->client = cl; - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - - cl->channel = ch; - - channel_enable(ch); - - return 0; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register); - -void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - free_irq(ch->irq, ch); - ch->client = NULL; -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister); - -void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - unsigned long flags; - u32 v; - - spin_lock_irqsave(&ch->lock, flags); - v = readl(ch->base + M2P_CONTROL); - if (ch->buffer_xfer == NULL) { - ch->buffer_xfer = buf; - feed_buf(ch, buf); - cl->buffer_started(cl->cookie, buf); - - v |= M2P_CONTROL_STALL_IRQ_EN; - m2p_set_control(ch, v); - - } else if (ch->buffer_next == NULL) { - ch->buffer_next = buf; - feed_buf(ch, buf); - - v |= M2P_CONTROL_NFB_IRQ_EN; - m2p_set_control(ch, v); - } else { - list_add_tail(&buf->list, &ch->buffers_pending); - } - spin_unlock_irqrestore(&ch->lock, flags); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit); - -void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl, - struct ep93xx_dma_buffer *buf) -{ - struct m2p_channel *ch = cl->channel; - - list_add_tail(&buf->list, &ch->buffers_pending); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive); - -void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl) -{ - struct m2p_channel *ch = cl->channel; - - channel_disable(ch); - ch->next_slot = 0; - ch->buffer_xfer = NULL; - ch->buffer_next = NULL; - INIT_LIST_HEAD(&ch->buffers_pending); - channel_enable(ch); -} -EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush); - -static int init_channel(struct m2p_channel *ch) -{ - ch->clk = clk_get(NULL, ch->name); - if (IS_ERR(ch->clk)) - return PTR_ERR(ch->clk); - - spin_lock_init(&ch->lock); - ch->client = NULL; - - return 0; -} - -static int __init ep93xx_dma_m2p_init(void) -{ - int i; - int ret; - - for (i = 0; m2p_rx[i].base; i++) { - ret = init_channel(m2p_rx + i); - if (ret) - return ret; - } - - for (i = 0; m2p_tx[i].base; i++) { - ret = init_channel(m2p_tx + i); - if (ret) - return ret; - } - - pr_info("M2P DMA subsystem initialized\n"); - return 0; -} -arch_initcall(ep93xx_dma_m2p_init); diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c new file mode 100644 index 00000000000..5a257088125 --- /dev/null +++ b/arch/arm/mach-ep93xx/dma.c @@ -0,0 +1,108 @@ +/* + * arch/arm/mach-ep93xx/dma.c + * + * Platform support code for the EP93xx dmaengine driver. + * + * Copyright (C) 2011 Mika Westerberg + * + * This work is based on the original dma-m2p implementation with + * following copyrights: + * + * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> + * Copyright (C) 2006 Applied Data Systems + * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ */ + +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include <mach/dma.h> +#include <mach/hardware.h> + +#define DMA_CHANNEL(_name, _base, _irq) \ + { .name = (_name), .base = (_base), .irq = (_irq) } + +/* + * DMA M2P channels. + * + * On the EP93xx chip the following peripherals my be allocated to the 10 + * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive). + * + * I2S contains 3 Tx and 3 Rx DMA Channels + * AAC contains 3 Tx and 3 Rx DMA Channels + * UART1 contains 1 Tx and 1 Rx DMA Channels + * UART2 contains 1 Tx and 1 Rx DMA Channels + * UART3 contains 1 Tx and 1 Rx DMA Channels + * IrDA contains 1 Tx and 1 Rx DMA Channels + * + * Registers are mapped statically in ep93xx_map_io(). + */ +static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = { + DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0), + DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1), + DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2), + DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3), + DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4), + DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5), + DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6), + DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7), + DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8), + DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9), +}; + +static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = { + .channels = ep93xx_dma_m2p_channels, + .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels), +}; + +static struct platform_device ep93xx_dma_m2p_device = { + .name = "ep93xx-dma-m2p", + .id = -1, + .dev = { + .platform_data = &ep93xx_dma_m2p_data, + }, +}; + +/* + * DMA M2M channels. + * + * There are 2 M2M channels which support memcpy/memset and in addition simple + * hardware requests from/to SSP and IDE. We do not implement an external + * hardware requests. + * + * Registers are mapped statically in ep93xx_map_io(). 
+ */ +static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = { + DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0), + DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1), +}; + +static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = { + .channels = ep93xx_dma_m2m_channels, + .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels), +}; + +static struct platform_device ep93xx_dma_m2m_device = { + .name = "ep93xx-dma-m2m", + .id = -1, + .dev = { + .platform_data = &ep93xx_dma_m2m_data, + }, +}; + +static int __init ep93xx_dma_init(void) +{ + platform_device_register(&ep93xx_dma_m2p_device); + platform_device_register(&ep93xx_dma_m2m_device); + return 0; +} +arch_initcall(ep93xx_dma_init); diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c deleted file mode 100644 index 415dce37b88..00000000000 --- a/arch/arm/mach-ep93xx/gpio.c +++ /dev/null @@ -1,410 +0,0 @@ -/* - * linux/arch/arm/mach-ep93xx/gpio.c - * - * Generic EP93xx GPIO handling - * - * Copyright (c) 2008 Ryan Mallon <ryan@bluewatersys.com> - * - * Based on code originally from: - * linux/arch/arm/mach-ep93xx/core.c - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/seq_file.h> -#include <linux/io.h> -#include <linux/gpio.h> -#include <linux/irq.h> - -#include <mach/hardware.h> - -/************************************************************************* - * Interrupt handling for EP93xx on-chip GPIOs - *************************************************************************/ -static unsigned char gpio_int_unmasked[3]; -static unsigned char gpio_int_enabled[3]; -static unsigned char gpio_int_type1[3]; -static unsigned char gpio_int_type2[3]; -static unsigned char gpio_int_debounce[3]; - -/* Port ordering is: A B F */ -static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c }; -static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 }; -static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 }; -static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; -static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; - -static void ep93xx_gpio_update_int_params(unsigned port) -{ - BUG_ON(port > 2); - - __raw_writeb(0, EP93XX_GPIO_REG(int_en_register_offset[port])); - - __raw_writeb(gpio_int_type2[port], - EP93XX_GPIO_REG(int_type2_register_offset[port])); - - __raw_writeb(gpio_int_type1[port], - EP93XX_GPIO_REG(int_type1_register_offset[port])); - - __raw_writeb(gpio_int_unmasked[port] & gpio_int_enabled[port], - EP93XX_GPIO_REG(int_en_register_offset[port])); -} - -static inline void ep93xx_gpio_int_mask(unsigned line) -{ - gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7)); -} - -static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable) -{ - int line = irq_to_gpio(irq); - int port = line >> 3; - int port_mask = 1 << (line & 7); - - if (enable) - gpio_int_debounce[port] |= port_mask; - else - gpio_int_debounce[port] &= ~port_mask; - - __raw_writeb(gpio_int_debounce[port], - EP93XX_GPIO_REG(int_debounce_register_offset[port])); -} - -static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - unsigned char status; - int i; - - status = __raw_readb(EP93XX_GPIO_A_INT_STATUS); - for (i = 0; i < 8; i++) { - if (status & (1 << i)) { - int 
gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_A(0)) + i; - generic_handle_irq(gpio_irq); - } - } - - status = __raw_readb(EP93XX_GPIO_B_INT_STATUS); - for (i = 0; i < 8; i++) { - if (status & (1 << i)) { - int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_B(0)) + i; - generic_handle_irq(gpio_irq); - } - } -} - -static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - /* - * map discontiguous hw irq range to continuous sw irq range: - * - * IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7}) - */ - int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */ - int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx; - - generic_handle_irq(gpio_irq); -} - -static void ep93xx_gpio_irq_ack(struct irq_data *d) -{ - int line = irq_to_gpio(d->irq); - int port = line >> 3; - int port_mask = 1 << (line & 7); - - if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { - gpio_int_type2[port] ^= port_mask; /* switch edge direction */ - ep93xx_gpio_update_int_params(port); - } - - __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port])); -} - -static void ep93xx_gpio_irq_mask_ack(struct irq_data *d) -{ - int line = irq_to_gpio(d->irq); - int port = line >> 3; - int port_mask = 1 << (line & 7); - - if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) - gpio_int_type2[port] ^= port_mask; /* switch edge direction */ - - gpio_int_unmasked[port] &= ~port_mask; - ep93xx_gpio_update_int_params(port); - - __raw_writeb(port_mask, EP93XX_GPIO_REG(eoi_register_offset[port])); -} - -static void ep93xx_gpio_irq_mask(struct irq_data *d) -{ - int line = irq_to_gpio(d->irq); - int port = line >> 3; - - gpio_int_unmasked[port] &= ~(1 << (line & 7)); - ep93xx_gpio_update_int_params(port); -} - -static void ep93xx_gpio_irq_unmask(struct irq_data *d) -{ - int line = irq_to_gpio(d->irq); - int port = line >> 3; - - gpio_int_unmasked[port] |= 1 << (line & 7); - ep93xx_gpio_update_int_params(port); -} - -/* - * gpio_int_type1 controls whether the interrupt is level (0) or - * edge (1) triggered, while gpio_int_type2 controls whether it - * triggers on low/falling (0) or high/rising (1). 
- */ -static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) -{ - const int gpio = irq_to_gpio(d->irq); - const int port = gpio >> 3; - const int port_mask = 1 << (gpio & 7); - irq_flow_handler_t handler; - - gpio_direction_input(gpio); - - switch (type) { - case IRQ_TYPE_EDGE_RISING: - gpio_int_type1[port] |= port_mask; - gpio_int_type2[port] |= port_mask; - handler = handle_edge_irq; - break; - case IRQ_TYPE_EDGE_FALLING: - gpio_int_type1[port] |= port_mask; - gpio_int_type2[port] &= ~port_mask; - handler = handle_edge_irq; - break; - case IRQ_TYPE_LEVEL_HIGH: - gpio_int_type1[port] &= ~port_mask; - gpio_int_type2[port] |= port_mask; - handler = handle_level_irq; - break; - case IRQ_TYPE_LEVEL_LOW: - gpio_int_type1[port] &= ~port_mask; - gpio_int_type2[port] &= ~port_mask; - handler = handle_level_irq; - break; - case IRQ_TYPE_EDGE_BOTH: - gpio_int_type1[port] |= port_mask; - /* set initial polarity based on current input level */ - if (gpio_get_value(gpio)) - gpio_int_type2[port] &= ~port_mask; /* falling */ - else - gpio_int_type2[port] |= port_mask; /* rising */ - handler = handle_edge_irq; - break; - default: - pr_err("failed to set irq type %d for gpio %d\n", type, gpio); - return -EINVAL; - } - - __irq_set_handler_locked(d->irq, handler); - - gpio_int_enabled[port] |= port_mask; - - ep93xx_gpio_update_int_params(port); - - return 0; -} - -static struct irq_chip ep93xx_gpio_irq_chip = { - .name = "GPIO", - .irq_ack = ep93xx_gpio_irq_ack, - .irq_mask_ack = ep93xx_gpio_irq_mask_ack, - .irq_mask = ep93xx_gpio_irq_mask, - .irq_unmask = ep93xx_gpio_irq_unmask, - .irq_set_type = ep93xx_gpio_irq_type, -}; - -void __init ep93xx_gpio_init_irq(void) -{ - int gpio_irq; - - for (gpio_irq = gpio_to_irq(0); - gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) { - irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip, - handle_level_irq); - set_irq_flags(gpio_irq, IRQF_VALID); - } - - irq_set_chained_handler(IRQ_EP93XX_GPIO_AB, - ep93xx_gpio_ab_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO0MUX, - ep93xx_gpio_f_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO1MUX, - ep93xx_gpio_f_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO2MUX, - ep93xx_gpio_f_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO3MUX, - ep93xx_gpio_f_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO4MUX, - ep93xx_gpio_f_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO5MUX, - ep93xx_gpio_f_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO6MUX, - ep93xx_gpio_f_irq_handler); - irq_set_chained_handler(IRQ_EP93XX_GPIO7MUX, - ep93xx_gpio_f_irq_handler); -} - - -/************************************************************************* - * gpiolib interface for EP93xx on-chip GPIOs - *************************************************************************/ -struct ep93xx_gpio_chip { - struct gpio_chip chip; - - void __iomem *data_reg; - void __iomem *data_dir_reg; -}; - -#define to_ep93xx_gpio_chip(c) container_of(c, struct ep93xx_gpio_chip, chip) - -static int ep93xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip); - unsigned long flags; - u8 v; - - local_irq_save(flags); - v = __raw_readb(ep93xx_chip->data_dir_reg); - v &= ~(1 << offset); - __raw_writeb(v, ep93xx_chip->data_dir_reg); - local_irq_restore(flags); - - return 0; -} - -static int ep93xx_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int val) -{ - struct ep93xx_gpio_chip *ep93xx_chip = 
to_ep93xx_gpio_chip(chip); - unsigned long flags; - int line; - u8 v; - - local_irq_save(flags); - - /* Set the value */ - v = __raw_readb(ep93xx_chip->data_reg); - if (val) - v |= (1 << offset); - else - v &= ~(1 << offset); - __raw_writeb(v, ep93xx_chip->data_reg); - - /* Drive as an output */ - line = chip->base + offset; - if (line <= EP93XX_GPIO_LINE_MAX_IRQ) { - /* Ports A/B/F */ - ep93xx_gpio_int_mask(line); - ep93xx_gpio_update_int_params(line >> 3); - } - - v = __raw_readb(ep93xx_chip->data_dir_reg); - v |= (1 << offset); - __raw_writeb(v, ep93xx_chip->data_dir_reg); - - local_irq_restore(flags); - - return 0; -} - -static int ep93xx_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip); - - return !!(__raw_readb(ep93xx_chip->data_reg) & (1 << offset)); -} - -static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val) -{ - struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip); - unsigned long flags; - u8 v; - - local_irq_save(flags); - v = __raw_readb(ep93xx_chip->data_reg); - if (val) - v |= (1 << offset); - else - v &= ~(1 << offset); - __raw_writeb(v, ep93xx_chip->data_reg); - local_irq_restore(flags); -} - -static int ep93xx_gpio_set_debounce(struct gpio_chip *chip, - unsigned offset, unsigned debounce) -{ - int gpio = chip->base + offset; - int irq = gpio_to_irq(gpio); - - if (irq < 0) - return -EINVAL; - - ep93xx_gpio_int_debounce(irq, debounce ? true : false); - - return 0; -} - -#define EP93XX_GPIO_BANK(name, dr, ddr, base_gpio) \ - { \ - .chip = { \ - .label = name, \ - .direction_input = ep93xx_gpio_direction_input, \ - .direction_output = ep93xx_gpio_direction_output, \ - .get = ep93xx_gpio_get, \ - .set = ep93xx_gpio_set, \ - .base = base_gpio, \ - .ngpio = 8, \ - }, \ - .data_reg = EP93XX_GPIO_REG(dr), \ - .data_dir_reg = EP93XX_GPIO_REG(ddr), \ - } - -static struct ep93xx_gpio_chip ep93xx_gpio_banks[] = { - EP93XX_GPIO_BANK("A", 0x00, 0x10, 0), - EP93XX_GPIO_BANK("B", 0x04, 0x14, 8), - EP93XX_GPIO_BANK("C", 0x08, 0x18, 40), - EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24), - EP93XX_GPIO_BANK("E", 0x20, 0x24, 32), - EP93XX_GPIO_BANK("F", 0x30, 0x34, 16), - EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48), - EP93XX_GPIO_BANK("H", 0x40, 0x44, 56), -}; - -void __init ep93xx_gpio_init(void) -{ - int i; - - /* Set Ports C, D, E, G, and H for GPIO use */ - ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS | - EP93XX_SYSCON_DEVCFG_GONK | - EP93XX_SYSCON_DEVCFG_EONIDE | - EP93XX_SYSCON_DEVCFG_GONIDE | - EP93XX_SYSCON_DEVCFG_HONIDE); - - for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { - struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip; - - /* - * Ports A, B, and F support input debouncing when - * used as interrupts. - */ - if (!strcmp(chip->label, "A") || - !strcmp(chip->label, "B") || - !strcmp(chip->label, "F")) - chip->set_debounce = ep93xx_gpio_set_debounce; - - gpiochip_add(chip); - } -} diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h index 5e31b2b25da..46d4d876e6f 100644 --- a/arch/arm/mach-ep93xx/include/mach/dma.h +++ b/arch/arm/mach-ep93xx/include/mach/dma.h @@ -1,149 +1,93 @@ -/** - * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine - * - * The EP93xx DMA M2P subsystem handles DMA transfers between memory and - * peripherals. DMA M2P channels are available for audio, UARTs and IrDA. - * See chapter 10 of the EP93xx users guide for full details on the DMA M2P - * engine. 
- * - * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code. - * - */ - #ifndef __ASM_ARCH_DMA_H #define __ASM_ARCH_DMA_H -#include <linux/list.h> #include <linux/types.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> -/** - * struct ep93xx_dma_buffer - Information about a buffer to be transferred - * using the DMA M2P engine +/* + * M2P channels. * - * @list: Entry in DMA buffer list - * @bus_addr: Physical address of the buffer - * @size: Size of the buffer in bytes + * Note that these values are also directly used for setting the PPALLOC + * register. */ -struct ep93xx_dma_buffer { - struct list_head list; - u32 bus_addr; - u16 size; -}; +#define EP93XX_DMA_I2S1 0 +#define EP93XX_DMA_I2S2 1 +#define EP93XX_DMA_AAC1 2 +#define EP93XX_DMA_AAC2 3 +#define EP93XX_DMA_AAC3 4 +#define EP93XX_DMA_I2S3 5 +#define EP93XX_DMA_UART1 6 +#define EP93XX_DMA_UART2 7 +#define EP93XX_DMA_UART3 8 +#define EP93XX_DMA_IRDA 9 +/* M2M channels */ +#define EP93XX_DMA_SSP 10 +#define EP93XX_DMA_IDE 11 /** - * struct ep93xx_dma_m2p_client - Information about a DMA M2P client - * - * @name: Unique name for this client - * @flags: Client flags - * @cookie: User data to pass to callback functions - * @buffer_started: Non NULL function to call when a transfer is started. - * The arguments are the user data cookie and the DMA - * buffer which is starting. - * @buffer_finished: Non NULL function to call when a transfer is completed. - * The arguments are the user data cookie, the DMA buffer - * which has completed, and a boolean flag indicating if - * the transfer had an error. + * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine + * @port: peripheral which is requesting the channel + * @direction: TX/RX channel + * @name: optional name for the channel, this is displayed in /proc/interrupts + * + * This information is passed as private channel parameter in a filter + * function. Note that this is only needed for slave/cyclic channels. For + * memcpy channels %NULL data should be passed. */ -struct ep93xx_dma_m2p_client { - char *name; - u8 flags; - void *cookie; - void (*buffer_started)(void *cookie, - struct ep93xx_dma_buffer *buf); - void (*buffer_finished)(void *cookie, - struct ep93xx_dma_buffer *buf, - int bytes, int error); - - /* private: Internal use only */ - void *channel; +struct ep93xx_dma_data { + int port; + enum dma_data_direction direction; + const char *name; }; -/* DMA M2P ports */ -#define EP93XX_DMA_M2P_PORT_I2S1 0x00 -#define EP93XX_DMA_M2P_PORT_I2S2 0x01 -#define EP93XX_DMA_M2P_PORT_AAC1 0x02 -#define EP93XX_DMA_M2P_PORT_AAC2 0x03 -#define EP93XX_DMA_M2P_PORT_AAC3 0x04 -#define EP93XX_DMA_M2P_PORT_I2S3 0x05 -#define EP93XX_DMA_M2P_PORT_UART1 0x06 -#define EP93XX_DMA_M2P_PORT_UART2 0x07 -#define EP93XX_DMA_M2P_PORT_UART3 0x08 -#define EP93XX_DMA_M2P_PORT_IRDA 0x09 -#define EP93XX_DMA_M2P_PORT_MASK 0x0f - -/* DMA M2P client flags */ -#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */ -#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */ - -/* - * DMA M2P client error handling flags. 
See the EP93xx users guide - * documentation on the DMA M2P CONTROL register for more details - */ -#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */ -#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */ -#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */ - /** - * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P - * subsystem - * - * @m2p: Client information to register - * returns 0 on success - * - * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA - * client + * struct ep93xx_dma_chan_data - platform specific data for a DMA channel + * @name: name of the channel, used for getting the right clock for the channel + * @base: mapped registers + * @irq: interrupt number used by this channel */ -int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); +struct ep93xx_dma_chan_data { + const char *name; + void __iomem *base; + int irq; +}; /** - * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P - * subsystem - * - * @m2p: Client to unregister + * struct ep93xx_dma_platform_data - platform data for the dmaengine driver + * @channels: array of channels which are passed to the driver + * @num_channels: number of channels in the array * - * Any transfers currently in progress will be completed in hardware, but - * ignored in software. + * This structure is passed to the DMA engine driver via platform data. For + * M2P channels, contract is that even channels are for TX and odd for RX. + * There is no requirement for the M2M channels. */ -void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); +struct ep93xx_dma_platform_data { + struct ep93xx_dma_chan_data *channels; + size_t num_channels; +}; -/** - * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer - * - * @m2p: DMA Client to submit the transfer on - * @buf: DMA Buffer to submit - * - * If the current or next transfer positions are free on the M2P client then - * the transfer is started immediately. If not, the transfer is added to the - * list of pending transfers. This function must not be called from the - * buffer_finished callback for an M2P channel. - * - */ -void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, - struct ep93xx_dma_buffer *buf); +static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan) +{ + return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p"); +} /** - * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list - * for an M2P channel + * ep93xx_dma_chan_direction - returns direction the channel can be used + * @chan: channel * - * @m2p: DMA Client to submit the transfer on - * @buf: DMA Buffer to submit - * - * This function must only be called from the buffer_finished callback for an - * M2P channel. It is commonly used to add the next transfer in a chained list - * of DMA transfers. + * This function can be used in filter functions to find out whether the + * channel supports given DMA direction. Only M2P channels have such + * limitation, for M2M channels the direction is configurable. 
*/ -void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, - struct ep93xx_dma_buffer *buf); +static inline enum dma_data_direction +ep93xx_dma_chan_direction(struct dma_chan *chan) +{ + if (!ep93xx_dma_chan_is_m2p(chan)) + return DMA_NONE; -/** - * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client - * - * @m2p: DMA client to flush transfers on - * - * Any transfers currently in progress will be completed in hardware, but - * ignored in software. - * - */ -void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); + /* even channels are for TX, odd for RX */ + return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; +} #endif /* __ASM_ARCH_DMA_H */ diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h index 9ac4d105509..c4a7b84ef06 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h @@ -98,6 +98,7 @@ #define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000) +#define EP93XX_GPIO_PHYS_BASE EP93XX_APB_PHYS(0x00040000) #define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000) #define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x)) #define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c) diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h index 0a37961b345..9bb63ac13f0 100644 --- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h +++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h @@ -7,9 +7,11 @@ struct spi_device; * struct ep93xx_spi_info - EP93xx specific SPI descriptor * @num_chipselect: number of chip selects on this board, must be * at least one + * @use_dma: use DMA for the transfers */ struct ep93xx_spi_info { int num_chipselect; + bool use_dma; }; /** diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig index b92c1e55714..1435fc31c4b 100644 --- a/arch/arm/mach-exynos4/Kconfig +++ b/arch/arm/mach-exynos4/Kconfig @@ -91,6 +91,11 @@ config EXYNOS4_SETUP_FIMC help Common setup code for the camera interfaces. +config EXYNOS4_SETUP_USB_PHY + bool + help + Common setup code for USB PHY controller + # machine support menu "EXYNOS4 Machines" @@ -176,6 +181,7 @@ config MACH_NURI select EXYNOS4_SETUP_I2C3 select EXYNOS4_SETUP_I2C5 select EXYNOS4_SETUP_SDHCI + select EXYNOS4_SETUP_USB_PHY select SAMSUNG_DEV_PWM help Machine support for Samsung Mobile NURI Board. 
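As context for the ep93xx dmaengine conversion above: the rewritten mach/dma.h documents that a struct ep93xx_dma_data is handed to the driver as the private parameter of a dmaengine filter function, and that ep93xx_dma_chan_is_m2p()/ep93xx_dma_chan_direction() are intended for use in such filters. The sketch below shows what a client-side filter might look like under that contract; it is not part of the patch, and the function name, channel name and port choice (ep93xx_aac_dma_filter, "aac1-tx", EP93XX_DMA_AAC1) are illustrative only.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <mach/dma.h>

/* Illustrative filter: accept an M2P channel matching the requested direction. */
static bool ep93xx_aac_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/*
	 * M2P channels have a fixed direction (even channels TX, odd RX,
	 * per the platform data comment above); M2M channels return
	 * DMA_NONE here and are rejected as well.
	 */
	if (ep93xx_dma_chan_direction(chan) != data->direction)
		return false;

	/* Hand the configuration to the dmaengine driver via chan->private. */
	chan->private = data;
	return true;
}

static struct dma_chan *ep93xx_aac_request_tx_chan(void)
{
	static struct ep93xx_dma_data data = {
		.port		= EP93XX_DMA_AAC1,
		.direction	= DMA_TO_DEVICE,
		.name		= "aac1-tx",
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, ep93xx_aac_dma_filter, &data);
}

The direction check relies on the convention stated in the ep93xx_dma_platform_data comment: even M2P channels are TX, odd are RX, which matches the ordering of ep93xx_dma_m2p_channels[] in the new dma.c.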
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile index a9bb94fabaa..60fe5ecf359 100644 --- a/arch/arm/mach-exynos4/Makefile +++ b/arch/arm/mach-exynos4/Makefile @@ -56,4 +56,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o -obj-$(CONFIG_USB_SUPPORT) += usb-phy.o +obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c index 08813a6f66b..bfd621460ab 100644 --- a/arch/arm/mach-exynos4/cpu.c +++ b/arch/arm/mach-exynos4/cpu.c @@ -23,6 +23,7 @@ #include <plat/sdhci.h> #include <plat/devs.h> #include <plat/fimc-core.h> +#include <plat/iic-core.h> #include <mach/regs-irq.h> @@ -98,7 +99,7 @@ static struct map_desc exynos4_iodesc[] __initdata = { .length = SZ_4K, .type = MT_DEVICE, }, { - .virtual = (unsigned long)S5P_VA_USB_HSPHY, + .virtual = (unsigned long)S3C_VA_USB_HSPHY, .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY), .length = SZ_4K, .type = MT_DEVICE, @@ -132,6 +133,11 @@ void __init exynos4_map_io(void) s3c_fimc_setname(1, "exynos4-fimc"); s3c_fimc_setname(2, "exynos4-fimc"); s3c_fimc_setname(3, "exynos4-fimc"); + + /* The I2C bus controllers are directly compatible with s3c2440 */ + s3c_i2c0_setname("s3c2440-i2c"); + s3c_i2c1_setname("s3c2440-i2c"); + s3c_i2c2_setname("s3c2440-i2c"); } void __init exynos4_init_clocks(int xtal) diff --git a/arch/arm/mach-exynos4/dev-audio.c b/arch/arm/mach-exynos4/dev-audio.c index 1eed5f9f7bd..983069a5323 100644 --- a/arch/arm/mach-exynos4/dev-audio.c +++ b/arch/arm/mach-exynos4/dev-audio.c @@ -330,7 +330,7 @@ struct platform_device exynos4_device_ac97 = { static int exynos4_spdif_cfg_gpio(struct platform_device *pdev) { - s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3)); + s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4)); return 0; } diff --git a/arch/arm/mach-exynos4/headsmp.S b/arch/arm/mach-exynos4/headsmp.S index 6c6cfc50c46..3cdeb364754 100644 --- a/arch/arm/mach-exynos4/headsmp.S +++ b/arch/arm/mach-exynos4/headsmp.S @@ -13,7 +13,7 @@ #include <linux/linkage.h> #include <linux/init.h> - __INIT + __CPUINIT /* * exynos4 specific entry point for secondary CPUs. 
This provides diff --git a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h index 703118d5173..c337cf3a71b 100644 --- a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h +++ b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h @@ -11,7 +11,7 @@ #ifndef __PLAT_S5P_REGS_USB_PHY_H #define __PLAT_S5P_REGS_USB_PHY_H -#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S5P_VA_USB_HSPHY) +#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S3C_VA_USB_HSPHY) #define EXYNOS4_PHYPWR EXYNOS4_HSOTG_PHYREG(0x00) #define PHY1_HSIC_NORMAL_MASK (0xf << 9) diff --git a/arch/arm/mach-exynos4/init.c b/arch/arm/mach-exynos4/init.c index cf91f50e43a..a8a83e3881a 100644 --- a/arch/arm/mach-exynos4/init.c +++ b/arch/arm/mach-exynos4/init.c @@ -35,6 +35,7 @@ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no) tcfg->clocks = exynos4_serial_clocks; tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks); } + tcfg->flags |= NO_NEED_CHECK_CLKSRC; } s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no); diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c index 152676471b6..edd814110da 100644 --- a/arch/arm/mach-exynos4/mach-smdkv310.c +++ b/arch/arm/mach-exynos4/mach-smdkv310.c @@ -78,9 +78,7 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = { }; static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = { - .cd_type = S3C_SDHCI_CD_GPIO, - .ext_cd_gpio = EXYNOS4_GPK0(2), - .ext_cd_gpio_invert = 1, + .cd_type = S3C_SDHCI_CD_INTERNAL, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, #ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT .max_width = 8, @@ -96,9 +94,7 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = { }; static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = { - .cd_type = S3C_SDHCI_CD_GPIO, - .ext_cd_gpio = EXYNOS4_GPK2(2), - .ext_cd_gpio_invert = 1, + .cd_type = S3C_SDHCI_CD_INTERNAL, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, #ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT .max_width = 8, diff --git a/arch/arm/mach-exynos4/usb-phy.c b/arch/arm/mach-exynos4/setup-usb-phy.c index 0883c1b824b..0883c1b824b 100644 --- a/arch/arm/mach-exynos4/usb-phy.c +++ b/arch/arm/mach-exynos4/setup-usb-phy.c diff --git a/arch/arm/mach-exynos4/time.c b/arch/arm/mach-exynos4/time.c index 86b9fa0d363..ebb8f38d540 100644 --- a/arch/arm/mach-exynos4/time.c +++ b/arch/arm/mach-exynos4/time.c @@ -206,6 +206,7 @@ static cycle_t exynos4_pwm4_read(struct clocksource *cs) return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40)); } +#ifdef CONFIG_PM static void exynos4_pwm4_resume(struct clocksource *cs) { unsigned long pclk; @@ -218,6 +219,7 @@ static void exynos4_pwm4_resume(struct clocksource *cs) exynos4_pwm_init(4, ~0); exynos4_pwm_start(4, 1); } +#endif struct clocksource pwm_clocksource = { .name = "pwm_timer4", diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index 46adca068f2..dc26fff22cf 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -5,6 +5,7 @@ menu "Footbridge Implementations" config ARCH_CATS bool "CATS" select CLKSRC_I8253 + select CLKEVT_I8253 select FOOTBRIDGE_HOST select ISA select ISA_DMA diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c index 5f1f9867fc7..121ad1d4fa3 100644 --- a/arch/arm/mach-footbridge/dc21285-timer.c +++ b/arch/arm/mach-footbridge/dc21285-timer.c @@ -103,6 +103,7 @@ static void __init footbridge_timer_init(void) clockevents_calc_mult_shift(ce, mem_fclk_21285, 5); 
ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce); ce->min_delta_ns = clockevent_delta2ns(0x000004, ce); + ce->cpumask = cpumask_of(smp_processor_id()); clockevents_register_device(ce); } diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S index 30b971d6581..1be2eeb7a0a 100644 --- a/arch/arm/mach-footbridge/include/mach/debug-macro.S +++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S @@ -26,6 +26,7 @@ #include <asm/hardware/debug-8250.S> #else +#include <mach/hardware.h> /* For EBSA285 debugging */ .equ dc21285_high, ARMCSR_BASE & 0xff000000 .equ dc21285_low, ARMCSR_BASE & 0x00ffffff @@ -36,8 +37,8 @@ .else mov \rp, #0 .endif - orr \rv, \rp, #0x42000000 - orr \rp, \rp, #dc21285_high + orr \rv, \rp, #dc21285_high + orr \rp, \rp, #0x42000000 .endm .macro senduart,rd,rx diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c index 7020f1a3fec..c40bb415f4b 100644 --- a/arch/arm/mach-footbridge/isa-timer.c +++ b/arch/arm/mach-footbridge/isa-timer.c @@ -5,64 +5,18 @@ * Copyright (C) 1998 Phil Blundell */ #include <linux/clockchips.h> -#include <linux/clocksource.h> +#include <linux/i8253.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> -#include <linux/io.h> #include <linux/spinlock.h> #include <linux/timex.h> #include <asm/irq.h> -#include <asm/i8253.h> #include <asm/mach/time.h> #include "common.h" -DEFINE_RAW_SPINLOCK(i8253_lock); - -static void pit_set_mode(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - unsigned long flags; - - raw_local_irq_save(flags); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - outb_p(0x34, PIT_MODE); - outb_p(PIT_LATCH & 0xff, PIT_CH0); - outb_p(PIT_LATCH >> 8, PIT_CH0); - break; - - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - outb_p(0x30, PIT_MODE); - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); - break; - - case CLOCK_EVT_MODE_ONESHOT: - case CLOCK_EVT_MODE_RESUME: - break; - } - local_irq_restore(flags); -} - -static int pit_set_next_event(unsigned long delta, - struct clock_event_device *evt) -{ - return 0; -} - -static struct clock_event_device pit_ce = { - .name = "pit", - .features = CLOCK_EVT_FEAT_PERIODIC, - .set_mode = pit_set_mode, - .set_next_event = pit_set_next_event, - .shift = 32, -}; - static irqreturn_t pit_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *ce = dev_id; @@ -74,20 +28,15 @@ static struct irqaction pit_timer_irq = { .name = "pit", .handler = pit_timer_interrupt, .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, - .dev_id = &pit_ce, + .dev_id = &i8253_clockevent, }; static void __init isa_timer_init(void) { - pit_ce.cpumask = cpumask_of(smp_processor_id()); - pit_ce.mult = div_sc(PIT_TICK_RATE, NSEC_PER_SEC, pit_ce.shift); - pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce); - pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce); - clocksource_i8253_init(); - setup_irq(pit_ce.irq, &pit_timer_irq); - clockevents_register_device(&pit_ce); + setup_irq(i8253_clockevent.irq, &pit_timer_irq); + clockevent_i8253_init(false); } struct sys_timer isa_timer = { diff --git a/arch/arm/mach-h720x/Kconfig b/arch/arm/mach-h720x/Kconfig index 9b6982efbd2..abf356c0234 100644 --- a/arch/arm/mach-h720x/Kconfig +++ b/arch/arm/mach-h720x/Kconfig @@ -6,12 +6,14 @@ config ARCH_H7201 bool "gms30c7201" depends on ARCH_H720X select CPU_H7201 + select ZONE_DMA help Say Y here if you are using the Hynix GMS30C7201 Reference Board config ARCH_H7202 bool 
"hms30c7202" select CPU_H7202 + select ZONE_DMA depends on ARCH_H720X help Say Y here if you are using the Hynix HMS30C7202 Reference Board diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c index a65838fc061..af1c580b06b 100644 --- a/arch/arm/mach-imx/clock-imx25.c +++ b/arch/arm/mach-imx/clock-imx25.c @@ -282,9 +282,10 @@ static struct clk_lookup lookups[] = { _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk) _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk) _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk) - _REGISTER_CLOCK("imx25-cspi.0", NULL, cspi1_clk) - _REGISTER_CLOCK("imx25-cspi.1", NULL, cspi2_clk) - _REGISTER_CLOCK("imx25-cspi.2", NULL, cspi3_clk) + /* i.mx25 has the i.mx35 type cspi */ + _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk) + _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk) + _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk) _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk) _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk) _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk) diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c index 236f1495efa..f8aa5be0eb1 100644 --- a/arch/arm/mach-imx/dma-v1.c +++ b/arch/arm/mach-imx/dma-v1.c @@ -26,6 +26,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/interrupt.h> +#include <linux/err.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/scatterlist.h> diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c index 15e45c84e37..59d2a3b137d 100644 --- a/arch/arm/mach-imx/mach-apf9328.c +++ b/arch/arm/mach-imx/mach-apf9328.c @@ -115,6 +115,8 @@ static struct platform_device *devices[] __initdata = { static void __init apf9328_init(void) { + imx1_soc_init(); + mxc_gpio_setup_multiple_pins(apf9328_pins, ARRAY_SIZE(apf9328_pins), "APF9328"); diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c index ffb40ff619b..ede2710f8b7 100644 --- a/arch/arm/mach-imx/mach-armadillo5x0.c +++ b/arch/arm/mach-imx/mach-armadillo5x0.c @@ -490,6 +490,8 @@ static struct platform_device *devices[] __initdata = { */ static void __init armadillo5x0_init(void) { + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(armadillo5x0_pins, ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0"); diff --git a/arch/arm/mach-imx/mach-bug.c b/arch/arm/mach-imx/mach-bug.c index 42e4f078a19..f49470553bd 100644 --- a/arch/arm/mach-imx/mach-bug.c +++ b/arch/arm/mach-imx/mach-bug.c @@ -42,6 +42,8 @@ static const unsigned int bug_pins[] __initconst = { static void __init bug_board_init(void) { + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(bug_pins, ARRAY_SIZE(bug_pins), "uart-4"); imx31_add_imx_uart4(&uart_pdata); diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c index 46a2e41d43d..87887ac5806 100644 --- a/arch/arm/mach-imx/mach-cpuimx27.c +++ b/arch/arm/mach-imx/mach-cpuimx27.c @@ -250,6 +250,8 @@ __setup("otg_mode=", eukrea_cpuimx27_otg_mode); static void __init eukrea_cpuimx27_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins, ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27"); diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c index 3f8ef825fa6..f39a478ba1a 100644 --- a/arch/arm/mach-imx/mach-cpuimx35.c +++ b/arch/arm/mach-imx/mach-cpuimx35.c @@ -156,6 +156,8 @@ __setup("otg_mode=", eukrea_cpuimx35_otg_mode); */ static void __init eukrea_cpuimx35_init(void) { + imx35_soc_init(); + mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx35_pads, 
ARRAY_SIZE(eukrea_cpuimx35_pads)); diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c index 148cff2819b..da36da52969 100644 --- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c +++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c @@ -125,6 +125,8 @@ __setup("otg_mode=", eukrea_cpuimx25_otg_mode); static void __init eukrea_cpuimx25_init(void) { + imx25_soc_init(); + if (mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx25_pads, ARRAY_SIZE(eukrea_cpuimx25_pads))) printk(KERN_ERR "error setting cpuimx25 pads !\n"); diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index 7ae43b1ec51..c6269d60ddb 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c @@ -231,6 +231,8 @@ static void __init visstrim_m10_board_init(void) { int ret; + imx27_soc_init(); + ret = mxc_gpio_setup_multiple_pins(visstrim_m10_pins, ARRAY_SIZE(visstrim_m10_pins), "VISSTRIM_M10"); if (ret) diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c index 9be6cd6fbf8..272f793e924 100644 --- a/arch/arm/mach-imx/mach-imx27ipcam.c +++ b/arch/arm/mach-imx/mach-imx27ipcam.c @@ -50,6 +50,8 @@ static const int mx27ipcam_pins[] __initconst = { static void __init mx27ipcam_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mx27ipcam_pins, ARRAY_SIZE(mx27ipcam_pins), "mx27ipcam"); diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c index 841140516ed..d81a769fe89 100644 --- a/arch/arm/mach-imx/mach-imx27lite.c +++ b/arch/arm/mach-imx/mach-imx27lite.c @@ -59,6 +59,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = { static void __init mx27lite_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins), "imx27lite"); imx27_add_imx_uart0(&uart_pdata); diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c index 1ecae20cf4e..e472a1d8805 100644 --- a/arch/arm/mach-imx/mach-kzm_arm11_01.c +++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c @@ -223,6 +223,8 @@ static int kzm_pins[] __initdata = { */ static void __init kzm_board_init(void) { + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(kzm_pins, ARRAY_SIZE(kzm_pins), "kzm"); kzm_init_ext_uart(); diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c index 38ec5cbbda9..5cd8bee4696 100644 --- a/arch/arm/mach-imx/mach-mx1ads.c +++ b/arch/arm/mach-imx/mach-mx1ads.c @@ -115,6 +115,8 @@ static struct i2c_board_info mx1ads_i2c_devices[] = { */ static void __init mx1ads_init(void) { + imx1_soc_init(); + mxc_gpio_setup_multiple_pins(mx1ads_pins, ARRAY_SIZE(mx1ads_pins), "mx1ads"); diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c index 74ac88978dd..d389ecf9b5a 100644 --- a/arch/arm/mach-imx/mach-mx21ads.c +++ b/arch/arm/mach-imx/mach-mx21ads.c @@ -279,6 +279,8 @@ static struct platform_device *platform_devices[] __initdata = { static void __init mx21ads_board_init(void) { + imx21_soc_init(); + mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins), "mx21ads"); diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c index 58ea3fdf091..01534bb6130 100644 --- a/arch/arm/mach-imx/mach-mx25_3ds.c +++ b/arch/arm/mach-imx/mach-mx25_3ds.c @@ -219,6 +219,8 @@ static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = { static void __init mx25pdk_init(void) { + imx25_soc_init(); + 
mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads, ARRAY_SIZE(mx25pdk_pads)); diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c index 6e1accf93f8..117ce0a50f4 100644 --- a/arch/arm/mach-imx/mach-mx27_3ds.c +++ b/arch/arm/mach-imx/mach-mx27_3ds.c @@ -267,6 +267,8 @@ static const struct imxi2c_platform_data mx27_3ds_i2c0_data __initconst = { static void __init mx27pdk_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins), "mx27pdk"); mx27_3ds_sdhc1_enable_level_translator(); diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c index 1db79506f5e..fc26ed71b9e 100644 --- a/arch/arm/mach-imx/mach-mx27ads.c +++ b/arch/arm/mach-imx/mach-mx27ads.c @@ -288,6 +288,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = { static void __init mx27ads_board_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins), "mx27ads"); diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c index 9b982449cb5..441fbb83f39 100644 --- a/arch/arm/mach-imx/mach-mx31_3ds.c +++ b/arch/arm/mach-imx/mach-mx31_3ds.c @@ -13,6 +13,7 @@ */ #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/types.h> #include <linux/init.h> #include <linux/clk.h> @@ -689,6 +690,8 @@ static void __init mx31_3ds_init(void) { int ret; + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins), "mx31_3ds"); diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index f4dee025463..0ce49478a47 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c @@ -516,6 +516,8 @@ static void __init mx31ads_init_irq(void) static void __init mx31ads_init(void) { + imx31_soc_init(); + mxc_init_extuart(); mxc_init_imx_uart(); mxc_init_i2c(); diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c index 410e676ae08..750368ddf0f 100644 --- a/arch/arm/mach-imx/mach-mx31lilly.c +++ b/arch/arm/mach-imx/mach-mx31lilly.c @@ -243,6 +243,8 @@ core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444); static void __init mx31lilly_board_init(void) { + imx31_soc_init(); + switch (mx31lilly_baseboard) { case MX31LILLY_NOBOARD: break; diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c index ac9b4cad320..4b47fd9fdd8 100644 --- a/arch/arm/mach-imx/mach-mx31lite.c +++ b/arch/arm/mach-imx/mach-mx31lite.c @@ -230,6 +230,8 @@ static void __init mx31lite_init(void) { int ret; + imx31_soc_init(); + switch (mx31lite_baseboard) { case MX31LITE_NOBOARD: break; diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index eaa51e49ca9..a52fd36e2b5 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -507,6 +507,8 @@ core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444); */ static void __init mx31moboard_init(void) { + imx31_soc_init(); + mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins), "moboard"); diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c index 882880ac1bb..48b3c6fd5cf 100644 --- a/arch/arm/mach-imx/mach-mx35_3ds.c +++ b/arch/arm/mach-imx/mach-mx35_3ds.c @@ -179,6 +179,8 @@ static const struct imxi2c_platform_data mx35_3ds_i2c0_data __initconst = { */ static void __init mx35_3ds_init(void) { + imx35_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, 
ARRAY_SIZE(mx35pdk_pads)); imx35_add_fec(NULL); diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c index 2774541511e..c85876fed66 100644 --- a/arch/arm/mach-imx/mach-mxt_td60.c +++ b/arch/arm/mach-imx/mach-mxt_td60.c @@ -233,6 +233,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = { static void __init mxt_td60_board_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins), "MXT_TD60"); diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c index bbddc5a11c4..71083aa1603 100644 --- a/arch/arm/mach-imx/mach-pca100.c +++ b/arch/arm/mach-imx/mach-pca100.c @@ -357,6 +357,8 @@ static void __init pca100_init(void) { int ret; + imx27_soc_init(); + /* SSI unit */ mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0, MXC_AUDMUX_V1_PCR_SYN | /* 4wire mode */ diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c index 89c213b8129..f45b7cd72c8 100644 --- a/arch/arm/mach-imx/mach-pcm037.c +++ b/arch/arm/mach-imx/mach-pcm037.c @@ -576,6 +576,8 @@ static void __init pcm037_init(void) { int ret; + imx31_soc_init(); + mxc_iomux_set_gpr(MUX_PGP_UH2, 1); mxc_iomux_setup_multiple_pins(pcm037_pins, ARRAY_SIZE(pcm037_pins), diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c index 853bb871c7e..2d6a64bbac4 100644 --- a/arch/arm/mach-imx/mach-pcm038.c +++ b/arch/arm/mach-imx/mach-pcm038.c @@ -295,6 +295,8 @@ static const struct mxc_usbh_platform_data usbh2_pdata __initconst = { static void __init pcm038_init(void) { + imx27_soc_init(); + mxc_gpio_setup_multiple_pins(pcm038_pins, ARRAY_SIZE(pcm038_pins), "PCM038"); diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c index 026441628df..163cc318caf 100644 --- a/arch/arm/mach-imx/mach-pcm043.c +++ b/arch/arm/mach-imx/mach-pcm043.c @@ -356,6 +356,8 @@ static struct esdhc_platform_data sd1_pdata = { */ static void __init pcm043_init(void) { + imx35_soc_init(); + mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads)); mxc_audmux_v2_configure_port(3, diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c index c1632871593..3626f486498 100644 --- a/arch/arm/mach-imx/mach-qong.c +++ b/arch/arm/mach-imx/mach-qong.c @@ -244,6 +244,8 @@ static void __init qong_init_fpga(void) */ static void __init qong_init(void) { + imx31_soc_init(); + mxc_init_imx_uart(); qong_init_nor_mtd(); qong_init_fpga(); diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c index dcaee043628..82805260e19 100644 --- a/arch/arm/mach-imx/mach-scb9328.c +++ b/arch/arm/mach-imx/mach-scb9328.c @@ -129,6 +129,8 @@ static struct platform_device *devices[] __initdata = { */ static void __init scb9328_init(void) { + imx1_soc_init(); + imx1_add_imx_uart0(&uart_pdata); printk(KERN_INFO"Scb9328: Adding devices\n"); diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c index d74e3473d23..7d8e012a633 100644 --- a/arch/arm/mach-imx/mach-vpr200.c +++ b/arch/arm/mach-imx/mach-vpr200.c @@ -267,6 +267,8 @@ static struct platform_device *devices[] __initdata = { */ static void __init vpr200_board_init(void) { + imx35_soc_init(); + mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads)); imx35_add_fec(NULL); diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c index 2e482ba5a0e..2bded591d5c 100644 --- a/arch/arm/mach-imx/mm-imx1.c +++ b/arch/arm/mach-imx/mm-imx1.c @@ -23,7 +23,6 @@ #include 
<mach/common.h> #include <mach/hardware.h> -#include <mach/gpio.h> #include <mach/irqs.h> #include <mach/iomux-v1.h> @@ -44,15 +43,19 @@ void __init imx1_init_early(void) MX1_NUM_GPIO_PORT); } -static struct mxc_gpio_port imx1_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 0, 1, MX1_GPIO_INT_PORTA), - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 1, 2, MX1_GPIO_INT_PORTB), - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 2, 3, MX1_GPIO_INT_PORTC), - DEFINE_IMX_GPIO_PORT_IRQ(MX1, 3, 4, MX1_GPIO_INT_PORTD), -}; - void __init mx1_init_irq(void) { mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR)); - mxc_gpio_init(imx1_gpio_ports, ARRAY_SIZE(imx1_gpio_ports)); +} + +void __init imx1_soc_init(void) +{ + mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTA, 0); + mxc_register_gpio("imx1-gpio", 1, MX1_GPIO2_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTB, 0); + mxc_register_gpio("imx1-gpio", 2, MX1_GPIO3_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTC, 0); + mxc_register_gpio("imx1-gpio", 3, MX1_GPIO4_BASE_ADDR, SZ_256, + MX1_GPIO_INT_PORTD, 0); } diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c index 7a0c500ac2c..6d7d518686a 100644 --- a/arch/arm/mach-imx/mm-imx21.c +++ b/arch/arm/mach-imx/mm-imx21.c @@ -24,7 +24,6 @@ #include <mach/common.h> #include <asm/pgtable.h> #include <asm/mach/map.h> -#include <mach/gpio.h> #include <mach/irqs.h> #include <mach/iomux-v1.h> @@ -70,17 +69,17 @@ void __init imx21_init_early(void) MX21_NUM_GPIO_PORT); } -static struct mxc_gpio_port imx21_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX21, 0, 1, MX21_INT_GPIO), - DEFINE_IMX_GPIO_PORT(MX21, 1, 2), - DEFINE_IMX_GPIO_PORT(MX21, 2, 3), - DEFINE_IMX_GPIO_PORT(MX21, 3, 4), - DEFINE_IMX_GPIO_PORT(MX21, 4, 5), - DEFINE_IMX_GPIO_PORT(MX21, 5, 6), -}; - void __init mx21_init_irq(void) { mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR)); - mxc_gpio_init(imx21_gpio_ports, ARRAY_SIZE(imx21_gpio_ports)); +} + +void __init imx21_soc_init(void) +{ + mxc_register_gpio("imx21-gpio", 0, MX21_GPIO1_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 1, MX21_GPIO2_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 2, MX21_GPIO3_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 3, MX21_GPIO4_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 4, MX21_GPIO5_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0); } diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c index 02f7b5c7fa8..9a1591c2508 100644 --- a/arch/arm/mach-imx/mm-imx25.c +++ b/arch/arm/mach-imx/mm-imx25.c @@ -27,7 +27,6 @@ #include <mach/hardware.h> #include <mach/mx25.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> /* @@ -57,16 +56,16 @@ void __init imx25_init_early(void) mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx25_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 0, 1, MX25_INT_GPIO1), - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 1, 2, MX25_INT_GPIO2), - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 2, 3, MX25_INT_GPIO3), - DEFINE_IMX_GPIO_PORT_IRQ(MX25, 3, 4, MX25_INT_GPIO4), -}; - void __init mx25_init_irq(void) { mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR)); - mxc_gpio_init(imx25_gpio_ports, ARRAY_SIZE(imx25_gpio_ports)); } +void __init imx25_soc_init(void) +{ + /* i.mx25 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0); + mxc_register_gpio("imx31-gpio", 1, 
MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0); + mxc_register_gpio("imx31-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0); + mxc_register_gpio("imx31-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0); +} diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c index a6761a39f08..133b30003dd 100644 --- a/arch/arm/mach-imx/mm-imx27.c +++ b/arch/arm/mach-imx/mm-imx27.c @@ -24,7 +24,6 @@ #include <mach/common.h> #include <asm/pgtable.h> #include <asm/mach/map.h> -#include <mach/gpio.h> #include <mach/irqs.h> #include <mach/iomux-v1.h> @@ -70,17 +69,18 @@ void __init imx27_init_early(void) MX27_NUM_GPIO_PORT); } -static struct mxc_gpio_port imx27_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX27, 0, 1, MX27_INT_GPIO), - DEFINE_IMX_GPIO_PORT(MX27, 1, 2), - DEFINE_IMX_GPIO_PORT(MX27, 2, 3), - DEFINE_IMX_GPIO_PORT(MX27, 3, 4), - DEFINE_IMX_GPIO_PORT(MX27, 4, 5), - DEFINE_IMX_GPIO_PORT(MX27, 5, 6), -}; - void __init mx27_init_irq(void) { mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR)); - mxc_gpio_init(imx27_gpio_ports, ARRAY_SIZE(imx27_gpio_ports)); +} + +void __init imx27_soc_init(void) +{ + /* i.mx27 has the i.mx21 type gpio */ + mxc_register_gpio("imx21-gpio", 0, MX27_GPIO1_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 1, MX27_GPIO2_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 2, MX27_GPIO3_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 3, MX27_GPIO4_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 4, MX27_GPIO5_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); + mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0); } diff --git a/arch/arm/mach-imx/mm-imx31.c b/arch/arm/mach-imx/mm-imx31.c index 86b9b45864d..6d103c01b8b 100644 --- a/arch/arm/mach-imx/mm-imx31.c +++ b/arch/arm/mach-imx/mm-imx31.c @@ -26,7 +26,6 @@ #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> static struct map_desc mx31_io_desc[] __initdata = { @@ -53,14 +52,14 @@ void __init imx31_init_early(void) mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx31_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX31, 0, 1, MX31_INT_GPIO1), - DEFINE_IMX_GPIO_PORT_IRQ(MX31, 1, 2, MX31_INT_GPIO2), - DEFINE_IMX_GPIO_PORT_IRQ(MX31, 2, 3, MX31_INT_GPIO3), -}; - void __init mx31_init_irq(void) { mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR)); - mxc_gpio_init(imx31_gpio_ports, ARRAY_SIZE(imx31_gpio_ports)); +} + +void __init imx31_soc_init(void) +{ + mxc_register_gpio("imx31-gpio", 0, MX31_GPIO1_BASE_ADDR, SZ_16K, MX31_INT_GPIO1, 0); + mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0); + mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0); } diff --git a/arch/arm/mach-imx/mm-imx35.c b/arch/arm/mach-imx/mm-imx35.c index c880e6d1ae5..bb068bc8dab 100644 --- a/arch/arm/mach-imx/mm-imx35.c +++ b/arch/arm/mach-imx/mm-imx35.c @@ -27,7 +27,6 @@ #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> static struct map_desc mx35_io_desc[] __initdata = { @@ -50,14 +49,15 @@ void __init imx35_init_early(void) mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx35_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ(MX35, 0, 1, MX35_INT_GPIO1), - DEFINE_IMX_GPIO_PORT_IRQ(MX35, 1, 2, MX35_INT_GPIO2), - 
DEFINE_IMX_GPIO_PORT_IRQ(MX35, 2, 3, MX35_INT_GPIO3), -}; - void __init mx35_init_irq(void) { mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR)); - mxc_gpio_init(imx35_gpio_ports, ARRAY_SIZE(imx35_gpio_ports)); +} + +void __init imx35_soc_init(void) +{ + /* i.mx35 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX35_GPIO1_BASE_ADDR, SZ_16K, MX35_INT_GPIO1, 0); + mxc_register_gpio("imx31-gpio", 1, MX35_GPIO2_BASE_ADDR, SZ_16K, MX35_INT_GPIO2, 0); + mxc_register_gpio("imx31-gpio", 2, MX35_GPIO3_BASE_ADDR, SZ_16K, MX35_INT_GPIO3, 0); } diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 5c147fb66a0..a5b989728b9 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -17,6 +17,7 @@ * */ +#include <linux/dma-mapping.h> #include <linux/serial_8250.h> #include <linux/io.h> #ifdef CONFIG_MTD_PHYSMAP diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 74ed81a3cb1..07772575d7a 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void) /* * clocksource */ + +static cycle_t ixp4xx_clocksource_read(struct clocksource *c) +{ + return *IXP4XX_OSTS; +} + unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ; EXPORT_SYMBOL(ixp4xx_timer_freq); static void __init ixp4xx_clocksource_init(void) { init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq); - clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32, - clocksource_mmio_readl_up); + clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32, + ixp4xx_clocksource_read); } /* diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c index 7bb78fd5a2a..c79162a50f2 100644 --- a/arch/arm/mach-mmp/brownstone.c +++ b/arch/arm/mach-mmp/brownstone.c @@ -177,9 +177,16 @@ static struct i2c_board_info brownstone_twsi1_info[] = { }; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { - .max_speed = 25000000, + .clk_delay_cycles = 0x1f, }; +static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = { + .clk_delay_cycles = 0x1f, + .flags = PXA_FLAG_CARD_PERMANENT + | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT, +}; + + static void __init brownstone_init(void) { mfp_config(ARRAY_AND_SIZE(brownstone_pin_config)); @@ -189,6 +196,7 @@ static void __init brownstone_init(void) mmp2_add_uart(3); mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info)); mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */ + mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */ /* enable 5v regulator */ platform_device_register(&brownstone_v_5vp_device); diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h index 2cbf6df09b8..de7b88826ad 100644 --- a/arch/arm/mach-mmp/include/mach/mmp2.h +++ b/arch/arm/mach-mmp/include/mach/mmp2.h @@ -1,7 +1,7 @@ #ifndef __ASM_MACH_MMP2_H #define __ASM_MACH_MMP2_H -#include <plat/sdhci.h> +#include <linux/platform_data/pxa_sdhci.h> struct sys_timer; diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c index 24172a0aad5..5d6421d6325 100644 --- a/arch/arm/mach-mmp/jasper.c +++ b/arch/arm/mach-mmp/jasper.c @@ -154,7 +154,7 @@ static struct i2c_board_info jasper_twsi1_info[] = { }; static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = { - .max_speed = 25000000, + .clk_delay_cycles = 0x1f, }; static void __init jasper_init(void) diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c index 8e6c3ac7f7c..079c18861d5 100644 --- 
a/arch/arm/mach-mmp/mmp2.c +++ b/arch/arm/mach-mmp/mmp2.c @@ -168,10 +168,10 @@ static struct clk_lookup mmp2_clkregs[] = { INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL), INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL), INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL), - INIT_CLKREG(&clk_sdh0, "sdhci-pxa.0", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh1, "sdhci-pxa.1", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh2, "sdhci-pxa.2", "PXA-SDHCLK"), - INIT_CLKREG(&clk_sdh3, "sdhci-pxa.3", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"), + INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"), }; static int __init mmp2_init(void) @@ -222,8 +222,8 @@ MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29); -MMP2_DEVICE(sdh0, "sdhci-pxa", 0, MMC, 0xd4280000, 0x120); -MMP2_DEVICE(sdh1, "sdhci-pxa", 1, MMC2, 0xd4280800, 0x120); -MMP2_DEVICE(sdh2, "sdhci-pxa", 2, MMC3, 0xd4281000, 0x120); -MMP2_DEVICE(sdh3, "sdhci-pxa", 3, MMC4, 0xd4281800, 0x120); +MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120); +MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120); +MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120); +MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120); diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index 72b4e763158..ab9f999106c 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c @@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0); static APBC_CLK(ssp5, PXA168_SSP5, 4, 0); static APBC_CLK(keypad, PXA168_KPC, 0, 32000); -static APMU_CLK(nand, NAND, 0x01db, 208000000); +static APMU_CLK(nand, NAND, 0x19b, 156000000); static APMU_CLK(lcd, LCD, 0x7f, 312000000); /* device and clock bindings */ diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c index 8f92ccd26ed..1464607aa60 100644 --- a/arch/arm/mach-mmp/pxa910.c +++ b/arch/arm/mach-mmp/pxa910.c @@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000); static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000); static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000); -static APMU_CLK(nand, NAND, 0x01db, 208000000); +static APMU_CLK(nand, NAND, 0x19b, 156000000); static APMU_CLK(u2o, USB, 0x1b, 480000000); /* device and clock bindings */ diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 1516896e8d1..888e92502e1 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig @@ -148,22 +148,6 @@ config MACH_MSM8960_RUMI3 endmenu -config MSM_IOMMU - bool "MSM IOMMU Support" - depends on ARCH_MSM8X60 || ARCH_MSM8960 - select IOMMU_API - default n - help - Support for the IOMMUs found on certain Qualcomm SOCs. - These IOMMUs allow virtualization of the address space used by most - cores within the multimedia subsystem. - - If unsure, say N here. 
- -config IOMMU_PGTABLES_L2 - def_bool y - depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n - config MSM_DEBUG_UART int default 1 if MSM_DEBUG_UART1 @@ -205,9 +189,6 @@ config MSM_GPIOMUX config MSM_V2_TLMM bool -config IOMMU_API - bool - config MSM_SCM bool endif diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile index 9519fd28a02..b70658c5ae0 100644 --- a/arch/arm/mach-msm/Makefile +++ b/arch/arm/mach-msm/Makefile @@ -3,7 +3,7 @@ obj-y += clock.o obj-$(CONFIG_DEBUG_FS) += clock-debug.o obj-$(CONFIG_MSM_VIC) += irq-vic.o -obj-$(CONFIG_MSM_IOMMU) += iommu.o iommu_dev.o devices-iommu.o +obj-$(CONFIG_MSM_IOMMU) += devices-iommu.o obj-$(CONFIG_ARCH_MSM7X00A) += dma.o irq.o acpuclock-arm11.o obj-$(CONFIG_ARCH_MSM7X30) += dma.o diff --git a/arch/arm/mach-msm/iommu.c b/arch/arm/mach-msm/iommu.c deleted file mode 100644 index 1a584e077c6..00000000000 --- a/arch/arm/mach-msm/iommu.c +++ /dev/null @@ -1,731 +0,0 @@ -/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/errno.h> -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/iommu.h> -#include <linux/clk.h> - -#include <asm/cacheflush.h> -#include <asm/sizes.h> - -#include <mach/iommu_hw-8xxx.h> -#include <mach/iommu.h> - -#define MRC(reg, processor, op1, crn, crm, op2) \ -__asm__ __volatile__ ( \ -" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ -: "=r" (reg)) - -#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) -#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) - -static int msm_iommu_tex_class[4]; - -DEFINE_SPINLOCK(msm_iommu_lock); - -struct msm_priv { - unsigned long *pgtable; - struct list_head list_attached; -}; - -static int __enable_clocks(struct msm_iommu_drvdata *drvdata) -{ - int ret; - - ret = clk_enable(drvdata->pclk); - if (ret) - goto fail; - - if (drvdata->clk) { - ret = clk_enable(drvdata->clk); - if (ret) - clk_disable(drvdata->pclk); - } -fail: - return ret; -} - -static void __disable_clocks(struct msm_iommu_drvdata *drvdata) -{ - if (drvdata->clk) - clk_disable(drvdata->clk); - clk_disable(drvdata->pclk); -} - -static int __flush_iotlb(struct iommu_domain *domain) -{ - struct msm_priv *priv = domain->priv; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - int ret = 0; -#ifndef CONFIG_IOMMU_PGTABLES_L2 - unsigned long *fl_table = priv->pgtable; - int i; - - if (!list_empty(&priv->list_attached)) { - dmac_flush_range(fl_table, fl_table + SZ_16K); - - for (i = 0; i < NUM_FL_PTE; i++) - if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { - void *sl_table = __va(fl_table[i] & - FL_BASE_MASK); - 
dmac_flush_range(sl_table, sl_table + SZ_4K); - } - } -#endif - - list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { - if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent) - BUG(); - - iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); - BUG_ON(!iommu_drvdata); - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0); - __disable_clocks(iommu_drvdata); - } -fail: - return ret; -} - -static void __reset_context(void __iomem *base, int ctx) -{ - SET_BPRCOSH(base, ctx, 0); - SET_BPRCISH(base, ctx, 0); - SET_BPRCNSH(base, ctx, 0); - SET_BPSHCFG(base, ctx, 0); - SET_BPMTCFG(base, ctx, 0); - SET_ACTLR(base, ctx, 0); - SET_SCTLR(base, ctx, 0); - SET_FSRRESTORE(base, ctx, 0); - SET_TTBR0(base, ctx, 0); - SET_TTBR1(base, ctx, 0); - SET_TTBCR(base, ctx, 0); - SET_BFBCR(base, ctx, 0); - SET_PAR(base, ctx, 0); - SET_FAR(base, ctx, 0); - SET_CTX_TLBIALL(base, ctx, 0); - SET_TLBFLPTER(base, ctx, 0); - SET_TLBSLPTER(base, ctx, 0); - SET_TLBLKCR(base, ctx, 0); - SET_PRRR(base, ctx, 0); - SET_NMRR(base, ctx, 0); -} - -static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) -{ - unsigned int prrr, nmrr; - __reset_context(base, ctx); - - /* Set up HTW mode */ - /* TLB miss configuration: perform HTW on miss */ - SET_TLBMCFG(base, ctx, 0x3); - - /* V2P configuration: HTW for access */ - SET_V2PCFG(base, ctx, 0x3); - - SET_TTBCR(base, ctx, 0); - SET_TTBR0_PA(base, ctx, (pgtable >> 14)); - - /* Invalidate the TLB for this context */ - SET_CTX_TLBIALL(base, ctx, 0); - - /* Set interrupt number to "secure" interrupt */ - SET_IRPTNDX(base, ctx, 0); - - /* Enable context fault interrupt */ - SET_CFEIE(base, ctx, 1); - - /* Stall access on a context fault and let the handler deal with it */ - SET_CFCFG(base, ctx, 1); - - /* Redirect all cacheable requests to L2 slave port. */ - SET_RCISH(base, ctx, 1); - SET_RCOSH(base, ctx, 1); - SET_RCNSH(base, ctx, 1); - - /* Turn on TEX Remap */ - SET_TRE(base, ctx, 1); - - /* Set TEX remap attributes */ - RCP15_PRRR(prrr); - RCP15_NMRR(nmrr); - SET_PRRR(base, ctx, prrr); - SET_NMRR(base, ctx, nmrr); - - /* Turn on BFB prefetch */ - SET_BFBDFE(base, ctx, 1); - -#ifdef CONFIG_IOMMU_PGTABLES_L2 - /* Configure page tables as inner-cacheable and shareable to reduce - * the TLB miss penalty. 
- */ - SET_TTBR0_SH(base, ctx, 1); - SET_TTBR1_SH(base, ctx, 1); - - SET_TTBR0_NOS(base, ctx, 1); - SET_TTBR1_NOS(base, ctx, 1); - - SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */ - SET_TTBR0_IRGNL(base, ctx, 1); - - SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */ - SET_TTBR1_IRGNL(base, ctx, 1); - - SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */ - SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */ -#endif - - /* Enable the MMU */ - SET_M(base, ctx, 1); -} - -static int msm_iommu_domain_init(struct iommu_domain *domain) -{ - struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL); - - if (!priv) - goto fail_nomem; - - INIT_LIST_HEAD(&priv->list_attached); - priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL, - get_order(SZ_16K)); - - if (!priv->pgtable) - goto fail_nomem; - - memset(priv->pgtable, 0, SZ_16K); - domain->priv = priv; - return 0; - -fail_nomem: - kfree(priv); - return -ENOMEM; -} - -static void msm_iommu_domain_destroy(struct iommu_domain *domain) -{ - struct msm_priv *priv; - unsigned long flags; - unsigned long *fl_table; - int i; - - spin_lock_irqsave(&msm_iommu_lock, flags); - priv = domain->priv; - domain->priv = NULL; - - if (priv) { - fl_table = priv->pgtable; - - for (i = 0; i < NUM_FL_PTE; i++) - if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) - free_page((unsigned long) __va(((fl_table[i]) & - FL_BASE_MASK))); - - free_pages((unsigned long)priv->pgtable, get_order(SZ_16K)); - priv->pgtable = NULL; - } - - kfree(priv); - spin_unlock_irqrestore(&msm_iommu_lock, flags); -} - -static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) -{ - struct msm_priv *priv; - struct msm_iommu_ctx_dev *ctx_dev; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - struct msm_iommu_ctx_drvdata *tmp_drvdata; - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = domain->priv; - - if (!priv || !dev) { - ret = -EINVAL; - goto fail; - } - - iommu_drvdata = dev_get_drvdata(dev->parent); - ctx_drvdata = dev_get_drvdata(dev); - ctx_dev = dev->platform_data; - - if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) { - ret = -EINVAL; - goto fail; - } - - if (!list_empty(&ctx_drvdata->attached_elm)) { - ret = -EBUSY; - goto fail; - } - - list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm) - if (tmp_drvdata == ctx_drvdata) { - ret = -EBUSY; - goto fail; - } - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - __program_context(iommu_drvdata->base, ctx_dev->num, - __pa(priv->pgtable)); - - __disable_clocks(iommu_drvdata); - list_add(&(ctx_drvdata->attached_elm), &priv->list_attached); - ret = __flush_iotlb(domain); - -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static void msm_iommu_detach_dev(struct iommu_domain *domain, - struct device *dev) -{ - struct msm_priv *priv; - struct msm_iommu_ctx_dev *ctx_dev; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - unsigned long flags; - int ret; - - spin_lock_irqsave(&msm_iommu_lock, flags); - priv = domain->priv; - - if (!priv || !dev) - goto fail; - - iommu_drvdata = dev_get_drvdata(dev->parent); - ctx_drvdata = dev_get_drvdata(dev); - ctx_dev = dev->platform_data; - - if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) - goto fail; - - ret = __flush_iotlb(domain); - if (ret) - goto fail; - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - __reset_context(iommu_drvdata->base, ctx_dev->num); - __disable_clocks(iommu_drvdata); - 
list_del_init(&ctx_drvdata->attached_elm); - -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); -} - -static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, - phys_addr_t pa, int order, int prot) -{ - struct msm_priv *priv; - unsigned long flags; - unsigned long *fl_table; - unsigned long *fl_pte; - unsigned long fl_offset; - unsigned long *sl_table; - unsigned long *sl_pte; - unsigned long sl_offset; - unsigned int pgprot; - size_t len = 0x1000UL << order; - int ret = 0, tex, sh; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0; - tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK]; - - if (tex < 0 || tex > NUM_TEX_CLASS - 1) { - ret = -EINVAL; - goto fail; - } - - priv = domain->priv; - if (!priv) { - ret = -EINVAL; - goto fail; - } - - fl_table = priv->pgtable; - - if (len != SZ_16M && len != SZ_1M && - len != SZ_64K && len != SZ_4K) { - pr_debug("Bad size: %d\n", len); - ret = -EINVAL; - goto fail; - } - - if (!fl_table) { - pr_debug("Null page table\n"); - ret = -EINVAL; - goto fail; - } - - if (len == SZ_16M || len == SZ_1M) { - pgprot = sh ? FL_SHARED : 0; - pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0; - pgprot |= tex & 0x02 ? FL_CACHEABLE : 0; - pgprot |= tex & 0x04 ? FL_TEX0 : 0; - } else { - pgprot = sh ? SL_SHARED : 0; - pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0; - pgprot |= tex & 0x02 ? SL_CACHEABLE : 0; - pgprot |= tex & 0x04 ? SL_TEX0 : 0; - } - - fl_offset = FL_OFFSET(va); /* Upper 12 bits */ - fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ - - if (len == SZ_16M) { - int i = 0; - for (i = 0; i < 16; i++) - *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION | - FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT | - FL_SHARED | FL_NG | pgprot; - } - - if (len == SZ_1M) - *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG | - FL_TYPE_SECT | FL_SHARED | pgprot; - - /* Need a 2nd level table */ - if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) { - unsigned long *sl; - sl = (unsigned long *) __get_free_pages(GFP_ATOMIC, - get_order(SZ_4K)); - - if (!sl) { - pr_debug("Could not allocate second level table\n"); - ret = -ENOMEM; - goto fail; - } - - memset(sl, 0, SZ_4K); - *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE); - } - - sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); - sl_offset = SL_OFFSET(va); - sl_pte = sl_table + sl_offset; - - - if (len == SZ_4K) - *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG | - SL_SHARED | SL_TYPE_SMALL | pgprot; - - if (len == SZ_64K) { - int i; - - for (i = 0; i < 16; i++) - *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 | - SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot; - } - - ret = __flush_iotlb(domain); -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, - int order) -{ - struct msm_priv *priv; - unsigned long flags; - unsigned long *fl_table; - unsigned long *fl_pte; - unsigned long fl_offset; - unsigned long *sl_table; - unsigned long *sl_pte; - unsigned long sl_offset; - size_t len = 0x1000UL << order; - int i, ret = 0; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = domain->priv; - - if (!priv) { - ret = -ENODEV; - goto fail; - } - - fl_table = priv->pgtable; - - if (len != SZ_16M && len != SZ_1M && - len != SZ_64K && len != SZ_4K) { - pr_debug("Bad length: %d\n", len); - ret = -EINVAL; - goto fail; - } - - if (!fl_table) { - pr_debug("Null page table\n"); - ret = -EINVAL; - goto fail; - } - - fl_offset 
= FL_OFFSET(va); /* Upper 12 bits */ - fl_pte = fl_table + fl_offset; /* int pointers, 4 bytes */ - - if (*fl_pte == 0) { - pr_debug("First level PTE is 0\n"); - ret = -ENODEV; - goto fail; - } - - /* Unmap supersection */ - if (len == SZ_16M) - for (i = 0; i < 16; i++) - *(fl_pte+i) = 0; - - if (len == SZ_1M) - *fl_pte = 0; - - sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK)); - sl_offset = SL_OFFSET(va); - sl_pte = sl_table + sl_offset; - - if (len == SZ_64K) { - for (i = 0; i < 16; i++) - *(sl_pte+i) = 0; - } - - if (len == SZ_4K) - *sl_pte = 0; - - if (len == SZ_4K || len == SZ_64K) { - int used = 0; - - for (i = 0; i < NUM_SL_PTE; i++) - if (sl_table[i]) - used = 1; - if (!used) { - free_page((unsigned long)sl_table); - *fl_pte = 0; - } - } - - ret = __flush_iotlb(domain); -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, - unsigned long va) -{ - struct msm_priv *priv; - struct msm_iommu_drvdata *iommu_drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata; - unsigned int par; - unsigned long flags; - void __iomem *base; - phys_addr_t ret = 0; - int ctx; - - spin_lock_irqsave(&msm_iommu_lock, flags); - - priv = domain->priv; - if (list_empty(&priv->list_attached)) - goto fail; - - ctx_drvdata = list_entry(priv->list_attached.next, - struct msm_iommu_ctx_drvdata, attached_elm); - iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); - - base = iommu_drvdata->base; - ctx = ctx_drvdata->num; - - ret = __enable_clocks(iommu_drvdata); - if (ret) - goto fail; - - /* Invalidate context TLB */ - SET_CTX_TLBIALL(base, ctx, 0); - SET_V2PPR(base, ctx, va & V2Pxx_VA); - - par = GET_PAR(base, ctx); - - /* We are dealing with a supersection */ - if (GET_NOFAULT_SS(base, ctx)) - ret = (par & 0xFF000000) | (va & 0x00FFFFFF); - else /* Upper 20 bits from PAR, lower 12 from VA */ - ret = (par & 0xFFFFF000) | (va & 0x00000FFF); - - if (GET_FAULT(base, ctx)) - ret = 0; - - __disable_clocks(iommu_drvdata); -fail: - spin_unlock_irqrestore(&msm_iommu_lock, flags); - return ret; -} - -static int msm_iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap) -{ - return 0; -} - -static void print_ctx_regs(void __iomem *base, int ctx) -{ - unsigned int fsr = GET_FSR(base, ctx); - pr_err("FAR = %08x PAR = %08x\n", - GET_FAR(base, ctx), GET_PAR(base, ctx)); - pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr, - (fsr & 0x02) ? "TF " : "", - (fsr & 0x04) ? "AFF " : "", - (fsr & 0x08) ? "APF " : "", - (fsr & 0x10) ? "TLBMF " : "", - (fsr & 0x20) ? "HTWDEEF " : "", - (fsr & 0x40) ? "HTWSEEF " : "", - (fsr & 0x80) ? "MHF " : "", - (fsr & 0x10000) ? "SL " : "", - (fsr & 0x40000000) ? "SS " : "", - (fsr & 0x80000000) ? 
"MULTI " : ""); - - pr_err("FSYNR0 = %08x FSYNR1 = %08x\n", - GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx)); - pr_err("TTBR0 = %08x TTBR1 = %08x\n", - GET_TTBR0(base, ctx), GET_TTBR1(base, ctx)); - pr_err("SCTLR = %08x ACTLR = %08x\n", - GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); - pr_err("PRRR = %08x NMRR = %08x\n", - GET_PRRR(base, ctx), GET_NMRR(base, ctx)); -} - -irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id) -{ - struct msm_iommu_drvdata *drvdata = dev_id; - void __iomem *base; - unsigned int fsr; - int i, ret; - - spin_lock(&msm_iommu_lock); - - if (!drvdata) { - pr_err("Invalid device ID in context interrupt handler\n"); - goto fail; - } - - base = drvdata->base; - - pr_err("Unexpected IOMMU page fault!\n"); - pr_err("base = %08x\n", (unsigned int) base); - - ret = __enable_clocks(drvdata); - if (ret) - goto fail; - - for (i = 0; i < drvdata->ncb; i++) { - fsr = GET_FSR(base, i); - if (fsr) { - pr_err("Fault occurred in context %d.\n", i); - pr_err("Interesting registers:\n"); - print_ctx_regs(base, i); - SET_FSR(base, i, 0x4000000F); - } - } - __disable_clocks(drvdata); -fail: - spin_unlock(&msm_iommu_lock); - return 0; -} - -static struct iommu_ops msm_iommu_ops = { - .domain_init = msm_iommu_domain_init, - .domain_destroy = msm_iommu_domain_destroy, - .attach_dev = msm_iommu_attach_dev, - .detach_dev = msm_iommu_detach_dev, - .map = msm_iommu_map, - .unmap = msm_iommu_unmap, - .iova_to_phys = msm_iommu_iova_to_phys, - .domain_has_cap = msm_iommu_domain_has_cap -}; - -static int __init get_tex_class(int icp, int ocp, int mt, int nos) -{ - int i = 0; - unsigned int prrr = 0; - unsigned int nmrr = 0; - int c_icp, c_ocp, c_mt, c_nos; - - RCP15_PRRR(prrr); - RCP15_NMRR(nmrr); - - for (i = 0; i < NUM_TEX_CLASS; i++) { - c_nos = PRRR_NOS(prrr, i); - c_mt = PRRR_MT(prrr, i); - c_icp = NMRR_ICP(nmrr, i); - c_ocp = NMRR_OCP(nmrr, i); - - if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos) - return i; - } - - return -ENODEV; -} - -static void __init setup_iommu_tex_classes(void) -{ - msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] = - get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1); - - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] = - get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1); - - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] = - get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1); - - msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] = - get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1); -} - -static int __init msm_iommu_init(void) -{ - setup_iommu_tex_classes(); - register_iommu(&msm_iommu_ops); - return 0; -} - -subsys_initcall(msm_iommu_init); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); diff --git a/arch/arm/mach-msm/iommu_dev.c b/arch/arm/mach-msm/iommu_dev.c deleted file mode 100644 index 8e8fb079852..00000000000 --- a/arch/arm/mach-msm/iommu_dev.c +++ /dev/null @@ -1,422 +0,0 @@ -/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA - * 02110-1301, USA. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/clk.h> -#include <linux/iommu.h> -#include <linux/interrupt.h> -#include <linux/err.h> -#include <linux/slab.h> - -#include <mach/iommu_hw-8xxx.h> -#include <mach/iommu.h> -#include <mach/clk.h> - -struct iommu_ctx_iter_data { - /* input */ - const char *name; - - /* output */ - struct device *dev; -}; - -static struct platform_device *msm_iommu_root_dev; - -static int each_iommu_ctx(struct device *dev, void *data) -{ - struct iommu_ctx_iter_data *res = data; - struct msm_iommu_ctx_dev *c = dev->platform_data; - - if (!res || !c || !c->name || !res->name) - return -EINVAL; - - if (!strcmp(res->name, c->name)) { - res->dev = dev; - return 1; - } - return 0; -} - -static int each_iommu(struct device *dev, void *data) -{ - return device_for_each_child(dev, data, each_iommu_ctx); -} - -struct device *msm_iommu_get_ctx(const char *ctx_name) -{ - struct iommu_ctx_iter_data r; - int found; - - if (!msm_iommu_root_dev) { - pr_err("No root IOMMU device.\n"); - goto fail; - } - - r.name = ctx_name; - found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu); - - if (!found) { - pr_err("Could not find context <%s>\n", ctx_name); - goto fail; - } - - return r.dev; -fail: - return NULL; -} -EXPORT_SYMBOL(msm_iommu_get_ctx); - -static void msm_iommu_reset(void __iomem *base, int ncb) -{ - int ctx; - - SET_RPUE(base, 0); - SET_RPUEIE(base, 0); - SET_ESRRESTORE(base, 0); - SET_TBE(base, 0); - SET_CR(base, 0); - SET_SPDMBE(base, 0); - SET_TESTBUSCR(base, 0); - SET_TLBRSW(base, 0); - SET_GLOBAL_TLBIALL(base, 0); - SET_RPU_ACR(base, 0); - SET_TLBLKCRWE(base, 1); - - for (ctx = 0; ctx < ncb; ctx++) { - SET_BPRCOSH(base, ctx, 0); - SET_BPRCISH(base, ctx, 0); - SET_BPRCNSH(base, ctx, 0); - SET_BPSHCFG(base, ctx, 0); - SET_BPMTCFG(base, ctx, 0); - SET_ACTLR(base, ctx, 0); - SET_SCTLR(base, ctx, 0); - SET_FSRRESTORE(base, ctx, 0); - SET_TTBR0(base, ctx, 0); - SET_TTBR1(base, ctx, 0); - SET_TTBCR(base, ctx, 0); - SET_BFBCR(base, ctx, 0); - SET_PAR(base, ctx, 0); - SET_FAR(base, ctx, 0); - SET_CTX_TLBIALL(base, ctx, 0); - SET_TLBFLPTER(base, ctx, 0); - SET_TLBSLPTER(base, ctx, 0); - SET_TLBLKCR(base, ctx, 0); - SET_PRRR(base, ctx, 0); - SET_NMRR(base, ctx, 0); - SET_CONTEXTIDR(base, ctx, 0); - } -} - -static int msm_iommu_probe(struct platform_device *pdev) -{ - struct resource *r, *r2; - struct clk *iommu_clk; - struct clk *iommu_pclk; - struct msm_iommu_drvdata *drvdata; - struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; - void __iomem *regs_base; - resource_size_t len; - int ret, irq, par; - - if (pdev->id == -1) { - msm_iommu_root_dev = pdev; - return 0; - } - - drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL); - - if (!drvdata) { - ret = -ENOMEM; - goto fail; - } - - if (!iommu_dev) { - ret = -ENODEV; - goto fail; - } - - iommu_pclk = clk_get(NULL, "smmu_pclk"); - if (IS_ERR(iommu_pclk)) { - ret = -ENODEV; - goto fail; - } - - ret = clk_enable(iommu_pclk); - if (ret) - goto fail_enable; - - iommu_clk = clk_get(&pdev->dev, "iommu_clk"); - - if (!IS_ERR(iommu_clk)) { - if (clk_get_rate(iommu_clk) == 0) - clk_set_min_rate(iommu_clk, 1); - - ret = clk_enable(iommu_clk); - if (ret) { - 
clk_put(iommu_clk); - goto fail_pclk; - } - } else - iommu_clk = NULL; - - r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase"); - - if (!r) { - ret = -ENODEV; - goto fail_clk; - } - - len = resource_size(r); - - r2 = request_mem_region(r->start, len, r->name); - if (!r2) { - pr_err("Could not request memory region: start=%p, len=%d\n", - (void *) r->start, len); - ret = -EBUSY; - goto fail_clk; - } - - regs_base = ioremap(r2->start, len); - - if (!regs_base) { - pr_err("Could not ioremap: start=%p, len=%d\n", - (void *) r2->start, len); - ret = -EBUSY; - goto fail_mem; - } - - irq = platform_get_irq_byname(pdev, "secure_irq"); - if (irq < 0) { - ret = -ENODEV; - goto fail_io; - } - - msm_iommu_reset(regs_base, iommu_dev->ncb); - - SET_M(regs_base, 0, 1); - SET_PAR(regs_base, 0, 0); - SET_V2PCFG(regs_base, 0, 1); - SET_V2PPR(regs_base, 0, 0); - par = GET_PAR(regs_base, 0); - SET_V2PCFG(regs_base, 0, 0); - SET_M(regs_base, 0, 0); - - if (!par) { - pr_err("%s: Invalid PAR value detected\n", iommu_dev->name); - ret = -ENODEV; - goto fail_io; - } - - ret = request_irq(irq, msm_iommu_fault_handler, 0, - "msm_iommu_secure_irpt_handler", drvdata); - if (ret) { - pr_err("Request IRQ %d failed with ret=%d\n", irq, ret); - goto fail_io; - } - - - drvdata->pclk = iommu_pclk; - drvdata->clk = iommu_clk; - drvdata->base = regs_base; - drvdata->irq = irq; - drvdata->ncb = iommu_dev->ncb; - - pr_info("device %s mapped at %p, irq %d with %d ctx banks\n", - iommu_dev->name, regs_base, irq, iommu_dev->ncb); - - platform_set_drvdata(pdev, drvdata); - - if (iommu_clk) - clk_disable(iommu_clk); - - clk_disable(iommu_pclk); - - return 0; -fail_io: - iounmap(regs_base); -fail_mem: - release_mem_region(r->start, len); -fail_clk: - if (iommu_clk) { - clk_disable(iommu_clk); - clk_put(iommu_clk); - } -fail_pclk: - clk_disable(iommu_pclk); -fail_enable: - clk_put(iommu_pclk); -fail: - kfree(drvdata); - return ret; -} - -static int msm_iommu_remove(struct platform_device *pdev) -{ - struct msm_iommu_drvdata *drv = NULL; - - drv = platform_get_drvdata(pdev); - if (drv) { - if (drv->clk) - clk_put(drv->clk); - clk_put(drv->pclk); - memset(drv, 0, sizeof(*drv)); - kfree(drv); - platform_set_drvdata(pdev, NULL); - } - return 0; -} - -static int msm_iommu_ctx_probe(struct platform_device *pdev) -{ - struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; - struct msm_iommu_drvdata *drvdata; - struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL; - int i, ret; - if (!c || !pdev->dev.parent) { - ret = -EINVAL; - goto fail; - } - - drvdata = dev_get_drvdata(pdev->dev.parent); - - if (!drvdata) { - ret = -ENODEV; - goto fail; - } - - ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL); - if (!ctx_drvdata) { - ret = -ENOMEM; - goto fail; - } - ctx_drvdata->num = c->num; - ctx_drvdata->pdev = pdev; - - INIT_LIST_HEAD(&ctx_drvdata->attached_elm); - platform_set_drvdata(pdev, ctx_drvdata); - - ret = clk_enable(drvdata->pclk); - if (ret) - goto fail; - - if (drvdata->clk) { - ret = clk_enable(drvdata->clk); - if (ret) { - clk_disable(drvdata->pclk); - goto fail; - } - } - - /* Program the M2V tables for this context */ - for (i = 0; i < MAX_NUM_MIDS; i++) { - int mid = c->mids[i]; - if (mid == -1) - break; - - SET_M2VCBR_N(drvdata->base, mid, 0); - SET_CBACR_N(drvdata->base, c->num, 0); - - /* Set VMID = 0 */ - SET_VMID(drvdata->base, mid, 0); - - /* Set the context number for that MID to this context */ - SET_CBNDX(drvdata->base, mid, c->num); - - /* Set MID associated with this context bank to 0*/ - 
SET_CBVMID(drvdata->base, c->num, 0); - - /* Set the ASID for TLB tagging for this context */ - SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num); - - /* Set security bit override to be Non-secure */ - SET_NSCFG(drvdata->base, mid, 3); - } - - if (drvdata->clk) - clk_disable(drvdata->clk); - clk_disable(drvdata->pclk); - - dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); - return 0; -fail: - kfree(ctx_drvdata); - return ret; -} - -static int msm_iommu_ctx_remove(struct platform_device *pdev) -{ - struct msm_iommu_ctx_drvdata *drv = NULL; - drv = platform_get_drvdata(pdev); - if (drv) { - memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata)); - kfree(drv); - platform_set_drvdata(pdev, NULL); - } - return 0; -} - -static struct platform_driver msm_iommu_driver = { - .driver = { - .name = "msm_iommu", - }, - .probe = msm_iommu_probe, - .remove = msm_iommu_remove, -}; - -static struct platform_driver msm_iommu_ctx_driver = { - .driver = { - .name = "msm_iommu_ctx", - }, - .probe = msm_iommu_ctx_probe, - .remove = msm_iommu_ctx_remove, -}; - -static int __init msm_iommu_driver_init(void) -{ - int ret; - ret = platform_driver_register(&msm_iommu_driver); - if (ret != 0) { - pr_err("Failed to register IOMMU driver\n"); - goto error; - } - - ret = platform_driver_register(&msm_iommu_ctx_driver); - if (ret != 0) { - pr_err("Failed to register IOMMU context driver\n"); - goto error; - } - -error: - return ret; -} - -static void __exit msm_iommu_driver_exit(void) -{ - platform_driver_unregister(&msm_iommu_ctx_driver); - platform_driver_unregister(&msm_iommu_driver); -} - -subsys_initcall(msm_iommu_driver_init); -module_exit(msm_iommu_driver_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>"); diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 38b95e949d1..63621f152c9 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c @@ -23,6 +23,8 @@ #include <linux/io.h> #include <asm/mach/time.h> +#include <asm/hardware/gic.h> + #include <mach/msm_iomap.h> #include <mach/cpu.h> @@ -55,10 +57,12 @@ enum timer_location { #if defined(CONFIG_ARCH_QSD8X50) #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */ #define MSM_DGT_SHIFT (0) -#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \ - defined(CONFIG_ARCH_MSM8960) +#elif defined(CONFIG_ARCH_MSM7X30) #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */ #define MSM_DGT_SHIFT (0) +#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) +#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */ +#define MSM_DGT_SHIFT (0) #else #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */ #define MSM_DGT_SHIFT (5) @@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs) { struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource); - return readl(clk->global_counter); + /* + * Shift timer count down by a constant due to unreliable lower bits + * on some targets. 
+ */ + return readl(clk->global_counter) >> clk->shift; } static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt) diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c index 4efa02ee163..add0d42de7a 100644 --- a/arch/arm/mach-mx5/board-cpuimx51.c +++ b/arch/arm/mach-mx5/board-cpuimx51.c @@ -245,6 +245,8 @@ __setup("otg_mode=", eukrea_cpuimx51_otg_mode); */ static void __init eukrea_cpuimx51_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51_pads, ARRAY_SIZE(eukrea_cpuimx51_pads)); diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c index 5ef25a59614..ff096d58729 100644 --- a/arch/arm/mach-mx5/board-cpuimx51sd.c +++ b/arch/arm/mach-mx5/board-cpuimx51sd.c @@ -264,6 +264,8 @@ static struct platform_device *platform_devices[] __initdata = { static void __init eukrea_cpuimx51sd_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads, ARRAY_SIZE(eukrea_cpuimx51sd_pads)); diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c index 11210e1ae42..7de25c6712e 100644 --- a/arch/arm/mach-mx5/board-mx50_rdp.c +++ b/arch/arm/mach-mx5/board-mx50_rdp.c @@ -192,6 +192,8 @@ static const struct imxi2c_platform_data i2c_data __initconst = { */ static void __init mx50_rdp_board_init(void) { + imx50_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx50_rdp_pads, ARRAY_SIZE(mx50_rdp_pads)); diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c index 63dfbeafbc1..3112d15feeb 100644 --- a/arch/arm/mach-mx5/board-mx51_3ds.c +++ b/arch/arm/mach-mx5/board-mx51_3ds.c @@ -135,6 +135,8 @@ static struct spi_board_info mx51_3ds_spi_nor_device[] = { */ static void __init mx51_3ds_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx51_3ds_pads, ARRAY_SIZE(mx51_3ds_pads)); diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c index c7b3fabf50f..6021dd00ec7 100644 --- a/arch/arm/mach-mx5/board-mx51_babbage.c +++ b/arch/arm/mach-mx5/board-mx51_babbage.c @@ -340,6 +340,8 @@ static void __init mx51_babbage_init(void) iomux_v3_cfg_t power_key = _MX51_PAD_EIM_A27__GPIO2_21 | MUX_PAD_CTRL(PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP); + imx51_soc_init(); + #if defined(CONFIG_CPU_FREQ_IMX) get_cpu_op = mx51_get_cpu_op; #endif diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c index 6e362315291..3be603b9075 100644 --- a/arch/arm/mach-mx5/board-mx51_efikamx.c +++ b/arch/arm/mach-mx5/board-mx51_efikamx.c @@ -236,6 +236,8 @@ late_initcall(mx51_efikamx_power_init); static void __init mx51_efikamx_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx51efikamx_pads, ARRAY_SIZE(mx51efikamx_pads)); efika_board_common_init(); diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c index 474fc6e4c6d..4b2e522de0f 100644 --- a/arch/arm/mach-mx5/board-mx51_efikasb.c +++ b/arch/arm/mach-mx5/board-mx51_efikasb.c @@ -248,6 +248,8 @@ static void __init mx51_efikasb_board_id(void) static void __init efikasb_board_init(void) { + imx51_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx51efikasb_pads, ARRAY_SIZE(mx51efikasb_pads)); efika_board_common_init(); diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c index f87d571882c..0d9218a6e2d 100644 --- a/arch/arm/mach-mx5/board-mx53_evk.c +++ b/arch/arm/mach-mx5/board-mx53_evk.c @@ -117,6 +117,8 @@ 
static const struct spi_imx_master mx53_evk_spi_data __initconst = { static void __init mx53_evk_board_init(void) { + imx53_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads, ARRAY_SIZE(mx53_evk_pads)); mx53_evk_init_uart(); diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c index 1b947e8c9c0..359c3e248ad 100644 --- a/arch/arm/mach-mx5/board-mx53_loco.c +++ b/arch/arm/mach-mx5/board-mx53_loco.c @@ -227,6 +227,8 @@ static const struct imxi2c_platform_data mx53_loco_i2c_data __initconst = { static void __init mx53_loco_board_init(void) { + imx53_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads, ARRAY_SIZE(mx53_loco_pads)); imx53_add_imx_uart(0, NULL); diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c index 817c08938f5..bc02894eafe 100644 --- a/arch/arm/mach-mx5/board-mx53_smd.c +++ b/arch/arm/mach-mx5/board-mx53_smd.c @@ -113,6 +113,8 @@ static const struct imxi2c_platform_data mx53_smd_i2c_data __initconst = { static void __init mx53_smd_board_init(void) { + imx53_soc_init(); + mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads, ARRAY_SIZE(mx53_smd_pads)); mx53_smd_init_uart(); diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c index 6b89c1bf4eb..cd79e3435e2 100644 --- a/arch/arm/mach-mx5/clock-mx51-mx53.c +++ b/arch/arm/mach-mx5/clock-mx51-mx53.c @@ -1442,7 +1442,8 @@ static struct clk_lookup mx51_lookups[] = { _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk) _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk) _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk) - _REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk) + /* i.mx51 has the i.mx35 type cspi */ + _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk) @@ -1471,9 +1472,11 @@ static struct clk_lookup mx53_lookups[] = { _REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk) _REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk) - _REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk) - _REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk) - _REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk) + /* i.mx53 has the i.mx51 type ecspi */ + _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk) + _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk) + /* i.mx53 has the i.mx25 type cspi */ + _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk) _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk) _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk) }; diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c index 153ada53e57..371ca8c8414 100644 --- a/arch/arm/mach-mx5/devices.c +++ b/arch/arm/mach-mx5/devices.c @@ -12,7 +12,6 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> -#include <linux/gpio.h> #include <mach/hardware.h> #include <mach/imx-uart.h> #include <mach/irqs.h> @@ -119,66 +118,3 @@ struct platform_device mxc_usbh2_device = { .coherent_dma_mask = DMA_BIT_MASK(32), }, }; - -static struct mxc_gpio_port mxc_gpio_ports[] = { - { - .chip.label = "gpio-0", - .base = MX51_IO_ADDRESS(MX51_GPIO1_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO1_LOW, - .irq_high = MX51_MXC_INT_GPIO1_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START - }, - { - .chip.label = "gpio-1", - .base = MX51_IO_ADDRESS(MX51_GPIO2_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO2_LOW, - .irq_high = MX51_MXC_INT_GPIO2_HIGH, - .virtual_irq_start = 
MXC_GPIO_IRQ_START + 32 * 1 - }, - { - .chip.label = "gpio-2", - .base = MX51_IO_ADDRESS(MX51_GPIO3_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO3_LOW, - .irq_high = MX51_MXC_INT_GPIO3_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 2 - }, - { - .chip.label = "gpio-3", - .base = MX51_IO_ADDRESS(MX51_GPIO4_BASE_ADDR), - .irq = MX51_MXC_INT_GPIO4_LOW, - .irq_high = MX51_MXC_INT_GPIO4_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 3 - }, - { - .chip.label = "gpio-4", - .base = MX53_IO_ADDRESS(MX53_GPIO5_BASE_ADDR), - .irq = MX53_INT_GPIO5_LOW, - .irq_high = MX53_INT_GPIO5_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 4 - }, - { - .chip.label = "gpio-5", - .base = MX53_IO_ADDRESS(MX53_GPIO6_BASE_ADDR), - .irq = MX53_INT_GPIO6_LOW, - .irq_high = MX53_INT_GPIO6_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 5 - }, - { - .chip.label = "gpio-6", - .base = MX53_IO_ADDRESS(MX53_GPIO7_BASE_ADDR), - .irq = MX53_INT_GPIO7_LOW, - .irq_high = MX53_INT_GPIO7_HIGH, - .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 6 - }, -}; - -int __init imx51_register_gpios(void) -{ - return mxc_gpio_init(mxc_gpio_ports, 4); -} - -int __init imx53_register_gpios(void) -{ - return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports)); -} - diff --git a/arch/arm/mach-mx5/mm-mx50.c b/arch/arm/mach-mx5/mm-mx50.c index b9c363b514a..77e374c726f 100644 --- a/arch/arm/mach-mx5/mm-mx50.c +++ b/arch/arm/mach-mx5/mm-mx50.c @@ -26,7 +26,6 @@ #include <mach/hardware.h> #include <mach/common.h> #include <mach/iomux-v3.h> -#include <mach/gpio.h> #include <mach/irqs.h> /* @@ -56,17 +55,18 @@ void __init imx50_init_early(void) mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR)); } -static struct mxc_gpio_port imx50_gpio_ports[] = { - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 0, 1, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 1, 2, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 2, 3, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 3, 4, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 4, 5, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 5, 6, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH), -}; - void __init mx50_init_irq(void) { tzic_init_irq(MX50_IO_ADDRESS(MX50_TZIC_BASE_ADDR)); - mxc_gpio_init(imx50_gpio_ports, ARRAY_SIZE(imx50_gpio_ports)); +} + +void __init imx50_soc_init(void) +{ + /* i.mx50 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH); + mxc_register_gpio("imx31-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH); + mxc_register_gpio("imx31-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH); + mxc_register_gpio("imx31-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH); + mxc_register_gpio("imx31-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH); + mxc_register_gpio("imx31-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH); } diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c index ff557301b42..665843d6c2b 100644 --- a/arch/arm/mach-mx5/mm.c +++ b/arch/arm/mach-mx5/mm.c @@ -69,8 +69,6 @@ void __init imx53_init_early(void) mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR)); } -int imx51_register_gpios(void); - void __init mx51_init_irq(void) { unsigned long tzic_addr; @@ -86,11 +84,8 @@ void 
__init mx51_init_irq(void) panic("unable to map TZIC interrupt controller\n"); tzic_init_irq(tzic_virt); - imx51_register_gpios(); } -int imx53_register_gpios(void); - void __init mx53_init_irq(void) { unsigned long tzic_addr; @@ -103,5 +98,25 @@ void __init mx53_init_irq(void) panic("unable to map TZIC interrupt controller\n"); tzic_init_irq(tzic_virt); - imx53_register_gpios(); +} + +void __init imx51_soc_init(void) +{ + /* i.mx51 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO1_LOW, MX51_MXC_INT_GPIO1_HIGH); + mxc_register_gpio("imx31-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO2_LOW, MX51_MXC_INT_GPIO2_HIGH); + mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO3_LOW, MX51_MXC_INT_GPIO3_HIGH); + mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO4_LOW, MX51_MXC_INT_GPIO4_HIGH); +} + +void __init imx53_soc_init(void) +{ + /* i.mx53 has the i.mx31 type gpio */ + mxc_register_gpio("imx31-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH); + mxc_register_gpio("imx31-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH); + mxc_register_gpio("imx31-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH); + mxc_register_gpio("imx31-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH); + mxc_register_gpio("imx31-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH); + mxc_register_gpio("imx31-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH); + mxc_register_gpio("imx31-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH); } diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile index 58e892376bf..6c38262a3aa 100644 --- a/arch/arm/mach-mxs/Makefile +++ b/arch/arm/mach-mxs/Makefile @@ -1,5 +1,5 @@ # Common support -obj-y := clock.o devices.o gpio.o icoll.o iomux.o system.o timer.o +obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o obj-$(CONFIG_MXS_OCOTP) += ocotp.o obj-$(CONFIG_PM) += pm.o diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c index cfdb6b28470..fe3e847930c 100644 --- a/arch/arm/mach-mxs/devices.c +++ b/arch/arm/mach-mxs/devices.c @@ -88,3 +88,14 @@ int __init mxs_add_amba_device(const struct amba_device *dev) return amba_device_register(adev, &iomem_resource); } + +struct device mxs_apbh_bus = { + .init_name = "mxs_apbh", + .parent = &platform_bus, +}; + +static int __init mxs_device_init(void) +{ + return device_register(&mxs_apbh_bus); +} +core_initcall(mxs_device_init); diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile index 324f2824d38..351915c683f 100644 --- a/arch/arm/mach-mxs/devices/Makefile +++ b/arch/arm/mach-mxs/devices/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o +obj-y += platform-gpio-mxs.o obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o diff --git a/arch/arm/mach-mxs/devices/platform-auart.c b/arch/arm/mach-mxs/devices/platform-auart.c index 796606cce0c..27608f5d2ac 100644 --- a/arch/arm/mach-mxs/devices/platform-auart.c +++ b/arch/arm/mach-mxs/devices/platform-auart.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public 
License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/mx23.h> #include <mach/mx28.h> diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c index 295c4424d5d..6a0202b1016 100644 --- a/arch/arm/mach-mxs/devices/platform-dma.c +++ b/arch/arm/mach-mxs/devices/platform-dma.c @@ -6,6 +6,7 @@ * Free Software Foundation. */ #include <linux/compiler.h> +#include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/init.h> diff --git a/arch/arm/mach-mxs/devices/platform-fec.c b/arch/arm/mach-mxs/devices/platform-fec.c index 9859cf28333..ae96a4fd8f1 100644 --- a/arch/arm/mach-mxs/devices/platform-fec.c +++ b/arch/arm/mach-mxs/devices/platform-fec.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/mx28.h> #include <mach/devices-common.h> diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c new file mode 100644 index 00000000000..ed0885e414e --- /dev/null +++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c @@ -0,0 +1,53 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/init.h> + +#include <mach/mx23.h> +#include <mach/mx28.h> +#include <mach/devices-common.h> + +struct platform_device *__init mxs_add_gpio( + int id, resource_size_t iobase, int irq) +{ + struct resource res[] = { + { + .start = iobase, + .end = iobase + SZ_8K - 1, + .flags = IORESOURCE_MEM, + }, { + .start = irq, + .end = irq, + .flags = IORESOURCE_IRQ, + }, + }; + + return platform_device_register_resndata(&mxs_apbh_bus, + "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0); +} + +static int __init mxs_add_mxs_gpio(void) +{ + if (cpu_is_mx23()) { + mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0); + mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1); + mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2); + } + + if (cpu_is_mx28()) { + mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0); + mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1); + mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2); + mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3); + mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4); + } + + return 0; +} +postcore_initcall(mxs_add_mxs_gpio); diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c deleted file mode 100644 index 2c950fef71a..00000000000 --- a/arch/arm/mach-mxs/gpio.c +++ /dev/null @@ -1,331 +0,0 @@ -/* - * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> - * Copyright 2008 Juergen Beisert, kernel@pengutronix.de - * - * Based on code from Freescale, - * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/gpio.h> -#include <mach/mx23.h> -#include <mach/mx28.h> -#include <asm-generic/bug.h> - -#include "gpio.h" - -static struct mxs_gpio_port *mxs_gpio_ports; -static int gpio_table_size; - -#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10) -#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10) -#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10) -#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10) -#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10) -#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10) -#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10) -#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10) - -#define GPIO_INT_FALL_EDGE 0x0 -#define GPIO_INT_LOW_LEV 0x1 -#define GPIO_INT_RISE_EDGE 0x2 -#define GPIO_INT_HIGH_LEV 0x3 -#define GPIO_INT_LEV_MASK (1 << 0) -#define GPIO_INT_POL_MASK (1 << 1) - -/* Note: This driver assumes 32 GPIOs are handled in one register */ - -static void clear_gpio_irqstatus(struct mxs_gpio_port *port, u32 index) -{ - __mxs_clrl(1 << index, port->base + PINCTRL_IRQSTAT(port->id)); -} - -static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index, - int enable) -{ - if (enable) { - __mxs_setl(1 << index, port->base + PINCTRL_IRQEN(port->id)); - __mxs_setl(1 << index, port->base + PINCTRL_PIN2IRQ(port->id)); - } else { - __mxs_clrl(1 << index, port->base + PINCTRL_IRQEN(port->id)); - } -} - -static void mxs_gpio_ack_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f); -} - -static void mxs_gpio_mask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0); -} - -static void mxs_gpio_unmask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1); -} - -static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset); - -static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type) -{ - u32 gpio = irq_to_gpio(d->irq); - u32 pin_mask = 1 << (gpio & 31); - struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32]; - void __iomem *pin_addr; - int edge; - - switch (type) { - case IRQ_TYPE_EDGE_RISING: - edge = GPIO_INT_RISE_EDGE; - break; - case IRQ_TYPE_EDGE_FALLING: - edge = GPIO_INT_FALL_EDGE; - break; - case IRQ_TYPE_LEVEL_LOW: - edge = GPIO_INT_LOW_LEV; - break; - case IRQ_TYPE_LEVEL_HIGH: - edge = GPIO_INT_HIGH_LEV; - break; - default: - return -EINVAL; - } - - /* set level or edge */ - pin_addr = port->base + PINCTRL_IRQLEV(port->id); - if (edge & GPIO_INT_LEV_MASK) - __mxs_setl(pin_mask, pin_addr); - else - __mxs_clrl(pin_mask, pin_addr); - - /* set polarity */ - pin_addr = port->base + PINCTRL_IRQPOL(port->id); - if (edge & GPIO_INT_POL_MASK) - __mxs_setl(pin_mask, 
pin_addr); - else - __mxs_clrl(pin_mask, pin_addr); - - clear_gpio_irqstatus(port, gpio & 0x1f); - - return 0; -} - -/* MXS has one interrupt *per* gpio port */ -static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc) -{ - u32 irq_stat; - struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq); - u32 gpio_irq_no_base = port->virtual_irq_start; - - desc->irq_data.chip->irq_ack(&desc->irq_data); - - irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) & - __raw_readl(port->base + PINCTRL_IRQEN(port->id)); - - while (irq_stat != 0) { - int irqoffset = fls(irq_stat) - 1; - generic_handle_irq(gpio_irq_no_base + irqoffset); - irq_stat &= ~(1 << irqoffset); - } -} - -/* - * Set interrupt number "irq" in the GPIO as a wake-up source. - * While system is running, all registered GPIO interrupts need to have - * wake-up enabled. When system is suspended, only selected GPIO interrupts - * need to have wake-up enabled. - * @param irq interrupt source number - * @param enable enable as wake-up if equal to non-zero - * @return This function returns 0 on success. - */ -static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable) -{ - u32 gpio = irq_to_gpio(d->irq); - u32 gpio_idx = gpio & 0x1f; - struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32]; - - if (enable) { - if (port->irq_high && (gpio_idx >= 16)) - enable_irq_wake(port->irq_high); - else - enable_irq_wake(port->irq); - } else { - if (port->irq_high && (gpio_idx >= 16)) - disable_irq_wake(port->irq_high); - else - disable_irq_wake(port->irq); - } - - return 0; -} - -static struct irq_chip gpio_irq_chip = { - .name = "mxs gpio", - .irq_ack = mxs_gpio_ack_irq, - .irq_mask = mxs_gpio_mask_irq, - .irq_unmask = mxs_gpio_unmask_irq, - .irq_set_type = mxs_gpio_set_irq_type, - .irq_set_wake = mxs_gpio_set_wake_irq, -}; - -static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset, - int dir) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - void __iomem *pin_addr = port->base + PINCTRL_DOE(port->id); - - if (dir) - __mxs_setl(1 << offset, pin_addr); - else - __mxs_clrl(1 << offset, pin_addr); -} - -static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - - return (__raw_readl(port->base + PINCTRL_DIN(port->id)) >> offset) & 1; -} - -static void mxs_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - void __iomem *pin_addr = port->base + PINCTRL_DOUT(port->id); - - if (value) - __mxs_setl(1 << offset, pin_addr); - else - __mxs_clrl(1 << offset, pin_addr); -} - -static int mxs_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct mxs_gpio_port *port = - container_of(chip, struct mxs_gpio_port, chip); - - return port->virtual_irq_start + offset; -} - -static int mxs_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - mxs_set_gpio_direction(chip, offset, 0); - return 0; -} - -static int mxs_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - mxs_gpio_set(chip, offset, value); - mxs_set_gpio_direction(chip, offset, 1); - return 0; -} - -int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt) -{ - int i, j; - - /* save for local usage */ - mxs_gpio_ports = port; - gpio_table_size = cnt; - - pr_info("MXS GPIO hardware\n"); - - for (i = 0; i < cnt; i++) { - /* disable the interrupt and clear the status */ - 
__raw_writel(0, port[i].base + PINCTRL_PIN2IRQ(i)); - __raw_writel(0, port[i].base + PINCTRL_IRQEN(i)); - - /* clear address has to be used to clear IRQSTAT bits */ - __mxs_clrl(~0U, port[i].base + PINCTRL_IRQSTAT(i)); - - for (j = port[i].virtual_irq_start; - j < port[i].virtual_irq_start + 32; j++) { - irq_set_chip_and_handler(j, &gpio_irq_chip, - handle_level_irq); - set_irq_flags(j, IRQF_VALID); - } - - /* setup one handler for each entry */ - irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler); - irq_set_handler_data(port[i].irq, &port[i]); - - /* register gpio chip */ - port[i].chip.direction_input = mxs_gpio_direction_input; - port[i].chip.direction_output = mxs_gpio_direction_output; - port[i].chip.get = mxs_gpio_get; - port[i].chip.set = mxs_gpio_set; - port[i].chip.to_irq = mxs_gpio_to_irq; - port[i].chip.base = i * 32; - port[i].chip.ngpio = 32; - - /* its a serious configuration bug when it fails */ - BUG_ON(gpiochip_add(&port[i].chip) < 0); - } - - return 0; -} - -#define MX23_GPIO_BASE MX23_IO_ADDRESS(MX23_PINCTRL_BASE_ADDR) -#define MX28_GPIO_BASE MX28_IO_ADDRESS(MX28_PINCTRL_BASE_ADDR) - -#define DEFINE_MXS_GPIO_PORT(_base, _irq, _id) \ - { \ - .chip.label = "gpio-" #_id, \ - .id = _id, \ - .irq = _irq, \ - .base = _base, \ - .virtual_irq_start = MXS_GPIO_IRQ_START + (_id) * 32, \ - } - -#ifdef CONFIG_SOC_IMX23 -static struct mxs_gpio_port mx23_gpio_ports[] = { - DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO0, 0), - DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO1, 1), - DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO2, 2), -}; - -int __init mx23_register_gpios(void) -{ - return mxs_gpio_init(mx23_gpio_ports, ARRAY_SIZE(mx23_gpio_ports)); -} -#endif - -#ifdef CONFIG_SOC_IMX28 -static struct mxs_gpio_port mx28_gpio_ports[] = { - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO0, 0), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO1, 1), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO2, 2), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO3, 3), - DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO4, 4), -}; - -int __init mx28_register_gpios(void) -{ - return mxs_gpio_init(mx28_gpio_ports, ARRAY_SIZE(mx28_gpio_ports)); -} -#endif diff --git a/arch/arm/mach-mxs/gpio.h b/arch/arm/mach-mxs/gpio.h deleted file mode 100644 index 005bb06630b..00000000000 --- a/arch/arm/mach-mxs/gpio.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved. - * Copyright 2008 Juergen Beisert, kernel@pengutronix.de - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. 
- */ - -#ifndef __MXS_GPIO_H__ -#define __MXS_GPIO_H__ - -struct mxs_gpio_port { - void __iomem *base; - int id; - int irq; - int irq_high; - int virtual_irq_start; - struct gpio_chip chip; -}; - -int mxs_gpio_init(struct mxs_gpio_port*, int); - -#endif /* __MXS_GPIO_H__ */ diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h index 7a37469ed5b..812d7a813a7 100644 --- a/arch/arm/mach-mxs/include/mach/devices-common.h +++ b/arch/arm/mach-mxs/include/mach/devices-common.h @@ -11,6 +11,8 @@ #include <linux/init.h> #include <linux/amba/bus.h> +extern struct device mxs_apbh_bus; + struct platform_device *mxs_add_platform_device_dmamask( const char *name, int id, const struct resource *res, unsigned int num_resources, diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c index eacdc6b0e70..56767a5cce0 100644 --- a/arch/arm/mach-mxs/mach-mx28evk.c +++ b/arch/arm/mach-mxs/mach-mx28evk.c @@ -26,7 +26,6 @@ #include <mach/iomux-mx28.h> #include "devices-mx28.h" -#include "gpio.h" #define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13) #define MX28EVK_FEC_PHY_POWER MXS_GPIO_NR(2, 15) diff --git a/arch/arm/mach-mxs/mm-mx23.c b/arch/arm/mach-mxs/mm-mx23.c index 5148cd64a6b..1b2345ac1a8 100644 --- a/arch/arm/mach-mxs/mm-mx23.c +++ b/arch/arm/mach-mxs/mm-mx23.c @@ -41,5 +41,4 @@ void __init mx23_map_io(void) void __init mx23_init_irq(void) { icoll_init_irq(); - mx23_register_gpios(); } diff --git a/arch/arm/mach-mxs/mm-mx28.c b/arch/arm/mach-mxs/mm-mx28.c index 7e4cea32ebc..b6e18ddb92c 100644 --- a/arch/arm/mach-mxs/mm-mx28.c +++ b/arch/arm/mach-mxs/mm-mx28.c @@ -41,5 +41,4 @@ void __init mx28_map_io(void) void __init mx28_init_irq(void) { icoll_init_irq(); - mx28_register_gpios(); } diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c index 65157a35dbb..54add60f94c 100644 --- a/arch/arm/mach-mxs/ocotp.c +++ b/arch/arm/mach-mxs/ocotp.c @@ -16,6 +16,8 @@ #include <linux/err.h> #include <linux/mutex.h> +#include <asm/processor.h> /* for cpu_relax() */ + #include <mach/mxs.h> #define OCOTP_WORD_OFFSET 0x20 diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile index af98117043d..5b114d1558c 100644 --- a/arch/arm/mach-omap1/Makefile +++ b/arch/arm/mach-omap1/Makefile @@ -4,14 +4,14 @@ # Common support obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o -obj-y += clock.o clock_data.o opp_data.o reset.o +obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o # Power Management -obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o +obj-$(CONFIG_PM) += pm.o sleep.o # DSP obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index de88c9297b6..f49ce85d244 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -215,7 +215,7 @@ static struct omap_kp_platform_data ams_delta_kp_data __initdata = { .delay = 9, }; -static struct platform_device ams_delta_kp_device __initdata = { +static struct platform_device ams_delta_kp_device = { .name = "omap-keypad", .id = -1, .dev = { @@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device __initdata = { .resource = ams_delta_kp_resources, }; -static struct platform_device ams_delta_lcd_device __initdata = { +static struct platform_device ams_delta_lcd_device = { .name = "lcd_ams_delta", .id = -1, }; -static struct platform_device 
ams_delta_led_device __initdata = { +static struct platform_device ams_delta_led_device = { .name = "ams-delta-led", .id = -1 }; @@ -267,7 +267,7 @@ static struct soc_camera_link ams_delta_iclink = { .power = ams_delta_camera_power, }; -static struct platform_device ams_delta_camera_device __initdata = { +static struct platform_device ams_delta_camera_device = { .name = "soc-camera-pdrv", .id = 0, .dev = { diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c index d8559344c6e..f5a52204b89 100644 --- a/arch/arm/mach-omap1/dma.c +++ b/arch/arm/mach-omap1/dma.c @@ -284,14 +284,15 @@ static int __init omap1_system_dma_init(void) dma_base = ioremap(res[0].start, resource_size(&res[0])); if (!dma_base) { pr_err("%s: Unable to ioremap\n", __func__); - return -ENODEV; + ret = -ENODEV; + goto exit_device_put; } ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); - goto exit_device_del; + goto exit_device_put; } p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); @@ -299,7 +300,7 @@ static int __init omap1_system_dma_init(void) dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", __func__, pdev->name); ret = -ENOMEM; - goto exit_device_put; + goto exit_device_del; } d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); @@ -380,10 +381,10 @@ exit_release_d: kfree(d); exit_release_p: kfree(p); -exit_device_put: - platform_device_put(pdev); exit_device_del: platform_device_del(pdev); +exit_device_put: + platform_device_put(pdev); return ret; } diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c index 04c4b04cf54..399da4ce017 100644 --- a/arch/arm/mach-omap1/gpio15xx.c +++ b/arch/arm/mach-omap1/gpio15xx.c @@ -34,14 +34,25 @@ static struct __initdata resource omap15xx_mpu_gpio_resources[] = { }, }; +static struct omap_gpio_reg_offs omap15xx_mpuio_regs = { + .revision = USHRT_MAX, + .direction = OMAP_MPUIO_IO_CNTL, + .datain = OMAP_MPUIO_INPUT_LATCH, + .dataout = OMAP_MPUIO_OUTPUT, + .irqstatus = OMAP_MPUIO_GPIO_INT, + .irqenable = OMAP_MPUIO_GPIO_MASKIT, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = { .virtual_irq_start = IH_MPUIO_BASE, .bank_type = METHOD_MPUIO, .bank_width = 16, .bank_stride = 1, + .regs = &omap15xx_mpuio_regs, }; -static struct __initdata platform_device omap15xx_mpu_gpio = { +static struct platform_device omap15xx_mpu_gpio = { .name = "omap_gpio", .id = 0, .dev = { @@ -64,13 +75,24 @@ static struct __initdata resource omap15xx_gpio_resources[] = { }, }; +static struct omap_gpio_reg_offs omap15xx_gpio_regs = { + .revision = USHRT_MAX, + .direction = OMAP1510_GPIO_DIR_CONTROL, + .datain = OMAP1510_GPIO_DATA_INPUT, + .dataout = OMAP1510_GPIO_DATA_OUTPUT, + .irqstatus = OMAP1510_GPIO_INT_STATUS, + .irqenable = OMAP1510_GPIO_INT_MASK, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = { .virtual_irq_start = IH_GPIO_BASE, .bank_type = METHOD_GPIO_1510, .bank_width = 16, + .regs = &omap15xx_gpio_regs, }; -static struct __initdata platform_device omap15xx_gpio = { +static struct platform_device omap15xx_gpio = { .name = "omap_gpio", .id = 1, .dev = { diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c index 5dd0d4c82b2..0f399bd0e70 100644 --- a/arch/arm/mach-omap1/gpio16xx.c +++ b/arch/arm/mach-omap1/gpio16xx.c @@ -37,14 +37,25 @@ static struct __initdata resource 
omap16xx_mpu_gpio_resources[] = { }, }; +static struct omap_gpio_reg_offs omap16xx_mpuio_regs = { + .revision = USHRT_MAX, + .direction = OMAP_MPUIO_IO_CNTL, + .datain = OMAP_MPUIO_INPUT_LATCH, + .dataout = OMAP_MPUIO_OUTPUT, + .irqstatus = OMAP_MPUIO_GPIO_INT, + .irqenable = OMAP_MPUIO_GPIO_MASKIT, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = { .virtual_irq_start = IH_MPUIO_BASE, .bank_type = METHOD_MPUIO, .bank_width = 16, .bank_stride = 1, + .regs = &omap16xx_mpuio_regs, }; -static struct __initdata platform_device omap16xx_mpu_gpio = { +static struct platform_device omap16xx_mpu_gpio = { .name = "omap_gpio", .id = 0, .dev = { @@ -67,13 +78,27 @@ static struct __initdata resource omap16xx_gpio1_resources[] = { }, }; +static struct omap_gpio_reg_offs omap16xx_gpio_regs = { + .revision = OMAP1610_GPIO_REVISION, + .direction = OMAP1610_GPIO_DIRECTION, + .set_dataout = OMAP1610_GPIO_SET_DATAOUT, + .clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT, + .datain = OMAP1610_GPIO_DATAIN, + .dataout = OMAP1610_GPIO_DATAOUT, + .irqstatus = OMAP1610_GPIO_IRQSTATUS1, + .irqenable = OMAP1610_GPIO_IRQENABLE1, + .set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1, + .clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1, +}; + static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = { .virtual_irq_start = IH_GPIO_BASE, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; -static struct __initdata platform_device omap16xx_gpio1 = { +static struct platform_device omap16xx_gpio1 = { .name = "omap_gpio", .id = 1, .dev = { @@ -100,9 +125,10 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = { .virtual_irq_start = IH_GPIO_BASE + 16, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; -static struct __initdata platform_device omap16xx_gpio2 = { +static struct platform_device omap16xx_gpio2 = { .name = "omap_gpio", .id = 2, .dev = { @@ -129,9 +155,10 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = { .virtual_irq_start = IH_GPIO_BASE + 32, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; -static struct __initdata platform_device omap16xx_gpio3 = { +static struct platform_device omap16xx_gpio3 = { .name = "omap_gpio", .id = 3, .dev = { @@ -158,9 +185,10 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = { .virtual_irq_start = IH_GPIO_BASE + 48, .bank_type = METHOD_GPIO_1610, .bank_width = 16, + .regs = &omap16xx_gpio_regs, }; -static struct __initdata platform_device omap16xx_gpio4 = { +static struct platform_device omap16xx_gpio4 = { .name = "omap_gpio", .id = 4, .dev = { diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c index 1204c8b871a..5ab63eab0ff 100644 --- a/arch/arm/mach-omap1/gpio7xx.c +++ b/arch/arm/mach-omap1/gpio7xx.c @@ -39,14 +39,25 @@ static struct __initdata resource omap7xx_mpu_gpio_resources[] = { }, }; +static struct omap_gpio_reg_offs omap7xx_mpuio_regs = { + .revision = USHRT_MAX, + .direction = OMAP_MPUIO_IO_CNTL / 2, + .datain = OMAP_MPUIO_INPUT_LATCH / 2, + .dataout = OMAP_MPUIO_OUTPUT / 2, + .irqstatus = OMAP_MPUIO_GPIO_INT / 2, + .irqenable = OMAP_MPUIO_GPIO_MASKIT / 2, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = { .virtual_irq_start = IH_MPUIO_BASE, .bank_type = METHOD_MPUIO, .bank_width = 32, .bank_stride = 2, + .regs = &omap7xx_mpuio_regs, }; -static struct __initdata 
platform_device omap7xx_mpu_gpio = { +static struct platform_device omap7xx_mpu_gpio = { .name = "omap_gpio", .id = 0, .dev = { @@ -69,13 +80,24 @@ static struct __initdata resource omap7xx_gpio1_resources[] = { }, }; +static struct omap_gpio_reg_offs omap7xx_gpio_regs = { + .revision = USHRT_MAX, + .direction = OMAP7XX_GPIO_DIR_CONTROL, + .datain = OMAP7XX_GPIO_DATA_INPUT, + .dataout = OMAP7XX_GPIO_DATA_OUTPUT, + .irqstatus = OMAP7XX_GPIO_INT_STATUS, + .irqenable = OMAP7XX_GPIO_INT_MASK, + .irqenable_inv = true, +}; + static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = { .virtual_irq_start = IH_GPIO_BASE, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; -static struct __initdata platform_device omap7xx_gpio1 = { +static struct platform_device omap7xx_gpio1 = { .name = "omap_gpio", .id = 1, .dev = { @@ -102,9 +124,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = { .virtual_irq_start = IH_GPIO_BASE + 32, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; -static struct __initdata platform_device omap7xx_gpio2 = { +static struct platform_device omap7xx_gpio2 = { .name = "omap_gpio", .id = 2, .dev = { @@ -131,9 +154,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = { .virtual_irq_start = IH_GPIO_BASE + 64, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; -static struct __initdata platform_device omap7xx_gpio3 = { +static struct platform_device omap7xx_gpio3 = { .name = "omap_gpio", .id = 3, .dev = { @@ -160,9 +184,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = { .virtual_irq_start = IH_GPIO_BASE + 96, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; -static struct __initdata platform_device omap7xx_gpio4 = { +static struct platform_device omap7xx_gpio4 = { .name = "omap_gpio", .id = 4, .dev = { @@ -189,9 +214,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = { .virtual_irq_start = IH_GPIO_BASE + 128, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; -static struct __initdata platform_device omap7xx_gpio5 = { +static struct platform_device omap7xx_gpio5 = { .name = "omap_gpio", .id = 5, .dev = { @@ -218,9 +244,10 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = { .virtual_irq_start = IH_GPIO_BASE + 160, .bank_type = METHOD_GPIO_7XX, .bank_width = 32, + .regs = &omap7xx_gpio_regs, }; -static struct __initdata platform_device omap7xx_gpio6 = { +static struct platform_device omap7xx_gpio6 = { .name = "omap_gpio", .id = 6, .dev = { diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index fe31d933f0e..943072d5a1d 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c @@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev) if (ret) return ret; - ret = pm_runtime_clk_suspend(dev); + ret = pm_clk_suspend(dev); if (ret) { pm_generic_runtime_resume(dev); return ret; @@ -45,20 +45,24 @@ static int omap1_pm_runtime_resume(struct device *dev) { dev_dbg(dev, "%s\n", __func__); - pm_runtime_clk_resume(dev); + pm_clk_resume(dev); return pm_generic_runtime_resume(dev); } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = omap1_pm_runtime_suspend, .runtime_resume = omap1_pm_runtime_resume, USE_PLATFORM_PM_SLEEP_OPS }, }; +#define OMAP1_PM_DOMAIN 
(&default_pm_domain) +#else +#define OMAP1_PM_DOMAIN NULL +#endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = &default_power_domain, + .pm_domain = OMAP1_PM_DOMAIN, .con_ids = { "ick", "fck", NULL, }, }; @@ -67,9 +71,9 @@ static int __init omap1_pm_runtime_init(void) if (!cpu_class_is_omap1()) return -ENODEV; - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(omap1_pm_runtime_init); -#endif /* CONFIG_PM_RUNTIME */ + diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c index d54969be0a5..5de6eac0a72 100644 --- a/arch/arm/mach-omap2/board-2430sdp.c +++ b/arch/arm/mach-omap2/board-2430sdp.c @@ -26,13 +26,13 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/gpio.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> -#include <mach/gpio.h> #include <plat/board.h> #include <plat/common.h> #include <plat/gpmc.h> diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index ae2963a9804..5dac974be62 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -622,19 +622,19 @@ static struct omap_device_pad serial3_pads[] __initdata = { OMAP_MUX_MODE0), }; -static struct omap_board_data serial1_data = { +static struct omap_board_data serial1_data __initdata = { .id = 0, .pads = serial1_pads, .pads_cnt = ARRAY_SIZE(serial1_pads), }; -static struct omap_board_data serial2_data = { +static struct omap_board_data serial2_data __initdata = { .id = 1, .pads = serial2_pads, .pads_cnt = ARRAY_SIZE(serial2_pads), }; -static struct omap_board_data serial3_data = { +static struct omap_board_data serial3_data __initdata = { .id = 2, .pads = serial3_pads, .pads_cnt = ARRAY_SIZE(serial3_pads), diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 73fa90bb695..63de2d396e2 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -258,7 +258,7 @@ static struct gpio sdp4430_eth_gpios[] __initdata = { { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" }, }; -static int omap_ethernet_init(void) +static int __init omap_ethernet_init(void) { int status; @@ -322,6 +322,7 @@ static struct omap2_hsmmc_info mmc[] = { .gpio_wp = -EINVAL, .nonremovable = true, .ocr_mask = MMC_VDD_29_30, + .no_off_init = true, }, { .mmc = 1, @@ -681,19 +682,19 @@ static struct omap_device_pad serial4_pads[] __initdata = { OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), }; -static struct omap_board_data serial2_data = { +static struct omap_board_data serial2_data __initdata = { .id = 1, .pads = serial2_pads, .pads_cnt = ARRAY_SIZE(serial2_pads), }; -static struct omap_board_data serial3_data = { +static struct omap_board_data serial3_data __initdata = { .id = 2, .pads = serial3_pads, .pads_cnt = ARRAY_SIZE(serial3_pads), }; -static struct omap_board_data serial4_data = { +static struct omap_board_data serial4_data __initdata = { .id = 3, .pads = serial4_pads, .pads_cnt = ARRAY_SIZE(serial4_pads), @@ -729,7 +730,7 @@ static void __init omap_4430sdp_init(void) if (omap_rev() == OMAP4430_REV_ES1_0) package = OMAP_PACKAGE_CBL; - omap4_mux_init(board_mux, package); + omap4_mux_init(board_mux, NULL, package); omap_board_config = sdp4430_config; omap_board_config_size = ARRAY_SIZE(sdp4430_config); diff --git 
a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c index f3beb8eeef7..b124bdfb423 100644 --- a/arch/arm/mach-omap2/board-apollon.c +++ b/arch/arm/mach-omap2/board-apollon.c @@ -27,13 +27,13 @@ #include <linux/err.h> #include <linux/clk.h> #include <linux/smc91x.h> +#include <linux/gpio.h> #include <mach/hardware.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> -#include <mach/gpio.h> #include <plat/led.h> #include <plat/usb.h> #include <plat/board.h> diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index c63115bc153..77456dec93e 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -63,8 +63,6 @@ #define SB_T35_SMSC911X_CS 4 #define SB_T35_SMSC911X_GPIO 65 -#define NAND_BLOCK_SIZE SZ_128K - #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include <linux/smsc911x.h> #include <plat/gpmc-smsc911x.h> diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c index 08f08e81249..c3a9fd35034 100644 --- a/arch/arm/mach-omap2/board-cm-t3517.c +++ b/arch/arm/mach-omap2/board-cm-t3517.c @@ -48,6 +48,7 @@ #include "mux.h" #include "control.h" +#include "common-board-devices.h" #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) static struct gpio_led cm_t3517_leds[] = { @@ -177,7 +178,7 @@ static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = { .reset_gpio_port[2] = -EINVAL, }; -static int cm_t3517_init_usbh(void) +static int __init cm_t3517_init_usbh(void) { int err; @@ -203,8 +204,6 @@ static inline int cm_t3517_init_usbh(void) #endif #if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) -#define NAND_BLOCK_SIZE SZ_128K - static struct mtd_partition cm_t3517_nand_partitions[] = { { .name = "xloader", diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index cf520d7dd61..34956ec8329 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -61,8 +61,6 @@ #include "timer-gp.h" #include "common-board-devices.h" -#define NAND_BLOCK_SIZE SZ_128K - #define OMAP_DM9000_GPIO_IRQ 25 #define OMAP3_DEVKIT_TS_GPIO 27 diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index be71426359f..7f21d24bd43 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -54,8 +54,6 @@ #include "pm.h" #include "common-board-devices.h" -#define NAND_BLOCK_SIZE SZ_128K - /* * OMAP3 Beagle revision * Run time detection of Beagle revision is done by reading GPIO. @@ -106,6 +104,9 @@ static void __init omap3_beagle_init_rev(void) beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1) | (gpio_get_value(173) << 2); + gpio_free_array(omap3_beagle_rev_gpios, + ARRAY_SIZE(omap3_beagle_rev_gpios)); + switch (beagle_rev) { case 7: printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n"); @@ -579,6 +580,9 @@ static void __init omap3_beagle_init(void) omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, ARRAY_SIZE(omap3beagle_nand_partitions)); + /* Ensure msecure is mux'd to be able to set the RTC. 
*/ + omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH); + /* Ensure SDRC pins are mux'd for self-refresh */ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 1d10736c6d3..23f71d40883 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -30,6 +30,7 @@ #include <linux/leds.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> +#include <linux/gpio.h> #include <linux/gpio_keys.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -41,7 +42,6 @@ #include <plat/board.h> #include <plat/common.h> -#include <mach/gpio.h> #include <mach/hardware.h> #include <plat/mcspi.h> #include <plat/usb.h> @@ -57,8 +57,6 @@ #define PANDORA_WIFI_NRESET_GPIO 23 #define OMAP3_PANDORA_TS_GPIO 94 -#define NAND_BLOCK_SIZE SZ_128K - static struct mtd_partition omap3pandora_nand_partitions[] = { { .name = "xloader", @@ -86,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = { static struct omap_nand_platform_data pandora_nand_data = { .cs = 0, - .devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */ + .devsize = NAND_BUSWIDTH_16, + .xfer_type = NAND_OMAP_PREFETCH_DMA, .parts = omap3pandora_nand_partitions, .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), }; diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index 82872d7d313..5f649faf737 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c @@ -56,8 +56,6 @@ #include <asm/setup.h> -#define NAND_BLOCK_SIZE SZ_128K - #define OMAP3_AC_GPIO 136 #define OMAP3_TS_GPIO 162 #define TB_BL_PWM_TIMER 9 diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index 90485fced97..0cfe2005cb5 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -526,19 +526,19 @@ static struct omap_device_pad serial4_pads[] __initdata = { OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), }; -static struct omap_board_data serial2_data = { +static struct omap_board_data serial2_data __initdata = { .id = 1, .pads = serial2_pads, .pads_cnt = ARRAY_SIZE(serial2_pads), }; -static struct omap_board_data serial3_data = { +static struct omap_board_data serial3_data __initdata = { .id = 2, .pads = serial3_pads, .pads_cnt = ARRAY_SIZE(serial3_pads), }; -static struct omap_board_data serial4_data = { +static struct omap_board_data serial4_data __initdata = { .id = 3, .pads = serial4_pads, .pads_cnt = ARRAY_SIZE(serial4_pads), @@ -687,7 +687,7 @@ static void __init omap4_panda_init(void) if (omap_rev() == OMAP4430_REV_ES1_0) package = OMAP_PACKAGE_CBL; - omap4_mux_init(board_mux, package); + omap4_mux_init(board_mux, NULL, package); if (wl12xx_set_platform_data(&omap_panda_wlan_data)) pr_err("error setting wl12xx data\n"); diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index 1555918e3ff..175e1ab2b04 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c @@ -24,6 +24,7 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/gpio.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/i2c/twl.h> @@ -45,7 +46,6 @@ #include <plat/common.h> #include <video/omapdss.h> #include <video/omap-panel-generic-dpi.h> -#include <mach/gpio.h> #include <plat/gpmc.h> #include 
<mach/hardware.h> #include <plat/nand.h> @@ -65,8 +65,6 @@ #define OVERO_GPIO_USBH_CPEN 168 #define OVERO_GPIO_USBH_NRESET 183 -#define NAND_BLOCK_SIZE SZ_128K - #define OVERO_SMSC911X_CS 5 #define OVERO_SMSC911X_GPIO 176 #define OVERO_SMSC911X2_CS 4 diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index f6247e71a19..88bd6f7705f 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -488,6 +488,7 @@ static struct regulator_init_data rx51_vmmc2 = { .name = "V28_A", .min_uV = 2800000, .max_uV = 3000000, + .always_on = true, /* due VIO leak to AIC34 VDDs */ .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, @@ -557,7 +558,7 @@ static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module = .subdev_board_info = &rx51_si4713_board_info, }; -static struct platform_device rx51_si4713_dev __initdata_or_module = { +static struct platform_device rx51_si4713_dev = { .name = "radio-si4713", .id = -1, .dev = { @@ -582,7 +583,7 @@ static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n) { /* FIXME this gpio setup is just a placeholder for now */ gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm"); - gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en"); + gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "speaker_en"); return 0; } diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c index c7c6beb1ec2..d4683ba5f72 100644 --- a/arch/arm/mach-omap2/board-zoom-display.c +++ b/arch/arm/mach-omap2/board-zoom-display.c @@ -26,7 +26,7 @@ static struct gpio zoom_lcd_gpios[] __initdata = { { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" }, }; -static void zoom_lcd_panel_init(void) +static void __init zoom_lcd_panel_init(void) { zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? 
LCD_PANEL_RESET_GPIO_PROD : diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c index e94903b2c65..94ccf464677 100644 --- a/arch/arm/mach-omap2/common-board-devices.c +++ b/arch/arm/mach-omap2/common-board-devices.c @@ -85,17 +85,17 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, struct spi_board_info *spi_bi = &ads7846_spi_board_info; int err; - err = gpio_request(gpio_pendown, "TS PenDown"); - if (err) { - pr_err("Could not obtain gpio for TS PenDown: %d\n", err); - return; - } - - gpio_direction_input(gpio_pendown); - gpio_export(gpio_pendown, 0); + if (board_pdata && board_pdata->get_pendown_state) { + err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown"); + if (err) { + pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err); + return; + } + gpio_export(gpio_pendown, 0); - if (gpio_debounce) - gpio_set_debounce(gpio_pendown, gpio_debounce); + if (gpio_debounce) + gpio_set_debounce(gpio_pendown, gpio_debounce); + } ads7846_config.gpio_pendown = gpio_pendown; diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h index eb80b3b0ef4..679719051df 100644 --- a/arch/arm/mach-omap2/common-board-devices.h +++ b/arch/arm/mach-omap2/common-board-devices.h @@ -1,6 +1,8 @@ #ifndef __OMAP_COMMON_BOARD_DEVICES__ #define __OMAP_COMMON_BOARD_DEVICES__ +#define NAND_BLOCK_SIZE SZ_128K + struct twl4030_platform_data; struct mtd_partition; diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 7b855856459..5b8ca680ed9 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -97,7 +97,7 @@ static int __init omap4_l3_init(void) WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name); - return PTR_ERR(od); + return IS_ERR(od) ? 
PTR_ERR(od) : 0; } postcore_initcall(omap4_l3_init); diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c index 9529842ae05..2765cdc3152 100644 --- a/arch/arm/mach-omap2/gpio.c +++ b/arch/arm/mach-omap2/gpio.c @@ -61,13 +61,45 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused) pdata->dbck_flag = dev_attr->dbck_flag; pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1); + pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL); + if (!pdata) { + pr_err("gpio%d: Memory allocation failed\n", id); + return -ENOMEM; + } + switch (oh->class->rev) { case 0: case 1: pdata->bank_type = METHOD_GPIO_24XX; + pdata->regs->revision = OMAP24XX_GPIO_REVISION; + pdata->regs->direction = OMAP24XX_GPIO_OE; + pdata->regs->datain = OMAP24XX_GPIO_DATAIN; + pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT; + pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT; + pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT; + pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1; + pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2; + pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1; + pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1; + pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1; + pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL; + pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN; break; case 2: pdata->bank_type = METHOD_GPIO_44XX; + pdata->regs->revision = OMAP4_GPIO_REVISION; + pdata->regs->direction = OMAP4_GPIO_OE; + pdata->regs->datain = OMAP4_GPIO_DATAIN; + pdata->regs->dataout = OMAP4_GPIO_DATAOUT; + pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT; + pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT; + pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0; + pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1; + pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0; + pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0; + pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0; + pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME; + pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE; break; default: WARN(1, "Invalid gpio bank_type\n"); @@ -87,6 +119,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused) return PTR_ERR(od); } + omap_device_disable_idle_on_suspend(od); + gpio_bank_count++; return 0; } diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c index b2f30bed5a2..66868c5d5a2 100644 --- a/arch/arm/mach-omap2/hsmmc.c +++ b/arch/arm/mach-omap2/hsmmc.c @@ -145,6 +145,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot, int power_on, int vdd) { u32 reg; + unsigned long timeout; if (power_on) { reg = omap4_ctrl_pad_readl(control_pbias_offset); @@ -157,9 +158,15 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot, OMAP4_MMC1_PWRDNZ_MASK | OMAP4_USBC1_ICUSB_PWRDNZ_MASK); omap4_ctrl_pad_writel(reg, control_pbias_offset); - /* 4 microsec delay for comparator to generate an error*/ - udelay(4); - reg = omap4_ctrl_pad_readl(control_pbias_offset); + + timeout = jiffies + msecs_to_jiffies(5); + do { + reg = omap4_ctrl_pad_readl(control_pbias_offset); + if (!(reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK)) + break; + usleep_range(100, 200); + } while (!time_after(jiffies, timeout)); + if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) { pr_err("Pbias Voltage is not same as LDO\n"); /* Caution : On VMODE_ERROR Power Down MMC IO */ @@ -331,6 +338,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c, if (c->no_off) mmc->slots[0].no_off = 1; + if (c->no_off_init) + 
mmc->slots[0].no_regulator_off_init = c->no_off_init; + if (c->vcc_aux_disable_is_sleep) mmc->slots[0].vcc_aux_disable_is_sleep = 1; diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h index f119348827d..f757e78d4d4 100644 --- a/arch/arm/mach-omap2/hsmmc.h +++ b/arch/arm/mach-omap2/hsmmc.h @@ -18,6 +18,7 @@ struct omap2_hsmmc_info { bool nonremovable; /* Nonremovable e.g. eMMC */ bool power_saving; /* Try to sleep or power off when possible */ bool no_off; /* power_saving and power is not to go off */ + bool no_off_init; /* no power off when not in MMC sleep state */ bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */ int gpio_cd; /* or -EINVAL */ int gpio_wp; /* or -EINVAL */ diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index a4ab1e36431..c7fb22abc21 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -83,6 +83,9 @@ void omap_mux_write(struct omap_mux_partition *partition, u16 val, void omap_mux_write_array(struct omap_mux_partition *partition, struct omap_board_mux *board_mux) { + if (!board_mux) + return; + while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) { omap_mux_write(partition, board_mux->value, board_mux->reg_offset); @@ -906,7 +909,7 @@ static struct omap_mux *omap_mux_get_by_gpio( u16 omap_mux_get_gpio(int gpio) { struct omap_mux_partition *partition; - struct omap_mux *m; + struct omap_mux *m = NULL; list_for_each_entry(partition, &mux_partitions, node) { m = omap_mux_get_by_gpio(partition, gpio); diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h index 137f321c029..2132308ad1e 100644 --- a/arch/arm/mach-omap2/mux.h +++ b/arch/arm/mach-omap2/mux.h @@ -323,10 +323,12 @@ int omap3_mux_init(struct omap_board_mux *board_mux, int flags); /** * omap4_mux_init() - initialize mux system with board specific set - * @board_mux: Board specific mux table + * @board_subset: Board specific mux table + * @board_wkup_subset: Board specific mux table for wakeup instance * @flags: OMAP package type used for the board */ -int omap4_mux_init(struct omap_board_mux *board_mux, int flags); +int omap4_mux_init(struct omap_board_mux *board_subset, + struct omap_board_mux *board_wkup_subset, int flags); /** * omap_mux_init - private mux init function, do not call diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c index 9a66445112a..f5a74daab2f 100644 --- a/arch/arm/mach-omap2/mux44xx.c +++ b/arch/arm/mach-omap2/mux44xx.c @@ -1309,7 +1309,8 @@ static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = { #define omap4_wkup_cbl_cbs_ball NULL #endif -int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags) +int __init omap4_mux_init(struct omap_board_mux *board_subset, + struct omap_board_mux *board_wkup_subset, int flags) { struct omap_ball *package_balls_core; struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball; @@ -1347,7 +1348,7 @@ int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags) OMAP_MUX_GPIO_IN_MODE3, OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE, OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE, - omap4_wkup_muxmodes, NULL, board_subset, + omap4_wkup_muxmodes, NULL, board_wkup_subset, package_balls_wkup); return ret; diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index e03429453ce..293fa6cd50e 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1628,7 +1628,7 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data), void *data) { struct omap_hwmod *temp_oh; - 
int ret; + int ret = 0; if (!fn) return -EINVAL; diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index abc548a0c98..e1c69ffe0f6 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -5109,7 +5109,7 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = { &omap44xx_iva_seq1_hwmod, /* kbd class */ -/* &omap44xx_kbd_hwmod, */ + &omap44xx_kbd_hwmod, /* mailbox class */ &omap44xx_mailbox_hwmod, diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c index f47813edd95..58775e3c847 100644 --- a/arch/arm/mach-omap2/omap_phy_internal.c +++ b/arch/arm/mach-omap2/omap_phy_internal.c @@ -56,8 +56,10 @@ int omap4430_phy_init(struct device *dev) /* Power down the phy */ __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); - if (!dev) + if (!dev) { + iounmap(ctrl_base); return 0; + } phyclk = clk_get(dev, "ocp2scp_usb_phy_ick"); if (IS_ERR(phyclk)) { diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index a5a83b358dd..e01da45c053 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c @@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir; static int pm_dbg_init_done; -static int __init pm_dbg_init(void); +static int pm_dbg_init(void); enum { DEBUG_FILE_COUNTERS = 0, @@ -595,7 +595,7 @@ static int option_set(void *data, u64 val) DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n"); -static int __init pm_dbg_init(void) +static int pm_dbg_init(void) { int i; struct dentry *d; diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 1ac361b7b8c..466fc722fa0 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata) WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n", name, oh->name); + omap_device_disable_idle_on_suspend(od); oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt); uart->irq = oh->mpu_irqs[0].irq; diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index 87ae3129f4f..b27544bcafc 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c @@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void) if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && (GPDR(i) & GPIO_bit(i))) { if (GPLR(i) & GPIO_bit(i)) - PGSR(i) |= GPIO_bit(i); + PGSR(gpio_to_bank(i)) |= GPIO_bit(i); else - PGSR(i) &= ~GPIO_bit(i); + PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i); } } diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index d130f77b6d1..2f37d43f51b 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = { .xres = 480, .yres = 272, .bpp = 16, - .hsync_len = 4, + .hsync_len = 41, .left_margin = 2, .right_margin = 1, - .vsync_len = 1, + .vsync_len = 10, .upper_margin = 3, .lower_margin = 1, .sync = 0, @@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void) { int ret; - pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); - - /* Earlier devices had the backlight regulator controlled - * via PWM, later versions use another controller for that */ - if ((system_rev & 0xff) < 2) { - mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; - pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); - platform_device_register(&raumfeld_pwm_backlight_device); - } else - platform_device_register(&raumfeld_lt3593_device); - 
ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable"); if (ret < 0) pr_warning("Unable to request GPIO_TFT_VA_EN\n"); else gpio_direction_output(GPIO_TFT_VA_EN, 1); + msleep(100); + ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable"); if (ret < 0) pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n"); else gpio_direction_output(GPIO_DISPLAY_ENABLE, 1); + /* Hardware revision 2 has the backlight regulator controlled + * by an LT3593, earlier and later devices use PWM for that. */ + if ((system_rev & 0xff) == 2) { + platform_device_register(&raumfeld_lt3593_device); + } else { + mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; + pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); + platform_device_register(&raumfeld_pwm_backlight_device); + } + + pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); platform_device_register(&pxa3xx_device_gcu); } @@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = { #define SPI_AK4104 \ { \ - .modalias = "ak4104", \ - .max_speed_hz = 10000, \ - .bus_num = 0, \ - .chip_select = 0, \ + .modalias = "ak4104-codec", \ + .max_speed_hz = 10000, \ + .bus_num = 0, \ + .chip_select = 0, \ .controller_data = (void *) GPIO_SPDIF_CS, \ } diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c index 7fe74067d85..094279aefe9 100644 --- a/arch/arm/mach-pxa/spitz_pm.c +++ b/arch/arm/mach-pxa/spitz_pm.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/delay.h> +#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/apm-emulation.h> diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile index 0d468e96e83..81695353d8f 100644 --- a/arch/arm/mach-s3c2410/Makefile +++ b/arch/arm/mach-s3c2410/Makefile @@ -10,7 +10,6 @@ obj-n := obj- := obj-$(CONFIG_CPU_S3C2410) += s3c2410.o -obj-$(CONFIG_CPU_S3C2410) += irq.o obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o obj-$(CONFIG_S3C2410_PM) += pm.o sleep.o diff --git a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h deleted file mode 100644 index dcef2287cb3..00000000000 --- a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h +++ /dev/null @@ -1,28 +0,0 @@ -/* arch/arm/mach-s3c2410/include/mach/spi-gpio.h - * - * Copyright (c) 2006 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * - * S3C2410 - SPI Controller platform_device info - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#ifndef __ASM_ARCH_SPIGPIO_H -#define __ASM_ARCH_SPIGPIO_H __FILE__ - -struct s3c2410_spigpio_info { - unsigned long pin_clk; - unsigned long pin_mosi; - unsigned long pin_miso; - - int num_chipselect; - int bus_num; - - void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs); -}; - - -#endif /* __ASM_ARCH_SPIGPIO_H */ diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c deleted file mode 100644 index 2854129f8cc..00000000000 --- a/arch/arm/mach-s3c2410/irq.c +++ /dev/null @@ -1,34 +0,0 @@ -/* linux/arch/arm/mach-s3c2410/irq.c - * - * Copyright (c) 2006 Simtec Electronics - * Ben Dooks <ben@simtec.co.uk> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * -*/ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/syscore_ops.h> - -#include <plat/cpu.h> -#include <plat/pm.h> - -struct syscore_ops s3c24xx_irq_syscore_ops = { - .suspend = s3c24xx_irq_suspend, - .resume = s3c24xx_irq_resume, -}; diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c index e8f49feef28..f44f77531b1 100644 --- a/arch/arm/mach-s3c2410/mach-qt2410.c +++ b/arch/arm/mach-s3c2410/mach-qt2410.c @@ -32,7 +32,7 @@ #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/spi/spi.h> -#include <linux/spi/spi_bitbang.h> +#include <linux/spi/spi_gpio.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> @@ -53,8 +53,6 @@ #include <mach/fb.h> #include <plat/nand.h> #include <plat/udc.h> -#include <mach/spi.h> -#include <mach/spi-gpio.h> #include <plat/iic.h> #include <plat/common-smdk.h> @@ -216,32 +214,16 @@ static struct platform_device qt2410_led = { /* SPI */ -static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs) -{ - switch (cs) { - case BITBANG_CS_ACTIVE: - gpio_set_value(S3C2410_GPB(5), 0); - break; - case BITBANG_CS_INACTIVE: - gpio_set_value(S3C2410_GPB(5), 1); - break; - } -} - -static struct s3c2410_spigpio_info spi_gpio_cfg = { - .pin_clk = S3C2410_GPG(7), - .pin_mosi = S3C2410_GPG(6), - .pin_miso = S3C2410_GPG(5), - .chip_select = &spi_gpio_cs, +static struct spi_gpio_platform_data spi_gpio_cfg = { + .sck = S3C2410_GPG(7), + .mosi = S3C2410_GPG(6), + .miso = S3C2410_GPG(5), }; - static struct platform_device qt2410_spi = { - .name = "s3c24xx-spi-gpio", - .id = 1, - .dev = { - .platform_data = &spi_gpio_cfg, - }, + .name = "spi-gpio", + .id = 1, + .dev.platform_data = &spi_gpio_cfg, }; /* Board devices */ diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c index 85dcaeb9e62..5eeb47580b0 100644 --- a/arch/arm/mach-s3c2412/mach-jive.c +++ b/arch/arm/mach-s3c2412/mach-jive.c @@ -25,6 +25,7 @@ #include <video/ili9320.h> #include <linux/spi/spi.h> +#include <linux/spi/spi_gpio.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -38,7 +39,6 @@ #include <mach/regs-gpio.h> #include <mach/regs-mem.h> #include <mach/regs-lcd.h> -#include <mach/spi-gpio.h> #include <mach/fb.h> #include <asm/mach-types.h> @@ -389,45 +389,30 @@ static struct ili9320_platdata jive_lcm_config = { /* LCD SPI support */ -static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs) -{ - gpio_set_value(S3C2410_GPB(7), cs ? 
0 : 1); -} - -static struct s3c2410_spigpio_info jive_lcd_spi = { - .bus_num = 1, - .pin_clk = S3C2410_GPG(8), - .pin_mosi = S3C2410_GPB(8), - .num_chipselect = 1, - .chip_select = jive_lcd_spi_chipselect, +static struct spi_gpio_platform_data jive_lcd_spi = { + .sck = S3C2410_GPG(8), + .mosi = S3C2410_GPB(8), + .miso = SPI_GPIO_NO_MISO, }; static struct platform_device jive_device_lcdspi = { - .name = "spi_s3c24xx_gpio", + .name = "spi-gpio", .id = 1, - .num_resources = 0, .dev.platform_data = &jive_lcd_spi, }; -/* WM8750 audio code SPI definition */ -static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs) -{ - gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1); -} +/* WM8750 audio code SPI definition */ -static struct s3c2410_spigpio_info jive_wm8750_spi = { - .bus_num = 2, - .pin_clk = S3C2410_GPB(4), - .pin_mosi = S3C2410_GPB(9), - .num_chipselect = 1, - .chip_select = jive_wm8750_chipselect, +static struct spi_gpio_platform_data jive_wm8750_spi = { + .sck = S3C2410_GPB(4), + .mosi = S3C2410_GPB(9), + .miso = SPI_GPIO_NO_MISO, }; static struct platform_device jive_device_wm8750 = { - .name = "spi_s3c24xx_gpio", + .name = "spi-gpio", .id = 2, - .num_resources = 0, .dev.platform_data = &jive_wm8750_spi, }; @@ -441,12 +426,14 @@ static struct spi_board_info __initdata jive_spi_devs[] = { .mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */ .max_speed_hz = 100000, .platform_data = &jive_lcm_config, + .controller_data = (void *)S3C2410_GPB(7), }, { .modalias = "WM8750", .bus_num = 2, .chip_select = 0, .mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */ .max_speed_hz = 100000, + .controller_data = (void *)S3C2410_GPH(10), }, }; diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c index 716662008ce..c10ddf4ed7f 100644 --- a/arch/arm/mach-s3c2440/mach-gta02.c +++ b/arch/arm/mach-s3c2440/mach-gta02.c @@ -74,7 +74,6 @@ #include <mach/fb.h> #include <mach/spi.h> -#include <mach/spi-gpio.h> #include <plat/usb-control.h> #include <mach/regs-mem.h> #include <mach/hardware.h> diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c index dd3120df09f..fc2dc0b3d4f 100644 --- a/arch/arm/mach-s3c2440/mach-mini2440.c +++ b/arch/arm/mach-s3c2440/mach-mini2440.c @@ -552,7 +552,7 @@ struct mini2440_features_t { struct platform_device *optional[8]; }; -static void mini2440_parse_features( +static void __init mini2440_parse_features( struct mini2440_features_t * features, const char * features_str ) { diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c index 82db072cb83..5e6b42089eb 100644 --- a/arch/arm/mach-s3c64xx/dev-spi.c +++ b/arch/arm/mach-s3c64xx/dev-spi.c @@ -88,6 +88,7 @@ static struct s3c64xx_spi_info s3c64xx_spi0_pdata = { .cfg_gpio = s3c64xx_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, + .tx_st_done = 21, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -132,6 +133,7 @@ static struct s3c64xx_spi_info s3c64xx_spi1_pdata = { .cfg_gpio = s3c64xx_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, + .tx_st_done = 21, }; struct platform_device s3c64xx_device_spi1 = { diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c index b197171e7d0..204bfafe4bf 100644 --- a/arch/arm/mach-s3c64xx/dma.c +++ b/arch/arm/mach-s3c64xx/dma.c @@ -113,7 +113,7 @@ found: return chan; } -int s3c2410_dma_config(unsigned int channel, int xferunit) +int s3c2410_dma_config(enum dma_ch channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -297,7 +297,7 @@ static int 
s3c64xx_dma_flush(struct s3c2410_dma_chan *chan) return 0; } -int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) +int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); * */ -int s3c2410_dma_enqueue(unsigned int channel, void *id, +int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -415,7 +415,7 @@ err_buff: EXPORT_SYMBOL(s3c2410_dma_enqueue); -int s3c2410_dma_devconfig(unsigned int channel, +int s3c2410_dma_devconfig(enum dma_ch channel, enum s3c2410_dmasrc source, unsigned long devaddr) { @@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel, EXPORT_SYMBOL(s3c2410_dma_devconfig); -int s3c2410_dma_getposition(unsigned int channel, +int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition); * get control of an dma channel */ -int s3c2410_dma_request(unsigned int channel, +int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *client, void *dev) { @@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); * allowed to go through. */ -int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) +int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c index e78ee18c76e..ac825e82632 100644 --- a/arch/arm/mach-s5p64x0/dev-spi.c +++ b/arch/arm/mach-s5p64x0/dev-spi.c @@ -112,12 +112,14 @@ static struct s3c64xx_spi_info s5p6440_spi0_pdata = { .cfg_gpio = s5p6440_spi_cfg_gpio, .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, + .tx_st_done = 25, }; static struct s3c64xx_spi_info s5p6450_spi0_pdata = { .cfg_gpio = s5p6450_spi_cfg_gpio, .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, + .tx_st_done = 25, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -160,12 +162,14 @@ static struct s3c64xx_spi_info s5p6440_spi1_pdata = { .cfg_gpio = s5p6440_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, + .tx_st_done = 25, }; static struct s3c64xx_spi_info s5p6450_spi1_pdata = { .cfg_gpio = s5p6450_spi_cfg_gpio, .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, + .tx_st_done = 25, }; struct platform_device s5p64x0_device_spi1 = { diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c index 57b19794d9b..e5d6c4dceb5 100644 --- a/arch/arm/mach-s5pc100/dev-spi.c +++ b/arch/arm/mach-s5pc100/dev-spi.c @@ -15,6 +15,7 @@ #include <mach/dma.h> #include <mach/map.h> #include <mach/spi-clocks.h> +#include <mach/irqs.h> #include <plat/s3c64xx-spi.h> #include <plat/gpio-cfg.h> @@ -90,6 +91,7 @@ static struct s3c64xx_spi_info s5pc100_spi0_pdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, .high_speed = 1, + .tx_st_done = 21, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -134,6 +136,7 @@ static struct s3c64xx_spi_info s5pc100_spi1_pdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, .high_speed = 1, + .tx_st_done = 21, }; struct platform_device s5pc100_device_spi1 = { @@ -176,6 +179,7 @@ static struct s3c64xx_spi_info s5pc100_spi2_pdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 13, .high_speed = 1, + .tx_st_done = 21, }; struct platform_device s5pc100_device_spi2 = { diff --git 
a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c index 22046e2f53c..153af8b359e 100644 --- a/arch/arm/mach-s5pv210/cpufreq.c +++ b/arch/arm/mach-s5pv210/cpufreq.c @@ -101,12 +101,14 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq) unsigned long tmp, tmp1; void __iomem *reg = NULL; - if (ch == DMC0) + if (ch == DMC0) { reg = (S5P_VA_DMC0 + 0x30); - else if (ch == DMC1) + } else if (ch == DMC1) { reg = (S5P_VA_DMC1 + 0x30); - else + } else { printk(KERN_ERR "Cannot find DMC port\n"); + return; + } /* Find current DRAM frequency */ tmp = s5pv210_dram_conf[ch].freq; diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c index e3249a47e3b..eaf9a7bff7a 100644 --- a/arch/arm/mach-s5pv210/dev-spi.c +++ b/arch/arm/mach-s5pv210/dev-spi.c @@ -85,6 +85,7 @@ static struct s3c64xx_spi_info s5pv210_spi0_pdata = { .fifo_lvl_mask = 0x1ff, .rx_lvl_offset = 15, .high_speed = 1, + .tx_st_done = 25, }; static u64 spi_dmamask = DMA_BIT_MASK(32); @@ -129,6 +130,7 @@ static struct s3c64xx_spi_info s5pv210_spi1_pdata = { .fifo_lvl_mask = 0x7f, .rx_lvl_offset = 15, .high_speed = 1, + .tx_st_done = 25, }; struct platform_device s5pv210_device_spi1 = { diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index c95258c274c..ce5c2513c6c 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -381,11 +381,9 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state) gpio_set_value(GPIO_PORT114, state); } -static struct sh_mobile_sdhi_info sh_sdhi1_platdata = { - .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, - .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, +static struct sh_mobile_sdhi_info sh_sdhi1_info = { .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, - .tmio_caps = MMC_CAP_NONREMOVABLE, + .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, .set_pwr = ag5evm_sdhi1_set_pwr, }; @@ -415,7 +413,7 @@ static struct platform_device sdhi1_device = { .name = "sh_mobile_sdhi", .id = 1, .dev = { - .platform_data = &sh_sdhi1_platdata, + .platform_data = &sh_sdhi1_info, }, .num_resources = ARRAY_SIZE(sdhi1_resources), .resource = sdhi1_resources, diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index f6b687f61c2..b473b8efac6 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -913,7 +913,7 @@ static struct i2c_board_info imx074_info = { I2C_BOARD_INFO("imx074", 0x1a), }; -struct soc_camera_link imx074_link = { +static struct soc_camera_link imx074_link = { .bus_id = 0, .board_info = &imx074_info, .i2c_adapter_id = 0, @@ -1408,9 +1408,14 @@ static void __init ap4evb_init(void) platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); + hdmi_init_pm_clock(); fsi_init_pm_clock(); sh7372_pm_init(); + pm_clk_add(&fsi_device.dev, "spu2"); } static void __init ap4evb_timer_init(void) diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 776f20560e7..5b36b6c5b44 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -126,7 +126,7 @@ * ------+--------------------+--------------------+------- * IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low * IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | 
High - * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Tuch Panel | Low + * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Touch Panel | Low * IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low * IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High @@ -165,10 +165,10 @@ * USB1 can become Host by r8a66597, and become Function by renesas_usbhs. * But don't select both drivers in same time. * These uses same IRQ number for request_irq(), and aren't supporting - * IRQF_SHARD / IORESOURCE_IRQ_SHAREABLE. + * IRQF_SHARED / IORESOURCE_IRQ_SHAREABLE. * * Actually these are old/new version of USB driver. - * This mean its register will be broken if it supports SHARD IRQ, + * This mean its register will be broken if it supports shared IRQ, */ /* @@ -562,7 +562,121 @@ out: clk_put(hdmi_ick); } -/* USB1 (Host) */ +/* USBHS0 is connected to CN22 which takes a USB Mini-B plug + * + * The sh7372 SoC has IRQ7 set aside for USBHS0 hotplug, + * but on this particular board IRQ7 is already used by + * the touch screen. This leaves us with software polling. + */ +#define USBHS0_POLL_INTERVAL (HZ * 5) + +struct usbhs_private { + unsigned int usbphyaddr; + unsigned int usbcrcaddr; + struct renesas_usbhs_platform_info info; + struct delayed_work work; + struct platform_device *pdev; +}; + +#define usbhs_get_priv(pdev) \ + container_of(renesas_usbhs_get_info(pdev), \ + struct usbhs_private, info) + +#define usbhs_is_connected(priv) \ + (!((1 << 7) & __raw_readw(priv->usbcrcaddr))) + +static int usbhs_get_vbus(struct platform_device *pdev) +{ + return usbhs_is_connected(usbhs_get_priv(pdev)); +} + +static void usbhs_phy_reset(struct platform_device *pdev) +{ + struct usbhs_private *priv = usbhs_get_priv(pdev); + + /* init phy */ + __raw_writew(0x8a0a, priv->usbcrcaddr); +} + +static int usbhs0_get_id(struct platform_device *pdev) +{ + return USBHS_GADGET; +} + +static void usbhs0_work_function(struct work_struct *work) +{ + struct usbhs_private *priv = container_of(work, struct usbhs_private, + work.work); + + renesas_usbhs_call_notify_hotplug(priv->pdev); + schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL); +} + +static int usbhs0_hardware_init(struct platform_device *pdev) +{ + struct usbhs_private *priv = usbhs_get_priv(pdev); + + priv->pdev = pdev; + INIT_DELAYED_WORK(&priv->work, usbhs0_work_function); + schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL); + return 0; +} + +static void usbhs0_hardware_exit(struct platform_device *pdev) +{ + struct usbhs_private *priv = usbhs_get_priv(pdev); + + cancel_delayed_work_sync(&priv->work); +} + +static struct usbhs_private usbhs0_private = { + .usbcrcaddr = 0xe605810c, /* USBCR2 */ + .info = { + .platform_callback = { + .hardware_init = usbhs0_hardware_init, + .hardware_exit = usbhs0_hardware_exit, + .phy_reset = usbhs_phy_reset, + .get_id = usbhs0_get_id, + .get_vbus = usbhs_get_vbus, + }, + .driver_param = { + .buswait_bwait = 4, + }, + }, +}; + +static struct resource usbhs0_resources[] = { + [0] = { + .name = "USBHS0", + .start = 0xe6890000, + .end = 0xe68900e6 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0x1ca0) /* USB0_USB0I0 */, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usbhs0_device = { + .name = "renesas_usbhs", + .id = 0, + .dev = { + .platform_data = &usbhs0_private.info, + }, + .num_resources = ARRAY_SIZE(usbhs0_resources), + .resource = usbhs0_resources, +}; + +/* USBHS1 is connected to CN31 which takes a USB Mini-AB plug + * + * Use J30 to select between Host and Function. 
This setting + * can however not be detected by software. Hotplug of USBHS1 + * is provided via IRQ8. + */ +#define IRQ8 evt2irq(0x0300) + +/* USBHS1 USB Host support via r8a66597_hcd */ static void usb1_host_port_power(int port, int power) { if (!power) /* only power-on is supported for now */ @@ -579,9 +693,9 @@ static struct r8a66597_platdata usb1_host_data = { static struct resource usb1_host_resources[] = { [0] = { - .name = "USBHS", - .start = 0xE68B0000, - .end = 0xE68B00E6 - 1, + .name = "USBHS1", + .start = 0xe68b0000, + .end = 0xe68b00e6 - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -602,37 +716,14 @@ static struct platform_device usb1_host_device = { .resource = usb1_host_resources, }; -/* USB1 (Function) */ +/* USBHS1 USB Function support via renesas_usbhs */ + #define USB_PHY_MODE (1 << 4) #define USB_PHY_INT_EN ((1 << 3) | (1 << 2)) #define USB_PHY_ON (1 << 1) #define USB_PHY_OFF (1 << 0) #define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF) -struct usbhs_private { - unsigned int irq; - unsigned int usbphyaddr; - unsigned int usbcrcaddr; - struct renesas_usbhs_platform_info info; -}; - -#define usbhs_get_priv(pdev) \ - container_of(renesas_usbhs_get_info(pdev), \ - struct usbhs_private, info) - -#define usbhs_is_connected(priv) \ - (!((1 << 7) & __raw_readw(priv->usbcrcaddr))) - -static int usbhs1_get_id(struct platform_device *pdev) -{ - return USBHS_GADGET; -} - -static int usbhs1_get_vbus(struct platform_device *pdev) -{ - return usbhs_is_connected(usbhs_get_priv(pdev)); -} - static irqreturn_t usbhs1_interrupt(int irq, void *data) { struct platform_device *pdev = data; @@ -654,12 +745,10 @@ static int usbhs1_hardware_init(struct platform_device *pdev) struct usbhs_private *priv = usbhs_get_priv(pdev); int ret; - irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH); - /* clear interrupt status */ __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); - ret = request_irq(priv->irq, usbhs1_interrupt, 0, + ret = request_irq(IRQ8, usbhs1_interrupt, IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), pdev); if (ret) { dev_err(&pdev->dev, "request_irq err\n"); @@ -679,15 +768,12 @@ static void usbhs1_hardware_exit(struct platform_device *pdev) /* clear interrupt status */ __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); - free_irq(priv->irq, pdev); + free_irq(IRQ8, pdev); } -static void usbhs1_phy_reset(struct platform_device *pdev) +static int usbhs1_get_id(struct platform_device *pdev) { - struct usbhs_private *priv = usbhs_get_priv(pdev); - - /* init phy */ - __raw_writew(0x8a0a, priv->usbcrcaddr); + return USBHS_GADGET; } static u32 usbhs1_pipe_cfg[] = { @@ -710,16 +796,15 @@ static u32 usbhs1_pipe_cfg[] = { }; static struct usbhs_private usbhs1_private = { - .irq = evt2irq(0x0300), /* IRQ8 */ - .usbphyaddr = 0xE60581E2, /* USBPHY1INTAP */ - .usbcrcaddr = 0xE6058130, /* USBCR4 */ + .usbphyaddr = 0xe60581e2, /* USBPHY1INTAP */ + .usbcrcaddr = 0xe6058130, /* USBCR4 */ .info = { .platform_callback = { .hardware_init = usbhs1_hardware_init, .hardware_exit = usbhs1_hardware_exit, - .phy_reset = usbhs1_phy_reset, .get_id = usbhs1_get_id, - .get_vbus = usbhs1_get_vbus, + .phy_reset = usbhs_phy_reset, + .get_vbus = usbhs_get_vbus, }, .driver_param = { .buswait_bwait = 4, @@ -731,9 +816,9 @@ static struct usbhs_private usbhs1_private = { static struct resource usbhs1_resources[] = { [0] = { - .name = "USBHS", - .start = 0xE68B0000, - .end = 0xE68B00E6 - 1, + .name = "USBHS1", + .start = 0xe68b0000, + .end = 0xe68b00e6 - 1, .flags = IORESOURCE_MEM, }, [1] = { @@ -752,7 +837,6 @@ 
static struct platform_device usbhs1_device = { .resource = usbhs1_resources, }; - /* LED */ static struct gpio_led mackerel_leds[] = { { @@ -1205,6 +1289,7 @@ static struct platform_device *mackerel_devices[] __initdata = { &lcdc_device, &usb1_host_device, &usbhs1_device, + &usbhs0_device, &leds_device, &fsi_device, &fsi_ak4643_device, @@ -1301,6 +1386,7 @@ static void __init mackerel_map_io(void) #define GPIO_PORT9CR 0xE6051009 #define GPIO_PORT10CR 0xE605100A +#define GPIO_PORT167CR 0xE60520A7 #define GPIO_PORT168CR 0xE60520A8 #define SRCR4 0xe61580bc #define USCCR1 0xE6058144 @@ -1354,17 +1440,17 @@ static void __init mackerel_init(void) gpio_request(GPIO_PORT151, NULL); /* LCDDON */ gpio_direction_output(GPIO_PORT151, 1); - /* USB enable */ - gpio_request(GPIO_FN_VBUS0_1, NULL); - gpio_request(GPIO_FN_IDIN_1_18, NULL); - gpio_request(GPIO_FN_PWEN_1_115, NULL); - gpio_request(GPIO_FN_OVCN_1_114, NULL); - gpio_request(GPIO_FN_EXTLP_1, NULL); - gpio_request(GPIO_FN_OVCN2_1, NULL); - gpio_pull_down(GPIO_PORT168CR); + /* USBHS0 */ + gpio_request(GPIO_FN_VBUS0_0, NULL); + gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */ + + /* USBHS1 */ + gpio_request(GPIO_FN_VBUS0_1, NULL); + gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */ + gpio_request(GPIO_FN_IDIN_1_113, NULL); - /* setup USB phy */ - __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */ + /* USB phy tweak to make the r8a66597_hcd host driver work */ + __raw_writew(0x8a0a, 0xe6058130); /* USBCR4 */ /* enable FSI2 port A (ak4643) */ gpio_request(GPIO_FN_FSIAIBT, NULL); @@ -1496,8 +1582,13 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); + sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); + hdmi_init_pm_clock(); sh7372_pm_init(); + pm_clk_add(&fsi_device.dev, "spu2"); } static void __init mackerel_timer_init(void) diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index c0800d83971..91f5779abdd 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = { CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), + CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]), }; void __init sh7372_clock_init(void) diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index df20d767017..ce595cee86c 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -12,6 +12,7 @@ #define __ASM_SH7372_H__ #include <linux/sh_clk.h> +#include <linux/pm_domain.h> /* * Pin Function Controller: @@ -470,4 +471,32 @@ extern struct clk sh7372_fsibck_clk; extern struct clk sh7372_fsidiva_clk; extern struct clk sh7372_fsidivb_clk; +struct platform_device; + +struct sh7372_pm_domain { + struct generic_pm_domain genpd; + unsigned int bit_shift; +}; + +static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) +{ + return container_of(d, struct sh7372_pm_domain, genpd); +} + +#ifdef CONFIG_PM +extern struct sh7372_pm_domain sh7372_a4lc; +extern struct sh7372_pm_domain sh7372_a4mp; +extern struct sh7372_pm_domain sh7372_d4; +extern struct sh7372_pm_domain sh7372_a3rv; +extern 
struct sh7372_pm_domain sh7372_a3ri; +extern struct sh7372_pm_domain sh7372_a3sg; + +extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); +extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, + struct platform_device *pdev); +#else +#define sh7372_init_pm_domain(pd) do { } while(0) +#define sh7372_add_device_to_domain(pd, pdev) do { } while(0) +#endif /* CONFIG_PM */ + #endif /* __ASM_SH7372_H__ */ diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c index 5d0e1503ece..a911a60e771 100644 --- a/arch/arm/mach-shmobile/intc-sh73a0.c +++ b/arch/arm/mach-shmobile/intc-sh73a0.c @@ -250,6 +250,11 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id) return IRQ_HANDLED; } +static int sh73a0_set_wake(struct irq_data *data, unsigned int on) +{ + return 0; /* always allow wakeup */ +} + void __init sh73a0_init_irq(void) { void __iomem *gic_dist_base = __io(0xf0001000); @@ -257,6 +262,7 @@ void __init sh73a0_init_irq(void) void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE); gic_init(0, 29, gic_dist_base, gic_cpu_base); + gic_arch_extn.irq_set_wake = sh73a0_set_wake; register_intc_controller(&intcs_desc); diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index 8e4aadf14c9..933fb411be0 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -15,16 +15,176 @@ #include <linux/list.h> #include <linux/err.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <linux/platform_device.h> +#include <linux/delay.h> #include <asm/system.h> #include <asm/io.h> #include <asm/tlbflush.h> #include <mach/common.h> +#include <mach/sh7372.h> #define SMFRAM 0xe6a70000 #define SYSTBCR 0xe6150024 #define SBAR 0xe6180020 #define APARMBAREA 0xe6f10020 +#define SPDCR 0xe6180008 +#define SWUCR 0xe6180014 +#define PSTR 0xe6180080 + +#define PSTR_RETRIES 100 +#define PSTR_DELAY_US 10 + +#ifdef CONFIG_PM + +static int pd_power_down(struct generic_pm_domain *genpd) +{ + struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); + unsigned int mask = 1 << sh7372_pd->bit_shift; + + if (__raw_readl(PSTR) & mask) { + unsigned int retry_count; + + __raw_writel(mask, SPDCR); + + for (retry_count = PSTR_RETRIES; retry_count; retry_count--) { + if (!(__raw_readl(SPDCR) & mask)) + break; + cpu_relax(); + } + } + + pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); + + return 0; +} + +static int pd_power_up(struct generic_pm_domain *genpd) +{ + struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); + unsigned int mask = 1 << sh7372_pd->bit_shift; + unsigned int retry_count; + int ret = 0; + + if (__raw_readl(PSTR) & mask) + goto out; + + __raw_writel(mask, SWUCR); + + for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) { + if (!(__raw_readl(SWUCR) & mask)) + goto out; + if (retry_count > PSTR_RETRIES) + udelay(PSTR_DELAY_US); + else + cpu_relax(); + } + if (__raw_readl(SWUCR) & mask) + ret = -EIO; + + out: + pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n", + mask, __raw_readl(PSTR)); + + return ret; +} + +static int pd_power_up_a3rv(struct generic_pm_domain *genpd) +{ + int ret = pd_power_up(genpd); + + /* force A4LC on after A3RV has been requested on */ + pm_genpd_poweron(&sh7372_a4lc.genpd); + + return ret; +} + +static int pd_power_down_a3rv(struct generic_pm_domain *genpd) +{ + int ret = pd_power_down(genpd); + + /* try to power down A4LC after A3RV is requested off */ + 
genpd_queue_power_off_work(&sh7372_a4lc.genpd); + + return ret; +} + +static int pd_power_down_a4lc(struct generic_pm_domain *genpd) +{ + /* only power down A4LC if A3RV is off */ + if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift))) + return pd_power_down(genpd); + + return -EBUSY; +} + +static bool pd_active_wakeup(struct device *dev) +{ + return true; +} + +void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) +{ + struct generic_pm_domain *genpd = &sh7372_pd->genpd; + + pm_genpd_init(genpd, NULL, false); + genpd->stop_device = pm_clk_suspend; + genpd->start_device = pm_clk_resume; + genpd->active_wakeup = pd_active_wakeup; + + if (sh7372_pd == &sh7372_a4lc) { + genpd->power_off = pd_power_down_a4lc; + genpd->power_on = pd_power_up; + } else if (sh7372_pd == &sh7372_a3rv) { + genpd->power_off = pd_power_down_a3rv; + genpd->power_on = pd_power_up_a3rv; + } else { + genpd->power_off = pd_power_down; + genpd->power_on = pd_power_up; + } + genpd->power_on(&sh7372_pd->genpd); +} + +void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + if (!dev->power.subsys_data) { + pm_clk_init(dev); + pm_clk_add(dev, NULL); + } + pm_genpd_add_device(&sh7372_pd->genpd, dev); +} + +struct sh7372_pm_domain sh7372_a4lc = { + .bit_shift = 1, +}; + +struct sh7372_pm_domain sh7372_a4mp = { + .bit_shift = 2, +}; + +struct sh7372_pm_domain sh7372_d4 = { + .bit_shift = 3, +}; + +struct sh7372_pm_domain sh7372_a3rv = { + .bit_shift = 6, +}; + +struct sh7372_pm_domain sh7372_a3ri = { + .bit_shift = 8, +}; + +struct sh7372_pm_domain sh7372_a3sg = { + .bit_shift = 13, +}; + +#endif /* CONFIG_PM */ + static void sh7372_enter_core_standby(void) { void __iomem *smfram = (void __iomem *)SMFRAM; diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c index 2d1b67a59e4..6ec454e1e06 100644 --- a/arch/arm/mach-shmobile/pm_runtime.c +++ b/arch/arm/mach-shmobile/pm_runtime.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/io.h> #include <linux/pm_runtime.h> +#include <linux/pm_domain.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/sh_clk.h> @@ -28,31 +29,38 @@ static int default_platform_runtime_idle(struct device *dev) return pm_runtime_suspend(dev); } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { - .runtime_suspend = pm_runtime_clk_suspend, - .runtime_resume = pm_runtime_clk_resume, + .runtime_suspend = pm_clk_suspend, + .runtime_resume = pm_clk_resume, .runtime_idle = default_platform_runtime_idle, USE_PLATFORM_PM_SLEEP_OPS }, }; -#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain) +#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain) #else -#define DEFAULT_PWR_DOMAIN_PTR NULL +#define DEFAULT_PM_DOMAIN_PTR NULL #endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { - .pwr_domain = DEFAULT_PWR_DOMAIN_PTR, + .pm_domain = DEFAULT_PM_DOMAIN_PTR, .con_ids = { NULL, }, }; static int __init sh_pm_runtime_init(void) { - pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); + pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(sh_pm_runtime_init); + +static int __init sh_pm_runtime_late_init(void) +{ + pm_genpd_poweroff_unused(); + return 0; +} +late_initcall(sh_pm_runtime_late_init); diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c index 
2c10190dbb5..e546017f15d 100644 --- a/arch/arm/mach-shmobile/setup-sh7367.c +++ b/arch/arm/mach-shmobile/setup-sh7367.c @@ -38,7 +38,7 @@ static struct plat_sci_port scif0_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, - .type = PORT_SCIF, + .type = PORT_SCIFA, .irqs = { evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00), evt2irq(0xc00) }, }; @@ -57,7 +57,7 @@ static struct plat_sci_port scif1_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, - .type = PORT_SCIF, + .type = PORT_SCIFA, .irqs = { evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20), evt2irq(0xc20) }, }; @@ -76,7 +76,7 @@ static struct plat_sci_port scif2_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, - .type = PORT_SCIF, + .type = PORT_SCIFA, .irqs = { evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40), evt2irq(0xc40) }, }; @@ -95,7 +95,7 @@ static struct plat_sci_port scif3_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, - .type = PORT_SCIF, + .type = PORT_SCIFA, .irqs = { evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60), evt2irq(0xc60) }, }; @@ -114,7 +114,7 @@ static struct plat_sci_port scif4_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, - .type = PORT_SCIF, + .type = PORT_SCIFA, .irqs = { evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20), evt2irq(0xd20) }, }; @@ -133,7 +133,7 @@ static struct plat_sci_port scif5_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, - .type = PORT_SCIF, + .type = PORT_SCIFA, .irqs = { evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40), evt2irq(0xd40) }, }; @@ -152,7 +152,7 @@ static struct plat_sci_port scif6_platform_data = { .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE, .scbrr_algo_id = SCBRR_ALGO_4, - .type = PORT_SCIF, + .type = PORT_SCIFB, .irqs = { evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60), evt2irq(0xd60) }, }; diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index cd807eea69e..79f0413d872 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -841,11 +841,22 @@ static struct platform_device *sh7372_late_devices[] __initdata = { void __init sh7372_add_standard_devices(void) { + sh7372_init_pm_domain(&sh7372_a4lc); + sh7372_init_pm_domain(&sh7372_a4mp); + sh7372_init_pm_domain(&sh7372_d4); + sh7372_init_pm_domain(&sh7372_a3rv); + sh7372_init_pm_domain(&sh7372_a3ri); + sh7372_init_pm_domain(&sh7372_a3sg); + platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); platform_add_devices(sh7372_late_devices, ARRAY_SIZE(sh7372_late_devices)); + + sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device); + sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device); } void __init sh7372_add_early_devices(void) diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile index 823c703e573..ed58ef9019b 100644 --- a/arch/arm/mach-tegra/Makefile +++ b/arch/arm/mach-tegra/Makefile @@ -4,7 +4,6 @@ obj-y += io.o obj-y += irq.o obj-y += clock.o obj-y += timer.o -obj-y += gpio.o obj-y += pinmux.o obj-y += powergate.o obj-y += fuse.o diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c deleted file mode 100644 index 919d6383773..00000000000 --- a/arch/arm/mach-tegra/gpio.c +++ 
/dev/null @@ -1,431 +0,0 @@ -/* - * arch/arm/mach-tegra/gpio.c - * - * Copyright (c) 2010 Google, Inc - * - * Author: - * Erik Gilling <konkers@google.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include <linux/init.h> -#include <linux/irq.h> -#include <linux/interrupt.h> - -#include <linux/io.h> -#include <linux/gpio.h> - -#include <asm/mach/irq.h> - -#include <mach/iomap.h> -#include <mach/suspend.h> - -#define GPIO_BANK(x) ((x) >> 5) -#define GPIO_PORT(x) (((x) >> 3) & 0x3) -#define GPIO_BIT(x) ((x) & 0x7) - -#define GPIO_REG(x) (IO_TO_VIRT(TEGRA_GPIO_BASE) + \ - GPIO_BANK(x) * 0x80 + \ - GPIO_PORT(x) * 4) - -#define GPIO_CNF(x) (GPIO_REG(x) + 0x00) -#define GPIO_OE(x) (GPIO_REG(x) + 0x10) -#define GPIO_OUT(x) (GPIO_REG(x) + 0X20) -#define GPIO_IN(x) (GPIO_REG(x) + 0x30) -#define GPIO_INT_STA(x) (GPIO_REG(x) + 0x40) -#define GPIO_INT_ENB(x) (GPIO_REG(x) + 0x50) -#define GPIO_INT_LVL(x) (GPIO_REG(x) + 0x60) -#define GPIO_INT_CLR(x) (GPIO_REG(x) + 0x70) - -#define GPIO_MSK_CNF(x) (GPIO_REG(x) + 0x800) -#define GPIO_MSK_OE(x) (GPIO_REG(x) + 0x810) -#define GPIO_MSK_OUT(x) (GPIO_REG(x) + 0X820) -#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + 0x840) -#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + 0x850) -#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + 0x860) - -#define GPIO_INT_LVL_MASK 0x010101 -#define GPIO_INT_LVL_EDGE_RISING 0x000101 -#define GPIO_INT_LVL_EDGE_FALLING 0x000100 -#define GPIO_INT_LVL_EDGE_BOTH 0x010100 -#define GPIO_INT_LVL_LEVEL_HIGH 0x000001 -#define GPIO_INT_LVL_LEVEL_LOW 0x000000 - -struct tegra_gpio_bank { - int bank; - int irq; - spinlock_t lvl_lock[4]; -#ifdef CONFIG_PM - u32 cnf[4]; - u32 out[4]; - u32 oe[4]; - u32 int_enb[4]; - u32 int_lvl[4]; -#endif -}; - - -static struct tegra_gpio_bank tegra_gpio_banks[] = { - {.bank = 0, .irq = INT_GPIO1}, - {.bank = 1, .irq = INT_GPIO2}, - {.bank = 2, .irq = INT_GPIO3}, - {.bank = 3, .irq = INT_GPIO4}, - {.bank = 4, .irq = INT_GPIO5}, - {.bank = 5, .irq = INT_GPIO6}, - {.bank = 6, .irq = INT_GPIO7}, -}; - -static int tegra_gpio_compose(int bank, int port, int bit) -{ - return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7); -} - -static void tegra_gpio_mask_write(u32 reg, int gpio, int value) -{ - u32 val; - - val = 0x100 << GPIO_BIT(gpio); - if (value) - val |= 1 << GPIO_BIT(gpio); - __raw_writel(val, reg); -} - -void tegra_gpio_enable(int gpio) -{ - tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1); -} - -void tegra_gpio_disable(int gpio) -{ - tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0); -} - -static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - tegra_gpio_mask_write(GPIO_MSK_OUT(offset), offset, value); -} - -static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - return (__raw_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1; -} - -static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 0); - return 0; -} - -static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset, - int value) -{ - tegra_gpio_set(chip, offset, value); - 
tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 1); - return 0; -} - - - -static struct gpio_chip tegra_gpio_chip = { - .label = "tegra-gpio", - .direction_input = tegra_gpio_direction_input, - .get = tegra_gpio_get, - .direction_output = tegra_gpio_direction_output, - .set = tegra_gpio_set, - .base = 0, - .ngpio = TEGRA_NR_GPIOS, -}; - -static void tegra_gpio_irq_ack(struct irq_data *d) -{ - int gpio = d->irq - INT_GPIO_BASE; - - __raw_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio)); -} - -static void tegra_gpio_irq_mask(struct irq_data *d) -{ - int gpio = d->irq - INT_GPIO_BASE; - - tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0); -} - -static void tegra_gpio_irq_unmask(struct irq_data *d) -{ - int gpio = d->irq - INT_GPIO_BASE; - - tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1); -} - -static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type) -{ - int gpio = d->irq - INT_GPIO_BASE; - struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d); - int port = GPIO_PORT(gpio); - int lvl_type; - int val; - unsigned long flags; - - switch (type & IRQ_TYPE_SENSE_MASK) { - case IRQ_TYPE_EDGE_RISING: - lvl_type = GPIO_INT_LVL_EDGE_RISING; - break; - - case IRQ_TYPE_EDGE_FALLING: - lvl_type = GPIO_INT_LVL_EDGE_FALLING; - break; - - case IRQ_TYPE_EDGE_BOTH: - lvl_type = GPIO_INT_LVL_EDGE_BOTH; - break; - - case IRQ_TYPE_LEVEL_HIGH: - lvl_type = GPIO_INT_LVL_LEVEL_HIGH; - break; - - case IRQ_TYPE_LEVEL_LOW: - lvl_type = GPIO_INT_LVL_LEVEL_LOW; - break; - - default: - return -EINVAL; - } - - spin_lock_irqsave(&bank->lvl_lock[port], flags); - - val = __raw_readl(GPIO_INT_LVL(gpio)); - val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio)); - val |= lvl_type << GPIO_BIT(gpio); - __raw_writel(val, GPIO_INT_LVL(gpio)); - - spin_unlock_irqrestore(&bank->lvl_lock[port], flags); - - if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) - __irq_set_handler_locked(d->irq, handle_level_irq); - else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - __irq_set_handler_locked(d->irq, handle_edge_irq); - - return 0; -} - -static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) -{ - struct tegra_gpio_bank *bank; - int port; - int pin; - int unmasked = 0; - struct irq_chip *chip = irq_desc_get_chip(desc); - - chained_irq_enter(chip, desc); - - bank = irq_get_handler_data(irq); - - for (port = 0; port < 4; port++) { - int gpio = tegra_gpio_compose(bank->bank, port, 0); - unsigned long sta = __raw_readl(GPIO_INT_STA(gpio)) & - __raw_readl(GPIO_INT_ENB(gpio)); - u32 lvl = __raw_readl(GPIO_INT_LVL(gpio)); - - for_each_set_bit(pin, &sta, 8) { - __raw_writel(1 << pin, GPIO_INT_CLR(gpio)); - - /* if gpio is edge triggered, clear condition - * before executing the hander so that we don't - * miss edges - */ - if (lvl & (0x100 << pin)) { - unmasked = 1; - chained_irq_exit(chip, desc); - } - - generic_handle_irq(gpio_to_irq(gpio + pin)); - } - } - - if (!unmasked) - chained_irq_exit(chip, desc); - -} - -#ifdef CONFIG_PM -void tegra_gpio_resume(void) -{ - unsigned long flags; - int b; - int p; - - local_irq_save(flags); - - for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { - struct tegra_gpio_bank *bank = &tegra_gpio_banks[b]; - - for (p = 0; p < ARRAY_SIZE(bank->oe); p++) { - unsigned int gpio = (b<<5) | (p<<3); - __raw_writel(bank->cnf[p], GPIO_CNF(gpio)); - __raw_writel(bank->out[p], GPIO_OUT(gpio)); - __raw_writel(bank->oe[p], GPIO_OE(gpio)); - __raw_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio)); - __raw_writel(bank->int_enb[p], GPIO_INT_ENB(gpio)); - } - } - - 
local_irq_restore(flags); -} - -void tegra_gpio_suspend(void) -{ - unsigned long flags; - int b; - int p; - - local_irq_save(flags); - for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { - struct tegra_gpio_bank *bank = &tegra_gpio_banks[b]; - - for (p = 0; p < ARRAY_SIZE(bank->oe); p++) { - unsigned int gpio = (b<<5) | (p<<3); - bank->cnf[p] = __raw_readl(GPIO_CNF(gpio)); - bank->out[p] = __raw_readl(GPIO_OUT(gpio)); - bank->oe[p] = __raw_readl(GPIO_OE(gpio)); - bank->int_enb[p] = __raw_readl(GPIO_INT_ENB(gpio)); - bank->int_lvl[p] = __raw_readl(GPIO_INT_LVL(gpio)); - } - } - local_irq_restore(flags); -} - -static int tegra_gpio_wake_enable(struct irq_data *d, unsigned int enable) -{ - struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d); - return irq_set_irq_wake(bank->irq, enable); -} -#endif - -static struct irq_chip tegra_gpio_irq_chip = { - .name = "GPIO", - .irq_ack = tegra_gpio_irq_ack, - .irq_mask = tegra_gpio_irq_mask, - .irq_unmask = tegra_gpio_irq_unmask, - .irq_set_type = tegra_gpio_irq_set_type, -#ifdef CONFIG_PM - .irq_set_wake = tegra_gpio_wake_enable, -#endif -}; - - -/* This lock class tells lockdep that GPIO irqs are in a different - * category than their parents, so it won't report false recursion. - */ -static struct lock_class_key gpio_lock_class; - -static int __init tegra_gpio_init(void) -{ - struct tegra_gpio_bank *bank; - int i; - int j; - - for (i = 0; i < 7; i++) { - for (j = 0; j < 4; j++) { - int gpio = tegra_gpio_compose(i, j, 0); - __raw_writel(0x00, GPIO_INT_ENB(gpio)); - } - } - - gpiochip_add(&tegra_gpio_chip); - - for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) { - bank = &tegra_gpio_banks[GPIO_BANK(irq_to_gpio(i))]; - - irq_set_lockdep_class(i, &gpio_lock_class); - irq_set_chip_data(i, bank); - irq_set_chip_and_handler(i, &tegra_gpio_irq_chip, - handle_simple_irq); - set_irq_flags(i, IRQF_VALID); - } - - for (i = 0; i < ARRAY_SIZE(tegra_gpio_banks); i++) { - bank = &tegra_gpio_banks[i]; - - irq_set_chained_handler(bank->irq, tegra_gpio_irq_handler); - irq_set_handler_data(bank->irq, bank); - - for (j = 0; j < 4; j++) - spin_lock_init(&bank->lvl_lock[j]); - } - - return 0; -} - -postcore_initcall(tegra_gpio_init); - -void __init tegra_gpio_config(struct tegra_gpio_table *table, int num) -{ - int i; - - for (i = 0; i < num; i++) { - int gpio = table[i].gpio; - - if (table[i].enable) - tegra_gpio_enable(gpio); - else - tegra_gpio_disable(gpio); - } -} - -#ifdef CONFIG_DEBUG_FS - -#include <linux/debugfs.h> -#include <linux/seq_file.h> - -static int dbg_gpio_show(struct seq_file *s, void *unused) -{ - int i; - int j; - - for (i = 0; i < 7; i++) { - for (j = 0; j < 4; j++) { - int gpio = tegra_gpio_compose(i, j, 0); - seq_printf(s, - "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", - i, j, - __raw_readl(GPIO_CNF(gpio)), - __raw_readl(GPIO_OE(gpio)), - __raw_readl(GPIO_OUT(gpio)), - __raw_readl(GPIO_IN(gpio)), - __raw_readl(GPIO_INT_STA(gpio)), - __raw_readl(GPIO_INT_ENB(gpio)), - __raw_readl(GPIO_INT_LVL(gpio))); - } - } - return 0; -} - -static int dbg_gpio_open(struct inode *inode, struct file *file) -{ - return single_open(file, dbg_gpio_show, &inode->i_private); -} - -static const struct file_operations debug_fops = { - .open = dbg_gpio_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init tegra_gpio_debuginit(void) -{ - (void) debugfs_create_file("tegra_gpio", S_IRUGO, - NULL, NULL, &debug_fops); - return 0; -} -late_initcall(tegra_gpio_debuginit); -#endif diff --git 
a/arch/arm/mach-u300/clock.h b/arch/arm/mach-u300/clock.h index c34f3ea3017..4f50ca8f901 100644 --- a/arch/arm/mach-u300/clock.h +++ b/arch/arm/mach-u300/clock.h @@ -31,7 +31,7 @@ struct clk { bool reset; __u16 clk_val; __s8 usecount; - __u32 res_reg; + void __iomem * res_reg; __u16 res_mask; bool hw_ctrld; diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h index 8b85df4c8d8..035fdc9dbdb 100644 --- a/arch/arm/mach-u300/include/mach/u300-regs.h +++ b/arch/arm/mach-u300/include/mach/u300-regs.h @@ -18,6 +18,12 @@ * the defines are used for setting up the I/O memory mapping. */ +#ifdef __ASSEMBLER__ +#define IOMEM(a) (a) +#else +#define IOMEM(a) (void __iomem *) a +#endif + /* NAND Flash CS0 */ #define U300_NAND_CS0_PHYS_BASE 0x80000000 @@ -48,13 +54,6 @@ #endif /* - * All the following peripherals are specified at their PHYSICAL address, - * so if you need to access them (in the kernel), you MUST use the macros - * defined in <asm/io.h> to map to the IO_ADDRESS_AHB() IO_ADDRESS_FAST() - * etc. - */ - -/* * AHB peripherals */ @@ -63,11 +62,11 @@ /* Vectored Interrupt Controller 0, servicing 32 interrupts */ #define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000) -#define U300_INTCON0_VBASE (U300_AHB_PER_VIRT_BASE+0x1000) +#define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000) /* Vectored Interrupt Controller 1, servicing 32 interrupts */ #define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000) -#define U300_INTCON1_VBASE (U300_AHB_PER_VIRT_BASE+0x2000) +#define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000) /* Memory Stick Pro (MSPRO) controller */ #define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000) @@ -115,7 +114,7 @@ /* SYSCON */ #define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000) -#define U300_SYSCON_VBASE (U300_SLOW_PER_VIRT_BASE+0x1000) +#define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000) /* Watchdog */ #define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000) @@ -125,7 +124,7 @@ /* APP side special timer */ #define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000) -#define U300_TIMER_APP_VBASE (U300_SLOW_PER_VIRT_BASE+0x4000) +#define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000) /* Keypad */ #define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000) @@ -181,5 +180,4 @@ * Virtual accessor macros for static devices */ - #endif diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c index 891cf44591e..18d7fa0603c 100644 --- a/arch/arm/mach-u300/timer.c +++ b/arch/arm/mach-u300/timer.c @@ -411,8 +411,7 @@ static void __init u300_timer_init(void) /* Use general purpose timer 2 as clock source */ if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC, "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) - printk(KERN_ERR "timer: failed to initialize clock " - "source %s\n", clocksource_u300_1mhz.name); + pr_err("timer: failed to initialize U300 clock source\n"); clockevents_calc_mult_shift(&clockevent_u300_1mhz, rate, APPTIMER_MIN_RANGE); diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c index fd4cf1ca5ef..70cdbd60596 100644 --- a/arch/arm/mach-ux500/board-mop500-pins.c +++ b/arch/arm/mach-ux500/board-mop500-pins.c @@ -110,10 +110,18 @@ static pin_cfg_t mop500_pins_common[] = { GPIO168_KP_O0, /* UART */ - GPIO0_U0_CTSn | PIN_INPUT_PULLUP, - GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, - GPIO2_U0_RXD | PIN_INPUT_PULLUP, - GPIO3_U0_TXD | PIN_OUTPUT_HIGH, + /* uart-0 pins gpio configuration should be + * kept intact to prevent 
glitch in tx line + * when tty dev is opened. Later these pins + * are configured to uart mop500_pins_uart0 + * + * It will be replaced with uart configuration + * once the issue is solved. + */ + GPIO0_GPIO | PIN_INPUT_PULLUP, + GPIO1_GPIO | PIN_OUTPUT_HIGH, + GPIO2_GPIO | PIN_INPUT_PULLUP, + GPIO3_GPIO | PIN_OUTPUT_HIGH, GPIO29_U2_RXD | PIN_INPUT_PULLUP, GPIO30_U2_TXD | PIN_OUTPUT_HIGH, diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index bb26f40493e..2a08c07dec6 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -27,18 +27,21 @@ #include <linux/leds-lp5521.h> #include <linux/input.h> #include <linux/gpio_keys.h> +#include <linux/delay.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <plat/i2c.h> #include <plat/ste_dma40.h> +#include <plat/pincfg.h> #include <mach/hardware.h> #include <mach/setup.h> #include <mach/devices.h> #include <mach/irqs.h> +#include "pins-db8500.h" #include "ste-dma40-db8500.h" #include "devices-db8500.h" #include "board-mop500.h" @@ -393,12 +396,63 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = { }; #endif + +static pin_cfg_t mop500_pins_uart0[] = { + GPIO0_U0_CTSn | PIN_INPUT_PULLUP, + GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, + GPIO2_U0_RXD | PIN_INPUT_PULLUP, + GPIO3_U0_TXD | PIN_OUTPUT_HIGH, +}; + +#define PRCC_K_SOFTRST_SET 0x18 +#define PRCC_K_SOFTRST_CLEAR 0x1C +static void ux500_uart0_reset(void) +{ + void __iomem *prcc_rst_set, *prcc_rst_clr; + + prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + + PRCC_K_SOFTRST_SET); + prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + + PRCC_K_SOFTRST_CLEAR); + + /* Activate soft reset PRCC_K_SOFTRST_CLEAR */ + writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr); + udelay(1); + + /* Release soft reset PRCC_K_SOFTRST_SET */ + writel((readl(prcc_rst_set) | 0x1), prcc_rst_set); + udelay(1); +} + +static void ux500_uart0_init(void) +{ + int ret; + + ret = nmk_config_pins(mop500_pins_uart0, + ARRAY_SIZE(mop500_pins_uart0)); + if (ret < 0) + pr_err("pl011: uart pins_enable failed\n"); +} + +static void ux500_uart0_exit(void) +{ + int ret; + + ret = nmk_config_pins_sleep(mop500_pins_uart0, + ARRAY_SIZE(mop500_pins_uart0)); + if (ret < 0) + pr_err("pl011: uart pins_disable failed\n"); +} + static struct amba_pl011_data uart0_plat = { #ifdef CONFIG_STE_DMA40 .dma_filter = stedma40_filter, .dma_rx_param = &uart0_dma_cfg_rx, .dma_tx_param = &uart0_dma_cfg_tx, #endif + .init = ux500_uart0_init, + .exit = ux500_uart0_exit, + .reset = ux500_uart0_reset, }; static struct amba_pl011_data uart1_plat = { diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index c3c417656bd..4598b06c8c5 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -159,6 +159,9 @@ static void __init db8500_add_gpios(void) /* No custom data yet */ }; + if (cpu_is_u8500v2()) + pdata.supports_sleepmode = true; + dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base), IRQ_DB8500_GPIO0, &pdata); } diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c index 285edcd2da2..9e6b93b1a04 100644 --- a/arch/arm/mach-vexpress/v2m.c +++ b/arch/arm/mach-vexpress/v2m.c @@ -46,12 +46,6 @@ static struct map_desc v2m_io_desc[] __initdata = { }, }; -static void __init v2m_init_early(void) -{ - ct_desc->init_early(); - versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000); -} - static void __init v2m_timer_init(void) { u32 scctrl; @@ -365,6 +359,13 @@ static struct clk_lookup 
v2m_lookups[] = { }, }; +static void __init v2m_init_early(void) +{ + ct_desc->init_early(); + clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups)); + versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000); +} + static void v2m_power_off(void) { if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0)) @@ -418,8 +419,6 @@ static void __init v2m_init(void) { int i; - clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups)); - platform_device_register(&v2m_pcie_i2c_device); platform_device_register(&v2m_ddc_i2c_device); platform_device_register(&v2m_flash_device); diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c index 245140c0df1..642de0408f2 100644 --- a/arch/arm/mach-vt8500/irq.c +++ b/arch/arm/mach-vt8500/irq.c @@ -39,9 +39,10 @@ static void __iomem *ic_regbase; static void __iomem *sic_regbase; -static void vt8500_irq_mask(unsigned int irq) +static void vt8500_irq_mask(struct irq_data *d) { void __iomem *base = ic_regbase; + unsigned irq = d->irq; u8 edge; if (irq >= 64) { @@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq) } } -static void vt8500_irq_unmask(unsigned int irq) +static void vt8500_irq_unmask(struct irq_data *d) { void __iomem *base = ic_regbase; + unsigned irq = d->irq; u8 dctr; if (irq >= 64) { @@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq) writeb(dctr, base + VT8500_IC_DCTR + irq); } -static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) +static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) { void __iomem *base = ic_regbase; - unsigned int orig_irq = irq; + unsigned irq = d->irq; + unsigned orig_irq = irq; u8 dctr; if (irq >= 64) { @@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) } static struct irq_chip vt8500_irq_chip = { - .name = "vt8500", - .ack = vt8500_irq_mask, - .mask = vt8500_irq_mask, - .unmask = vt8500_irq_unmask, - .set_type = vt8500_irq_set_type, + .name = "vt8500", + .irq_ack = vt8500_irq_mask, + .irq_mask = vt8500_irq_mask, + .irq_unmask = vt8500_irq_unmask, + .irq_set_type = vt8500_irq_set_type, }; void __init vt8500_init_irq(void) diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index ef59099a546..44c086710d2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -120,17 +120,22 @@ static void l2x0_cache_sync(void) spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2x0_flush_all(void) +static void __l2x0_flush_all(void) { - unsigned long flags; - - /* clean all ways */ - spin_lock_irqsave(&l2x0_lock, flags); debug_writel(0x03); writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); cache_sync(); debug_writel(0x00); +} + +static void l2x0_flush_all(void) +{ + unsigned long flags; + + /* clean all ways */ + spin_lock_irqsave(&l2x0_lock, flags); + __l2x0_flush_all(); spin_unlock_irqrestore(&l2x0_lock, flags); } @@ -266,7 +271,9 @@ static void l2x0_disable(void) unsigned long flags; spin_lock_irqsave(&l2x0_lock, flags); - writel(0, l2x0_base + L2X0_CTRL); + __l2x0_flush_all(); + writel_relaxed(0, l2x0_base + L2X0_CTRL); + dsb(); spin_unlock_irqrestore(&l2x0_lock, flags); } diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 8bfae964b13..b0ee9ba3cfa 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -24,7 +24,9 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm); /* * We fork()ed a process, and we need a new context for the child - * to run in. + * to run in. 
We reserve version 0 for initial tasks so we will + * always allocate an ASID. The ASID 0 is reserved for the TTBR + * register changing sequence. */ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) { @@ -34,11 +36,8 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) static void flush_context(void) { - u32 ttb; - /* Copy TTBR1 into TTBR0 */ - asm volatile("mrc p15, 0, %0, c2, c0, 1\n" - "mcr p15, 0, %0, c2, c0, 0" - : "=r" (ttb)); + /* set the reserved ASID before flushing the TLB */ + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); isb(); local_flush_tlb_all(); if (icache_is_vivt_asid_tagged()) { @@ -94,7 +93,7 @@ static void reset_context(void *info) return; smp_rmb(); - asid = cpu_last_asid + cpu; + asid = cpu_last_asid + cpu + 1; flush_context(); set_mm_context(mm, asid); @@ -144,13 +143,13 @@ void __new_context(struct mm_struct *mm) * to start a new version and flush the TLB. */ if (unlikely((asid & ~ASID_MASK) == 0)) { - asid = cpu_last_asid + smp_processor_id(); + asid = cpu_last_asid + smp_processor_id() + 1; flush_context(); #ifdef CONFIG_SMP smp_wmb(); smp_call_function(reset_context, NULL, 1); #endif - cpu_last_asid += NR_CPUS - 1; + cpu_last_asid += NR_CPUS; } set_mm_context(mm, asid); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d88fd3..9ea4f7ddd66 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) fault = __do_page_fault(mm, addr, fsr, tsk); up_read(&mm->mmap_sem); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); if (fault & VM_FAULT_MAJOR) - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr); else if (fault & VM_FAULT_MINOR) - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr); /* * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2c2cce9cd8c..c19571c40a2 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -331,6 +331,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) #endif #ifdef CONFIG_BLK_DEV_INITRD if (phys_initrd_size && + !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { + pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", + phys_initrd_start, phys_initrd_size); + phys_initrd_start = phys_initrd_size = 0; + } + if (phys_initrd_size && memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", phys_initrd_start, phys_initrd_size); @@ -635,7 +641,8 @@ void __init mem_init(void) " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" " .init : 0x%p" " - 0x%p" " (%4d kB)\n" " .text : 0x%p" " - 0x%p" " (%4d kB)\n" - " .data : 0x%p" " - 0x%p" " (%4d kB)\n", + " .data : 0x%p" " - 0x%p" " (%4d kB)\n" + " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), @@ -657,7 +664,8 @@ void __init mem_init(void) MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_text, _etext), - MLK_ROUNDUP(_sdata, _edata)); + MLK_ROUNDUP(_sdata, _edata), + MLK_ROUNDUP(__bss_start, __bss_stop)); #undef MLK #undef MLM diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9d9e736c2b4..594d677b92c 100644 --- 
a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc); static phys_addr_t lowmem_limit __initdata = 0; -static void __init sanity_check_meminfo(void) +void __init sanity_check_meminfo(void) { int i, j, highmem = 0; @@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc) { void *zero_page; + memblock_set_current_limit(lowmem_limit); + build_mem_type_table(); - sanity_check_meminfo(); prepare_page_table(); map_lowmem(); devicemaps_init(mdesc); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 687d02319a4..941a98c9e8a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void) memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); } +void __init sanity_check_meminfo(void) +{ +} + /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index e4c165ca669..537ffcb0646 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -146,7 +146,7 @@ __arm7tdmi_proc_info: .long 0 .long 0 .long v4_cache_fns - .size __arm7tdmi_proc_info, . - __arm7dmi_proc_info + .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info .type __triscenda7_proc_info, #object __triscenda7_proc_info: diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 7b7ebd4d096..546b54da100 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -116,7 +116,7 @@ __arm9tdmi_proc_info: .long 0 .long 0 .long v4_cache_fns - .size __arm9tdmi_proc_info, . - __arm9dmi_proc_info + .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info .type __p2001_proc_info, #object __p2001_proc_info: diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index b3b566ec83d..089c0b5e454 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -108,16 +108,18 @@ ENTRY(cpu_v7_switch_mm) #ifdef CONFIG_ARM_ERRATA_430973 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB #endif - mrc p15, 0, r2, c2, c0, 1 @ load TTB 1 - mcr p15, 0, r2, c2, c0, 0 @ into TTB 0 +#ifdef CONFIG_ARM_ERRATA_754322 + dsb +#endif + mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID + isb +1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 isb #ifdef CONFIG_ARM_ERRATA_754322 dsb #endif mcr p15, 0, r1, c13, c0, 1 @ set context ID isb - mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 - isb #endif mov pc, lr ENDPROC(cpu_v7_switch_mm) @@ -208,19 +210,21 @@ cpu_v7_name: /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size -.equ cpu_v7_suspend_size, 4 * 8 +.equ cpu_v7_suspend_size, 4 * 9 #ifdef CONFIG_PM_SLEEP ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID mrc p15, 0, r5, c13, c0, 1 @ Context ID + mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID + stmia r0!, {r4 - r6} mrc p15, 0, r6, c3, c0, 0 @ Domain ID mrc p15, 0, r7, c2, c0, 0 @ TTB 0 mrc p15, 0, r8, c2, c0, 1 @ TTB 1 mrc p15, 0, r9, c1, c0, 0 @ Control register mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control - stmia r0, {r4 - r11} + stmia r0, {r6 - r11} ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_v7_do_suspend) @@ -228,9 +232,11 @@ ENTRY(cpu_v7_do_resume) mov ip, #0 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache - ldmia r0, {r4 - r11} + ldmia r0!, {r4 - r6} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID mcr p15, 0, r5, c13, c0, 1 @ Context ID + 
mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID + ldmia r0, {r6 - r11} mcr p15, 0, r6, c3, c0, 0 @ Domain ID mcr p15, 0, r7, c2, c0, 0 @ TTB 0 mcr p15, 0, r8, c2, c0, 1 @ TTB 1 @@ -416,9 +422,9 @@ ENTRY(v7_processor_functions) .word cpu_v7_dcache_clean_area .word cpu_v7_switch_mm .word cpu_v7_set_pte_ext - .word 0 - .word 0 - .word 0 + .word cpu_v7_suspend_size + .word cpu_v7_do_suspend + .word cpu_v7_do_resume .size v7_processor_functions, . - v7_processor_functions .section ".rodata" diff --git a/arch/arm/plat-iop/cp6.c b/arch/arm/plat-iop/cp6.c index 9612a87e2a8..bab73e2c79d 100644 --- a/arch/arm/plat-iop/cp6.c +++ b/arch/arm/plat-iop/cp6.c @@ -18,6 +18,7 @@ */ #include <linux/init.h> #include <asm/traps.h> +#include <asm/ptrace.h> static int cp6_trap(struct pt_regs *regs, unsigned int instr) { diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile index a1387875a49..d53c35fe2ea 100644 --- a/arch/arm/plat-mxc/Makefile +++ b/arch/arm/plat-mxc/Makefile @@ -3,7 +3,7 @@ # # Common support -obj-y := clock.o gpio.o time.o devices.o cpu.o system.o irq-common.o +obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o # MX51 uses the TZIC interrupt controller, older platforms use AVIC obj-$(CONFIG_MXC_TZIC) += tzic.o diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c index eee1b6096a0..fb166b20f60 100644 --- a/arch/arm/plat-mxc/devices.c +++ b/arch/arm/plat-mxc/devices.c @@ -89,3 +89,14 @@ err: return pdev; } + +struct device mxc_aips_bus = { + .init_name = "mxc_aips", + .parent = &platform_bus, +}; + +static int __init mxc_device_init(void) +{ + return device_register(&mxc_aips_bus); +} +core_initcall(mxc_device_init); diff --git a/arch/arm/plat-mxc/devices/Makefile b/arch/arm/plat-mxc/devices/Makefile index ad2922acf48..b41bf972b54 100644 --- a/arch/arm/plat-mxc/devices/Makefile +++ b/arch/arm/plat-mxc/devices/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_IMX_HAVE_PLATFORM_FEC) += platform-fec.o obj-$(CONFIG_IMX_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o obj-$(CONFIG_IMX_HAVE_PLATFORM_FSL_USB2_UDC) += platform-fsl-usb2-udc.o obj-$(CONFIG_IMX_HAVE_PLATFORM_GPIO_KEYS) += platform-gpio_keys.o +obj-y += platform-gpio-mxc.o obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX21_HCD) += platform-imx21-hcd.o obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX2_WDT) += platform-imx2-wdt.o obj-$(CONFIG_IMX_HAVE_PLATFORM_IMXDI_RTC) += platform-imxdi_rtc.o diff --git a/arch/arm/plat-mxc/devices/platform-fec.c b/arch/arm/plat-mxc/devices/platform-fec.c index ccc789e21da..4fc6ffc2a13 100644 --- a/arch/arm/plat-mxc/devices/platform-fec.c +++ b/arch/arm/plat-mxc/devices/platform-fec.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c index 59c33f6e401..23ce08e6ffd 100644 --- a/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c +++ b/arch/arm/plat-mxc/devices/platform-fsl-usb2-udc.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. 
*/ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-gpio-mxc.c b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c new file mode 100644 index 00000000000..a7919a24103 --- /dev/null +++ b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c @@ -0,0 +1,32 @@ +/* + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2011 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ +#include <mach/devices-common.h> + +struct platform_device *__init mxc_register_gpio(char *name, int id, + resource_size_t iobase, resource_size_t iosize, int irq, int irq_high) +{ + struct resource res[] = { + { + .start = iobase, + .end = iobase + iosize - 1, + .flags = IORESOURCE_MEM, + }, { + .start = irq, + .end = irq, + .flags = IORESOURCE_IRQ, + }, { + .start = irq_high, + .end = irq_high, + .flags = IORESOURCE_IRQ, + }, + }; + + return platform_device_register_resndata(&mxc_aips_bus, + name, id, res, ARRAY_SIZE(res), NULL, 0); +} diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c index 3538b85ede9..b130f60ca6b 100644 --- a/arch/arm/plat-mxc/devices/platform-imx-dma.c +++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c @@ -139,7 +139,7 @@ static struct sdma_script_start_addrs addr_imx35_to2 = { #endif #ifdef CONFIG_SOC_IMX51 -static struct sdma_script_start_addrs addr_imx51_to1 = { +static struct sdma_script_start_addrs addr_imx51 = { .ap_2_ap_addr = 642, .uart_2_mcu_addr = 817, .mcu_2_app_addr = 747, @@ -196,7 +196,9 @@ static int __init imxXX_add_imx_dma(void) #if defined(CONFIG_SOC_IMX51) if (cpu_is_mx51()) { - imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51_to1; + int to_version = mx51_revision() >> 4; + imx51_imx_sdma_data.pdata.to_version = to_version; + imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51; ret = imx_add_imx_sdma(&imx51_imx_sdma_data); } else #endif diff --git a/arch/arm/plat-mxc/devices/platform-imx-fb.c b/arch/arm/plat-mxc/devices/platform-imx-fb.c index 79a1cb18a5b..2b0b5e0aa99 100644 --- a/arch/arm/plat-mxc/devices/platform-imx-fb.c +++ b/arch/arm/plat-mxc/devices/platform-imx-fb.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-ipu-core.c b/arch/arm/plat-mxc/devices/platform-ipu-core.c index edf65034aea..79d340ae0af 100644 --- a/arch/arm/plat-mxc/devices/platform-ipu-core.c +++ b/arch/arm/plat-mxc/devices/platform-ipu-core.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c index cc488f4b620..e1763e03e7c 100644 --- a/arch/arm/plat-mxc/devices/platform-mxc-ehci.c +++ b/arch/arm/plat-mxc/devices/platform-mxc-ehci.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. 
*/ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c index 90d762f6f93..540d3a7d92d 100644 --- a/arch/arm/plat-mxc/devices/platform-mxc-mmc.c +++ b/arch/arm/plat-mxc/devices/platform-mxc-mmc.c @@ -6,6 +6,7 @@ * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ +#include <linux/dma-mapping.h> #include <mach/hardware.h> #include <mach/devices-common.h> diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c index f97eb3615b2..9bfae8bd5b8 100644 --- a/arch/arm/plat-mxc/devices/platform-spi_imx.c +++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c @@ -40,9 +40,10 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = { #endif #ifdef CONFIG_SOC_IMX25 +/* i.mx25 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx25_cspi_data[] __initconst = { #define imx25_cspi_data_entry(_id, _hwid) \ - imx_spi_imx_data_entry(MX25, CSPI, "imx25-cspi", _id, _hwid, SZ_16K) + imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K) imx25_cspi_data_entry(0, 1), imx25_cspi_data_entry(1, 2), imx25_cspi_data_entry(2, 3), @@ -79,8 +80,9 @@ const struct imx_spi_imx_data imx35_cspi_data[] __initconst = { #endif /* ifdef CONFIG_SOC_IMX35 */ #ifdef CONFIG_SOC_IMX51 +/* i.mx51 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx51_cspi_data __initconst = - imx_spi_imx_data_entry_single(MX51, CSPI, "imx51-cspi", 2, , SZ_4K); + imx_spi_imx_data_entry_single(MX51, CSPI, "imx35-cspi", 2, , SZ_4K); const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { #define imx51_ecspi_data_entry(_id, _hwid) \ @@ -91,12 +93,14 @@ const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = { #endif /* ifdef CONFIG_SOC_IMX51 */ #ifdef CONFIG_SOC_IMX53 +/* i.mx53 has the i.mx35 type cspi */ const struct imx_spi_imx_data imx53_cspi_data __initconst = - imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K); + imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K); +/* i.mx53 has the i.mx51 type ecspi */ const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = { #define imx53_ecspi_data_entry(_id, _hwid) \ - imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K) + imx_spi_imx_data_entry(MX53, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K) imx53_ecspi_data_entry(0, 1), imx53_ecspi_data_entry(1, 2), }; diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c deleted file mode 100644 index 6cd6d7f686f..00000000000 --- a/arch/arm/plat-mxc/gpio.c +++ /dev/null @@ -1,361 +0,0 @@ -/* - * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> - * Copyright 2008 Juergen Beisert, kernel@pengutronix.de - * - * Based on code from Freescale, - * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/gpio.h> -#include <mach/hardware.h> -#include <asm-generic/bug.h> - -static struct mxc_gpio_port *mxc_gpio_ports; -static int gpio_table_size; - -#define cpu_is_mx1_mx2() (cpu_is_mx1() || cpu_is_mx2()) - -#define GPIO_DR (cpu_is_mx1_mx2() ? 0x1c : 0x00) -#define GPIO_GDIR (cpu_is_mx1_mx2() ? 0x00 : 0x04) -#define GPIO_PSR (cpu_is_mx1_mx2() ? 0x24 : 0x08) -#define GPIO_ICR1 (cpu_is_mx1_mx2() ? 0x28 : 0x0C) -#define GPIO_ICR2 (cpu_is_mx1_mx2() ? 0x2C : 0x10) -#define GPIO_IMR (cpu_is_mx1_mx2() ? 0x30 : 0x14) -#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18) - -#define GPIO_INT_LOW_LEV (cpu_is_mx1_mx2() ? 0x3 : 0x0) -#define GPIO_INT_HIGH_LEV (cpu_is_mx1_mx2() ? 0x2 : 0x1) -#define GPIO_INT_RISE_EDGE (cpu_is_mx1_mx2() ? 0x0 : 0x2) -#define GPIO_INT_FALL_EDGE (cpu_is_mx1_mx2() ? 0x1 : 0x3) -#define GPIO_INT_NONE 0x4 - -/* Note: This driver assumes 32 GPIOs are handled in one register */ - -static void _clear_gpio_irqstatus(struct mxc_gpio_port *port, u32 index) -{ - __raw_writel(1 << index, port->base + GPIO_ISR); -} - -static void _set_gpio_irqenable(struct mxc_gpio_port *port, u32 index, - int enable) -{ - u32 l; - - l = __raw_readl(port->base + GPIO_IMR); - l = (l & (~(1 << index))) | (!!enable << index); - __raw_writel(l, port->base + GPIO_IMR); -} - -static void gpio_ack_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - _clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f); -} - -static void gpio_mask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0); -} - -static void gpio_unmask_irq(struct irq_data *d) -{ - u32 gpio = irq_to_gpio(d->irq); - _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1); -} - -static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset); - -static int gpio_set_irq_type(struct irq_data *d, u32 type) -{ - u32 gpio = irq_to_gpio(d->irq); - struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32]; - u32 bit, val; - int edge; - void __iomem *reg = port->base; - - port->both_edges &= ~(1 << (gpio & 31)); - switch (type) { - case IRQ_TYPE_EDGE_RISING: - edge = GPIO_INT_RISE_EDGE; - break; - case IRQ_TYPE_EDGE_FALLING: - edge = GPIO_INT_FALL_EDGE; - break; - case IRQ_TYPE_EDGE_BOTH: - val = mxc_gpio_get(&port->chip, gpio & 31); - if (val) { - edge = GPIO_INT_LOW_LEV; - pr_debug("mxc: set GPIO %d to low trigger\n", gpio); - } else { - edge = GPIO_INT_HIGH_LEV; - pr_debug("mxc: set GPIO %d to high trigger\n", gpio); - } - port->both_edges |= 1 << (gpio & 31); - break; - case IRQ_TYPE_LEVEL_LOW: - edge = GPIO_INT_LOW_LEV; - break; - case IRQ_TYPE_LEVEL_HIGH: - edge = GPIO_INT_HIGH_LEV; - break; - default: - return -EINVAL; - } - - reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ - bit = gpio & 0xf; - val = __raw_readl(reg) & ~(0x3 << (bit << 1)); - __raw_writel(val | (edge << (bit << 1)), reg); - _clear_gpio_irqstatus(port, gpio & 0x1f); - - return 0; -} - -static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) -{ - void __iomem *reg = port->base; - u32 bit, val; - int edge; - - reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ - bit = gpio & 0xf; - val = 
__raw_readl(reg); - edge = (val >> (bit << 1)) & 3; - val &= ~(0x3 << (bit << 1)); - if (edge == GPIO_INT_HIGH_LEV) { - edge = GPIO_INT_LOW_LEV; - pr_debug("mxc: switch GPIO %d to low trigger\n", gpio); - } else if (edge == GPIO_INT_LOW_LEV) { - edge = GPIO_INT_HIGH_LEV; - pr_debug("mxc: switch GPIO %d to high trigger\n", gpio); - } else { - pr_err("mxc: invalid configuration for GPIO %d: %x\n", - gpio, edge); - return; - } - __raw_writel(val | (edge << (bit << 1)), reg); -} - -/* handle 32 interrupts in one status register */ -static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat) -{ - u32 gpio_irq_no_base = port->virtual_irq_start; - - while (irq_stat != 0) { - int irqoffset = fls(irq_stat) - 1; - - if (port->both_edges & (1 << irqoffset)) - mxc_flip_edge(port, irqoffset); - - generic_handle_irq(gpio_irq_no_base + irqoffset); - - irq_stat &= ~(1 << irqoffset); - } -} - -/* MX1 and MX3 has one interrupt *per* gpio port */ -static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc) -{ - u32 irq_stat; - struct mxc_gpio_port *port = irq_get_handler_data(irq); - - irq_stat = __raw_readl(port->base + GPIO_ISR) & - __raw_readl(port->base + GPIO_IMR); - - mxc_gpio_irq_handler(port, irq_stat); -} - -/* MX2 has one interrupt *for all* gpio ports */ -static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc) -{ - int i; - u32 irq_msk, irq_stat; - struct mxc_gpio_port *port = irq_get_handler_data(irq); - - /* walk through all interrupt status registers */ - for (i = 0; i < gpio_table_size; i++) { - irq_msk = __raw_readl(port[i].base + GPIO_IMR); - if (!irq_msk) - continue; - - irq_stat = __raw_readl(port[i].base + GPIO_ISR) & irq_msk; - if (irq_stat) - mxc_gpio_irq_handler(&port[i], irq_stat); - } -} - -/* - * Set interrupt number "irq" in the GPIO as a wake-up source. - * While system is running, all registered GPIO interrupts need to have - * wake-up enabled. When system is suspended, only selected GPIO interrupts - * need to have wake-up enabled. - * @param irq interrupt source number - * @param enable enable as wake-up if equal to non-zero - * @return This function returns 0 on success. 
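[Editorial sketch] The demux path above, mxc_gpio_irq_handler(), uses the common pattern of walking a pending-status word with fls(): service the highest set bit, clear it in the local copy, repeat until the word is empty. A stripped-down sketch of just that loop; handle_one() stands in for the generic_handle_irq() call of the real handler:

#include <linux/bitops.h>
#include <linux/types.h>

static void example_demux(u32 status, void (*handle_one)(int bit))
{
	while (status) {
		int bit = fls(status) - 1;	/* index of highest pending bit */

		handle_one(bit);		/* dispatch this interrupt source */
		status &= ~(1U << bit);		/* done with it */
	}
}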
- */ -static int gpio_set_wake_irq(struct irq_data *d, u32 enable) -{ - u32 gpio = irq_to_gpio(d->irq); - u32 gpio_idx = gpio & 0x1F; - struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32]; - - if (enable) { - if (port->irq_high && (gpio_idx >= 16)) - enable_irq_wake(port->irq_high); - else - enable_irq_wake(port->irq); - } else { - if (port->irq_high && (gpio_idx >= 16)) - disable_irq_wake(port->irq_high); - else - disable_irq_wake(port->irq); - } - - return 0; -} - -static struct irq_chip gpio_irq_chip = { - .name = "GPIO", - .irq_ack = gpio_ack_irq, - .irq_mask = gpio_mask_irq, - .irq_unmask = gpio_unmask_irq, - .irq_set_type = gpio_set_irq_type, - .irq_set_wake = gpio_set_wake_irq, -}; - -static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset, - int dir) -{ - struct mxc_gpio_port *port = - container_of(chip, struct mxc_gpio_port, chip); - u32 l; - unsigned long flags; - - spin_lock_irqsave(&port->lock, flags); - l = __raw_readl(port->base + GPIO_GDIR); - if (dir) - l |= 1 << offset; - else - l &= ~(1 << offset); - __raw_writel(l, port->base + GPIO_GDIR); - spin_unlock_irqrestore(&port->lock, flags); -} - -static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct mxc_gpio_port *port = - container_of(chip, struct mxc_gpio_port, chip); - void __iomem *reg = port->base + GPIO_DR; - u32 l; - unsigned long flags; - - spin_lock_irqsave(&port->lock, flags); - l = (__raw_readl(reg) & (~(1 << offset))) | (!!value << offset); - __raw_writel(l, reg); - spin_unlock_irqrestore(&port->lock, flags); -} - -static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct mxc_gpio_port *port = - container_of(chip, struct mxc_gpio_port, chip); - - return (__raw_readl(port->base + GPIO_PSR) >> offset) & 1; -} - -static int mxc_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - _set_gpio_direction(chip, offset, 0); - return 0; -} - -static int mxc_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - mxc_gpio_set(chip, offset, value); - _set_gpio_direction(chip, offset, 1); - return 0; -} - -/* - * This lock class tells lockdep that GPIO irqs are in a different - * category than their parents, so it won't report false recursion. 
- */ -static struct lock_class_key gpio_lock_class; - -int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) -{ - int i, j; - - /* save for local usage */ - mxc_gpio_ports = port; - gpio_table_size = cnt; - - printk(KERN_INFO "MXC GPIO hardware\n"); - - for (i = 0; i < cnt; i++) { - /* disable the interrupt and clear the status */ - __raw_writel(0, port[i].base + GPIO_IMR); - __raw_writel(~0, port[i].base + GPIO_ISR); - for (j = port[i].virtual_irq_start; - j < port[i].virtual_irq_start + 32; j++) { - irq_set_lockdep_class(j, &gpio_lock_class); - irq_set_chip_and_handler(j, &gpio_irq_chip, - handle_level_irq); - set_irq_flags(j, IRQF_VALID); - } - - /* register gpio chip */ - port[i].chip.direction_input = mxc_gpio_direction_input; - port[i].chip.direction_output = mxc_gpio_direction_output; - port[i].chip.get = mxc_gpio_get; - port[i].chip.set = mxc_gpio_set; - port[i].chip.base = i * 32; - port[i].chip.ngpio = 32; - - spin_lock_init(&port[i].lock); - - /* its a serious configuration bug when it fails */ - BUG_ON( gpiochip_add(&port[i].chip) < 0 ); - - if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) { - /* setup one handler for each entry */ - irq_set_chained_handler(port[i].irq, - mx3_gpio_irq_handler); - irq_set_handler_data(port[i].irq, &port[i]); - if (port[i].irq_high) { - /* setup handler for GPIO 16 to 31 */ - irq_set_chained_handler(port[i].irq_high, - mx3_gpio_irq_handler); - irq_set_handler_data(port[i].irq_high, - &port[i]); - } - } - } - - if (cpu_is_mx2()) { - /* setup one handler for all GPIO interrupts */ - irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler); - irq_set_handler_data(port[0].irq, port); - } - - return 0; -} diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h index da7991832af..4e3d97890d6 100644 --- a/arch/arm/plat-mxc/include/mach/common.h +++ b/arch/arm/plat-mxc/include/mach/common.h @@ -43,6 +43,15 @@ extern void mx35_init_irq(void); extern void mx50_init_irq(void); extern void mx51_init_irq(void); extern void mx53_init_irq(void); +extern void imx1_soc_init(void); +extern void imx21_soc_init(void); +extern void imx25_soc_init(void); +extern void imx27_soc_init(void); +extern void imx31_soc_init(void); +extern void imx35_soc_init(void); +extern void imx50_soc_init(void); +extern void imx51_soc_init(void); +extern void imx53_soc_init(void); extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq); extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); extern int mx1_clocks_init(unsigned long fref); @@ -55,7 +64,8 @@ extern int mx51_clocks_init(unsigned long ckil, unsigned long osc, unsigned long ckih1, unsigned long ckih2); extern int mx53_clocks_init(unsigned long ckil, unsigned long osc, unsigned long ckih1, unsigned long ckih2); -extern int mxc_register_gpios(void); +extern struct platform_device *mxc_register_gpio(char *name, int id, + resource_size_t iobase, resource_size_t iosize, int irq, int irq_high); extern int mxc_register_device(struct platform_device *pdev, void *data); extern void mxc_set_cpu_type(unsigned int type); extern void mxc_arch_reset_init(void __iomem *); diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h index fa8477337f9..03f62664537 100644 --- a/arch/arm/plat-mxc/include/mach/devices-common.h +++ b/arch/arm/plat-mxc/include/mach/devices-common.h @@ -10,6 +10,8 @@ #include <linux/platform_device.h> #include <linux/init.h> +extern struct device mxc_aips_bus; 
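[Editorial sketch] With the old table-driven mxc_gpio_init() gone, each SoC init path is expected to call the new mxc_register_gpio() helper once per 32-pin bank, and the resulting platform devices are picked up by a proper GPIO driver. A hypothetical example of such an init routine; the device name, base addresses and interrupt numbers below are placeholders, not the real i.MX values:

#include <linux/init.h>
#include <asm/sizes.h>
#include <mach/common.h>

void __init example_soc_init(void)
{
	/* one platform device per GPIO bank:
	 * name, id, iobase, iosize, irq, irq_high (0 if the bank has none) */
	mxc_register_gpio("imx3x-gpio", 0, 0x53fcc000, SZ_16K, 52, 0);
	mxc_register_gpio("imx3x-gpio", 1, 0x53fd0000, SZ_16K, 51, 0);
	mxc_register_gpio("imx3x-gpio", 2, 0x53fd4000, SZ_16K, 50, 0);
}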
+ struct platform_device *imx_add_platform_device_dmamask( const char *name, int id, const struct resource *res, unsigned int num_resources, diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h index a2747f12813..31c820c1b79 100644 --- a/arch/arm/plat-mxc/include/mach/gpio.h +++ b/arch/arm/plat-mxc/include/mach/gpio.h @@ -36,31 +36,4 @@ #define gpio_to_irq(gpio) (MXC_GPIO_IRQ_START + (gpio)) #define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START) -struct mxc_gpio_port { - void __iomem *base; - int irq; - int irq_high; - int virtual_irq_start; - struct gpio_chip chip; - u32 both_edges; - spinlock_t lock; -}; - -#define DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, _irq_high) \ - { \ - .chip.label = "gpio-" #_id, \ - .irq = _irq, \ - .irq_high = _irq_high, \ - .base = soc ## _IO_ADDRESS( \ - soc ## _GPIO ## _hwid ## _BASE_ADDR), \ - .virtual_irq_start = MXC_GPIO_IRQ_START + (_id) * 32, \ - } - -#define DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, _irq) \ - DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, 0) -#define DEFINE_IMX_GPIO_PORT(soc, _id, _hwid) \ - DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, 0) - -int mxc_gpio_init(struct mxc_gpio_port*, int); - #endif diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h index 35c89bcdf75..00e812bbd81 100644 --- a/arch/arm/plat-mxc/include/mach/irqs.h +++ b/arch/arm/plat-mxc/include/mach/irqs.h @@ -11,6 +11,8 @@ #ifndef __ASM_ARCH_MXC_IRQS_H__ #define __ASM_ARCH_MXC_IRQS_H__ +#include <asm-generic/gpio.h> + /* * SoCs with TZIC interrupt controller have 128 IRQs, those with AVIC have 64 */ @@ -22,30 +24,13 @@ #define MXC_GPIO_IRQ_START MXC_INTERNAL_IRQS -/* these are ordered by size to support multi-SoC kernels */ -#if defined CONFIG_SOC_IMX53 -#define MXC_GPIO_IRQS (32 * 7) -#elif defined CONFIG_ARCH_MX2 -#define MXC_GPIO_IRQS (32 * 6) -#elif defined CONFIG_SOC_IMX50 -#define MXC_GPIO_IRQS (32 * 6) -#elif defined CONFIG_ARCH_MX1 -#define MXC_GPIO_IRQS (32 * 4) -#elif defined CONFIG_ARCH_MX25 -#define MXC_GPIO_IRQS (32 * 4) -#elif defined CONFIG_SOC_IMX51 -#define MXC_GPIO_IRQS (32 * 4) -#elif defined CONFIG_ARCH_MX3 -#define MXC_GPIO_IRQS (32 * 3) -#endif - /* * The next 16 interrupts are for board specific purposes. Since * the kernel can only run on one machine at a time, we can re-use * these. If you need more, increase MXC_BOARD_IRQS, but keep it * within sensible limits. 
*/ -#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS) +#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + ARCH_NR_GPIOS) #ifdef CONFIG_MACH_MX31ADS_WM1133_EV1 #define MXC_BOARD_IRQS 80 diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h index ea19a5b2f22..d5d7e651269 100644 --- a/arch/arm/plat-nomadik/include/plat/gpio.h +++ b/arch/arm/plat-nomadik/include/plat/gpio.h @@ -90,6 +90,7 @@ struct nmk_gpio_platform_data { int num_gpio; u32 (*get_secondary_status)(unsigned int bank); void (*set_ioforce)(bool enable); + bool supports_sleepmode; }; #endif /* __ASM_PLAT_GPIO_H */ diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h index c44886062f8..685c78716d9 100644 --- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h +++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h @@ -10,6 +10,7 @@ #define STE_DMA40_H #include <linux/dmaengine.h> +#include <linux/scatterlist.h> #include <linux/workqueue.h> #include <linux/interrupt.h> diff --git a/arch/arm/plat-omap/include/plat/flash.h b/arch/arm/plat-omap/include/plat/flash.h index 3083195123e..0d88499b79e 100644 --- a/arch/arm/plat-omap/include/plat/flash.h +++ b/arch/arm/plat-omap/include/plat/flash.h @@ -11,6 +11,7 @@ #include <linux/mtd/map.h> +struct platform_device; extern void omap1_set_vpp(struct platform_device *pdev, int enable); #endif diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h index ec97e00cb58..91e8de3db08 100644 --- a/arch/arm/plat-omap/include/plat/gpio.h +++ b/arch/arm/plat-omap/include/plat/gpio.h @@ -174,12 +174,32 @@ struct omap_gpio_dev_attr { bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ }; +struct omap_gpio_reg_offs { + u16 revision; + u16 direction; + u16 datain; + u16 dataout; + u16 set_dataout; + u16 clr_dataout; + u16 irqstatus; + u16 irqstatus2; + u16 irqenable; + u16 set_irqenable; + u16 clr_irqenable; + u16 debounce; + u16 debounce_en; + + bool irqenable_inv; +}; + struct omap_gpio_platform_data { u16 virtual_irq_start; int bank_type; int bank_width; /* GPIO bank width */ int bank_stride; /* Only needed for omap1 MPUIO */ bool dbck_flag; /* dbck required or not - True for OMAP3&4 */ + + struct omap_gpio_reg_offs *regs; }; /* TODO: Analyze removing gpio_bank_count usage from driver code */ diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h index 32a2f6c4d39..e992b9655fb 100644 --- a/arch/arm/plat-omap/include/plat/iovmm.h +++ b/arch/arm/plat-omap/include/plat/iovmm.h @@ -29,9 +29,6 @@ struct iovm_struct { * lower 16 bit is used for h/w and upper 16 bit is for s/w. 
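[Editorial sketch] The new struct omap_gpio_reg_offs above lets a single driver cope with several register layouts by indirecting every access through a per-bank offset table supplied in platform data. A minimal sketch of the idea; the bank structure, locking and error handling of the real OMAP driver are omitted, and the helpers below are illustrative only:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>
#include <plat/gpio.h>

/* hypothetical per-bank state; only what the sketch needs */
struct example_gpio_bank {
	void __iomem *base;
	struct omap_gpio_reg_offs *regs;	/* from platform data */
};

static int example_gpio_get(struct example_gpio_bank *bank, unsigned offset)
{
	/* same code path whatever the underlying register layout is */
	return (readl(bank->base + bank->regs->datain) >> offset) & 1;
}

static void example_gpio_set(struct example_gpio_bank *bank, unsigned offset,
			     int value)
{
	u32 l;

	if (bank->regs->set_dataout && bank->regs->clr_dataout) {
		/* layouts with dedicated set/clear registers avoid a RMW */
		writel(BIT(offset), bank->base + (value ?
			bank->regs->set_dataout : bank->regs->clr_dataout));
	} else {
		l = readl(bank->base + bank->regs->dataout);
		if (value)
			l |= BIT(offset);
		else
			l &= ~BIT(offset);
		writel(l, bank->base + bank->regs->dataout);
	}
}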
*/ #define IOVMF_SW_SHIFT 16 -#define IOVMF_HW_SIZE (1 << IOVMF_SW_SHIFT) -#define IOVMF_HW_MASK (IOVMF_HW_SIZE - 1) -#define IOVMF_SW_MASK (~IOVMF_HW_MASK)UL /* * iovma: h/w flags derived from cam and ram attribute diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h index f38fef9f131..c7b874186c2 100644 --- a/arch/arm/plat-omap/include/plat/mmc.h +++ b/arch/arm/plat-omap/include/plat/mmc.h @@ -101,6 +101,9 @@ struct omap_mmc_platform_data { /* If using power_saving and the MMC power is not to go off */ unsigned no_off:1; + /* eMMC does not handle power off when not in sleep state */ + unsigned no_regulator_off_init:1; + /* Regulator off remapped to sleep */ unsigned vcc_aux_disable_is_sleep:1; diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h index e4c349ff9fd..ee405b36df4 100644 --- a/arch/arm/plat-omap/include/plat/omap_device.h +++ b/arch/arm/plat-omap/include/plat/omap_device.h @@ -44,6 +44,10 @@ extern struct device omap_device_parent; #define OMAP_DEVICE_STATE_IDLE 2 #define OMAP_DEVICE_STATE_SHUTDOWN 3 +/* omap_device.flags values */ +#define OMAP_DEVICE_SUSPENDED BIT(0) +#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1) + /** * struct omap_device - omap_device wrapper for platform_devices * @pdev: platform_device @@ -73,6 +77,7 @@ struct omap_device { s8 pm_lat_level; u8 hwmods_cnt; u8 _state; + u8 flags; }; /* Device driver interface (call via platform_data fn ptrs) */ @@ -117,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od); int omap_device_disable_clocks(struct omap_device *od); int omap_device_enable_clocks(struct omap_device *od); +static inline void omap_device_disable_idle_on_suspend(struct omap_device *od) +{ + od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND; +} /* * Entries should be kept in latency order ascending diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index 51ef43e8def..83a37c54342 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c @@ -648,7 +648,6 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt, return PTR_ERR(va); } - flags &= IOVMF_HW_MASK; flags |= IOVMF_DISCONT; flags |= IOVMF_MMIO; @@ -706,7 +705,6 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) if (!va) return -ENOMEM; - flags &= IOVMF_HW_MASK; flags |= IOVMF_DISCONT; flags |= IOVMF_ALLOC; @@ -795,7 +793,6 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes, if (!va) return -ENOMEM; - flags &= IOVMF_HW_MASK; flags |= IOVMF_LINEAR; flags |= IOVMF_MMIO; @@ -853,7 +850,6 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags) return -ENOMEM; pa = virt_to_phys(va); - flags &= IOVMF_HW_MASK; flags |= IOVMF_LINEAR; flags |= IOVMF_ALLOC; diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index a37b8eb65b7..2526fa312b8 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c @@ -84,6 +84,7 @@ #include <linux/io.h> #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/pm_runtime.h> #include <plat/omap_device.h> #include <plat/omap_hwmod.h> @@ -536,25 +537,82 @@ int omap_early_device_register(struct omap_device *od) return 0; } +#ifdef CONFIG_PM_RUNTIME static int _od_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); + int ret; + + ret = pm_generic_runtime_suspend(dev); - return omap_device_idle(pdev); + if (!ret) + omap_device_idle(pdev); + + return ret; +} + +static int 
_od_runtime_idle(struct device *dev) +{ + return pm_generic_runtime_idle(dev); } static int _od_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - return omap_device_enable(pdev); + omap_device_enable(pdev); + + return pm_generic_runtime_resume(dev); +} +#endif + +#ifdef CONFIG_SUSPEND +static int _od_suspend_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + int ret; + + if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND) + return pm_generic_suspend_noirq(dev); + + ret = pm_generic_suspend_noirq(dev); + + if (!ret && !pm_runtime_status_suspended(dev)) { + if (pm_generic_runtime_suspend(dev) == 0) { + omap_device_idle(pdev); + od->flags |= OMAP_DEVICE_SUSPENDED; + } + } + + return ret; +} + +static int _od_resume_noirq(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct omap_device *od = to_omap_device(pdev); + + if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND) + return pm_generic_resume_noirq(dev); + + if ((od->flags & OMAP_DEVICE_SUSPENDED) && + !pm_runtime_status_suspended(dev)) { + od->flags &= ~OMAP_DEVICE_SUSPENDED; + omap_device_enable(pdev); + pm_generic_runtime_resume(dev); + } + + return pm_generic_resume_noirq(dev); } +#endif -static struct dev_power_domain omap_device_power_domain = { +static struct dev_pm_domain omap_device_pm_domain = { .ops = { - .runtime_suspend = _od_runtime_suspend, - .runtime_resume = _od_runtime_resume, + SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume, + _od_runtime_idle) USE_PLATFORM_PM_SLEEP_OPS + SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq) } }; @@ -571,7 +629,7 @@ int omap_device_register(struct omap_device *od) pr_debug("omap_device: %s: registering\n", od->pdev.name); od->pdev.dev.parent = &omap_device_parent; - od->pdev.dev.pwr_domain = &omap_device_power_domain; + od->pdev.dev.pm_domain = &omap_device_pm_domain; return platform_device_register(&od->pdev); } diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index a3f50b34a90..6af3d0b1f8d 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -166,7 +166,7 @@ static void __init omap_detect_sram(void) else if (cpu_is_omap1611()) omap_sram_size = SZ_256K; else { - printk(KERN_ERR "Could not detect SRAM size\n"); + pr_err("Could not detect SRAM size\n"); omap_sram_size = 0x4000; } } @@ -221,10 +221,10 @@ static void __init omap_map_sram(void) omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); - printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", - __pfn_to_phys(omap_sram_io_desc[0].pfn), - omap_sram_io_desc[0].virtual, - omap_sram_io_desc[0].length); + pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n", + (long long) __pfn_to_phys(omap_sram_io_desc[0].pfn), + omap_sram_io_desc[0].virtual, + omap_sram_io_desc[0].length); /* * Normally devicemaps_init() would flush caches and tlb after @@ -252,7 +252,7 @@ static void __init omap_map_sram(void) void *omap_sram_push_address(unsigned long size) { if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) { - printk(KERN_ERR "Not enough space in SRAM\n"); + pr_err("Not enough space in SRAM\n"); return NULL; } diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 5b4fffab1eb..41ab97ebe4c 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c @@ -432,7 +432,7 @@ void __init 
orion_gpio_init(int gpio_base, int ngpio, ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; ct->regs.ack = GPIO_EDGE_CAUSE_OFF; ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_clr_bit; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_set_type = gpio_irq_set_type; diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c index 48ebb9479b6..a11dc367050 100644 --- a/arch/arm/plat-pxa/gpio.c +++ b/arch/arm/plat-pxa/gpio.c @@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c) return container_of(c, struct pxa_gpio_chip, chip)->regbase; } -static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio) +static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio) { return &pxa_gpio_chips[gpio_to_bank(gpio)]; } @@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) int gpio = irq_to_gpio(d->irq); unsigned long gpdr, mask = GPIO_bit(gpio); - c = gpio_to_chip(gpio); + c = gpio_to_pxachip(gpio); if (type == IRQ_TYPE_PROBE) { /* Don't mess with enabled GPIOs using preconfigured edges or @@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) static void pxa_ack_muxed_gpio(struct irq_data *d) { int gpio = irq_to_gpio(d->irq); - struct pxa_gpio_chip *c = gpio_to_chip(gpio); + struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET); } @@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d) static void pxa_mask_muxed_gpio(struct irq_data *d) { int gpio = irq_to_gpio(d->irq); - struct pxa_gpio_chip *c = gpio_to_chip(gpio); + struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); uint32_t grer, gfer; c->irq_mask &= ~GPIO_bit(gpio); @@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d) static void pxa_unmask_muxed_gpio(struct irq_data *d) { int gpio = irq_to_gpio(d->irq); - struct pxa_gpio_chip *c = gpio_to_chip(gpio); + struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); c->irq_mask |= GPIO_bit(gpio); update_edge_detect(c); diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h deleted file mode 100644 index 1ab332e37d7..00000000000 --- a/arch/arm/plat-pxa/include/plat/sdhci.h +++ /dev/null @@ -1,35 +0,0 @@ -/* linux/arch/arm/plat-pxa/include/plat/sdhci.h - * - * Copyright 2010 Marvell - * Zhangfei Gao <zhangfei.gao@marvell.com> - * - * PXA Platform - SDHCI platform data definitions - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
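[Editorial sketch] The orion_gpio_init() hunk above is one of several in this series that replace the removed generic irq_gc_ack() with the variant that matches the hardware: irq_gc_ack_set_bit() for write-1-to-clear status registers, irq_gc_ack_clr_bit() for write-0-to-clear ones. A hedged sketch of wiring up such a generic chip for a W1C controller; the chip name, IRQ count and register offsets are placeholders:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

#define EXAMPLE_STATUS_OFF	0x00	/* write-1-to-clear status register */
#define EXAMPLE_MASK_OFF	0x04	/* set bit = masked */

static void __init example_init_irq_gc(void __iomem *reg_base,
				       unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("example", 1, irq_base, reg_base,
				    handle_level_irq);
	if (!gc)
		return;

	ct = gc->chip_types;
	/* status is W1C, so acking means *setting* the bit */
	ct->chip.irq_ack = irq_gc_ack_set_bit;
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->regs.ack = EXAMPLE_STATUS_OFF;
	ct->regs.mask = EXAMPLE_MASK_OFF;

	irq_setup_generic_chip(gc, IRQ_MSK(8), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}

Picking the wrong ack helper would either never clear the pending bit or clear unrelated ones, which is why the conversions above are spelled out per platform.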
- */ - -#ifndef __PLAT_PXA_SDHCI_H -#define __PLAT_PXA_SDHCI_H - -/* pxa specific flag */ -/* Require clock free running */ -#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0) - -/* Board design supports 8-bit data on SD/SDIO BUS */ -#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2) - -/* - * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI - * @max_speed: the maximum speed supported - * @quirks: quirks of specific device - * @flags: flags for platform requirement - */ -struct sdhci_pxa_platdata { - unsigned int max_speed; - unsigned int quirks; - unsigned int flags; -}; - -#endif /* __PLAT_PXA_SDHCI_H */ diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index c10d10c56e2..539bd0e3def 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c @@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel); * get control of an dma channel */ -int s3c2410_dma_request(unsigned int channel, +int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *client, void *dev) { @@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); * allowed to go through. */ -int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) +int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned long flags; @@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan) } int -s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) +s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1021,23 +1021,19 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); * xfersize: size of unit in bytes (1,2,4) */ -int s3c2410_dma_config(unsigned int channel, +int s3c2410_dma_config(enum dma_ch channel, int xferunit) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); unsigned int dcon; - pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n", - __func__, channel, xferunit, dcon); + pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit); if (chan == NULL) return -EINVAL; - pr_debug("%s: Initial dcon is %08x\n", __func__, dcon); - dcon = chan->dcon & dma_sel.dcon_mask; - - pr_debug("%s: New dcon is %08x\n", __func__, dcon); + pr_debug("%s: dcon is %08x\n", __func__, dcon); switch (chan->req_ch) { case DMACH_I2S_IN: @@ -1104,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config); * devaddr: physical address of the source */ -int s3c2410_dma_devconfig(unsigned int channel, +int s3c2410_dma_devconfig(enum dma_ch channel, enum s3c2410_dmasrc source, unsigned long devaddr) { @@ -1177,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig); * returns the current transfer points for the dma source and destination */ -int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst) +int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -1199,7 +1195,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition); #ifdef CONFIG_PM -static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp) +static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp) { printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); @@ -1235,7 +1231,7 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) /* restore channel's hardware configuration */ if (!cp->in_use) - return 0; + return; printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); @@ 
-1246,8 +1242,6 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) if (cp->map != NULL) dma_sel.select(cp, cp->map); - - return 0; } static void s3c2410_dma_resume(void) diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c index 9aee7e1668b..fc8c5f89954 100644 --- a/arch/arm/plat-s3c24xx/irq.c +++ b/arch/arm/plat-s3c24xx/irq.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/sysdev.h> +#include <linux/syscore_ops.h> #include <asm/irq.h> #include <asm/mach/irq.h> @@ -668,3 +669,8 @@ void __init s3c24xx_init_irq(void) irqdbf("s3c2410: registered interrupt handlers\n"); } + +struct syscore_ops s3c24xx_irq_syscore_ops = { + .suspend = s3c24xx_irq_suspend, + .resume = s3c24xx_irq_resume, +}; diff --git a/arch/arm/plat-s5p/dev-onenand.c b/arch/arm/plat-s5p/dev-onenand.c index 6db926202ca..20336c8f247 100644 --- a/arch/arm/plat-s5p/dev-onenand.c +++ b/arch/arm/plat-s5p/dev-onenand.c @@ -15,8 +15,6 @@ #include <linux/kernel.h> #include <linux/platform_device.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/onenand.h> #include <mach/irqs.h> #include <mach/map.h> @@ -45,13 +43,3 @@ struct platform_device s5p_device_onenand = { .num_resources = ARRAY_SIZE(s5p_onenand_resources), .resource = s5p_onenand_resources, }; - -void s5p_onenand_set_platdata(struct onenand_platform_data *pdata) -{ - struct onenand_platform_data *pd; - - pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL); - if (!pd) - printk(KERN_ERR "%s: no memory for platform data\n", __func__); - s5p_device_onenand.dev.platform_data = pd; -} diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h index a6c3d327ce7..d973d39666a 100644 --- a/arch/arm/plat-s5p/include/plat/map-s5p.h +++ b/arch/arm/plat-s5p/include/plat/map-s5p.h @@ -39,7 +39,7 @@ #define S5P_VA_TWD S5P_VA_COREPERI(0x600) #define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000) -#define S5P_VA_USB_HSPHY S3C_ADDR(0x02900000) +#define S3C_VA_USB_HSPHY S3C_ADDR(0x02900000) #define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000)) #define VA_VIC0 VA_VIC(0) diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c index 135abda31c9..327ab9f662e 100644 --- a/arch/arm/plat-s5p/irq-gpioint.c +++ b/arch/arm/plat-s5p/irq-gpioint.c @@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) if (!gc) return -ENOMEM; ct = gc->chip_types; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct->chip.irq_set_type = s5p_gpioint_set_type, diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c index 899a8cc011f..612934c48b0 100644 --- a/arch/arm/plat-s5p/s5p-time.c +++ b/arch/arm/plat-s5p/s5p-time.c @@ -370,11 +370,11 @@ static void __init s5p_clocksource_init(void) clock_rate = clk_get_rate(tin_source); - init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate); - s5p_time_setup(timer_source.source_id, TCNT_MAX); s5p_time_start(timer_source.source_id, PERIODIC); + init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate); + if (clocksource_register_hz(&time_clocksource, clock_rate)) panic("%s: can't register clocksource\n", time_clocksource.name); } diff --git a/arch/arm/plat-samsung/dev-onenand.c b/arch/arm/plat-samsung/dev-onenand.c index 45ec73287d8..f54ae71f0cd 100644 --- a/arch/arm/plat-samsung/dev-onenand.c +++ b/arch/arm/plat-samsung/dev-onenand.c @@ -13,8 +13,6 @@ #include <linux/kernel.h> #include 
<linux/platform_device.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/onenand.h> #include <mach/irqs.h> #include <mach/map.h> @@ -43,13 +41,3 @@ struct platform_device s3c_device_onenand = { .num_resources = ARRAY_SIZE(s3c_onenand_resources), .resource = s3c_onenand_resources, }; - -void s3c_onenand_set_platdata(struct onenand_platform_data *pdata) -{ - struct onenand_platform_data *pd; - - pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL); - if (!pd) - printk(KERN_ERR "%s: no memory for platform data\n", __func__); - s3c_device_onenand.dev.platform_data = pd; -} diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c index cb459dd9545..6143aa14768 100644 --- a/arch/arm/plat-samsung/dma.c +++ b/arch/arm/plat-samsung/dma.c @@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel) * irq? */ -int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) +int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) } EXPORT_SYMBOL(s3c2410_dma_set_opfn); -int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) +int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); @@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) } EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); -int s3c2410_dma_setflags(unsigned int channel, unsigned int flags) +int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h index b61b8ee7cc5..e3b31c26ac3 100644 --- a/arch/arm/plat-samsung/include/plat/devs.h +++ b/arch/arm/plat-samsung/include/plat/devs.h @@ -12,6 +12,10 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
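[Editorial sketch] The per-device s5p/s3c onenand_set_platdata() helpers removed above are superseded by the generic s3c_set_platdata() declared in plat/devs.h. Its body is presumably along the lines of the removed code, i.e. a kmemdup() of the caller's data so the device owns its own copy; the function below is an illustrative stand-in, not the actual plat-samsung implementation:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>

void *example_set_platdata(void *pd, size_t pdsize,
			   struct platform_device *pdev)
{
	void *npd;

	if (!pd)
		return NULL;

	npd = kmemdup(pd, pdsize, GFP_KERNEL);	/* device gets its own copy */
	if (!npd)
		printk(KERN_ERR "%s: no memory for platform data\n", __func__);
	else
		pdev->dev.platform_data = npd;

	return npd;
}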
*/ + +#ifndef __PLAT_DEVS_H +#define __PLAT_DEVS_H __FILE__ + #include <linux/platform_device.h> struct s3c24xx_uart_resources { @@ -75,10 +79,8 @@ extern struct platform_device s5pc100_device_spi1; extern struct platform_device s5pc100_device_spi2; extern struct platform_device s5pv210_device_spi0; extern struct platform_device s5pv210_device_spi1; -extern struct platform_device s5p6440_device_spi0; -extern struct platform_device s5p6440_device_spi1; -extern struct platform_device s5p6450_device_spi0; -extern struct platform_device s5p6450_device_spi1; +extern struct platform_device s5p64x0_device_spi0; +extern struct platform_device s5p64x0_device_spi1; extern struct platform_device s3c_device_hwmon; @@ -161,3 +163,5 @@ extern struct platform_device s3c_device_ac97; */ extern void *s3c_set_platdata(void *pd, size_t pdsize, struct platform_device *pdev); + +#endif /* __PLAT_DEVS_H */ diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h index 2e8f8c6560d..8c273b7a6f5 100644 --- a/arch/arm/plat-samsung/include/plat/dma.h +++ b/arch/arm/plat-samsung/include/plat/dma.h @@ -42,6 +42,7 @@ struct s3c2410_dma_client { }; struct s3c2410_dma_chan; +enum dma_ch; /* s3c2410_dma_cbfn_t * @@ -62,7 +63,7 @@ typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *, * request a dma channel exclusivley */ -extern int s3c2410_dma_request(unsigned int channel, +extern int s3c2410_dma_request(enum dma_ch channel, struct s3c2410_dma_client *, void *dev); @@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel, * change the state of the dma channel */ -extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op); +extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op); /* s3c2410_dma_setflags * * set the channel's flags to a given state */ -extern int s3c2410_dma_setflags(unsigned int channel, +extern int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags); /* s3c2410_dma_free @@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel, * free the dma channel (will also abort any outstanding operations) */ -extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); +extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *); /* s3c2410_dma_enqueue * @@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); * drained before the buffer is given to the DMA system. 
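[Editorial sketch] The dma.h prototypes in this area all switch their channel argument from a bare unsigned int to enum dma_ch, so passing anything other than a real channel identifier now fails type checking. A hypothetical caller of the typed API; the client, transfer-unit, FIFO address and buffer details are placeholders, and the enum/op constant names are taken from the s3c24xx DMA headers as assumed here:

#include <linux/types.h>
#include <mach/dma.h>

static struct s3c2410_dma_client example_client = {
	.name = "example",
};

static int example_start_dma(dma_addr_t buf, int size)
{
	enum dma_ch ch = DMACH_I2S_IN;	/* enum value, not a magic number */

	if (s3c2410_dma_request(ch, &example_client, NULL))
		return -EBUSY;

	s3c2410_dma_config(ch, 4);				/* 4-byte transfer unit */
	s3c2410_dma_devconfig(ch, S3C2410_DMASRC_HW, 0x55000010);/* placeholder FIFO addr */
	s3c2410_dma_enqueue(ch, NULL, buf, size);
	return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
}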
*/ -extern int s3c2410_dma_enqueue(unsigned int channel, void *id, +extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size); /* s3c2410_dma_config @@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id, * configure the dma channel */ -extern int s3c2410_dma_config(unsigned int channel, int xferunit); +extern int s3c2410_dma_config(enum dma_ch channel, int xferunit); /* s3c2410_dma_devconfig * * configure the device we're talking to */ -extern int s3c2410_dma_devconfig(unsigned int channel, +extern int s3c2410_dma_devconfig(enum dma_ch channel, enum s3c2410_dmasrc source, unsigned long devaddr); /* s3c2410_dma_getposition @@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel, * get the position that the dma transfer is currently at */ -extern int s3c2410_dma_getposition(unsigned int channel, +extern int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dest); -extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn); -extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn); +extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn); +extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h index c151c5f94a8..116edfe120b 100644 --- a/arch/arm/plat-samsung/include/plat/regs-serial.h +++ b/arch/arm/plat-samsung/include/plat/regs-serial.h @@ -224,6 +224,8 @@ #define S5PV210_UFSTAT_RXMASK (255<<0) #define S5PV210_UFSTAT_RXSHIFT (0) +#define NO_NEED_CHECK_CLKSRC 1 + #ifndef __ASSEMBLY__ /* struct s3c24xx_uart_clksrc diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h index 0ffe34a2155..4c16fa3621b 100644 --- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h +++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h @@ -39,6 +39,7 @@ struct s3c64xx_spi_csinfo { * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6 * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number * @high_speed: If the controller supports HIGH_SPEED_EN bit + * @tx_st_done: Depends on tx fifo_lvl field */ struct s3c64xx_spi_info { int src_clk_nr; @@ -53,6 +54,7 @@ struct s3c64xx_spi_info { int fifo_lvl_mask; int rx_lvl_offset; int high_speed; + int tx_st_done; }; /** diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c index 32582c0958e..657405c481d 100644 --- a/arch/arm/plat-samsung/irq-uart.c +++ b/arch/arm/plat-samsung/irq-uart.c @@ -54,8 +54,15 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base, handle_level_irq); + + if (!gc) { + pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", + __func__, uirq->base_irq); + return; + } + ct = gc->chip_types; - ct->chip.irq_ack = irq_gc_ack; + ct->chip.irq_ack = irq_gc_ack_set_bit; ct->chip.irq_mask = irq_gc_mask_set_bit; ct->chip.irq_unmask = irq_gc_mask_clr_bit; ct->regs.ack = S3C64XX_UINTP; diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c index a607546ddbd..f714d060370 100644 --- a/arch/arm/plat-samsung/irq-vic-timer.c +++ b/arch/arm/plat-samsung/irq-vic-timer.c @@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq) s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, S3C64XX_TINT_CSTAT, handle_level_irq); + + if (!s3c_tgc) { + 
pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n", + __func__, timer_irq); + return; + } + ct = s3c_tgc->chip_types; ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig index 6f9ca56de1f..a06bfccc284 100644 --- a/arch/avr32/configs/atngw100_defconfig +++ b/arch/avr32/configs/atngw100_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig index 7eece0af34c..d8f1fe80d21 100644 --- a/arch/avr32/configs/atngw100_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100_evklcd100_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig index 387eb9d6e42..d4c5b19ec95 100644 --- a/arch/avr32/configs/atngw100_evklcd101_defconfig +++ b/arch/avr32/configs/atngw100_evklcd101_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig index 19f6ceeeff7..77ca4f905d2 100644 --- a/arch/avr32/configs/atngw100_mrmt_defconfig +++ b/arch/avr32/configs/atngw100_mrmt_defconfig @@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_SLUB_DEBUG is not set @@ -109,7 +110,7 @@ CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_S35390A=m CONFIG_RTC_DRV_AT32AP700X=m CONFIG_DMADEVICES=y diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig index f0fe237133a..6e0dca4d313 100644 --- a/arch/avr32/configs/atngw100mkii_defconfig +++ b/arch/avr32/configs/atngw100mkii_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig index e4a7c1dc838..7f2a344a5fa 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig +++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig index 6f37f70c2c3..085eeba88f6 100644 --- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig +++ 
b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig index 4fb01f5ab42..d1a887e6405 100644 --- a/arch/avr32/configs/atstk1002_defconfig +++ b/arch/avr32/configs/atstk1002_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig index 9faaf9b900f..956f2819ad4 100644 --- a/arch/avr32/configs/atstk1003_defconfig +++ b/arch/avr32/configs/atstk1003_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig index 3d2a5d85f97..40c69f38c61 100644 --- a/arch/avr32/configs/atstk1004_defconfig +++ b/arch/avr32/configs/atstk1004_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig index 1ed8f22d4fe..511eb8af356 100644 --- a/arch/avr32/configs/atstk1006_defconfig +++ b/arch/avr32/configs/atstk1006_defconfig @@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig index aeadc955db3..19973b06170 100644 --- a/arch/avr32/configs/favr-32_defconfig +++ b/arch/avr32/configs/favr-32_defconfig @@ -6,6 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14 CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig index 1692beeb7ed..6f45681196d 100644 --- a/arch/avr32/configs/hammerhead_defconfig +++ b/arch/avr32/configs/hammerhead_defconfig @@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig index 8b670a6530b..3befab96682 100644 --- a/arch/avr32/configs/merisc_defconfig +++ b/arch/avr32/configs/merisc_defconfig @@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set CONFIG_MODULES=y diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig index 
5a51f2e7ffb..1bee51f2215 100644 --- a/arch/avr32/configs/mimc200_defconfig +++ b/arch/avr32/configs/mimc200_defconfig @@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_SYSCTL_SYSCALL is not set # CONFIG_BASE_FULL is not set # CONFIG_COMPAT_BRK is not set diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h index 49a88f5a9d2..108502bc677 100644 --- a/arch/avr32/include/asm/processor.h +++ b/arch/avr32/include/asm/processor.h @@ -131,7 +131,6 @@ struct thread_struct { */ #define start_thread(regs, new_pc, new_sp) \ do { \ - set_fs(USER_DS); \ memset(regs, 0, sizeof(*regs)); \ regs->sr = MODE_USER; \ regs->pc = new_pc & ~1; \ diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index aa677e2a382..7fbf0dcb9af 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -1043,8 +1043,9 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags) data->regs = (void __iomem *)pdev->resource[0].start; } + pdev->id = line; pdata = pdev->dev.platform_data; - pdata->num = portnr; + pdata->num = line; at32_usarts[line] = pdev; } diff --git a/arch/avr32/mach-at32ap/include/mach/cpu.h b/arch/avr32/mach-at32ap/include/mach/cpu.h index 9c96a130f3a..8181293115e 100644 --- a/arch/avr32/mach-at32ap/include/mach/cpu.h +++ b/arch/avr32/mach-at32ap/include/mach/cpu.h @@ -31,8 +31,20 @@ #define cpu_is_at91sam9263() (0) #define cpu_is_at91sam9rl() (0) #define cpu_is_at91cap9() (0) +#define cpu_is_at91cap9_revB() (0) +#define cpu_is_at91cap9_revC() (0) #define cpu_is_at91sam9g10() (0) +#define cpu_is_at91sam9g20() (0) #define cpu_is_at91sam9g45() (0) #define cpu_is_at91sam9g45es() (0) +#define cpu_is_at91sam9m10() (0) +#define cpu_is_at91sam9g46() (0) +#define cpu_is_at91sam9m11() (0) +#define cpu_is_at91sam9x5() (0) +#define cpu_is_at91sam9g15() (0) +#define cpu_is_at91sam9g35() (0) +#define cpu_is_at91sam9x35() (0) +#define cpu_is_at91sam9g25() (0) +#define cpu_is_at91sam9x25() (0) #endif /* __ASM_ARCH_CPU_H */ diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c index 3e3646186c9..c9ac2f8e8f6 100644 --- a/arch/avr32/mach-at32ap/intc.c +++ b/arch/avr32/mach-at32ap/intc.c @@ -167,14 +167,12 @@ static int intc_suspend(void) return 0; } -static int intc_resume(void) +static void intc_resume(void) { int i; for (i = 0; i < 64; i++) intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); - - return 0; } #else #define intc_suspend NULL diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig index 31d954216c0..9f1d08401fc 100644 --- a/arch/blackfin/configs/CM-BF548_defconfig +++ b/arch/blackfin/configs/CM-BF548_defconfig @@ -112,7 +112,7 @@ CONFIG_USB_G_SERIAL=m CONFIG_USB_G_PRINTER=m CONFIG_MMC=m CONFIG_SDH_BFIN=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_BFIN=m CONFIG_EXT2_FS=m # CONFIG_DNOTIFY is not set diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu index d236ab4232c..15c22286ae7 100644 --- a/arch/h8300/Kconfig.cpu +++ b/arch/h8300/Kconfig.cpu @@ -162,9 +162,7 @@ config H8300_TPU_CH int "TPU channel" depends on H8300_TPU -config PREEMPT - bool "Preemptible Kernel" - default n +source "kernel/Kconfig.preempt" source "mm/Kconfig" diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 38280ef4a2a..7336ba653b8 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -627,27 +627,6 @@ source 
"drivers/pci/hotplug/Kconfig" source "drivers/pcmcia/Kconfig" -config DMAR - bool "Support for DMA Remapping Devices (EXPERIMENTAL)" - depends on IA64_GENERIC && ACPI && EXPERIMENTAL - help - DMA remapping (DMAR) devices support enables independent address - translations for Direct Memory Access (DMA) from devices. - These DMA remapping devices are reported via ACPI tables - and include PCI device scope covered by these DMA - remapping devices. - -config DMAR_DEFAULT_ON - def_bool y - prompt "Enable DMA Remapping Devices by default" - depends on DMAR - help - Selecting this option will enable a DMAR device at boot time if - one is found. If this option is not selected, DMAR support can - be enabled by passing intel_iommu=on to the kernel. It is - recommended you say N here while the DMAR code remains - experimental. - endmenu endif @@ -681,6 +660,3 @@ source "lib/Kconfig" config IOMMU_HELPER def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) - -config IOMMU_API - def_bool (DMAR) diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 81a1f4e6bcd..485c42d97e8 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -112,8 +112,6 @@ static void sn_ack_irq(struct irq_data *data) irq_move_irq(data); } -static void sn_irq_info_free(struct rcu_head *head); - struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, nasid_t nasid, int slice) { @@ -177,7 +175,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, spin_lock(&sn_irq_info_lock); list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); spin_unlock(&sn_irq_info_lock); - call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + kfree_rcu(sn_irq_info, rcu); finish_up: @@ -338,14 +336,6 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) rcu_read_unlock(); } -static void sn_irq_info_free(struct rcu_head *head) -{ - struct sn_irq_info *sn_irq_info; - - sn_irq_info = container_of(head, struct sn_irq_info, rcu); - kfree(sn_irq_info); -} - void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) { nasid_t nasid = sn_irq_info->irq_nasid; @@ -399,7 +389,7 @@ void sn_irq_unfixup(struct pci_dev *pci_dev) spin_unlock(&sn_irq_info_lock); if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) free_irq_vector(sn_irq_info->irq_irq); - call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + kfree_rcu(sn_irq_info, rcu); pci_dev_put(pci_dev); } diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 85b44e85822..b92b9445255 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -268,17 +268,7 @@ config SCHED_OMIT_FRAME_POINTER bool default y -config PREEMPT - bool "Preemptible Kernel" - help - This option reduces the latency of the kernel when reacting to - real-time or interactive events by allowing a low priority process to - be preempted even if it is in kernel mode executing a system call. - This allows applications to run more reliably even when the system is - under load. - - Say Y here if you are building a kernel for a desktop, embedded - or real-time system. Say N if you are unsure. 
+source "kernel/Kconfig.preempt" config SMP bool "Symmetric multi-processing support" diff --git a/arch/m32r/include/asm/mmzone.h b/arch/m32r/include/asm/mmzone.h index 9f3b5accda8..115ced33feb 100644 --- a/arch/m32r/include/asm/mmzone.h +++ b/arch/m32r/include/asm/mmzone.h @@ -14,12 +14,6 @@ extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) \ -({ \ - pg_data_t *__pgdat = NODE_DATA(nid); \ - __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \ -}) #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) /* @@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn) int node; for (node = 0 ; node < MAX_NUMNODES ; node++) - if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node)) + if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node)) break; return node; diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu index fc98f9b9d4d..b004dc1b171 100644 --- a/arch/m68k/Kconfig.nommu +++ b/arch/m68k/Kconfig.nommu @@ -14,6 +14,33 @@ config GENERIC_CLOCKEVENTS bool default n +config M68000 + bool + help + The Freescale (was Motorola) 68000 CPU is the first generation of + the well known M68K family of processors. The CPU core as well as + being available as a stand alone CPU was also used in many + System-On-Chip devices (eg 68328, 68302, etc). It does not contain + a paging MMU. + +config MCPU32 + bool + help + The Freescale (was then Motorola) CPU32 is a CPU core that is + based on the 68020 processor. For the most part it is used in + System-On-Chip parts, and does not contain a paging MMU. + +config COLDFIRE + bool + select GENERIC_GPIO + select ARCH_REQUIRE_GPIOLIB + help + The Freescale ColdFire family of processors is a modern derivitive + of the 68000 processor family. They are mainly targeted at embedded + applications, and are all System-On-Chip (SOC) devices, as opposed + to stand alone CPUs. They implement a subset of the original 68000 + processor instruction set. + config COLDFIRE_SW_A7 bool default n @@ -36,26 +63,31 @@ choice config M68328 bool "MC68328" + select M68000 help Motorola 68328 processor support. config M68EZ328 bool "MC68EZ328" + select M68000 help Motorola 68EX328 processor support. config M68VZ328 bool "MC68VZ328" + select M68000 help Motorola 68VZ328 processor support. config M68360 bool "MC68360" + select MCPU32 help Motorola 68360 processor support. 
config M5206 bool "MCF5206" + select COLDFIRE select COLDFIRE_SW_A7 select HAVE_MBAR help @@ -63,6 +95,7 @@ config M5206 config M5206e bool "MCF5206e" + select COLDFIRE select COLDFIRE_SW_A7 select HAVE_MBAR help @@ -70,6 +103,7 @@ config M5206e config M520x bool "MCF520x" + select COLDFIRE select GENERIC_CLOCKEVENTS select HAVE_CACHE_SPLIT help @@ -77,6 +111,7 @@ config M520x config M523x bool "MCF523x" + select COLDFIRE select GENERIC_CLOCKEVENTS select HAVE_CACHE_SPLIT select HAVE_IPSBAR @@ -85,6 +120,7 @@ config M523x config M5249 bool "MCF5249" + select COLDFIRE select COLDFIRE_SW_A7 select HAVE_MBAR help @@ -92,6 +128,7 @@ config M5249 config M5271 bool "MCF5271" + select COLDFIRE select HAVE_CACHE_SPLIT select HAVE_IPSBAR help @@ -99,6 +136,7 @@ config M5271 config M5272 bool "MCF5272" + select COLDFIRE select COLDFIRE_SW_A7 select HAVE_MBAR help @@ -106,6 +144,7 @@ config M5272 config M5275 bool "MCF5275" + select COLDFIRE select HAVE_CACHE_SPLIT select HAVE_IPSBAR help @@ -113,6 +152,7 @@ config M5275 config M528x bool "MCF528x" + select COLDFIRE select GENERIC_CLOCKEVENTS select HAVE_CACHE_SPLIT select HAVE_IPSBAR @@ -121,6 +161,7 @@ config M528x config M5307 bool "MCF5307" + select COLDFIRE select COLDFIRE_SW_A7 select HAVE_CACHE_CB select HAVE_MBAR @@ -129,12 +170,14 @@ config M5307 config M532x bool "MCF532x" + select COLDFIRE select HAVE_CACHE_CB help Freescale (Motorola) ColdFire 532x processor support. config M5407 bool "MCF5407" + select COLDFIRE select COLDFIRE_SW_A7 select HAVE_CACHE_CB select HAVE_MBAR @@ -143,6 +186,7 @@ config M5407 config M547x bool "MCF547x" + select COLDFIRE select HAVE_CACHE_CB select HAVE_MBAR help @@ -150,6 +194,7 @@ config M547x config M548x bool "MCF548x" + select COLDFIRE select HAVE_CACHE_CB select HAVE_MBAR help @@ -168,13 +213,6 @@ config M54xx depends on (M548x || M547x) default y -config COLDFIRE - bool - depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M54xx) - select GENERIC_GPIO - select ARCH_REQUIRE_GPIOLIB - default y - config CLOCK_SET bool "Enable setting the CPU clock frequency" default n diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c index 8b6e201b2c2..c5748bb4ea7 100644 --- a/arch/m68k/emu/nfeth.c +++ b/arch/m68k/emu/nfeth.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/interrupt.h> #include <linux/module.h> #include <asm/natfeat.h> #include <asm/virtconvert.h> @@ -204,7 +205,6 @@ static struct net_device * __init nfeth_probe(int unit) dev->irq = nfEtherIRQ; dev->netdev_ops = &nfeth_netdev_ops; - dev->flags |= NETIF_F_NO_CSUM; memcpy(dev->dev_addr, mac, ETH_ALEN); priv = netdev_priv(dev); diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c index 33f82769547..1b7a14d1a00 100644 --- a/arch/m68k/kernel/m68k_ksyms.c +++ b/arch/m68k/kernel/m68k_ksyms.c @@ -14,8 +14,7 @@ EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__muldi3); -#if !defined(__mc68020__) && !defined(__mc68030__) && \ - !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__) +#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) /* * Simpler 68k and ColdFire parts also need a few other gcc functions. */ diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S index f4d715cdca0..7dc4087a954 100644 --- a/arch/m68k/kernel/vmlinux.lds_no.S +++ b/arch/m68k/kernel/vmlinux.lds_no.S @@ -84,52 +84,52 @@ SECTIONS { /* Kernel symbol table: Normal symbols */ . 
= ALIGN(4); __start___ksymtab = .; - *(__ksymtab) + *(SORT(___ksymtab+*)) __stop___ksymtab = .; /* Kernel symbol table: GPL-only symbols */ __start___ksymtab_gpl = .; - *(__ksymtab_gpl) + *(SORT(___ksymtab_gpl+*)) __stop___ksymtab_gpl = .; /* Kernel symbol table: Normal unused symbols */ __start___ksymtab_unused = .; - *(__ksymtab_unused) + *(SORT(___ksymtab_unused+*)) __stop___ksymtab_unused = .; /* Kernel symbol table: GPL-only unused symbols */ __start___ksymtab_unused_gpl = .; - *(__ksymtab_unused_gpl) + *(SORT(___ksymtab_unused_gpl+*)) __stop___ksymtab_unused_gpl = .; /* Kernel symbol table: GPL-future symbols */ __start___ksymtab_gpl_future = .; - *(__ksymtab_gpl_future) + *(SORT(___ksymtab_gpl_future+*)) __stop___ksymtab_gpl_future = .; /* Kernel symbol table: Normal symbols */ __start___kcrctab = .; - *(__kcrctab) + *(SORT(___kcrctab+*)) __stop___kcrctab = .; /* Kernel symbol table: GPL-only symbols */ __start___kcrctab_gpl = .; - *(__kcrctab_gpl) + *(SORT(___kcrctab_gpl+*)) __stop___kcrctab_gpl = .; /* Kernel symbol table: Normal unused symbols */ __start___kcrctab_unused = .; - *(__kcrctab_unused) + *(SORT(___kcrctab_unused+*)) __stop___kcrctab_unused = .; /* Kernel symbol table: GPL-only unused symbols */ __start___kcrctab_unused_gpl = .; - *(__kcrctab_unused_gpl) + *(SORT(___kcrctab_unused_gpl+*)) __stop___kcrctab_unused_gpl = .; /* Kernel symbol table: GPL-future symbols */ __start___kcrctab_gpl_future = .; - *(__kcrctab_gpl_future) + *(SORT(___kcrctab_gpl_future+*)) __stop___kcrctab_gpl_future = .; /* Kernel symbol table: strings */ diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c index 62182c81e91..06488931697 100644 --- a/arch/m68k/lib/memcpy.c +++ b/arch/m68k/lib/memcpy.c @@ -34,8 +34,10 @@ void *memcpy(void *to, const void *from, size_t n) if (temp) { long *lto = to; const long *lfrom = from; -#if defined(__mc68020__) || defined(__mc68030__) || \ - defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) +#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) + for (; temp; temp--) + *lto++ = *lfrom++; +#else asm volatile ( " movel %2,%3\n" " andw #7,%3\n" @@ -56,9 +58,6 @@ void *memcpy(void *to, const void *from, size_t n) " jpl 4b" : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1) : "0" (lfrom), "1" (lto), "2" (temp)); -#else - for (; temp; temp--) - *lto++ = *lfrom++; #endif to = lto; from = lfrom; diff --git a/arch/m68k/lib/memset.c b/arch/m68k/lib/memset.c index f649e6a2e64..8a7639f0a2f 100644 --- a/arch/m68k/lib/memset.c +++ b/arch/m68k/lib/memset.c @@ -32,8 +32,10 @@ void *memset(void *s, int c, size_t count) temp = count >> 2; if (temp) { long *ls = s; -#if defined(__mc68020__) || defined(__mc68030__) || \ - defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) +#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) + for (; temp; temp--) + *ls++ = c; +#else size_t temp1; asm volatile ( " movel %1,%2\n" @@ -55,9 +57,6 @@ void *memset(void *s, int c, size_t count) " jpl 1b" : "=a" (ls), "=d" (temp), "=&d" (temp1) : "d" (c), "0" (ls), "1" (temp)); -#else - for (; temp; temp--) - *ls++ = c; #endif s = ls; } diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c index 079bafca073..79e928a525d 100644 --- a/arch/m68k/lib/muldi3.c +++ b/arch/m68k/lib/muldi3.c @@ -19,17 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ -#if defined(__mc68020__) || defined(__mc68030__) || \ - defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) - -#define umul_ppmm(w1, w0, u, v) \ - __asm__ ("mulu%.l %3,%1:%0" \ - : "=d" ((USItype)(w0)), \ - "=d" ((USItype)(w1)) \ - : "%0" ((USItype)(u)), \ - "dmi" ((USItype)(v))) - -#else +#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) #define SI_TYPE_SIZE 32 #define __BITS4 (SI_TYPE_SIZE / 4) @@ -61,6 +51,15 @@ Boston, MA 02111-1307, USA. */ (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \ } while (0) +#else + +#define umul_ppmm(w1, w0, u, v) \ + __asm__ ("mulu%.l %3,%1:%0" \ + : "=d" ((USItype)(w0)), \ + "=d" ((USItype)(w1)) \ + : "%0" ((USItype)(u)), \ + "dmi" ((USItype)(v))) + #endif #define __umulsidi3(u, v) \ diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h index 746df91e579..242be57a319 100644 --- a/arch/microblaze/include/asm/pci-bridge.h +++ b/arch/microblaze/include/asm/pci-bridge.h @@ -19,9 +19,6 @@ enum { */ PCI_REASSIGN_ALL_RSRC = 0x00000001, - /* Re-assign all bus numbers */ - PCI_REASSIGN_ALL_BUS = 0x00000002, - /* Do not try to assign, just use existing setup */ PCI_PROBE_ONLY = 0x00000004, @@ -110,16 +107,6 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) return bus->sysdata; } -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - struct pci_controller *host; - - if (bus->self) - return pci_device_to_OF_node(bus->self); - host = pci_bus_to_host(bus); - return host ? host->dn : NULL; -} - static inline int isa_vaddr_is_ioport(void __iomem *address) { /* No specific ISA handling on ppc32 at this stage, it diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index ba65cf47254..1dd9d6b1e27 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -40,8 +40,7 @@ struct pci_dev; * Set this to 1 if you want the kernel to re-assign all PCI * bus numbers (don't do that on ppc64 yet !) */ -#define pcibios_assign_all_busses() \ - (pci_has_flag(PCI_REASSIGN_ALL_BUS)) +#define pcibios_assign_all_busses() 0 static inline void pcibios_set_master(struct pci_dev *dev) { diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index d0890d36ef6..9bd01ecb00d 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h @@ -29,21 +29,6 @@ extern int early_uartlite_console(void); extern int early_uart16550_console(void); -#ifdef CONFIG_PCI -/* - * PCI <-> OF matching functions - * (XXX should these be here?) 
- */ -struct pci_bus; -struct pci_dev; -extern int pci_device_from_OF_node(struct device_node *node, - u8 *bus, u8 *devfn); -extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus, - int devfn); -extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev); -extern void pci_create_OF_bus_map(void); -#endif - /* * OF address retreival & translation */ diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile index 9889cc2e129..d1114fbd478 100644 --- a/arch/microblaze/pci/Makefile +++ b/arch/microblaze/pci/Makefile @@ -2,5 +2,5 @@ # Makefile # -obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o +obj-$(CONFIG_PCI) += pci-common.o indirect_pci.o iomap.o obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 53599067d2f..041b1d86d75 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -50,6 +50,11 @@ unsigned int pci_flags; static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; +unsigned long isa_io_base; +unsigned long pci_dram_offset; +static int pci_bus_count; + + void set_pci_dma_ops(struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; @@ -1558,6 +1563,112 @@ void __devinit pcibios_setup_phb_resources(struct pci_controller *hose) (unsigned long)hose->io_base_virt - _IO_BASE); } +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct pci_controller *hose = bus->sysdata; + + return of_node_get(hose->dn); +} + +static void __devinit pcibios_scan_phb(struct pci_controller *hose) +{ + struct pci_bus *bus; + struct device_node *node = hose->dn; + unsigned long io_offset; + struct resource *res = &hose->io_resource; + + pr_debug("PCI: Scanning PHB %s\n", + node ? node->full_name : "<NO NAME>"); + + /* Create an empty bus for the toplevel */ + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); + if (bus == NULL) { + printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", + hose->global_number); + return; + } + bus->secondary = hose->first_busno; + hose->bus = bus; + + /* Fixup IO space offset */ + io_offset = (unsigned long)hose->io_base_virt - isa_io_base; + res->start = (res->start + io_offset) & 0xffffffffu; + res->end = (res->end + io_offset) & 0xffffffffu; + + /* Wire up PHB bus resources */ + pcibios_setup_phb_resources(hose); + + /* Scan children */ + hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); +} + +static int __init pcibios_init(void) +{ + struct pci_controller *hose, *tmp; + int next_busno = 0; + + printk(KERN_INFO "PCI: Probing PCI hardware\n"); + + /* Scan all of the recorded PCI controllers. */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + hose->last_busno = 0xff; + pcibios_scan_phb(hose); + printk(KERN_INFO "calling pci_bus_add_devices()\n"); + pci_bus_add_devices(hose->bus); + if (next_busno <= hose->last_busno) + next_busno = hose->last_busno + 1; + } + pci_bus_count = next_busno; + + /* Call common code to handle resource allocation */ + pcibios_resource_survey(); + + return 0; +} + +subsys_initcall(pcibios_init); + +static struct pci_controller *pci_bus_to_hose(int bus) +{ + struct pci_controller *hose, *tmp; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + if (bus >= hose->first_busno && bus <= hose->last_busno) + return hose; + return NULL; +} + +/* Provide information on locations of various I/O regions in physical + * memory. Do this on a per-card basis so that we choose the right + * root bridge. 
+ * Note that the returned IO or memory base is a physical address + */ + +long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) +{ + struct pci_controller *hose; + long result = -EOPNOTSUPP; + + hose = pci_bus_to_hose(bus); + if (!hose) + return -ENODEV; + + switch (which) { + case IOBASE_BRIDGE_NUMBER: + return (long)hose->first_busno; + case IOBASE_MEMORY: + return (long)hose->pci_mem_offset; + case IOBASE_IO: + return (long)hose->io_base_phys; + case IOBASE_ISA_IO: + return (long)isa_io_base; + case IOBASE_ISA_MEM: + return (long)isa_mem_base; + } + + return result; +} + /* * Null PCI config access functions, for the case when we can't * find a hose. @@ -1626,3 +1737,4 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, { return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); } + diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c deleted file mode 100644 index 92728a6cfd8..00000000000 --- a/arch/microblaze/pci/pci_32.c +++ /dev/null @@ -1,432 +0,0 @@ -/* - * Common pmac/prep/chrp pci routines. -- Cort - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/capability.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/bootmem.h> -#include <linux/irq.h> -#include <linux/list.h> -#include <linux/of.h> -#include <linux/slab.h> - -#include <asm/processor.h> -#include <asm/io.h> -#include <asm/prom.h> -#include <asm/sections.h> -#include <asm/pci-bridge.h> -#include <asm/byteorder.h> -#include <asm/uaccess.h> - -#undef DEBUG - -unsigned long isa_io_base; -unsigned long pci_dram_offset; -int pcibios_assign_bus_offset = 1; - -static u8 *pci_to_OF_bus_map; - -/* By default, we don't re-assign bus numbers. We do this only on - * some pmacs - */ -static int pci_assign_all_buses; - -static int pci_bus_count; - -/* - * Functions below are used on OpenFirmware machines. - */ -static void -make_one_node_map(struct device_node *node, u8 pci_bus) -{ - const int *bus_range; - int len; - - if (pci_bus >= pci_bus_count) - return; - bus_range = of_get_property(node, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) { - printk(KERN_WARNING "Can't get bus-range for %s, " - "assuming it starts at 0\n", node->full_name); - pci_to_OF_bus_map[pci_bus] = 0; - } else - pci_to_OF_bus_map[pci_bus] = bus_range[0]; - - for_each_child_of_node(node, node) { - struct pci_dev *dev; - const unsigned int *class_code, *reg; - - class_code = of_get_property(node, "class-code", NULL); - if (!class_code || - ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) - continue; - reg = of_get_property(node, "reg", NULL); - if (!reg) - continue; - dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); - if (!dev || !dev->subordinate) { - pci_dev_put(dev); - continue; - } - make_one_node_map(node, dev->subordinate->number); - pci_dev_put(dev); - } -} - -void -pcibios_make_OF_bus_map(void) -{ - int i; - struct pci_controller *hose, *tmp; - struct property *map_prop; - struct device_node *dn; - - pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL); - if (!pci_to_OF_bus_map) { - printk(KERN_ERR "Can't allocate OF bus map !\n"); - return; - } - - /* We fill the bus map with invalid values, that helps - * debugging. 
- */ - for (i = 0; i < pci_bus_count; i++) - pci_to_OF_bus_map[i] = 0xff; - - /* For each hose, we begin searching bridges */ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { - struct device_node *node = hose->dn; - - if (!node) - continue; - make_one_node_map(node, hose->first_busno); - } - dn = of_find_node_by_path("/"); - map_prop = of_find_property(dn, "pci-OF-bus-map", NULL); - if (map_prop) { - BUG_ON(pci_bus_count > map_prop->length); - memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); - } - of_node_put(dn); -#ifdef DEBUG - printk(KERN_INFO "PCI->OF bus map:\n"); - for (i = 0; i < pci_bus_count; i++) { - if (pci_to_OF_bus_map[i] == 0xff) - continue; - printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]); - } -#endif -} - -typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data); - -static struct device_node *scan_OF_pci_childs(struct device_node *parent, - pci_OF_scan_iterator filter, void *data) -{ - struct device_node *node; - struct device_node *sub_node; - - for_each_child_of_node(parent, node) { - const unsigned int *class_code; - - if (filter(node, data)) { - of_node_put(node); - return node; - } - - /* For PCI<->PCI bridges or CardBus bridges, we go down - * Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. - */ - class_code = of_get_property(node, "class-code", NULL); - if ((!class_code || - ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && - strcmp(node->name, "multifunc-device")) - continue; - sub_node = scan_OF_pci_childs(node, filter, data); - if (sub_node) { - of_node_put(node); - return sub_node; - } - } - return NULL; -} - -static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, - unsigned int devfn) -{ - struct device_node *np, *cnp; - const u32 *reg; - unsigned int psize; - - for_each_child_of_node(parent, np) { - reg = of_get_property(np, "reg", &psize); - if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) - return np; - - /* Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. */ - if (!strcmp(np->name, "multifunc-device")) { - cnp = scan_OF_for_pci_dev(np, devfn); - if (cnp) - return cnp; - } - } - return NULL; -} - - -static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) -{ - struct device_node *parent, *np; - - /* Are we a root bus ? */ - if (bus->self == NULL || bus->parent == NULL) { - struct pci_controller *hose = pci_bus_to_host(bus); - if (hose == NULL) - return NULL; - return of_node_get(hose->dn); - } - - /* not a root bus, we need to get our parent */ - parent = scan_OF_for_pci_bus(bus->parent); - if (parent == NULL) - return NULL; - - /* now iterate for children for a match */ - np = scan_OF_for_pci_dev(parent, bus->self->devfn); - of_node_put(parent); - - return np; -} - -/* - * Scans the OF tree for a device node matching a PCI device - */ -struct device_node * -pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) -{ - struct device_node *parent, *np; - - pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); - parent = scan_OF_for_pci_bus(bus); - if (parent == NULL) - return NULL; - pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); - np = scan_OF_for_pci_dev(parent, devfn); - of_node_put(parent); - pr_debug(" result is %s\n", np ? 
np->full_name : "<NULL>"); - - /* XXX most callers don't release the returned node - * mostly because ppc64 doesn't increase the refcount, - * we need to fix that. - */ - return np; -} -EXPORT_SYMBOL(pci_busdev_to_OF_node); - -struct device_node* -pci_device_to_OF_node(struct pci_dev *dev) -{ - return pci_busdev_to_OF_node(dev->bus, dev->devfn); -} -EXPORT_SYMBOL(pci_device_to_OF_node); - -static int -find_OF_pci_device_filter(struct device_node *node, void *data) -{ - return ((void *)node == data); -} - -/* - * Returns the PCI device matching a given OF node - */ -int -pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) -{ - const unsigned int *reg; - struct pci_controller *hose; - struct pci_dev *dev = NULL; - - /* Make sure it's really a PCI device */ - hose = pci_find_hose_for_OF_device(node); - if (!hose || !hose->dn) - return -ENODEV; - if (!scan_OF_pci_childs(hose->dn, - find_OF_pci_device_filter, (void *)node)) - return -ENODEV; - reg = of_get_property(node, "reg", NULL); - if (!reg) - return -ENODEV; - *bus = (reg[0] >> 16) & 0xff; - *devfn = ((reg[0] >> 8) & 0xff); - - /* Ok, here we need some tweak. If we have already renumbered - * all busses, we can't rely on the OF bus number any more. - * the pci_to_OF_bus_map is not enough as several PCI busses - * may match the same OF bus number. - */ - if (!pci_to_OF_bus_map) - return 0; - - for_each_pci_dev(dev) - if (pci_to_OF_bus_map[dev->bus->number] == *bus && - dev->devfn == *devfn) { - *bus = dev->bus->number; - pci_dev_put(dev); - return 0; - } - - return -ENODEV; -} -EXPORT_SYMBOL(pci_device_from_OF_node); - -/* We create the "pci-OF-bus-map" property now so it appears in the - * /proc device tree - */ -void __init -pci_create_OF_bus_map(void) -{ - struct property *of_prop; - struct device_node *dn; - - of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \ - 256); - if (!of_prop) - return; - dn = of_find_node_by_path("/"); - if (dn) { - memset(of_prop, -1, sizeof(struct property) + 256); - of_prop->name = "pci-OF-bus-map"; - of_prop->length = 256; - of_prop->value = &of_prop[1]; - prom_add_property(dn, of_prop); - of_node_put(dn); - } -} - -static void __devinit pcibios_scan_phb(struct pci_controller *hose) -{ - struct pci_bus *bus; - struct device_node *node = hose->dn; - unsigned long io_offset; - struct resource *res = &hose->io_resource; - - pr_debug("PCI: Scanning PHB %s\n", - node ? node->full_name : "<NO NAME>"); - - /* Create an empty bus for the toplevel */ - bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); - if (bus == NULL) { - printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", - hose->global_number); - return; - } - bus.dev->of_node = of_node_get(node); - bus->secondary = hose->first_busno; - hose->bus = bus; - - /* Fixup IO space offset */ - io_offset = (unsigned long)hose->io_base_virt - isa_io_base; - res->start = (res->start + io_offset) & 0xffffffffu; - res->end = (res->end + io_offset) & 0xffffffffu; - - /* Wire up PHB bus resources */ - pcibios_setup_phb_resources(hose); - - /* Scan children */ - hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); -} - -static int __init pcibios_init(void) -{ - struct pci_controller *hose, *tmp; - int next_busno = 0; - - printk(KERN_INFO "PCI: Probing PCI hardware\n"); - - if (pci_flags & PCI_REASSIGN_ALL_BUS) { - printk(KERN_INFO "setting pci_asign_all_busses\n"); - pci_assign_all_buses = 1; - } - - /* Scan all of the recorded PCI controllers. 
*/ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { - if (pci_assign_all_buses) - hose->first_busno = next_busno; - hose->last_busno = 0xff; - pcibios_scan_phb(hose); - printk(KERN_INFO "calling pci_bus_add_devices()\n"); - pci_bus_add_devices(hose->bus); - if (pci_assign_all_buses || next_busno <= hose->last_busno) - next_busno = hose->last_busno + \ - pcibios_assign_bus_offset; - } - pci_bus_count = next_busno; - - /* OpenFirmware based machines need a map of OF bus - * numbers vs. kernel bus numbers since we may have to - * remap them. - */ - if (pci_assign_all_buses) - pcibios_make_OF_bus_map(); - - /* Call common code to handle resource allocation */ - pcibios_resource_survey(); - - return 0; -} - -subsys_initcall(pcibios_init); - -static struct pci_controller* -pci_bus_to_hose(int bus) -{ - struct pci_controller *hose, *tmp; - - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) - if (bus >= hose->first_busno && bus <= hose->last_busno) - return hose; - return NULL; -} - -/* Provide information on locations of various I/O regions in physical - * memory. Do this on a per-card basis so that we choose the right - * root bridge. - * Note that the returned IO or memory base is a physical address - */ - -long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) -{ - struct pci_controller *hose; - long result = -EOPNOTSUPP; - - hose = pci_bus_to_hose(bus); - if (!hose) - return -ENODEV; - - switch (which) { - case IOBASE_BRIDGE_NUMBER: - return (long)hose->first_busno; - case IOBASE_MEMORY: - return (long)hose->pci_mem_offset; - case IOBASE_IO: - return (long)hose->io_base_phys; - case IOBASE_ISA_IO: - return (long)isa_io_base; - case IOBASE_ISA_MEM: - return (long)isa_mem_base; - } - - return result; -} diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 653da62d068..6cb60adb7b3 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -185,6 +185,7 @@ config MACH_JAZZ select CSRC_R4K select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN select GENERIC_ISA_DMA + select HAVE_PCSPKR_PLATFORM select IRQ_CPU select I8253 select I8259 @@ -266,6 +267,7 @@ config MIPS_MALTA select CSRC_R4K select DMA_NONCOHERENT select GENERIC_ISA_DMA + select HAVE_PCSPKR_PLATFORM select IRQ_CPU select IRQ_GIC select HW_HAS_PCI @@ -640,6 +642,7 @@ config SNI_RM select DEFAULT_SGI_PARTITION if CPU_BIG_ENDIAN select DMA_NONCOHERENT select GENERIC_ISA_DMA + select HAVE_PCSPKR_PLATFORM select HW_HAS_EISA select HW_HAS_PCI select IRQ_CPU @@ -2388,6 +2391,7 @@ config MMU config I8253 bool select CLKSRC_I8253 + select CLKEVT_I8253 select MIPS_EXTERNAL_TIMER config ZONE_DMA32 diff --git a/arch/mips/cobalt/time.c b/arch/mips/cobalt/time.c index 0162f9edc69..3bff3b820ba 100644 --- a/arch/mips/cobalt/time.c +++ b/arch/mips/cobalt/time.c @@ -17,10 +17,10 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +#include <linux/i8253.h> #include <linux/init.h> #include <asm/gt64120.h> -#include <asm/i8253.h> #include <asm/time.h> #define GT641XX_BASE_CLOCK 50000000 /* 50MHz */ diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 37862b2ce36..807c97eed8a 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -678,7 +678,7 @@ CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_DEFAULT_ON=y -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_INTF_DEV_UIE_EMUL=y CONFIG_RTC_DRV_TEST=m 
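The MIPS changes above retire the architecture's private PIT glue: CONFIG_I8253 now selects CLKEVT_I8253 and callers include <linux/i8253.h> instead of <asm/i8253.h>. The rewritten arch/mips/kernel/i8253.c that follows then shrinks to wiring IRQ 0 to the shared clockevent, roughly as in this condensed sketch (illustrative only; pit_timer_interrupt, pit_irq0 and example_setup_pit_timer are invented names, while i8253_clockevent, clockevent_i8253_init() and setup_irq() are the shared APIs the patch itself uses):

#include <linux/clockchips.h>
#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Forward every PIT tick on IRQ 0 to the shared i8253 clock_event_device. */
static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
{
        i8253_clockevent.event_handler(&i8253_clockevent);
        return IRQ_HANDLED;
}

static struct irqaction pit_irq0 = {
        .handler = pit_timer_interrupt,
        .flags   = IRQF_TIMER,          /* real code may add further flags */
        .name    = "timer",
};

void __init example_setup_pit_timer(void)
{
        clockevent_i8253_init(true);    /* true: one-shot mode is usable */
        setup_irq(0, &pit_irq0);
}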
CONFIG_RTC_DRV_DS1307=m diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h deleted file mode 100644 index 9ad011366f7..00000000000 --- a/arch/mips/include/asm/i8253.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Machine specific IO port address definition for generic. - * Written by Osamu Tomita <tomita@cinet.co.jp> - */ -#ifndef __ASM_I8253_H -#define __ASM_I8253_H - -#include <linux/spinlock.h> - -/* i8253A PIT registers */ -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 -#define PIT_CH2 0x42 - -#define PIT_LATCH LATCH - -extern raw_spinlock_t i8253_lock; - -extern void setup_pit_timer(void); - -#define inb_pit inb_p -#define outb_pit outb_p - -#endif /* __ASM_I8253_H */ diff --git a/arch/mips/include/asm/stacktrace.h b/arch/mips/include/asm/stacktrace.h index 0bf82818aa5..780ee2c2a2a 100644 --- a/arch/mips/include/asm/stacktrace.h +++ b/arch/mips/include/asm/stacktrace.h @@ -7,6 +7,10 @@ extern int raw_show_trace; extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, unsigned long pc, unsigned long *ra); +extern unsigned long unwind_stack_by_address(unsigned long stack_page, + unsigned long *sp, + unsigned long pc, + unsigned long *ra); #else #define raw_show_trace 1 static inline unsigned long unwind_stack(struct task_struct *task, diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c index 260df475094..ca9bd206914 100644 --- a/arch/mips/jazz/irq.c +++ b/arch/mips/jazz/irq.c @@ -7,6 +7,7 @@ * Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle */ #include <linux/clockchips.h> +#include <linux/i8253.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> @@ -15,7 +16,6 @@ #include <linux/irq.h> #include <asm/irq_cpu.h> -#include <asm/i8253.h> #include <asm/i8259.h> #include <asm/io.h> #include <asm/jazz.h> diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 391221b6a6a..be4ee7d63e0 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -3,96 +3,16 @@ * */ #include <linux/clockchips.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/jiffies.h> +#include <linux/i8253.h> #include <linux/module.h> #include <linux/smp.h> -#include <linux/spinlock.h> #include <linux/irq.h> -#include <asm/delay.h> -#include <asm/i8253.h> -#include <asm/io.h> #include <asm/time.h> -DEFINE_RAW_SPINLOCK(i8253_lock); -EXPORT_SYMBOL(i8253_lock); - -/* - * Initialize the PIT timer. - * - * This is also called after resume to bring the PIT into operation again. 
- */ -static void init_pit_timer(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - raw_spin_lock(&i8253_lock); - - switch(mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* binary, mode 2, LSB/MSB, ch 0 */ - outb_p(0x34, PIT_MODE); - outb_p(LATCH & 0xff , PIT_CH0); /* LSB */ - outb(LATCH >> 8 , PIT_CH0); /* MSB */ - break; - - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - if (evt->mode == CLOCK_EVT_MODE_PERIODIC || - evt->mode == CLOCK_EVT_MODE_ONESHOT) { - outb_p(0x30, PIT_MODE); - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); - } - break; - - case CLOCK_EVT_MODE_ONESHOT: - /* One shot setup */ - outb_p(0x38, PIT_MODE); - break; - - case CLOCK_EVT_MODE_RESUME: - /* Nothing to do here */ - break; - } - raw_spin_unlock(&i8253_lock); -} - -/* - * Program the next event in oneshot mode - * - * Delta is given in PIT ticks - */ -static int pit_next_event(unsigned long delta, struct clock_event_device *evt) -{ - raw_spin_lock(&i8253_lock); - outb_p(delta & 0xff , PIT_CH0); /* LSB */ - outb(delta >> 8 , PIT_CH0); /* MSB */ - raw_spin_unlock(&i8253_lock); - - return 0; -} - -/* - * On UP the PIT can serve all of the possible timer functions. On SMP systems - * it can be solely used for the global tick. - * - * The profiling and update capabilites are switched off once the local apic is - * registered. This mechanism replaces the previous #ifdef LOCAL_APIC - - * !using_apic_timer decisions in do_timer_interrupt_hook() - */ -static struct clock_event_device pit_clockevent = { - .name = "pit", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = init_pit_timer, - .set_next_event = pit_next_event, - .irq = 0, -}; - static irqreturn_t timer_interrupt(int irq, void *dev_id) { - pit_clockevent.event_handler(&pit_clockevent); + i8253_clockevent.event_handler(&i8253_clockevent); return IRQ_HANDLED; } @@ -103,25 +23,9 @@ static struct irqaction irq0 = { .name = "timer" }; -/* - * Initialize the conversion factor and the min/max deltas of the clock event - * structure and register the clock event source with the framework. - */ void __init setup_pit_timer(void) { - struct clock_event_device *cd = &pit_clockevent; - unsigned int cpu = smp_processor_id(); - - /* - * Start pit with the boot cpu mask and make it global after the - * IO_APIC has been initialized. 
- */ - cd->cpumask = cpumask_of(cpu); - clockevent_set_clock(cd, CLOCK_TICK_RATE); - cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); - cd->min_delta_ns = clockevent_delta2ns(0xF, cd); - clockevents_register_device(cd); - + clockevent_i8253_init(true); setup_irq(0, &irq0); } diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index c018696765d..5c74eb797f0 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -14,7 +14,7 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/spinlock.h> -#include <linux/sysdev.h> +#include <linux/syscore_ops.h> #include <linux/irq.h> #include <asm/i8259.h> @@ -215,14 +215,13 @@ spurious_8259A_irq: } } -static int i8259A_resume(struct sys_device *dev) +static void i8259A_resume(void) { if (i8259A_auto_eoi >= 0) init_8259A(i8259A_auto_eoi); - return 0; } -static int i8259A_shutdown(struct sys_device *dev) +static void i8259A_shutdown(void) { /* Put the i8259A into a quiescent state that * the kernel initialization code can get it @@ -232,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev) outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ } - return 0; } -static struct sysdev_class i8259_sysdev_class = { - .name = "i8259", +static struct syscore_ops i8259_syscore_ops = { .resume = i8259A_resume, .shutdown = i8259A_shutdown, }; -static struct sys_device device_i8259A = { - .id = 0, - .cls = &i8259_sysdev_class, -}; - static int __init i8259A_init_sysfs(void) { - int error = sysdev_class_register(&i8259_sysdev_class); - if (!error) - error = sysdev_register(&device_i8259A); - return error; + register_syscore_ops(&i8259_syscore_ops); + return 0; } device_initcall(i8259A_init_sysfs); diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index a8244854d3d..d0deaab9ace 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc, if (!mipspmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, 0, data, regs)) + if (perf_event_overflow(event, data, regs)) mipspmu->disable_event(idx); } diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 75266ff4cc3..e5ad09a9baf 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -377,6 +377,20 @@ static const struct mips_perf_event mipsxxcore_cache_map [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, }; /* 74K core has completely different cache event map. 
*/ @@ -480,6 +494,20 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, + }, +}, }; #ifdef CONFIG_MIPS_MT_SMP diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index d2112d3cf11..c28fbe6107b 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -373,18 +373,18 @@ unsigned long thread_saved_pc(struct task_struct *tsk) #ifdef CONFIG_KALLSYMS -/* used by show_backtrace() */ -unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, - unsigned long pc, unsigned long *ra) +/* generic stack unwinding function */ +unsigned long notrace unwind_stack_by_address(unsigned long stack_page, + unsigned long *sp, + unsigned long pc, + unsigned long *ra) { - unsigned long stack_page; struct mips_frame_info info; unsigned long size, ofs; int leaf; extern void ret_from_irq(void); extern void ret_from_exception(void); - stack_page = (unsigned long)task_stack_page(task); if (!stack_page) return 0; @@ -443,6 +443,15 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, *ra = 0; return __kernel_text_address(pc) ? pc : 0; } +EXPORT_SYMBOL(unwind_stack_by_address); + +/* used by show_backtrace() */ +unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, + unsigned long pc, unsigned long *ra) +{ + unsigned long stack_page = (unsigned long)task_stack_page(task); + return unwind_stack_by_address(stack_page, sp, pc, ra); +} #endif /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e9b3af27d84..b7517e3abc8 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) { if ((opcode & OPCODE) == LL) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); return simulate_ll(regs, opcode); } if ((opcode & OPCODE) == SC) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); return simulate_sc(regs, opcode); } @@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) int rd = (opcode & RD) >> 11; int rt = (opcode & RT) >> 16; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); switch (rd) { case 0: /* CPU number */ regs->regs[rt] = smp_processor_id(); @@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode) { if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + 1, regs, 0); return 0; } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index cfea1adfa15..eb319b58035 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, unsigned long value; unsigned int res; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); /* * This load never faults. 
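A note on the run of MIPS conversions above (kernel/traps.c, kernel/unaligned.c and, just below, math-emu and mm/fault.c): perf_sw_event() has dropped its old nmi argument, so software events are now reported as (event id, count, regs, address). A hedged sketch of the new call shape only; example_count_emulation_fault() is an invented helper:

#include <linux/perf_event.h>
#include <linux/ptrace.h>

/* Charge one emulation fault to the context captured in @regs. */
static void example_count_emulation_fault(struct pt_regs *regs,
                                          unsigned long addr)
{
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
}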
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs) mm_segment_t seg; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, - 1, 0, regs, regs->cp0_badvaddr); + 1, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? * Or are we running in MIPS16 mode? diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index d32cb050311..dbf2f93a509 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, } emul: - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, 0, xcp, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0); MIPS_FPU_EMU_INC_STATS(emulated); switch (MIPSInst_OPCODE(ir)) { case ldc1_op:{ diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 137ee76a004..937cf336816 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -145,7 +145,7 @@ good_area: * the fault. */ fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -154,12 +154,10 @@ good_area: BUG(); } if (fault & VM_FAULT_MAJOR) { - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, - 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); tsk->maj_flt++; } else { - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, - 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); tsk->min_flt++; } diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 1620b83cd13..f8ee945ee41 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -19,6 +19,7 @@ */ #include <linux/types.h> +#include <linux/i8253.h> #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/sched.h> @@ -31,7 +32,6 @@ #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/hardirq.h> -#include <asm/i8253.h> #include <asm/irq.h> #include <asm/div64.h> #include <asm/cpu.h> diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile index 4b9d7044e26..29f2f13eb31 100644 --- a/arch/mips/oprofile/Makefile +++ b/arch/mips/oprofile/Makefile @@ -8,7 +8,7 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ oprofilefs.o oprofile_stats.o \ timer_int.o ) -oprofile-y := $(DRIVER_OBJS) common.o +oprofile-y := $(DRIVER_OBJS) common.o backtrace.o oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c new file mode 100644 index 00000000000..6854ed5097d --- /dev/null +++ b/arch/mips/oprofile/backtrace.c @@ -0,0 +1,175 @@ +#include <linux/oprofile.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <asm/ptrace.h> +#include <asm/stacktrace.h> +#include <linux/stacktrace.h> +#include <linux/kernel.h> +#include <asm/sections.h> +#include <asm/inst.h> + +struct stackframe { + unsigned long sp; + unsigned long pc; + unsigned long ra; +}; + +static inline int get_mem(unsigned long addr, unsigned long *result) +{ + unsigned long *address = (unsigned long *) addr; + if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long))) + return -1; + if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) + return -3; + return 0; +} + +/* + * These two instruction 
helpers were taken from process.c + */ +static inline int is_ra_save_ins(union mips_instruction *ip) +{ + /* sw / sd $ra, offset($sp) */ + return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) + && ip->i_format.rs == 29 && ip->i_format.rt == 31; +} + +static inline int is_sp_move_ins(union mips_instruction *ip) +{ + /* addiu/daddiu sp,sp,-imm */ + if (ip->i_format.rs != 29 || ip->i_format.rt != 29) + return 0; + if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) + return 1; + return 0; +} + +/* + * Looks for specific instructions that mark the end of a function. + * This usually means we ran into the code area of the previous function. + */ +static inline int is_end_of_function_marker(union mips_instruction *ip) +{ + /* jr ra */ + if (ip->r_format.func == jr_op && ip->r_format.rs == 31) + return 1; + /* lui gp */ + if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28) + return 1; + return 0; +} + +/* + * TODO for userspace stack unwinding: + * - handle cases where the stack is adjusted inside a function + * (generally doesn't happen) + * - find optimal value for max_instr_check + * - try to find a way to handle leaf functions + */ + +static inline int unwind_user_frame(struct stackframe *old_frame, + const unsigned int max_instr_check) +{ + struct stackframe new_frame = *old_frame; + off_t ra_offset = 0; + size_t stack_size = 0; + unsigned long addr; + + if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0) + return -9; + + for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc) + && (!ra_offset || !stack_size); --addr) { + union mips_instruction ip; + + if (get_mem(addr, (unsigned long *) &ip)) + return -11; + + if (is_sp_move_ins(&ip)) { + int stack_adjustment = ip.i_format.simmediate; + if (stack_adjustment > 0) + /* This marks the end of the previous function, + which means we overran. */ + break; + stack_size = (unsigned) stack_adjustment; + } else if (is_ra_save_ins(&ip)) { + int ra_slot = ip.i_format.simmediate; + if (ra_slot < 0) + /* This shouldn't happen. 
*/ + break; + ra_offset = ra_slot; + } else if (is_end_of_function_marker(&ip)) + break; + } + + if (!ra_offset || !stack_size) + return -1; + + if (ra_offset) { + new_frame.ra = old_frame->sp + ra_offset; + if (get_mem(new_frame.ra, &(new_frame.ra))) + return -13; + } + + if (stack_size) { + new_frame.sp = old_frame->sp + stack_size; + if (get_mem(new_frame.sp, &(new_frame.sp))) + return -14; + } + + if (new_frame.sp > old_frame->sp) + return -2; + + new_frame.pc = old_frame->ra; + *old_frame = new_frame; + + return 0; +} + +static inline void do_user_backtrace(unsigned long low_addr, + struct stackframe *frame, + unsigned int depth) +{ + const unsigned int max_instr_check = 512; + const unsigned long high_addr = low_addr + THREAD_SIZE; + + while (depth-- && !unwind_user_frame(frame, max_instr_check)) { + oprofile_add_trace(frame->ra); + if (frame->sp < low_addr || frame->sp > high_addr) + break; + } +} + +#ifndef CONFIG_KALLSYMS +static inline void do_kernel_backtrace(unsigned long low_addr, + struct stackframe *frame, + unsigned int depth) { } +#else +static inline void do_kernel_backtrace(unsigned long low_addr, + struct stackframe *frame, + unsigned int depth) +{ + while (depth-- && frame->pc) { + frame->pc = unwind_stack_by_address(low_addr, + &(frame->sp), + frame->pc, + &(frame->ra)); + oprofile_add_trace(frame->ra); + } +} +#endif + +void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth) +{ + struct stackframe frame = { .sp = regs->regs[29], + .pc = regs->cp0_epc, + .ra = regs->regs[31] }; + const int userspace = user_mode(regs); + const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE); + + if (userspace) + do_user_backtrace(low_addr, &frame, depth); + else + do_kernel_backtrace(low_addr, &frame, depth); +} diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c index f9eb1aba634..d1f2d4c52d4 100644 --- a/arch/mips/oprofile/common.c +++ b/arch/mips/oprofile/common.c @@ -115,6 +115,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->start = op_mips_start; ops->stop = op_mips_stop; ops->cpu_type = lmodel->cpu_type; + ops->backtrace = op_mips_backtrace; printk(KERN_INFO "oprofile: using %s performance monitoring.\n", lmodel->cpu_type); diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h index f04b54fb37d..7c2da27ece0 100644 --- a/arch/mips/oprofile/op_impl.h +++ b/arch/mips/oprofile/op_impl.h @@ -36,4 +36,6 @@ struct op_mips_model { unsigned char num_counters; }; +void op_mips_backtrace(struct pt_regs * const regs, unsigned int depth); + #endif diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c index 1a94c989418..60719244933 100644 --- a/arch/mips/sgi-ip22/ip22-time.c +++ b/arch/mips/sgi-ip22/ip22-time.c @@ -10,6 +10,7 @@ * Copyright (C) 2003, 06 Ralf Baechle (ralf@linux-mips.org) */ #include <linux/bcd.h> +#include <linux/i8253.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/kernel.h> @@ -20,7 +21,6 @@ #include <asm/cpu.h> #include <asm/mipsregs.h> -#include <asm/i8253.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/time.h> diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index 0904d4d30cb..ec0be14996a 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -1,11 +1,11 @@ #include <linux/types.h> +#include <linux/i8253.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/time.h> #include <linux/clockchips.h> -#include <asm/i8253.h> #include <asm/sni.h> #include <asm/time.h> #include 
<asm-generic/rtc.h> diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 3d6e60dad9d..780560b330d 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -15,6 +15,7 @@ * User space memory access functions */ #include <linux/thread_info.h> +#include <linux/kernel.h> #include <asm/page.h> #include <asm/errno.h> diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c index f03cb278828..bd3e5e73826 100644 --- a/arch/mn10300/kernel/traps.c +++ b/arch/mn10300/kernel/traps.c @@ -28,7 +28,7 @@ #include <linux/irq.h> #include <asm/processor.h> #include <asm/system.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <asm/io.h> #include <asm/atomic.h> #include <asm/smp.h> @@ -156,7 +156,7 @@ int die_if_no_fixup(const char *str, struct pt_regs *regs, case EXCEP_TRAP: case EXCEP_UNIMPINS: - if (get_user(opcode, (uint8_t __user *)regs->pc) != 0) + if (probe_kernel_read(&opcode, (u8 *)regs->pc, 1) < 0) break; if (opcode == 0xff) { if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0)) diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S index 6f702a6ab39..13c4814c29f 100644 --- a/arch/mn10300/kernel/vmlinux.lds.S +++ b/arch/mn10300/kernel/vmlinux.lds.S @@ -44,6 +44,7 @@ SECTIONS RO_DATA(PAGE_SIZE) /* writeable */ + _sdata = .; /* Start of rw data section */ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) _edata = .; diff --git a/arch/mn10300/mm/cache-dbg-flush-by-reg.S b/arch/mn10300/mm/cache-dbg-flush-by-reg.S index 665919f2ab6..a775ea5d7ce 100644 --- a/arch/mn10300/mm/cache-dbg-flush-by-reg.S +++ b/arch/mn10300/mm/cache-dbg-flush-by-reg.S @@ -120,14 +120,14 @@ debugger_local_cache_flushinv_one: # conditionally purge this line in all ways mov d1,(L1_CACHE_WAYDISP*0,a0) -debugger_local_cache_flushinv_no_dcache: +debugger_local_cache_flushinv_one_no_dcache: # # now try to flush the icache # mov CHCTR,a0 movhu (a0),d0 btst CHCTR_ICEN,d0 - beq mn10300_local_icache_inv_range_reg_end + beq debugger_local_cache_flushinv_one_end LOCAL_CLI_SAVE(d1) diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h index 9608d2cf214..e67eb9c3d1b 100644 --- a/arch/parisc/include/asm/mmzone.h +++ b/arch/parisc/include/asm/mmzone.h @@ -14,13 +14,6 @@ extern struct node_map_data node_data[]; #define NODE_DATA(nid) (&node_data[nid].pg_data) -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) \ -({ \ - pg_data_t *__pgdat = NODE_DATA(nid); \ - __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ -}) - /* We have these possible memory map layouts: * Astro: 0-3.75, 67.75-68, 4-64 * zx1: 0-1, 257-260, 4-256 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2729c6663d8..cdf7a0a6440 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -134,6 +134,7 @@ config PPC select GENERIC_IRQ_SHOW_LEVEL select HAVE_RCU_TABLE_FREE if SMP select HAVE_SYSCALL_TRACEPOINTS + select HAVE_BPF_JIT if (PPC64 && NET) config EARLY_PRINTK bool diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index b7212b619c5..b94740f36b1 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -154,7 +154,8 @@ core-y += arch/powerpc/kernel/ \ arch/powerpc/lib/ \ arch/powerpc/sysdev/ \ arch/powerpc/platforms/ \ - arch/powerpc/math-emu/ + arch/powerpc/math-emu/ \ + arch/powerpc/net/ core-$(CONFIG_XMON) += arch/powerpc/xmon/ core-$(CONFIG_KVM) += arch/powerpc/kvm/ diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore index 
3d80c3e9cf6..12da77ec022 100644 --- a/arch/powerpc/boot/.gitignore +++ b/arch/powerpc/boot/.gitignore @@ -1,5 +1,4 @@ addnote -dtc empty.c hack-coff infblock.c diff --git a/arch/powerpc/boot/dtc-src/.gitignore b/arch/powerpc/boot/dtc-src/.gitignore deleted file mode 100644 index a7c3f94e5e7..00000000000 --- a/arch/powerpc/boot/dtc-src/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -dtc-lexer.lex.c -dtc-parser.tab.c -dtc-parser.tab.h diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts index 4f685a779f4..98d9426d4b8 100644 --- a/arch/powerpc/boot/dts/p1022ds.dts +++ b/arch/powerpc/boot/dts/p1022ds.dts @@ -209,8 +209,10 @@ wm8776:codec@1a { compatible = "wlf,wm8776"; reg = <0x1a>; - /* MCLK source is a stand-alone oscillator */ - clock-frequency = <12288000>; + /* + * clock-frequency will be set by U-Boot if + * the clock is enabled. + */ }; }; @@ -280,7 +282,8 @@ codec-handle = <&wm8776>; fsl,playback-dma = <&dma00>; fsl,capture-dma = <&dma01>; - fsl,fifo-depth = <16>; + fsl,fifo-depth = <15>; + fsl,ssi-asynchronous; }; dma@c300 { diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig index 7f7e4a87860..22e719575c6 100644 --- a/arch/powerpc/configs/52xx/pcm030_defconfig +++ b/arch/powerpc/configs/52xx/pcm030_defconfig @@ -85,7 +85,7 @@ CONFIG_USB_OHCI_HCD=m CONFIG_USB_OHCI_HCD_PPC_OF_BE=y # CONFIG_USB_OHCI_HCD_PCI is not set CONFIG_USB_STORAGE=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_PCF8563=m CONFIG_EXT2_FS=m CONFIG_EXT3_FS=m diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index 6472322bf13..185c292b0f1 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -141,7 +141,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y # CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=m CONFIG_USB_STORAGE=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_PS3=m CONFIG_EXT2_FS=m CONFIG_EXT3_FS=m diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index c9f212b5f3d..80bc5de7ee1 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -148,7 +148,6 @@ CONFIG_SCSI_SAS_ATTRS=m CONFIG_SCSI_CXGB3_ISCSI=m CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_SCSI_BNX2_ISCSI=m CONFIG_BE2ISCSI=m CONFIG_SCSI_IBMVSCSI=y CONFIG_SCSI_IBMVFC=m diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h deleted file mode 100644 index a71c9c1455a..00000000000 --- a/arch/powerpc/include/asm/8253pit.h +++ /dev/null @@ -1,3 +0,0 @@ -/* - * 8253/8254 Programmable Interval Timer - */ diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 45921672b97..2cc41c715d2 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type); #define PPC_WARN_EMULATED(type, regs) \ do { \ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ - 1, 0, regs, 0); \ + 1, regs, 0); \ __PPC_WARN_EMULATED(type); \ } while (0) #define PPC_WARN_ALIGNMENT(type, regs) \ do { \ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ - 1, 0, regs, regs->dar); \ + 1, regs, regs->dar); \ __PPC_WARN_EMULATED(type); \ } while (0) diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h index 1c33ec17ca3..80fd4d2b4a6 100644 --- a/arch/powerpc/include/asm/hw_breakpoint.h +++ b/arch/powerpc/include/asm/hw_breakpoint.h 
@@ -57,7 +57,7 @@ void hw_breakpoint_pmu_read(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); extern struct pmu perf_ops_bp; -extern void ptrace_triggered(struct perf_event *bp, int nmi, +extern void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs); static inline void hw_breakpoint_disable(void) { diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h index fd3fd58bad8..7b589178be4 100644 --- a/arch/powerpc/include/asm/mmzone.h +++ b/arch/powerpc/include/asm/mmzone.h @@ -38,13 +38,6 @@ u64 memory_hotplug_max(void); #define memory_hotplug_max() memblock_end_of_DRAM() #endif -/* - * Following are macros that each numa implmentation must define. - */ - -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn) - #else #define memory_hotplug_max() memblock_end_of_DRAM() #endif /* CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index b90dbf8e5cd..90bd3ed4816 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -171,15 +171,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) #ifndef CONFIG_PPC64 -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - struct pci_controller *host; - - if (bus->self) - return pci_device_to_OF_node(bus->self); - host = pci_bus_to_host(bus); - return host ? host->dn : NULL; -} +extern int pci_device_from_OF_node(struct device_node *node, + u8 *bus, u8 *devfn); +extern void pci_create_OF_bus_map(void); static inline int isa_vaddr_is_ioport(void __iomem *address) { @@ -223,17 +217,8 @@ struct pci_dn { /* Get the pointer to a device_node's pci_dn */ #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) -extern struct device_node *fetch_dev_dn(struct pci_dev *dev); extern void * update_dn_pci_info(struct device_node *dn, void *data); -/* Get a device_node from a pci_dev. This code must be fast except - * in the case where the sysdata is incorrect and needs to be fixed - * up (this will only happen once). */ -static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) -{ - return dev->dev.of_node ? 
dev->dev.of_node : fetch_dev_dn(dev); -} - static inline int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn) { @@ -244,14 +229,6 @@ static inline int pci_device_from_OF_node(struct device_node *np, return 0; } -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - if (bus->self) - return pci_device_to_OF_node(bus->self); - else - return bus->dev.of_node; /* Must be root bus (PHB) */ -} - /** Find the bus corresponding to the indicated device node */ extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 7d7790954e0..1f522680ea1 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb); extern struct pci_dev *of_create_pci_dev(struct device_node *node, struct pci_bus *bus, int devfn); -extern void of_scan_pci_bridge(struct device_node *node, - struct pci_dev *dev); +extern void of_scan_pci_bridge(struct pci_dev *dev); extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index e472659d906..e980faae422 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -71,6 +71,42 @@ #define PPC_INST_ERATSX 0x7c000126 #define PPC_INST_ERATSX_DOT 0x7c000127 +/* Misc instructions for BPF compiler */ +#define PPC_INST_LD 0xe8000000 +#define PPC_INST_LHZ 0xa0000000 +#define PPC_INST_LWZ 0x80000000 +#define PPC_INST_STD 0xf8000000 +#define PPC_INST_STDU 0xf8000001 +#define PPC_INST_MFLR 0x7c0802a6 +#define PPC_INST_MTLR 0x7c0803a6 +#define PPC_INST_CMPWI 0x2c000000 +#define PPC_INST_CMPDI 0x2c200000 +#define PPC_INST_CMPLW 0x7c000040 +#define PPC_INST_CMPLWI 0x28000000 +#define PPC_INST_ADDI 0x38000000 +#define PPC_INST_ADDIS 0x3c000000 +#define PPC_INST_ADD 0x7c000214 +#define PPC_INST_SUB 0x7c000050 +#define PPC_INST_BLR 0x4e800020 +#define PPC_INST_BLRL 0x4e800021 +#define PPC_INST_MULLW 0x7c0001d6 +#define PPC_INST_MULHWU 0x7c000016 +#define PPC_INST_MULLI 0x1c000000 +#define PPC_INST_DIVWU 0x7c0003d6 +#define PPC_INST_RLWINM 0x54000000 +#define PPC_INST_RLDICR 0x78000004 +#define PPC_INST_SLW 0x7c000030 +#define PPC_INST_SRW 0x7c000430 +#define PPC_INST_AND 0x7c000038 +#define PPC_INST_ANDDOT 0x7c000039 +#define PPC_INST_OR 0x7c000378 +#define PPC_INST_ANDI 0x70000000 +#define PPC_INST_ORI 0x60000000 +#define PPC_INST_ORIS 0x64000000 +#define PPC_INST_NEG 0x7c0000d0 +#define PPC_INST_BRANCH 0x48000000 +#define PPC_INST_BRANCH_COND 0x40800000 + /* macros to insert fields into opcodes */ #define __PPC_RA(a) (((a) & 0x1f) << 16) #define __PPC_RB(b) (((b) & 0x1f) << 11) @@ -83,6 +119,10 @@ #define __PPC_T_TLB(t) (((t) & 0x3) << 21) #define __PPC_WC(w) (((w) & 0x3) << 21) #define __PPC_WS(w) (((w) & 0x1f) << 11) +#define __PPC_SH(s) __PPC_WS(s) +#define __PPC_MB(s) (((s) & 0x1f) << 6) +#define __PPC_ME(s) (((s) & 0x1f) << 1) +#define __PPC_BI(s) (((s) & 0x1f) << 16) /* * Only use the larx hint bit on 64bit CPUs. 
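The new __PPC_SH()/__PPC_MB()/__PPC_ME() helpers, together with the existing register-field macros, let the BPF JIT further down assemble rlwinm-class instructions by OR-ing fields into a base opcode. As a rough standalone sketch of that arithmetic (the FLD_* names are local stand-ins for the header macros, and the RS/RT field position at bit 21 is taken from the rest of ppc-opcode.h rather than from this hunk):

/* Compose "slwi r4, r5, 3" (= rlwinm r4, r5, 3, 0, 28) by OR-ing
 * bit-fields into the base opcode, the way the JIT macros do. */
#include <stdio.h>
#include <stdint.h>

#define PPC_INST_RLWINM 0x54000000
#define FLD_RS(r) (((r) & 0x1f) << 21)  /* source register (RS/RT field) */
#define FLD_RA(r) (((r) & 0x1f) << 16)  /* destination, as __PPC_RA()    */
#define FLD_SH(s) (((s) & 0x1f) << 11)  /* shift amount, as __PPC_SH()   */
#define FLD_MB(m) (((m) & 0x1f) << 6)   /* mask begin,   as __PPC_MB()   */
#define FLD_ME(m) (((m) & 0x1f) << 1)   /* mask end,     as __PPC_ME()   */

int main(void)
{
        /* slwi d,a,n is rlwinm d,a,n,0,31-n; here d = r4, a = r5, n = 3 */
        uint32_t insn = PPC_INST_RLWINM | FLD_RA(4) | FLD_RS(5) |
                        FLD_SH(3) | FLD_MB(0) | FLD_ME(31 - 3);

        printf("slwi r4,r5,3 encodes as 0x%08x\n", (unsigned int)insn);
        return 0;
}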
e500v1/v2 based CPUs will treat a diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index c189aa5fe1f..b823536375d 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -22,20 +22,6 @@ #define HAVE_ARCH_DEVTREE_FIXUPS -#ifdef CONFIG_PPC32 -/* - * PCI <-> OF matching functions - * (XXX should these be here?) - */ -struct pci_bus; -struct pci_dev; -extern int pci_device_from_OF_node(struct device_node *node, - u8* bus, u8* devfn); -extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int); -extern struct device_node* pci_device_to_OF_node(struct pci_dev *); -extern void pci_create_OF_bus_map(void); -#endif - /* * OF address retreival & translation */ diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h index d902abd3399..b1d2deceeed 100644 --- a/arch/powerpc/include/asm/rio.h +++ b/arch/powerpc/include/asm/rio.h @@ -14,7 +14,7 @@ #define ASM_PPC_RIO_H extern void platform_rio_init(void); -#ifdef CONFIG_RAPIDIO +#ifdef CONFIG_FSL_RIO extern int fsl_rio_mcheck_exception(struct pt_regs *); #else static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; } diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 34d2722b945..9fb933248ab 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -1979,7 +1979,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .pvr_value = 0x80240000, .cpu_name = "e5500", .cpu_features = CPU_FTRS_E5500, - .cpu_user_features = COMMON_USER_BOOKE, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c index b150b510510..cb2e2949c8d 100644 --- a/arch/powerpc/kernel/e500-pmu.c +++ b/arch/powerpc/kernel/e500-pmu.c @@ -75,6 +75,11 @@ static int e500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static int num_events = 128; diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index 2cc5e0301d0..845a5847889 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c @@ -388,6 +388,11 @@ static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; struct power_pmu mpc7450_pmu = { diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 893af2a9cd0..a3c92770e42 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1097,9 +1097,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) if (dev->is_added) continue; - /* Setup OF node pointer in the device */ - dev->dev.of_node = pci_device_to_OF_node(dev); - /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init */ @@ -1685,6 +1682,13 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); } +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct pci_controller *hose = 
bus->sysdata; + + return of_node_get(hose->dn); +} + /** * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus * @hose: Pointer to the PCI host controller instance structure @@ -1705,7 +1709,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose) hose->global_number); return; } - bus->dev.of_node = of_node_get(node); bus->secondary = hose->first_busno; hose->bus = bus; diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index bedb370459f..86585508e9c 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -167,150 +167,26 @@ pcibios_make_OF_bus_map(void) #endif } -typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); - -static struct device_node* -scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data) -{ - struct device_node *node; - struct device_node* sub_node; - - for_each_child_of_node(parent, node) { - const unsigned int *class_code; - - if (filter(node, data)) { - of_node_put(node); - return node; - } - - /* For PCI<->PCI bridges or CardBus bridges, we go down - * Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. - */ - class_code = of_get_property(node, "class-code", NULL); - if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && - (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && - strcmp(node->name, "multifunc-device")) - continue; - sub_node = scan_OF_pci_childs(node, filter, data); - if (sub_node) { - of_node_put(node); - return sub_node; - } - } - return NULL; -} - -static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, - unsigned int devfn) -{ - struct device_node *np, *cnp; - const u32 *reg; - unsigned int psize; - - for_each_child_of_node(parent, np) { - reg = of_get_property(np, "reg", &psize); - if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) - return np; - - /* Note: some OFs create a parent node "multifunc-device" as - * a fake root for all functions of a multi-function device, - * we go down them as well. */ - if (!strcmp(np->name, "multifunc-device")) { - cnp = scan_OF_for_pci_dev(np, devfn); - if (cnp) - return cnp; - } - } - return NULL; -} - - -static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) -{ - struct device_node *parent, *np; - - /* Are we a root bus ? */ - if (bus->self == NULL || bus->parent == NULL) { - struct pci_controller *hose = pci_bus_to_host(bus); - if (hose == NULL) - return NULL; - return of_node_get(hose->dn); - } - - /* not a root bus, we need to get our parent */ - parent = scan_OF_for_pci_bus(bus->parent); - if (parent == NULL) - return NULL; - - /* now iterate for children for a match */ - np = scan_OF_for_pci_dev(parent, bus->self->devfn); - of_node_put(parent); - - return np; -} - -/* - * Scans the OF tree for a device node matching a PCI device - */ -struct device_node * -pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) -{ - struct device_node *parent, *np; - - pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); - parent = scan_OF_for_pci_bus(bus); - if (parent == NULL) - return NULL; - pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>"); - np = scan_OF_for_pci_dev(parent, devfn); - of_node_put(parent); - pr_debug(" result is %s\n", np ? np->full_name : "<NULL>"); - - /* XXX most callers don't release the returned node - * mostly because ppc64 doesn't increase the refcount, - * we need to fix that. 
- */ - return np; -} -EXPORT_SYMBOL(pci_busdev_to_OF_node); - -struct device_node* -pci_device_to_OF_node(struct pci_dev *dev) -{ - return pci_busdev_to_OF_node(dev->bus, dev->devfn); -} -EXPORT_SYMBOL(pci_device_to_OF_node); - -static int -find_OF_pci_device_filter(struct device_node* node, void* data) -{ - return ((void *)node == data); -} /* * Returns the PCI device matching a given OF node */ -int -pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) +int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) { - const unsigned int *reg; - struct pci_controller* hose; - struct pci_dev* dev = NULL; - - /* Make sure it's really a PCI device */ - hose = pci_find_hose_for_OF_device(node); - if (!hose || !hose->dn) - return -ENODEV; - if (!scan_OF_pci_childs(hose->dn, - find_OF_pci_device_filter, (void *)node)) + struct pci_dev *dev = NULL; + const __be32 *reg; + int size; + + /* Check if it might have a chance to be a PCI device */ + if (!pci_find_hose_for_OF_device(node)) return -ENODEV; - reg = of_get_property(node, "reg", NULL); - if (!reg) + + reg = of_get_property(node, "reg", &size); + if (!reg || size < 5 * sizeof(u32)) return -ENODEV; - *bus = (reg[0] >> 16) & 0xff; - *devfn = ((reg[0] >> 8) & 0xff); + + *bus = (be32_to_cpup(®[0]) >> 16) & 0xff; + *devfn = (be32_to_cpup(®[0]) >> 8) & 0xff; /* Ok, here we need some tweak. If we have already renumbered * all busses, we can't rely on the OF bus number any more. diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 6baabc13306..478f8d78716 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -142,53 +142,6 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb) traverse_pci_devices(dn, update_dn_pci_info, phb); } -/* - * Traversal func that looks for a <busno,devfcn> value. - * If found, the pci_dn is returned (thus terminating the traversal). - */ -static void *is_devfn_node(struct device_node *dn, void *data) -{ - int busno = ((unsigned long)data >> 8) & 0xff; - int devfn = ((unsigned long)data) & 0xff; - struct pci_dn *pci = dn->data; - - if (pci && (devfn == pci->devfn) && (busno == pci->busno)) - return dn; - return NULL; -} - -/* - * This is the "slow" path for looking up a device_node from a - * pci_dev. It will hunt for the device under its parent's - * phb and then update of_node pointer. - * - * It may also do fixups on the actual device since this happens - * on the first read/write. - * - * Note that it also must deal with devices that don't exist. - * In this case it may probe for real hardware ("just in case") - * and add a device_node to the device tree if necessary. - * - * Is this function necessary anymore now that dev->dev.of_node is - * used to store the node pointer? - * - */ -struct device_node *fetch_dev_dn(struct pci_dev *dev) -{ - struct pci_controller *phb = dev->sysdata; - struct device_node *dn; - unsigned long searchval = (dev->bus->number << 8) | dev->devfn; - - if (WARN_ON(!phb)) - return NULL; - - dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval); - if (dn) - dev->dev.of_node = dn; - return dn; -} -EXPORT_SYMBOL(fetch_dev_dn); - /** * pci_devs_phb_init - Initialize phbs and pci devs under them. 
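The rewritten pci_device_from_OF_node() above now pulls the bus and devfn straight out of the first cell (phys.hi) of the device node's "reg" property. A small userspace sketch of that bit layout, using a made-up cell value:

/* Decode bus/devfn from an OF PCI "reg" phys.hi cell with the same shifts
 * as pci_device_from_OF_node(); the value below is invented for the demo. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t phys_hi = 0x00012800;          /* bus 1, device 5, function 0 */
        unsigned int bus   = (phys_hi >> 16) & 0xff;
        unsigned int devfn = (phys_hi >> 8) & 0xff;

        printf("bus %u, devfn 0x%02x (slot %u, function %u)\n",
               bus, devfn, devfn >> 3, devfn & 7);
        return 0;
}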
* diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 1e89a72fd03..fe0a5ad6f73 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -202,9 +202,9 @@ EXPORT_SYMBOL(of_create_pci_dev); * this routine in turn call of_scan_bus() recusively to scan for more child * devices. */ -void __devinit of_scan_pci_bridge(struct device_node *node, - struct pci_dev *dev) +void __devinit of_scan_pci_bridge(struct pci_dev *dev) { + struct device_node *node = dev->dev.of_node; struct pci_bus *bus; const u32 *busrange, *ranges; int len, i, mode; @@ -238,7 +238,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node, bus->primary = dev->bus->number; bus->subordinate = busrange[1]; bus->bridge_ctl = 0; - bus->dev.of_node = of_node_get(node); /* parse ranges property */ /* PCI #address-cells == 3 and #size-cells == 2 always */ @@ -335,9 +334,7 @@ static void __devinit __of_scan_bus(struct device_node *node, list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { - struct device_node *child = pci_device_to_OF_node(dev); - if (child) - of_scan_pci_bridge(child, dev); + of_scan_pci_bridge(dev); } } } diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 822f63008ae..14967de9887 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1207,7 +1207,7 @@ struct pmu power_pmu = { * here so there is no possibility of being interrupted. */ static void record_and_restart(struct perf_event *event, unsigned long val, - struct pt_regs *regs, int nmi) + struct pt_regs *regs) { u64 period = event->hw.sample_period; s64 prev, delta, left; @@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (event->attr.sample_type & PERF_SAMPLE_ADDR) perf_get_data_addr(regs, &data.addr); - if (perf_event_overflow(event, nmi, &data, regs)) + if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); } } @@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if ((int)val < 0) { /* event has overflowed */ found = 1; - record_and_restart(event, val, regs, nmi); + record_and_restart(event, val, regs); } } diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index b0dc8f7069c..0a6d2a9d569 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = { * here so there is no possibility of being interrupted. 
*/ static void record_and_restart(struct perf_event *event, unsigned long val, - struct pt_regs *regs, int nmi) + struct pt_regs *regs) { u64 period = event->hw.sample_period; s64 prev, delta, left; @@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, perf_sample_data_init(&data, 0); data.period = event->hw.last_period; - if (perf_event_overflow(event, nmi, &data, regs)) + if (perf_event_overflow(event, &data, regs)) fsl_emb_pmu_stop(event, 0); } } @@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (event) { /* event has overflowed */ found = 1; - record_and_restart(event, val, regs, nmi); + record_and_restart(event, val, regs); } else { /* * Disabled counter is negative, diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index ead8b3c2649..e9dbc2d35c9 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c @@ -587,6 +587,11 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power4_pmu = { diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index eca0ac595cb..f58a2bd41b5 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c @@ -653,6 +653,11 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power5p_pmu = { diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index d5ff0f64a5e..b1acab68414 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c @@ -595,6 +595,11 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power5_pmu = { diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 31603927e37..b24a3a23d07 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c @@ -516,6 +516,11 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power6_pmu = { diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 593740fcb79..6d9dccb2ea5 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c @@ -342,6 +342,11 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu power7_pmu = { diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 9a6e093858f..b121de9658e 
100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -467,6 +467,11 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(OP_WRITE)] = { -1, -1 }, [C(OP_PREFETCH)] = { -1, -1 }, }, + [C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */ + [C(OP_READ)] = { -1, -1 }, + [C(OP_WRITE)] = { -1, -1 }, + [C(OP_PREFETCH)] = { -1, -1 }, + }, }; static struct power_pmu ppc970_pmu = { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index f2c906b1d8d..8c3112a57cf 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -82,11 +82,29 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); +/* + * overlaps_initrd - check for overlap with page aligned extension of + * initrd. + */ +static inline int overlaps_initrd(unsigned long start, unsigned long size) +{ +#ifdef CONFIG_BLK_DEV_INITRD + if (!initrd_start) + return 0; + + return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) && + start <= _ALIGN_UP(initrd_end, PAGE_SIZE); +#else + return 0; +#endif +} + /** * move_device_tree - move tree to an unused area, if needed. * * The device tree may be allocated beyond our memory limit, or inside the - * crash kernel region for kdump. If so, move it out of the way. + * crash kernel region for kdump, or within the page aligned range of initrd. + * If so, move it out of the way. */ static void __init move_device_tree(void) { @@ -99,7 +117,8 @@ static void __init move_device_tree(void) size = be32_to_cpu(initial_boot_params->totalsize); if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || - overlaps_crashkernel(start, size)) { + overlaps_crashkernel(start, size) || + overlaps_initrd(start, size)) { p = __va(memblock_alloc(size, PAGE_SIZE)); memcpy(p, initial_boot_params, size); initial_boot_params = (struct boot_param_header *)p; @@ -555,7 +574,9 @@ static void __init early_reserve_mem(void) #ifdef CONFIG_BLK_DEV_INITRD /* then reserve the initrd, if any */ if (initrd_start && (initrd_end > initrd_start)) - memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); + memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), + _ALIGN_UP(initrd_end, PAGE_SIZE) - + _ALIGN_DOWN(initrd_start, PAGE_SIZE)); #endif /* CONFIG_BLK_DEV_INITRD */ #ifdef CONFIG_PPC32 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index cb22024f2b4..05b7dd217f6 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task) } #ifdef CONFIG_HAVE_HW_BREAKPOINT -void ptrace_triggered(struct perf_event *bp, int nmi, +void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event_attr attr; @@ -973,7 +973,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, &attr.bp_type); thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, - ptrace_triggered, task); + ptrace_triggered, NULL, task); if (IS_ERR(bp)) { thread->ptrace_bps[0] = NULL; ptrace_put_breakpoints(task); diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c index 77578c093dd..c57c19358a2 100644 --- a/arch/powerpc/kernel/rtas-rtc.c +++ b/arch/powerpc/kernel/rtas-rtc.c @@ -4,6 +4,7 @@ #include <linux/init.h> #include <linux/rtc.h> #include <linux/delay.h> +#include <linux/ratelimit.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/time.h> @@ -29,9 +30,10 @@ unsigned long __init rtas_get_boot_time(void) } } while 
(wait_time && (get_tb() < max_wait_tb)); - if (error != 0 && printk_ratelimit()) { - printk(KERN_WARNING "error: reading the clock failed (%d)\n", - error); + if (error != 0) { + printk_ratelimited(KERN_WARNING + "error: reading the clock failed (%d)\n", + error); return 0; } @@ -55,19 +57,21 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm) wait_time = rtas_busy_delay_time(error); if (wait_time) { - if (in_interrupt() && printk_ratelimit()) { + if (in_interrupt()) { memset(rtc_tm, 0, sizeof(struct rtc_time)); - printk(KERN_WARNING "error: reading clock" - " would delay interrupt\n"); + printk_ratelimited(KERN_WARNING + "error: reading clock " + "would delay interrupt\n"); return; /* delay not allowed */ } msleep(wait_time); } } while (wait_time && (get_tb() < max_wait_tb)); - if (error != 0 && printk_ratelimit()) { - printk(KERN_WARNING "error: reading the clock failed (%d)\n", - error); + if (error != 0) { + printk_ratelimited(KERN_WARNING + "error: reading the clock failed (%d)\n", + error); return; } @@ -99,9 +103,10 @@ int rtas_set_rtc_time(struct rtc_time *tm) } } while (wait_time && (get_tb() < max_wait_tb)); - if (error != 0 && printk_ratelimit()) - printk(KERN_WARNING "error: setting the clock failed (%d)\n", - error); + if (error != 0) + printk_ratelimited(KERN_WARNING + "error: setting the clock failed (%d)\n", + error); return 0; } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index b96a3a010c2..78b76dc54df 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -25,6 +25,7 @@ #include <linux/errno.h> #include <linux/elf.h> #include <linux/ptrace.h> +#include <linux/ratelimit.h> #ifdef CONFIG_PPC64 #include <linux/syscalls.h> #include <linux/compat.h> @@ -892,11 +893,12 @@ badframe: printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif - if (show_unhandled_signals && printk_ratelimit()) - printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " - "%p nip %08lx lr %08lx\n", - current->comm, current->pid, - addr, regs->nip, regs->link); + if (show_unhandled_signals) + printk_ratelimited(KERN_INFO + "%s[%d]: bad frame in handle_rt_signal32: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + addr, regs->nip, regs->link); force_sigsegv(sig, current); return 0; @@ -1058,11 +1060,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, return 0; bad: - if (show_unhandled_signals && printk_ratelimit()) - printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " - "%p nip %08lx lr %08lx\n", - current->comm, current->pid, - rt_sf, regs->nip, regs->link); + if (show_unhandled_signals) + printk_ratelimited(KERN_INFO + "%s[%d]: bad frame in sys_rt_sigreturn: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + rt_sf, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; @@ -1149,12 +1152,12 @@ int sys_debug_setcontext(struct ucontext __user *ctx, * We kill the task with a SIGSEGV in this situation. 
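These hunks replace the open-coded "if (printk_ratelimit()) printk(...)" pattern with printk_ratelimited(), which gives each call site its own rate-limit state (5 seconds / 10 messages by default) instead of testing the single global state. A toy model of that interval/burst behaviour, only to illustrate the semantics and not the kernel's implementation:

/* Toy interval/burst rate limiter, sketching the behaviour that
 * printk_ratelimited() relies on; numbers mirror the kernel defaults. */
#include <stdio.h>
#include <time.h>

struct rl { time_t begin; int interval; int burst; int printed; };

static int rl_allow(struct rl *r)
{
        time_t now = time(NULL);

        if (r->begin == 0 || now - r->begin >= r->interval) {
                r->begin = now;         /* new window: reset the budget */
                r->printed = 0;
        }
        if (r->printed >= r->burst)
                return 0;               /* suppressed for this window */
        r->printed++;
        return 1;
}

int main(void)
{
        struct rl r = { 0, 5 /* seconds */, 10 /* messages */, 0 };
        int i;

        for (i = 0; i < 20; i++)
                if (rl_allow(&r))
                        printf("message %d\n", i);
        return 0;
}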
*/ if (do_setcontext(ctx, regs, 1)) { - if (show_unhandled_signals && printk_ratelimit()) - printk(KERN_INFO "%s[%d]: bad frame in " - "sys_debug_setcontext: %p nip %08lx " - "lr %08lx\n", - current->comm, current->pid, - ctx, regs->nip, regs->link); + if (show_unhandled_signals) + printk_ratelimited(KERN_INFO "%s[%d]: bad frame in " + "sys_debug_setcontext: %p nip %08lx " + "lr %08lx\n", + current->comm, current->pid, + ctx, regs->nip, regs->link); force_sig(SIGSEGV, current); goto out; @@ -1236,11 +1239,12 @@ badframe: printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif - if (show_unhandled_signals && printk_ratelimit()) - printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: " - "%p nip %08lx lr %08lx\n", - current->comm, current->pid, - frame, regs->nip, regs->link); + if (show_unhandled_signals) + printk_ratelimited(KERN_INFO + "%s[%d]: bad frame in handle_signal32: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + frame, regs->nip, regs->link); force_sigsegv(sig, current); return 0; @@ -1288,11 +1292,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, return 0; badframe: - if (show_unhandled_signals && printk_ratelimit()) - printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " - "%p nip %08lx lr %08lx\n", - current->comm, current->pid, - addr, regs->nip, regs->link); + if (show_unhandled_signals) + printk_ratelimited(KERN_INFO + "%s[%d]: bad frame in sys_sigreturn: " + "%p nip %08lx lr %08lx\n", + current->comm, current->pid, + addr, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index da989fff19c..e91c736cc84 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -24,6 +24,7 @@ #include <linux/elf.h> #include <linux/ptrace.h> #include <linux/module.h> +#include <linux/ratelimit.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> @@ -380,10 +381,10 @@ badframe: printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", regs, uc, &uc->uc_mcontext); #endif - if (show_unhandled_signals && printk_ratelimit()) - printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, - current->comm, current->pid, "rt_sigreturn", - (long)uc, regs->nip, regs->link); + if (show_unhandled_signals) + printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, + current->comm, current->pid, "rt_sigreturn", + (long)uc, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; @@ -468,10 +469,10 @@ badframe: printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif - if (show_unhandled_signals && printk_ratelimit()) - printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, - current->comm, current->pid, "setup_rt_frame", - (long)frame, regs->nip, regs->link); + if (show_unhandled_signals) + printk_ratelimited(regs->msr & MSR_64BIT ? 
fmt64 : fmt32, + current->comm, current->pid, "setup_rt_frame", + (long)frame, regs->nip, regs->link); force_sigsegv(signr, current); return 0; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index f33acfd872a..03b29a6759a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -544,7 +544,7 @@ DEFINE_PER_CPU(u8, irq_work_pending); #endif /* 32 vs 64 bit */ -void set_irq_work_pending(void) +void arch_irq_work_raise(void) { preempt_disable(); set_irq_work_pending_flag(); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 0ff4ab98d50..1a0141426cd 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -34,6 +34,7 @@ #include <linux/bug.h> #include <linux/kdebug.h> #include <linux/debugfs.h> +#include <linux/ratelimit.h> #include <asm/emulated_ops.h> #include <asm/pgtable.h> @@ -197,12 +198,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) if (die("Exception in kernel mode", regs, signr)) return; } else if (show_unhandled_signals && - unhandled_signal(current, signr) && - printk_ratelimit()) { - printk(regs->msr & MSR_64BIT ? fmt64 : fmt32, - current->comm, current->pid, signr, - addr, regs->nip, regs->link, code); - } + unhandled_signal(current, signr)) { + printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, + current->comm, current->pid, signr, + addr, regs->nip, regs->link, code); + } memset(&info, 0, sizeof(info)); info.si_signo = signr; @@ -425,7 +425,7 @@ int machine_check_e500mc(struct pt_regs *regs) unsigned long reason = mcsr; int recoverable = 1; - if (reason & MCSR_BUS_RBERR) { + if (reason & MCSR_LD) { recoverable = fsl_rio_mcheck_exception(regs); if (recoverable == 1) goto silent_out; @@ -1342,9 +1342,8 @@ void altivec_assist_exception(struct pt_regs *regs) } else { /* didn't recognize the instruction */ /* XXX quick hack for now: set the non-Java bit in the VSCR */ - if (printk_ratelimit()) - printk(KERN_ERR "Unrecognized altivec instruction " - "in %s at %lx\n", current->comm, regs->nip); + printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " + "in %s at %lx\n", current->comm, regs->nip); current->thread.vscr.u[3] |= 0x10000; } } @@ -1548,9 +1547,8 @@ u32 ppc_warn_emulated; void ppc_warn_emulated_print(const char *type) { - if (printk_ratelimit()) - pr_warning("%s used emulated %s instruction\n", current->comm, - type); + pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, + type); } static int __init ppc_warn_emulated_init(void) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 54f4fb994e9..5efe8c96d37 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -31,6 +31,7 @@ #include <linux/kdebug.h> #include <linux/perf_event.h> #include <linux/magic.h> +#include <linux/ratelimit.h> #include <asm/firmware.h> #include <asm/page.h> @@ -173,7 +174,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, die("Weird page fault", regs, SIGSEGV); } - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* When running in the kernel we expect faults to occur only to * addresses in user space. 
All other faults represent errors in the @@ -319,7 +320,7 @@ good_area: } if (ret & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); #ifdef CONFIG_PPC_SMLPAR if (firmware_has_feature(FW_FEATURE_CMO)) { @@ -330,7 +331,7 @@ good_area: #endif } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); @@ -346,11 +347,10 @@ bad_area_nosemaphore: return 0; } - if (is_exec && (error_code & DSISR_PROTFAULT) - && printk_ratelimit()) - printk(KERN_CRIT "kernel tried to execute NX-protected" - " page (%lx) - exploit attempt? (uid: %d)\n", - address, current_uid()); + if (is_exec && (error_code & DSISR_PROTFAULT)) + printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected" + " page (%lx) - exploit attempt? (uid: %d)\n", + address, current_uid()); return SIGSEGV; diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index d65b591e555..5de0f254dbb 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -223,21 +223,6 @@ void free_initmem(void) #undef FREESEC } -#ifdef CONFIG_BLK_DEV_INITRD -void free_initrd_mem(unsigned long start, unsigned long end) -{ - if (start < end) - printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } -} -#endif - - #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 6374b2196a1..f6dbb4c20e6 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -99,20 +99,6 @@ void free_initmem(void) ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10); } -#ifdef CONFIG_BLK_DEV_INITRD -void free_initrd_mem(unsigned long start, unsigned long end) -{ - if (start < end) - printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); - for (; start < end; start += PAGE_SIZE) { - ClearPageReserved(virt_to_page(start)); - init_page_count(virt_to_page(start)); - free_page(start); - totalram_pages++; - } -} -#endif - static void pgd_ctor(void *addr) { memset(addr, 0, PGD_TABLE_SIZE); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 57e545b84bf..29d4dde65c4 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -382,6 +382,25 @@ void __init mem_init(void) mem_init_done = 1; } +#ifdef CONFIG_BLK_DEV_INITRD +void __init free_initrd_mem(unsigned long start, unsigned long end) +{ + if (start >= end) + return; + + start = _ALIGN_DOWN(start, PAGE_SIZE); + end = _ALIGN_UP(end, PAGE_SIZE); + pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); + + for (; start < end; start += PAGE_SIZE) { + ClearPageReserved(virt_to_page(start)); + init_page_count(virt_to_page(start)); + free_page(start); + totalram_pages++; + } +} +#endif + /* * This is called when a page has been modified by the kernel. * It just marks the page as not i-cache clean. 
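With this change the initrd is reserved, checked for overlaps (see overlaps_initrd() in prom.c above) and eventually freed on whole-page boundaries. The round-down/round-up and overlap arithmetic is ordinary; a standalone sketch with invented addresses and a 4 KiB page size:

/* Page-alignment and overlap checks as used for the initrd range;
 * addresses are made up, PAGE_SIZE assumed to be 4 KiB. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE       4096ULL
#define ALIGN_DOWN(x)   ((x) & ~(PAGE_SIZE - 1))
#define ALIGN_UP(x)     (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Same test as overlaps_initrd(): compare against the page-aligned
 * extension of the initrd image. */
static int overlaps(unsigned long long start, unsigned long long size,
                    unsigned long long rd_start, unsigned long long rd_end)
{
        return (start + size) > ALIGN_DOWN(rd_start) &&
               start <= ALIGN_UP(rd_end);
}

int main(void)
{
        unsigned long long initrd_start = 0x02000234ULL;
        unsigned long long initrd_end   = 0x02200010ULL;

        printf("reserve/free [%#llx, %#llx)\n",
               ALIGN_DOWN(initrd_start), ALIGN_UP(initrd_end));
        printf("dtb at 0x021f0000+0x4000 overlaps? %d\n",
               overlaps(0x021f0000ULL, 0x4000ULL, initrd_start, initrd_end));
        return 0;
}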
We do the i-cache diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile new file mode 100644 index 00000000000..266b3950c3a --- /dev/null +++ b/arch/powerpc/net/Makefile @@ -0,0 +1,4 @@ +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_64.o bpf_jit_comp.o diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h new file mode 100644 index 00000000000..af1ab5e9a69 --- /dev/null +++ b/arch/powerpc/net/bpf_jit.h @@ -0,0 +1,227 @@ +/* bpf_jit.h: BPF JIT compiler for PPC64 + * + * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#ifndef _BPF_JIT_H +#define _BPF_JIT_H + +#define BPF_PPC_STACK_LOCALS 32 +#define BPF_PPC_STACK_BASIC (48+64) +#define BPF_PPC_STACK_SAVE (18*8) +#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ + BPF_PPC_STACK_SAVE) +#define BPF_PPC_SLOWPATH_FRAME (48+64) + +/* + * Generated code register usage: + * + * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with: + * + * skb r3 (Entry parameter) + * A register r4 + * X register r5 + * addr param r6 + * r7-r10 scratch + * skb->data r14 + * skb headlen r15 (skb->len - skb->data_len) + * m[0] r16 + * m[...] ... + * m[15] r31 + */ +#define r_skb 3 +#define r_ret 3 +#define r_A 4 +#define r_X 5 +#define r_addr 6 +#define r_scratch1 7 +#define r_D 14 +#define r_HL 15 +#define r_M 16 + +#ifndef __ASSEMBLY__ + +/* + * Assembly helpers from arch/powerpc/net/bpf_jit.S: + */ +extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; + +#define FUNCTION_DESCR_SIZE 24 + +/* + * 16-bit immediate helper macros: HA() is for use with sign-extending instrs + * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the + * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000). 
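Concretely, IMM_HA() bumps the high half by one whenever bit 15 of the low half is set, so that a sign-extending addi after an addis still lands on the intended constant. A small check of that identity for the 32-bit case (the value is arbitrary, chosen so the low half looks negative):

/* Check the IMM_HA() identity: addis(HA(k)) followed by a sign-extending
 * addi(L(k)) reconstructs k exactly. */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define IMM_H(i)        ((uint32_t)(i) >> 16)
#define IMM_HA(i)       (((uint32_t)(i) >> 16) + (((uint32_t)(i) & 0x8000) >> 15))
#define IMM_L(i)        ((uint32_t)(i) & 0xffff)

int main(void)
{
        uint32_t k = 0x12348765;                /* low half has bit 15 set */
        int32_t lo = (int16_t)IMM_L(k);         /* what addi really adds: -0x789b */
        uint32_t rebuilt = (IMM_HA(k) << 16) + (uint32_t)lo;

        printf("H=%#x HA=%#x L=%#x -> rebuilt %#x\n",
               (unsigned int)IMM_H(k), (unsigned int)IMM_HA(k),
               (unsigned int)IMM_L(k), (unsigned int)rebuilt);
        assert(rebuilt == k);
        return 0;
}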
+ */ +#define IMM_H(i) ((uintptr_t)(i)>>16) +#define IMM_HA(i) (((uintptr_t)(i)>>16) + \ + (((uintptr_t)(i) & 0x8000) >> 15)) +#define IMM_L(i) ((uintptr_t)(i) & 0xffff) + +#define PLANT_INSTR(d, idx, instr) \ + do { if (d) { (d)[idx] = instr; } idx++; } while (0) +#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr) + +#define PPC_NOP() EMIT(PPC_INST_NOP) +#define PPC_BLR() EMIT(PPC_INST_BLR) +#define PPC_BLRL() EMIT(PPC_INST_BLRL) +#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | __PPC_RT(r)) +#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | __PPC_RT(d) | \ + __PPC_RA(a) | IMM_L(i)) +#define PPC_MR(d, a) PPC_OR(d, a, a) +#define PPC_LI(r, i) PPC_ADDI(r, 0, i) +#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \ + __PPC_RS(d) | __PPC_RA(a) | IMM_L(i)) +#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) +#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | __PPC_RS(r) | \ + __PPC_RA(base) | ((i) & 0xfffc)) + +#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | __PPC_RT(r) | \ + __PPC_RA(base) | IMM_L(i)) +#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | __PPC_RT(r) | \ + __PPC_RA(base) | IMM_L(i)) +#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | __PPC_RT(r) | \ + __PPC_RA(base) | IMM_L(i)) +/* Convenience helpers for the above with 'far' offsets: */ +#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LD(r, r, IMM_L(i)); } } while(0) + +#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LWZ(r, r, IMM_L(i)); } } while(0) + +#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \ + else { PPC_ADDIS(r, base, IMM_HA(i)); \ + PPC_LHZ(r, r, IMM_L(i)); } } while(0) + +#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i)) +#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i)) +#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i)) +#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b)) + +#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | __PPC_RT(d) | \ + __PPC_RB(a) | __PPC_RA(b)) +#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | __PPC_RT(d) | \ + __PPC_RA(a) | __PPC_RB(b)) +#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | __PPC_RT(d) | \ + __PPC_RA(a) | __PPC_RB(b)) +#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | __PPC_RT(d) | \ + __PPC_RA(a) | __PPC_RB(b)) +#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | __PPC_RT(d) | \ + __PPC_RA(a) | IMM_L(i)) +#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | __PPC_RT(d) | \ + __PPC_RA(a) | __PPC_RB(b)) +#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(b)) +#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | __PPC_RA(d) | \ + __PPC_RS(a) | IMM_L(i)) +#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(b)) +#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(b)) +#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | __PPC_RA(d) | \ + __PPC_RS(a) | IMM_L(i)) +#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | __PPC_RA(d) | \ + __PPC_RS(a) | IMM_L(i)) +#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(s)) +#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_RB(s)) +/* slwi = rlwinm Rx, Ry, n, 0, 31-n */ +#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_SH(i) | \ + __PPC_MB(0) | __PPC_ME(31-(i))) +/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */ +#define PPC_SRWI(d, a, i) 
EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_SH(32-(i)) | \ + __PPC_MB(i) | __PPC_ME(31)) +/* sldi = rldicr Rx, Ry, n, 63-n */ +#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | __PPC_RA(d) | \ + __PPC_RS(a) | __PPC_SH(i) | \ + __PPC_MB(63-(i)) | (((i) & 0x20) >> 4)) +#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a)) + +/* Long jump; (unconditional 'branch') */ +#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \ + (((dest) - (ctx->idx * 4)) & 0x03fffffc)) +/* "cond" here covers BO:BI fields. */ +#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \ + (((cond) & 0x3ff) << 16) | \ + (((dest) - (ctx->idx * 4)) & \ + 0xfffc)) +#define PPC_LI32(d, i) do { PPC_LI(d, IMM_L(i)); \ + if ((u32)(uintptr_t)(i) >= 32768) { \ + PPC_ADDIS(d, d, IMM_HA(i)); \ + } } while(0) +#define PPC_LI64(d, i) do { \ + if (!((uintptr_t)(i) & 0xffffffff00000000ULL)) \ + PPC_LI32(d, i); \ + else { \ + PPC_LIS(d, ((uintptr_t)(i) >> 48)); \ + if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \ + PPC_ORI(d, d, \ + ((uintptr_t)(i) >> 32) & 0xffff); \ + PPC_SLDI(d, d, 32); \ + if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \ + PPC_ORIS(d, d, \ + ((uintptr_t)(i) >> 16) & 0xffff); \ + if ((uintptr_t)(i) & 0x000000000000ffffULL) \ + PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ + } } while (0); + +static inline bool is_nearbranch(int offset) +{ + return (offset < 32768) && (offset >= -32768); +} + +/* + * The fly in the ointment of code size changing from pass to pass is + * avoided by padding the short branch case with a NOP. If code size differs + * with different branch reaches we will have the issue of code moving from + * one pass to the next and will need a few passes to converge on a stable + * state. + */ +#define PPC_BCC(cond, dest) do { \ + if (is_nearbranch((dest) - (ctx->idx * 4))) { \ + PPC_BCC_SHORT(cond, dest); \ + PPC_NOP(); \ + } else { \ + /* Flip the 'T or F' bit to invert comparison */ \ + PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \ + PPC_JMP(dest); \ + } } while(0) + +/* To create a branch condition, select a bit of cr0... */ +#define CR0_LT 0 +#define CR0_GT 1 +#define CR0_EQ 2 +/* ...and modify BO[3] */ +#define COND_CMP_TRUE 0x100 +#define COND_CMP_FALSE 0x000 +/* Together, they make all required comparisons: */ +#define COND_GT (CR0_GT | COND_CMP_TRUE) +#define COND_GE (CR0_LT | COND_CMP_FALSE) +#define COND_EQ (CR0_EQ | COND_CMP_TRUE) +#define COND_NE (CR0_EQ | COND_CMP_FALSE) +#define COND_LT (CR0_LT | COND_CMP_TRUE) + +#define SEEN_DATAREF 0x10000 /* might call external helpers */ +#define SEEN_XREG 0x20000 /* X reg is used */ +#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary + * storage */ +#define SEEN_MEM_MSK 0x0ffff + +struct codegen_context { + unsigned int seen; + unsigned int idx; + int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ +}; + +#endif + +#endif diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S new file mode 100644 index 00000000000..ff4506e85cc --- /dev/null +++ b/arch/powerpc/net/bpf_jit_64.S @@ -0,0 +1,138 @@ +/* bpf_jit.S: Packet/header access helper functions + * for PPC64 BPF compiler. + * + * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. 
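PPC_LI64() above builds an arbitrary 64-bit immediate from at most five instructions: lis/ori assemble the upper word, sldi moves it into place (discarding the sign extension that lis produced), and oris/ori fill in the lower word. A standalone walk-through of one value, emulating each step's effect on the register (the value is arbitrary, with every 16-bit slice non-zero so no step is skipped):

/* Emulate the PPC_LI64() instruction sequence step by step and check
 * that the register ends up holding the requested 64-bit constant. */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint64_t k = 0xdeadbeef12345678ULL;
        uint64_t r;

        r = ((k >> 48) & 0xffff) << 16;         /* lis  r,k[63:48]          */
        if (r & 0x80000000ULL)                  /* ...addis sign-extends    */
                r |= 0xffffffff00000000ULL;
        r |= (k >> 32) & 0xffff;                /* ori  r,r,k[47:32]        */
        r <<= 32;                               /* sldi r,r,32              */
        r |= ((k >> 16) & 0xffff) << 16;        /* oris r,r,k[31:16]        */
        r |= k & 0xffff;                        /* ori  r,r,k[15:0]         */

        printf("rebuilt %#llx\n", (unsigned long long)r);
        assert(r == k);
        return 0;
}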
+ */ + +#include <asm/ppc_asm.h> +#include "bpf_jit.h" + +/* + * All of these routines are called directly from generated code, + * whose register usage is: + * + * r3 skb + * r4,r5 A,X + * r6 *** address parameter to helper *** + * r7-r10 scratch + * r14 skb->data + * r15 skb headlen + * r16-31 M[] + */ + +/* + * To consider: These helpers are so small it could be better to just + * generate them inline. Inline code can do the simple headlen check + * then branch directly to slow_path_XXX if required. (In fact, could + * load a spare GPR with the address of slow_path_generic and pass size + * as an argument, making the call site a mtlr, li and bllr.) + * + * Technically, the "is addr < 0" check is unnecessary & slowing down + * the ABS path, as it's statically checked on generation. + */ + .globl sk_load_word +sk_load_word: + cmpdi r_addr, 0 + blt bpf_error + /* Are we accessing past headlen? */ + subi r_scratch1, r_HL, 4 + cmpd r_scratch1, r_addr + blt bpf_slow_path_word + /* Nope, just hitting the header. cr0 here is eq or gt! */ + lwzx r_A, r_D, r_addr + /* When big endian we don't need to byteswap. */ + blr /* Return success, cr0 != LT */ + + .globl sk_load_half +sk_load_half: + cmpdi r_addr, 0 + blt bpf_error + subi r_scratch1, r_HL, 2 + cmpd r_scratch1, r_addr + blt bpf_slow_path_half + lhzx r_A, r_D, r_addr + blr + + .globl sk_load_byte +sk_load_byte: + cmpdi r_addr, 0 + blt bpf_error + cmpd r_HL, r_addr + ble bpf_slow_path_byte + lbzx r_A, r_D, r_addr + blr + +/* + * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) + * r_addr is the offset value, already known positive + */ + .globl sk_load_byte_msh +sk_load_byte_msh: + cmpd r_HL, r_addr + ble bpf_slow_path_byte_msh + lbzx r_X, r_D, r_addr + rlwinm r_X, r_X, 2, 32-4-2, 31-2 + blr + +bpf_error: + /* Entered with cr0 = lt */ + li r3, 0 + /* Generated code will 'blt epilogue', returning 0. */ + blr + +/* Call out to skb_copy_bits: + * We'll need to back up our volatile regs first; we have + * local variable space at r1+(BPF_PPC_STACK_BASIC). + * Allocate a new stack frame here to remain ABI-compliant in + * stashing LR. + */ +#define bpf_slow_path_common(SIZE) \ + mflr r0; \ + std r0, 16(r1); \ + /* R3 goes in parameter space of caller's frame */ \ + std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \ + stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ + /* R3 = r_skb, as passed */ \ + mr r4, r_addr; \ + li r6, SIZE; \ + bl skb_copy_bits; \ + /* R3 = 0 on success */ \ + addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ + ld r0, 16(r1); \ + ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + mtlr r0; \ + cmpdi r3, 0; \ + blt bpf_error; /* cr0 = LT */ \ + ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + /* Great success! 
*/ + +bpf_slow_path_word: + bpf_slow_path_common(4) + /* Data value is on stack, and cr0 != LT */ + lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) + blr + +bpf_slow_path_half: + bpf_slow_path_common(2) + lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) + blr + +bpf_slow_path_byte: + bpf_slow_path_common(1) + lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) + blr + +bpf_slow_path_byte_msh: + bpf_slow_path_common(1) + lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) + rlwinm r_X, r_X, 2, 32-4-2, 31-2 + blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c new file mode 100644 index 00000000000..73619d3aeb6 --- /dev/null +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -0,0 +1,694 @@ +/* bpf_jit_comp.c: BPF JIT compiler for PPC64 + * + * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation + * + * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#include <linux/moduleloader.h> +#include <asm/cacheflush.h> +#include <linux/netdevice.h> +#include <linux/filter.h> +#include "bpf_jit.h" + +#ifndef __BIG_ENDIAN +/* There are endianness assumptions herein. */ +#error "Little-endian PPC not supported in BPF compiler" +#endif + +int bpf_jit_enable __read_mostly; + + +static inline void bpf_flush_icache(void *start, void *end) +{ + smp_wmb(); + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, + struct codegen_context *ctx) +{ + int i; + const struct sock_filter *filter = fp->insns; + + if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { + /* Make stackframe */ + if (ctx->seen & SEEN_DATAREF) { + /* If we call any helpers (for loads), save LR */ + EMIT(PPC_INST_MFLR | __PPC_RT(0)); + PPC_STD(0, 1, 16); + + /* Back up non-volatile regs. */ + PPC_STD(r_D, 1, -(8*(32-r_D))); + PPC_STD(r_HL, 1, -(8*(32-r_HL))); + } + if (ctx->seen & SEEN_MEM) { + /* + * Conditionally save regs r15-r31 as some will be used + * for M[] data. + */ + for (i = r_M; i < (r_M+16); i++) { + if (ctx->seen & (1 << (i-r_M))) + PPC_STD(i, 1, -(8*(32-i))); + } + } + EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) | + (-BPF_PPC_STACKFRAME & 0xfffc)); + } + + if (ctx->seen & SEEN_DATAREF) { + /* + * If this filter needs to access skb data, + * prepare r_D and r_HL: + * r_HL = skb->len - skb->data_len + * r_D = skb->data + */ + PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, + data_len)); + PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); + PPC_SUB(r_HL, r_HL, r_scratch1); + PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); + } + + if (ctx->seen & SEEN_XREG) { + /* + * TODO: Could also detect whether first instr. sets X and + * avoid this (as below, with A). 
+ */ + PPC_LI(r_X, 0); + } + + switch (filter[0].code) { + case BPF_S_RET_K: + case BPF_S_LD_W_LEN: + case BPF_S_ANC_PROTOCOL: + case BPF_S_ANC_IFINDEX: + case BPF_S_ANC_MARK: + case BPF_S_ANC_RXHASH: + case BPF_S_ANC_CPU: + case BPF_S_ANC_QUEUE: + case BPF_S_LD_W_ABS: + case BPF_S_LD_H_ABS: + case BPF_S_LD_B_ABS: + /* first instruction sets A register (or is RET 'constant') */ + break; + default: + /* make sure we dont leak kernel information to user */ + PPC_LI(r_A, 0); + } +} + +static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) +{ + int i; + + if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { + PPC_ADDI(1, 1, BPF_PPC_STACKFRAME); + if (ctx->seen & SEEN_DATAREF) { + PPC_LD(0, 1, 16); + PPC_MTLR(0); + PPC_LD(r_D, 1, -(8*(32-r_D))); + PPC_LD(r_HL, 1, -(8*(32-r_HL))); + } + if (ctx->seen & SEEN_MEM) { + /* Restore any saved non-vol registers */ + for (i = r_M; i < (r_M+16); i++) { + if (ctx->seen & (1 << (i-r_M))) + PPC_LD(i, 1, -(8*(32-i))); + } + } + } + /* The RETs have left a return value in R3. */ + + PPC_BLR(); +} + +/* Assemble the body code between the prologue & epilogue. */ +static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, + struct codegen_context *ctx, + unsigned int *addrs) +{ + const struct sock_filter *filter = fp->insns; + int flen = fp->len; + u8 *func; + unsigned int true_cond; + int i; + + /* Start of epilogue code */ + unsigned int exit_addr = addrs[flen]; + + for (i = 0; i < flen; i++) { + unsigned int K = filter[i].k; + + /* + * addrs[] maps a BPF bytecode address into a real offset from + * the start of the body code. + */ + addrs[i] = ctx->idx * 4; + + switch (filter[i].code) { + /*** ALU ops ***/ + case BPF_S_ALU_ADD_X: /* A += X; */ + ctx->seen |= SEEN_XREG; + PPC_ADD(r_A, r_A, r_X); + break; + case BPF_S_ALU_ADD_K: /* A += K; */ + if (!K) + break; + PPC_ADDI(r_A, r_A, IMM_L(K)); + if (K >= 32768) + PPC_ADDIS(r_A, r_A, IMM_HA(K)); + break; + case BPF_S_ALU_SUB_X: /* A -= X; */ + ctx->seen |= SEEN_XREG; + PPC_SUB(r_A, r_A, r_X); + break; + case BPF_S_ALU_SUB_K: /* A -= K */ + if (!K) + break; + PPC_ADDI(r_A, r_A, IMM_L(-K)); + if (K >= 32768) + PPC_ADDIS(r_A, r_A, IMM_HA(-K)); + break; + case BPF_S_ALU_MUL_X: /* A *= X; */ + ctx->seen |= SEEN_XREG; + PPC_MUL(r_A, r_A, r_X); + break; + case BPF_S_ALU_MUL_K: /* A *= K */ + if (K < 32768) + PPC_MULI(r_A, r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_MUL(r_A, r_A, r_scratch1); + } + break; + case BPF_S_ALU_DIV_X: /* A /= X; */ + ctx->seen |= SEEN_XREG; + PPC_CMPWI(r_X, 0); + if (ctx->pc_ret0 != -1) { + PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); + } else { + /* + * Exit, returning 0; first pass hits here + * (longer worst-case code size). 
+ */ + PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); + PPC_LI(r_ret, 0); + PPC_JMP(exit_addr); + } + PPC_DIVWU(r_A, r_A, r_X); + break; + case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */ + PPC_LI32(r_scratch1, K); + /* Top 32 bits of 64bit result -> A */ + PPC_MULHWU(r_A, r_A, r_scratch1); + break; + case BPF_S_ALU_AND_X: + ctx->seen |= SEEN_XREG; + PPC_AND(r_A, r_A, r_X); + break; + case BPF_S_ALU_AND_K: + if (!IMM_H(K)) + PPC_ANDI(r_A, r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_AND(r_A, r_A, r_scratch1); + } + break; + case BPF_S_ALU_OR_X: + ctx->seen |= SEEN_XREG; + PPC_OR(r_A, r_A, r_X); + break; + case BPF_S_ALU_OR_K: + if (IMM_L(K)) + PPC_ORI(r_A, r_A, IMM_L(K)); + if (K >= 65536) + PPC_ORIS(r_A, r_A, IMM_H(K)); + break; + case BPF_S_ALU_LSH_X: /* A <<= X; */ + ctx->seen |= SEEN_XREG; + PPC_SLW(r_A, r_A, r_X); + break; + case BPF_S_ALU_LSH_K: + if (K == 0) + break; + else + PPC_SLWI(r_A, r_A, K); + break; + case BPF_S_ALU_RSH_X: /* A >>= X; */ + ctx->seen |= SEEN_XREG; + PPC_SRW(r_A, r_A, r_X); + break; + case BPF_S_ALU_RSH_K: /* A >>= K; */ + if (K == 0) + break; + else + PPC_SRWI(r_A, r_A, K); + break; + case BPF_S_ALU_NEG: + PPC_NEG(r_A, r_A); + break; + case BPF_S_RET_K: + PPC_LI32(r_ret, K); + if (!K) { + if (ctx->pc_ret0 == -1) + ctx->pc_ret0 = i; + } + /* + * If this isn't the very last instruction, branch to + * the epilogue if we've stuff to clean up. Otherwise, + * if there's nothing to tidy, just return. If we /are/ + * the last instruction, we're about to fall through to + * the epilogue to return. + */ + if (i != flen - 1) { + /* + * Note: 'seen' is properly valid only on pass + * #2. Both parts of this conditional are the + * same instruction size though, meaning the + * first pass will still correctly determine the + * code size/addresses. + */ + if (ctx->seen) + PPC_JMP(exit_addr); + else + PPC_BLR(); + } + break; + case BPF_S_RET_A: + PPC_MR(r_ret, r_A); + if (i != flen - 1) { + if (ctx->seen) + PPC_JMP(exit_addr); + else + PPC_BLR(); + } + break; + case BPF_S_MISC_TAX: /* X = A */ + PPC_MR(r_X, r_A); + break; + case BPF_S_MISC_TXA: /* A = X */ + ctx->seen |= SEEN_XREG; + PPC_MR(r_A, r_X); + break; + + /*** Constant loads/M[] access ***/ + case BPF_S_LD_IMM: /* A = K */ + PPC_LI32(r_A, K); + break; + case BPF_S_LDX_IMM: /* X = K */ + PPC_LI32(r_X, K); + break; + case BPF_S_LD_MEM: /* A = mem[K] */ + PPC_MR(r_A, r_M + (K & 0xf)); + ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_LDX_MEM: /* X = mem[K] */ + PPC_MR(r_X, r_M + (K & 0xf)); + ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_ST: /* mem[K] = A */ + PPC_MR(r_M + (K & 0xf), r_A); + ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_STX: /* mem[K] = X */ + PPC_MR(r_M + (K & 0xf), r_X); + ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); + break; + case BPF_S_LD_W_LEN: /* A = skb->len; */ + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); + break; + case BPF_S_LDX_W_LEN: /* X = skb->len; */ + PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); + break; + + /*** Ancillary info loads ***/ + + /* None of the BPF_S_ANC* codes appear to be passed by + * sk_chk_filter(). The interpreter and the x86 BPF + * compiler implement them so we do too -- they may be + * planted in future. 
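For BPF_S_ALU_DIV_K the JIT only needs the top 32 bits of a 32x32 multiply (mulhwu), because the BPF checker has already replaced the divisor K with its reciprocal, roughly ceil(2^32 / K). A quick numeric sketch of that trick (the reciprocal formula below mirrors the kernel's reciprocal_div helpers of the time; operands are made up):

/* Reciprocal-divide sketch: A / K becomes the high word of A * R,
 * with R = ceil(2^32 / K), which is exactly what mulhwu returns. */
#include <stdio.h>
#include <stdint.h>

static uint32_t reciprocal_value(uint32_t k)
{
        return (uint32_t)(((1ULL << 32) + k - 1) / k);  /* ceil(2^32 / k) */
}

static uint32_t mulhwu(uint32_t a, uint32_t b)
{
        return (uint32_t)(((uint64_t)a * b) >> 32);     /* top 32 bits */
}

int main(void)
{
        uint32_t a = 1000000007, k = 1000;
        uint32_t r = reciprocal_value(k);

        printf("a/k = %u, mulhwu(a, R) = %u (R = %#x)\n",
               (unsigned int)(a / k), (unsigned int)mulhwu(a, r),
               (unsigned int)r);
        return 0;
}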
+ */ + case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, + protocol) != 2); + PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + protocol)); + /* ntohs is a NOP with BE loads. */ + break; + case BPF_S_ANC_IFINDEX: + PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, + dev)); + PPC_CMPDI(r_scratch1, 0); + if (ctx->pc_ret0 != -1) { + PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); + } else { + /* Exit, returning 0; first pass hits here. */ + PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); + PPC_LI(r_ret, 0); + PPC_JMP(exit_addr); + } + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, + ifindex) != 4); + PPC_LWZ_OFFS(r_A, r_scratch1, + offsetof(struct net_device, ifindex)); + break; + case BPF_S_ANC_MARK: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + mark)); + break; + case BPF_S_ANC_RXHASH: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); + PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + rxhash)); + break; + case BPF_S_ANC_QUEUE: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, + queue_mapping) != 2); + PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, + queue_mapping)); + break; + case BPF_S_ANC_CPU: +#ifdef CONFIG_SMP + /* + * PACA ptr is r13: + * raw_smp_processor_id() = local_paca->paca_index + */ + BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, + paca_index) != 2); + PPC_LHZ_OFFS(r_A, 13, + offsetof(struct paca_struct, paca_index)); +#else + PPC_LI(r_A, 0); +#endif + break; + + /*** Absolute loads from packet header/data ***/ + case BPF_S_LD_W_ABS: + func = sk_load_word; + goto common_load; + case BPF_S_LD_H_ABS: + func = sk_load_half; + goto common_load; + case BPF_S_LD_B_ABS: + func = sk_load_byte; + common_load: + /* + * Load from [K]. Reference with the (negative) + * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported. + */ + ctx->seen |= SEEN_DATAREF; + if ((int)K < 0) + return -ENOTSUPP; + PPC_LI64(r_scratch1, func); + PPC_MTLR(r_scratch1); + PPC_LI32(r_addr, K); + PPC_BLRL(); + /* + * Helper returns 'lt' condition on error, and an + * appropriate return value in r3 + */ + PPC_BCC(COND_LT, exit_addr); + break; + + /*** Indirect loads from packet header/data ***/ + case BPF_S_LD_W_IND: + func = sk_load_word; + goto common_load_ind; + case BPF_S_LD_H_IND: + func = sk_load_half; + goto common_load_ind; + case BPF_S_LD_B_IND: + func = sk_load_byte; + common_load_ind: + /* + * Load from [X + K]. Negative offsets are tested for + * in the helper functions, and result in a 'ret 0'. + */ + ctx->seen |= SEEN_DATAREF | SEEN_XREG; + PPC_LI64(r_scratch1, func); + PPC_MTLR(r_scratch1); + PPC_ADDI(r_addr, r_X, IMM_L(K)); + if (K >= 32768) + PPC_ADDIS(r_addr, r_addr, IMM_HA(K)); + PPC_BLRL(); + /* If error, cr0.LT set */ + PPC_BCC(COND_LT, exit_addr); + break; + + case BPF_S_LDX_B_MSH: + /* + * x86 version drops packet (RET 0) when K<0, whereas + * interpreter does allow K<0 (__load_pointer, special + * ancillary data). common_load returns ENOTSUPP if K<0, + * so we fall back to interpreter & filter works. 
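BPF_S_LDX_B_MSH loads X = 4 * (P[K] & 0xf), the classic IPv4 header-length decode, and sk_load_byte_msh in bpf_jit_64.S above does it with a single rlwinm (rotate left 2, keep a 4-bit field; the 32-4-2 and 31-2 mask bounds are in big-endian bit numbering). A small check that the rotate-and-mask really equals the shift-and-mask:

/* Check that rlwinm rX,rX,2,26,29 (as emitted for BPF_S_LDX_B_MSH)
 * computes 4 * (byte & 0xf). Mask bits use IBM numbering (bit 0 = MSB). */
#include <stdio.h>
#include <stdint.h>
#include <assert.h>

static uint32_t rlwinm(uint32_t rs, int sh, int mb, int me)
{
        uint32_t rot = sh ? (rs << sh) | (rs >> (32 - sh)) : rs;
        uint32_t mask = 0;
        int b;

        for (b = mb; b <= me; b++)
                mask |= 1u << (31 - b);                 /* IBM bit b */
        return rot & mask;
}

int main(void)
{
        unsigned int byte = 0x47;       /* e.g. IPv4 version/IHL, IHL = 7 words */

        assert(rlwinm(byte, 2, 32 - 4 - 2, 31 - 2) == 4u * (byte & 0xf));
        printf("X = %u bytes\n", rlwinm(byte, 2, 26, 29));
        return 0;
}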
+ */ + func = sk_load_byte_msh; + goto common_load; + break; + + /*** Jump and branches ***/ + case BPF_S_JMP_JA: + if (K != 0) + PPC_JMP(addrs[i + 1 + K]); + break; + + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGT_X: + true_cond = COND_GT; + goto cond_branch; + case BPF_S_JMP_JGE_K: + case BPF_S_JMP_JGE_X: + true_cond = COND_GE; + goto cond_branch; + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JEQ_X: + true_cond = COND_EQ; + goto cond_branch; + case BPF_S_JMP_JSET_K: + case BPF_S_JMP_JSET_X: + true_cond = COND_NE; + /* Fall through */ + cond_branch: + /* same targets, can avoid doing the test :) */ + if (filter[i].jt == filter[i].jf) { + if (filter[i].jt > 0) + PPC_JMP(addrs[i + 1 + filter[i].jt]); + break; + } + + switch (filter[i].code) { + case BPF_S_JMP_JGT_X: + case BPF_S_JMP_JGE_X: + case BPF_S_JMP_JEQ_X: + ctx->seen |= SEEN_XREG; + PPC_CMPLW(r_A, r_X); + break; + case BPF_S_JMP_JSET_X: + ctx->seen |= SEEN_XREG; + PPC_AND_DOT(r_scratch1, r_A, r_X); + break; + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGE_K: + if (K < 32768) + PPC_CMPLWI(r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_CMPLW(r_A, r_scratch1); + } + break; + case BPF_S_JMP_JSET_K: + if (K < 32768) + /* PPC_ANDI is /only/ dot-form */ + PPC_ANDI(r_scratch1, r_A, K); + else { + PPC_LI32(r_scratch1, K); + PPC_AND_DOT(r_scratch1, r_A, + r_scratch1); + } + break; + } + /* Sometimes branches are constructed "backward", with + * the false path being the branch and true path being + * a fallthrough to the next instruction. + */ + if (filter[i].jt == 0) + /* Swap the sense of the branch */ + PPC_BCC(true_cond ^ COND_CMP_TRUE, + addrs[i + 1 + filter[i].jf]); + else { + PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]); + if (filter[i].jf != 0) + PPC_JMP(addrs[i + 1 + filter[i].jf]); + } + break; + default: + /* The filter contains something cruel & unusual. + * We don't handle it, but also there shouldn't be + * anything missing from our list. + */ + if (printk_ratelimit()) + pr_err("BPF filter opcode %04x (@%d) unsupported\n", + filter[i].code, i); + return -ENOTSUPP; + } + + } + /* Set end-of-body-code address for exit. */ + addrs[i] = ctx->idx * 4; + + return 0; +} + +void bpf_jit_compile(struct sk_filter *fp) +{ + unsigned int proglen; + unsigned int alloclen; + u32 *image = NULL; + u32 *code_base; + unsigned int *addrs; + struct codegen_context cgctx; + int pass; + int flen = fp->len; + + if (!bpf_jit_enable) + return; + + addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL); + if (addrs == NULL) + return; + + /* + * There are multiple assembly passes as the generated code will change + * size as it settles down, figuring out the max branch offsets/exit + * paths required. + * + * The range of standard conditional branches is +/- 32Kbytes. Since + * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to + * finish with 8 bytes/instruction. Not feasible, so long jumps are + * used, distinct from short branches. + * + * Current: + * + * For now, both branch types assemble to 2 words (short branches padded + * with a NOP); this is less efficient, but assembly will always complete + * after exactly 3 passes: + * + * First pass: No code buffer; Program is "faux-generated" -- no code + * emitted but maximum size of output determined (and addrs[] filled + * in). Also, we note whether we use M[], whether we use skb data, etc. + * All generation choices assumed to be 'worst-case', e.g. branches all + * far (2 instructions), return path code reduction not available, etc. 
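Each cBPF conditional jump carries two displacements, jt and jf, taken relative to the next instruction, and the cond_branch code above either branches on the true condition or, when jt is zero, inverts the condition and branches straight to the false target so the true path can fall through. A small stand-alone illustration of that target arithmetic; resolve_targets() is a name local to this sketch.

/* How jt/jf displacements become absolute instruction indices, and the
 * "swap the sense of the branch" trick used when jt == 0. Sketch only. */
#include <stdio.h>

struct cbpf_jump { unsigned char jt, jf; };

static void resolve_targets(int i, struct cbpf_jump ins)
{
	int t_true  = i + 1 + ins.jt;	/* target when the test is true  */
	int t_false = i + 1 + ins.jf;	/* target when the test is false */

	if (ins.jt == ins.jf)
		printf("insn %d: both targets equal, plain jump to %d\n",
		       i, t_true);
	else if (ins.jt == 0)
		printf("insn %d: inverted branch to %d, true path falls through\n",
		       i, t_false);
	else
		printf("insn %d: branch-if-true to %d, else jump to %d%s\n",
		       i, t_true, t_false,
		       ins.jf ? "" : " (omitted: plain fall-through)");
}

int main(void)
{
	resolve_targets(4, (struct cbpf_jump){ .jt = 0, .jf = 3 });
	resolve_targets(7, (struct cbpf_jump){ .jt = 2, .jf = 0 });
	return 0;
}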
+ * + * Second pass: Code buffer allocated with size determined previously. + * Prologue generated to support features we have seen used. Exit paths + * determined and addrs[] is filled in again, as code may be slightly + * smaller as a result. + * + * Third pass: Code generated 'for real', and branch destinations + * determined from now-accurate addrs[] map. + * + * Ideal: + * + * If we optimise this, near branches will be shorter. On the + * first assembly pass, we should err on the side of caution and + * generate the biggest code. On subsequent passes, branches will be + * generated short or long and code size will reduce. With smaller + * code, more branches may fall into the short category, and code will + * reduce more. + * + * Finally, if we see one pass generate code the same size as the + * previous pass we have converged and should now generate code for + * real. Allocating at the end will also save the memory that would + * otherwise be wasted by the (small) current code shrinkage. + * Preferably, we should do a small number of passes (e.g. 5) and if we + * haven't converged by then, get impatient and force code to generate + * as-is, even if the odd branch would be left long. The chances of a + * long jump are tiny with all but the most enormous of BPF filter + * inputs, so we should usually converge on the third pass. + */ + + cgctx.idx = 0; + cgctx.seen = 0; + cgctx.pc_ret0 = -1; + /* Scouting faux-generate pass 0 */ + if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) + /* We hit something illegal or unsupported. */ + goto out; + + /* + * Pretend to build prologue, given the features we've seen. This will + * update ctgtx.idx as it pretends to output instructions, then we can + * calculate total size from idx. + */ + bpf_jit_build_prologue(fp, 0, &cgctx); + bpf_jit_build_epilogue(0, &cgctx); + + proglen = cgctx.idx * 4; + alloclen = proglen + FUNCTION_DESCR_SIZE; + image = module_alloc(max_t(unsigned int, alloclen, + sizeof(struct work_struct))); + if (!image) + goto out; + + code_base = image + (FUNCTION_DESCR_SIZE/4); + + /* Code generation passes 1-2 */ + for (pass = 1; pass < 3; pass++) { + /* Now build the prologue, body code & epilogue for real. 
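The "Ideal" scheme sketched in the comment above is a fixed-point iteration: re-run the sizing pass until the emitted length stops changing, capped at a small pass count, and only then allocate and emit for real. A compact model of that control flow; gen_pass() is a stand-in that fakes the shrinking code size and is not part of the JIT.

/* Model of the convergence loop described above (the current code always
 * uses exactly three passes instead). Illustrative user-space C. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_PASSES 5

/* Pretend pass: the program shrinks a little until branches settle. */
static unsigned int gen_pass(int pass, bool emit)
{
	static const unsigned int size[MAX_PASSES] = { 120, 112, 110, 110, 110 };

	if (emit)
		printf("emitting %u words for real\n", size[pass]);
	return size[pass];
}

static unsigned int jit_until_converged(void)
{
	unsigned int prev = 0, cur = 0;
	int pass;

	for (pass = 0; pass < MAX_PASSES; pass++) {
		cur = gen_pass(pass, false);		/* measure only */
		if (pass > 0 && cur == prev)
			break;				/* converged */
		prev = cur;
	}
	if (pass == MAX_PASSES)				/* got impatient */
		pass = MAX_PASSES - 1;
	gen_pass(pass, true);	/* allocate cur words here, then emit */
	return cur;
}

int main(void)
{
	printf("final size: %u words\n", jit_until_converged());
	return 0;
}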
*/ + cgctx.idx = 0; + bpf_jit_build_prologue(fp, code_base, &cgctx); + bpf_jit_build_body(fp, code_base, &cgctx, addrs); + bpf_jit_build_epilogue(code_base, &cgctx); + + if (bpf_jit_enable > 1) + pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass, + proglen - (cgctx.idx * 4), cgctx.seen); + } + + if (bpf_jit_enable > 1) + pr_info("flen=%d proglen=%u pass=%d image=%p\n", + flen, proglen, pass, image); + + if (image) { + if (bpf_jit_enable > 1) + print_hex_dump(KERN_ERR, "JIT code: ", + DUMP_PREFIX_ADDRESS, + 16, 1, code_base, + proglen, false); + + bpf_flush_icache(code_base, code_base + (proglen/4)); + /* Function descriptor nastiness: Address + TOC */ + ((u64 *)image)[0] = (u64)code_base; + ((u64 *)image)[1] = local_paca->kernel_toc; + fp->bpf_func = (void *)image; + } +out: + kfree(addrs); + return; +} + +static void jit_free_defer(struct work_struct *arg) +{ + module_free(NULL, arg); +} + +/* run from softirq, we must use a work_struct to call + * module_free() from process context + */ +void bpf_jit_free(struct sk_filter *fp) +{ + if (fp->bpf_func != sk_run_filter) { + struct work_struct *work = (struct work_struct *)fp->bpf_func; + + INIT_WORK(work, jit_free_defer); + schedule_work(work); + } +} diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index 47ea1be1481..90f4496017e 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -55,14 +55,6 @@ config PPC_MPC5200_BUGFIX It is safe to say 'Y' here -config PPC_MPC5200_GPIO - bool "MPC5200 GPIO support" - depends on PPC_MPC52xx - select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO - help - Enable gpiolib support for mpc5200 based boards - config PPC_MPC5200_LPBFIFO tristate "MPC5200 LocalPlus bus FIFO driver" depends on PPC_MPC52xx diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile index 2bc8cd0c5cf..4e62486791e 100644 --- a/arch/powerpc/platforms/52xx/Makefile +++ b/arch/powerpc/platforms/52xx/Makefile @@ -14,5 +14,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y) obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o endif -obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c deleted file mode 100644 index 1757d1db4b5..00000000000 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c +++ /dev/null @@ -1,380 +0,0 @@ -/* - * MPC52xx gpio driver - * - * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/of.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/of_gpio.h> -#include <linux/io.h> -#include <linux/of_platform.h> - -#include <asm/gpio.h> -#include <asm/mpc52xx.h> -#include <sysdev/fsl_soc.h> - -static DEFINE_SPINLOCK(gpio_lock); - -struct mpc52xx_gpiochip { - struct of_mm_gpio_chip mmchip; - unsigned int shadow_dvo; - unsigned int shadow_gpioe; - unsigned int shadow_ddr; -}; - -/* - * GPIO LIB API implementation for wakeup GPIOs. - * - * There's a maximum of 8 wakeup GPIOs. Which of these are available - * for use depends on your board setup. - * - * 0 -> GPIO_WKUP_7 - * 1 -> GPIO_WKUP_6 - * 2 -> PSC6_1 - * 3 -> PSC6_0 - * 4 -> ETH_17 - * 5 -> PSC3_9 - * 6 -> PSC2_4 - * 7 -> PSC1_4 - * - */ -static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - unsigned int ret; - - ret = (in_8(®s->wkup_ival) >> (7 - gpio)) & 1; - - pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret); - - return ret; -} - -static inline void -__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - - if (val) - chip->shadow_dvo |= 1 << (7 - gpio); - else - chip->shadow_dvo &= ~(1 << (7 - gpio)); - - out_8(®s->wkup_dvo, chip->shadow_dvo); -} - -static void -mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - __mpc52xx_wkup_gpio_set(gc, gpio, val); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); -} - -static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - /* set the direction */ - chip->shadow_ddr &= ~(1 << (7 - gpio)); - out_8(®s->wkup_ddr, chip->shadow_ddr); - - /* and enable the pin */ - chip->shadow_gpioe |= 1 << (7 - gpio); - out_8(®s->wkup_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} - -static int -mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - __mpc52xx_wkup_gpio_set(gc, gpio, val); - - /* Then set direction */ - chip->shadow_ddr |= 1 << (7 - gpio); - out_8(®s->wkup_ddr, chip->shadow_ddr); - - /* Finally enable the pin */ - chip->shadow_gpioe |= 1 << (7 - gpio); - out_8(®s->wkup_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); - - return 0; -} - -static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device 
*ofdev) -{ - struct mpc52xx_gpiochip *chip; - struct mpc52xx_gpio_wkup __iomem *regs; - struct gpio_chip *gc; - int ret; - - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - - gc = &chip->mmchip.gc; - - gc->ngpio = 8; - gc->direction_input = mpc52xx_wkup_gpio_dir_in; - gc->direction_output = mpc52xx_wkup_gpio_dir_out; - gc->get = mpc52xx_wkup_gpio_get; - gc->set = mpc52xx_wkup_gpio_set; - - ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip); - if (ret) - return ret; - - regs = chip->mmchip.regs; - chip->shadow_gpioe = in_8(®s->wkup_gpioe); - chip->shadow_ddr = in_8(®s->wkup_ddr); - chip->shadow_dvo = in_8(®s->wkup_dvo); - - return 0; -} - -static int mpc52xx_gpiochip_remove(struct platform_device *ofdev) -{ - return -EBUSY; -} - -static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { - { - .compatible = "fsl,mpc5200-gpio-wkup", - }, - {} -}; - -static struct platform_driver mpc52xx_wkup_gpiochip_driver = { - .driver = { - .name = "gpio_wkup", - .owner = THIS_MODULE, - .of_match_table = mpc52xx_wkup_gpiochip_match, - }, - .probe = mpc52xx_wkup_gpiochip_probe, - .remove = mpc52xx_gpiochip_remove, -}; - -/* - * GPIO LIB API implementation for simple GPIOs - * - * There's a maximum of 32 simple GPIOs. Which of these are available - * for use depends on your board setup. - * The numbering reflects the bit numbering in the port registers: - * - * 0..1 > reserved - * 2..3 > IRDA - * 4..7 > ETHR - * 8..11 > reserved - * 12..15 > USB - * 16..17 > reserved - * 18..23 > PSC3 - * 24..27 > PSC2 - * 28..31 > PSC1 - */ -static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpio __iomem *regs = mm_gc->regs; - unsigned int ret; - - ret = (in_be32(®s->simple_ival) >> (31 - gpio)) & 1; - - return ret; -} - -static inline void -__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio __iomem *regs = mm_gc->regs; - - if (val) - chip->shadow_dvo |= 1 << (31 - gpio); - else - chip->shadow_dvo &= ~(1 << (31 - gpio)); - out_be32(®s->simple_dvo, chip->shadow_dvo); -} - -static void -mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) -{ - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - __mpc52xx_simple_gpio_set(gc, gpio, val); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); -} - -static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio __iomem *regs = mm_gc->regs; - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - /* set the direction */ - chip->shadow_ddr &= ~(1 << (31 - gpio)); - out_be32(®s->simple_ddr, chip->shadow_ddr); - - /* and enable the pin */ - chip->shadow_gpioe |= 1 << (31 - gpio); - out_be32(®s->simple_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - return 0; -} - -static int -mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) -{ - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct mpc52xx_gpiochip *chip = container_of(mm_gc, - struct mpc52xx_gpiochip, mmchip); - struct mpc52xx_gpio 
__iomem *regs = mm_gc->regs; - unsigned long flags; - - spin_lock_irqsave(&gpio_lock, flags); - - /* First set initial value */ - __mpc52xx_simple_gpio_set(gc, gpio, val); - - /* Then set direction */ - chip->shadow_ddr |= 1 << (31 - gpio); - out_be32(®s->simple_ddr, chip->shadow_ddr); - - /* Finally enable the pin */ - chip->shadow_gpioe |= 1 << (31 - gpio); - out_be32(®s->simple_gpioe, chip->shadow_gpioe); - - spin_unlock_irqrestore(&gpio_lock, flags); - - pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); - - return 0; -} - -static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) -{ - struct mpc52xx_gpiochip *chip; - struct gpio_chip *gc; - struct mpc52xx_gpio __iomem *regs; - int ret; - - chip = kzalloc(sizeof(*chip), GFP_KERNEL); - if (!chip) - return -ENOMEM; - - gc = &chip->mmchip.gc; - - gc->ngpio = 32; - gc->direction_input = mpc52xx_simple_gpio_dir_in; - gc->direction_output = mpc52xx_simple_gpio_dir_out; - gc->get = mpc52xx_simple_gpio_get; - gc->set = mpc52xx_simple_gpio_set; - - ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip); - if (ret) - return ret; - - regs = chip->mmchip.regs; - chip->shadow_gpioe = in_be32(®s->simple_gpioe); - chip->shadow_ddr = in_be32(®s->simple_ddr); - chip->shadow_dvo = in_be32(®s->simple_dvo); - - return 0; -} - -static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { - { - .compatible = "fsl,mpc5200-gpio", - }, - {} -}; - -static struct platform_driver mpc52xx_simple_gpiochip_driver = { - .driver = { - .name = "gpio", - .owner = THIS_MODULE, - .of_match_table = mpc52xx_simple_gpiochip_match, - }, - .probe = mpc52xx_simple_gpiochip_probe, - .remove = mpc52xx_gpiochip_remove, -}; - -static int __init mpc52xx_gpio_init(void) -{ - if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) - printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); - - if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) - printk(KERN_ERR "Unable to register simple GPIO driver\n"); - - return 0; -} - - -/* Make sure we get initialised before anyone else tries to use us */ -subsys_initcall(mpc52xx_gpio_init); - -/* No exit call at the moment as we cannot unregister of gpio chips */ - -MODULE_DESCRIPTION("Freescale MPC52xx gpio driver"); -MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); -MODULE_LICENSE("GPL v2"); - diff --git a/arch/powerpc/platforms/amigaone/Kconfig b/arch/powerpc/platforms/amigaone/Kconfig index 02247671771..128de25cc28 100644 --- a/arch/powerpc/platforms/amigaone/Kconfig +++ b/arch/powerpc/platforms/amigaone/Kconfig @@ -8,7 +8,7 @@ config AMIGAONE select NOT_COHERENT_CACHE select CHECK_CACHE_COHERENCY select DEFAULT_UIMAGE - select PCSPKR_PLATFORM + select HAVE_PCSPKR_PLATFORM help Select AmigaOne for the following machines: - AmigaOne SE/Teron CX (G3 only) diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig index bc0b0efdc5f..d3cdab582c5 100644 --- a/arch/powerpc/platforms/chrp/Kconfig +++ b/arch/powerpc/platforms/chrp/Kconfig @@ -1,6 +1,7 @@ config PPC_CHRP bool "Common Hardware Reference Platform (CHRP) based machines" depends on 6xx + select HAVE_PCSPKR_PLATFORM select MPIC select PPC_I8259 select PPC_INDIRECT_PCI diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index f33e08d573c..abe8d7e2ebe 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/bootmem.h> #include <linux/irq.h> +#include 
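The mpc52xx_gpio.c driver removed above is built around one pattern: a software shadow copy of each data/direction/enable register is updated under a spinlock and the whole byte or word is written back, because individual bits cannot be reliably read-modify-written in the hardware register. A reduced user-space sketch of that pattern; the mutex, struct shadow_gpio and register layout are stand-ins, with out_8() replaced by a plain volatile store.

/* Shadow-register read-modify-write, as used throughout the deleted
 * driver, reduced to portable user-space C. Illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

struct shadow_gpio {
	pthread_mutex_t lock;
	uint8_t shadow_dvo;		/* cached copy of the output register */
	volatile uint8_t *dvo_reg;	/* "memory-mapped" register */
};

static void gpio_set(struct shadow_gpio *g, unsigned int gpio, int val)
{
	pthread_mutex_lock(&g->lock);
	if (val)
		g->shadow_dvo |= 1u << (7 - gpio);	/* MSB-first numbering */
	else
		g->shadow_dvo &= ~(1u << (7 - gpio));
	*g->dvo_reg = g->shadow_dvo;	/* write the whole register back */
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	uint8_t fake_reg = 0;
	struct shadow_gpio g = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.dvo_reg = &fake_reg,
	};

	gpio_set(&g, 3, 1);
	printf("register now %#x\n", fake_reg);	/* bit (7 - 3) -> 0x10 */
	return 0;
}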
<linux/of_pci.h> #include <asm/sections.h> #include <asm/io.h> @@ -235,7 +236,7 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) if (offset >= 0x100) return PCIBIOS_BAD_REGISTER_NUMBER; - np = pci_busdev_to_OF_node(bus, devfn); + np = of_pci_find_child_device(bus->dev.of_node, devfn); if (np == NULL) return PCIBIOS_DEVICE_NOT_FOUND; diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig index bf8330ef2e7..f0536c7cda9 100644 --- a/arch/powerpc/platforms/prep/Kconfig +++ b/arch/powerpc/platforms/prep/Kconfig @@ -1,6 +1,7 @@ config PPC_PREP bool "PowerPC Reference Platform (PReP) based machines" depends on 6xx && BROKEN + select HAVE_PCSPKR_PLATFORM select MPIC select PPC_I8259 select PPC_INDIRECT_PCI diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 71af4c5d6c0..05cf4769b88 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -1,6 +1,7 @@ config PPC_PSERIES depends on PPC64 && PPC_BOOK3S bool "IBM pSeries & new (POWER5-based) iSeries" + select HAVE_PCSPKR_PLATFORM select MPIC select PCI_MSI select PPC_XICS diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 33867ec4a23..9d6a8effeda 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -12,6 +12,8 @@ #include <linux/of.h> #include <linux/memblock.h> #include <linux/vmalloc.h> +#include <linux/memory.h> + #include <asm/firmware.h> #include <asm/machdep.h> #include <asm/pSeries_reconfig.h> @@ -20,24 +22,25 @@ static unsigned long get_memblock_size(void) { struct device_node *np; - unsigned int memblock_size = 0; + unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; + struct resource r; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (np) { - const unsigned long *size; + const __be64 *size; size = of_get_property(np, "ibm,lmb-size", NULL); - memblock_size = size ? *size : 0; - + if (size) + memblock_size = be64_to_cpup(size); of_node_put(np); - } else { + } else if (machine_is(pseries)) { + /* This fallback really only applies to pseries */ unsigned int memzero_size = 0; - const unsigned int *regs; np = of_find_node_by_path("/memory@0"); if (np) { - regs = of_get_property(np, "reg", NULL); - memzero_size = regs ? regs[3] : 0; + if (!of_address_to_resource(np, 0, &r)) + memzero_size = resource_size(&r); of_node_put(np); } @@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void) sprintf(buf, "/memory@%x", memzero_size); np = of_find_node_by_path(buf); if (np) { - regs = of_get_property(np, "reg", NULL); - memblock_size = regs ? regs[3] : 0; + if (!of_address_to_resource(np, 0, &r)) + memblock_size = resource_size(&r); of_node_put(np); } } } - return memblock_size; } +/* WARNING: This is going to override the generic definition whenever + * pseries is built-in regardless of what platform is active at boot + * time. This is fine for now as this is the only "option" and it + * should work everywhere. If not, we'll have to turn this into a + * ppc_md. 
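get_memblock_size() above now reads the big-endian "ibm,lmb-size" cell when it exists and otherwise falls back to a minimum block size (or to probing /memory@0 on pseries). The essential logic, shorn of the device-tree plumbing; memblock_size_from_prop(), the 16 MiB default and the byte buffer are assumptions of this sketch, not kernel values.

/* User-space model of the property-with-fallback logic above. The
 * default of 16 MiB is a placeholder, not the kernel's actual value. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define MIN_BLOCK_SIZE_SKETCH (16UL << 20)

static uint64_t be64_load(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static unsigned long memblock_size_from_prop(const uint8_t *prop, size_t len)
{
	if (!prop || len < 8)			/* property missing: fall back */
		return MIN_BLOCK_SIZE_SKETCH;
	return (unsigned long)be64_load(prop);	/* cf. be64_to_cpup(size) */
}

int main(void)
{
	const uint8_t lmb_size[8] = { 0, 0, 0, 0, 0x10, 0, 0, 0 }; /* 256 MiB */

	printf("%lu\n", memblock_size_from_prop(lmb_size, sizeof(lmb_size)));
	printf("%lu\n", memblock_size_from_prop(NULL, 0));
	return 0;
}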
callback + */ unsigned long memory_block_size_bytes(void) { return get_memblock_size(); diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 0608b1657da..d917573cf1a 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c @@ -196,9 +196,6 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl, out_be32(&lbc->lteccr, LTECCR_CLEAR); out_be32(&lbc->ltedr, LTEDR_ENABLE); - /* Enable interrupts for any detected events */ - out_be32(&lbc->lteir, LTEIR_ENABLE); - /* Set the monitor timeout value to the maximum for erratum A001 */ if (of_device_is_compatible(node, "fsl,elbc")) clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS); @@ -322,6 +319,9 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) goto err; } + /* Enable interrupts for any detected events */ + out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE); + return 0; err: diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 5b206a2fe17..b3fd081d56f 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -283,23 +283,24 @@ static void __iomem *rio_regs_win; #ifdef CONFIG_E500 int fsl_rio_mcheck_exception(struct pt_regs *regs) { - const struct exception_table_entry *entry = NULL; - unsigned long reason = mfspr(SPRN_MCSR); - - if (reason & MCSR_BUS_RBERR) { - reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); - if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { - /* Check if we are prepared to handle this fault */ - entry = search_exception_tables(regs->nip); - if (entry) { - pr_debug("RIO: %s - MC Exception handled\n", - __func__); - out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), - 0); - regs->msr |= MSR_RI; - regs->nip = entry->fixup; - return 1; - } + const struct exception_table_entry *entry; + unsigned long reason; + + if (!rio_regs_win) + return 0; + + reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); + if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { + /* Check if we are prepared to handle this fault */ + entry = search_exception_tables(regs->nip); + if (entry) { + pr_debug("RIO: %s - MC Exception handled\n", + __func__); + out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), + 0); + regs->msr |= MSR_RI; + regs->nip = entry->fixup; + return 1; } } diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 3a8de5bb628..58d7a534f87 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -29,6 +29,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/syscore_ops.h> +#include <linux/ratelimit.h> #include <asm/ptrace.h> #include <asm/signal.h> @@ -1648,9 +1649,8 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) return NO_IRQ; } if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { - if (printk_ratelimit()) - printk(KERN_WARNING "%s: Got protected source %d !\n", - mpic->name, (int)src); + printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", + mpic->name, (int)src); mpic_eoi(mpic); return NO_IRQ; } @@ -1688,9 +1688,8 @@ unsigned int mpic_get_coreint_irq(void) return NO_IRQ; } if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { - if (printk_ratelimit()) - printk(KERN_WARNING "%s: Got protected source %d !\n", - mpic->name, (int)src); + printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", + mpic->name, (int)src); return NO_IRQ; } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9fab2aa9c2c..c03fef7a9c2 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -89,6 
+89,7 @@ config S390 select HAVE_GET_USER_PAGES_FAST select HAVE_ARCH_MUTEX_CPU_RELAX select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 + select HAVE_RCU_TABLE_FREE if SMP select ARCH_INLINE_SPIN_TRYLOCK select ARCH_INLINE_SPIN_TRYLOCK_BH select ARCH_INLINE_SPIN_LOCK @@ -578,6 +579,7 @@ config S390_GUEST def_bool y prompt "s390 guest support for KVM (EXPERIMENTAL)" depends on 64BIT && EXPERIMENTAL + select VIRTUALIZATION select VIRTIO select VIRTIO_RING select VIRTIO_CONSOLE diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index f6314af3b35..38e71ebcd3c 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -17,15 +17,15 @@ #include <linux/gfp.h> #include <linux/mm.h> -#define check_pgt_cache() do {} while (0) - unsigned long *crst_table_alloc(struct mm_struct *); void crst_table_free(struct mm_struct *, unsigned long *); -void crst_table_free_rcu(struct mm_struct *, unsigned long *); unsigned long *page_table_alloc(struct mm_struct *); void page_table_free(struct mm_struct *, unsigned long *); -void page_table_free_rcu(struct mm_struct *, unsigned long *); +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +void page_table_free_rcu(struct mmu_gather *, unsigned long *); +void __tlb_remove_table(void *_table); +#endif static inline void clear_table(unsigned long *s, unsigned long val, size_t n) { diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index e4efacfe1b6..801fbe1d837 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -293,19 +293,6 @@ extern unsigned long VMALLOC_START; * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. */ -/* Page status table bits for virtualization */ -#define RCP_ACC_BITS 0xf000000000000000UL -#define RCP_FP_BIT 0x0800000000000000UL -#define RCP_PCL_BIT 0x0080000000000000UL -#define RCP_HR_BIT 0x0040000000000000UL -#define RCP_HC_BIT 0x0020000000000000UL -#define RCP_GR_BIT 0x0004000000000000UL -#define RCP_GC_BIT 0x0002000000000000UL - -/* User dirty / referenced bit for KVM's migration feature */ -#define KVM_UR_BIT 0x0000800000000000UL -#define KVM_UC_BIT 0x0000400000000000UL - #ifndef __s390x__ /* Bits in the segment table address-space-control-element */ @@ -325,6 +312,19 @@ extern unsigned long VMALLOC_START; #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) +/* Page status table bits for virtualization */ +#define RCP_ACC_BITS 0xf0000000UL +#define RCP_FP_BIT 0x08000000UL +#define RCP_PCL_BIT 0x00800000UL +#define RCP_HR_BIT 0x00400000UL +#define RCP_HC_BIT 0x00200000UL +#define RCP_GR_BIT 0x00040000UL +#define RCP_GC_BIT 0x00020000UL + +/* User dirty / referenced bit for KVM's migration feature */ +#define KVM_UR_BIT 0x00008000UL +#define KVM_UC_BIT 0x00004000UL + #else /* __s390x__ */ /* Bits in the segment/region table address-space-control-element */ @@ -367,6 +367,19 @@ extern unsigned long VMALLOC_START; #define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ #define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ +/* Page status table bits for virtualization */ +#define RCP_ACC_BITS 0xf000000000000000UL +#define RCP_FP_BIT 0x0800000000000000UL +#define RCP_PCL_BIT 0x0080000000000000UL +#define RCP_HR_BIT 0x0040000000000000UL +#define RCP_HC_BIT 0x0020000000000000UL +#define RCP_GR_BIT 0x0004000000000000UL +#define RCP_GC_BIT 0x0002000000000000UL + +/* User dirty / referenced bit for KVM's migration feature */ +#define KVM_UR_BIT 0x0000800000000000UL +#define KVM_UC_BIT 
0x0000400000000000UL + #endif /* __s390x__ */ /* diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 350e7ee5952..15c97625df8 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -139,110 +139,47 @@ struct slib { struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q]; } __attribute__ ((packed, aligned(2048))); -/** - * struct sbal_flags - storage block address list flags - * @last: last entry - * @cont: contiguous storage - * @frag: fragmentation - */ -struct sbal_flags { - u8 : 1; - u8 last : 1; - u8 cont : 1; - u8 : 1; - u8 frag : 2; - u8 : 2; -} __attribute__ ((packed)); - -#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL -#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL -#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL -#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL -#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL +#define SBAL_EFLAGS_LAST_ENTRY 0x40 +#define SBAL_EFLAGS_CONTIGUOUS 0x20 +#define SBAL_EFLAGS_FIRST_FRAG 0x04 +#define SBAL_EFLAGS_MIDDLE_FRAG 0x08 +#define SBAL_EFLAGS_LAST_FRAG 0x0c +#define SBAL_EFLAGS_MASK 0x6f -#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL +#define SBAL_SFLAGS0_PCI_REQ 0x40 +#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20 /* Awesome OpenFCP extensions */ -#define SBAL_FLAGS0_TYPE_STATUS 0x00UL -#define SBAL_FLAGS0_TYPE_WRITE 0x08UL -#define SBAL_FLAGS0_TYPE_READ 0x10UL -#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL -#define SBAL_FLAGS0_MORE_SBALS 0x04UL -#define SBAL_FLAGS0_COMMAND 0x02UL -#define SBAL_FLAGS0_LAST_SBAL 0x00UL -#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND -#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS -#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND -#define SBAL_FLAGS0_PCI 0x40 - -/** - * struct sbal_sbalf_0 - sbal flags for sbale 0 - * @pci: PCI indicator - * @cont: data continuation - * @sbtype: storage-block type (FCP) - */ -struct sbal_sbalf_0 { - u8 : 1; - u8 pci : 1; - u8 cont : 1; - u8 sbtype : 2; - u8 : 3; -} __attribute__ ((packed)); - -/** - * struct sbal_sbalf_1 - sbal flags for sbale 1 - * @key: storage key - */ -struct sbal_sbalf_1 { - u8 : 4; - u8 key : 4; -} __attribute__ ((packed)); - -/** - * struct sbal_sbalf_14 - sbal flags for sbale 14 - * @erridx: error index - */ -struct sbal_sbalf_14 { - u8 : 4; - u8 erridx : 4; -} __attribute__ ((packed)); - -/** - * struct sbal_sbalf_15 - sbal flags for sbale 15 - * @reason: reason for error state - */ -struct sbal_sbalf_15 { - u8 reason; -} __attribute__ ((packed)); - -/** - * union sbal_sbalf - storage block address list flags - * @i0: sbalf0 - * @i1: sbalf1 - * @i14: sbalf14 - * @i15: sblaf15 - * @value: raw value - */ -union sbal_sbalf { - struct sbal_sbalf_0 i0; - struct sbal_sbalf_1 i1; - struct sbal_sbalf_14 i14; - struct sbal_sbalf_15 i15; - u8 value; -}; +#define SBAL_SFLAGS0_TYPE_STATUS 0x00 +#define SBAL_SFLAGS0_TYPE_WRITE 0x08 +#define SBAL_SFLAGS0_TYPE_READ 0x10 +#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18 +#define SBAL_SFLAGS0_MORE_SBALS 0x04 +#define SBAL_SFLAGS0_COMMAND 0x02 +#define SBAL_SFLAGS0_LAST_SBAL 0x00 +#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND +#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS +#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND) /** * struct qdio_buffer_element - SBAL entry - * @flags: flags + * @eflags: SBAL entry flags + * @scount: SBAL count + * @sflags: whole SBAL flags * @length: length * @addr: address */ struct qdio_buffer_element { - u32 flags; + u8 eflags; + /* private: */ + u8 res1; + /* public: */ + u8 scount; + u8 sflags; u32 length; 
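The qdio.h change above replaces the single 32-bit SBAL "flags" word with byte-sized eflags/scount/sflags fields, and the new constants are the old ones collapsed to their byte position, for example 0x40000000UL (last entry) becomes 0x40 in eflags. A two-line arithmetic check of that correspondence; nothing here is an API, it only shows the constant values lining up.

/* Shows how the old 32-bit SBAL flag constants map onto the new 8-bit
 * eflags values visible in the hunk above (0x40000000 -> 0x40, etc.). */
#include <stdio.h>

int main(void)
{
	unsigned int old_last_entry = 0x40000000;	/* SBAL_FLAGS_LAST_ENTRY */
	unsigned int old_last_frag  = 0x0c000000;	/* SBAL_FLAGS_LAST_FRAG  */

	printf("eflags bytes: %#x %#x\n",
	       old_last_entry >> 24, old_last_frag >> 24);
	return 0;
}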
#ifdef CONFIG_32BIT /* private: */ - void *reserved; + void *res2; /* public: */ #endif void *addr; diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 77eee5477a5..c687a2c8346 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -26,67 +26,60 @@ #include <linux/swap.h> #include <asm/processor.h> #include <asm/pgalloc.h> -#include <asm/smp.h> #include <asm/tlbflush.h> struct mmu_gather { struct mm_struct *mm; +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + struct mmu_table_batch *batch; +#endif unsigned int fullmm; - unsigned int nr_ptes; - unsigned int nr_pxds; - unsigned int max; - void **array; - void *local[8]; + unsigned int need_flush; }; -static inline void __tlb_alloc_page(struct mmu_gather *tlb) -{ - unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); +#ifdef CONFIG_HAVE_RCU_TABLE_FREE +struct mmu_table_batch { + struct rcu_head rcu; + unsigned int nr; + void *tables[0]; +}; - if (addr) { - tlb->array = (void *) addr; - tlb->max = PAGE_SIZE / sizeof(void *); - } -} +#define MAX_TABLE_BATCH \ + ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) + +extern void tlb_table_flush(struct mmu_gather *tlb); +extern void tlb_remove_table(struct mmu_gather *tlb, void *table); +#endif static inline void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) { tlb->mm = mm; - tlb->max = ARRAY_SIZE(tlb->local); - tlb->array = tlb->local; tlb->fullmm = full_mm_flush; + tlb->need_flush = 0; +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb->batch = NULL; +#endif if (tlb->fullmm) __tlb_flush_mm(mm); - else - __tlb_alloc_page(tlb); - tlb->nr_ptes = 0; - tlb->nr_pxds = tlb->max; } static inline void tlb_flush_mmu(struct mmu_gather *tlb) { - if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max)) - __tlb_flush_mm(tlb->mm); - while (tlb->nr_ptes > 0) - page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]); - while (tlb->nr_pxds < tlb->max) - crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]); + if (!tlb->need_flush) + return; + tlb->need_flush = 0; + __tlb_flush_mm(tlb->mm); +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + tlb_table_flush(tlb); +#endif } static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { tlb_flush_mmu(tlb); - - rcu_table_freelist_finish(); - - /* keep the page table cache within bounds */ - check_pgt_cache(); - - if (tlb->array != tlb->local) - free_pages((unsigned long) tlb->array, 0); } /* @@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long address) { - if (!tlb->fullmm) { - tlb->array[tlb->nr_ptes++] = pte; - if (tlb->nr_ptes >= tlb->nr_pxds) - tlb_flush_mmu(tlb); - } else - page_table_free(tlb->mm, (unsigned long *) pte); +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + if (!tlb->fullmm) + return page_table_free_rcu(tlb, (unsigned long *) pte); +#endif + page_table_free(tlb->mm, (unsigned long *) pte); } /* @@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, #ifdef __s390x__ if (tlb->mm->context.asce_limit <= (1UL << 31)) return; - if (!tlb->fullmm) { - tlb->array[--tlb->nr_pxds] = pmd; - if (tlb->nr_ptes >= tlb->nr_pxds) - tlb_flush_mmu(tlb); - } else - crst_table_free(tlb->mm, (unsigned long *) pmd); +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + if (!tlb->fullmm) + return tlb_remove_table(tlb, pmd); +#endif + crst_table_free(tlb->mm, (unsigned long *) pmd); #endif } @@ -155,12 
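mmu_table_batch above is the generic RCU-table-free pattern: page-table pointers are collected into a page-sized batch headed by an rcu_head, and the whole batch is freed from a callback once a grace period guarantees no walker can still see the tables. A user-space analogue of the batching; call_rcu() is approximated by invoking the callback directly, and all names here are local to the sketch.

/* Batched, deferred freeing in the style of mmu_table_batch. In the
 * kernel the flush goes through call_rcu(); here the callback is simply
 * invoked once the batch is full. Illustrative user-space C. */
#include <stdlib.h>

#define PAGE_SIZE_SKETCH 4096

struct table_batch {
	unsigned int nr;
	void *tables[];
};

#define MAX_BATCH \
	((PAGE_SIZE_SKETCH - sizeof(struct table_batch)) / sizeof(void *))

static void batch_free_cb(struct table_batch *b)	/* "RCU callback" */
{
	for (unsigned int i = 0; i < b->nr; i++)
		free(b->tables[i]);
	free(b);
}

static void remove_table(struct table_batch **bp, void *table)
{
	struct table_batch *b = *bp;

	if (!b) {
		b = malloc(PAGE_SIZE_SKETCH);
		if (!b) {		/* no memory: free synchronously */
			free(table);
			return;
		}
		b->nr = 0;
		*bp = b;
	}
	b->tables[b->nr++] = table;
	if (b->nr == MAX_BATCH) {	/* kernel: call_rcu(&b->rcu, ...) */
		batch_free_cb(b);
		*bp = NULL;
	}
}

int main(void)
{
	struct table_batch *batch = NULL;

	for (int i = 0; i < 3; i++)
		remove_table(&batch, malloc(2048));
	if (batch)
		batch_free_cb(batch);	/* final flush */
	return 0;
}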
+146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #ifdef __s390x__ if (tlb->mm->context.asce_limit <= (1UL << 42)) return; - if (!tlb->fullmm) { - tlb->array[--tlb->nr_pxds] = pud; - if (tlb->nr_ptes >= tlb->nr_pxds) - tlb_flush_mmu(tlb); - } else - crst_table_free(tlb->mm, (unsigned long *) pud); +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + if (!tlb->fullmm) + return tlb_remove_table(tlb, pud); +#endif + crst_table_free(tlb->mm, (unsigned long *) pud); #endif } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 52420d2785b..1d55c95f617 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -262,7 +262,7 @@ void smp_ctl_set_bit(int cr, int bit) memset(&parms.orvals, 0, sizeof(parms.orvals)); memset(&parms.andvals, 0xff, sizeof(parms.andvals)); - parms.orvals[cr] = 1 << bit; + parms.orvals[cr] = 1UL << bit; on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_set_bit); @@ -276,7 +276,7 @@ void smp_ctl_clear_bit(int cr, int bit) memset(&parms.orvals, 0, sizeof(parms.orvals)); memset(&parms.andvals, 0xff, sizeof(parms.andvals)); - parms.andvals[cr] = ~(1L << bit); + parms.andvals[cr] = ~(1UL << bit); on_each_cpu(smp_ctl_bit_callback, &parms, 1); } EXPORT_SYMBOL(smp_ctl_clear_bit); diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a65d2e82f61..a63d34c3611 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -331,7 +331,7 @@ void __kprobes do_per_trap(struct pt_regs *regs) { if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return; - if (tracehook_consider_fatal_signal(current, SIGTRAP)) + if (current->ptrace) force_sig(SIGTRAP, current); } @@ -425,7 +425,7 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (tracehook_consider_fatal_signal(current, SIGTRAP)) + if (current->ptrace) force_sig(SIGTRAP, current); else signal = SIGILL; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 30ca85cce31..67345ae7ce8 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -731,6 +731,7 @@ static int __init kvm_s390_init(void) } memcpy(facilities, S390_lowcore.stfle_fac_list, 16); facilities[0] &= 0xff00fff3f47c0000ULL; + facilities[1] &= 0x201c000000000000ULL; return 0; } diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S index ab0e041ac54..5faa1b1b23f 100644 --- a/arch/s390/kvm/sie64a.S +++ b/arch/s390/kvm/sie64a.S @@ -93,4 +93,6 @@ sie_err: .section __ex_table,"a" .quad sie_inst,sie_err + .quad sie_exit,sie_err + .quad sie_reenter,sie_err .previous diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index fe103e891e7..095f782a551 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access, goto out; address = trans_exc_code & __FAIL_ADDR_MASK; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; @@ -345,11 +345,11 @@ retry: if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 
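The smp_ctl_set_bit()/smp_ctl_clear_bit() change above is the classic shift-width fix: 1 << bit is evaluated as a 32-bit int, so masks for control-register bits above 31 are truncated or undefined before they ever reach the 64-bit orvals/andvals arrays, while 1UL << bit keeps the arithmetic at register width. A short demonstration, assuming an LP64 build:

/* Why the patch writes 1UL << bit: int-width shifts cannot form 64-bit
 * masks, and signed intermediates sign-extend when widened. */
#include <stdio.h>

int main(void)
{
	unsigned long ok = 1UL << 33;		/* full register width */
	unsigned long sign_ext = (unsigned long)~(1 << 30);
	/* 1 << 33 would be undefined behaviour here: the shift happens in
	 * a 32-bit int, which is exactly what the fix avoids. */

	printf("1UL << 33          = %#lx\n", ok);
	printf("~(1 << 30) widened = %#lx (unwanted sign extension)\n",
	       sign_ext);
	return 0;
}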
1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b09763fe5da..37a23c22370 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -24,94 +24,12 @@ #include <asm/tlbflush.h> #include <asm/mmu_context.h> -struct rcu_table_freelist { - struct rcu_head rcu; - struct mm_struct *mm; - unsigned int pgt_index; - unsigned int crst_index; - unsigned long *table[0]; -}; - -#define RCU_FREELIST_SIZE \ - ((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \ - / sizeof(unsigned long)) - -static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist); - -static void __page_table_free(struct mm_struct *mm, unsigned long *table); - -static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm) -{ - struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist); - struct rcu_table_freelist *batch = *batchp; - - if (batch) - return batch; - batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC); - if (batch) { - batch->mm = mm; - batch->pgt_index = 0; - batch->crst_index = RCU_FREELIST_SIZE; - *batchp = batch; - } - return batch; -} - -static void rcu_table_freelist_callback(struct rcu_head *head) -{ - struct rcu_table_freelist *batch = - container_of(head, struct rcu_table_freelist, rcu); - - while (batch->pgt_index > 0) - __page_table_free(batch->mm, batch->table[--batch->pgt_index]); - while (batch->crst_index < RCU_FREELIST_SIZE) - crst_table_free(batch->mm, batch->table[batch->crst_index++]); - free_page((unsigned long) batch); -} - -void rcu_table_freelist_finish(void) -{ - struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist); - struct rcu_table_freelist *batch = *batchp; - - if (!batch) - goto out; - call_rcu(&batch->rcu, rcu_table_freelist_callback); - *batchp = NULL; -out: - put_cpu_var(rcu_table_freelist); -} - -static void smp_sync(void *arg) -{ -} - #ifndef CONFIG_64BIT #define ALLOC_ORDER 1 -#define TABLES_PER_PAGE 4 -#define FRAG_MASK 15UL -#define SECOND_HALVES 10UL - -void clear_table_pgstes(unsigned long *table) -{ - clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4); - memset(table + 256, 0, PAGE_SIZE/4); - clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4); - memset(table + 768, 0, PAGE_SIZE/4); -} - +#define FRAG_MASK 0x0f #else #define ALLOC_ORDER 2 -#define TABLES_PER_PAGE 2 -#define FRAG_MASK 3UL -#define SECOND_HALVES 2UL - -void clear_table_pgstes(unsigned long *table) -{ - clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); - memset(table + 256, 0, PAGE_SIZE/2); -} - +#define FRAG_MASK 0x03 #endif unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; @@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) free_pages((unsigned long) table, ALLOC_ORDER); } -void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table) -{ - struct rcu_table_freelist *batch; - - preempt_disable(); - if (atomic_read(&mm->mm_users) < 2 && - cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { - crst_table_free(mm, table); - goto out; - } - batch = rcu_table_freelist_get(mm); - if (!batch) { - smp_call_function(smp_sync, NULL, 1); - crst_table_free(mm, table); - goto out; - } - batch->table[--batch->crst_index] = table; - if (batch->pgt_index >= batch->crst_index) - rcu_table_freelist_finish(); -out: - preempt_enable(); -} - #ifdef CONFIG_64BIT int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) { @@ -238,124 +133,175 @@ void 
crst_table_downgrade(struct mm_struct *mm, unsigned long limit) } #endif +static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) +{ + unsigned int old, new; + + do { + old = atomic_read(v); + new = old ^ bits; + } while (atomic_cmpxchg(v, old, new) != old); + return new; +} + /* * page table entry allocation/free routines. */ +#ifdef CONFIG_PGSTE +static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) +{ + struct page *page; + unsigned long *table; + + page = alloc_page(GFP_KERNEL|__GFP_REPEAT); + if (!page) + return NULL; + pgtable_page_ctor(page); + atomic_set(&page->_mapcount, 3); + table = (unsigned long *) page_to_phys(page); + clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); + clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); + return table; +} + +static inline void page_table_free_pgste(unsigned long *table) +{ + struct page *page; + + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + pgtable_page_ctor(page); + atomic_set(&page->_mapcount, -1); + __free_page(page); +} +#endif + unsigned long *page_table_alloc(struct mm_struct *mm) { struct page *page; unsigned long *table; - unsigned long bits; + unsigned int mask, bit; - bits = (mm->context.has_pgste) ? 3UL : 1UL; +#ifdef CONFIG_PGSTE + if (mm_has_pgste(mm)) + return page_table_alloc_pgste(mm); +#endif + /* Allocate fragments of a 4K page as 1K/2K page table */ spin_lock_bh(&mm->context.list_lock); - page = NULL; + mask = FRAG_MASK; if (!list_empty(&mm->context.pgtable_list)) { page = list_first_entry(&mm->context.pgtable_list, struct page, lru); - if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) - page = NULL; + table = (unsigned long *) page_to_phys(page); + mask = atomic_read(&page->_mapcount); + mask = mask | (mask >> 4); } - if (!page) { + if ((mask & FRAG_MASK) == FRAG_MASK) { spin_unlock_bh(&mm->context.list_lock); page = alloc_page(GFP_KERNEL|__GFP_REPEAT); if (!page) return NULL; pgtable_page_ctor(page); - page->flags &= ~FRAG_MASK; + atomic_set(&page->_mapcount, 1); table = (unsigned long *) page_to_phys(page); - if (mm->context.has_pgste) - clear_table_pgstes(table); - else - clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); + clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); spin_lock_bh(&mm->context.list_lock); list_add(&page->lru, &mm->context.pgtable_list); + } else { + for (bit = 1; mask & bit; bit <<= 1) + table += PTRS_PER_PTE; + mask = atomic_xor_bits(&page->_mapcount, bit); + if ((mask & FRAG_MASK) == FRAG_MASK) + list_del(&page->lru); } - table = (unsigned long *) page_to_phys(page); - while (page->flags & bits) { - table += 256; - bits <<= 1; - } - page->flags |= bits; - if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) - list_move_tail(&page->lru, &mm->context.pgtable_list); spin_unlock_bh(&mm->context.list_lock); return table; } -static void __page_table_free(struct mm_struct *mm, unsigned long *table) +void page_table_free(struct mm_struct *mm, unsigned long *table) { struct page *page; - unsigned long bits; + unsigned int bit, mask; - bits = ((unsigned long) table) & 15; - table = (unsigned long *)(((unsigned long) table) ^ bits); +#ifdef CONFIG_PGSTE + if (mm_has_pgste(mm)) + return page_table_free_pgste(table); +#endif + /* Free 1K/2K page table fragment of a 4K page */ page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - page->flags ^= bits; - if (!(page->flags & FRAG_MASK)) { + bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); + spin_lock_bh(&mm->context.list_lock); + if ((atomic_read(&page->_mapcount) & FRAG_MASK) != 
FRAG_MASK) + list_del(&page->lru); + mask = atomic_xor_bits(&page->_mapcount, bit); + if (mask & FRAG_MASK) + list_add(&page->lru, &mm->context.pgtable_list); + spin_unlock_bh(&mm->context.list_lock); + if (mask == 0) { pgtable_page_dtor(page); + atomic_set(&page->_mapcount, -1); __free_page(page); } } -void page_table_free(struct mm_struct *mm, unsigned long *table) +#ifdef CONFIG_HAVE_RCU_TABLE_FREE + +static void __page_table_free_rcu(void *table, unsigned bit) { struct page *page; - unsigned long bits; - bits = (mm->context.has_pgste) ? 3UL : 1UL; - bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); +#ifdef CONFIG_PGSTE + if (bit == FRAG_MASK) + return page_table_free_pgste(table); +#endif + /* Free 1K/2K page table fragment of a 4K page */ page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - spin_lock_bh(&mm->context.list_lock); - page->flags ^= bits; - if (page->flags & FRAG_MASK) { - /* Page now has some free pgtable fragments. */ - if (!list_empty(&page->lru)) - list_move(&page->lru, &mm->context.pgtable_list); - page = NULL; - } else - /* All fragments of the 4K page have been freed. */ - list_del(&page->lru); - spin_unlock_bh(&mm->context.list_lock); - if (page) { + if (atomic_xor_bits(&page->_mapcount, bit) == 0) { pgtable_page_dtor(page); + atomic_set(&page->_mapcount, -1); __free_page(page); } } -void page_table_free_rcu(struct mm_struct *mm, unsigned long *table) +void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) { - struct rcu_table_freelist *batch; + struct mm_struct *mm; struct page *page; - unsigned long bits; + unsigned int bit, mask; - preempt_disable(); - if (atomic_read(&mm->mm_users) < 2 && - cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { - page_table_free(mm, table); - goto out; - } - batch = rcu_table_freelist_get(mm); - if (!batch) { - smp_call_function(smp_sync, NULL, 1); - page_table_free(mm, table); - goto out; + mm = tlb->mm; +#ifdef CONFIG_PGSTE + if (mm_has_pgste(mm)) { + table = (unsigned long *) (__pa(table) | FRAG_MASK); + tlb_remove_table(tlb, table); + return; } - bits = (mm->context.has_pgste) ? 3UL : 1UL; - bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); +#endif + bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); page = pfn_to_page(__pa(table) >> PAGE_SHIFT); spin_lock_bh(&mm->context.list_lock); - /* Delayed freeing with rcu prevents reuse of pgtable fragments */ - list_del_init(&page->lru); + if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) + list_del(&page->lru); + mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4)); + if (mask & FRAG_MASK) + list_add_tail(&page->lru, &mm->context.pgtable_list); spin_unlock_bh(&mm->context.list_lock); - table = (unsigned long *)(((unsigned long) table) | bits); - batch->table[batch->pgt_index++] = table; - if (batch->pgt_index >= batch->crst_index) - rcu_table_freelist_finish(); -out: - preempt_enable(); + table = (unsigned long *) (__pa(table) | (bit << 4)); + tlb_remove_table(tlb, table); } +void __tlb_remove_table(void *_table) +{ + void *table = (void *)((unsigned long) _table & PAGE_MASK); + unsigned type = (unsigned long) _table & ~PAGE_MASK; + + if (type) + __page_table_free_rcu(table, type); + else + free_pages((unsigned long) table, ALLOC_ORDER); +} + +#endif + /* * switch on pgstes for its userspace process (for kvm) */ @@ -369,7 +315,7 @@ int s390_enable_sie(void) return -EINVAL; /* Do we have pgstes? 
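page_table_free_rcu() and __tlb_remove_table() above pass a single pointer through the generic tlb_remove_table() machinery by hiding a small type code in the low bits of the aligned table address and masking it off again on the far side. A minimal stand-alone illustration of that tagging trick; the tag value and PAGE_MASK_SKETCH are arbitrary choices for the example.

/* Pointer tagging: an aligned address has free low bits, so a type code
 * can ride along and be stripped with a mask. Illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK_SKETCH (~(uintptr_t)0xfff)	/* assume 4 KiB alignment */

static void *tag_ptr(void *p, unsigned int type)
{
	return (void *)((uintptr_t)p | type);	/* type must fit in low bits */
}

static void untag_ptr(void *tagged, void **p, unsigned int *type)
{
	*p    = (void *)((uintptr_t)tagged & PAGE_MASK_SKETCH);
	*type = (unsigned int)((uintptr_t)tagged & ~PAGE_MASK_SKETCH);
}

int main(void)
{
	void *table = (void *)(uintptr_t)0x7f0000041000;   /* pretend address */
	void *p;
	unsigned int type;

	untag_ptr(tag_ptr(table, 0x30), &p, &type);
	printf("ptr=%p type=%#x\n", p, type);
	return 0;
}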
if yes, we are done */ - if (tsk->mm->context.has_pgste) + if (mm_has_pgste(tsk->mm)) return 0; /* lets check if we are allowed to replace the mm */ diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 5995e9bc72d..0e358c2cffe 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -25,7 +25,7 @@ extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth); #include "hwsampler.h" -#define DEFAULT_INTERVAL 4096 +#define DEFAULT_INTERVAL 4127518 #define DEFAULT_SDBT_BLOCKS 1 #define DEFAULT_SDB_BLOCKS 511 @@ -151,6 +151,12 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) if (oprofile_max_interval == 0) return -ENODEV; + /* The initial value should be sane */ + if (oprofile_hw_interval < oprofile_min_interval) + oprofile_hw_interval = oprofile_min_interval; + if (oprofile_hw_interval > oprofile_max_interval) + oprofile_hw_interval = oprofile_max_interval; + if (oprofile_timer_init(ops)) return -ENODEV; diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f03338c2f08..bbdeb48bbf8 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -348,6 +348,7 @@ config CPU_SUBTYPE_SH7720 select SYS_SUPPORTS_CMT select ARCH_WANT_OPTIONAL_GPIOLIB select USB_ARCH_HAS_OHCI + select USB_OHCI_SH if USB_OHCI_HCD help Select SH7720 if you have a SH3-DSP SH7720 CPU. @@ -357,6 +358,7 @@ config CPU_SUBTYPE_SH7721 select CPU_HAS_DSP select SYS_SUPPORTS_CMT select USB_ARCH_HAS_OHCI + select USB_OHCI_SH if USB_OHCI_HCD help Select SH7721 if you have a SH3-DSP SH7721 CPU. @@ -440,6 +442,7 @@ config CPU_SUBTYPE_SH7763 bool "Support SH7763 processor" select CPU_SH4A select USB_ARCH_HAS_OHCI + select USB_OHCI_SH if USB_OHCI_HCD help Select SH7763 if you have a SH4A SH7763(R5S77631) CPU. @@ -467,7 +470,9 @@ config CPU_SUBTYPE_SH7786 select GENERIC_CLOCKEVENTS_BROADCAST if SMP select ARCH_WANT_OPTIONAL_GPIOLIB select USB_ARCH_HAS_OHCI + select USB_OHCI_SH if USB_OHCI_HCD select USB_ARCH_HAS_EHCI + select USB_EHCI_SH if USB_EHCI_HCD config CPU_SUBTYPE_SHX3 bool "Support SH-X3 processor" diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 3a32741cc0a..513cb1a2e6c 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -20,6 +20,7 @@ #include <linux/io.h> #include <linux/delay.h> #include <linux/usb/r8a66597.h> +#include <linux/usb/renesas_usbhs.h> #include <linux/i2c.h> #include <linux/i2c/tsc2007.h> #include <linux/spi/spi.h> @@ -232,6 +233,52 @@ static struct platform_device usb1_common_device = { .resource = usb1_common_resources, }; +/* + * USBHS + */ +static int usbhs_get_id(struct platform_device *pdev) +{ + return gpio_get_value(GPIO_PTB3); +} + +static struct renesas_usbhs_platform_info usbhs_info = { + .platform_callback = { + .get_id = usbhs_get_id, + }, + .driver_param = { + .buswait_bwait = 4, + .detection_delay = 5, + }, +}; + +static struct resource usbhs_resources[] = { + [0] = { + .start = 0xa4d90000, + .end = 0xa4d90124 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 66, + .end = 66, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usbhs_device = { + .name = "renesas_usbhs", + .id = 1, + .dev = { + .dma_mask = NULL, /* not use dma */ + .coherent_dma_mask = 0xffffffff, + .platform_data = &usbhs_info, + }, + .num_resources = ARRAY_SIZE(usbhs_resources), + .resource = usbhs_resources, + .archdata = { + .hwblk_id = HWBLK_USB1, + }, +}; + /* LCDC */ const static struct fb_videomode ecovec_lcd_modes[] = { { @@ -897,6 +944,7 @@ 
static struct platform_device *ecovec_devices[] __initdata = { &sh_eth_device, &usb0_host_device, &usb1_common_device, + &usbhs_device, &lcdc_device, &ceu0_device, &ceu1_device, diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index 780e083e4d1..23bc849d9c6 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -27,8 +27,6 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \ $(CONFIG_BOOT_LINK_OFFSET)]') endif -LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) - ifeq ($(CONFIG_MCOUNT),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) @@ -37,7 +35,25 @@ endif LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \ -T $(obj)/../../kernel/vmlinux.lds -$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE +# +# Pull in the necessary libgcc bits from the in-kernel implementation. +# +lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \ + lshrsi3.S +lib1funcs-obj := \ + $(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y)))) + +lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib +ifeq ($(BITS),64) + lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir)) +endif + +KBUILD_CFLAGS += -I$(lib1funcs-dir) + +$(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE + $(call cmd,shipped) + +$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE $(call if_changed,ld) @: diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index 33ddb130a7c..cfde98ddb29 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -9,7 +9,6 @@ CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set # CONFIG_SYSCTL_SYSCALL is not set CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y @@ -39,8 +38,6 @@ CONFIG_IPV6=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CONCAT=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_M25P80=y @@ -56,18 +53,19 @@ CONFIG_SH_ETH=y # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set # CONFIG_SERIO is not set +# CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=3 CONFIG_SERIAL_SH_SCI_CONSOLE=y -# CONFIG_LEGACY_PTYS is not set # CONFIG_HW_RANDOM is not set CONFIG_SPI=y CONFIG_SPI_SH=y # CONFIG_HWMON is not set -CONFIG_MFD_SH_MOBILE_SDHI=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_SH=y CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_SH=y CONFIG_USB_STORAGE=y CONFIG_MMC=y CONFIG_MMC_SDHI=y diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig index 0f558914e76..e2cbd92d520 100644 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@ -227,7 +227,7 @@ CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_SERIAL_ARK3116=m CONFIG_USB_SERIAL_PL2303=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=m CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h index 4676bf57693..f848dec9e48 100644 --- a/arch/sh/include/asm/cmpxchg-grb.h +++ b/arch/sh/include/asm/cmpxchg-grb.h @@ -15,8 +15,9 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) " mov.l %2, @%1 \n\t" /* store new value */ "1: mov r1, r15 \n\t" /* LOGOUT */ : "=&r" (retval), - "+r" (m) - : "r" (val) + "+r" (m), + "+r" (val) /* inhibit r15 overloading */ + : 
: "memory", "r0", "r1"); return retval; @@ -36,8 +37,9 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) " mov.b %2, @%1 \n\t" /* store new value */ "1: mov r1, r15 \n\t" /* LOGOUT */ : "=&r" (retval), - "+r" (m) - : "r" (val) + "+r" (m), + "+r" (val) /* inhibit r15 overloading */ + : : "memory" , "r0", "r1"); return retval; @@ -54,13 +56,14 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, " nop \n\t" " mov r15, r1 \n\t" /* r1 = saved sp */ " mov #-8, r15 \n\t" /* LOGIN */ - " mov.l @%1, %0 \n\t" /* load old value */ - " cmp/eq %0, %2 \n\t" + " mov.l @%3, %0 \n\t" /* load old value */ + " cmp/eq %0, %1 \n\t" " bf 1f \n\t" /* if not equal */ - " mov.l %3, @%1 \n\t" /* store new value */ + " mov.l %2, @%3 \n\t" /* store new value */ "1: mov r1, r15 \n\t" /* LOGOUT */ - : "=&r" (retval) - : "r" (m), "r" (old), "r" (new) + : "=&r" (retval), + "+r" (old), "+r" (new) /* old or new can be r15 */ + : "r" (m) : "memory" , "r0", "r1", "t"); return retval; diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h index 8887baff5ef..15a8496960e 100644 --- a/arch/sh/include/asm/mmzone.h +++ b/arch/sh/include/asm/mmzone.h @@ -9,10 +9,6 @@ extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ - NODE_DATA(nid)->node_spanned_pages) - static inline int pfn_to_nid(unsigned long pfn) { int nid; diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 2a541ddb5a1..e25c4c7d6b6 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -150,7 +150,6 @@ struct thread_struct { #define SR_USER (SR_MMU | SR_FD) #define start_thread(_regs, new_pc, new_sp) \ - set_fs(USER_DS); \ _regs->sr = SR_USER; /* User mode. */ \ _regs->pc = new_pc - 4; /* Compensate syscall exit */ \ _regs->pc |= 1; /* Set SHmedia ! 
*/ \ diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h index 3daef8ecbc6..cbc47e6bcab 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7724.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h @@ -298,6 +298,14 @@ enum { SHDMA_SLAVE_SCIF4_RX, SHDMA_SLAVE_SCIF5_TX, SHDMA_SLAVE_SCIF5_RX, + SHDMA_SLAVE_USB0D0_TX, + SHDMA_SLAVE_USB0D0_RX, + SHDMA_SLAVE_USB0D1_TX, + SHDMA_SLAVE_USB0D1_RX, + SHDMA_SLAVE_USB1D0_TX, + SHDMA_SLAVE_USB1D0_RX, + SHDMA_SLAVE_USB1D1_TX, + SHDMA_SLAVE_USB1D1_RX, SHDMA_SLAVE_SDHI0_TX, SHDMA_SLAVE_SDHI0_RX, SHDMA_SLAVE_SDHI1_TX, diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c index 748955df018..fa4f724b295 100644 --- a/arch/sh/kernel/cpu/sh4/perf_event.c +++ b/arch/sh/kernel/cpu/sh4/perf_event.c @@ -180,6 +180,21 @@ static const int sh7750_cache_events [ C(RESULT_MISS) ] = -1, }, }, + + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; static int sh7750_event_map(int event) diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c index 17e6bebfede..84a2c396cee 100644 --- a/arch/sh/kernel/cpu/sh4a/perf_event.c +++ b/arch/sh/kernel/cpu/sh4a/perf_event.c @@ -205,6 +205,21 @@ static const int sh4a_cache_events [ C(RESULT_MISS) ] = -1, }, }, + + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; static int sh4a_event_map(int event) diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index 0333fe9e388..134a397b191 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -93,6 +93,46 @@ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = { .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x36, }, { + .slave_id = SHDMA_SLAVE_USB0D0_TX, + .addr = 0xA4D80100, + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x73, + }, { + .slave_id = SHDMA_SLAVE_USB0D0_RX, + .addr = 0xA4D80100, + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x73, + }, { + .slave_id = SHDMA_SLAVE_USB0D1_TX, + .addr = 0xA4D80120, + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x77, + }, { + .slave_id = SHDMA_SLAVE_USB0D1_RX, + .addr = 0xA4D80120, + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x77, + }, { + .slave_id = SHDMA_SLAVE_USB1D0_TX, + .addr = 0xA4D90100, + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xab, + }, { + .slave_id = SHDMA_SLAVE_USB1D0_RX, + .addr = 0xA4D90100, + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xab, + }, { + .slave_id = SHDMA_SLAVE_USB1D1_TX, + .addr = 0xA4D90120, + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xaf, + }, { + .slave_id = SHDMA_SLAVE_USB1D1_RX, + .addr = 0xA4D90120, + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xaf, + }, { .slave_id = SHDMA_SLAVE_SDHI0_TX, .addr = 0x04ce0030, .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), diff --git 
a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c index 423dabf542d..e915deafac8 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c @@ -183,7 +183,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = { { .slave_id = SHDMA_SLAVE_SCIF2_RX, .addr = 0x1f4b0014, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x22, }, @@ -197,7 +197,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = { { .slave_id = SHDMA_SLAVE_SCIF3_RX, .addr = 0x1f4c0014, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2a, }, @@ -211,7 +211,7 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = { { .slave_id = SHDMA_SLAVE_SCIF4_RX, .addr = 0x1f4d0014, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x42, }, @@ -228,7 +228,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC0_RX, .addr = 0x1e500013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x22, }, @@ -242,7 +242,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC1_RX, .addr = 0x1e510013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2a, }, @@ -256,7 +256,7 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC2_RX, .addr = 0x1e520013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0xa2, }, @@ -265,12 +265,12 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { .addr = 0x1e530012, .chcr = SM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), - .mid_rid = 0xab, + .mid_rid = 0xa9, }, { .slave_id = SHDMA_SLAVE_RIIC3_RX, .addr = 0x1e530013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0xaf, }, @@ -279,14 +279,14 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { .addr = 0x1e540012, .chcr = SM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), - .mid_rid = 0xc1, + .mid_rid = 0xc5, }, { .slave_id = SHDMA_SLAVE_RIIC4_RX, .addr = 0x1e540013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), - .mid_rid = 0xc2, + .mid_rid = 0xc6, }, }; @@ -301,7 +301,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC5_RX, .addr = 0x1e550013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x22, }, @@ -315,7 +315,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC6_RX, .addr = 0x1e560013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x2a, }, @@ -329,7 +329,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC7_RX, .addr = 0x1e570013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x42, }, @@ -343,7 +343,7 @@ static const struct sh_dmae_slave_config 
sh7757_dmae3_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC8_RX, .addr = 0x1e580013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x46, }, @@ -357,7 +357,7 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = { { .slave_id = SHDMA_SLAVE_RIIC9_RX, .addr = 0x1e590013, - .chcr = SM_INC | 0x800 | 0x40000000 | + .chcr = DM_INC | 0x800 | 0x40000000 | TS_INDEX2VAL(XMIT_SZ_8BIT), .mid_rid = 0x52, }, @@ -659,6 +659,54 @@ static struct platform_device spi0_device = { .resource = spi0_resources, }; +static struct resource usb_ehci_resources[] = { + [0] = { + .start = 0xfe4f1000, + .end = 0xfe4f10ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 57, + .end = 57, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usb_ehci_device = { + .name = "sh_ehci", + .id = -1, + .dev = { + .dma_mask = &usb_ehci_device.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .num_resources = ARRAY_SIZE(usb_ehci_resources), + .resource = usb_ehci_resources, +}; + +static struct resource usb_ohci_resources[] = { + [0] = { + .start = 0xfe4f1800, + .end = 0xfe4f18ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 57, + .end = 57, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usb_ohci_device = { + .name = "sh_ohci", + .id = -1, + .dev = { + .dma_mask = &usb_ohci_device.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .num_resources = ARRAY_SIZE(usb_ohci_resources), + .resource = usb_ohci_resources, +}; + static struct platform_device *sh7757_devices[] __initdata = { &scif2_device, &scif3_device, @@ -670,6 +718,8 @@ static struct platform_device *sh7757_devices[] __initdata = { &dma2_device, &dma3_device, &spi0_device, + &usb_ehci_device, + &usb_ohci_device, }; static int __init sh7757_devices_setup(void) @@ -1039,13 +1089,13 @@ static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups, /* Support for external interrupt pins in IRQ mode */ static struct intc_vect vectors_irq0123[] __initdata = { - INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), - INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), + INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240), + INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0), }; static struct intc_vect vectors_irq4567[] __initdata = { - INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), - INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200), + INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340), + INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0), }; static struct intc_sense_reg sense_registers[] __initdata = { @@ -1079,14 +1129,14 @@ static struct intc_vect vectors_irl0123[] __initdata = { }; static struct intc_vect vectors_irl4567[] __initdata = { - INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20), - INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60), - INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0), - INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0), - INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20), - INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60), - INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0), - INTC_VECT(IRL4_HHHL, 0xcc0), + INTC_VECT(IRL4_LLLL, 0x200), INTC_VECT(IRL4_LLLH, 0x220), + INTC_VECT(IRL4_LLHL, 0x240), INTC_VECT(IRL4_LLHH, 0x260), + INTC_VECT(IRL4_LHLL, 0x280), INTC_VECT(IRL4_LHLH, 0x2a0), + INTC_VECT(IRL4_LHHL, 0x2c0), INTC_VECT(IRL4_LHHH, 0x2e0), + INTC_VECT(IRL4_HLLL, 0x300), INTC_VECT(IRL4_HLLH, 0x320), + INTC_VECT(IRL4_HLHL, 0x340), INTC_VECT(IRL4_HLHH, 0x360), 
+ INTC_VECT(IRL4_HHLL, 0x380), INTC_VECT(IRL4_HHLH, 0x3a0), + INTC_VECT(IRL4_HHHL, 0x3c0), }; static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123, diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index 64c807c3920..bf280c812d2 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c @@ -256,7 +256,7 @@ out: return ret; } -static struct dev_power_domain default_power_domain = { +static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = default_platform_runtime_suspend, .runtime_resume = default_platform_runtime_resume, @@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb, hwblk_disable(hwblk_info, hwblk); /* make sure driver re-inits itself once */ __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); - dev->pwr_domain = &default_power_domain; + dev->pm_domain = &default_pm_domain; break; /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ case BUS_NOTIFY_BOUND_DRIVER: @@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb, __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); break; case BUS_NOTIFY_DEL_DEVICE: - dev->pwr_domain = NULL; + dev->pm_domain = NULL; break; } return 0; diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 91971103b62..a3ee9197112 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -13,6 +13,7 @@ #include <linux/seq_file.h> #include <linux/ftrace.h> #include <linux/delay.h> +#include <linux/ratelimit.h> #include <asm/processor.h> #include <asm/machvec.h> #include <asm/uaccess.h> @@ -268,9 +269,8 @@ void migrate_irqs(void) unsigned int newcpu = cpumask_any_and(data->affinity, cpu_online_mask); if (newcpu >= nr_cpu_ids) { - if (printk_ratelimit()) - printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", - irq, cpu); + pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", + irq, cpu); cpumask_setall(data->affinity); newcpu = cpumask_any_and(data->affinity, diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index b473f0c06fb..aaf6d59c201 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -102,8 +102,6 @@ EXPORT_SYMBOL(kernel_thread); void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp) { - set_fs(USER_DS); - regs->pr = 0; regs->sr = SR_FD; regs->pc = new_pc; diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 3d7b209b217..92b3c276339 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset, return 0; } -void ptrace_triggered(struct perf_event *bp, int nmi, +void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event_attr attr; @@ -91,7 +91,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr) attr.bp_len = HW_BREAKPOINT_LEN_2; attr.bp_type = HW_BREAKPOINT_R; - bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); + bp = register_user_hw_breakpoint(&attr, ptrace_triggered, + NULL, tsk); if (IS_ERR(bp)) return PTR_ERR(bp); diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index b51a17104b5..d9006f8ffc1 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, */ if (!expected) { unaligned_fixups_notify(current, instruction, regs); - 
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); } diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 6713ca97e55..67110be83fd 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); destreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { @@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { @@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); destreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { @@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs, return error; } - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address); srcreg = (opcode >> 4) & 0x3f; if (user_mode(regs)) { diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c index f76a5090d5d..97719521065 100644 --- a/arch/sh/math-emu/math.c +++ b/arch/sh/math-emu/math.c @@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs) struct task_struct *tsk = current; struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { /* initialize once. 
*/ diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c index b2595b8548e..620fa7ff9ee 100644 --- a/arch/sh/mm/alignment.c +++ b/arch/sh/mm/alignment.c @@ -13,6 +13,7 @@ #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> +#include <linux/ratelimit.h> #include <asm/alignment.h> #include <asm/processor.h> @@ -95,13 +96,13 @@ int set_unalign_ctl(struct task_struct *tsk, unsigned int val) void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn, struct pt_regs *regs) { - if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit()) - pr_notice("Fixing up unaligned userspace access " + if (user_mode(regs) && (se_usermode & UM_WARN)) + pr_notice_ratelimited("Fixing up unaligned userspace access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", tsk->comm, task_pid_nr(tsk), (void *)instruction_pointer(regs), insn); - else if (se_kernmode_warn && printk_ratelimit()) - pr_notice("Fixing up unaligned kernel access " + else if (se_kernmode_warn) + pr_notice_ratelimited("Fixing up unaligned kernel access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", tsk->comm, task_pid_nr(tsk), (void *)instruction_pointer(regs), insn); diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c index 52411462c40..11572519803 100644 --- a/arch/sh/mm/cache-debugfs.c +++ b/arch/sh/mm/cache-debugfs.c @@ -26,9 +26,9 @@ static int cache_seq_show(struct seq_file *file, void *iter) { unsigned int cache_type = (unsigned int)file->private; struct cache_info *cache; - unsigned int waysize, way, cache_size; - unsigned long ccr, base; - static unsigned long addrstart = 0; + unsigned int waysize, way; + unsigned long ccr; + unsigned long addrstart = 0; /* * Go uncached immediately so we don't skew the results any @@ -45,28 +45,13 @@ static int cache_seq_show(struct seq_file *file, void *iter) } if (cache_type == CACHE_TYPE_DCACHE) { - base = CACHE_OC_ADDRESS_ARRAY; + addrstart = CACHE_OC_ADDRESS_ARRAY; cache = ¤t_cpu_data.dcache; } else { - base = CACHE_IC_ADDRESS_ARRAY; + addrstart = CACHE_IC_ADDRESS_ARRAY; cache = ¤t_cpu_data.icache; } - /* - * Due to the amount of data written out (depending on the cache size), - * we may be iterated over multiple times. In this case, keep track of - * the entry position in addrstart, and rewind it when we've hit the - * end of the cache. - * - * Likewise, the same code is used for multiple caches, so care must - * be taken for bouncing addrstart back and forth so the appropriate - * cache is hit. 
- */ - cache_size = cache->ways * cache->sets * cache->linesz; - if (((addrstart & 0xff000000) != base) || - (addrstart & 0x00ffffff) > cache_size) - addrstart = base; - waysize = cache->sets; /* diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index d4c34d757f0..7bebd044f2a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, if ((regs->sr & SR_IMASK) != SR_IMASK) local_irq_enable(); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -210,11 +210,11 @@ good_area: } if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 7f5810f5dfd..e3430e093d4 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, /* Not an IO address, so reenable interrupts */ local_irq_enable(); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * If we're in an interrupt or have no user @@ -200,11 +200,11 @@ good_area: if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index af32e17fa17..253986bd6bb 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -26,7 +26,6 @@ config SPARC select HAVE_DMA_API_DEBUG select HAVE_ARCH_JUMP_LABEL select HAVE_GENERIC_HARDIRQS - select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IRQ_SHOW select USE_GENERIC_SMP_HELPERS if SMP @@ -528,6 +527,23 @@ config PCI_DOMAINS config PCI_SYSCALL def_bool PCI +config PCIC_PCI + bool + depends on PCI && SPARC32 && !SPARC_LEON + default y + +config LEON_PCI + bool + depends on PCI && SPARC_LEON + default y + +config GRPCI2 + bool "GRPCI2 Host Bridge Support" + depends on LEON_PCI + default y + help + Say Y here to include the GRPCI2 Host Bridge Driver. 
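The three new Kconfig symbols split SPARC32 host-bridge support: PCIC_PCI covers the existing PCIC code on non-LEON machines, while LEON_PCI and GRPCI2 gate the LEON host code added below (the Makefile hunk later in this patch builds pcic.o, leon_pci.o and leon_pci_grpci2.o from them). A standalone sketch of the resulting compile-time selection; the CONFIG_* macros are set by hand here and the stub functions are hypothetical, not part of the patch:

/* Standalone sketch; CONFIG_* are plain macros here, not real Kconfig output. */
#include <stdio.h>

#define CONFIG_LEON_PCI 1               /* pretend .config chose the LEON host */

void pcic_stub(void)     { puts("PCIC host bridge (non-LEON sparc32)"); }
void leon_pci_stub(void) { puts("LEON host bridge (e.g. GRPCI2)"); }

static void sparc32_pci_init(void)
{
#if defined(CONFIG_PCIC_PCI)
        pcic_stub();
#elif defined(CONFIG_LEON_PCI)
        leon_pci_stub();
#else
        puts("no PCI host controller configured");
#endif
}

int main(void)
{
        sparc32_pci_init();
        return 0;
}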
+ source "drivers/pci/Kconfig" source "drivers/pcmcia/Kconfig" diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h index 482c79e2a41..7440915e86d 100644 --- a/arch/sparc/include/asm/floppy_32.h +++ b/arch/sparc/include/asm/floppy_32.h @@ -138,7 +138,7 @@ static unsigned char sun_82072_fd_inb(int port) return sun_fdc->data_82072; case 7: /* FD_DIR */ return sun_read_dir(); - }; + } panic("sun_82072_fd_inb: How did I get here?"); } @@ -161,7 +161,7 @@ static void sun_82072_fd_outb(unsigned char value, int port) case 4: /* FD_STATUS */ sun_fdc->status_82072 = value; break; - }; + } return; } @@ -186,7 +186,7 @@ static unsigned char sun_82077_fd_inb(int port) return sun_fdc->data_82077; case 7: /* FD_DIR */ return sun_read_dir(); - }; + } panic("sun_82077_fd_inb: How did I get here?"); } @@ -212,7 +212,7 @@ static void sun_82077_fd_outb(unsigned char value, int port) case 3: /* FD_TDR */ sun_fdc->tapectl_82077 = value; break; - }; + } return; } diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h index 6597ce874d7..bcef1f5a2a6 100644 --- a/arch/sparc/include/asm/floppy_64.h +++ b/arch/sparc/include/asm/floppy_64.h @@ -111,7 +111,7 @@ static unsigned char sun_82077_fd_inb(unsigned long port) case 7: /* FD_DIR */ /* XXX: Is DCL on 0x80 in sun4m? */ return sbus_readb(&sun_fdc->dir_82077); - }; + } panic("sun_82072_fd_inb: How did I get here?"); } @@ -135,7 +135,7 @@ static void sun_82077_fd_outb(unsigned char value, unsigned long port) case 4: /* FD_STATUS */ sbus_writeb(value, &sun_fdc->status_82077); break; - }; + } return; } diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h index d4d0711de0f..14848909e0d 100644 --- a/arch/sparc/include/asm/irqflags_32.h +++ b/arch/sparc/include/asm/irqflags_32.h @@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long); extern unsigned long arch_local_irq_save(void); extern void arch_local_irq_enable(void); -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void) return flags; } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { arch_local_irq_save(); } -static inline bool arch_irqs_disabled_flags(unsigned long flags) +static inline notrace bool arch_irqs_disabled_flags(unsigned long flags) { return (flags & PSR_PIL) != 0; } -static inline bool arch_irqs_disabled(void) +static inline notrace bool arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h index aab969c82c2..23cd27f6beb 100644 --- a/arch/sparc/include/asm/irqflags_64.h +++ b/arch/sparc/include/asm/irqflags_64.h @@ -14,7 +14,7 @@ #ifndef __ASSEMBLY__ -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { unsigned long flags; @@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void) return flags; } -static inline void arch_local_irq_restore(unsigned long flags) +static inline notrace void arch_local_irq_restore(unsigned long flags) { __asm__ __volatile__( "wrpr %0, %%pil" @@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags) ); } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { __asm__ 
__volatile__( "wrpr %0, %%pil" @@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void) ); } -static inline void arch_local_irq_enable(void) +static inline notrace void arch_local_irq_enable(void) { __asm__ __volatile__( "wrpr 0, %%pil" @@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void) ); } -static inline int arch_irqs_disabled_flags(unsigned long flags) +static inline notrace int arch_irqs_disabled_flags(unsigned long flags) { return (flags > 0); } -static inline int arch_irqs_disabled(void) +static inline notrace int arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } -static inline unsigned long arch_local_irq_save(void) +static inline notrace unsigned long arch_local_irq_save(void) { unsigned long flags, tmp; diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index 6bdaf1e43d2..a4e457f003e 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h @@ -318,6 +318,9 @@ struct device_node; extern unsigned int leon_build_device_irq(unsigned int real_irq, irq_flow_handler_t flow_handler, const char *name, int do_ack); +extern void leon_update_virq_handling(unsigned int virq, + irq_flow_handler_t flow_handler, + const char *name, int do_ack); extern void leon_clear_clock_irq(void); extern void leon_load_profile_irq(int cpu, unsigned int limit); extern void leon_init_timers(irq_handler_t counter_fn); diff --git a/arch/sparc/include/asm/leon_pci.h b/arch/sparc/include/asm/leon_pci.h new file mode 100644 index 00000000000..42b4b31a82f --- /dev/null +++ b/arch/sparc/include/asm/leon_pci.h @@ -0,0 +1,21 @@ +/* + * asm/leon_pci.h + * + * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom + */ + +#ifndef _ASM_LEON_PCI_H_ +#define _ASM_LEON_PCI_H_ + +/* PCI related definitions */ +struct leon_pci_info { + struct pci_ops *ops; + struct resource io_space; + struct resource mem_space; + int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin); +}; + +extern void leon_pci_init(struct platform_device *ofdev, + struct leon_pci_info *info); + +#endif /* _ASM_LEON_PCI_H_ */ diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h index e8c648741ed..99d9b9f577b 100644 --- a/arch/sparc/include/asm/mmzone.h +++ b/arch/sparc/include/asm/mmzone.h @@ -8,8 +8,6 @@ extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn) extern int numa_cpu_lookup_table[]; extern cpumask_t numa_cpumask_lookup_table[]; diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index 332ac9ab36b..02939abd356 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -42,12 +42,33 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif -struct device_node; -extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); - #endif /* __KERNEL__ */ +#ifndef CONFIG_LEON_PCI /* generic pci stuff */ #include <asm-generic/pci.h> +#else +/* + * On LEON PCI Memory space is mapped 1:1 with physical address space. + * + * I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses + * are converted into CPU addresses to virtual addresses that are mapped with + * MMU to the PCI Host PCI I/O space window which are translated to the low + * 64Kbytes by the Host controller. 
+ */ + +extern void +pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, + struct resource *res); + +extern void +pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, + struct pci_bus_region *region); + +static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +{ + return PCI_IRQ_NONE; +} +#endif #endif /* __SPARC_PCI_H */ diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 948b686ec08..2614d96141c 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -91,9 +91,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return PCI_IRQ_NONE; } -struct device_node; -extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); - #define HAVE_ARCH_PCI_RESOURCE_TO_USER extern void pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, diff --git a/arch/sparc/include/asm/pcic.h b/arch/sparc/include/asm/pcic.h index 7eb5d78f521..6676cbcc8b6 100644 --- a/arch/sparc/include/asm/pcic.h +++ b/arch/sparc/include/asm/pcic.h @@ -29,7 +29,7 @@ struct linux_pcic { int pcic_imdim; }; -#ifdef CONFIG_PCI +#ifdef CONFIG_PCIC_PCI extern int pcic_present(void); extern int pcic_probe(void); extern void pci_time_init(void); diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h index c7ad3fe2b25..b928b31424b 100644 --- a/arch/sparc/include/asm/ptrace.h +++ b/arch/sparc/include/asm/ptrace.h @@ -205,6 +205,7 @@ do { current_thread_info()->syscall_noerror = 1; \ } while (0) #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) #define instruction_pointer(regs) ((regs)->tpc) +#define instruction_pointer_set(regs, val) ((regs)->tpc = (val)) #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP]) #define regs_return_value(regs) ((regs)->u_regs[UREG_I0]) #ifdef CONFIG_SMP diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h index 47a7e862474..aba16092a81 100644 --- a/arch/sparc/include/asm/system_32.h +++ b/arch/sparc/include/asm/system_32.h @@ -220,7 +220,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int switch (size) { case 4: return xchg_u32(ptr, x); - }; + } __xchg_called_with_bad_pointer(); return x; } diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h index 3c96d3bb9f1..10bcabce97b 100644 --- a/arch/sparc/include/asm/system_64.h +++ b/arch/sparc/include/asm/system_64.h @@ -234,7 +234,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, return xchg32(ptr, x); case 8: return xchg64(ptr, x); - }; + } __xchg_called_with_bad_pointer(); return x; } diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 9cff2709a96..b90b4a1d070 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -73,7 +73,9 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o obj-y += dma.o -obj-$(CONFIG_SPARC32_PCI) += pcic.o +obj-$(CONFIG_PCIC_PCI) += pcic.o +obj-$(CONFIG_LEON_PCI) += leon_pci.o +obj-$(CONFIG_GRPCI2) += leon_pci_grpci2.o obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 1e34f29e58b..caef9deb586 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -123,7 +123,7 @@ static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) default: return -EINVAL; - }; + } return 0; } diff --git 
a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c index 8505e0ac78b..acf5151f3c1 100644 --- a/arch/sparc/kernel/auxio_32.c +++ b/arch/sparc/kernel/auxio_32.c @@ -101,7 +101,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off) break; default: panic("Can't set AUXIO register on this machine."); - }; + } spin_unlock_irqrestore(&auxio_lock, flags); } EXPORT_SYMBOL(set_auxio); diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index 668c7be5d36..5f450260981 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c @@ -664,7 +664,7 @@ static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 va case 0x0: bp->interleave = 16; break; - }; + } /* UK[10] is reserved, and UK[11] is not set for the SDRAM * bank size definition. diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 8341963f4c8..f445e98463e 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -229,7 +229,7 @@ real_irq_entry: #ifdef CONFIG_SMP .globl patchme_maybe_smp_msg - cmp %l7, 12 + cmp %l7, 11 patchme_maybe_smp_msg: bgu maybe_smp4m_msg nop @@ -293,7 +293,7 @@ maybe_smp4m_msg: WRITE_PAUSE wr %l4, PSR_ET, %psr WRITE_PAUSE - sll %o2, 28, %o2 ! shift for simpler checks below + srl %o3, 28, %o2 ! shift for simpler checks below maybe_smp4m_msg_check_single: andcc %o2, 0x1, %g0 beq,a maybe_smp4m_msg_check_mask @@ -1604,7 +1604,7 @@ restore_current: retl nop -#ifdef CONFIG_PCI +#ifdef CONFIG_PCIC_PCI #include <asm/pcic.h> .align 4 @@ -1650,7 +1650,7 @@ pcic_nmi_trap_patch: rd %psr, %l0 .word 0 -#endif /* CONFIG_PCI */ +#endif /* CONFIG_PCIC_PCI */ .globl flushw_all flushw_all: diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 2f538ac2e13..d17255a2bba 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -236,6 +236,21 @@ static unsigned int _leon_build_device_irq(struct platform_device *op, return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0); } +void leon_update_virq_handling(unsigned int virq, + irq_flow_handler_t flow_handler, + const char *name, int do_ack) +{ + unsigned long mask = (unsigned long)irq_get_chip_data(virq); + + mask &= ~LEON_DO_ACK_HW; + if (do_ack) + mask |= LEON_DO_ACK_HW; + + irq_set_chip_and_handler_name(virq, &leon_irq, + flow_handler, name); + irq_set_chip_data(virq, (void *)mask); +} + void __init leon_init_timers(irq_handler_t counter_fn) { int irq, eirq; @@ -361,6 +376,22 @@ void __init leon_init_timers(irq_handler_t counter_fn) prom_halt(); } +#ifdef CONFIG_SMP + { + unsigned long flags; + + /* + * In SMP, sun4m adds a IPI handler to IRQ trap handler that + * LEON never must take, sun4d and LEON overwrites the branch + * with a NOP. 
+ */ + local_irq_save(flags); + patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ + local_flush_cache_all(); + local_irq_restore(flags); + } +#endif + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, LEON3_GPTIMER_EN | LEON3_GPTIMER_RL | diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c new file mode 100644 index 00000000000..a8a9a275037 --- /dev/null +++ b/arch/sparc/kernel/leon_pci.c @@ -0,0 +1,253 @@ +/* + * leon_pci.c: LEON Host PCI support + * + * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom + * + * Code is partially derived from pcic.c + */ + +#include <linux/of_device.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <asm/leon.h> +#include <asm/leon_pci.h> + +/* The LEON architecture does not rely on a BIOS or bootloader to setup + * PCI for us. The Linux generic routines are used to setup resources, + * reset values of confuration-space registers settings ae preseved. + */ +void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) +{ + struct pci_bus *root_bus; + + root_bus = pci_scan_bus_parented(&ofdev->dev, 0, info->ops, info); + if (root_bus) { + root_bus->resource[0] = &info->io_space; + root_bus->resource[1] = &info->mem_space; + root_bus->resource[2] = NULL; + + /* Init all PCI devices into PCI tree */ + pci_bus_add_devices(root_bus); + + /* Setup IRQs of all devices using custom routines */ + pci_fixup_irqs(pci_common_swizzle, info->map_irq); + + /* Assign devices with resources */ + pci_assign_unassigned_resources(); + } +} + +/* PCI Memory and Prefetchable Memory is direct-mapped. However I/O Space is + * accessed through a Window which is translated to low 64KB in PCI space, the + * first 4KB is not used so 60KB is available. + * + * This function is used by generic code to translate resource addresses into + * PCI addresses. + */ +void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, + struct resource *res) +{ + struct leon_pci_info *info = dev->bus->sysdata; + + region->start = res->start; + region->end = res->end; + + if (res->flags & IORESOURCE_IO) { + region->start -= (info->io_space.start - 0x1000); + region->end -= (info->io_space.start - 0x1000); + } +} +EXPORT_SYMBOL(pcibios_resource_to_bus); + +/* see pcibios_resource_to_bus() comment */ +void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, + struct pci_bus_region *region) +{ + struct leon_pci_info *info = dev->bus->sysdata; + + res->start = region->start; + res->end = region->end; + + if (res->flags & IORESOURCE_IO) { + res->start += (info->io_space.start - 0x1000); + res->end += (info->io_space.start - 0x1000); + } +} +EXPORT_SYMBOL(pcibios_bus_to_resource); + +void __devinit pcibios_fixup_bus(struct pci_bus *pbus) +{ + struct leon_pci_info *info = pbus->sysdata; + struct pci_dev *dev; + int i, has_io, has_mem; + u16 cmd; + + /* Generic PCI bus probing sets these to point at + * &io{port,mem}_resouce which is wrong for us. + */ + if (pbus->self == NULL) { + pbus->resource[0] = &info->io_space; + pbus->resource[1] = &info->mem_space; + pbus->resource[2] = NULL; + } + + list_for_each_entry(dev, &pbus->devices, bus_list) { + /* + * We can not rely on that the bootloader has enabled I/O + * or memory access to PCI devices. Instead we enable it here + * if the device has BARs of respective type. 
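A reduced, standalone version of the decision implemented by the loop that follows: the PCI_COMMAND bits a device needs are derived from the resource types of its BARs. The IORESOURCE_*/PCI_COMMAND_* values are the standard ones; the helper itself is illustrative, not code from the patch:

#include <stdio.h>

#define IORESOURCE_IO           0x00000100u
#define IORESOURCE_MEM          0x00000200u
#define PCI_COMMAND_IO          0x1u
#define PCI_COMMAND_MEMORY      0x2u

/* Scan the BAR flags and report which decode bits should be enabled. */
static unsigned int wanted_command_bits(const unsigned int *bar_flags, int nbars)
{
        unsigned int cmd = 0;
        int i;

        for (i = 0; i < nbars; i++) {
                if (bar_flags[i] & IORESOURCE_IO)
                        cmd |= PCI_COMMAND_IO;
                else if (bar_flags[i] & IORESOURCE_MEM)
                        cmd |= PCI_COMMAND_MEMORY;
        }
        return cmd;
}

int main(void)
{
        /* One memory BAR and one I/O BAR -> both decode bits wanted. */
        unsigned int flags[2] = { IORESOURCE_MEM, IORESOURCE_IO };

        printf("command bits to enable: 0x%04x\n", wanted_command_bits(flags, 2));
        return 0;
}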
+ */ + has_io = has_mem = 0; + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + unsigned long f = dev->resource[i].flags; + if (f & IORESOURCE_IO) + has_io = 1; + else if (f & IORESOURCE_MEM) + has_mem = 1; + } + /* ROM BARs are mapped into 32-bit memory space */ + if (dev->resource[PCI_ROM_RESOURCE].end != 0) { + dev->resource[PCI_ROM_RESOURCE].flags |= + IORESOURCE_ROM_ENABLE; + has_mem = 1; + } + pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd); + if (has_io && !(cmd & PCI_COMMAND_IO)) { +#ifdef CONFIG_PCI_DEBUG + printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n", + pci_name(dev)); +#endif + cmd |= PCI_COMMAND_IO; + pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND, + cmd); + } + if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) { +#ifdef CONFIG_PCI_DEBUG + printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev" + "%s\n", pci_name(dev)); +#endif + cmd |= PCI_COMMAND_MEMORY; + pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND, + cmd); + } + } +} + +/* + * Other archs parse arguments here. + */ +char * __devinit pcibios_setup(char *str) +{ + return str; +} + +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + return res->start; +} + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + return pci_enable_resources(dev, mask); +} + +struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) +{ + /* + * Currently the OpenBoot nodes are not connected with the PCI device, + * this is because the LEON PROM does not create PCI nodes. Eventually + * this will change and the same approach as pcic.c can be used to + * match PROM nodes with pci devices. + */ + return NULL; +} +EXPORT_SYMBOL(pci_device_to_OF_node); + +void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) +{ +#ifdef CONFIG_PCI_DEBUG + printk(KERN_DEBUG "LEONPCI: Assigning IRQ %02d to %s\n", irq, + pci_name(dev)); +#endif + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); +} + +/* in/out routines taken from pcic.c + * + * This probably belongs here rather than ioport.c because + * we do not want this crud linked into SBus kernels. + * Also, think for a moment about likes of floppy.c that + * include architecture specific parts. They may want to redefine ins/outs. + * + * We do not use horrible macros here because we want to + * advance pointer by sizeof(size). + */ +void outsb(unsigned long addr, const void *src, unsigned long count) +{ + while (count) { + count -= 1; + outb(*(const char *)src, addr); + src += 1; + /* addr += 1; */ + } +} +EXPORT_SYMBOL(outsb); + +void outsw(unsigned long addr, const void *src, unsigned long count) +{ + while (count) { + count -= 2; + outw(*(const short *)src, addr); + src += 2; + /* addr += 2; */ + } +} +EXPORT_SYMBOL(outsw); + +void outsl(unsigned long addr, const void *src, unsigned long count) +{ + while (count) { + count -= 4; + outl(*(const long *)src, addr); + src += 4; + /* addr += 4; */ + } +} +EXPORT_SYMBOL(outsl); + +void insb(unsigned long addr, void *dst, unsigned long count) +{ + while (count) { + count -= 1; + *(unsigned char *)dst = inb(addr); + dst += 1; + /* addr += 1; */ + } +} +EXPORT_SYMBOL(insb); + +void insw(unsigned long addr, void *dst, unsigned long count) +{ + while (count) { + count -= 2; + *(unsigned short *)dst = inw(addr); + dst += 2; + /* addr += 2; */ + } +} +EXPORT_SYMBOL(insw); + +void insl(unsigned long addr, void *dst, unsigned long count) +{ + while (count) { + count -= 4; + /* + * XXX I am sure we are in for an unaligned trap here. 
+ */ + *(unsigned long *)dst = inl(addr); + dst += 4; + /* addr += 4; */ + } +} +EXPORT_SYMBOL(insl); diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c new file mode 100644 index 00000000000..44dc093ee33 --- /dev/null +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -0,0 +1,897 @@ +/* + * leon_pci_grpci2.c: GRPCI2 Host PCI driver + * + * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom + * + */ + +#include <linux/of_device.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <asm/io.h> +#include <asm/leon.h> +#include <asm/vaddrs.h> +#include <asm/sections.h> +#include <asm/leon_pci.h> + +#include "irq.h" + +struct grpci2_barcfg { + unsigned long pciadr; /* PCI Space Address */ + unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */ +}; + +/* Device Node Configuration options: + * - barcfgs : Custom Configuration of Host's 6 target BARs + * - irq_mask : Limit which PCI interrupts are enabled + * - do_reset : Force PCI Reset on startup + * + * barcfgs + * ======= + * + * Optional custom Target BAR configuration (see struct grpci2_barcfg). All + * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes) + * + * -1 means not configured (let host driver do default setup). + * + * [i*2+0] = PCI Address of BAR[i] on target interface + * [i*2+1] = Accessing PCI address of BAR[i] result in this AMBA address + * + * + * irq_mask + * ======== + * + * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default + * all are enabled. Use this when PCI interrupt pins are floating on PCB. + * int, len=4. + * bit0 = PCI INTA# + * bit1 = PCI INTB# + * bit2 = PCI INTC# + * bit3 = PCI INTD# + * + * + * reset + * ===== + * + * Force PCI reset on startup. 
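The barcfg layout documented above (six pairs of 32-bit words, [i*2] = PCI address, [i*2+1] = AHB address, ~0 meaning unset, total length 2*4*6 bytes) is consumed further down by grpci2_of_probe() with a plain memcpy()/memset(). A standalone sketch of parsing such a property; the helper and the sample values are illustrative only:

#include <stdio.h>
#include <string.h>

struct barcfg {
        unsigned int pciadr;    /* PCI address of target BAR[i] */
        unsigned int ahbadr;    /* AHB address it maps to */
};

static int parse_barcfg(const unsigned int *prop, int len_bytes,
                        struct barcfg cfg[6])
{
        int i;

        if (len_bytes != 2 * 4 * 6)     /* must describe all six target BARs */
                return -1;

        for (i = 0; i < 6; i++) {
                cfg[i].pciadr = prop[i * 2 + 0];
                cfg[i].ahbadr = prop[i * 2 + 1];
        }
        return 0;
}

int main(void)
{
        unsigned int prop[12];
        struct barcfg cfg[6];
        int i;

        memset(prop, 0xff, sizeof(prop));       /* everything "not configured" */
        prop[0] = 0x40000000;                   /* BAR0: PCI 0x40000000 ...    */
        prop[1] = 0x00000000;                   /* ... maps to AHB 0x00000000  */

        if (parse_barcfg(prop, sizeof(prop), cfg) == 0)
                for (i = 0; i < 6; i++)
                        printf("BAR%d: pci=0x%08x ahb=0x%08x\n",
                               i, cfg[i].pciadr, cfg[i].ahbadr);
        return 0;
}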
int, len=4 + */ + +/* Enable Debugging Configuration Space Access */ +#undef GRPCI2_DEBUG_CFGACCESS + +/* + * GRPCI2 APB Register MAP + */ +struct grpci2_regs { + unsigned int ctrl; /* 0x00 Control */ + unsigned int sts_cap; /* 0x04 Status / Capabilities */ + int res1; /* 0x08 */ + unsigned int io_map; /* 0x0C I/O Map address */ + unsigned int dma_ctrl; /* 0x10 DMA */ + unsigned int dma_bdbase; /* 0x14 DMA */ + int res2[2]; /* 0x18 */ + unsigned int bars[6]; /* 0x20 read-only PCI BARs */ + int res3[2]; /* 0x38 */ + unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */ + + /* PCI Trace Buffer Registers (OPTIONAL) */ + unsigned int t_ctrl; /* 0x80 */ + unsigned int t_cnt; /* 0x84 */ + unsigned int t_adpat; /* 0x88 */ + unsigned int t_admask; /* 0x8C */ + unsigned int t_sigpat; /* 0x90 */ + unsigned int t_sigmask; /* 0x94 */ + unsigned int t_adstate; /* 0x98 */ + unsigned int t_sigstate; /* 0x9C */ +}; + +#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a)))) +#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a))) + +#define CTRL_BUS_BIT 16 + +#define CTRL_RESET (1<<31) +#define CTRL_SI (1<<27) +#define CTRL_PE (1<<26) +#define CTRL_EI (1<<25) +#define CTRL_ER (1<<24) +#define CTRL_BUS (0xff<<CTRL_BUS_BIT) +#define CTRL_HOSTINT 0xf + +#define STS_HOST_BIT 31 +#define STS_MST_BIT 30 +#define STS_TAR_BIT 29 +#define STS_DMA_BIT 28 +#define STS_DI_BIT 27 +#define STS_HI_BIT 26 +#define STS_IRQMODE_BIT 24 +#define STS_TRACE_BIT 23 +#define STS_CFGERRVALID_BIT 20 +#define STS_CFGERR_BIT 19 +#define STS_INTTYPE_BIT 12 +#define STS_INTSTS_BIT 8 +#define STS_FDEPTH_BIT 2 +#define STS_FNUM_BIT 0 + +#define STS_HOST (1<<STS_HOST_BIT) +#define STS_MST (1<<STS_MST_BIT) +#define STS_TAR (1<<STS_TAR_BIT) +#define STS_DMA (1<<STS_DMA_BIT) +#define STS_DI (1<<STS_DI_BIT) +#define STS_HI (1<<STS_HI_BIT) +#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT) +#define STS_TRACE (1<<STS_TRACE_BIT) +#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT) +#define STS_CFGERR (1<<STS_CFGERR_BIT) +#define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT) +#define STS_INTSTS (0xf<<STS_INTSTS_BIT) +#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT) +#define STS_FNUM (0x3<<STS_FNUM_BIT) + +#define STS_ISYSERR (1<<17) +#define STS_IDMA (1<<16) +#define STS_IDMAERR (1<<15) +#define STS_IMSTABRT (1<<14) +#define STS_ITGTABRT (1<<13) +#define STS_IPARERR (1<<12) + +#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR) + +struct grpci2_bd_chan { + unsigned int ctrl; /* 0x00 DMA Control */ + unsigned int nchan; /* 0x04 Next DMA Channel Address */ + unsigned int nbd; /* 0x08 Next Data Descriptor in chan */ + unsigned int res; /* 0x0C Reserved */ +}; + +#define BD_CHAN_EN 0x80000000 +#define BD_CHAN_TYPE 0x00300000 +#define BD_CHAN_BDCNT 0x0000ffff +#define BD_CHAN_EN_BIT 31 +#define BD_CHAN_TYPE_BIT 20 +#define BD_CHAN_BDCNT_BIT 0 + +struct grpci2_bd_data { + unsigned int ctrl; /* 0x00 DMA Data Control */ + unsigned int pci_adr; /* 0x04 PCI Start Address */ + unsigned int ahb_adr; /* 0x08 AHB Start address */ + unsigned int next; /* 0x0C Next Data Descriptor in chan */ +}; + +#define BD_DATA_EN 0x80000000 +#define BD_DATA_IE 0x40000000 +#define BD_DATA_DR 0x20000000 +#define BD_DATA_TYPE 0x00300000 +#define BD_DATA_ER 0x00080000 +#define BD_DATA_LEN 0x0000ffff +#define BD_DATA_EN_BIT 31 +#define BD_DATA_IE_BIT 30 +#define BD_DATA_DR_BIT 29 +#define BD_DATA_TYPE_BIT 20 +#define BD_DATA_ER_BIT 19 +#define BD_DATA_LEN_BIT 0 + +/* GRPCI2 Capability */ +struct grpci2_cap_first { + unsigned int ctrl; + unsigned int pci2ahb_map[6]; + 
unsigned int ext2ahb_map; + unsigned int io_map; + unsigned int pcibar_size[6]; +}; +#define CAP9_CTRL_OFS 0 +#define CAP9_BAR_OFS 0x4 +#define CAP9_IOMAP_OFS 0x20 +#define CAP9_BARSIZE_OFS 0x24 + +struct grpci2_priv { + struct leon_pci_info info; /* must be on top of this structure */ + struct grpci2_regs *regs; + char irq; + char irq_mode; /* IRQ Mode from CAPSTS REG */ + char bt_enabled; + char do_reset; + char irq_mask; + u32 pciid; /* PCI ID of Host */ + unsigned char irq_map[4]; + + /* Virtual IRQ numbers */ + unsigned int virq_err; + unsigned int virq_dma; + + /* AHB PCI Windows */ + unsigned long pci_area; /* MEMORY */ + unsigned long pci_area_end; + unsigned long pci_io; /* I/O */ + unsigned long pci_conf; /* CONFIGURATION */ + unsigned long pci_conf_end; + unsigned long pci_io_va; + + struct grpci2_barcfg tgtbars[6]; +}; + +DEFINE_SPINLOCK(grpci2_dev_lock); +struct grpci2_priv *grpci2priv; + +int grpci2_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + struct grpci2_priv *priv = dev->bus->sysdata; + int irq_group; + + /* Use default IRQ decoding on PCI BUS0 according slot numbering */ + irq_group = slot & 0x3; + pin = ((pin - 1) + irq_group) & 0x3; + + return priv->irq_map[pin]; +} + +static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + unsigned int *pci_conf; + unsigned long flags; + u32 tmp; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0 && PCI_SLOT(devfn) != 0) + devfn += (0x8 * 6); + + /* Select bus */ + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | + (bus << 16)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); + + /* clear old status */ + REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); + + pci_conf = (unsigned int *) (priv->pci_conf | + (devfn << 8) | (where & 0xfc)); + tmp = LEON3_BYPASS_LOAD_PA(pci_conf); + + /* Wait until GRPCI2 signals that CFG access is done, it should be + * done instantaneously unless a DMA operation is ongoing... 
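grpci2_cfg_r32()/grpci2_cfg_w32() above select the target bus through CTRL bits [23:16] and then address the CONFIGURATION window with the device/function number and the dword-aligned register offset, remapping non-zero slots on bus 0 by six slots' worth of devfn. A standalone sketch of that address arithmetic; CONF_BASE is a made-up window address, not a value from the patch:

#include <stdio.h>

#define CONF_BASE       0xfff20000u     /* hypothetical CONFIGURATION window */

static unsigned int cfg_address(unsigned int bus, unsigned int devfn, int where)
{
        /* Mirror the driver's bus-0 remap: non-zero slots are offset by six
         * slots' worth of devfn before forming the window address. */
        if (bus == 0 && (devfn >> 3) != 0)
                devfn += 0x8 * 6;

        /* devfn selects bits [15:8], the register offset the low byte,
         * rounded down to a dword boundary as in the driver. */
        return CONF_BASE | (devfn << 8) | (where & 0xfc);
}

int main(void)
{
        /* bus 0, slot 1, function 0, register offset 0 (vendor/device ID) */
        printf("cfg addr: 0x%08x\n", cfg_address(0, 1 << 3, 0x00));
        return 0;
}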
+ */ + while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) + ; + + if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) { + *val = 0xffffffff; + } else { + /* Bus always little endian (unaffected by byte-swapping) */ + *val = flip_dword(tmp); + } + + return 0; +} + +static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + if (where & 0x1) + return -EINVAL; + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xffff & (v >> (8 * (where & 0x3))); + return ret; +} + +static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xff & (v >> (8 * (where & 3))); + + return ret; +} + +static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + unsigned int *pci_conf; + unsigned long flags; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0 && PCI_SLOT(devfn) != 0) + devfn += (0x8 * 6); + + /* Select bus */ + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | + (bus << 16)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); + + /* clear old status */ + REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); + + pci_conf = (unsigned int *) (priv->pci_conf | + (devfn << 8) | (where & 0xfc)); + LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val)); + + /* Wait until GRPCI2 signals that CFG access is done, it should be + * done instantaneously unless a DMA operation is ongoing... + */ + while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) + ; + + return 0; +} + +static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + if (where & 0x1) + return -EINVAL; + ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v); + if (ret) + return ret; + v = (v & ~(0xffff << (8 * (where & 0x3)))) | + ((0xffff & val) << (8 * (where & 0x3))); + return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + if (ret != 0) + return ret; + v = (v & ~(0xff << (8 * (where & 0x3)))) | + ((0xff & val) << (8 * (where & 0x3))); + return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +/* Read from Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. + */ +static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct grpci2_priv *priv = grpci2priv; + unsigned int busno = bus->number; + int ret; + + if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { + *val = ~0; + return 0; + } + + switch (size) { + case 1: + ret = grpci2_cfg_r8(priv, busno, devfn, where, val); + break; + case 2: + ret = grpci2_cfg_r16(priv, busno, devfn, where, val); + break; + case 4: + ret = grpci2_cfg_r32(priv, busno, devfn, where, val); + break; + default: + ret = -EINVAL; + break; + } + +#ifdef GRPCI2_DEBUG_CFGACCESS + printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x " + "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, + *val, size); +#endif + + return ret; +} + +/* Write to Configuration Space. 
When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. + */ +static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct grpci2_priv *priv = grpci2priv; + unsigned int busno = bus->number; + + if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) + return 0; + +#ifdef GRPCI2_DEBUG_CFGACCESS + printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d " + "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), + where, size, val); +#endif + + switch (size) { + default: + return -EINVAL; + case 1: + return grpci2_cfg_w8(priv, busno, devfn, where, val); + case 2: + return grpci2_cfg_w16(priv, busno, devfn, where, val); + case 4: + return grpci2_cfg_w32(priv, busno, devfn, where, val); + } +} + +static struct pci_ops grpci2_ops = { + .read = grpci2_read_config, + .write = grpci2_write_config, +}; + +/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration + * 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller + * this is not needed and the standard IRQ controller can be used. + */ + +static void grpci2_mask_irq(struct irq_data *data) +{ + unsigned long flags; + unsigned int irqidx; + struct grpci2_priv *priv = grpci2priv; + + irqidx = (unsigned int)data->chip_data - 1; + if (irqidx > 3) /* only mask PCI interrupts here */ + return; + + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); +} + +static void grpci2_unmask_irq(struct irq_data *data) +{ + unsigned long flags; + unsigned int irqidx; + struct grpci2_priv *priv = grpci2priv; + + irqidx = (unsigned int)data->chip_data - 1; + if (irqidx > 3) /* only unmask PCI interrupts here */ + return; + + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); +} + +static unsigned int grpci2_startup_irq(struct irq_data *data) +{ + grpci2_unmask_irq(data); + return 0; +} + +static void grpci2_shutdown_irq(struct irq_data *data) +{ + grpci2_mask_irq(data); +} + +static struct irq_chip grpci2_irq = { + .name = "grpci2", + .irq_startup = grpci2_startup_irq, + .irq_shutdown = grpci2_shutdown_irq, + .irq_mask = grpci2_mask_irq, + .irq_unmask = grpci2_unmask_irq, +}; + +/* Handle one or multiple IRQs from the PCI core */ +static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc) +{ + struct grpci2_priv *priv = grpci2priv; + int i, ack = 0; + unsigned int ctrl, sts_cap, pci_ints; + + ctrl = REGLOAD(priv->regs->ctrl); + sts_cap = REGLOAD(priv->regs->sts_cap); + + /* Error Interrupt? */ + if (sts_cap & STS_ERR_IRQ) { + generic_handle_irq(priv->virq_err); + ack = 1; + } + + /* PCI Interrupt? */ + pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT; + if (pci_ints) { + /* Call respective PCI Interrupt handler */ + for (i = 0; i < 4; i++) { + if (pci_ints & (1 << i)) + generic_handle_irq(priv->irq_map[i]); + } + ack = 1; + } + + /* + * Decode DMA Interrupt only when shared with Err and PCI INTX#, when + * the DMA is a unique IRQ the DMA interrupts doesn't end up here, they + * goes directly to DMA ISR. + */ + if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) { + generic_handle_irq(priv->virq_dma); + ack = 1; + } + + /* + * Call "first level" IRQ chip end-of-irq handler. 
It will ACK LEON IRQ + * Controller, this must be done after IRQ sources have been handled to + * avoid double IRQ generation + */ + if (ack) + desc->irq_data.chip->irq_eoi(&desc->irq_data); +} + +/* Create a virtual IRQ */ +static unsigned int grpci2_build_device_irq(unsigned int irq) +{ + unsigned int virq = 0, pil; + + pil = 1 << 8; + virq = irq_alloc(irq, pil); + if (virq == 0) + goto out; + + irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq, + "pcilvl"); + irq_set_chip_data(virq, (void *)irq); + +out: + return virq; +} + +void grpci2_hw_init(struct grpci2_priv *priv) +{ + u32 ahbadr, pciadr, bar_sz, capptr, io_map, data; + struct grpci2_regs *regs = priv->regs; + int i; + struct grpci2_barcfg *barcfg = priv->tgtbars; + + /* Reset any earlier setup */ + if (priv->do_reset) { + printk(KERN_INFO "GRPCI2: Resetting PCI bus\n"); + REGSTORE(regs->ctrl, CTRL_RESET); + ssleep(1); /* Wait for boards to settle */ + } + REGSTORE(regs->ctrl, 0); + REGSTORE(regs->sts_cap, ~0); /* Clear Status */ + REGSTORE(regs->dma_ctrl, 0); + REGSTORE(regs->dma_bdbase, 0); + + /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */ + REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff); + + /* set 1:1 mapping between AHB -> PCI memory space, for all Masters + * Each AHB master has it's own mapping registers. Max 16 AHB masters. + */ + for (i = 0; i < 16; i++) + REGSTORE(regs->ahbmst_map[i], priv->pci_area); + + /* Get the GRPCI2 Host PCI ID */ + grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); + + /* Get address to first (always defined) capability structure */ + grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); + + /* Enable/Disable Byte twisting */ + grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); + io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); + grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); + + /* Setup the Host's PCI Target BARs for other peripherals to access, + * and do DMA to the host's memory. The target BARs can be sized and + * enabled individually. + * + * User may set custom target BARs, but default is: + * The first BARs is used to map kernel low (DMA is part of normal + * region on sparc which is SRMMU_MAXMEM big) main memory 1:1 to the + * PCI bus, the other BARs are disabled. We assume that the first BAR + * is always available. + */ + for (i = 0; i < 6; i++) { + if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) { + /* Target BARs must have the proper alignment */ + ahbadr = barcfg[i].ahbadr; + pciadr = barcfg[i].pciadr; + bar_sz = ((pciadr - 1) & ~pciadr) + 1; + } else { + if (i == 0) { + /* Map main memory */ + bar_sz = 0xf0000008; /* 256MB prefetchable */ + ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN( + (unsigned long) &_end)); + pciadr = ahbadr; + } else { + bar_sz = 0; + ahbadr = 0; + pciadr = 0; + } + } + grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); + grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); + grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); + printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", + i, pciadr, ahbadr); + } + + /* set as bus master and enable pci memory responses */ + grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); + data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); + + /* Enable Error respone (CPU-TRAP) on illegal memory access. 
*/ + REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); +} + +static irqreturn_t grpci2_jump_interrupt(int irq, void *arg) +{ + printk(KERN_ERR "GRPCI2: Jump IRQ happened\n"); + return IRQ_NONE; +} + +/* Handle GRPCI2 Error Interrupt */ +static irqreturn_t grpci2_err_interrupt(int irq, void *arg) +{ + struct grpci2_priv *priv = arg; + struct grpci2_regs *regs = priv->regs; + unsigned int status; + + status = REGLOAD(regs->sts_cap); + if ((status & STS_ERR_IRQ) == 0) + return IRQ_NONE; + + if (status & STS_IPARERR) + printk(KERN_ERR "GRPCI2: Parity Error\n"); + + if (status & STS_ITGTABRT) + printk(KERN_ERR "GRPCI2: Target Abort\n"); + + if (status & STS_IMSTABRT) + printk(KERN_ERR "GRPCI2: Master Abort\n"); + + if (status & STS_ISYSERR) + printk(KERN_ERR "GRPCI2: System Error\n"); + + /* Clear handled INT TYPE IRQs */ + REGSTORE(regs->sts_cap, status & STS_ERR_IRQ); + + return IRQ_HANDLED; +} + +static int __devinit grpci2_of_probe(struct platform_device *ofdev) +{ + struct grpci2_regs *regs; + struct grpci2_priv *priv; + int err, i, len; + const int *tmp; + unsigned int capability; + + if (grpci2priv) { + printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n"); + return -ENODEV; + } + + if (ofdev->num_resources < 3) { + printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n"); + return -EIO; + } + + /* Find Device Address */ + regs = of_ioremap(&ofdev->resource[0], 0, + resource_size(&ofdev->resource[0]), + "grlib-grpci2 regs"); + if (regs == NULL) { + printk(KERN_ERR "GRPCI2: ioremap failed\n"); + return -EIO; + } + + /* + * Check that we're in Host Slot and that we can act as a Host Bridge + * and not only as target. + */ + capability = REGLOAD(regs->sts_cap); + if ((capability & STS_HOST) || !(capability & STS_MST)) { + printk(KERN_INFO "GRPCI2: not in host system slot\n"); + err = -EIO; + goto err1; + } + + priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL); + if (grpci2priv == NULL) { + err = -ENOMEM; + goto err1; + } + memset(grpci2priv, 0, sizeof(*grpci2priv)); + priv->regs = regs; + priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ + priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT; + + printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq); + + /* Byte twisting should be made configurable from kernel command line */ + priv->bt_enabled = 1; + + /* Let user do custom Target BAR assignment */ + tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len); + if (tmp && (len == 2*4*6)) + memcpy(priv->tgtbars, tmp, 2*4*6); + else + memset(priv->tgtbars, -1, 2*4*6); + + /* Limit IRQ unmasking in irq_mode 2 and 3 */ + tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len); + if (tmp && (len == 4)) + priv->do_reset = *tmp; + else + priv->irq_mask = 0xf; + + /* Optional PCI reset. 
Force PCI reset on startup */ + tmp = of_get_property(ofdev->dev.of_node, "reset", &len); + if (tmp && (len == 4)) + priv->do_reset = *tmp; + else + priv->do_reset = 0; + + /* Find PCI Memory, I/O and Configuration Space Windows */ + priv->pci_area = ofdev->resource[1].start; + priv->pci_area_end = ofdev->resource[1].end+1; + priv->pci_io = ofdev->resource[2].start; + priv->pci_conf = ofdev->resource[2].start + 0x10000; + priv->pci_conf_end = priv->pci_conf + 0x10000; + priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000); + if (!priv->pci_io_va) { + err = -EIO; + goto err2; + } + + printk(KERN_INFO + "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n" + " I/O SPACE [0x%08lx - 0x%08lx]\n" + " CONFIG SPACE [0x%08lx - 0x%08lx]\n", + priv->pci_area, priv->pci_area_end-1, + priv->pci_io, priv->pci_conf-1, + priv->pci_conf, priv->pci_conf_end-1); + + /* + * I/O Space resources in I/O Window mapped into Virtual Adr Space + * We never use low 4KB because some devices seem have problems using + * address 0. + */ + memset(&priv->info.io_space, 0, sizeof(struct resource)); + priv->info.io_space.name = "GRPCI2 PCI I/O Space"; + priv->info.io_space.start = priv->pci_io_va + 0x1000; + priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1; + priv->info.io_space.flags = IORESOURCE_IO; + + /* + * GRPCI2 has no prefetchable memory, map everything as + * non-prefetchable memory + */ + memset(&priv->info.mem_space, 0, sizeof(struct resource)); + priv->info.mem_space.name = "GRPCI2 PCI MEM Space"; + priv->info.mem_space.start = priv->pci_area; + priv->info.mem_space.end = priv->pci_area_end - 1; + priv->info.mem_space.flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) + goto err3; + if (request_resource(&ioport_resource, &priv->info.io_space) < 0) + goto err4; + + grpci2_hw_init(priv); + + /* + * Get PCI Interrupt to System IRQ mapping and setup IRQ handling + * Error IRQ always on PCI INTA. 
+ */ + if (priv->irq_mode < 2) { + /* All PCI interrupts are shared using the same system IRQ */ + leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq, + "pcilvl", 0); + + priv->irq_map[0] = grpci2_build_device_irq(1); + priv->irq_map[1] = grpci2_build_device_irq(2); + priv->irq_map[2] = grpci2_build_device_irq(3); + priv->irq_map[3] = grpci2_build_device_irq(4); + + priv->virq_err = grpci2_build_device_irq(5); + if (priv->irq_mode & 1) + priv->virq_dma = ofdev->archdata.irqs[1]; + else + priv->virq_dma = grpci2_build_device_irq(6); + + /* Enable IRQs on LEON IRQ controller */ + err = request_irq(priv->irq, grpci2_jump_interrupt, 0, + "GRPCI2_JUMP", priv); + if (err) + printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n"); + } else { + /* All PCI interrupts have an unique IRQ interrupt */ + for (i = 0; i < 4; i++) { + /* Make LEON IRQ layer handle level IRQ by acking */ + leon_update_virq_handling(ofdev->archdata.irqs[i], + handle_fasteoi_irq, "pcilvl", + 1); + priv->irq_map[i] = ofdev->archdata.irqs[i]; + } + priv->virq_err = priv->irq_map[0]; + if (priv->irq_mode & 1) + priv->virq_dma = ofdev->archdata.irqs[4]; + else + priv->virq_dma = priv->irq_map[0]; + + /* Unmask all PCI interrupts, request_irq will not do that */ + REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf)); + } + + /* Setup IRQ handler for non-configuration space access errors */ + err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED, + "GRPCI2_ERR", priv); + if (err) { + printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err); + goto err5; + } + + /* + * Enable Error Interrupts. PCI interrupts are unmasked once request_irq + * is called by the PCI Device drivers + */ + REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI); + + /* Init common layer and scan buses */ + priv->info.ops = &grpci2_ops; + priv->info.map_irq = grpci2_map_irq; + leon_pci_init(ofdev, &priv->info); + + return 0; + +err5: + release_resource(&priv->info.io_space); +err4: + release_resource(&priv->info.mem_space); +err3: + err = -ENOMEM; + iounmap((void *)priv->pci_io_va); +err2: + kfree(priv); +err1: + of_iounmap(&ofdev->resource[0], regs, + resource_size(&ofdev->resource[0])); + return err; +} + +static struct of_device_id grpci2_of_match[] = { + { + .name = "GAISLER_GRPCI2", + }, + { + .name = "01_07c", + }, + {}, +}; + +static struct platform_driver grpci2_of_driver = { + .driver = { + .name = "grpci2", + .owner = THIS_MODULE, + .of_match_table = grpci2_of_match, + }, + .probe = grpci2_of_probe, +}; + +static int __init grpci2_init(void) +{ + return platform_driver_register(&grpci2_of_driver); +} + +subsys_initcall(grpci2_init); diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index 8d348c474a2..99ba5baa949 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -214,7 +214,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, me->name, (int) (ELF_R_TYPE(rel[i].r_info) & 0xff)); return -ENOEXEC; - }; + } } return 0; } diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 713dc91020a..80a87e2a3e7 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -284,7 +284,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->sysdata = node; dev->dev.parent = bus->bridge; dev->dev.bus = &pci_bus_type; - dev->dev.of_node = node; + dev->dev.of_node = of_node_get(node); dev->devfn = devfn; dev->multifunction = 0; /* maybe a lie? 
*/ set_pcie_port_type(dev); @@ -1021,12 +1021,6 @@ void arch_teardown_msi_irq(unsigned int irq) } #endif /* !(CONFIG_PCI_MSI) */ -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - return pdev->dev.of_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) { struct pci_dev *ali_isa_bridge; diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index 6e3874b6448..a6895987fb7 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -281,7 +281,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, case 4: *value = ret & 0xffffffff; break; - }; + } return PCIBIOS_SUCCESSFUL; @@ -456,7 +456,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm) default: break; - }; + } } if (!saw_io || !saw_mem) { diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 283fbc329a4..f030b02eddd 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -264,7 +264,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, default: type_string = "ECC Error"; break; - }; + } printk("%s: IOMMU Error, type[%s]\n", pbm->name, type_string); @@ -319,7 +319,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, default: type_string = "ECC Error"; break; - }; + } printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) " "sz(%dK) vpg(%08lx)]\n", pbm->name, i, type_string, @@ -1328,7 +1328,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, default: chipset_name = "SCHIZO"; break; - }; + } /* For SCHIZO, three OBP regs: * 1) PBM controller regs diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 948601a066f..a19f0419547 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -885,14 +885,6 @@ int pcibios_assign_resource(struct pci_dev *pdev, int resource) return -ENXIO; } -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - struct pcidev_cookie *pc = pdev->sysdata; - - return pc->prom_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - /* * This probably belongs here rather than ioport.c because * we do not want this crud linked into SBus kernels. 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2cb0e1c001e..62a034318b1 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -246,6 +246,20 @@ static const cache_map_t ultra3_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu ultra3_pmu = { @@ -361,6 +375,20 @@ static const cache_map_t niagara1_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara1_pmu = { @@ -473,6 +501,20 @@ static const cache_map_t niagara2_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara2_pmu = { @@ -1277,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, if (!sparc_perf_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) sparc_pmu_stop(event, 0); } diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c index 570b98f6e89..40e4936bd47 100644 --- a/arch/sparc/kernel/prom_irqtrans.c +++ b/arch/sparc/kernel/prom_irqtrans.c @@ -694,7 +694,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp, case 3: iclr = reg_base + SYSIO_ICLR_SLOT3; break; - }; + } iclr += ((unsigned long)sbus_level - 1UL) * 8UL; } diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c index fe2af66bb19..8db48e808ed 100644 --- a/arch/sparc/kernel/psycho_common.c +++ b/arch/sparc/kernel/psycho_common.c @@ -228,7 +228,7 @@ void psycho_check_iommu_error(struct pci_pbm_info *pbm, default: type_str = "ECC Error"; break; - }; + } printk(KERN_ERR "%s: IOMMU Error, type[%s]\n", pbm->name, type_str); diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index 2ca32d13abc..a161b9c77f0 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c @@ -97,7 +97,7 @@ void sbus_set_sbus64(struct device *dev, int bursts) default: return; - }; + } val = upa_readq(cfg_reg); if (val & (1UL << 14UL)) { @@ -244,7 +244,7 @@ static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino) case 3: iclr = reg_base + SYSIO_ICLR_SLOT3; break; - }; + } iclr += ((unsigned long)sbus_level - 1UL) * 8UL; } diff --git a/arch/sparc/kernel/setup_32.c 
b/arch/sparc/kernel/setup_32.c index 3249d3f3234..d26e1f6c717 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -267,7 +267,7 @@ void __init setup_arch(char **cmdline_p) default: printk("UNKNOWN!\n"); break; - }; + } #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index f3b6850cc8d..c4dd0999da8 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -209,7 +209,7 @@ void __init per_cpu_patch(void) default: prom_printf("Unknown cpu type, halting.\n"); prom_halt(); - }; + } *(unsigned int *) (addr + 0) = insns[0]; wmb(); diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index d5b3958be0b..21b125341bf 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -114,7 +114,7 @@ void __init smp_cpus_done(unsigned int max_cpus) printk("UNKNOWN!\n"); BUG(); break; - }; + } } void cpu_panic(void) @@ -374,7 +374,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) printk("UNKNOWN!\n"); BUG(); break; - }; + } } /* Set this up early so that things like the scheduler can init @@ -447,7 +447,7 @@ int __cpuinit __cpu_up(unsigned int cpu) printk("UNKNOWN!\n"); BUG(); break; - }; + } if (!ret) { cpumask_set_cpu(cpu, &smp_commenced_mask); diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c index a9ea60eb2c1..1d13c5bda0b 100644 --- a/arch/sparc/kernel/sun4d_irq.c +++ b/arch/sparc/kernel/sun4d_irq.c @@ -103,10 +103,9 @@ static void sun4d_sbus_handler_irq(int sbusl) sbil = (sbusl << 2); /* Loop for each pending SBI */ - for (sbino = 0; bus_mask; sbino++) { + for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) { unsigned int idx, mask; - bus_mask >>= 1; if (!(bus_mask & 1)) continue; /* XXX This seems to ACK the irq twice. 
acquire_sbi() @@ -118,19 +117,16 @@ static void sun4d_sbus_handler_irq(int sbusl) mask &= (0xf << sbil); /* Loop for each pending SBI slot */ - idx = 0; slot = (1 << sbil); - while (mask != 0) { + for (idx = 0; mask != 0; idx++, slot <<= 1) { unsigned int pil; struct irq_bucket *p; - idx++; - slot <<= 1; if (!(mask & slot)) continue; mask &= ~slot; - pil = sun4d_encode_irq(sbino, sbil, idx); + pil = sun4d_encode_irq(sbino, sbusl, idx); p = irq_map[pil]; while (p) { @@ -218,10 +214,10 @@ static void sun4d_unmask_irq(struct irq_data *data) #ifdef CONFIG_SMP spin_lock_irqsave(&sun4d_imsk_lock, flags); - cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | ~(1 << real_irq)); + cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq)); spin_unlock_irqrestore(&sun4d_imsk_lock, flags); #else - cc_set_imsk(cc_get_imsk() | ~(1 << real_irq)); + cc_set_imsk(cc_get_imsk() & ~(1 << real_irq)); #endif } @@ -299,26 +295,68 @@ static void __init sun4d_load_profile_irqs(void) } } +unsigned int _sun4d_build_device_irq(unsigned int real_irq, + unsigned int pil, + unsigned int board) +{ + struct sun4d_handler_data *handler_data; + unsigned int irq; + + irq = irq_alloc(real_irq, pil); + if (irq == 0) { + prom_printf("IRQ: allocate for %d %d %d failed\n", + real_irq, pil, board); + goto err_out; + } + + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) + goto err_out; + + handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { + prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n"); + prom_halt(); + } + handler_data->cpuid = board_to_cpu[board]; + handler_data->real_irq = real_irq; + irq_set_chip_and_handler_name(irq, &sun4d_irq, + handle_level_irq, "level"); + irq_set_handler_data(irq, handler_data); + +err_out: + return irq; +} + + + unsigned int sun4d_build_device_irq(struct platform_device *op, unsigned int real_irq) { struct device_node *dp = op->dev.of_node; - struct device_node *io_unit, *sbi = dp->parent; + struct device_node *board_parent, *bus = dp->parent; + char *bus_connection; const struct linux_prom_registers *regs; - struct sun4d_handler_data *handler_data; unsigned int pil; unsigned int irq; int board, slot; int sbusl; - irq = 0; - while (sbi) { - if (!strcmp(sbi->name, "sbi")) + irq = real_irq; + while (bus) { + if (!strcmp(bus->name, "sbi")) { + bus_connection = "io-unit"; + break; + } + + if (!strcmp(bus->name, "bootbus")) { + bus_connection = "cpu-unit"; break; + } - sbi = sbi->parent; + bus = bus->parent; } - if (!sbi) + if (!bus) goto err_out; regs = of_get_property(dp, "reg", NULL); @@ -328,17 +366,19 @@ unsigned int sun4d_build_device_irq(struct platform_device *op, slot = regs->which_io; /* - * If SBI's parent is not io-unit or the io-unit lacks - * a "board#" property, something is very wrong. + * If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit + * lacks a "board#" property, something is very wrong. 
*/ - if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) { - printk("%s: Error, parent is not io-unit.\n", sbi->full_name); + if (!bus->parent || strcmp(bus->parent->name, bus_connection)) { + printk(KERN_ERR "%s: Error, parent is not %s.\n", + bus->full_name, bus_connection); goto err_out; } - io_unit = sbi->parent; - board = of_getintprop_default(io_unit, "board#", -1); + board_parent = bus->parent; + board = of_getintprop_default(board_parent, "board#", -1); if (board == -1) { - printk("%s: Error, lacks board# property.\n", io_unit->full_name); + printk(KERN_ERR "%s: Error, lacks board# property.\n", + board_parent->full_name); goto err_out; } @@ -348,29 +388,17 @@ unsigned int sun4d_build_device_irq(struct platform_device *op, else pil = real_irq; - irq = irq_alloc(real_irq, pil); - if (irq == 0) - goto err_out; - - handler_data = irq_get_handler_data(irq); - if (unlikely(handler_data)) - goto err_out; - - handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC); - if (unlikely(!handler_data)) { - prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n"); - prom_halt(); - } - handler_data->cpuid = board_to_cpu[board]; - handler_data->real_irq = real_irq; - irq_set_chip_and_handler_name(irq, &sun4d_irq, - handle_level_irq, "level"); - irq_set_handler_data(irq, handler_data); - + irq = _sun4d_build_device_irq(real_irq, pil, board); err_out: - return real_irq; + return irq; } +unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq) +{ + return _sun4d_build_device_irq(real_irq, real_irq, board); +} + + static void __init sun4d_fixup_trap_table(void) { #ifdef CONFIG_SMP @@ -402,6 +430,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn) unsigned int irq; const u32 *reg; int err; + int board; dp = of_find_node_by_name(NULL, "cpu-unit"); if (!dp) { @@ -414,12 +443,19 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn) * bootbus. 
*/ reg = of_get_property(dp, "reg", NULL); - of_node_put(dp); if (!reg) { prom_printf("sun4d_init_timers: No reg property\n"); prom_halt(); } + board = of_getintprop_default(dp, "board#", -1); + if (board == -1) { + prom_printf("sun4d_init_timers: No board# property on cpu-unit\n"); + prom_halt(); + } + + of_node_put(dp); + res.start = reg[1]; res.end = reg[2] - 1; res.flags = reg[0] & 0xff; @@ -434,7 +470,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn) master_l10_counter = &sun4d_timers->l10_cur_count; - irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ); + irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ); err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL); if (err) { prom_printf("sun4d_init_timers: request_irq() failed with %d\n", diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index 6db18c6927f..170cd8e8eb2 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c @@ -109,7 +109,7 @@ asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compa default: return -ENOSYS; - }; + } return -ENOSYS; } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 96082d30def..908b47a5ee2 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -460,7 +460,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second default: err = -ENOSYS; goto out; - }; + } } if (call <= MSGCTL) { switch (call) { @@ -481,7 +481,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second default: err = -ENOSYS; goto out; - }; + } } if (call <= SHMCTL) { switch (call) { @@ -507,7 +507,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second default: err = -ENOSYS; goto out; - }; + } } else { err = -ENOSYS; } diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 2b8d54b2d85..1db6b18964d 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -708,7 +708,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode, case CLOCK_EVT_MODE_UNUSED: WARN_ON(1); break; - }; + } } static struct clock_event_device sparc64_clockevent = { diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 1ed547bd850..0cbdaa41cd1 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -1804,7 +1804,7 @@ static const char *sun4v_err_type_to_str(u32 type) return "warning resumable"; default: return "unknown"; - }; + } } static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 4491f4cb269..7efbb2f9e77 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) unsigned long addr = compute_effective_address(regs, insn); int err; - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), @@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) } addr = compute_effective_address(regs, insn); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch(dir) { case load: err = 
do_int_load(fetch_reg_addr(((insn>>25)&0x1f), diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index c752c4c479b..35cff1673aa 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -211,7 +211,7 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, default: BUG(); break; - }; + } } return __do_int_store(dst_addr, size, src_val, asi); } @@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: @@ -328,7 +328,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) case ASI_SNFL: asi &= ~0x08; break; - }; + } switch (dir) { case load: reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); @@ -351,7 +351,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) default: BUG(); break; - }; + } *reg_addr = val_in; } break; @@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs) int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); @@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) int asi = decode_asi(insn, regs); int flag = (freg < 32) ? FPRS_DL : FPRS_DU; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); save_and_clear_fpu(); current_thread_info()->xfsr[0] &= ~0x1c000; @@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); maybe_flush_windows(0, 0, rd, from_kernel); reg = fetch_reg_addr(rd, regs); @@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { @@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c index 531d54fc982..489fc15f319 100644 --- a/arch/sparc/kernel/us2e_cpufreq.c +++ b/arch/sparc/kernel/us2e_cpufreq.c @@ -176,7 +176,7 @@ static unsigned long index_to_estar_mode(unsigned int index) default: BUG(); - }; + } } static unsigned long index_to_divisor(unsigned int index) @@ -199,7 +199,7 @@ static unsigned long index_to_divisor(unsigned int index) default: BUG(); - }; + } } static unsigned long estar_to_divisor(unsigned long estar) 
@@ -224,7 +224,7 @@ static unsigned long estar_to_divisor(unsigned long estar) break; default: BUG(); - }; + } return ret; } diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c index 9a8ceb70083..eb1624b931d 100644 --- a/arch/sparc/kernel/us3_cpufreq.c +++ b/arch/sparc/kernel/us3_cpufreq.c @@ -71,7 +71,7 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg break; default: BUG(); - }; + } return ret; } @@ -125,7 +125,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) default: BUG(); - }; + } reg = read_safari_cfg(); diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index aa6ac70d4fd..29348ea139c 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -363,7 +363,7 @@ static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt) default: return handshake_failure(vio); - }; + } } static int process_attr(struct vio_driver_state *vio, void *pkt) diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 9dfd2ebcb15..32b626c9d81 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -334,7 +334,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) left = edge32_tab_l[(rs1 >> 2) & 0x1].left; right = edge32_tab_l[(rs2 >> 2) & 0x1].right; break; - }; + } if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) rd_val = right & left; @@ -360,7 +360,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); regs->tstate = tstate | (ccr << 32UL); } - }; + } } static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) @@ -392,7 +392,7 @@ static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) case ARRAY32_OPF: rd_val <<= 2; - }; + } store_reg(regs, rd_val, RD(insn)); } @@ -577,7 +577,7 @@ static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) *fpd_regaddr(f, RD(insn)) = rd_val; break; } - }; + } } static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) @@ -693,7 +693,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) *fpd_regaddr(f, RD(insn)) = rd_val; break; } - }; + } } static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) @@ -786,7 +786,7 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) rd_val |= 1 << i; } break; - }; + } maybe_flush_windows(0, 0, RD(insn), 0); store_reg(regs, rd_val, RD(insn)); @@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) BUG_ON(regs->tstate & TSTATE_PRIV); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; @@ -885,7 +885,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) case BSHUFFLE_OPF: bshuffle(regs, insn); break; - }; + } regs->tpc = regs->tnpc; regs->tnpc += 4; diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c index a3fccde894e..aa4d55b0bdf 100644 --- a/arch/sparc/math-emu/math_32.c +++ b/arch/sparc/math-emu/math_32.c @@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) int retcode = 0; /* assume all succeed */ unsigned long insn; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); #ifdef DEBUG_MATHEMU printk("In do_mathemu()... 
pc is %08lx\n", regs->pc); diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c index 56d2c44747b..e575bd2fe38 100644 --- a/arch/sparc/math-emu/math_64.c +++ b/arch/sparc/math-emu/math_64.c @@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f) if (tstate & TSTATE_PRIV) die_if_kernel("unfinished/unimplemented FPop from kernel", regs); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index b10ac4d6237..aa1c1b1ce5c 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -135,7 +135,7 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc, default: break; - }; + } memset(®s, 0, sizeof (regs)); regs.pc = pc; @@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, if (in_atomic() || !mm) goto no_context; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); down_read(&mm->mmap_sem); @@ -301,12 +301,10 @@ good_area: } if (fault & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); return; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index f92ce56a8b2..504c0622f72 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) if (in_atomic() || !mm) goto intr_or_no_mm; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); if (!down_read_trylock(&mm->mmap_sem)) { if ((regs->tstate & TSTATE_PRIV) && @@ -433,12 +433,10 @@ good_area: } if (fault & VM_FAULT_MAJOR) { current->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { current->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, - regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index ca217327e8d..7b00de61c5f 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -340,7 +340,7 @@ void __init paging_init(void) prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); prom_printf("paging_init: Halting...\n"); prom_halt(); - }; + } /* Initialize the protection map with non-constant, MMU dependent values. 
*/ protection_map[0] = PAGE_NONE; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index e10cd03fab8..3fd8e18bed8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1625,7 +1625,7 @@ static void __init sun4v_ktsb_init(void) ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; break; - }; + } ktsb_descr[0].assoc = 1; ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; @@ -2266,7 +2266,7 @@ unsigned long pte_sz_bits(unsigned long sz) return _PAGE_SZ512K_4V; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4V; - }; + } } else { switch (sz) { case 8 * 1024: @@ -2278,7 +2278,7 @@ unsigned long pte_sz_bits(unsigned long sz) return _PAGE_SZ512K_4U; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4U; - }; + } } } diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c index c0e01297e64..e485a680499 100644 --- a/arch/sparc/mm/leon_mm.c +++ b/arch/sparc/mm/leon_mm.c @@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs) * Leon2 and Leon3 differ in their way of telling cache information * */ -int leon_flush_needed(void) +int __init leon_flush_needed(void) { int flush_needed = -1; unsigned int ssize, sets; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index fe09fd8be69..cbef74e793b 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1665,7 +1665,7 @@ static void __init init_swift(void) default: srmmu_modtype = Swift_ok; break; - }; + } BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM); @@ -2069,7 +2069,7 @@ static void __init get_srmmu_type(void) /* Some other Cypress revision, assume a 605. */ init_cypress_605(mod_rev); break; - }; + } return; } diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index a2350b5e68a..1cf4f198709 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -318,7 +318,7 @@ void __init sun4c_probe_vac(void) prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n", sun4c_vacinfo.linesize); prom_halt(); - }; + } sun4c_flush_all(); sun4c_enable_vac(); @@ -364,7 +364,7 @@ static void __init patch_kernel_fault_handler(void) prom_printf("Unhandled number of segmaps: %d\n", num_segmaps); prom_halt(); - }; + } switch (num_contexts) { case 8: /* Default, nothing to do. 
*/ @@ -377,7 +377,7 @@ static void __init patch_kernel_fault_handler(void) prom_printf("Unhandled number of contexts: %d\n", num_contexts); prom_halt(); - }; + } if (sun4c_vacinfo.do_hwflushes != 0) { PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1); @@ -394,7 +394,7 @@ static void __init patch_kernel_fault_handler(void) prom_printf("Impossible VAC linesize %d, halting...\n", sun4c_vacinfo.linesize); prom_halt(); - }; + } } } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 94846151349..a5f51b22fcb 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -180,7 +180,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n", current->comm, current->pid, tsb_bytes); do_exit(SIGSEGV); - }; + } tte |= pte_sz_bits(page_sz); if (tlb_type == cheetah_plus || tlb_type == hypervisor) { @@ -215,7 +215,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign #endif default: BUG(); - }; + } hp->assoc = 1; hp->num_ttes = tsb_bytes / 16; hp->ctx_idx = 0; @@ -230,7 +230,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign #endif default: BUG(); - }; + } hp->tsb_base = tsb_paddr; hp->resv = 0; } diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c index b05e3db5fa6..a00f47b16c1 100644 --- a/arch/sparc/prom/console_32.c +++ b/arch/sparc/prom/console_32.c @@ -38,7 +38,7 @@ static int prom_nbputchar(const char *buf) break; default: break; - }; + } restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return i; /* Ugh, we could spin forever on unsupported proms ;( */ diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c index 0a601b30063..26c64cea3c9 100644 --- a/arch/sparc/prom/init_32.c +++ b/arch/sparc/prom/init_32.c @@ -53,7 +53,7 @@ void __init prom_init(struct linux_romvec *rp) romvec->pv_romvers); prom_halt(); break; - }; + } prom_rev = romvec->pv_plugin_revision; prom_prev = romvec->pv_printrev; diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c index 97c44c9ddbc..0da8256cf76 100644 --- a/arch/sparc/prom/mp.c +++ b/arch/sparc/prom/mp.c @@ -35,7 +35,7 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha case PROM_V3: ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc); break; - }; + } restore_current(); spin_unlock_irqrestore(&prom_lock, flags); diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h index c6344c4f32a..9d3dbce8f95 100644 --- a/arch/tile/include/asm/mmzone.h +++ b/arch/tile/include/asm/mmzone.h @@ -40,17 +40,6 @@ static inline int pfn_to_nid(unsigned long pfn) return highbits_to_node[__pfn_to_highbits(pfn)]; } -/* - * Following are macros that each numa implmentation must define. 
- */ - -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) \ -({ \ - pg_data_t *__pgdat = NODE_DATA(nid); \ - __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ -}) - #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) static inline int pfn_valid(int pfn) diff --git a/arch/um/include/asm/percpu.h b/arch/um/include/asm/percpu.h new file mode 100644 index 00000000000..efe7508d8ab --- /dev/null +++ b/arch/um/include/asm/percpu.h @@ -0,0 +1,6 @@ +#ifndef __UM_PERCPU_H +#define __UM_PERCPU_H + +#include <asm-generic/percpu.h> + +#endif /* __UM_PERCPU_H */ diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile index b1da91c1b20..15587ed9a36 100644 --- a/arch/um/sys-i386/Makefile +++ b/arch/um/sys-i386/Makefile @@ -8,7 +8,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ obj-$(CONFIG_BINFMT_ELF) += elfcore.o -subarch-obj-y = lib/semaphore_32.o lib/string_32.o +subarch-obj-y = lib/rwsem.o lib/string_32.o subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o subarch-obj-$(CONFIG_MODULES) += kernel/module.o diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile index c1ea9eb0446..61fc99a42e1 100644 --- a/arch/um/sys-x86_64/Makefile +++ b/arch/um/sys-x86_64/Makefile @@ -9,7 +9,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \ sysrq.o ksyms.o tls.o subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o \ - lib/rwsem_64.o + lib/rwsem.o subarch-obj-$(CONFIG_MODULES) += kernel/module.o ldt-y = ../sys-i386/ldt.o diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index d3a303246c9..e57dcce9bfd 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -231,10 +231,6 @@ config PUV3_PWM help Enable support for NB0916 PWM controllers -config PUV3_RTC - tristate "PKUnity v3 RTC Support" - depends on !ARCH_FPGA - if PUV3_NB0916 menu "PKUnity NetBook-0916 Features" diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile index 76a8beec7d0..6af4bc415f2 100644 --- a/arch/unicore32/Makefile +++ b/arch/unicore32/Makefile @@ -40,42 +40,10 @@ core-y += arch/unicore32/mm/ libs-y += arch/unicore32/lib/ -ASM_GENERATED_DIR := $(srctree)/arch/unicore32/include/generated -LINUXINCLUDE += -I$(ASM_GENERATED_DIR) - -ASM_GENERIC_HEADERS := atomic.h auxvec.h -ASM_GENERIC_HEADERS += bitsperlong.h bug.h bugs.h -ASM_GENERIC_HEADERS += cputime.h current.h -ASM_GENERIC_HEADERS += device.h div64.h -ASM_GENERIC_HEADERS += emergency-restart.h errno.h -ASM_GENERIC_HEADERS += fb.h fcntl.h ftrace.h futex.h -ASM_GENERIC_HEADERS += hardirq.h hw_irq.h -ASM_GENERIC_HEADERS += ioctl.h ioctls.h ipcbuf.h irq_regs.h -ASM_GENERIC_HEADERS += kdebug.h kmap_types.h -ASM_GENERIC_HEADERS += local.h -ASM_GENERIC_HEADERS += mman.h module.h msgbuf.h -ASM_GENERIC_HEADERS += param.h parport.h percpu.h poll.h posix_types.h -ASM_GENERIC_HEADERS += resource.h -ASM_GENERIC_HEADERS += scatterlist.h sections.h segment.h sembuf.h serial.h -ASM_GENERIC_HEADERS += setup.h shmbuf.h shmparam.h -ASM_GENERIC_HEADERS += siginfo.h signal.h sizes.h -ASM_GENERIC_HEADERS += socket.h sockios.h stat.h statfs.h swab.h syscalls.h -ASM_GENERIC_HEADERS += termbits.h termios.h topology.h types.h -ASM_GENERIC_HEADERS += ucontext.h unaligned.h user.h -ASM_GENERIC_HEADERS += vga.h -ASM_GENERIC_HEADERS += xor.h - -archprepare: -ifneq ($(ASM_GENERATED_DIR), $(wildcard $(ASM_GENERATED_DIR))) - $(Q)mkdir -p $(ASM_GENERATED_DIR)/asm - $(Q)$(foreach a, $(ASM_GENERIC_HEADERS), \ - echo '#include <asm-generic/$a>' \ - > 
$(ASM_GENERATED_DIR)/asm/$a; ) -endif - boot := arch/unicore32/boot -# Default target when executing plain make +# Default defconfig and target when executing plain make +KBUILD_DEFCONFIG := $(ARCH)_defconfig KBUILD_IMAGE := zImage all: $(KBUILD_IMAGE) @@ -83,8 +51,6 @@ all: $(KBUILD_IMAGE) zImage Image uImage: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ -MRPROPER_DIRS += $(ASM_GENERATED_DIR) - archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile index 95373428cb3..b0954a2d23c 100644 --- a/arch/unicore32/boot/compressed/Makefile +++ b/arch/unicore32/boot/compressed/Makefile @@ -59,7 +59,7 @@ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \ # We now have a PIC decompressor implementation. Decompressors running # from RAM should not define ZTEXTADDR. Decompressors running directly # from ROM or Flash must define ZTEXTADDR (preferably via the config) -ZTEXTADDR := 0 +ZTEXTADDR := 0x03000000 ZBSSADDR := ALIGN(4) SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ diff --git a/arch/unicore32/configs/debug_defconfig b/arch/unicore32/configs/unicore32_defconfig index b5fbde9f1cb..c9dd3198b6f 100644 --- a/arch/unicore32/configs/debug_defconfig +++ b/arch/unicore32/configs/unicore32_defconfig @@ -1,6 +1,6 @@ ### General setup CONFIG_EXPERIMENTAL=y -CONFIG_LOCALVERSION="-debug" +CONFIG_LOCALVERSION="-unicore32" CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -64,7 +64,6 @@ CONFIG_I2C_BATTERY_BQ27200=n CONFIG_I2C_EEPROM_AT24=n CONFIG_LCD_BACKLIGHT=n -CONFIG_PUV3_RTC=y CONFIG_PUV3_UMAL=y CONFIG_PUV3_MUSB=n CONFIG_PUV3_AC97=n @@ -167,8 +166,9 @@ CONFIG_LEDS_TRIGGER_IDE_DISK=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y # Real Time Clock -CONFIG_RTC_LIB=m -CONFIG_RTC_CLASS=m +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PUV3=y ### File systems CONFIG_EXT2_FS=m diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index b200fdaca44..ca113d6999c 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -1,2 +1,61 @@ include include/asm-generic/Kbuild.asm +generic-y += atomic.h +generic-y += auxvec.h +generic-y += bitsperlong.h +generic-y += bug.h +generic-y += bugs.h +generic-y += cputime.h +generic-y += current.h +generic-y += device.h +generic-y += div64.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += fb.h +generic-y += fcntl.h +generic-y += ftrace.h +generic-y += futex.h +generic-y += hardirq.h +generic-y += hw_irq.h +generic-y += ioctl.h +generic-y += ioctls.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += local.h +generic-y += mman.h +generic-y += module.h +generic-y += msgbuf.h +generic-y += param.h +generic-y += parport.h +generic-y += percpu.h +generic-y += poll.h +generic-y += posix_types.h +generic-y += resource.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += segment.h +generic-y += sembuf.h +generic-y += serial.h +generic-y += setup.h +generic-y += shmbuf.h +generic-y += shmparam.h +generic-y += siginfo.h +generic-y += signal.h +generic-y += sizes.h +generic-y += socket.h +generic-y += sockios.h +generic-y += stat.h +generic-y += statfs.h +generic-y += swab.h +generic-y += syscalls.h +generic-y += termbits.h +generic-y += termios.h +generic-y += topology.h +generic-y += types.h +generic-y += ucontext.h +generic-y += unaligned.h +generic-y += user.h +generic-y += vga.h +generic-y += xor.h diff --git 
a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile index ec23a2fb2f5..aeb0f181568 100644 --- a/arch/unicore32/kernel/Makefile +++ b/arch/unicore32/kernel/Makefile @@ -16,7 +16,6 @@ obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o obj-$(CONFIG_PUV3_GPIO) += gpio.o -obj-$(CONFIG_PUV3_RTC) += rtc.o obj-$(CONFIG_PUV3_PWM) += pwm.o obj-$(CONFIG_PUV3_PM) += pm.o sleep.o obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o diff --git a/arch/unicore32/kernel/rtc.c b/arch/unicore32/kernel/rtc.c deleted file mode 100644 index 8cad70b3302..00000000000 --- a/arch/unicore32/kernel/rtc.c +++ /dev/null @@ -1,371 +0,0 @@ -/* - * linux/arch/unicore32/kernel/rtc.c - * - * Code specific to PKUnity SoC and UniCore ISA - * - * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> - * Copyright (C) 2001-2010 Guan Xuetao - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/module.h> -#include <linux/fs.h> -#include <linux/string.h> -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/rtc.h> -#include <linux/bcd.h> -#include <linux/clk.h> -#include <linux/log2.h> -#include <linux/slab.h> -#include <linux/uaccess.h> -#include <linux/io.h> - -#include <asm/irq.h> -#include <mach/hardware.h> - -static struct resource *puv3_rtc_mem; - -static int puv3_rtc_alarmno = IRQ_RTCAlarm; -static int puv3_rtc_tickno = IRQ_RTC; - -static DEFINE_SPINLOCK(puv3_rtc_pie_lock); - -/* IRQ Handlers */ - -static irqreturn_t puv3_rtc_alarmirq(int irq, void *id) -{ - struct rtc_device *rdev = id; - - writel(readl(RTC_RTSR) | RTC_RTSR_AL, RTC_RTSR); - rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF); - return IRQ_HANDLED; -} - -static irqreturn_t puv3_rtc_tickirq(int irq, void *id) -{ - struct rtc_device *rdev = id; - - writel(readl(RTC_RTSR) | RTC_RTSR_HZ, RTC_RTSR); - rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF); - return IRQ_HANDLED; -} - -/* Update control registers */ -static void puv3_rtc_setaie(int to) -{ - unsigned int tmp; - - pr_debug("%s: aie=%d\n", __func__, to); - - tmp = readl(RTC_RTSR) & ~RTC_RTSR_ALE; - - if (to) - tmp |= RTC_RTSR_ALE; - - writel(tmp, RTC_RTSR); -} - -static int puv3_rtc_setpie(struct device *dev, int enabled) -{ - unsigned int tmp; - - pr_debug("%s: pie=%d\n", __func__, enabled); - - spin_lock_irq(&puv3_rtc_pie_lock); - tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE; - - if (enabled) - tmp |= RTC_RTSR_HZE; - - writel(tmp, RTC_RTSR); - spin_unlock_irq(&puv3_rtc_pie_lock); - - return 0; -} - -/* Time read/write */ - -static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) -{ - rtc_time_to_tm(readl(RTC_RCNR), rtc_tm); - - pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n", - rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, - rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); - - return 0; -} - -static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm) -{ - unsigned long rtc_count = 0; - - pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n", - tm->tm_year, tm->tm_mon, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); - - rtc_tm_to_time(tm, &rtc_count); - writel(rtc_count, RTC_RCNR); - - return 0; -} - -static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) -{ - struct rtc_time *alm_tm = &alrm->time; - - rtc_time_to_tm(readl(RTC_RTAR), alm_tm); - - alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE; - - 
pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n", - alrm->enabled, - alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday, - alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec); - - return 0; -} - -static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) -{ - struct rtc_time *tm = &alrm->time; - unsigned long rtcalarm_count = 0; - - pr_debug("puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n", - alrm->enabled, - tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff, - tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec); - - rtc_tm_to_time(tm, &rtcalarm_count); - writel(rtcalarm_count, RTC_RTAR); - - puv3_rtc_setaie(alrm->enabled); - - if (alrm->enabled) - enable_irq_wake(puv3_rtc_alarmno); - else - disable_irq_wake(puv3_rtc_alarmno); - - return 0; -} - -static int puv3_rtc_proc(struct device *dev, struct seq_file *seq) -{ - seq_printf(seq, "periodic_IRQ\t: %s\n", - (readl(RTC_RTSR) & RTC_RTSR_HZE) ? "yes" : "no"); - return 0; -} - -static int puv3_rtc_open(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); - int ret; - - ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq, - IRQF_DISABLED, "pkunity-rtc alarm", rtc_dev); - - if (ret) { - dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret); - return ret; - } - - ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq, - IRQF_DISABLED, "pkunity-rtc tick", rtc_dev); - - if (ret) { - dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret); - goto tick_err; - } - - return ret; - - tick_err: - free_irq(puv3_rtc_alarmno, rtc_dev); - return ret; -} - -static void puv3_rtc_release(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); - - /* do not clear AIE here, it may be needed for wake */ - - puv3_rtc_setpie(dev, 0); - free_irq(puv3_rtc_alarmno, rtc_dev); - free_irq(puv3_rtc_tickno, rtc_dev); -} - -static const struct rtc_class_ops puv3_rtcops = { - .open = puv3_rtc_open, - .release = puv3_rtc_release, - .read_time = puv3_rtc_gettime, - .set_time = puv3_rtc_settime, - .read_alarm = puv3_rtc_getalarm, - .set_alarm = puv3_rtc_setalarm, - .proc = puv3_rtc_proc, -}; - -static void puv3_rtc_enable(struct platform_device *pdev, int en) -{ - if (!en) { - writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR); - } else { - /* re-enable the device, and check it is ok */ - - if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) { - dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); - writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR); - } - } -} - -static int puv3_rtc_remove(struct platform_device *dev) -{ - struct rtc_device *rtc = platform_get_drvdata(dev); - - platform_set_drvdata(dev, NULL); - rtc_device_unregister(rtc); - - puv3_rtc_setpie(&dev->dev, 0); - puv3_rtc_setaie(0); - - release_resource(puv3_rtc_mem); - kfree(puv3_rtc_mem); - - return 0; -} - -static int puv3_rtc_probe(struct platform_device *pdev) -{ - struct rtc_device *rtc; - struct resource *res; - int ret; - - pr_debug("%s: probe=%p\n", __func__, pdev); - - /* find the IRQs */ - - puv3_rtc_tickno = platform_get_irq(pdev, 1); - if (puv3_rtc_tickno < 0) { - dev_err(&pdev->dev, "no irq for rtc tick\n"); - return -ENOENT; - } - - puv3_rtc_alarmno = platform_get_irq(pdev, 0); - if (puv3_rtc_alarmno < 0) { - dev_err(&pdev->dev, "no irq for alarm\n"); - return -ENOENT; - } - - pr_debug("PKUnity_rtc: tick irq %d, alarm irq %d\n", - puv3_rtc_tickno, puv3_rtc_alarmno); - - /* get the memory region */ - - res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "failed to get memory region resource\n"); - return -ENOENT; - } - - puv3_rtc_mem = request_mem_region(res->start, - res->end-res->start+1, - pdev->name); - - if (puv3_rtc_mem == NULL) { - dev_err(&pdev->dev, "failed to reserve memory region\n"); - ret = -ENOENT; - goto err_nores; - } - - puv3_rtc_enable(pdev, 1); - - /* register RTC and exit */ - - rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops, - THIS_MODULE); - - if (IS_ERR(rtc)) { - dev_err(&pdev->dev, "cannot attach rtc\n"); - ret = PTR_ERR(rtc); - goto err_nortc; - } - - /* platform setup code should have handled this; sigh */ - if (!device_can_wakeup(&pdev->dev)) - device_init_wakeup(&pdev->dev, 1); - - platform_set_drvdata(pdev, rtc); - return 0; - - err_nortc: - puv3_rtc_enable(pdev, 0); - release_resource(puv3_rtc_mem); - - err_nores: - return ret; -} - -#ifdef CONFIG_PM - -/* RTC Power management control */ - -static int ticnt_save; - -static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state) -{ - /* save RTAR for anyone using periodic interrupts */ - ticnt_save = readl(RTC_RTAR); - puv3_rtc_enable(pdev, 0); - return 0; -} - -static int puv3_rtc_resume(struct platform_device *pdev) -{ - puv3_rtc_enable(pdev, 1); - writel(ticnt_save, RTC_RTAR); - return 0; -} -#else -#define puv3_rtc_suspend NULL -#define puv3_rtc_resume NULL -#endif - -static struct platform_driver puv3_rtcdrv = { - .probe = puv3_rtc_probe, - .remove = __devexit_p(puv3_rtc_remove), - .suspend = puv3_rtc_suspend, - .resume = puv3_rtc_resume, - .driver = { - .name = "PKUnity-v3-RTC", - .owner = THIS_MODULE, - } -}; - -static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n"; - -static int __init puv3_rtc_init(void) -{ - printk(banner); - return platform_driver_register(&puv3_rtcdrv); -} - -static void __exit puv3_rtc_exit(void) -{ - platform_driver_unregister(&puv3_rtcdrv); -} - -module_init(puv3_rtc_init); -module_exit(puv3_rtc_exit); - -MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip"); -MODULE_AUTHOR("Hu Dongliang"); -MODULE_LICENSE("GPL v2"); - diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S index 9bf7f7af52c..77e407e49a6 100644 --- a/arch/unicore32/kernel/vmlinux.lds.S +++ b/arch/unicore32/kernel/vmlinux.lds.S @@ -30,7 +30,7 @@ SECTIONS HEAD_TEXT_SECTION INIT_TEXT_SECTION(PAGE_SIZE) INIT_DATA_SECTION(16) - PERCPU(L1_CACHE_BYTES, PAGE_SIZE) + PERCPU_SECTION(L1_CACHE_BYTES) __init_end = .; _stext = .; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index da349723d41..fc76e420900 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -20,6 +20,7 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE + select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS select HAVE_IRQ_WORK select HAVE_IOREMAP_PROT @@ -70,6 +71,7 @@ config X86 select IRQ_FORCED_THREADING select USE_GENERIC_SMP_HELPERS if SMP select HAVE_BPF_JIT if (X86_64 && NET) + select CLKEVT_I8253 config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) @@ -680,33 +682,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT Calgary anyway, pass 'iommu=calgary' on the kernel command line. If unsure, say Y. -config AMD_IOMMU - bool "AMD IOMMU support" - select SWIOTLB - select PCI_MSI - select PCI_IOV - depends on X86_64 && PCI && ACPI - ---help--- - With this option you can enable support for AMD IOMMU hardware in - your system. 
An IOMMU is a hardware component which provides - remapping of DMA memory accesses from devices. With an AMD IOMMU you - can isolate the the DMA memory of different devices and protect the - system from misbehaving device drivers or hardware. - - You can find out if your system has an AMD IOMMU if you look into - your BIOS for an option to enable it or if you have an IVRS ACPI - table. - -config AMD_IOMMU_STATS - bool "Export AMD IOMMU statistics to debugfs" - depends on AMD_IOMMU - select DEBUG_FS - ---help--- - This option enables code in the AMD IOMMU driver to collect various - statistics about whats happening in the driver and exports that - information to userspace via debugfs. - If unsure, say N. - # need this always selected by IOMMU for the VIA workaround config SWIOTLB def_bool y if X86_64 @@ -720,9 +695,6 @@ config SWIOTLB config IOMMU_HELPER def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) -config IOMMU_API - def_bool (AMD_IOMMU || DMAR) - config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL @@ -1170,7 +1142,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI" config AMD_NUMA def_bool y prompt "Old style AMD Opteron NUMA detection" - depends on NUMA && PCI + depends on X86_64 && NUMA && PCI ---help--- Enable AMD NUMA node topology detection. You should say Y here if you have a multi processor AMD system. This uses an old method to @@ -1942,55 +1914,6 @@ config PCI_CNB20LE_QUIRK You should say N unless you know you need this. -config DMAR - bool "Support for DMA Remapping Devices (EXPERIMENTAL)" - depends on PCI_MSI && ACPI && EXPERIMENTAL - help - DMA remapping (DMAR) devices support enables independent address - translations for Direct Memory Access (DMA) from devices. - These DMA remapping devices are reported via ACPI tables - and include PCI device scope covered by these DMA - remapping devices. - -config DMAR_DEFAULT_ON - def_bool y - prompt "Enable DMA Remapping Devices by default" - depends on DMAR - help - Selecting this option will enable a DMAR device at boot time if - one is found. If this option is not selected, DMAR support can - be enabled by passing intel_iommu=on to the kernel. It is - recommended you say N here while the DMAR code remains - experimental. - -config DMAR_BROKEN_GFX_WA - bool "Workaround broken graphics drivers (going away soon)" - depends on DMAR && BROKEN - ---help--- - Current Graphics drivers tend to use physical address - for DMA and avoid using DMA APIs. Setting this config - option permits the IOMMU driver to set a unity map for - all the OS-visible memory. Hence the driver can continue - to use physical addresses for DMA, at least until this - option is removed in the 2.6.32 kernel. - -config DMAR_FLOPPY_WA - def_bool y - depends on DMAR - ---help--- - Floppy disk drivers are known to bypass DMA API calls - thereby failing to work when IOMMU is enabled. This - workaround will setup a 1:1 mapping for the first - 16MiB to make floppy (an ISA device) work. - -config INTR_REMAP - bool "Support for Interrupt Remapping (EXPERIMENTAL)" - depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL - ---help--- - Supports Interrupt remapping for IO-APIC and MSI devices. - To use x2apic mode in the CPU's which support x2APIC enhancements or - to support platforms with CPU's having > 8 bit APIC ID, say Y. 
- source "drivers/pci/pcie/Kconfig" source "drivers/pci/Kconfig" diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index c1870dddd32..a0e866d233e 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -143,7 +143,7 @@ ENTRY(ia32_sysenter_target) CFI_REL_OFFSET rip,0 pushq_cfi %rax cld - SAVE_ARGS 0,0,1 + SAVE_ARGS 0,1,0 /* no need to do an access_ok check here because rbp has been 32bit zero extended */ 1: movl (%rbp),%ebp @@ -173,7 +173,7 @@ sysexit_from_sys_call: andl $~0x200,EFLAGS-R11(%rsp) movl RIP-R11(%rsp),%edx /* User %eip */ CFI_REGISTER rip,rdx - RESTORE_ARGS 1,24,1,1,1,1 + RESTORE_ARGS 0,24,0,0,0,0 xorq %r8,%r8 xorq %r9,%r9 xorq %r10,%r10 @@ -289,7 +289,7 @@ ENTRY(ia32_cstar_target) * disabled irqs and here we enable it straight after entry: */ ENABLE_INTERRUPTS(CLBR_NONE) - SAVE_ARGS 8,1,1 + SAVE_ARGS 8,0,0 movl %eax,%eax /* zero extension */ movq %rax,ORIG_RAX-ARGOFFSET(%rsp) movq %rcx,RIP-ARGOFFSET(%rsp) @@ -328,7 +328,7 @@ cstar_dispatch: jnz sysretl_audit sysretl_from_sys_call: andl $~TS_COMPAT,TI_status(%r10) - RESTORE_ARGS 1,-ARG_SKIP,1,1,1 + RESTORE_ARGS 0,-ARG_SKIP,0,0,0 movl RIP-ARGOFFSET(%rsp),%ecx CFI_REGISTER rip,rcx movl EFLAGS-ARGOFFSET(%rsp),%r11d @@ -419,7 +419,7 @@ ENTRY(ia32_syscall) cld /* note the registers are not zero extended to the sf. this could be a problem. */ - SAVE_ARGS 0,0,1 + SAVE_ARGS 0,1,0 GET_THREAD_INFO(%r10) orl $TS_COMPAT,TI_status(%r10) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h deleted file mode 100644 index a6863a2dec1..00000000000 --- a/arch/x86/include/asm/amd_iommu.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_H -#define _ASM_X86_AMD_IOMMU_H - -#include <linux/irqreturn.h> - -#ifdef CONFIG_AMD_IOMMU - -extern int amd_iommu_detect(void); - -#else - -static inline int amd_iommu_detect(void) { return -ENODEV; } - -#endif - -#endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h deleted file mode 100644 index 55d95eb789b..00000000000 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_PROTO_H -#define _ASM_X86_AMD_IOMMU_PROTO_H - -#include <asm/amd_iommu_types.h> - -extern int amd_iommu_init_dma_ops(void); -extern int amd_iommu_init_passthrough(void); -extern irqreturn_t amd_iommu_int_thread(int irq, void *data); -extern irqreturn_t amd_iommu_int_handler(int irq, void *data); -extern void amd_iommu_apply_erratum_63(u16 devid); -extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); -extern void amd_iommu_init_api(void); -#ifndef CONFIG_AMD_IOMMU_STATS - -static inline void amd_iommu_stats_init(void) { } - -#endif /* !CONFIG_AMD_IOMMU_STATS */ - -static inline bool is_rd890_iommu(struct pci_dev *pdev) -{ - return (pdev->vendor == PCI_VENDOR_ID_ATI) && - (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); -} - -static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) -{ - if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) - return false; - - return !!(iommu->features & f); -} - -#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h deleted file mode 100644 index 4c998299541..00000000000 --- a/arch/x86/include/asm/amd_iommu_types.h +++ /dev/null @@ -1,580 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_TYPES_H -#define _ASM_X86_AMD_IOMMU_TYPES_H - -#include <linux/types.h> -#include <linux/mutex.h> -#include <linux/list.h> -#include <linux/spinlock.h> - -/* - * Maximum number of IOMMUs supported - */ -#define MAX_IOMMUS 32 - -/* - * some size calculation constants - */ -#define DEV_TABLE_ENTRY_SIZE 32 -#define ALIAS_TABLE_ENTRY_SIZE 2 -#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) - -/* Length of the MMIO region for the AMD IOMMU */ -#define MMIO_REGION_LENGTH 0x4000 - -/* Capability offsets used by the driver */ -#define MMIO_CAP_HDR_OFFSET 0x00 -#define MMIO_RANGE_OFFSET 0x0c -#define MMIO_MISC_OFFSET 0x10 - -/* Masks, shifts and macros to parse the device range capability */ -#define MMIO_RANGE_LD_MASK 0xff000000 -#define MMIO_RANGE_FD_MASK 0x00ff0000 -#define MMIO_RANGE_BUS_MASK 0x0000ff00 -#define MMIO_RANGE_LD_SHIFT 24 -#define MMIO_RANGE_FD_SHIFT 16 -#define MMIO_RANGE_BUS_SHIFT 8 -#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT) -#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT) -#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT) -#define MMIO_MSI_NUM(x) ((x) & 0x1f) - -/* Flag masks for the AMD IOMMU exclusion range */ -#define MMIO_EXCL_ENABLE_MASK 0x01ULL -#define MMIO_EXCL_ALLOW_MASK 0x02ULL - -/* Used offsets into the MMIO space */ -#define MMIO_DEV_TABLE_OFFSET 0x0000 -#define MMIO_CMD_BUF_OFFSET 0x0008 -#define MMIO_EVT_BUF_OFFSET 0x0010 -#define MMIO_CONTROL_OFFSET 0x0018 -#define MMIO_EXCL_BASE_OFFSET 0x0020 -#define MMIO_EXCL_LIMIT_OFFSET 0x0028 -#define MMIO_EXT_FEATURES 0x0030 -#define MMIO_CMD_HEAD_OFFSET 0x2000 -#define MMIO_CMD_TAIL_OFFSET 0x2008 -#define MMIO_EVT_HEAD_OFFSET 0x2010 -#define MMIO_EVT_TAIL_OFFSET 0x2018 -#define MMIO_STATUS_OFFSET 0x2020 - - -/* Extended Feature Bits */ -#define FEATURE_PREFETCH (1ULL<<0) -#define FEATURE_PPR (1ULL<<1) -#define FEATURE_X2APIC (1ULL<<2) -#define FEATURE_NX (1ULL<<3) -#define FEATURE_GT (1ULL<<4) -#define FEATURE_IA (1ULL<<6) -#define FEATURE_GA (1ULL<<7) -#define FEATURE_HE (1ULL<<8) -#define FEATURE_PC (1ULL<<9) - -/* MMIO status bits */ -#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 - -/* event logging constants */ -#define EVENT_ENTRY_SIZE 0x10 -#define EVENT_TYPE_SHIFT 28 -#define EVENT_TYPE_MASK 0xf -#define EVENT_TYPE_ILL_DEV 0x1 -#define EVENT_TYPE_IO_FAULT 0x2 -#define EVENT_TYPE_DEV_TAB_ERR 0x3 -#define EVENT_TYPE_PAGE_TAB_ERR 0x4 -#define EVENT_TYPE_ILL_CMD 0x5 -#define EVENT_TYPE_CMD_HARD_ERR 0x6 -#define EVENT_TYPE_IOTLB_INV_TO 0x7 -#define EVENT_TYPE_INV_DEV_REQ 0x8 -#define EVENT_DEVID_MASK 0xffff -#define EVENT_DEVID_SHIFT 0 -#define EVENT_DOMID_MASK 0xffff -#define EVENT_DOMID_SHIFT 0 -#define EVENT_FLAGS_MASK 0xfff -#define EVENT_FLAGS_SHIFT 0x10 - -/* feature control bits */ -#define CONTROL_IOMMU_EN 0x00ULL -#define CONTROL_HT_TUN_EN 0x01ULL -#define CONTROL_EVT_LOG_EN 0x02ULL -#define CONTROL_EVT_INT_EN 0x03ULL -#define CONTROL_COMWAIT_EN 0x04ULL -#define CONTROL_PASSPW_EN 0x08ULL -#define CONTROL_RESPASSPW_EN 0x09ULL -#define CONTROL_COHERENT_EN 0x0aULL -#define CONTROL_ISOC_EN 0x0bULL -#define CONTROL_CMDBUF_EN 0x0cULL -#define CONTROL_PPFLOG_EN 0x0dULL -#define CONTROL_PPFINT_EN 0x0eULL - -/* command specific defines */ -#define CMD_COMPL_WAIT 0x01 -#define 
CMD_INV_DEV_ENTRY 0x02 -#define CMD_INV_IOMMU_PAGES 0x03 -#define CMD_INV_IOTLB_PAGES 0x04 -#define CMD_INV_ALL 0x08 - -#define CMD_COMPL_WAIT_STORE_MASK 0x01 -#define CMD_COMPL_WAIT_INT_MASK 0x02 -#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 -#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 - -#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL - -/* macros and definitions for device table entries */ -#define DEV_ENTRY_VALID 0x00 -#define DEV_ENTRY_TRANSLATION 0x01 -#define DEV_ENTRY_IR 0x3d -#define DEV_ENTRY_IW 0x3e -#define DEV_ENTRY_NO_PAGE_FAULT 0x62 -#define DEV_ENTRY_EX 0x67 -#define DEV_ENTRY_SYSMGT1 0x68 -#define DEV_ENTRY_SYSMGT2 0x69 -#define DEV_ENTRY_INIT_PASS 0xb8 -#define DEV_ENTRY_EINT_PASS 0xb9 -#define DEV_ENTRY_NMI_PASS 0xba -#define DEV_ENTRY_LINT0_PASS 0xbe -#define DEV_ENTRY_LINT1_PASS 0xbf -#define DEV_ENTRY_MODE_MASK 0x07 -#define DEV_ENTRY_MODE_SHIFT 0x09 - -/* constants to configure the command buffer */ -#define CMD_BUFFER_SIZE 8192 -#define CMD_BUFFER_UNINITIALIZED 1 -#define CMD_BUFFER_ENTRIES 512 -#define MMIO_CMD_SIZE_SHIFT 56 -#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) - -/* constants for event buffer handling */ -#define EVT_BUFFER_SIZE 8192 /* 512 entries */ -#define EVT_LEN_MASK (0x9ULL << 56) - -#define PAGE_MODE_NONE 0x00 -#define PAGE_MODE_1_LEVEL 0x01 -#define PAGE_MODE_2_LEVEL 0x02 -#define PAGE_MODE_3_LEVEL 0x03 -#define PAGE_MODE_4_LEVEL 0x04 -#define PAGE_MODE_5_LEVEL 0x05 -#define PAGE_MODE_6_LEVEL 0x06 - -#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) -#define PM_LEVEL_SIZE(x) (((x) < 6) ? \ - ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ - (0xffffffffffffffffULL)) -#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) -#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) -#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ - IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) -#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) - -#define PM_MAP_4k 0 -#define PM_ADDR_MASK 0x000ffffffffff000ULL -#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \ - (~((1ULL << (12 + ((lvl) * 9))) - 1))) -#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) - -/* - * Returns the page table level to use for a given page size - * Pagesize is expected to be a power-of-two - */ -#define PAGE_SIZE_LEVEL(pagesize) \ - ((__ffs(pagesize) - 12) / 9) -/* - * Returns the number of ptes to use for a given page size - * Pagesize is expected to be a power-of-two - */ -#define PAGE_SIZE_PTE_COUNT(pagesize) \ - (1ULL << ((__ffs(pagesize) - 12) % 9)) - -/* - * Aligns a given io-virtual address to a given page size - * Pagesize is expected to be a power-of-two - */ -#define PAGE_SIZE_ALIGN(address, pagesize) \ - ((address) & ~((pagesize) - 1)) -/* - * Creates an IOMMU PTE for an address an a given pagesize - * The PTE has no permission bits set - * Pagesize is expected to be a power-of-two larger than 4096 - */ -#define PAGE_SIZE_PTE(address, pagesize) \ - (((address) | ((pagesize) - 1)) & \ - (~(pagesize >> 1)) & PM_ADDR_MASK) - -/* - * Takes a PTE value with mode=0x07 and returns the page size it maps - */ -#define PTE_PAGE_SIZE(pte) \ - (1ULL << (1 + ffz(((pte) | 0xfffULL)))) - -#define IOMMU_PTE_P (1ULL << 0) -#define IOMMU_PTE_TV (1ULL << 1) -#define IOMMU_PTE_U (1ULL << 59) -#define IOMMU_PTE_FC (1ULL << 60) -#define IOMMU_PTE_IR (1ULL << 61) -#define IOMMU_PTE_IW (1ULL << 62) - -#define DTE_FLAG_IOTLB 0x01 - -#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) -#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) -#define IOMMU_PTE_PAGE(pte) 
(phys_to_virt((pte) & IOMMU_PAGE_MASK)) -#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) - -#define IOMMU_PROT_MASK 0x03 -#define IOMMU_PROT_IR 0x01 -#define IOMMU_PROT_IW 0x02 - -/* IOMMU capabilities */ -#define IOMMU_CAP_IOTLB 24 -#define IOMMU_CAP_NPCACHE 26 -#define IOMMU_CAP_EFR 27 - -#define MAX_DOMAIN_ID 65536 - -/* FIXME: move this macro to <linux/pci.h> */ -#define PCI_BUS(x) (((x) >> 8) & 0xff) - -/* Protection domain flags */ -#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ -#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops - domain for an IOMMU */ -#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page - translation */ - -extern bool amd_iommu_dump; -#define DUMP_printk(format, arg...) \ - do { \ - if (amd_iommu_dump) \ - printk(KERN_INFO "AMD-Vi: " format, ## arg); \ - } while(0); - -/* global flag if IOMMUs cache non-present entries */ -extern bool amd_iommu_np_cache; -/* Only true if all IOMMUs support device IOTLBs */ -extern bool amd_iommu_iotlb_sup; - -/* - * Make iterating over all IOMMUs easier - */ -#define for_each_iommu(iommu) \ - list_for_each_entry((iommu), &amd_iommu_list, list) -#define for_each_iommu_safe(iommu, next) \ - list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list) - -#define APERTURE_RANGE_SHIFT 27 /* 128 MB */ -#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT) -#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT) -#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */ -#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT) -#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL) - -/* - * This structure contains generic data for IOMMU protection domains - * independent of their use. - */ -struct protection_domain { - struct list_head list; /* for list of all protection domains */ - struct list_head dev_list; /* List of all devices in this domain */ - spinlock_t lock; /* mostly used to lock the page table*/ - struct mutex api_lock; /* protect page tables in the iommu-api path */ - u16 id; /* the domain id written to the device table */ - int mode; /* paging mode (0-6 levels) */ - u64 *pt_root; /* page table root pointer */ - unsigned long flags; /* flags to find out type of domain */ - bool updated; /* complete domain flush required */ - unsigned dev_cnt; /* devices assigned to this domain */ - unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ - void *priv; /* private data */ - -}; - -/* - * This struct contains device specific data for the IOMMU - */ -struct iommu_dev_data { - struct list_head list; /* For domain->dev_list */ - struct device *dev; /* Device this data belong to */ - struct device *alias; /* The Alias Device */ - struct protection_domain *domain; /* Domain the device is bound to */ - atomic_t bind; /* Domain attach reverent count */ -}; - -/* - * For dynamic growth the aperture size is split into ranges of 128MB of - * DMA address space each. This struct represents one such range. - */ -struct aperture_range { - - /* address allocation bitmap */ - unsigned long *bitmap; - - /* - * Array of PTE pages for the aperture. In this array we save all the - * leaf pages of the domain page table used for the aperture. This way - * we don't need to walk the page table to find a specific PTE. We can - * just calculate its address in constant time. 
- */ - u64 *pte_pages[64]; - - unsigned long offset; -}; - -/* - * Data container for a dma_ops specific protection domain - */ -struct dma_ops_domain { - struct list_head list; - - /* generic protection domain information */ - struct protection_domain domain; - - /* size of the aperture for the mappings */ - unsigned long aperture_size; - - /* address we start to search for free addresses */ - unsigned long next_address; - - /* address space relevant data */ - struct aperture_range *aperture[APERTURE_MAX_RANGES]; - - /* This will be set to true when TLB needs to be flushed */ - bool need_flush; - - /* - * if this is a preallocated domain, keep the device for which it was - * preallocated in this variable - */ - u16 target_dev; -}; - -/* - * Structure where we save information about one hardware AMD IOMMU in the - * system. - */ -struct amd_iommu { - struct list_head list; - - /* Index within the IOMMU array */ - int index; - - /* locks the accesses to the hardware */ - spinlock_t lock; - - /* Pointer to PCI device of this IOMMU */ - struct pci_dev *dev; - - /* physical address of MMIO space */ - u64 mmio_phys; - /* virtual address of MMIO space */ - u8 *mmio_base; - - /* capabilities of that IOMMU read from ACPI */ - u32 cap; - - /* flags read from acpi table */ - u8 acpi_flags; - - /* Extended features */ - u64 features; - - /* - * Capability pointer. There could be more than one IOMMU per PCI - * device function if there are more than one AMD IOMMU capability - * pointers. - */ - u16 cap_ptr; - - /* pci domain of this IOMMU */ - u16 pci_seg; - - /* first device this IOMMU handles. read from PCI */ - u16 first_device; - /* last device this IOMMU handles. read from PCI */ - u16 last_device; - - /* start of exclusion range of that IOMMU */ - u64 exclusion_start; - /* length of exclusion range of that IOMMU */ - u64 exclusion_length; - - /* command buffer virtual address */ - u8 *cmd_buf; - /* size of command buffer */ - u32 cmd_buf_size; - - /* size of event buffer */ - u32 evt_buf_size; - /* event buffer virtual address */ - u8 *evt_buf; - /* MSI number for event interrupt */ - u16 evt_msi_num; - - /* true if interrupts for this IOMMU are already enabled */ - bool int_enabled; - - /* if one, we need to send a completion wait command */ - bool need_sync; - - /* default dma_ops domain for that IOMMU */ - struct dma_ops_domain *default_dom; - - /* - * We can't rely on the BIOS to restore all values on reinit, so we - * need to stash them - */ - - /* The iommu BAR */ - u32 stored_addr_lo; - u32 stored_addr_hi; - - /* - * Each iommu has 6 l1s, each of which is documented as having 0x12 - * registers - */ - u32 stored_l1[6][0x12]; - - /* The l2 indirect registers */ - u32 stored_l2[0x83]; -}; - -/* - * List with all IOMMUs in the system. This list is not locked because it is - * only written and read at driver initialization or suspend time - */ -extern struct list_head amd_iommu_list; - -/* - * Array with pointers to each IOMMU struct - * The indices are referenced in the protection domains - */ -extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; - -/* Number of IOMMUs present in the system */ -extern int amd_iommus_present; - -/* - * Declarations for the global list of all protection domains - */ -extern spinlock_t amd_iommu_pd_lock; -extern struct list_head amd_iommu_pd_list; - -/* - * Structure defining one entry in the device table - */ -struct dev_table_entry { - u32 data[8]; -}; - -/* - * One entry for unity mappings parsed out of the ACPI table. 
- */ -struct unity_map_entry { - struct list_head list; - - /* starting device id this entry is used for (including) */ - u16 devid_start; - /* end device id this entry is used for (including) */ - u16 devid_end; - - /* start address to unity map (including) */ - u64 address_start; - /* end address to unity map (including) */ - u64 address_end; - - /* required protection */ - int prot; -}; - -/* - * List of all unity mappings. It is not locked because as runtime it is only - * read. It is created at ACPI table parsing time. - */ -extern struct list_head amd_iommu_unity_map; - -/* - * Data structures for device handling - */ - -/* - * Device table used by hardware. Read and write accesses by software are - * locked with the amd_iommu_pd_table lock. - */ -extern struct dev_table_entry *amd_iommu_dev_table; - -/* - * Alias table to find requestor ids to device ids. Not locked because only - * read on runtime. - */ -extern u16 *amd_iommu_alias_table; - -/* - * Reverse lookup table to find the IOMMU which translates a specific device. - */ -extern struct amd_iommu **amd_iommu_rlookup_table; - -/* size of the dma_ops aperture as power of 2 */ -extern unsigned amd_iommu_aperture_order; - -/* largest PCI device id we expect translation requests for */ -extern u16 amd_iommu_last_bdf; - -/* allocation bitmap for domain ids */ -extern unsigned long *amd_iommu_pd_alloc_bitmap; - -/* - * If true, the addresses will be flushed on unmap time, not when - * they are reused - */ -extern bool amd_iommu_unmap_flush; - -/* takes bus and device/function and returns the device id - * FIXME: should that be in generic PCI code? */ -static inline u16 calc_devid(u8 bus, u8 devfn) -{ - return (((u16)bus) << 8) | devfn; -} - -#ifdef CONFIG_AMD_IOMMU_STATS - -struct __iommu_counter { - char *name; - struct dentry *dent; - u64 value; -}; - -#define DECLARE_STATS_COUNTER(nm) \ - static struct __iommu_counter nm = { \ - .name = #nm, \ - } - -#define INC_STATS_COUNTER(name) name.value += 1 -#define ADD_STATS_COUNTER(name, x) name.value += (x) -#define SUB_STATS_COUNTER(name, x) name.value -= (x) - -#else /* CONFIG_AMD_IOMMU_STATS */ - -#define DECLARE_STATS_COUNTER(name) -#define INC_STATS_COUNTER(name) -#define ADD_STATS_COUNTER(name, x) -#define SUB_STATS_COUNTER(name, x) - -#endif /* CONFIG_AMD_IOMMU_STATS */ - -#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ diff --git a/arch/x86/include/asm/apb_timer.h b/arch/x86/include/asm/apb_timer.h index 2fefa501d3b..082cf818493 100644 --- a/arch/x86/include/asm/apb_timer.h +++ b/arch/x86/include/asm/apb_timer.h @@ -50,7 +50,6 @@ #define APBT_DEV_USED 1 extern void apbt_time_init(void); -extern struct clock_event_device *global_clock_event; extern unsigned long apbt_quick_calibrate(void); extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu); extern void apbt_setup_secondary_clock(void); @@ -62,7 +61,7 @@ extern int sfi_mtimer_num; #else /* CONFIG_APB_TIMER */ static inline unsigned long apbt_quick_calibrate(void) {return 0; } -static inline void apbt_time_init(void) {return 0; } +static inline void apbt_time_init(void) { } #endif #endif /* ASM_X86_APBT_H */ diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index b3ed1e1460f..9412d6558c8 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -3,9 +3,11 @@ #ifdef __ASSEMBLY__ # define __ASM_FORM(x) x +# define __ASM_FORM_COMMA(x) x, # define __ASM_EX_SEC .section __ex_table, "a" #else # define __ASM_FORM(x) " " #x " " +# define __ASM_FORM_COMMA(x) " " #x "," # define __ASM_EX_SEC 
" .section __ex_table,\"a\"\n" #endif @@ -15,7 +17,8 @@ # define __ASM_SEL(a,b) __ASM_FORM(b) #endif -#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q) +#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \ + inst##q##__VA_ARGS__) #define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg) #define _ASM_PTR __ASM_SEL(.long, .quad) diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h index 30af5a83216..a9e3a740f69 100644 --- a/arch/x86/include/asm/calling.h +++ b/arch/x86/include/asm/calling.h @@ -46,6 +46,7 @@ For 32-bit we have the following conventions - kernel is built with */ +#include "dwarf2.h" /* * 64-bit system call stack frame layout defines and helpers, for @@ -84,72 +85,57 @@ For 32-bit we have the following conventions - kernel is built with #define ARGOFFSET R11 #define SWFRAME ORIG_RAX - .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0 + .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1 subq $9*8+\addskip, %rsp CFI_ADJUST_CFA_OFFSET 9*8+\addskip - movq %rdi, 8*8(%rsp) - CFI_REL_OFFSET rdi, 8*8 - movq %rsi, 7*8(%rsp) - CFI_REL_OFFSET rsi, 7*8 - movq %rdx, 6*8(%rsp) - CFI_REL_OFFSET rdx, 6*8 - .if \norcx - .else - movq %rcx, 5*8(%rsp) - CFI_REL_OFFSET rcx, 5*8 + movq_cfi rdi, 8*8 + movq_cfi rsi, 7*8 + movq_cfi rdx, 6*8 + + .if \save_rcx + movq_cfi rcx, 5*8 .endif - movq %rax, 4*8(%rsp) - CFI_REL_OFFSET rax, 4*8 - .if \nor891011 - .else - movq %r8, 3*8(%rsp) - CFI_REL_OFFSET r8, 3*8 - movq %r9, 2*8(%rsp) - CFI_REL_OFFSET r9, 2*8 - movq %r10, 1*8(%rsp) - CFI_REL_OFFSET r10, 1*8 - movq %r11, (%rsp) - CFI_REL_OFFSET r11, 0*8 + + movq_cfi rax, 4*8 + + .if \save_r891011 + movq_cfi r8, 3*8 + movq_cfi r9, 2*8 + movq_cfi r10, 1*8 + movq_cfi r11, 0*8 .endif + .endm #define ARG_SKIP (9*8) - .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \ - skipr8910=0, skiprdx=0 - .if \skipr11 - .else - movq (%rsp), %r11 - CFI_RESTORE r11 + .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \ + rstor_r8910=1, rstor_rdx=1 + .if \rstor_r11 + movq_cfi_restore 0*8, r11 .endif - .if \skipr8910 - .else - movq 1*8(%rsp), %r10 - CFI_RESTORE r10 - movq 2*8(%rsp), %r9 - CFI_RESTORE r9 - movq 3*8(%rsp), %r8 - CFI_RESTORE r8 + + .if \rstor_r8910 + movq_cfi_restore 1*8, r10 + movq_cfi_restore 2*8, r9 + movq_cfi_restore 3*8, r8 .endif - .if \skiprax - .else - movq 4*8(%rsp), %rax - CFI_RESTORE rax + + .if \rstor_rax + movq_cfi_restore 4*8, rax .endif - .if \skiprcx - .else - movq 5*8(%rsp), %rcx - CFI_RESTORE rcx + + .if \rstor_rcx + movq_cfi_restore 5*8, rcx .endif - .if \skiprdx - .else - movq 6*8(%rsp), %rdx - CFI_RESTORE rdx + + .if \rstor_rdx + movq_cfi_restore 6*8, rdx .endif - movq 7*8(%rsp), %rsi - CFI_RESTORE rsi - movq 8*8(%rsp), %rdi - CFI_RESTORE rdi + + movq_cfi_restore 7*8, rsi + movq_cfi_restore 8*8, rdi + .if ARG_SKIP+\addskip > 0 addq $ARG_SKIP+\addskip, %rsp CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) @@ -176,33 +162,21 @@ For 32-bit we have the following conventions - kernel is built with .macro SAVE_REST subq $REST_SKIP, %rsp CFI_ADJUST_CFA_OFFSET REST_SKIP - movq %rbx, 5*8(%rsp) - CFI_REL_OFFSET rbx, 5*8 - movq %rbp, 4*8(%rsp) - CFI_REL_OFFSET rbp, 4*8 - movq %r12, 3*8(%rsp) - CFI_REL_OFFSET r12, 3*8 - movq %r13, 2*8(%rsp) - CFI_REL_OFFSET r13, 2*8 - movq %r14, 1*8(%rsp) - CFI_REL_OFFSET r14, 1*8 - movq %r15, (%rsp) - CFI_REL_OFFSET r15, 0*8 + movq_cfi rbx, 5*8 + movq_cfi rbp, 4*8 + movq_cfi r12, 3*8 + movq_cfi r13, 2*8 + movq_cfi r14, 1*8 + movq_cfi r15, 0*8 .endm .macro RESTORE_REST - movq (%rsp), %r15 - CFI_RESTORE r15 - movq 
1*8(%rsp), %r14 - CFI_RESTORE r14 - movq 2*8(%rsp), %r13 - CFI_RESTORE r13 - movq 3*8(%rsp), %r12 - CFI_RESTORE r12 - movq 4*8(%rsp), %rbp - CFI_RESTORE rbp - movq 5*8(%rsp), %rbx - CFI_RESTORE rbx + movq_cfi_restore 0*8, r15 + movq_cfi_restore 1*8, r14 + movq_cfi_restore 2*8, r13 + movq_cfi_restore 3*8, r12 + movq_cfi_restore 4*8, rbp + movq_cfi_restore 5*8, rbx addq $REST_SKIP, %rsp CFI_ADJUST_CFA_OFFSET -(REST_SKIP) .endm @@ -214,7 +188,7 @@ For 32-bit we have the following conventions - kernel is built with .macro RESTORE_ALL addskip=0 RESTORE_REST - RESTORE_ARGS 0, \addskip + RESTORE_ARGS 1, \addskip .endm .macro icebp diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h index 2c6fc9e6281..3b629f47eb6 100644 --- a/arch/x86/include/asm/frame.h +++ b/arch/x86/include/asm/frame.h @@ -1,5 +1,6 @@ #ifdef __ASSEMBLY__ +#include <asm/asm.h> #include <asm/dwarf2.h> /* The annotation hides the frame from the unwinder and makes it look @@ -7,13 +8,13 @@ frame pointer later */ #ifdef CONFIG_FRAME_POINTER .macro FRAME - pushl_cfi %ebp - CFI_REL_OFFSET ebp,0 - movl %esp,%ebp + __ASM_SIZE(push,_cfi) %__ASM_REG(bp) + CFI_REL_OFFSET __ASM_REG(bp), 0 + __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp) .endm .macro ENDFRAME - popl_cfi %ebp - CFI_RESTORE ebp + __ASM_SIZE(pop,_cfi) %__ASM_REG(bp) + CFI_RESTORE __ASM_REG(bp) .endm #else .macro FRAME diff --git a/arch/x86/include/asm/i8253.h b/arch/x86/include/asm/i8253.h deleted file mode 100644 index 65aaa91d585..00000000000 --- a/arch/x86/include/asm/i8253.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ASM_X86_I8253_H -#define _ASM_X86_I8253_H - -/* i8253A PIT registers */ -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 -#define PIT_CH2 0x42 - -#define PIT_LATCH LATCH - -extern raw_spinlock_t i8253_lock; - -extern struct clock_event_device *global_clock_event; - -extern void setup_pit_timer(void); - -#define inb_pit inb_p -#define outb_pit outb_p - -#endif /* _ASM_X86_I8253_H */ diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index 5745ce8bf10..bba3cf88e62 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h @@ -60,23 +60,24 @@ static inline void native_halt(void) #include <asm/paravirt.h> #else #ifndef __ASSEMBLY__ +#include <linux/types.h> -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { return native_save_fl(); } -static inline void arch_local_irq_restore(unsigned long flags) +static inline notrace void arch_local_irq_restore(unsigned long flags) { native_restore_fl(flags); } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { native_irq_disable(); } -static inline void arch_local_irq_enable(void) +static inline notrace void arch_local_irq_enable(void) { native_irq_enable(); } @@ -102,7 +103,7 @@ static inline void halt(void) /* * For spinlocks, etc: */ -static inline unsigned long arch_local_irq_save(void) +static inline notrace unsigned long arch_local_irq_save(void) { unsigned long flags = arch_local_save_flags(); arch_local_irq_disable(); diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index b60f2924c41..879fd7d3387 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h @@ -61,6 +61,7 @@ hcall(unsigned long call, : "memory"); return call; } +/*:*/ /* Can't use our min() macro here: needs to be a constant */ #define LGUEST_IRQS (NR_IRQS < 32 ? 
NR_IRQS: 32) diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 19ae14ba697..0cd3800f33b 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -4,7 +4,6 @@ #define ARCH_DISCARD_MEMBLOCK u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); -void memblock_x86_to_bootmem(u64 start, u64 end); void memblock_x86_reserve_range(u64 start, u64 end, char *name); void memblock_x86_free_range(u64 start, u64 end); @@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end); u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); u64 memblock_x86_memory_in_range(u64 addr, u64 limit); +bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align); #endif diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index 5e83a416eca..ffa037f28d3 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h @@ -48,17 +48,6 @@ static inline int pfn_to_nid(unsigned long pfn) #endif } -/* - * Following are macros that each numa implmentation must define. - */ - -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) \ -({ \ - pg_data_t *__pgdat = NODE_DATA(nid); \ - __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ -}) - static inline int pfn_valid(int pfn) { int nid = pfn_to_nid(pfn); @@ -68,6 +57,8 @@ static inline int pfn_valid(int pfn) return 0; } +#define early_pfn_valid(pfn) pfn_valid((pfn)) + #endif /* CONFIG_DISCONTIGMEM */ #ifdef CONFIG_NEED_MULTIPLE_NODES diff --git a/arch/x86/include/asm/mmzone_64.h b/arch/x86/include/asm/mmzone_64.h index b3f88d7867c..129d9aa3ceb 100644 --- a/arch/x86/include/asm/mmzone_64.h +++ b/arch/x86/include/asm/mmzone_64.h @@ -13,8 +13,5 @@ extern struct pglist_data *node_data[]; #define NODE_DATA(nid) (node_data[nid]) -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) (NODE_DATA(nid)->node_start_pfn + \ - NODE_DATA(nid)->node_spanned_pages) #endif #endif /* _ASM_X86_MMZONE_64_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 485b4f1f079..d96bdb25ca3 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -259,6 +259,9 @@ #define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 +#define ENERGY_PERF_BIAS_PERFORMANCE 0 +#define ENERGY_PERF_BIAS_NORMAL 6 +#define ENERGY_PERF_BIAS_POWERSAVE 15 #define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1 diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index a0a9779084d..3470c9d0ebb 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -388,12 +388,9 @@ do { \ #define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) -/* - * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much - * faster than an xchg with forced lock semantics. 
- */ -#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) -#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) +#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val) +#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val) +#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val) #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) @@ -485,6 +482,8 @@ do { \ #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) #define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) +#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) +#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) #define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index d9d4dae305f..094fb30817a 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -152,6 +152,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); (regs)->bp = caller_frame_pointer(); \ (regs)->cs = __KERNEL_CS; \ regs->flags = 0; \ + asm volatile( \ + _ASM_MOV "%%"_ASM_SP ", %0\n" \ + : "=m" ((regs)->sp) \ + :: "memory" \ + ); \ } #else diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index 56fd9e3abbd..4f7e67e2345 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h @@ -102,6 +102,14 @@ #define P4_CONFIG_HT (1ULL << P4_CONFIG_HT_SHIFT) /* + * If an event has alias it should be marked + * with a special bit. (Don't forget to check + * P4_PEBS_CONFIG_MASK and related bits on + * modification.) + */ +#define P4_CONFIG_ALIASABLE (1 << 9) + +/* * The bits we allow to pass for RAW events */ #define P4_CONFIG_MASK_ESCR \ @@ -123,6 +131,31 @@ (p4_config_pack_escr(P4_CONFIG_MASK_ESCR)) | \ (p4_config_pack_cccr(P4_CONFIG_MASK_CCCR)) +/* + * In case of event aliasing we need to preserve some + * caller bits, otherwise the mapping won't be complete. + */ +#define P4_CONFIG_EVENT_ALIAS_MASK \ + (p4_config_pack_escr(P4_CONFIG_MASK_ESCR) | \ + p4_config_pack_cccr(P4_CCCR_EDGE | \ + P4_CCCR_THRESHOLD_MASK | \ + P4_CCCR_COMPLEMENT | \ + P4_CCCR_COMPARE)) + +#define P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS \ + ((P4_CONFIG_HT) | \ + p4_config_pack_escr(P4_ESCR_T0_OS | \ + P4_ESCR_T0_USR | \ + P4_ESCR_T1_OS | \ + P4_ESCR_T1_USR) | \ + p4_config_pack_cccr(P4_CCCR_OVF | \ + P4_CCCR_CASCADE | \ + P4_CCCR_FORCE_OVF | \ + P4_CCCR_THREAD_ANY | \ + P4_CCCR_OVF_PMI_T0 | \ + P4_CCCR_OVF_PMI_T1 | \ + P4_CONFIG_ALIASABLE)) + static inline bool p4_is_event_cascaded(u64 config) { u32 cccr = p4_config_unpack_cccr(config); diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index 971e0b46446..df1287019e6 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h @@ -30,17 +30,6 @@ extern void add_dtb(u64 data); extern void x86_add_irq_domains(void); void __cpuinit x86_of_pci_init(void); void x86_dtb_init(void); - -static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - return pdev ? 
pdev->dev.of_node : NULL; -} - -static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) -{ - return pci_device_to_OF_node(bus->self); -} - #else static inline void add_dtb(u64 data) { } static inline void x86_add_irq_domains(void) { } diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index 31d84acc151..a518c0a4504 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) u64 product; #ifdef __i386__ u32 tmp1, tmp2; +#else + ulong tmp; #endif if (shift < 0) @@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift) : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); #elif defined(__x86_64__) __asm__ ( - "mul %%rdx ; shrd $32,%%rdx,%%rax" - : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) ); + "mul %[mul_frac] ; shrd $32, %[hi], %[lo]" + : [lo]"=a"(product), + [hi]"=d"(tmp) + : "0"(delta), + [mul_frac]"rm"((u64)mul_frac)); #else #error implement me! #endif diff --git a/arch/x86/include/asm/rwlock.h b/arch/x86/include/asm/rwlock.h index 6a8c0d64510..a5370a03d90 100644 --- a/arch/x86/include/asm/rwlock.h +++ b/arch/x86/include/asm/rwlock.h @@ -1,7 +1,48 @@ #ifndef _ASM_X86_RWLOCK_H #define _ASM_X86_RWLOCK_H -#define RW_LOCK_BIAS 0x01000000 +#include <asm/asm.h> + +#if CONFIG_NR_CPUS <= 2048 + +#ifndef __ASSEMBLY__ +typedef union { + s32 lock; + s32 write; +} arch_rwlock_t; +#endif + +#define RW_LOCK_BIAS 0x00100000 +#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##l) +#define READ_LOCK_ATOMIC(n) atomic_##n +#define WRITE_LOCK_ADD(n) __ASM_FORM_COMMA(addl n) +#define WRITE_LOCK_SUB(n) __ASM_FORM_COMMA(subl n) +#define WRITE_LOCK_CMP RW_LOCK_BIAS + +#else /* CONFIG_NR_CPUS > 2048 */ + +#include <linux/const.h> + +#ifndef __ASSEMBLY__ +typedef union { + s64 lock; + struct { + u32 read; + s32 write; + }; +} arch_rwlock_t; +#endif + +#define RW_LOCK_BIAS (_AC(1,L) << 32) +#define READ_LOCK_SIZE(insn) __ASM_FORM(insn##q) +#define READ_LOCK_ATOMIC(n) atomic64_##n +#define WRITE_LOCK_ADD(n) __ASM_FORM(incl) +#define WRITE_LOCK_SUB(n) __ASM_FORM(decl) +#define WRITE_LOCK_CMP 1 + +#endif /* CONFIG_NR_CPUS */ + +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } /* Actual code is in asm/spinlock.h or in arch/x86/lib/rwlock.S */ diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index cd84f7208f7..5e641715c3f 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -162,7 +162,7 @@ #define GDT_ENTRY_DEFAULT_USER32_CS 4 #define GDT_ENTRY_DEFAULT_USER_DS 5 #define GDT_ENTRY_DEFAULT_USER_CS 6 -#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3) +#define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3) #define __USER32_DS __USER_DS #define GDT_ENTRY_TSS 8 /* needs two entries */ diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h index 725b7783199..49adfd7bb4a 100644 --- a/arch/x86/include/asm/smpboot_hooks.h +++ b/arch/x86/include/asm/smpboot_hooks.h @@ -10,7 +10,11 @@ static inline void smpboot_clear_io_apic_irqs(void) static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) { + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(0xa, 0xf); + spin_unlock_irqrestore(&rtc_lock, flags); local_flush_tlb(); pr_debug("1.\n"); *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) = @@ -23,6 +27,8 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long 
start_eip) static inline void smpboot_restore_warm_reset_vector(void) { + unsigned long flags; + /* * Install writable page 0 entry to set BIOS data area. */ @@ -32,7 +38,9 @@ static inline void smpboot_restore_warm_reset_vector(void) * Paranoid: Set warm reset code and vector here back * to default values. */ + spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(0, 0xf); + spin_unlock_irqrestore(&rtc_lock, flags); *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; } diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 3089f70c0c5..e9e51f710e6 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -2,7 +2,6 @@ #define _ASM_X86_SPINLOCK_H #include <asm/atomic.h> -#include <asm/rwlock.h> #include <asm/page.h> #include <asm/processor.h> #include <linux/compiler.h> @@ -234,7 +233,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) */ static inline int arch_read_can_lock(arch_rwlock_t *lock) { - return (int)(lock)->lock > 0; + return lock->lock > 0; } /** @@ -243,12 +242,12 @@ static inline int arch_read_can_lock(arch_rwlock_t *lock) */ static inline int arch_write_can_lock(arch_rwlock_t *lock) { - return (lock)->lock == RW_LOCK_BIAS; + return lock->write == WRITE_LOCK_CMP; } static inline void arch_read_lock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t" "jns 1f\n" "call __read_lock_failed\n\t" "1:\n" @@ -257,47 +256,55 @@ static inline void arch_read_lock(arch_rwlock_t *rw) static inline void arch_write_lock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" + asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t" "jz 1f\n" "call __write_lock_failed\n\t" "1:\n" - ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); + ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS) + : "memory"); } static inline int arch_read_trylock(arch_rwlock_t *lock) { - atomic_t *count = (atomic_t *)lock; + READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock; - if (atomic_dec_return(count) >= 0) + if (READ_LOCK_ATOMIC(dec_return)(count) >= 0) return 1; - atomic_inc(count); + READ_LOCK_ATOMIC(inc)(count); return 0; } static inline int arch_write_trylock(arch_rwlock_t *lock) { - atomic_t *count = (atomic_t *)lock; + atomic_t *count = (atomic_t *)&lock->write; - if (atomic_sub_and_test(RW_LOCK_BIAS, count)) + if (atomic_sub_and_test(WRITE_LOCK_CMP, count)) return 1; - atomic_add(RW_LOCK_BIAS, count); + atomic_add(WRITE_LOCK_CMP, count); return 0; } static inline void arch_read_unlock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0" + :"+m" (rw->lock) : : "memory"); } static inline void arch_write_unlock(arch_rwlock_t *rw) { - asm volatile(LOCK_PREFIX "addl %1, %0" - : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0" + : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory"); } #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) +#undef READ_LOCK_SIZE +#undef READ_LOCK_ATOMIC +#undef WRITE_LOCK_ADD +#undef WRITE_LOCK_SUB +#undef WRITE_LOCK_CMP + #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index dcb48b2edc1..7c7a486fcb6 100644 --- 
a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -11,10 +11,6 @@ typedef struct arch_spinlock { #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } -typedef struct { - unsigned int lock; -} arch_rwlock_t; - -#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#include <asm/rwlock.h> #endif /* _ASM_X86_SPINLOCK_TYPES_H */ diff --git a/arch/x86/include/asm/time.h b/arch/x86/include/asm/time.h index 7bdec4e9b73..92b8aec0697 100644 --- a/arch/x86/include/asm/time.h +++ b/arch/x86/include/asm/time.h @@ -1,10 +1,12 @@ #ifndef _ASM_X86_TIME_H #define _ASM_X86_TIME_H -extern void hpet_time_init(void); - +#include <linux/clocksource.h> #include <asm/mc146818rtc.h> +extern void hpet_time_init(void); extern void time_init(void); +extern struct clock_event_device *global_clock_event; + #endif /* _ASM_X86_TIME_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 99ddd148a76..36361bf6fdd 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -555,6 +555,9 @@ struct __large_struct { unsigned long buf[100]; }; #endif /* CONFIG_X86_WP_WORKS_OK */ +extern unsigned long +copy_from_user_nmi(void *to, const void __user *from, unsigned long n); + /* * movsl can be slow when source and dest are not both 8-byte aligned */ diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index 4fbda9a3f33..968d57dd54c 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h @@ -14,13 +14,14 @@ static inline int pci_xen_hvm_init(void) } #endif #if defined(CONFIG_XEN_DOM0) -void __init xen_setup_pirqs(void); +int __init pci_xen_initial_domain(void); int xen_find_device_domain_owner(struct pci_dev *dev); int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain); int xen_unregister_device_domain_owner(struct pci_dev *dev); #else -static inline void __init xen_setup_pirqs(void) +static inline int __init pci_xen_initial_domain(void) { + return -1; } static inline int xen_find_device_domain_owner(struct pci_dev *dev) { diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 90b06d4daee..11817ff8539 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -123,7 +123,6 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o - obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S index ead21b66311..b4fd836e405 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.S +++ b/arch/x86/kernel/acpi/realmode/wakeup.S @@ -28,6 +28,8 @@ pmode_cr3: .long 0 /* Saved %cr3 */ pmode_cr4: .long 0 /* Saved %cr4 */ pmode_efer: .quad 0 /* Saved EFER */ pmode_gdt: .quad 0 +pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */ +pmode_behavior: .long 0 /* Wakeup behavior flags */ realmode_flags: .long 0 real_magic: .long 0 trampoline_segment: .word 0 @@ -91,6 +93,18 @@ wakeup_code: /* Call the C code */ calll main + /* Restore MISC_ENABLE before entering protected mode, in case + BIOS decided to clear XD_DISABLE during S3. */ + movl pmode_behavior, %eax + btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax + jnc 1f + + movl pmode_misc_en, %eax + movl pmode_misc_en + 4, %edx + movl $MSR_IA32_MISC_ENABLE, %ecx + wrmsr +1: + /* Do any other stuff... 
*/ #ifndef CONFIG_64BIT diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h index e1828c07e79..97a29e1430e 100644 --- a/arch/x86/kernel/acpi/realmode/wakeup.h +++ b/arch/x86/kernel/acpi/realmode/wakeup.h @@ -21,6 +21,9 @@ struct wakeup_header { u32 pmode_efer_low; /* Protected mode EFER */ u32 pmode_efer_high; u64 pmode_gdt; + u32 pmode_misc_en_low; /* Protected mode MISC_ENABLE */ + u32 pmode_misc_en_high; + u32 pmode_behavior; /* Wakeup routine behavior flags */ u32 realmode_flags; u32 real_magic; u16 trampoline_segment; /* segment with trampoline code, 64-bit only */ @@ -39,4 +42,7 @@ extern struct wakeup_header wakeup_header; #define WAKEUP_HEADER_SIGNATURE 0x51ee1111 #define WAKEUP_END_SIGNATURE 0x65a22c82 +/* Wakeup behavior bits */ +#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0 + #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */ diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 18a857ba7a2..103b6ab368d 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -77,6 +77,12 @@ int acpi_suspend_lowlevel(void) header->pmode_cr0 = read_cr0(); header->pmode_cr4 = read_cr4_safe(); + header->pmode_behavior = 0; + if (!rdmsr_safe(MSR_IA32_MISC_ENABLE, + &header->pmode_misc_en_low, + &header->pmode_misc_en_high)) + header->pmode_behavior |= + (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE); header->realmode_flags = acpi_realmode_flags; header->real_magic = 0x12345678; diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c deleted file mode 100644 index cd8cbeb5fa3..00000000000 --- a/arch/x86/kernel/amd_iommu.c +++ /dev/null @@ -1,2722 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/pci.h> -#include <linux/pci-ats.h> -#include <linux/bitmap.h> -#include <linux/slab.h> -#include <linux/debugfs.h> -#include <linux/scatterlist.h> -#include <linux/dma-mapping.h> -#include <linux/iommu-helper.h> -#include <linux/iommu.h> -#include <linux/delay.h> -#include <asm/proto.h> -#include <asm/iommu.h> -#include <asm/gart.h> -#include <asm/amd_iommu_proto.h> -#include <asm/amd_iommu_types.h> -#include <asm/amd_iommu.h> - -#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) - -#define LOOP_TIMEOUT 100000 - -static DEFINE_RWLOCK(amd_iommu_devtable_lock); - -/* A list of preallocated protection domains */ -static LIST_HEAD(iommu_pd_list); -static DEFINE_SPINLOCK(iommu_pd_list_lock); - -/* - * Domain for untranslated devices - only allocated - * if iommu=pt passed on kernel cmd line. 
- */ -static struct protection_domain *pt_domain; - -static struct iommu_ops amd_iommu_ops; - -/* - * general struct to manage commands send to an IOMMU - */ -struct iommu_cmd { - u32 data[4]; -}; - -static void update_domain(struct protection_domain *domain); - -/**************************************************************************** - * - * Helper functions - * - ****************************************************************************/ - -static inline u16 get_device_id(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - - return calc_devid(pdev->bus->number, pdev->devfn); -} - -static struct iommu_dev_data *get_dev_data(struct device *dev) -{ - return dev->archdata.iommu; -} - -/* - * In this function the list of preallocated protection domains is traversed to - * find the domain for a specific device - */ -static struct dma_ops_domain *find_protection_domain(u16 devid) -{ - struct dma_ops_domain *entry, *ret = NULL; - unsigned long flags; - u16 alias = amd_iommu_alias_table[devid]; - - if (list_empty(&iommu_pd_list)) - return NULL; - - spin_lock_irqsave(&iommu_pd_list_lock, flags); - - list_for_each_entry(entry, &iommu_pd_list, list) { - if (entry->target_dev == devid || - entry->target_dev == alias) { - ret = entry; - break; - } - } - - spin_unlock_irqrestore(&iommu_pd_list_lock, flags); - - return ret; -} - -/* - * This function checks if the driver got a valid device from the caller to - * avoid dereferencing invalid pointers. - */ -static bool check_device(struct device *dev) -{ - u16 devid; - - if (!dev || !dev->dma_mask) - return false; - - /* No device or no PCI device */ - if (dev->bus != &pci_bus_type) - return false; - - devid = get_device_id(dev); - - /* Out of our scope? */ - if (devid > amd_iommu_last_bdf) - return false; - - if (amd_iommu_rlookup_table[devid] == NULL) - return false; - - return true; -} - -static int iommu_init_device(struct device *dev) -{ - struct iommu_dev_data *dev_data; - struct pci_dev *pdev; - u16 devid, alias; - - if (dev->archdata.iommu) - return 0; - - dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); - if (!dev_data) - return -ENOMEM; - - dev_data->dev = dev; - - devid = get_device_id(dev); - alias = amd_iommu_alias_table[devid]; - pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff); - if (pdev) - dev_data->alias = &pdev->dev; - - atomic_set(&dev_data->bind, 0); - - dev->archdata.iommu = dev_data; - - - return 0; -} - -static void iommu_uninit_device(struct device *dev) -{ - kfree(dev->archdata.iommu); -} - -void __init amd_iommu_uninit_devices(void) -{ - struct pci_dev *pdev = NULL; - - for_each_pci_dev(pdev) { - - if (!check_device(&pdev->dev)) - continue; - - iommu_uninit_device(&pdev->dev); - } -} - -int __init amd_iommu_init_devices(void) -{ - struct pci_dev *pdev = NULL; - int ret = 0; - - for_each_pci_dev(pdev) { - - if (!check_device(&pdev->dev)) - continue; - - ret = iommu_init_device(&pdev->dev); - if (ret) - goto out_free; - } - - return 0; - -out_free: - - amd_iommu_uninit_devices(); - - return ret; -} -#ifdef CONFIG_AMD_IOMMU_STATS - -/* - * Initialization code for statistics collection - */ - -DECLARE_STATS_COUNTER(compl_wait); -DECLARE_STATS_COUNTER(cnt_map_single); -DECLARE_STATS_COUNTER(cnt_unmap_single); -DECLARE_STATS_COUNTER(cnt_map_sg); -DECLARE_STATS_COUNTER(cnt_unmap_sg); -DECLARE_STATS_COUNTER(cnt_alloc_coherent); -DECLARE_STATS_COUNTER(cnt_free_coherent); -DECLARE_STATS_COUNTER(cross_page); -DECLARE_STATS_COUNTER(domain_flush_single); -DECLARE_STATS_COUNTER(domain_flush_all); 
-DECLARE_STATS_COUNTER(alloced_io_mem); -DECLARE_STATS_COUNTER(total_map_requests); - -static struct dentry *stats_dir; -static struct dentry *de_fflush; - -static void amd_iommu_stats_add(struct __iommu_counter *cnt) -{ - if (stats_dir == NULL) - return; - - cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir, - &cnt->value); -} - -static void amd_iommu_stats_init(void) -{ - stats_dir = debugfs_create_dir("amd-iommu", NULL); - if (stats_dir == NULL) - return; - - de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, - (u32 *)&amd_iommu_unmap_flush); - - amd_iommu_stats_add(&compl_wait); - amd_iommu_stats_add(&cnt_map_single); - amd_iommu_stats_add(&cnt_unmap_single); - amd_iommu_stats_add(&cnt_map_sg); - amd_iommu_stats_add(&cnt_unmap_sg); - amd_iommu_stats_add(&cnt_alloc_coherent); - amd_iommu_stats_add(&cnt_free_coherent); - amd_iommu_stats_add(&cross_page); - amd_iommu_stats_add(&domain_flush_single); - amd_iommu_stats_add(&domain_flush_all); - amd_iommu_stats_add(&alloced_io_mem); - amd_iommu_stats_add(&total_map_requests); -} - -#endif - -/**************************************************************************** - * - * Interrupt handling functions - * - ****************************************************************************/ - -static void dump_dte_entry(u16 devid) -{ - int i; - - for (i = 0; i < 8; ++i) - pr_err("AMD-Vi: DTE[%d]: %08x\n", i, - amd_iommu_dev_table[devid].data[i]); -} - -static void dump_command(unsigned long phys_addr) -{ - struct iommu_cmd *cmd = phys_to_virt(phys_addr); - int i; - - for (i = 0; i < 4; ++i) - pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); -} - -static void iommu_print_event(struct amd_iommu *iommu, void *__evt) -{ - u32 *event = __evt; - int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; - int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK; - int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK; - int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; - u64 address = (u64)(((u64)event[3]) << 32) | event[2]; - - printk(KERN_ERR "AMD-Vi: Event logged ["); - - switch (type) { - case EVENT_TYPE_ILL_DEV: - printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x " - "address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address, flags); - dump_dte_entry(devid); - break; - case EVENT_TYPE_IO_FAULT: - printk("IO_PAGE_FAULT device=%02x:%02x.%x " - "domain=0x%04x address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - domid, address, flags); - break; - case EVENT_TYPE_DEV_TAB_ERR: - printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " - "address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address, flags); - break; - case EVENT_TYPE_PAGE_TAB_ERR: - printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x " - "domain=0x%04x address=0x%016llx flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - domid, address, flags); - break; - case EVENT_TYPE_ILL_CMD: - printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); - dump_command(address); - break; - case EVENT_TYPE_CMD_HARD_ERR: - printk("COMMAND_HARDWARE_ERROR address=0x%016llx " - "flags=0x%04x]\n", address, flags); - break; - case EVENT_TYPE_IOTLB_INV_TO: - printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x " - "address=0x%016llx]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address); - break; - case EVENT_TYPE_INV_DEV_REQ: - printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x " - "address=0x%016llx 
flags=0x%04x]\n", - PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), - address, flags); - break; - default: - printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type); - } -} - -static void iommu_poll_events(struct amd_iommu *iommu) -{ - u32 head, tail; - unsigned long flags; - - spin_lock_irqsave(&iommu->lock, flags); - - head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); - - while (head != tail) { - iommu_print_event(iommu, iommu->evt_buf + head); - head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; - } - - writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - - spin_unlock_irqrestore(&iommu->lock, flags); -} - -irqreturn_t amd_iommu_int_thread(int irq, void *data) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_poll_events(iommu); - - return IRQ_HANDLED; -} - -irqreturn_t amd_iommu_int_handler(int irq, void *data) -{ - return IRQ_WAKE_THREAD; -} - -/**************************************************************************** - * - * IOMMU command queuing functions - * - ****************************************************************************/ - -static int wait_on_sem(volatile u64 *sem) -{ - int i = 0; - - while (*sem == 0 && i < LOOP_TIMEOUT) { - udelay(1); - i += 1; - } - - if (i == LOOP_TIMEOUT) { - pr_alert("AMD-Vi: Completion-Wait loop timed out\n"); - return -EIO; - } - - return 0; -} - -static void copy_cmd_to_buffer(struct amd_iommu *iommu, - struct iommu_cmd *cmd, - u32 tail) -{ - u8 *target; - - target = iommu->cmd_buf + tail; - tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - - /* Copy command to buffer */ - memcpy(target, cmd, sizeof(*cmd)); - - /* Tell the IOMMU about it */ - writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); -} - -static void build_completion_wait(struct iommu_cmd *cmd, u64 address) -{ - WARN_ON(address & 0x7ULL); - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; - cmd->data[1] = upper_32_bits(__pa(address)); - cmd->data[2] = 1; - CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); -} - -static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) -{ - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY); -} - -static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, - size_t size, u16 domid, int pde) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[1] |= domid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); - if (s) /* size bit - we flush more than one 4kb page */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; -} - -static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, - u64 address, size_t size) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - cmd->data[0] |= (qdep & 0xff) << 24; 
- cmd->data[1] = devid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); - if (s) - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; -} - -static void build_inv_all(struct iommu_cmd *cmd) -{ - memset(cmd, 0, sizeof(*cmd)); - CMD_SET_TYPE(cmd, CMD_INV_ALL); -} - -/* - * Writes the command to the IOMMUs command buffer and informs the - * hardware about the new command. - */ -static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) -{ - u32 left, tail, head, next_tail; - unsigned long flags; - - WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); - -again: - spin_lock_irqsave(&iommu->lock, flags); - - head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - left = (head - next_tail) % iommu->cmd_buf_size; - - if (left <= 2) { - struct iommu_cmd sync_cmd; - volatile u64 sem = 0; - int ret; - - build_completion_wait(&sync_cmd, (u64)&sem); - copy_cmd_to_buffer(iommu, &sync_cmd, tail); - - spin_unlock_irqrestore(&iommu->lock, flags); - - if ((ret = wait_on_sem(&sem)) != 0) - return ret; - - goto again; - } - - copy_cmd_to_buffer(iommu, cmd, tail); - - /* We need to sync now to make sure all commands are processed */ - iommu->need_sync = true; - - spin_unlock_irqrestore(&iommu->lock, flags); - - return 0; -} - -/* - * This function queues a completion wait command into the command - * buffer of an IOMMU - */ -static int iommu_completion_wait(struct amd_iommu *iommu) -{ - struct iommu_cmd cmd; - volatile u64 sem = 0; - int ret; - - if (!iommu->need_sync) - return 0; - - build_completion_wait(&cmd, (u64)&sem); - - ret = iommu_queue_command(iommu, &cmd); - if (ret) - return ret; - - return wait_on_sem(&sem); -} - -static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) -{ - struct iommu_cmd cmd; - - build_inv_dte(&cmd, devid); - - return iommu_queue_command(iommu, &cmd); -} - -static void iommu_flush_dte_all(struct amd_iommu *iommu) -{ - u32 devid; - - for (devid = 0; devid <= 0xffff; ++devid) - iommu_flush_dte(iommu, devid); - - iommu_completion_wait(iommu); -} - -/* - * This function uses heavy locking and may disable irqs for some time. But - * this is no issue because it is only called during resume. 
- */ -static void iommu_flush_tlb_all(struct amd_iommu *iommu) -{ - u32 dom_id; - - for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { - struct iommu_cmd cmd; - build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, - dom_id, 1); - iommu_queue_command(iommu, &cmd); - } - - iommu_completion_wait(iommu); -} - -static void iommu_flush_all(struct amd_iommu *iommu) -{ - struct iommu_cmd cmd; - - build_inv_all(&cmd); - - iommu_queue_command(iommu, &cmd); - iommu_completion_wait(iommu); -} - -void iommu_flush_all_caches(struct amd_iommu *iommu) -{ - if (iommu_feature(iommu, FEATURE_IA)) { - iommu_flush_all(iommu); - } else { - iommu_flush_dte_all(iommu); - iommu_flush_tlb_all(iommu); - } -} - -/* - * Command send function for flushing on-device TLB - */ -static int device_flush_iotlb(struct device *dev, u64 address, size_t size) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct amd_iommu *iommu; - struct iommu_cmd cmd; - u16 devid; - int qdep; - - qdep = pci_ats_queue_depth(pdev); - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - - build_inv_iotlb_pages(&cmd, devid, qdep, address, size); - - return iommu_queue_command(iommu, &cmd); -} - -/* - * Command send function for invalidating a device table entry - */ -static int device_flush_dte(struct device *dev) -{ - struct amd_iommu *iommu; - struct pci_dev *pdev; - u16 devid; - int ret; - - pdev = to_pci_dev(dev); - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - - ret = iommu_flush_dte(iommu, devid); - if (ret) - return ret; - - if (pci_ats_enabled(pdev)) - ret = device_flush_iotlb(dev, 0, ~0UL); - - return ret; -} - -/* - * TLB invalidation function which is called from the mapping functions. - * It invalidates a single PTE if the range to flush is within a single - * page. Otherwise it flushes the whole TLB of the IOMMU. - */ -static void __domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size, int pde) -{ - struct iommu_dev_data *dev_data; - struct iommu_cmd cmd; - int ret = 0, i; - - build_inv_iommu_pages(&cmd, address, size, domain->id, pde); - - for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) - continue; - - /* - * Devices of this domain are behind this IOMMU - * We need a TLB flush - */ - ret |= iommu_queue_command(amd_iommus[i], &cmd); - } - - list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); - - if (!pci_ats_enabled(pdev)) - continue; - - ret |= device_flush_iotlb(dev_data->dev, address, size); - } - - WARN_ON(ret); -} - -static void domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size) -{ - __domain_flush_pages(domain, address, size, 0); -} - -/* Flush the whole IO/TLB for a given protection domain */ -static void domain_flush_tlb(struct protection_domain *domain) -{ - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); -} - -/* Flush the whole IO/TLB for a given protection domain - including PDE */ -static void domain_flush_tlb_pde(struct protection_domain *domain) -{ - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); -} - -static void domain_flush_complete(struct protection_domain *domain) -{ - int i; - - for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) - continue; - - /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. 
- */ - iommu_completion_wait(amd_iommus[i]); - } -} - - -/* - * This function flushes the DTEs for all devices in domain - */ -static void domain_flush_devices(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - unsigned long flags; - - spin_lock_irqsave(&domain->lock, flags); - - list_for_each_entry(dev_data, &domain->dev_list, list) - device_flush_dte(dev_data->dev); - - spin_unlock_irqrestore(&domain->lock, flags); -} - -/**************************************************************************** - * - * The functions below are used the create the page table mappings for - * unity mapped regions. - * - ****************************************************************************/ - -/* - * This function is used to add another level to an IO page table. Adding - * another level increases the size of the address space by 9 bits to a size up - * to 64 bits. - */ -static bool increase_address_space(struct protection_domain *domain, - gfp_t gfp) -{ - u64 *pte; - - if (domain->mode == PAGE_MODE_6_LEVEL) - /* address space already 64 bit large */ - return false; - - pte = (void *)get_zeroed_page(gfp); - if (!pte) - return false; - - *pte = PM_LEVEL_PDE(domain->mode, - virt_to_phys(domain->pt_root)); - domain->pt_root = pte; - domain->mode += 1; - domain->updated = true; - - return true; -} - -static u64 *alloc_pte(struct protection_domain *domain, - unsigned long address, - unsigned long page_size, - u64 **pte_page, - gfp_t gfp) -{ - int level, end_lvl; - u64 *pte, *page; - - BUG_ON(!is_power_of_2(page_size)); - - while (address > PM_LEVEL_SIZE(domain->mode)) - increase_address_space(domain, gfp); - - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; - address = PAGE_SIZE_ALIGN(address, page_size); - end_lvl = PAGE_SIZE_LEVEL(page_size); - - while (level > end_lvl) { - if (!IOMMU_PTE_PRESENT(*pte)) { - page = (u64 *)get_zeroed_page(gfp); - if (!page) - return NULL; - *pte = PM_LEVEL_PDE(level, virt_to_phys(page)); - } - - /* No level skipping support yet */ - if (PM_PTE_LEVEL(*pte) != level) - return NULL; - - level -= 1; - - pte = IOMMU_PTE_PAGE(*pte); - - if (pte_page && level == end_lvl) - *pte_page = pte; - - pte = &pte[PM_LEVEL_INDEX(level, address)]; - } - - return pte; -} - -/* - * This function checks if there is a PTE for a given dma address. If - * there is one, it returns the pointer to it. - */ -static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) -{ - int level; - u64 *pte; - - if (address > PM_LEVEL_SIZE(domain->mode)) - return NULL; - - level = domain->mode - 1; - pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; - - while (level > 0) { - - /* Not Present */ - if (!IOMMU_PTE_PRESENT(*pte)) - return NULL; - - /* Large PTE */ - if (PM_PTE_LEVEL(*pte) == 0x07) { - unsigned long pte_mask, __pte; - - /* - * If we have a series of large PTEs, make - * sure to return a pointer to the first one. - */ - pte_mask = PTE_PAGE_SIZE(*pte); - pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1); - __pte = ((unsigned long)pte) & pte_mask; - - return (u64 *)__pte; - } - - /* No level skipping support yet */ - if (PM_PTE_LEVEL(*pte) != level) - return NULL; - - level -= 1; - - /* Walk to the next level */ - pte = IOMMU_PTE_PAGE(*pte); - pte = &pte[PM_LEVEL_INDEX(level, address)]; - } - - return pte; -} - -/* - * Generic mapping functions. It maps a physical address into a DMA - * address space. It allocates the page table pages if necessary. 
- * In the future it can be extended to a generic mapping function - * supporting all features of AMD IOMMU page tables like level skipping - * and full 64 bit address spaces. - */ -static int iommu_map_page(struct protection_domain *dom, - unsigned long bus_addr, - unsigned long phys_addr, - int prot, - unsigned long page_size) -{ - u64 __pte, *pte; - int i, count; - - if (!(prot & IOMMU_PROT_MASK)) - return -EINVAL; - - bus_addr = PAGE_ALIGN(bus_addr); - phys_addr = PAGE_ALIGN(phys_addr); - count = PAGE_SIZE_PTE_COUNT(page_size); - pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL); - - for (i = 0; i < count; ++i) - if (IOMMU_PTE_PRESENT(pte[i])) - return -EBUSY; - - if (page_size > PAGE_SIZE) { - __pte = PAGE_SIZE_PTE(phys_addr, page_size); - __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC; - } else - __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC; - - if (prot & IOMMU_PROT_IR) - __pte |= IOMMU_PTE_IR; - if (prot & IOMMU_PROT_IW) - __pte |= IOMMU_PTE_IW; - - for (i = 0; i < count; ++i) - pte[i] = __pte; - - update_domain(dom); - - return 0; -} - -static unsigned long iommu_unmap_page(struct protection_domain *dom, - unsigned long bus_addr, - unsigned long page_size) -{ - unsigned long long unmap_size, unmapped; - u64 *pte; - - BUG_ON(!is_power_of_2(page_size)); - - unmapped = 0; - - while (unmapped < page_size) { - - pte = fetch_pte(dom, bus_addr); - - if (!pte) { - /* - * No PTE for this address - * move forward in 4kb steps - */ - unmap_size = PAGE_SIZE; - } else if (PM_PTE_LEVEL(*pte) == 0) { - /* 4kb PTE found for this address */ - unmap_size = PAGE_SIZE; - *pte = 0ULL; - } else { - int count, i; - - /* Large PTE found which maps this address */ - unmap_size = PTE_PAGE_SIZE(*pte); - count = PAGE_SIZE_PTE_COUNT(unmap_size); - for (i = 0; i < count; i++) - pte[i] = 0ULL; - } - - bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size; - unmapped += unmap_size; - } - - BUG_ON(!is_power_of_2(unmapped)); - - return unmapped; -} - -/* - * This function checks if a specific unity mapping entry is needed for - * this specific IOMMU. - */ -static int iommu_for_unity_map(struct amd_iommu *iommu, - struct unity_map_entry *entry) -{ - u16 bdf, i; - - for (i = entry->devid_start; i <= entry->devid_end; ++i) { - bdf = amd_iommu_alias_table[i]; - if (amd_iommu_rlookup_table[bdf] == iommu) - return 1; - } - - return 0; -} - -/* - * This function actually applies the mapping to the page table of the - * dma_ops domain. - */ -static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, - struct unity_map_entry *e) -{ - u64 addr; - int ret; - - for (addr = e->address_start; addr < e->address_end; - addr += PAGE_SIZE) { - ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, - PAGE_SIZE); - if (ret) - return ret; - /* - * if unity mapping is in aperture range mark the page - * as allocated in the aperture - */ - if (addr < dma_dom->aperture_size) - __set_bit(addr >> PAGE_SHIFT, - dma_dom->aperture[0]->bitmap); - } - - return 0; -} - -/* - * Init the unity mappings for a specific IOMMU in the system - * - * Basically iterates over all unity mapping entries and applies them to - * the default domain DMA of that IOMMU if necessary. 
- */ -static int iommu_init_unity_mappings(struct amd_iommu *iommu) -{ - struct unity_map_entry *entry; - int ret; - - list_for_each_entry(entry, &amd_iommu_unity_map, list) { - if (!iommu_for_unity_map(iommu, entry)) - continue; - ret = dma_ops_unity_map(iommu->default_dom, entry); - if (ret) - return ret; - } - - return 0; -} - -/* - * Inits the unity mappings required for a specific device - */ -static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, - u16 devid) -{ - struct unity_map_entry *e; - int ret; - - list_for_each_entry(e, &amd_iommu_unity_map, list) { - if (!(devid >= e->devid_start && devid <= e->devid_end)) - continue; - ret = dma_ops_unity_map(dma_dom, e); - if (ret) - return ret; - } - - return 0; -} - -/**************************************************************************** - * - * The next functions belong to the address allocator for the dma_ops - * interface functions. They work like the allocators in the other IOMMU - * drivers. Its basically a bitmap which marks the allocated pages in - * the aperture. Maybe it could be enhanced in the future to a more - * efficient allocator. - * - ****************************************************************************/ - -/* - * The address allocator core functions. - * - * called with domain->lock held - */ - -/* - * Used to reserve address ranges in the aperture (e.g. for exclusion - * ranges. - */ -static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, - unsigned long start_page, - unsigned int pages) -{ - unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT; - - if (start_page + pages > last_page) - pages = last_page - start_page; - - for (i = start_page; i < start_page + pages; ++i) { - int index = i / APERTURE_RANGE_PAGES; - int page = i % APERTURE_RANGE_PAGES; - __set_bit(page, dom->aperture[index]->bitmap); - } -} - -/* - * This function is used to add a new aperture range to an existing - * aperture in case of dma_ops domain allocation or address allocation - * failure. 
- */ -static int alloc_new_range(struct dma_ops_domain *dma_dom, - bool populate, gfp_t gfp) -{ - int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; - struct amd_iommu *iommu; - unsigned long i; - -#ifdef CONFIG_IOMMU_STRESS - populate = false; -#endif - - if (index >= APERTURE_MAX_RANGES) - return -ENOMEM; - - dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp); - if (!dma_dom->aperture[index]) - return -ENOMEM; - - dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp); - if (!dma_dom->aperture[index]->bitmap) - goto out_free; - - dma_dom->aperture[index]->offset = dma_dom->aperture_size; - - if (populate) { - unsigned long address = dma_dom->aperture_size; - int i, num_ptes = APERTURE_RANGE_PAGES / 512; - u64 *pte, *pte_page; - - for (i = 0; i < num_ptes; ++i) { - pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, - &pte_page, gfp); - if (!pte) - goto out_free; - - dma_dom->aperture[index]->pte_pages[i] = pte_page; - - address += APERTURE_RANGE_SIZE / 64; - } - } - - dma_dom->aperture_size += APERTURE_RANGE_SIZE; - - /* Initialize the exclusion range if necessary */ - for_each_iommu(iommu) { - if (iommu->exclusion_start && - iommu->exclusion_start >= dma_dom->aperture[index]->offset - && iommu->exclusion_start < dma_dom->aperture_size) { - unsigned long startpage; - int pages = iommu_num_pages(iommu->exclusion_start, - iommu->exclusion_length, - PAGE_SIZE); - startpage = iommu->exclusion_start >> PAGE_SHIFT; - dma_ops_reserve_addresses(dma_dom, startpage, pages); - } - } - - /* - * Check for areas already mapped as present in the new aperture - * range and mark those pages as reserved in the allocator. Such - * mappings may already exist as a result of requested unity - * mappings for devices. - */ - for (i = dma_dom->aperture[index]->offset; - i < dma_dom->aperture_size; - i += PAGE_SIZE) { - u64 *pte = fetch_pte(&dma_dom->domain, i); - if (!pte || !IOMMU_PTE_PRESENT(*pte)) - continue; - - dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); - } - - update_domain(&dma_dom->domain); - - return 0; - -out_free: - update_domain(&dma_dom->domain); - - free_page((unsigned long)dma_dom->aperture[index]->bitmap); - - kfree(dma_dom->aperture[index]); - dma_dom->aperture[index] = NULL; - - return -ENOMEM; -} - -static unsigned long dma_ops_area_alloc(struct device *dev, - struct dma_ops_domain *dom, - unsigned int pages, - unsigned long align_mask, - u64 dma_mask, - unsigned long start) -{ - unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE; - int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT; - int i = start >> APERTURE_RANGE_SHIFT; - unsigned long boundary_size; - unsigned long address = -1; - unsigned long limit; - - next_bit >>= PAGE_SHIFT; - - boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, - PAGE_SIZE) >> PAGE_SHIFT; - - for (;i < max_index; ++i) { - unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT; - - if (dom->aperture[i]->offset >= dma_mask) - break; - - limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset, - dma_mask >> PAGE_SHIFT); - - address = iommu_area_alloc(dom->aperture[i]->bitmap, - limit, next_bit, pages, 0, - boundary_size, align_mask); - if (address != -1) { - address = dom->aperture[i]->offset + - (address << PAGE_SHIFT); - dom->next_address = address + (pages << PAGE_SHIFT); - break; - } - - next_bit = 0; - } - - return address; -} - -static unsigned long dma_ops_alloc_addresses(struct device *dev, - struct dma_ops_domain *dom, - unsigned int pages, - unsigned long align_mask, - u64 
dma_mask) -{ - unsigned long address; - -#ifdef CONFIG_IOMMU_STRESS - dom->next_address = 0; - dom->need_flush = true; -#endif - - address = dma_ops_area_alloc(dev, dom, pages, align_mask, - dma_mask, dom->next_address); - - if (address == -1) { - dom->next_address = 0; - address = dma_ops_area_alloc(dev, dom, pages, align_mask, - dma_mask, 0); - dom->need_flush = true; - } - - if (unlikely(address == -1)) - address = DMA_ERROR_CODE; - - WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); - - return address; -} - -/* - * The address free function. - * - * called with domain->lock held - */ -static void dma_ops_free_addresses(struct dma_ops_domain *dom, - unsigned long address, - unsigned int pages) -{ - unsigned i = address >> APERTURE_RANGE_SHIFT; - struct aperture_range *range = dom->aperture[i]; - - BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL); - -#ifdef CONFIG_IOMMU_STRESS - if (i < 4) - return; -#endif - - if (address >= dom->next_address) - dom->need_flush = true; - - address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; - - bitmap_clear(range->bitmap, address, pages); - -} - -/**************************************************************************** - * - * The next functions belong to the domain allocation. A domain is - * allocated for every IOMMU as the default domain. If device isolation - * is enabled, every device get its own domain. The most important thing - * about domains is the page table mapping the DMA address space they - * contain. - * - ****************************************************************************/ - -/* - * This function adds a protection domain to the global protection domain list - */ -static void add_domain_to_list(struct protection_domain *domain) -{ - unsigned long flags; - - spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_add(&domain->list, &amd_iommu_pd_list); - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); -} - -/* - * This function removes a protection domain to the global - * protection domain list - */ -static void del_domain_from_list(struct protection_domain *domain) -{ - unsigned long flags; - - spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_del(&domain->list); - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); -} - -static u16 domain_id_alloc(void) -{ - unsigned long flags; - int id; - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID); - BUG_ON(id == 0); - if (id > 0 && id < MAX_DOMAIN_ID) - __set_bit(id, amd_iommu_pd_alloc_bitmap); - else - id = 0; - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - return id; -} - -static void domain_id_free(int id) -{ - unsigned long flags; - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - if (id > 0 && id < MAX_DOMAIN_ID) - __clear_bit(id, amd_iommu_pd_alloc_bitmap); - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); -} - -static void free_pagetable(struct protection_domain *domain) -{ - int i, j; - u64 *p1, *p2, *p3; - - p1 = domain->pt_root; - - if (!p1) - return; - - for (i = 0; i < 512; ++i) { - if (!IOMMU_PTE_PRESENT(p1[i])) - continue; - - p2 = IOMMU_PTE_PAGE(p1[i]); - for (j = 0; j < 512; ++j) { - if (!IOMMU_PTE_PRESENT(p2[j])) - continue; - p3 = IOMMU_PTE_PAGE(p2[j]); - free_page((unsigned long)p3); - } - - free_page((unsigned long)p2); - } - - free_page((unsigned long)p1); - - domain->pt_root = NULL; -} - -/* - * Free a domain, only used if something went wrong in the - * allocation path and we need to free an already allocated page table - */ -static 
void dma_ops_domain_free(struct dma_ops_domain *dom) -{ - int i; - - if (!dom) - return; - - del_domain_from_list(&dom->domain); - - free_pagetable(&dom->domain); - - for (i = 0; i < APERTURE_MAX_RANGES; ++i) { - if (!dom->aperture[i]) - continue; - free_page((unsigned long)dom->aperture[i]->bitmap); - kfree(dom->aperture[i]); - } - - kfree(dom); -} - -/* - * Allocates a new protection domain usable for the dma_ops functions. - * It also initializes the page table and the address allocator data - * structures required for the dma_ops interface - */ -static struct dma_ops_domain *dma_ops_domain_alloc(void) -{ - struct dma_ops_domain *dma_dom; - - dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); - if (!dma_dom) - return NULL; - - spin_lock_init(&dma_dom->domain.lock); - - dma_dom->domain.id = domain_id_alloc(); - if (dma_dom->domain.id == 0) - goto free_dma_dom; - INIT_LIST_HEAD(&dma_dom->domain.dev_list); - dma_dom->domain.mode = PAGE_MODE_2_LEVEL; - dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); - dma_dom->domain.flags = PD_DMA_OPS_MASK; - dma_dom->domain.priv = dma_dom; - if (!dma_dom->domain.pt_root) - goto free_dma_dom; - - dma_dom->need_flush = false; - dma_dom->target_dev = 0xffff; - - add_domain_to_list(&dma_dom->domain); - - if (alloc_new_range(dma_dom, true, GFP_KERNEL)) - goto free_dma_dom; - - /* - * mark the first page as allocated so we never return 0 as - * a valid dma-address. So we can use 0 as error value - */ - dma_dom->aperture[0]->bitmap[0] = 1; - dma_dom->next_address = 0; - - - return dma_dom; - -free_dma_dom: - dma_ops_domain_free(dma_dom); - - return NULL; -} - -/* - * little helper function to check whether a given protection domain is a - * dma_ops domain - */ -static bool dma_ops_domain(struct protection_domain *domain) -{ - return domain->flags & PD_DMA_OPS_MASK; -} - -static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) -{ - u64 pte_root = virt_to_phys(domain->pt_root); - u32 flags = 0; - - pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) - << DEV_ENTRY_MODE_SHIFT; - pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; - - if (ats) - flags |= DTE_FLAG_IOTLB; - - amd_iommu_dev_table[devid].data[3] |= flags; - amd_iommu_dev_table[devid].data[2] = domain->id; - amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); - amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); -} - -static void clear_dte_entry(u16 devid) -{ - /* remove entry from the device table seen by the hardware */ - amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; - amd_iommu_dev_table[devid].data[1] = 0; - amd_iommu_dev_table[devid].data[2] = 0; - - amd_iommu_apply_erratum_63(devid); -} - -static void do_attach(struct device *dev, struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - struct amd_iommu *iommu; - struct pci_dev *pdev; - bool ats = false; - u16 devid; - - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - dev_data = get_dev_data(dev); - pdev = to_pci_dev(dev); - - if (amd_iommu_iotlb_sup) - ats = pci_ats_enabled(pdev); - - /* Update data structures */ - dev_data->domain = domain; - list_add(&dev_data->list, &domain->dev_list); - set_dte_entry(devid, domain, ats); - - /* Do reference counting */ - domain->dev_iommu[iommu->index] += 1; - domain->dev_cnt += 1; - - /* Flush the DTE entry */ - device_flush_dte(dev); -} - -static void do_detach(struct device *dev) -{ - struct iommu_dev_data *dev_data; - struct amd_iommu *iommu; - u16 devid; - - 
devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - dev_data = get_dev_data(dev); - - /* decrease reference counters */ - dev_data->domain->dev_iommu[iommu->index] -= 1; - dev_data->domain->dev_cnt -= 1; - - /* Update data structures */ - dev_data->domain = NULL; - list_del(&dev_data->list); - clear_dte_entry(devid); - - /* Flush the DTE entry */ - device_flush_dte(dev); -} - -/* - * If a device is not yet associated with a domain, this function does - * assigns it visible for the hardware - */ -static int __attach_device(struct device *dev, - struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data, *alias_data; - int ret; - - dev_data = get_dev_data(dev); - alias_data = get_dev_data(dev_data->alias); - - if (!alias_data) - return -EINVAL; - - /* lock domain */ - spin_lock(&domain->lock); - - /* Some sanity checks */ - ret = -EBUSY; - if (alias_data->domain != NULL && - alias_data->domain != domain) - goto out_unlock; - - if (dev_data->domain != NULL && - dev_data->domain != domain) - goto out_unlock; - - /* Do real assignment */ - if (dev_data->alias != dev) { - alias_data = get_dev_data(dev_data->alias); - if (alias_data->domain == NULL) - do_attach(dev_data->alias, domain); - - atomic_inc(&alias_data->bind); - } - - if (dev_data->domain == NULL) - do_attach(dev, domain); - - atomic_inc(&dev_data->bind); - - ret = 0; - -out_unlock: - - /* ready */ - spin_unlock(&domain->lock); - - return ret; -} - -/* - * If a device is not yet associated with a domain, this function does - * assigns it visible for the hardware - */ -static int attach_device(struct device *dev, - struct protection_domain *domain) -{ - struct pci_dev *pdev = to_pci_dev(dev); - unsigned long flags; - int ret; - - if (amd_iommu_iotlb_sup) - pci_enable_ats(pdev, PAGE_SHIFT); - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - ret = __attach_device(dev, domain); - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - /* - * We might boot into a crash-kernel here. The crashed kernel - * left the caches in the IOMMU dirty. So we have to flush - * here to evict all dirty stuff. - */ - domain_flush_tlb_pde(domain); - - return ret; -} - -/* - * Removes a device from a protection domain (unlocked) - */ -static void __detach_device(struct device *dev) -{ - struct iommu_dev_data *dev_data = get_dev_data(dev); - struct iommu_dev_data *alias_data; - struct protection_domain *domain; - unsigned long flags; - - BUG_ON(!dev_data->domain); - - domain = dev_data->domain; - - spin_lock_irqsave(&domain->lock, flags); - - if (dev_data->alias != dev) { - alias_data = get_dev_data(dev_data->alias); - if (atomic_dec_and_test(&alias_data->bind)) - do_detach(dev_data->alias); - } - - if (atomic_dec_and_test(&dev_data->bind)) - do_detach(dev); - - spin_unlock_irqrestore(&domain->lock, flags); - - /* - * If we run in passthrough mode the device must be assigned to the - * passthrough domain if it is detached from any other domain. - * Make sure we can deassign from the pt_domain itself. 
- */ - if (iommu_pass_through && - (dev_data->domain == NULL && domain != pt_domain)) - __attach_device(dev, pt_domain); -} - -/* - * Removes a device from a protection domain (with devtable_lock held) - */ -static void detach_device(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - unsigned long flags; - - /* lock device table */ - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - __detach_device(dev); - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev)) - pci_disable_ats(pdev); -} - -/* - * Find out the protection domain structure for a given PCI device. This - * will give us the pointer to the page table root for example. - */ -static struct protection_domain *domain_for_device(struct device *dev) -{ - struct protection_domain *dom; - struct iommu_dev_data *dev_data, *alias_data; - unsigned long flags; - u16 devid; - - devid = get_device_id(dev); - dev_data = get_dev_data(dev); - alias_data = get_dev_data(dev_data->alias); - if (!alias_data) - return NULL; - - read_lock_irqsave(&amd_iommu_devtable_lock, flags); - dom = dev_data->domain; - if (dom == NULL && - alias_data->domain != NULL) { - __attach_device(dev, alias_data->domain); - dom = alias_data->domain; - } - - read_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - return dom; -} - -static int device_change_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - u16 devid; - struct protection_domain *domain; - struct dma_ops_domain *dma_domain; - struct amd_iommu *iommu; - unsigned long flags; - - if (!check_device(dev)) - return 0; - - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; - - switch (action) { - case BUS_NOTIFY_UNBOUND_DRIVER: - - domain = domain_for_device(dev); - - if (!domain) - goto out; - if (iommu_pass_through) - break; - detach_device(dev); - break; - case BUS_NOTIFY_ADD_DEVICE: - - iommu_init_device(dev); - - domain = domain_for_device(dev); - - /* allocate a protection domain if a device is added */ - dma_domain = find_protection_domain(devid); - if (dma_domain) - goto out; - dma_domain = dma_ops_domain_alloc(); - if (!dma_domain) - goto out; - dma_domain->target_dev = devid; - - spin_lock_irqsave(&iommu_pd_list_lock, flags); - list_add_tail(&dma_domain->list, &iommu_pd_list); - spin_unlock_irqrestore(&iommu_pd_list_lock, flags); - - break; - case BUS_NOTIFY_DEL_DEVICE: - - iommu_uninit_device(dev); - - default: - goto out; - } - - device_flush_dte(dev); - iommu_completion_wait(iommu); - -out: - return 0; -} - -static struct notifier_block device_nb = { - .notifier_call = device_change_notifier, -}; - -void amd_iommu_init_notifier(void) -{ - bus_register_notifier(&pci_bus_type, &device_nb); -} - -/***************************************************************************** - * - * The next functions belong to the dma_ops mapping/unmapping code. - * - *****************************************************************************/ - -/* - * In the dma_ops path we only have the struct device. This function - * finds the corresponding IOMMU, the protection domain and the - * requestor id for a given device. - * If the device is not yet associated with a domain this is also done - * in this function. 
- */ -static struct protection_domain *get_domain(struct device *dev) -{ - struct protection_domain *domain; - struct dma_ops_domain *dma_dom; - u16 devid = get_device_id(dev); - - if (!check_device(dev)) - return ERR_PTR(-EINVAL); - - domain = domain_for_device(dev); - if (domain != NULL && !dma_ops_domain(domain)) - return ERR_PTR(-EBUSY); - - if (domain != NULL) - return domain; - - /* Device not bount yet - bind it */ - dma_dom = find_protection_domain(devid); - if (!dma_dom) - dma_dom = amd_iommu_rlookup_table[devid]->default_dom; - attach_device(dev, &dma_dom->domain); - DUMP_printk("Using protection domain %d for device %s\n", - dma_dom->domain.id, dev_name(dev)); - - return &dma_dom->domain; -} - -static void update_device_table(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data; - - list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); - u16 devid = get_device_id(dev_data->dev); - set_dte_entry(devid, domain, pci_ats_enabled(pdev)); - } -} - -static void update_domain(struct protection_domain *domain) -{ - if (!domain->updated) - return; - - update_device_table(domain); - - domain_flush_devices(domain); - domain_flush_tlb_pde(domain); - - domain->updated = false; -} - -/* - * This function fetches the PTE for a given address in the aperture - */ -static u64* dma_ops_get_pte(struct dma_ops_domain *dom, - unsigned long address) -{ - struct aperture_range *aperture; - u64 *pte, *pte_page; - - aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; - if (!aperture) - return NULL; - - pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; - if (!pte) { - pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, - GFP_ATOMIC); - aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; - } else - pte += PM_LEVEL_INDEX(0, address); - - update_domain(&dom->domain); - - return pte; -} - -/* - * This is the generic map function. It maps one 4kb page at paddr to - * the given address in the DMA address space for the domain. - */ -static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom, - unsigned long address, - phys_addr_t paddr, - int direction) -{ - u64 *pte, __pte; - - WARN_ON(address > dom->aperture_size); - - paddr &= PAGE_MASK; - - pte = dma_ops_get_pte(dom, address); - if (!pte) - return DMA_ERROR_CODE; - - __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; - - if (direction == DMA_TO_DEVICE) - __pte |= IOMMU_PTE_IR; - else if (direction == DMA_FROM_DEVICE) - __pte |= IOMMU_PTE_IW; - else if (direction == DMA_BIDIRECTIONAL) - __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW; - - WARN_ON(*pte); - - *pte = __pte; - - return (dma_addr_t)address; -} - -/* - * The generic unmapping function for on page in the DMA address space. - */ -static void dma_ops_domain_unmap(struct dma_ops_domain *dom, - unsigned long address) -{ - struct aperture_range *aperture; - u64 *pte; - - if (address >= dom->aperture_size) - return; - - aperture = dom->aperture[APERTURE_RANGE_INDEX(address)]; - if (!aperture) - return; - - pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; - if (!pte) - return; - - pte += PM_LEVEL_INDEX(0, address); - - WARN_ON(!*pte); - - *pte = 0ULL; -} - -/* - * This function contains common code for mapping of a physically - * contiguous memory region into DMA address space. It is used by all - * mapping functions provided with this IOMMU driver. - * Must be called with the domain lock held. 
- */ -static dma_addr_t __map_single(struct device *dev, - struct dma_ops_domain *dma_dom, - phys_addr_t paddr, - size_t size, - int dir, - bool align, - u64 dma_mask) -{ - dma_addr_t offset = paddr & ~PAGE_MASK; - dma_addr_t address, start, ret; - unsigned int pages; - unsigned long align_mask = 0; - int i; - - pages = iommu_num_pages(paddr, size, PAGE_SIZE); - paddr &= PAGE_MASK; - - INC_STATS_COUNTER(total_map_requests); - - if (pages > 1) - INC_STATS_COUNTER(cross_page); - - if (align) - align_mask = (1UL << get_order(size)) - 1; - -retry: - address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, - dma_mask); - if (unlikely(address == DMA_ERROR_CODE)) { - /* - * setting next_address here will let the address - * allocator only scan the new allocated range in the - * first run. This is a small optimization. - */ - dma_dom->next_address = dma_dom->aperture_size; - - if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) - goto out; - - /* - * aperture was successfully enlarged by 128 MB, try - * allocation again - */ - goto retry; - } - - start = address; - for (i = 0; i < pages; ++i) { - ret = dma_ops_domain_map(dma_dom, start, paddr, dir); - if (ret == DMA_ERROR_CODE) - goto out_unmap; - - paddr += PAGE_SIZE; - start += PAGE_SIZE; - } - address += offset; - - ADD_STATS_COUNTER(alloced_io_mem, size); - - if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { - domain_flush_tlb(&dma_dom->domain); - dma_dom->need_flush = false; - } else if (unlikely(amd_iommu_np_cache)) - domain_flush_pages(&dma_dom->domain, address, size); - -out: - return address; - -out_unmap: - - for (--i; i >= 0; --i) { - start -= PAGE_SIZE; - dma_ops_domain_unmap(dma_dom, start); - } - - dma_ops_free_addresses(dma_dom, address, pages); - - return DMA_ERROR_CODE; -} - -/* - * Does the reverse of the __map_single function. Must be called with - * the domain lock held too - */ -static void __unmap_single(struct dma_ops_domain *dma_dom, - dma_addr_t dma_addr, - size_t size, - int dir) -{ - dma_addr_t flush_addr; - dma_addr_t i, start; - unsigned int pages; - - if ((dma_addr == DMA_ERROR_CODE) || - (dma_addr + size > dma_dom->aperture_size)) - return; - - flush_addr = dma_addr; - pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); - dma_addr &= PAGE_MASK; - start = dma_addr; - - for (i = 0; i < pages; ++i) { - dma_ops_domain_unmap(dma_dom, start); - start += PAGE_SIZE; - } - - SUB_STATS_COUNTER(alloced_io_mem, size); - - dma_ops_free_addresses(dma_dom, dma_addr, pages); - - if (amd_iommu_unmap_flush || dma_dom->need_flush) { - domain_flush_pages(&dma_dom->domain, flush_addr, size); - dma_dom->need_flush = false; - } -} - -/* - * The exported map_single function for dma_ops. 
- */ -static dma_addr_t map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - dma_addr_t addr; - u64 dma_mask; - phys_addr_t paddr = page_to_phys(page) + offset; - - INC_STATS_COUNTER(cnt_map_single); - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) - return (dma_addr_t)paddr; - else if (IS_ERR(domain)) - return DMA_ERROR_CODE; - - dma_mask = *dev->dma_mask; - - spin_lock_irqsave(&domain->lock, flags); - - addr = __map_single(dev, domain->priv, paddr, size, dir, false, - dma_mask); - if (addr == DMA_ERROR_CODE) - goto out; - - domain_flush_complete(domain); - -out: - spin_unlock_irqrestore(&domain->lock, flags); - - return addr; -} - -/* - * The exported unmap_single function for dma_ops. - */ -static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - - INC_STATS_COUNTER(cnt_unmap_single); - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - spin_lock_irqsave(&domain->lock, flags); - - __unmap_single(domain->priv, dma_addr, size, dir); - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); -} - -/* - * This is a special map_sg function which is used if we should map a - * device which is not handled by an AMD IOMMU in the system. - */ -static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist, - int nelems, int dir) -{ - struct scatterlist *s; - int i; - - for_each_sg(sglist, s, nelems, i) { - s->dma_address = (dma_addr_t)sg_phys(s); - s->dma_length = s->length; - } - - return nelems; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). - */ -static int map_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - int i; - struct scatterlist *s; - phys_addr_t paddr; - int mapped_elems = 0; - u64 dma_mask; - - INC_STATS_COUNTER(cnt_map_sg); - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) - return map_sg_no_iommu(dev, sglist, nelems, dir); - else if (IS_ERR(domain)) - return 0; - - dma_mask = *dev->dma_mask; - - spin_lock_irqsave(&domain->lock, flags); - - for_each_sg(sglist, s, nelems, i) { - paddr = sg_phys(s); - - s->dma_address = __map_single(dev, domain->priv, - paddr, s->length, dir, false, - dma_mask); - - if (s->dma_address) { - s->dma_length = s->length; - mapped_elems++; - } else - goto unmap; - } - - domain_flush_complete(domain); - -out: - spin_unlock_irqrestore(&domain->lock, flags); - - return mapped_elems; -unmap: - for_each_sg(sglist, s, mapped_elems, i) { - if (s->dma_address) - __unmap_single(domain->priv, s->dma_address, - s->dma_length, dir); - s->dma_address = s->dma_length = 0; - } - - mapped_elems = 0; - - goto out; -} - -/* - * The exported map_sg function for dma_ops (handles scatter-gather - * lists). 
- */ -static void unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - unsigned long flags; - struct protection_domain *domain; - struct scatterlist *s; - int i; - - INC_STATS_COUNTER(cnt_unmap_sg); - - domain = get_domain(dev); - if (IS_ERR(domain)) - return; - - spin_lock_irqsave(&domain->lock, flags); - - for_each_sg(sglist, s, nelems, i) { - __unmap_single(domain->priv, s->dma_address, - s->dma_length, dir); - s->dma_address = s->dma_length = 0; - } - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); -} - -/* - * The exported alloc_coherent function for dma_ops. - */ -static void *alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag) -{ - unsigned long flags; - void *virt_addr; - struct protection_domain *domain; - phys_addr_t paddr; - u64 dma_mask = dev->coherent_dma_mask; - - INC_STATS_COUNTER(cnt_alloc_coherent); - - domain = get_domain(dev); - if (PTR_ERR(domain) == -EINVAL) { - virt_addr = (void *)__get_free_pages(flag, get_order(size)); - *dma_addr = __pa(virt_addr); - return virt_addr; - } else if (IS_ERR(domain)) - return NULL; - - dma_mask = dev->coherent_dma_mask; - flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - flag |= __GFP_ZERO; - - virt_addr = (void *)__get_free_pages(flag, get_order(size)); - if (!virt_addr) - return NULL; - - paddr = virt_to_phys(virt_addr); - - if (!dma_mask) - dma_mask = *dev->dma_mask; - - spin_lock_irqsave(&domain->lock, flags); - - *dma_addr = __map_single(dev, domain->priv, paddr, - size, DMA_BIDIRECTIONAL, true, dma_mask); - - if (*dma_addr == DMA_ERROR_CODE) { - spin_unlock_irqrestore(&domain->lock, flags); - goto out_free; - } - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); - - return virt_addr; - -out_free: - - free_pages((unsigned long)virt_addr, get_order(size)); - - return NULL; -} - -/* - * The exported free_coherent function for dma_ops. - */ -static void free_coherent(struct device *dev, size_t size, - void *virt_addr, dma_addr_t dma_addr) -{ - unsigned long flags; - struct protection_domain *domain; - - INC_STATS_COUNTER(cnt_free_coherent); - - domain = get_domain(dev); - if (IS_ERR(domain)) - goto free_mem; - - spin_lock_irqsave(&domain->lock, flags); - - __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); - - domain_flush_complete(domain); - - spin_unlock_irqrestore(&domain->lock, flags); - -free_mem: - free_pages((unsigned long)virt_addr, get_order(size)); -} - -/* - * This function is called by the DMA layer to find out if we can handle a - * particular device. It is part of the dma_ops. - */ -static int amd_iommu_dma_supported(struct device *dev, u64 mask) -{ - return check_device(dev); -} - -/* - * The function for pre-allocating protection domains. - * - * If the driver core informs the DMA layer if a driver grabs a device - * we don't need to preallocate the protection domains anymore. - * For now we have to. - */ -static void prealloc_protection_domains(void) -{ - struct pci_dev *dev = NULL; - struct dma_ops_domain *dma_dom; - u16 devid; - - for_each_pci_dev(dev) { - - /* Do we handle this device? */ - if (!check_device(&dev->dev)) - continue; - - /* Is there already any domain for it? 
*/ - if (domain_for_device(&dev->dev)) - continue; - - devid = get_device_id(&dev->dev); - - dma_dom = dma_ops_domain_alloc(); - if (!dma_dom) - continue; - init_unity_mappings_for_device(dma_dom, devid); - dma_dom->target_dev = devid; - - attach_device(&dev->dev, &dma_dom->domain); - - list_add_tail(&dma_dom->list, &iommu_pd_list); - } -} - -static struct dma_map_ops amd_iommu_dma_ops = { - .alloc_coherent = alloc_coherent, - .free_coherent = free_coherent, - .map_page = map_page, - .unmap_page = unmap_page, - .map_sg = map_sg, - .unmap_sg = unmap_sg, - .dma_supported = amd_iommu_dma_supported, -}; - -/* - * The function which clues the AMD IOMMU driver into dma_ops. - */ - -void __init amd_iommu_init_api(void) -{ - register_iommu(&amd_iommu_ops); -} - -int __init amd_iommu_init_dma_ops(void) -{ - struct amd_iommu *iommu; - int ret; - - /* - * first allocate a default protection domain for every IOMMU we - * found in the system. Devices not assigned to any other - * protection domain will be assigned to the default one. - */ - for_each_iommu(iommu) { - iommu->default_dom = dma_ops_domain_alloc(); - if (iommu->default_dom == NULL) - return -ENOMEM; - iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; - ret = iommu_init_unity_mappings(iommu); - if (ret) - goto free_domains; - } - - /* - * Pre-allocate the protection domains for each device. - */ - prealloc_protection_domains(); - - iommu_detected = 1; - swiotlb = 0; - - /* Make the driver finally visible to the drivers */ - dma_ops = &amd_iommu_dma_ops; - - amd_iommu_stats_init(); - - return 0; - -free_domains: - - for_each_iommu(iommu) { - if (iommu->default_dom) - dma_ops_domain_free(iommu->default_dom); - } - - return ret; -} - -/***************************************************************************** - * - * The following functions belong to the exported interface of AMD IOMMU - * - * This interface allows access to lower level functions of the IOMMU - * like protection domain handling and assignement of devices to domains - * which is not possible with the dma_ops interface. 
- * - *****************************************************************************/ - -static void cleanup_domain(struct protection_domain *domain) -{ - struct iommu_dev_data *dev_data, *next; - unsigned long flags; - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - - list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) { - struct device *dev = dev_data->dev; - - __detach_device(dev); - atomic_set(&dev_data->bind, 0); - } - - write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); -} - -static void protection_domain_free(struct protection_domain *domain) -{ - if (!domain) - return; - - del_domain_from_list(domain); - - if (domain->id) - domain_id_free(domain->id); - - kfree(domain); -} - -static struct protection_domain *protection_domain_alloc(void) -{ - struct protection_domain *domain; - - domain = kzalloc(sizeof(*domain), GFP_KERNEL); - if (!domain) - return NULL; - - spin_lock_init(&domain->lock); - mutex_init(&domain->api_lock); - domain->id = domain_id_alloc(); - if (!domain->id) - goto out_err; - INIT_LIST_HEAD(&domain->dev_list); - - add_domain_to_list(domain); - - return domain; - -out_err: - kfree(domain); - - return NULL; -} - -static int amd_iommu_domain_init(struct iommu_domain *dom) -{ - struct protection_domain *domain; - - domain = protection_domain_alloc(); - if (!domain) - goto out_free; - - domain->mode = PAGE_MODE_3_LEVEL; - domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); - if (!domain->pt_root) - goto out_free; - - dom->priv = domain; - - return 0; - -out_free: - protection_domain_free(domain); - - return -ENOMEM; -} - -static void amd_iommu_domain_destroy(struct iommu_domain *dom) -{ - struct protection_domain *domain = dom->priv; - - if (!domain) - return; - - if (domain->dev_cnt > 0) - cleanup_domain(domain); - - BUG_ON(domain->dev_cnt != 0); - - free_pagetable(domain); - - protection_domain_free(domain); - - dom->priv = NULL; -} - -static void amd_iommu_detach_device(struct iommu_domain *dom, - struct device *dev) -{ - struct iommu_dev_data *dev_data = dev->archdata.iommu; - struct amd_iommu *iommu; - u16 devid; - - if (!check_device(dev)) - return; - - devid = get_device_id(dev); - - if (dev_data->domain != NULL) - detach_device(dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - return; - - device_flush_dte(dev); - iommu_completion_wait(iommu); -} - -static int amd_iommu_attach_device(struct iommu_domain *dom, - struct device *dev) -{ - struct protection_domain *domain = dom->priv; - struct iommu_dev_data *dev_data; - struct amd_iommu *iommu; - int ret; - u16 devid; - - if (!check_device(dev)) - return -EINVAL; - - dev_data = dev->archdata.iommu; - - devid = get_device_id(dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - return -EINVAL; - - if (dev_data->domain) - detach_device(dev); - - ret = attach_device(dev, domain); - - iommu_completion_wait(iommu); - - return ret; -} - -static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, - phys_addr_t paddr, int gfp_order, int iommu_prot) -{ - unsigned long page_size = 0x1000UL << gfp_order; - struct protection_domain *domain = dom->priv; - int prot = 0; - int ret; - - if (iommu_prot & IOMMU_READ) - prot |= IOMMU_PROT_IR; - if (iommu_prot & IOMMU_WRITE) - prot |= IOMMU_PROT_IW; - - mutex_lock(&domain->api_lock); - ret = iommu_map_page(domain, iova, paddr, prot, page_size); - mutex_unlock(&domain->api_lock); - - return ret; -} - -static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, - int gfp_order) -{ - struct 
protection_domain *domain = dom->priv; - unsigned long page_size, unmap_size; - - page_size = 0x1000UL << gfp_order; - - mutex_lock(&domain->api_lock); - unmap_size = iommu_unmap_page(domain, iova, page_size); - mutex_unlock(&domain->api_lock); - - domain_flush_tlb_pde(domain); - - return get_order(unmap_size); -} - -static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, - unsigned long iova) -{ - struct protection_domain *domain = dom->priv; - unsigned long offset_mask; - phys_addr_t paddr; - u64 *pte, __pte; - - pte = fetch_pte(domain, iova); - - if (!pte || !IOMMU_PTE_PRESENT(*pte)) - return 0; - - if (PM_PTE_LEVEL(*pte) == 0) - offset_mask = PAGE_SIZE - 1; - else - offset_mask = PTE_PAGE_SIZE(*pte) - 1; - - __pte = *pte & PM_ADDR_MASK; - paddr = (__pte & ~offset_mask) | (iova & offset_mask); - - return paddr; -} - -static int amd_iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap) -{ - switch (cap) { - case IOMMU_CAP_CACHE_COHERENCY: - return 1; - } - - return 0; -} - -static struct iommu_ops amd_iommu_ops = { - .domain_init = amd_iommu_domain_init, - .domain_destroy = amd_iommu_domain_destroy, - .attach_dev = amd_iommu_attach_device, - .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map, - .unmap = amd_iommu_unmap, - .iova_to_phys = amd_iommu_iova_to_phys, - .domain_has_cap = amd_iommu_domain_has_cap, -}; - -/***************************************************************************** - * - * The next functions do a basic initialization of IOMMU for pass through - * mode - * - * In passthrough mode the IOMMU is initialized and enabled but not used for - * DMA-API translation. - * - *****************************************************************************/ - -int __init amd_iommu_init_passthrough(void) -{ - struct amd_iommu *iommu; - struct pci_dev *dev = NULL; - u16 devid; - - /* allocate passthrough domain */ - pt_domain = protection_domain_alloc(); - if (!pt_domain) - return -ENOMEM; - - pt_domain->mode |= PAGE_MODE_NONE; - - for_each_pci_dev(dev) { - if (!check_device(&dev->dev)) - continue; - - devid = get_device_id(&dev->dev); - - iommu = amd_iommu_rlookup_table[devid]; - if (!iommu) - continue; - - attach_device(&dev->dev, pt_domain); - } - - pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); - - return 0; -} diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c deleted file mode 100644 index 9179c21120a..00000000000 --- a/arch/x86/kernel/amd_iommu_init.c +++ /dev/null @@ -1,1572 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel <joerg.roedel@amd.com> - * Leo Duran <leo.duran@amd.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
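A quick standalone illustration of the arithmetic the map/unmap and iova_to_phys paths above rely on: converting a page order to a byte size, a simplified get_order(), and composing a physical address from the PTE's frame bits plus the in-page offset of the IOVA. The constants and demo_* names are illustrative; this is a userspace sketch, not the driver code.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

/* size for a given order: one 4K page shifted left by the order */
static unsigned long order_to_size(int order)
{
        return DEMO_PAGE_SIZE << order;
}

/* smallest order whose size covers 'size' (simplified get_order()) */
static int size_to_order(unsigned long size)
{
        int order = 0;

        while (order_to_size(order) < size)
                order++;
        return order;
}

/* keep the PTE's frame bits, take the low bits from the IOVA */
static uint64_t demo_iova_to_phys(uint64_t pte, uint64_t iova,
                                  unsigned long page_size)
{
        uint64_t offset_mask = page_size - 1;

        return (pte & ~offset_mask) | (iova & offset_mask);
}

int main(void)
{
        printf("order 3 -> %lu bytes\n", order_to_size(3));         /* 32768 */
        printf("20000 bytes -> order %d\n", size_to_order(20000));  /* 3 */
        printf("phys = %#llx\n", (unsigned long long)
               demo_iova_to_phys(0x123456000ULL, 0xabc, DEMO_PAGE_SIZE));
        return 0;
}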
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/pci.h> -#include <linux/acpi.h> -#include <linux/list.h> -#include <linux/slab.h> -#include <linux/syscore_ops.h> -#include <linux/interrupt.h> -#include <linux/msi.h> -#include <asm/pci-direct.h> -#include <asm/amd_iommu_proto.h> -#include <asm/amd_iommu_types.h> -#include <asm/amd_iommu.h> -#include <asm/iommu.h> -#include <asm/gart.h> -#include <asm/x86_init.h> -#include <asm/iommu_table.h> -/* - * definitions for the ACPI scanning code - */ -#define IVRS_HEADER_LENGTH 48 - -#define ACPI_IVHD_TYPE 0x10 -#define ACPI_IVMD_TYPE_ALL 0x20 -#define ACPI_IVMD_TYPE 0x21 -#define ACPI_IVMD_TYPE_RANGE 0x22 - -#define IVHD_DEV_ALL 0x01 -#define IVHD_DEV_SELECT 0x02 -#define IVHD_DEV_SELECT_RANGE_START 0x03 -#define IVHD_DEV_RANGE_END 0x04 -#define IVHD_DEV_ALIAS 0x42 -#define IVHD_DEV_ALIAS_RANGE 0x43 -#define IVHD_DEV_EXT_SELECT 0x46 -#define IVHD_DEV_EXT_SELECT_RANGE 0x47 - -#define IVHD_FLAG_HT_TUN_EN_MASK 0x01 -#define IVHD_FLAG_PASSPW_EN_MASK 0x02 -#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 -#define IVHD_FLAG_ISOC_EN_MASK 0x08 - -#define IVMD_FLAG_EXCL_RANGE 0x08 -#define IVMD_FLAG_UNITY_MAP 0x01 - -#define ACPI_DEVFLAG_INITPASS 0x01 -#define ACPI_DEVFLAG_EXTINT 0x02 -#define ACPI_DEVFLAG_NMI 0x04 -#define ACPI_DEVFLAG_SYSMGT1 0x10 -#define ACPI_DEVFLAG_SYSMGT2 0x20 -#define ACPI_DEVFLAG_LINT0 0x40 -#define ACPI_DEVFLAG_LINT1 0x80 -#define ACPI_DEVFLAG_ATSDIS 0x10000000 - -/* - * ACPI table definitions - * - * These data structures are laid over the table to parse the important values - * out of it. - */ - -/* - * structure describing one IOMMU in the ACPI table. Typically followed by one - * or more ivhd_entrys. - */ -struct ivhd_header { - u8 type; - u8 flags; - u16 length; - u16 devid; - u16 cap_ptr; - u64 mmio_phys; - u16 pci_seg; - u16 info; - u32 reserved; -} __attribute__((packed)); - -/* - * A device entry describing which devices a specific IOMMU translates and - * which requestor ids they use. - */ -struct ivhd_entry { - u8 type; - u16 devid; - u8 flags; - u32 ext; -} __attribute__((packed)); - -/* - * An AMD IOMMU memory definition structure. It defines things like exclusion - * ranges for devices and regions that should be unity mapped. - */ -struct ivmd_header { - u8 type; - u8 flags; - u16 length; - u16 devid; - u16 aux; - u64 resv; - u64 range_start; - u64 range_length; -} __attribute__((packed)); - -bool amd_iommu_dump; - -static int __initdata amd_iommu_detected; -static bool __initdata amd_iommu_disabled; - -u16 amd_iommu_last_bdf; /* largest PCI device id we have - to handle */ -LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings - we find in ACPI */ -bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ - -LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the - system */ - -/* Array to assign indices to IOMMUs*/ -struct amd_iommu *amd_iommus[MAX_IOMMUS]; -int amd_iommus_present; - -/* IOMMUs have a non-present cache? 
*/ -bool amd_iommu_np_cache __read_mostly; -bool amd_iommu_iotlb_sup __read_mostly = true; - -/* - * The ACPI table parsing functions set this variable on an error - */ -static int __initdata amd_iommu_init_err; - -/* - * List of protection domains - used during resume - */ -LIST_HEAD(amd_iommu_pd_list); -spinlock_t amd_iommu_pd_lock; - -/* - * Pointer to the device table which is shared by all AMD IOMMUs - * it is indexed by the PCI device id or the HT unit id and contains - * information about the domain the device belongs to as well as the - * page table root pointer. - */ -struct dev_table_entry *amd_iommu_dev_table; - -/* - * The alias table is a driver specific data structure which contains the - * mappings of the PCI device ids to the actual requestor ids on the IOMMU. - * More than one device can share the same requestor id. - */ -u16 *amd_iommu_alias_table; - -/* - * The rlookup table is used to find the IOMMU which is responsible - * for a specific device. It is also indexed by the PCI device id. - */ -struct amd_iommu **amd_iommu_rlookup_table; - -/* - * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap - * to know which ones are already in use. - */ -unsigned long *amd_iommu_pd_alloc_bitmap; - -static u32 dev_table_size; /* size of the device table */ -static u32 alias_table_size; /* size of the alias table */ -static u32 rlookup_table_size; /* size if the rlookup table */ - -/* - * This function flushes all internal caches of - * the IOMMU used by this driver. - */ -extern void iommu_flush_all_caches(struct amd_iommu *iommu); - -static inline void update_last_devid(u16 devid) -{ - if (devid > amd_iommu_last_bdf) - amd_iommu_last_bdf = devid; -} - -static inline unsigned long tbl_size(int entry_size) -{ - unsigned shift = PAGE_SHIFT + - get_order(((int)amd_iommu_last_bdf + 1) * entry_size); - - return 1UL << shift; -} - -/* Access to l1 and l2 indexed register spaces */ - -static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) -{ - u32 val; - - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); - pci_read_config_dword(iommu->dev, 0xfc, &val); - return val; -} - -static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) -{ - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); - pci_write_config_dword(iommu->dev, 0xfc, val); - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); -} - -static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) -{ - u32 val; - - pci_write_config_dword(iommu->dev, 0xf0, address); - pci_read_config_dword(iommu->dev, 0xf4, &val); - return val; -} - -static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) -{ - pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); - pci_write_config_dword(iommu->dev, 0xf4, val); -} - -/**************************************************************************** - * - * AMD IOMMU MMIO register space handling functions - * - * These functions are used to program the IOMMU device registers in - * MMIO space required for that driver. - * - ****************************************************************************/ - -/* - * This function set the exclusion range in the IOMMU. 
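The tbl_size() helper above rounds each shared table up to a whole power-of-two number of pages before it is requested from the page allocator. A minimal userspace sketch of that rounding, with illustrative entry sizes and demo_* names:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

/* order needed to hold 'bytes' (simplified get_order()) */
static int demo_get_order(unsigned long bytes)
{
        int order = 0;

        while ((1UL << (DEMO_PAGE_SHIFT + order)) < bytes)
                order++;
        return order;
}

static unsigned long demo_tbl_size(unsigned last_devid, int entry_size)
{
        unsigned shift = DEMO_PAGE_SHIFT +
                         demo_get_order((last_devid + 1UL) * entry_size);

        return 1UL << shift;
}

int main(void)
{
        /* e.g. device ids up to 0xffff, 32 bytes per device table entry */
        printf("device table: %lu bytes\n", demo_tbl_size(0xffff, 32));
        /* e.g. an alias table with 2-byte entries */
        printf("alias table:  %lu bytes\n", demo_tbl_size(0xffff, 2));
        return 0;
}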
DMA accesses to the - * exclusion range are passed through untranslated - */ -static void iommu_set_exclusion_range(struct amd_iommu *iommu) -{ - u64 start = iommu->exclusion_start & PAGE_MASK; - u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; - u64 entry; - - if (!iommu->exclusion_start) - return; - - entry = start | MMIO_EXCL_ENABLE_MASK; - memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, - &entry, sizeof(entry)); - - entry = limit; - memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, - &entry, sizeof(entry)); -} - -/* Programs the physical address of the device table into the IOMMU hardware */ -static void __init iommu_set_device_table(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->mmio_base == NULL); - - entry = virt_to_phys(amd_iommu_dev_table); - entry |= (dev_table_size >> 12) - 1; - memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, - &entry, sizeof(entry)); -} - -/* Generic functions to enable/disable certain features of the IOMMU. */ -static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) -{ - u32 ctrl; - - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl |= (1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); -} - -static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) -{ - u32 ctrl; - - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl &= ~(1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); -} - -/* Function to enable the hardware */ -static void iommu_enable(struct amd_iommu *iommu) -{ - static const char * const feat_str[] = { - "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", - "IA", "GA", "HE", "PC", NULL - }; - int i; - - printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", - dev_name(&iommu->dev->dev), iommu->cap_ptr); - - if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - printk(KERN_CONT " extended features: "); - for (i = 0; feat_str[i]; ++i) - if (iommu_feature(iommu, (1ULL << i))) - printk(KERN_CONT " %s", feat_str[i]); - } - printk(KERN_CONT "\n"); - - iommu_feature_enable(iommu, CONTROL_IOMMU_EN); -} - -static void iommu_disable(struct amd_iommu *iommu) -{ - /* Disable command buffer */ - iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); - - /* Disable event logging and event interrupts */ - iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); - iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); - - /* Disable IOMMU hardware itself */ - iommu_feature_disable(iommu, CONTROL_IOMMU_EN); -} - -/* - * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in - * the system has one. - */ -static u8 * __init iommu_map_mmio_space(u64 address) -{ - u8 *ret; - - if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { - pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", - address); - pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); - return NULL; - } - - ret = ioremap_nocache(address, MMIO_REGION_LENGTH); - if (ret != NULL) - return ret; - - release_mem_region(address, MMIO_REGION_LENGTH); - - return NULL; -} - -static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) -{ - if (iommu->mmio_base) - iounmap(iommu->mmio_base); - release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH); -} - -/**************************************************************************** - * - * The functions below belong to the first pass of AMD IOMMU ACPI table - * parsing. In this pass we try to find out the highest device id this - * code has to handle. 
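iommu_feature_enable()/iommu_feature_disable() above are a plain read-modify-write of a single bit in the MMIO control register. The sketch below models that with an ordinary variable in place of readl()/writel(); the bit numbers are made up for the demo.

#include <stdio.h>
#include <stdint.h>

static uint32_t demo_control;   /* stands in for the MMIO control register */

static void demo_feature_enable(uint8_t bit)
{
        uint32_t ctrl = demo_control;   /* readl() in the real code */

        ctrl |= 1U << bit;
        demo_control = ctrl;            /* writel() in the real code */
}

static void demo_feature_disable(uint8_t bit)
{
        uint32_t ctrl = demo_control;

        ctrl &= ~(1U << bit);
        demo_control = ctrl;
}

int main(void)
{
        enum { DEMO_CTRL_IOMMU_EN = 0, DEMO_CTRL_EVT_LOG_EN = 2 };

        demo_feature_enable(DEMO_CTRL_EVT_LOG_EN);
        demo_feature_enable(DEMO_CTRL_IOMMU_EN);
        printf("control = %#x\n", demo_control);        /* 0x5 */

        demo_feature_disable(DEMO_CTRL_EVT_LOG_EN);
        printf("control = %#x\n", demo_control);        /* 0x1 */
        return 0;
}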
Upon this information the size of the shared data - * structures is determined later. - * - ****************************************************************************/ - -/* - * This function calculates the length of a given IVHD entry - */ -static inline int ivhd_entry_length(u8 *ivhd) -{ - return 0x04 << (*ivhd >> 6); -} - -/* - * This function reads the last device id the IOMMU has to handle from the PCI - * capability header for this IOMMU - */ -static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) -{ - u32 cap; - - cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); - update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); - - return 0; -} - -/* - * After reading the highest device id from the IOMMU PCI capability header - * this function looks if there is a higher device id defined in the ACPI table - */ -static int __init find_last_devid_from_ivhd(struct ivhd_header *h) -{ - u8 *p = (void *)h, *end = (void *)h; - struct ivhd_entry *dev; - - p += sizeof(*h); - end += h->length; - - find_last_devid_on_pci(PCI_BUS(h->devid), - PCI_SLOT(h->devid), - PCI_FUNC(h->devid), - h->cap_ptr); - - while (p < end) { - dev = (struct ivhd_entry *)p; - switch (dev->type) { - case IVHD_DEV_SELECT: - case IVHD_DEV_RANGE_END: - case IVHD_DEV_ALIAS: - case IVHD_DEV_EXT_SELECT: - /* all the above subfield types refer to device ids */ - update_last_devid(dev->devid); - break; - default: - break; - } - p += ivhd_entry_length(p); - } - - WARN_ON(p != end); - - return 0; -} - -/* - * Iterate over all IVHD entries in the ACPI table and find the highest device - * id which we need to handle. This is the first of three functions which parse - * the ACPI table. So we check the checksum here. - */ -static int __init find_last_devid_acpi(struct acpi_table_header *table) -{ - int i; - u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; - struct ivhd_header *h; - - /* - * Validate checksum here so we don't need to do it when - * we actually parse the table - */ - for (i = 0; i < table->length; ++i) - checksum += p[i]; - if (checksum != 0) { - /* ACPI table corrupt */ - amd_iommu_init_err = -ENODEV; - return 0; - } - - p += IVRS_HEADER_LENGTH; - - end += table->length; - while (p < end) { - h = (struct ivhd_header *)p; - switch (h->type) { - case ACPI_IVHD_TYPE: - find_last_devid_from_ivhd(h); - break; - default: - break; - } - p += h->length; - } - WARN_ON(p != end); - - return 0; -} - -/**************************************************************************** - * - * The following functions belong the the code path which parses the ACPI table - * the second time. In this ACPI parsing iteration we allocate IOMMU specific - * data structures, initialize the device/alias/rlookup table and also - * basically initialize the hardware. - * - ****************************************************************************/ - -/* - * Allocates the command buffer. This buffer is per AMD IOMMU. We can - * write commands to that buffer later and the IOMMU will execute them - * asynchronously - */ -static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) -{ - u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(CMD_BUFFER_SIZE)); - - if (cmd_buf == NULL) - return NULL; - - iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; - - return cmd_buf; -} - -/* - * This function resets the command buffer if the IOMMU stopped fetching - * commands from it. 
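Two small details of the first ACPI pass above, shown as a standalone userspace sketch: the whole-table byte checksum (all bytes must sum to zero modulo 256) and the IVHD entry length encoding, where the top two bits of the type byte select a 4/8/16/32-byte entry. The sample table and demo_* names are fabricated for the demo.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* valid iff the byte sum of the whole table is 0 (mod 256) */
static int demo_checksum_ok(const uint8_t *table, size_t length)
{
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < length; i++)
                sum += table[i];
        return sum == 0;
}

/* entry length: 0x04 << (type >> 6)  ->  4, 8, 16 or 32 bytes */
static int demo_ivhd_entry_length(uint8_t type)
{
        return 0x04 << (type >> 6);
}

int main(void)
{
        uint8_t table[8] = { 0x10, 0x20, 0x30, 0x40, 0, 0, 0, 0 };

        /* patch the last byte so the sum becomes 0 mod 256 */
        table[7] = (uint8_t)(0x100 - (0x10 + 0x20 + 0x30 + 0x40));

        printf("checksum ok: %d\n", demo_checksum_ok(table, sizeof(table)));
        printf("entry 0x02 -> %d bytes\n", demo_ivhd_entry_length(0x02)); /* 4 */
        printf("entry 0x43 -> %d bytes\n", demo_ivhd_entry_length(0x43)); /* 8 */
        return 0;
}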
- */ -void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) -{ - iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); - - writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); -} - -/* - * This function writes the command buffer address to the hardware and - * enables it. - */ -static void iommu_enable_command_buffer(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->cmd_buf == NULL); - - entry = (u64)virt_to_phys(iommu->cmd_buf); - entry |= MMIO_CMD_SIZE_512; - - memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, - &entry, sizeof(entry)); - - amd_iommu_reset_cmd_buffer(iommu); - iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); -} - -static void __init free_command_buffer(struct amd_iommu *iommu) -{ - free_pages((unsigned long)iommu->cmd_buf, - get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); -} - -/* allocates the memory where the IOMMU will log its events to */ -static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) -{ - iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(EVT_BUFFER_SIZE)); - - if (iommu->evt_buf == NULL) - return NULL; - - iommu->evt_buf_size = EVT_BUFFER_SIZE; - - return iommu->evt_buf; -} - -static void iommu_enable_event_buffer(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->evt_buf == NULL); - - entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; - - memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, - &entry, sizeof(entry)); - - /* set head and tail to zero manually */ - writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); -} - -static void __init free_event_buffer(struct amd_iommu *iommu) -{ - free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); -} - -/* sets a specific bit in the device table entry. 
*/ -static void set_dev_entry_bit(u16 devid, u8 bit) -{ - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; - - amd_iommu_dev_table[devid].data[i] |= (1 << _bit); -} - -static int get_dev_entry_bit(u16 devid, u8 bit) -{ - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; - - return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit; -} - - -void amd_iommu_apply_erratum_63(u16 devid) -{ - int sysmgt; - - sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | - (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); - - if (sysmgt == 0x01) - set_dev_entry_bit(devid, DEV_ENTRY_IW); -} - -/* Writes the specific IOMMU for a device into the rlookup table */ -static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) -{ - amd_iommu_rlookup_table[devid] = iommu; -} - -/* - * This function takes the device specific flags read from the ACPI - * table and sets up the device table entry with that information - */ -static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, - u16 devid, u32 flags, u32 ext_flags) -{ - if (flags & ACPI_DEVFLAG_INITPASS) - set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); - if (flags & ACPI_DEVFLAG_EXTINT) - set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); - if (flags & ACPI_DEVFLAG_NMI) - set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); - if (flags & ACPI_DEVFLAG_SYSMGT1) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); - if (flags & ACPI_DEVFLAG_SYSMGT2) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); - if (flags & ACPI_DEVFLAG_LINT0) - set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); - if (flags & ACPI_DEVFLAG_LINT1) - set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); - - amd_iommu_apply_erratum_63(devid); - - set_iommu_for_device(iommu, devid); -} - -/* - * Reads the device exclusion range from ACPI and initialize IOMMU with - * it - */ -static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) -{ - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; - - if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) - return; - - if (iommu) { - /* - * We only can configure exclusion ranges per IOMMU, not - * per device. But we can enable the exclusion range per - * device. This is done here - */ - set_dev_entry_bit(m->devid, DEV_ENTRY_EX); - iommu->exclusion_start = m->range_start; - iommu->exclusion_length = m->range_length; - } -} - -/* - * This function reads some important data from the IOMMU PCI space and - * initializes the driver data structure with it. 
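set_dev_entry_bit()/get_dev_entry_bit() above split a flat bit number into a 32-bit word index and a bit position inside that word. A standalone sketch of that indexing follows; the 8-word entry mirrors the general shape of a device table entry, but the specific bit numbers used below are made up.

#include <stdio.h>
#include <stdint.h>

struct demo_dev_entry {
        uint32_t data[8];       /* 256 bits per device */
};

static void demo_set_bit(struct demo_dev_entry *e, uint8_t bit)
{
        int word = (bit >> 5) & 0x07;
        int pos  = bit & 0x1f;

        e->data[word] |= 1U << pos;
}

static int demo_get_bit(const struct demo_dev_entry *e, uint8_t bit)
{
        int word = (bit >> 5) & 0x07;
        int pos  = bit & 0x1f;

        return (e->data[word] >> pos) & 1;
}

int main(void)
{
        struct demo_dev_entry e = { { 0 } };

        demo_set_bit(&e, 0);    /* e.g. a "valid" style bit */
        demo_set_bit(&e, 98);   /* lands in data[3], bit 2 */

        printf("bit 98 set: %d, data[3] = %#x\n",
               demo_get_bit(&e, 98), e.data[3]);
        return 0;
}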
It reads the hardware - * capabilities and the first/last device entries - */ -static void __init init_iommu_from_pci(struct amd_iommu *iommu) -{ - int cap_ptr = iommu->cap_ptr; - u32 range, misc, low, high; - int i, j; - - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, - &iommu->cap); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, - &range); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, - &misc); - - iommu->first_device = calc_devid(MMIO_GET_BUS(range), - MMIO_GET_FD(range)); - iommu->last_device = calc_devid(MMIO_GET_BUS(range), - MMIO_GET_LD(range)); - iommu->evt_msi_num = MMIO_MSI_NUM(misc); - - if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) - amd_iommu_iotlb_sup = false; - - /* read extended feature bits */ - low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); - high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); - - iommu->features = ((u64)high << 32) | low; - - if (!is_rd890_iommu(iommu->dev)) - return; - - /* - * Some rd890 systems may not be fully reconfigured by the BIOS, so - * it's necessary for us to store this information so it can be - * reprogrammed on resume - */ - - pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, - &iommu->stored_addr_lo); - pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, - &iommu->stored_addr_hi); - - /* Low bit locks writes to configuration space */ - iommu->stored_addr_lo &= ~1; - - for (i = 0; i < 6; i++) - for (j = 0; j < 0x12; j++) - iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); - - for (i = 0; i < 0x83; i++) - iommu->stored_l2[i] = iommu_read_l2(iommu, i); -} - -/* - * Takes a pointer to an AMD IOMMU entry in the ACPI table and - * initializes the hardware and our data structures with it. - */ -static void __init init_iommu_from_acpi(struct amd_iommu *iommu, - struct ivhd_header *h) -{ - u8 *p = (u8 *)h; - u8 *end = p, flags = 0; - u16 dev_i, devid = 0, devid_start = 0, devid_to = 0; - u32 ext_flags = 0; - bool alias = false; - struct ivhd_entry *e; - - /* - * First save the recommended feature enable bits from ACPI - */ - iommu->acpi_flags = h->flags; - - /* - * Done. 
Now parse the device entries - */ - p += sizeof(struct ivhd_header); - end += h->length; - - - while (p < end) { - e = (struct ivhd_entry *)p; - switch (e->type) { - case IVHD_DEV_ALL: - - DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x" - " last device %02x:%02x.%x flags: %02x\n", - PCI_BUS(iommu->first_device), - PCI_SLOT(iommu->first_device), - PCI_FUNC(iommu->first_device), - PCI_BUS(iommu->last_device), - PCI_SLOT(iommu->last_device), - PCI_FUNC(iommu->last_device), - e->flags); - - for (dev_i = iommu->first_device; - dev_i <= iommu->last_device; ++dev_i) - set_dev_entry_from_acpi(iommu, dev_i, - e->flags, 0); - break; - case IVHD_DEV_SELECT: - - DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " - "flags: %02x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags); - - devid = e->devid; - set_dev_entry_from_acpi(iommu, devid, e->flags, 0); - break; - case IVHD_DEV_SELECT_RANGE_START: - - DUMP_printk(" DEV_SELECT_RANGE_START\t " - "devid: %02x:%02x.%x flags: %02x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags); - - devid_start = e->devid; - flags = e->flags; - ext_flags = 0; - alias = false; - break; - case IVHD_DEV_ALIAS: - - DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " - "flags: %02x devid_to: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, - PCI_BUS(e->ext >> 8), - PCI_SLOT(e->ext >> 8), - PCI_FUNC(e->ext >> 8)); - - devid = e->devid; - devid_to = e->ext >> 8; - set_dev_entry_from_acpi(iommu, devid , e->flags, 0); - set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); - amd_iommu_alias_table[devid] = devid_to; - break; - case IVHD_DEV_ALIAS_RANGE: - - DUMP_printk(" DEV_ALIAS_RANGE\t\t " - "devid: %02x:%02x.%x flags: %02x " - "devid_to: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, - PCI_BUS(e->ext >> 8), - PCI_SLOT(e->ext >> 8), - PCI_FUNC(e->ext >> 8)); - - devid_start = e->devid; - flags = e->flags; - devid_to = e->ext >> 8; - ext_flags = 0; - alias = true; - break; - case IVHD_DEV_EXT_SELECT: - - DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " - "flags: %02x ext: %08x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, e->ext); - - devid = e->devid; - set_dev_entry_from_acpi(iommu, devid, e->flags, - e->ext); - break; - case IVHD_DEV_EXT_SELECT_RANGE: - - DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " - "%02x:%02x.%x flags: %02x ext: %08x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, e->ext); - - devid_start = e->devid; - flags = e->flags; - ext_flags = e->ext; - alias = false; - break; - case IVHD_DEV_RANGE_END: - - DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid)); - - devid = e->devid; - for (dev_i = devid_start; dev_i <= devid; ++dev_i) { - if (alias) { - amd_iommu_alias_table[dev_i] = devid_to; - set_dev_entry_from_acpi(iommu, - devid_to, flags, ext_flags); - } - set_dev_entry_from_acpi(iommu, dev_i, - flags, ext_flags); - } - break; - default: - break; - } - - p += ivhd_entry_length(p); - } -} - -/* Initializes the device->iommu mapping for the driver */ -static int __init init_iommu_devices(struct amd_iommu *iommu) -{ - u16 i; - - for (i = iommu->first_device; i <= iommu->last_device; ++i) - set_iommu_for_device(iommu, i); - - return 0; -} - -static void __init free_iommu_one(struct amd_iommu *iommu) -{ - free_command_buffer(iommu); - free_event_buffer(iommu); - 
iommu_unmap_mmio_space(iommu); -} - -static void __init free_iommu_all(void) -{ - struct amd_iommu *iommu, *next; - - for_each_iommu_safe(iommu, next) { - list_del(&iommu->list); - free_iommu_one(iommu); - kfree(iommu); - } -} - -/* - * This function clues the initialization function for one IOMMU - * together and also allocates the command buffer and programs the - * hardware. It does NOT enable the IOMMU. This is done afterwards. - */ -static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) -{ - spin_lock_init(&iommu->lock); - - /* Add IOMMU to internal data structures */ - list_add_tail(&iommu->list, &amd_iommu_list); - iommu->index = amd_iommus_present++; - - if (unlikely(iommu->index >= MAX_IOMMUS)) { - WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); - return -ENOSYS; - } - - /* Index is fine - add IOMMU to the array */ - amd_iommus[iommu->index] = iommu; - - /* - * Copy data from ACPI table entry to the iommu struct - */ - iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff); - if (!iommu->dev) - return 1; - - iommu->cap_ptr = h->cap_ptr; - iommu->pci_seg = h->pci_seg; - iommu->mmio_phys = h->mmio_phys; - iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); - if (!iommu->mmio_base) - return -ENOMEM; - - iommu->cmd_buf = alloc_command_buffer(iommu); - if (!iommu->cmd_buf) - return -ENOMEM; - - iommu->evt_buf = alloc_event_buffer(iommu); - if (!iommu->evt_buf) - return -ENOMEM; - - iommu->int_enabled = false; - - init_iommu_from_pci(iommu); - init_iommu_from_acpi(iommu, h); - init_iommu_devices(iommu); - - if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) - amd_iommu_np_cache = true; - - return pci_enable_device(iommu->dev); -} - -/* - * Iterates over all IOMMU entries in the ACPI table, allocates the - * IOMMU structure and initializes it with init_iommu_one() - */ -static int __init init_iommu_all(struct acpi_table_header *table) -{ - u8 *p = (u8 *)table, *end = (u8 *)table; - struct ivhd_header *h; - struct amd_iommu *iommu; - int ret; - - end += table->length; - p += IVRS_HEADER_LENGTH; - - while (p < end) { - h = (struct ivhd_header *)p; - switch (*p) { - case ACPI_IVHD_TYPE: - - DUMP_printk("device: %02x:%02x.%01x cap: %04x " - "seg: %d flags: %01x info %04x\n", - PCI_BUS(h->devid), PCI_SLOT(h->devid), - PCI_FUNC(h->devid), h->cap_ptr, - h->pci_seg, h->flags, h->info); - DUMP_printk(" mmio-addr: %016llx\n", - h->mmio_phys); - - iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); - if (iommu == NULL) { - amd_iommu_init_err = -ENOMEM; - return 0; - } - - ret = init_iommu_one(iommu, h); - if (ret) { - amd_iommu_init_err = ret; - return 0; - } - break; - default: - break; - } - p += h->length; - - } - WARN_ON(p != end); - - return 0; -} - -/**************************************************************************** - * - * The following functions initialize the MSI interrupts for all IOMMUs - * in the system. Its a bit challenging because there could be multiple - * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per - * pci_dev. 
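init_iommu_all() and the other IVRS passes above all share one walking pattern: skip the fixed table header, then advance through variable-length sub-tables using each sub-table's own length field. The userspace sketch below uses a made-up record layout (demo_hdr) to show that loop; it is not the real IVHD format.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct demo_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;        /* length of this sub-table, header included */
} __attribute__((packed));

#define DEMO_TOP_HEADER_LEN 4

int main(void)
{
        /* 4-byte top header, then two sub-tables of 8 and 6 bytes */
        uint8_t table[4 + 8 + 6] = { 0 };
        uint8_t *p = table, *end = table + sizeof(table);
        struct demo_hdr h;

        h.type = 0x10; h.flags = 0; h.length = 8;
        memcpy(table + 4, &h, sizeof(h));
        h.type = 0x20; h.flags = 0; h.length = 6;
        memcpy(table + 4 + 8, &h, sizeof(h));

        p += DEMO_TOP_HEADER_LEN;
        while (p < end) {
                memcpy(&h, p, sizeof(h));
                printf("sub-table type %#x, %d bytes\n", h.type, h.length);
                p += h.length;          /* step by this sub-table's length */
        }
        return 0;
}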
- * - ****************************************************************************/ - -static int iommu_setup_msi(struct amd_iommu *iommu) -{ - int r; - - if (pci_enable_msi(iommu->dev)) - return 1; - - r = request_threaded_irq(iommu->dev->irq, - amd_iommu_int_handler, - amd_iommu_int_thread, - 0, "AMD-Vi", - iommu->dev); - - if (r) { - pci_disable_msi(iommu->dev); - return 1; - } - - iommu->int_enabled = true; - iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); - - return 0; -} - -static int iommu_init_msi(struct amd_iommu *iommu) -{ - if (iommu->int_enabled) - return 0; - - if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) - return iommu_setup_msi(iommu); - - return 1; -} - -/**************************************************************************** - * - * The next functions belong to the third pass of parsing the ACPI - * table. In this last pass the memory mapping requirements are - * gathered (like exclusion and unity mapping reanges). - * - ****************************************************************************/ - -static void __init free_unity_maps(void) -{ - struct unity_map_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { - list_del(&entry->list); - kfree(entry); - } -} - -/* called when we find an exclusion range definition in ACPI */ -static int __init init_exclusion_range(struct ivmd_header *m) -{ - int i; - - switch (m->type) { - case ACPI_IVMD_TYPE: - set_device_exclusion_range(m->devid, m); - break; - case ACPI_IVMD_TYPE_ALL: - for (i = 0; i <= amd_iommu_last_bdf; ++i) - set_device_exclusion_range(i, m); - break; - case ACPI_IVMD_TYPE_RANGE: - for (i = m->devid; i <= m->aux; ++i) - set_device_exclusion_range(i, m); - break; - default: - break; - } - - return 0; -} - -/* called for unity map ACPI definition */ -static int __init init_unity_map_range(struct ivmd_header *m) -{ - struct unity_map_entry *e = 0; - char *s; - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (e == NULL) - return -ENOMEM; - - switch (m->type) { - default: - kfree(e); - return 0; - case ACPI_IVMD_TYPE: - s = "IVMD_TYPEi\t\t\t"; - e->devid_start = e->devid_end = m->devid; - break; - case ACPI_IVMD_TYPE_ALL: - s = "IVMD_TYPE_ALL\t\t"; - e->devid_start = 0; - e->devid_end = amd_iommu_last_bdf; - break; - case ACPI_IVMD_TYPE_RANGE: - s = "IVMD_TYPE_RANGE\t\t"; - e->devid_start = m->devid; - e->devid_end = m->aux; - break; - } - e->address_start = PAGE_ALIGN(m->range_start); - e->address_end = e->address_start + PAGE_ALIGN(m->range_length); - e->prot = m->flags >> 1; - - DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" - " range_start: %016llx range_end: %016llx flags: %x\n", s, - PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start), - PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end), - PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), - e->address_start, e->address_end, m->flags); - - list_add_tail(&e->list, &amd_iommu_unity_map); - - return 0; -} - -/* iterates over all memory definitions we find in the ACPI table */ -static int __init init_memory_definitions(struct acpi_table_header *table) -{ - u8 *p = (u8 *)table, *end = (u8 *)table; - struct ivmd_header *m; - - end += table->length; - p += IVRS_HEADER_LENGTH; - - while (p < end) { - m = (struct ivmd_header *)p; - if (m->flags & IVMD_FLAG_EXCL_RANGE) - init_exclusion_range(m); - else if (m->flags & IVMD_FLAG_UNITY_MAP) - init_unity_map_range(m); - - p += m->length; - } - - return 0; -} - -/* - * Init the device table to not allow DMA access for devices and - * suppress all page faults - */ 
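init_unity_map_range() above records page-aligned addresses via PAGE_ALIGN(). The add-then-mask rounding it performs, as a tiny standalone sketch with an illustrative 4 KiB page size:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

/* round an address up to the next page boundary */
static unsigned long demo_page_align(unsigned long addr)
{
        return (addr + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK;
}

int main(void)
{
        printf("%#lx -> %#lx\n", 0x12345UL, demo_page_align(0x12345UL)); /* 0x13000 */
        printf("%#lx -> %#lx\n", 0x13000UL, demo_page_align(0x13000UL)); /* 0x13000 */
        return 0;
}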
-static void init_device_table(void) -{ - u16 devid; - - for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { - set_dev_entry_bit(devid, DEV_ENTRY_VALID); - set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); - } -} - -static void iommu_init_flags(struct amd_iommu *iommu) -{ - iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); - - iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_PASSPW_EN); - - iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); - - iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_ISOC_EN) : - iommu_feature_disable(iommu, CONTROL_ISOC_EN); - - /* - * make IOMMU memory accesses cache coherent - */ - iommu_feature_enable(iommu, CONTROL_COHERENT_EN); -} - -static void iommu_apply_resume_quirks(struct amd_iommu *iommu) -{ - int i, j; - u32 ioc_feature_control; - struct pci_dev *pdev = NULL; - - /* RD890 BIOSes may not have completely reconfigured the iommu */ - if (!is_rd890_iommu(iommu->dev)) - return; - - /* - * First, we need to ensure that the iommu is enabled. This is - * controlled by a register in the northbridge - */ - pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); - - if (!pdev) - return; - - /* Select Northbridge indirect register 0x75 and enable writing */ - pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); - pci_read_config_dword(pdev, 0x64, &ioc_feature_control); - - /* Enable the iommu */ - if (!(ioc_feature_control & 0x1)) - pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); - - pci_dev_put(pdev); - - /* Restore the iommu BAR */ - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, - iommu->stored_addr_lo); - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, - iommu->stored_addr_hi); - - /* Restore the l1 indirect regs for each of the 6 l1s */ - for (i = 0; i < 6; i++) - for (j = 0; j < 0x12; j++) - iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); - - /* Restore the l2 indirect regs */ - for (i = 0; i < 0x83; i++) - iommu_write_l2(iommu, i, iommu->stored_l2[i]); - - /* Lock PCI setup registers */ - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, - iommu->stored_addr_lo | 1); -} - -/* - * This function finally enables all IOMMUs found in the system after - * they have been initialized - */ -static void enable_iommus(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) { - iommu_disable(iommu); - iommu_init_flags(iommu); - iommu_set_device_table(iommu); - iommu_enable_command_buffer(iommu); - iommu_enable_event_buffer(iommu); - iommu_set_exclusion_range(iommu); - iommu_init_msi(iommu); - iommu_enable(iommu); - iommu_flush_all_caches(iommu); - } -} - -static void disable_iommus(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_disable(iommu); -} - -/* - * Suspend/Resume support - * disable suspend until real resume implemented - */ - -static void amd_iommu_resume(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_apply_resume_quirks(iommu); - - /* re-load the hardware */ - enable_iommus(); - - /* - * we have to flush after the IOMMUs are enabled because a - * disabled IOMMU will never execute the commands we send - */ - for_each_iommu(iommu) - iommu_flush_all_caches(iommu); -} - -static int amd_iommu_suspend(void) -{ - /* disable IOMMUs to go 
out of the way for BIOS */ - disable_iommus(); - - return 0; -} - -static struct syscore_ops amd_iommu_syscore_ops = { - .suspend = amd_iommu_suspend, - .resume = amd_iommu_resume, -}; - -/* - * This is the core init function for AMD IOMMU hardware in the system. - * This function is called from the generic x86 DMA layer initialization - * code. - * - * This function basically parses the ACPI table for AMD IOMMU (IVRS) - * three times: - * - * 1 pass) Find the highest PCI device id the driver has to handle. - * Upon this information the size of the data structures is - * determined that needs to be allocated. - * - * 2 pass) Initialize the data structures just allocated with the - * information in the ACPI table about available AMD IOMMUs - * in the system. It also maps the PCI devices in the - * system to specific IOMMUs - * - * 3 pass) After the basic data structures are allocated and - * initialized we update them with information about memory - * remapping requirements parsed out of the ACPI table in - * this last pass. - * - * After that the hardware is initialized and ready to go. In the last - * step we do some Linux specific things like registering the driver in - * the dma_ops interface and initializing the suspend/resume support - * functions. Finally it prints some information about AMD IOMMUs and - * the driver state and enables the hardware. - */ -static int __init amd_iommu_init(void) -{ - int i, ret = 0; - - /* - * First parse ACPI tables to find the largest Bus/Dev/Func - * we need to handle. Upon this information the shared data - * structures for the IOMMUs in the system will be allocated - */ - if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) - return -ENODEV; - - ret = amd_iommu_init_err; - if (ret) - goto out; - - dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); - alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); - rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); - - ret = -ENOMEM; - - /* Device table - directly used by all IOMMUs */ - amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(dev_table_size)); - if (amd_iommu_dev_table == NULL) - goto out; - - /* - * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the - * IOMMU see for that device - */ - amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, - get_order(alias_table_size)); - if (amd_iommu_alias_table == NULL) - goto free; - - /* IOMMU rlookup table - find the IOMMU for a specific device */ - amd_iommu_rlookup_table = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(rlookup_table_size)); - if (amd_iommu_rlookup_table == NULL) - goto free; - - amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(MAX_DOMAIN_ID/8)); - if (amd_iommu_pd_alloc_bitmap == NULL) - goto free; - - /* init the device table */ - init_device_table(); - - /* - * let all alias entries point to itself - */ - for (i = 0; i <= amd_iommu_last_bdf; ++i) - amd_iommu_alias_table[i] = i; - - /* - * never allocate domain 0 because its used as the non-allocated and - * error value placeholder - */ - amd_iommu_pd_alloc_bitmap[0] = 1; - - spin_lock_init(&amd_iommu_pd_lock); - - /* - * now the data structures are allocated and basically initialized - * start the real acpi table scan - */ - ret = -ENODEV; - if (acpi_table_parse("IVRS", init_iommu_all) != 0) - goto free; - - if (amd_iommu_init_err) { - ret = amd_iommu_init_err; - goto free; - } - - if (acpi_table_parse("IVRS", init_memory_definitions) != 0) - goto free; - - if 
(amd_iommu_init_err) { - ret = amd_iommu_init_err; - goto free; - } - - ret = amd_iommu_init_devices(); - if (ret) - goto free; - - enable_iommus(); - - if (iommu_pass_through) - ret = amd_iommu_init_passthrough(); - else - ret = amd_iommu_init_dma_ops(); - - if (ret) - goto free_disable; - - amd_iommu_init_api(); - - amd_iommu_init_notifier(); - - register_syscore_ops(&amd_iommu_syscore_ops); - - if (iommu_pass_through) - goto out; - - if (amd_iommu_unmap_flush) - printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); - else - printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); - - x86_platform.iommu_shutdown = disable_iommus; -out: - return ret; - -free_disable: - disable_iommus(); - -free: - amd_iommu_uninit_devices(); - - free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, - get_order(MAX_DOMAIN_ID/8)); - - free_pages((unsigned long)amd_iommu_rlookup_table, - get_order(rlookup_table_size)); - - free_pages((unsigned long)amd_iommu_alias_table, - get_order(alias_table_size)); - - free_pages((unsigned long)amd_iommu_dev_table, - get_order(dev_table_size)); - - free_iommu_all(); - - free_unity_maps(); - -#ifdef CONFIG_GART_IOMMU - /* - * We failed to initialize the AMD IOMMU - try fallback to GART - * if possible. - */ - gart_iommu_init(); - -#endif - - goto out; -} - -/**************************************************************************** - * - * Early detect code. This code runs at IOMMU detection time in the DMA - * layer. It just looks if there is an IVRS ACPI table to detect AMD - * IOMMUs - * - ****************************************************************************/ -static int __init early_amd_iommu_detect(struct acpi_table_header *table) -{ - return 0; -} - -int __init amd_iommu_detect(void) -{ - if (no_iommu || (iommu_detected && !gart_iommu_aperture)) - return -ENODEV; - - if (amd_iommu_disabled) - return -ENODEV; - - if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { - iommu_detected = 1; - amd_iommu_detected = 1; - x86_init.iommu.iommu_init = amd_iommu_init; - - /* Make sure ACS will be enabled */ - pci_request_acs(); - return 1; - } - return -ENODEV; -} - -/**************************************************************************** - * - * Parsing functions for the AMD IOMMU specific kernel command line - * options. 
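The init path above leans on the usual goto-unwinding style: allocate resources in order and, on failure, jump to a label that frees only what was already set up. A compact userspace sketch of that shape; demo_init() and its sizes are invented for the example, and a real init would of course keep its tables rather than free them at the end.

#include <stdio.h>
#include <stdlib.h>

static int demo_init(void)
{
        void *table_a, *table_b;
        int ret = -1;                   /* pessimistic default, like -ENOMEM */

        table_a = malloc(64);
        if (!table_a)
                goto out;

        table_b = malloc(64);
        if (!table_b)
                goto free_a;            /* undo only what already succeeded */

        printf("both tables allocated\n");
        ret = 0;

        /* demo only: tear everything down again before returning */
        free(table_b);
free_a:
        free(table_a);
out:
        return ret;
}

int main(void)
{
        return demo_init() ? 1 : 0;
}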
- * - ****************************************************************************/ - -static int __init parse_amd_iommu_dump(char *str) -{ - amd_iommu_dump = true; - - return 1; -} - -static int __init parse_amd_iommu_options(char *str) -{ - for (; *str; ++str) { - if (strncmp(str, "fullflush", 9) == 0) - amd_iommu_unmap_flush = true; - if (strncmp(str, "off", 3) == 0) - amd_iommu_disabled = true; - } - - return 1; -} - -__setup("amd_iommu_dump", parse_amd_iommu_dump); -__setup("amd_iommu=", parse_amd_iommu_options); - -IOMMU_INIT_FINISH(amd_iommu_detect, - gart_iommu_hole_init, - 0, - 0); diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 289e92862fd..2b6630d75e1 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -44,6 +44,7 @@ #include <asm/fixmap.h> #include <asm/apb_timer.h> #include <asm/mrst.h> +#include <asm/time.h> #define APBT_MASK CLOCKSOURCE_MASK(32) #define APBT_SHIFT 22 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index b961af86bfe..9498b844518 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -27,6 +27,7 @@ #include <linux/syscore_ops.h> #include <linux/delay.h> #include <linux/timex.h> +#include <linux/i8253.h> #include <linux/dmar.h> #include <linux/init.h> #include <linux/cpu.h> @@ -39,7 +40,6 @@ #include <asm/pgalloc.h> #include <asm/atomic.h> #include <asm/mpspec.h> -#include <asm/i8253.h> #include <asm/i8259.h> #include <asm/proto.h> #include <asm/apic.h> @@ -48,6 +48,7 @@ #include <asm/hpet.h> #include <asm/idle.h> #include <asm/mtrr.h> +#include <asm/time.h> #include <asm/smp.h> #include <asm/mce.h> #include <asm/tsc.h> @@ -390,7 +391,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new) /* * If mask=1, the LVT entry does not generate interrupts while mask=0 - * enables the vector. See also the BKDGs. + * enables the vector. See also the BKDGs. Must be called with + * preemption disabled. */ int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) @@ -1428,7 +1430,7 @@ void enable_x2apic(void) rdmsr(MSR_IA32_APICBASE, msr, msr2); if (!(msr & X2APIC_ENABLE)) { printk_once(KERN_INFO "Enabling x2apic\n"); - wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0); + wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, msr2); } } #endif /* CONFIG_X86_X2APIC */ diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e5293394b54..8eb863e27ea 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1295,6 +1295,16 @@ static int setup_ioapic_entry(int apic_id, int irq, * irq handler will do the explicit EOI to the io-apic. 
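parse_amd_iommu_options() above scans its argument once and matches known keywords with strncmp(), so "fullflush" and "off" may appear anywhere in the option string. A standalone sketch of that loop; the demo_* flags are stand-ins for the real module state.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool demo_fullflush;
static bool demo_disabled;

static void demo_parse_options(const char *str)
{
        for (; *str; ++str) {
                if (strncmp(str, "fullflush", 9) == 0)
                        demo_fullflush = true;
                if (strncmp(str, "off", 3) == 0)
                        demo_disabled = true;
        }
}

int main(void)
{
        demo_parse_options("fullflush,off");
        printf("fullflush=%d disabled=%d\n", demo_fullflush, demo_disabled);
        return 0;
}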
*/ ir_entry->vector = pin; + + apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: " + "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d " + "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X " + "Avail:%X Vector:%02X Dest:%08X " + "SID:%04X SQ:%X SVT:%X)\n", + apic_id, irte.present, irte.fpd, irte.dst_mode, + irte.redir_hint, irte.trigger_mode, irte.dlvry_mode, + irte.avail, irte.vector, irte.dest_id, + irte.sid, irte.sq, irte.svt); } else { entry->delivery_mode = apic->irq_delivery_mode; entry->dest_mode = apic->irq_dest_mode; @@ -1337,9 +1347,9 @@ static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " - "IRQ %d Mode:%i Active:%i)\n", + "IRQ %d Mode:%i Active:%i Dest:%d)\n", apic_id, mpc_ioapic_id(apic_id), pin, cfg->vector, - irq, trigger, polarity); + irq, trigger, polarity, dest); if (setup_ioapic_entry(mpc_ioapic_id(apic_id), irq, &entry, @@ -1522,10 +1532,12 @@ __apicdebuginit(void) print_IO_APIC(void) printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); - printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); + printk(KERN_DEBUG "....... : max redirection entries: %02X\n", + reg_01.bits.entries); printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); - printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); + printk(KERN_DEBUG "....... : IO APIC version: %02X\n", + reg_01.bits.version); /* * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, @@ -1550,31 +1562,60 @@ __apicdebuginit(void) print_IO_APIC(void) printk(KERN_DEBUG ".... IRQ redirection table:\n"); - printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" - " Stat Dmod Deli Vect:\n"); + if (intr_remapping_enabled) { + printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR" + " Pol Stat Indx2 Zero Vect:\n"); + } else { + printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" + " Stat Dmod Deli Vect:\n"); + } for (i = 0; i <= reg_01.bits.entries; i++) { - struct IO_APIC_route_entry entry; - - entry = ioapic_read_entry(apic, i); - - printk(KERN_DEBUG " %02x %03X ", - i, - entry.dest - ); + if (intr_remapping_enabled) { + struct IO_APIC_route_entry entry; + struct IR_IO_APIC_route_entry *ir_entry; + + entry = ioapic_read_entry(apic, i); + ir_entry = (struct IR_IO_APIC_route_entry *) &entry; + printk(KERN_DEBUG " %02x %04X ", + i, + ir_entry->index + ); + printk("%1d %1d %1d %1d %1d " + "%1d %1d %X %02X\n", + ir_entry->format, + ir_entry->mask, + ir_entry->trigger, + ir_entry->irr, + ir_entry->polarity, + ir_entry->delivery_status, + ir_entry->index2, + ir_entry->zero, + ir_entry->vector + ); + } else { + struct IO_APIC_route_entry entry; - printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", - entry.mask, - entry.trigger, - entry.irr, - entry.polarity, - entry.delivery_status, - entry.dest_mode, - entry.delivery_mode, - entry.vector - ); + entry = ioapic_read_entry(apic, i); + printk(KERN_DEBUG " %02x %02X ", + i, + entry.dest + ); + printk("%1d %1d %1d %1d %1d " + "%1d %1d %02X\n", + entry.mask, + entry.trigger, + entry.irr, + entry.polarity, + entry.delivery_status, + entry.dest_mode, + entry.delivery_mode, + entry.vector + ); + } } } + printk(KERN_DEBUG "IRQ to pin mappings:\n"); for_each_active_irq(irq) { struct irq_pin_list *entry; @@ -1792,7 +1833,7 @@ __apicdebuginit(int) print_ICs(void) return 0; } -fs_initcall(print_ICs); +late_initcall(print_ICs); /* Where if anywhere is the i8259 connect in external int mode */ diff --git 
a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index b511a011b7d..adc66c3a1fe 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat); /* Direct Legacy VGA I/O traffic to designated IOH */ int uv_set_vga_state(struct pci_dev *pdev, bool decode, - unsigned int command_bits, bool change_bridge) + unsigned int command_bits, u32 flags) { int domain, bus, rc; - PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n", - pdev->devfn, decode, command_bits, change_bridge); + PR_DEVEL("devfn %x decode %d cmd %x flags %d\n", + pdev->devfn, decode, command_bits, flags); - if (!change_bridge) + if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) return 0; if ((command_bits & PCI_COMMAND_IO) == 0) diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 965a7666c28..0371c484bb8 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c @@ -229,11 +229,11 @@ #include <linux/jiffies.h> #include <linux/acpi.h> #include <linux/syscore_ops.h> +#include <linux/i8253.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/desc.h> -#include <asm/i8253.h> #include <asm/olpc.h> #include <asm/paravirt.h> #include <asm/reboot.h> @@ -1220,11 +1220,11 @@ static void reinit_timer(void) raw_spin_lock_irqsave(&i8253_lock, flags); /* set the clock to HZ */ - outb_pit(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ + outb_p(0x34, PIT_MODE); /* binary, mode 2, LSB/MSB, ch 0 */ udelay(10); - outb_pit(LATCH & 0xff, PIT_CH0); /* LSB */ + outb_p(LATCH & 0xff, PIT_CH0); /* LSB */ udelay(10); - outb_pit(LATCH >> 8, PIT_CH0); /* MSB */ + outb_p(LATCH >> 8, PIT_CH0); /* MSB */ udelay(10); raw_spin_unlock_irqrestore(&i8253_lock, flags); #endif diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index c29d631af6f..395a10e6806 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -63,7 +63,6 @@ void foo(void) BLANK(); OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending); - OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir); BLANK(); OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 1edf5ba4fb2..ed6086eedf1 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -456,6 +456,24 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); + + /* + * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not. 
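For the reinit_timer() hunk further up in apm_32.c: the three outb_p() writes program channel 0 of the i8253 PIT with mode 2 and a 16-bit reload value, low byte first. The sketch below only computes and prints those values; the 1.193182 MHz input clock is the standard PIT rate, while HZ=250 is an illustrative choice, not something taken from this patch.

#include <stdio.h>

#define DEMO_PIT_TICK_RATE 1193182UL    /* PIT input clock in Hz */
#define DEMO_HZ            250UL        /* illustrative kernel HZ */
#define DEMO_LATCH         ((DEMO_PIT_TICK_RATE + DEMO_HZ / 2) / DEMO_HZ)

int main(void)
{
        printf("mode byte 0x34 -> PIT_MODE (binary, mode 2, LSB/MSB, ch 0)\n");
        printf("LATCH = %lu\n", DEMO_LATCH);
        printf("LSB 0x%02lx -> PIT_CH0\n", DEMO_LATCH & 0xff);
        printf("MSB 0x%02lx -> PIT_CH0\n", DEMO_LATCH >> 8);
        return 0;
}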
+ * x86_energy_perf_policy(8) is available to change it at run-time + */ + if (cpu_has(c, X86_FEATURE_EPB)) { + u64 epb; + + rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); + if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) { + printk_once(KERN_WARNING "ENERGY_PERF_BIAS:" + " Set to 'normal', was 'performance'\n" + "ENERGY_PERF_BIAS: View and update with" + " x86_energy_perf_policy(8)\n"); + epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; + wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); + } + } } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 3a0338b4b17..4ee3abf20ed 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -22,7 +22,6 @@ #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/slab.h> -#include <linux/highmem.h> #include <linux/cpu.h> #include <linux/bitops.h> @@ -45,38 +44,27 @@ do { \ #endif /* - * best effort, GUP based copy_from_user() that assumes IRQ or NMI context + * | NHM/WSM | SNB | + * register ------------------------------- + * | HT | no HT | HT | no HT | + *----------------------------------------- + * offcore | core | core | cpu | core | + * lbr_sel | core | core | cpu | core | + * ld_lat | cpu | core | cpu | core | + *----------------------------------------- + * + * Given that there is a small number of shared regs, + * we can pre-allocate their slot in the per-cpu + * per-core reg tables. */ -static unsigned long -copy_from_user_nmi(void *to, const void __user *from, unsigned long n) -{ - unsigned long offset, addr = (unsigned long)from; - unsigned long size, len = 0; - struct page *page; - void *map; - int ret; - - do { - ret = __get_user_pages_fast(addr, 1, 0, &page); - if (!ret) - break; - - offset = addr & (PAGE_SIZE - 1); - size = min(PAGE_SIZE - offset, n - len); - - map = kmap_atomic(page); - memcpy(to, map+offset, size); - kunmap_atomic(map); - put_page(page); +enum extra_reg_type { + EXTRA_REG_NONE = -1, /* not used */ - len += size; - to += size; - addr += size; + EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ + EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ - } while (len < n); - - return len; -} + EXTRA_REG_MAX /* number of entries needed */ +}; struct event_constraint { union { @@ -132,11 +120,10 @@ struct cpu_hw_events { struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; /* - * Intel percore register state. - * Coordinate shared resources between HT threads. + * manage shared (per-core, per-cpu) registers + * used on Intel NHM/WSM/SNB */ - int percore_used; /* Used by this CPU? */ - struct intel_percore *per_core; + struct intel_shared_regs *shared_regs; /* * AMD specific bits @@ -187,26 +174,45 @@ struct cpu_hw_events { for ((e) = (c); (e)->weight; (e)++) /* + * Per register state. + */ +struct er_account { + raw_spinlock_t lock; /* per-core: protect structure */ + u64 config; /* extra MSR config */ + u64 reg; /* extra MSR number */ + atomic_t ref; /* reference count */ +}; + +/* * Extra registers for specific events. + * * Some events need large masks and require external MSRs. - * Define a mapping to these extra registers. + * Those extra MSRs end up being shared for all events on + * a PMU and sometimes between PMU of sibling HT threads. + * In either case, the kernel needs to handle conflicting + * accesses to those extra, shared, regs. The data structure + * to manage those registers is stored in cpu_hw_event. 
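The er_account structure introduced above pairs each shared extra MSR with a lock, the value currently programmed, and a reference count, so a second event may join only when it asks for the same config. The userspace sketch below models that reservation rule with a pthread mutex and a plain counter; it is a simplified stand-in, not the scheduler's actual constraint handling.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>

struct demo_er_account {
        pthread_mutex_t lock;
        uint64_t config;        /* value currently programmed into the MSR */
        int ref;                /* how many events share it */
};

/* try to reserve the slot for 'config'; succeed if free or already equal */
static bool demo_er_get(struct demo_er_account *er, uint64_t config)
{
        bool ok = false;

        pthread_mutex_lock(&er->lock);
        if (er->ref == 0 || er->config == config) {
                er->config = config;
                er->ref++;
                ok = true;
        }
        pthread_mutex_unlock(&er->lock);
        return ok;
}

static void demo_er_put(struct demo_er_account *er)
{
        pthread_mutex_lock(&er->lock);
        if (er->ref > 0)
                er->ref--;
        pthread_mutex_unlock(&er->lock);
}

int main(void)
{
        struct demo_er_account er = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        printf("first user,  config 0x1: %d\n", demo_er_get(&er, 0x1)); /* 1 */
        printf("same config, config 0x1: %d\n", demo_er_get(&er, 0x1)); /* 1 */
        printf("conflicting, config 0x2: %d\n", demo_er_get(&er, 0x2)); /* 0 */
        demo_er_put(&er);
        demo_er_put(&er);
        return 0;
}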
*/ struct extra_reg { unsigned int event; unsigned int msr; u64 config_mask; u64 valid_mask; + int idx; /* per_xxx->regs[] reg index */ }; -#define EVENT_EXTRA_REG(e, ms, m, vm) { \ +#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ .event = (e), \ .msr = (ms), \ .config_mask = (m), \ .valid_mask = (vm), \ + .idx = EXTRA_REG_##i \ } -#define INTEL_EVENT_EXTRA_REG(event, msr, vm) \ - EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm) -#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0) + +#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ + EVENT_EXTRA_REG(event, msr, ARCH_PERFMON_EVENTSEL_EVENT, vm, idx) + +#define EVENT_EXTRA_END EVENT_EXTRA_REG(0, 0, 0, 0, RSP_0) union perf_capabilities { struct { @@ -252,7 +258,6 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; - struct event_constraint *percore_constraints; void (*quirks)(void); int perfctr_second_write; @@ -286,8 +291,12 @@ struct x86_pmu { * Extra registers for events */ struct extra_reg *extra_regs; + unsigned int er_flags; }; +#define ERF_NO_HT_SHARING 1 +#define ERF_HAS_RSP_1 2 + static struct x86_pmu x86_pmu __read_mostly; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { @@ -393,10 +402,10 @@ static inline unsigned int x86_pmu_event_addr(int index) */ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) { + struct hw_perf_event_extra *reg; struct extra_reg *er; - event->hw.extra_reg = 0; - event->hw.extra_config = 0; + reg = &event->hw.extra_reg; if (!x86_pmu.extra_regs) return 0; @@ -406,8 +415,10 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) continue; if (event->attr.config1 & ~er->valid_mask) return -EINVAL; - event->hw.extra_reg = er->msr; - event->hw.extra_config = event->attr.config1; + + reg->idx = er->idx; + reg->config = event->attr.config1; + reg->reg = er->msr; break; } return 0; @@ -706,6 +717,9 @@ static int __x86_pmu_event_init(struct perf_event *event) event->hw.last_cpu = -1; event->hw.last_tag = ~0ULL; + /* mark unused */ + event->hw.extra_reg.idx = EXTRA_REG_NONE; + return x86_pmu.hw_config(event); } @@ -747,8 +761,8 @@ static void x86_pmu_disable(struct pmu *pmu) static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, u64 enable_mask) { - if (hwc->extra_reg) - wrmsrl(hwc->extra_reg, hwc->extra_config); + if (hwc->extra_reg.reg) + wrmsrl(hwc->extra_reg.reg, hwc->extra_reg.config); wrmsrl(hwc->config_base, hwc->config | enable_mask); } @@ -1332,7 +1346,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_event_set_period(event)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); } @@ -1637,6 +1651,40 @@ static int x86_pmu_commit_txn(struct pmu *pmu) perf_pmu_enable(pmu); return 0; } +/* + * a fake_cpuc is used to validate event groups. Due to + * the extra reg logic, we need to also allocate a fake + * per_core and per_cpu structure. Otherwise, group events + * using extra reg may conflict without the kernel being + * able to catch this when the last event gets added to + * the group. 
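The reworked EVENT_EXTRA_REG() above now also records a slot index, built by pasting the short name onto an enum prefix with ##. A standalone sketch of that designated-initializer-plus-token-pasting trick; the demo_* names are invented and the MSR numbers below are only illustrative.

#include <stdio.h>

enum demo_extra_idx {
        DEMO_EXTRA_NONE  = -1,
        DEMO_EXTRA_RSP_0 = 0,
        DEMO_EXTRA_RSP_1 = 1,
        DEMO_EXTRA_MAX
};

struct demo_extra_reg {
        unsigned int event;
        unsigned int msr;
        unsigned long long valid_mask;
        int idx;
};

#define DEMO_EXTRA_REG(e, ms, vm, i) {          \
        .event      = (e),                      \
        .msr        = (ms),                     \
        .valid_mask = (vm),                     \
        .idx        = DEMO_EXTRA_##i            \
}

static const struct demo_extra_reg demo_regs[] = {
        DEMO_EXTRA_REG(0xb7, 0x1a6, 0xffff, RSP_0),
        DEMO_EXTRA_REG(0xbb, 0x1a7, 0xffff, RSP_1),
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(demo_regs) / sizeof(demo_regs[0]); i++)
                printf("event %#x -> msr %#x, idx %d\n",
                       demo_regs[i].event, demo_regs[i].msr, demo_regs[i].idx);
        return 0;
}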
+ */ +static void free_fake_cpuc(struct cpu_hw_events *cpuc) +{ + kfree(cpuc->shared_regs); + kfree(cpuc); +} + +static struct cpu_hw_events *allocate_fake_cpuc(void) +{ + struct cpu_hw_events *cpuc; + int cpu = raw_smp_processor_id(); + + cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); + if (!cpuc) + return ERR_PTR(-ENOMEM); + + /* only needed, if we have extra_regs */ + if (x86_pmu.extra_regs) { + cpuc->shared_regs = allocate_shared_regs(cpu); + if (!cpuc->shared_regs) + goto error; + } + return cpuc; +error: + free_fake_cpuc(cpuc); + return ERR_PTR(-ENOMEM); +} /* * validate that we can schedule this event @@ -1647,9 +1695,9 @@ static int validate_event(struct perf_event *event) struct event_constraint *c; int ret = 0; - fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); - if (!fake_cpuc) - return -ENOMEM; + fake_cpuc = allocate_fake_cpuc(); + if (IS_ERR(fake_cpuc)) + return PTR_ERR(fake_cpuc); c = x86_pmu.get_event_constraints(fake_cpuc, event); @@ -1659,7 +1707,7 @@ static int validate_event(struct perf_event *event) if (x86_pmu.put_event_constraints) x86_pmu.put_event_constraints(fake_cpuc, event); - kfree(fake_cpuc); + free_fake_cpuc(fake_cpuc); return ret; } @@ -1679,36 +1727,32 @@ static int validate_group(struct perf_event *event) { struct perf_event *leader = event->group_leader; struct cpu_hw_events *fake_cpuc; - int ret, n; - - ret = -ENOMEM; - fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO); - if (!fake_cpuc) - goto out; + int ret = -ENOSPC, n; + fake_cpuc = allocate_fake_cpuc(); + if (IS_ERR(fake_cpuc)) + return PTR_ERR(fake_cpuc); /* * the event is not yet connected with its * siblings therefore we must first collect * existing siblings, then add the new event * before we can simulate the scheduling */ - ret = -ENOSPC; n = collect_events(fake_cpuc, leader, true); if (n < 0) - goto out_free; + goto out; fake_cpuc->n_events = n; n = collect_events(fake_cpuc, event, false); if (n < 0) - goto out_free; + goto out; fake_cpuc->n_events = n; ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); -out_free: - kfree(fake_cpuc); out: + free_fake_cpuc(fake_cpuc); return ret; } diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index fe29c1d2219..941caa2e449 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -89,6 +89,20 @@ static __initconst const u64 amd_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */ + [ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */ + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; /* diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 41178c826c4..45fbb8f7f54 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1,25 +1,15 @@ #ifdef CONFIG_CPU_SUP_INTEL -#define MAX_EXTRA_REGS 2 - -/* - * Per register state. - */ -struct er_account { - int ref; /* reference count */ - unsigned int extra_reg; /* extra MSR number */ - u64 extra_config; /* extra MSR config */ -}; - /* - * Per core state - * This used to coordinate shared registers for HT threads. + * Per core/cpu state + * + * Used to coordinate shared registers between HT threads or + * among events on a single PMU. 
*/ -struct intel_percore { - raw_spinlock_t lock; /* protect structure */ - struct er_account regs[MAX_EXTRA_REGS]; - int refcnt; /* number of threads */ - unsigned core_id; +struct intel_shared_regs { + struct er_account regs[EXTRA_REG_MAX]; + int refcnt; /* per-core: #HT threads */ + unsigned core_id; /* per-core: core id */ }; /* @@ -88,16 +78,10 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), EVENT_EXTRA_END }; -static struct event_constraint intel_nehalem_percore_constraints[] __read_mostly = -{ - INTEL_EVENT_CONSTRAINT(0xb7, 0), - EVENT_CONSTRAINT_END -}; - static struct event_constraint intel_westmere_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ @@ -116,8 +100,6 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */ - INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ EVENT_CONSTRAINT_END @@ -125,15 +107,13 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = static struct extra_reg intel_westmere_extra_regs[] __read_mostly = { - INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff), - INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff), + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), EVENT_EXTRA_END }; -static struct event_constraint intel_westmere_percore_constraints[] __read_mostly = +static struct event_constraint intel_v1_event_constraints[] __read_mostly = { - INTEL_EVENT_CONSTRAINT(0xb7, 0), - INTEL_EVENT_CONSTRAINT(0xbb, 0), EVENT_CONSTRAINT_END }; @@ -145,6 +125,12 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = EVENT_CONSTRAINT_END }; +static struct extra_reg intel_snb_extra_regs[] __read_mostly = { + INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), + INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), + EVENT_EXTRA_END +}; + static u64 intel_pmu_event_map(int hw_event) { return intel_perfmon_event_map[hw_event]; @@ -245,6 +231,21 @@ static __initconst const u64 snb_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + }; static __initconst const u64 westmere_hw_cache_event_ids @@ -346,6 +347,20 @@ static __initconst const u64 westmere_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + }, }; /* @@ -398,7 +413,21 @@ static __initconst const u64 
nehalem_hw_cache_extra_regs [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS, [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS, }, - } + }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_ALL_DRAM, + [ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE_DRAM, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_ALL_DRAM, + [ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE_DRAM, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_ALL_DRAM, + [ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE_DRAM, + }, + }, }; static __initconst const u64 nehalem_hw_cache_event_ids @@ -500,6 +529,20 @@ static __initconst const u64 nehalem_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0x01b7, + [ C(RESULT_MISS) ] = 0x01b7, + }, + }, }; static __initconst const u64 core2_hw_cache_event_ids @@ -1003,7 +1046,7 @@ again: data.period = event->hw.last_period; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); } @@ -1037,65 +1080,121 @@ intel_bts_constraints(struct perf_event *event) return NULL; } +static bool intel_try_alt_er(struct perf_event *event, int orig_idx) +{ + if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) + return false; + + if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { + event->hw.config &= ~INTEL_ARCH_EVENT_MASK; + event->hw.config |= 0x01bb; + event->hw.extra_reg.idx = EXTRA_REG_RSP_1; + event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; + } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { + event->hw.config &= ~INTEL_ARCH_EVENT_MASK; + event->hw.config |= 0x01b7; + event->hw.extra_reg.idx = EXTRA_REG_RSP_0; + event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; + } + + if (event->hw.extra_reg.idx == orig_idx) + return false; + + return true; +} + +/* + * manage allocation of shared extra msr for certain events + * + * sharing can be: + * per-cpu: to be shared between the various events on a single PMU + * per-core: per-cpu + shared by HT threads + */ static struct event_constraint * -intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) +__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT; - struct event_constraint *c; - struct intel_percore *pc; + struct event_constraint *c = &emptyconstraint; + struct hw_perf_event_extra *reg = &event->hw.extra_reg; struct er_account *era; - int i; - int free_slot; - int found; + unsigned long flags; + int orig_idx = reg->idx; - if (!x86_pmu.percore_constraints || hwc->extra_alloc) - return NULL; + /* already allocated shared msr */ + if (reg->alloc) + return &unconstrained; - for (c = x86_pmu.percore_constraints; c->cmask; c++) { - if (e != c->code) - continue; +again: + era = &cpuc->shared_regs->regs[reg->idx]; + /* + * we use spin_lock_irqsave() to avoid lockdep issues when + * passing a fake cpuc + */ + raw_spin_lock_irqsave(&era->lock, flags); + + if (!atomic_read(&era->ref) || era->config == reg->config) { + + /* lock in msr value */ + era->config = reg->config; + era->reg = reg->reg; + + /* one more user */ + atomic_inc(&era->ref); + + /* no need to reallocate during incremental event scheduling */ + reg->alloc 
= 1; /* - * Allocate resource per core. + * All events using extra_reg are unconstrained. + * Avoids calling x86_get_event_constraints() + * + * Must revisit if extra_reg controlling events + * ever have constraints. Worst case we go through + * the regular event constraint table. */ - pc = cpuc->per_core; - if (!pc) - break; - c = &emptyconstraint; - raw_spin_lock(&pc->lock); - free_slot = -1; - found = 0; - for (i = 0; i < MAX_EXTRA_REGS; i++) { - era = &pc->regs[i]; - if (era->ref > 0 && hwc->extra_reg == era->extra_reg) { - /* Allow sharing same config */ - if (hwc->extra_config == era->extra_config) { - era->ref++; - cpuc->percore_used = 1; - hwc->extra_alloc = 1; - c = NULL; - } - /* else conflict */ - found = 1; - break; - } else if (era->ref == 0 && free_slot == -1) - free_slot = i; - } - if (!found && free_slot != -1) { - era = &pc->regs[free_slot]; - era->ref = 1; - era->extra_reg = hwc->extra_reg; - era->extra_config = hwc->extra_config; - cpuc->percore_used = 1; - hwc->extra_alloc = 1; - c = NULL; - } - raw_spin_unlock(&pc->lock); - return c; + c = &unconstrained; + } else if (intel_try_alt_er(event, orig_idx)) { + raw_spin_unlock(&era->lock); + goto again; } + raw_spin_unlock_irqrestore(&era->lock, flags); - return NULL; + return c; +} + +static void +__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, + struct hw_perf_event_extra *reg) +{ + struct er_account *era; + + /* + * only put constraint if extra reg was actually + * allocated. Also takes care of event which do + * not use an extra shared reg + */ + if (!reg->alloc) + return; + + era = &cpuc->shared_regs->regs[reg->idx]; + + /* one fewer user */ + atomic_dec(&era->ref); + + /* allocate again next time */ + reg->alloc = 0; +} + +static struct event_constraint * +intel_shared_regs_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct event_constraint *c = NULL; + + if (event->hw.extra_reg.idx != EXTRA_REG_NONE) + c = __intel_shared_reg_get_constraints(cpuc, event); + + return c; } static struct event_constraint * @@ -1111,49 +1210,28 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event if (c) return c; - c = intel_percore_constraints(cpuc, event); + c = intel_shared_regs_constraints(cpuc, event); if (c) return c; return x86_get_event_constraints(cpuc, event); } -static void intel_put_event_constraints(struct cpu_hw_events *cpuc, +static void +intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) { - struct extra_reg *er; - struct intel_percore *pc; - struct er_account *era; - struct hw_perf_event *hwc = &event->hw; - int i, allref; + struct hw_perf_event_extra *reg; - if (!cpuc->percore_used) - return; - - for (er = x86_pmu.extra_regs; er->msr; er++) { - if (er->event != (hwc->config & er->config_mask)) - continue; + reg = &event->hw.extra_reg; + if (reg->idx != EXTRA_REG_NONE) + __intel_shared_reg_put_constraints(cpuc, reg); +} - pc = cpuc->per_core; - raw_spin_lock(&pc->lock); - for (i = 0; i < MAX_EXTRA_REGS; i++) { - era = &pc->regs[i]; - if (era->ref > 0 && - era->extra_config == hwc->extra_config && - era->extra_reg == er->msr) { - era->ref--; - hwc->extra_alloc = 0; - break; - } - } - allref = 0; - for (i = 0; i < MAX_EXTRA_REGS; i++) - allref += pc->regs[i].ref; - if (allref == 0) - cpuc->percore_used = 0; - raw_spin_unlock(&pc->lock); - break; - } +static void intel_put_event_constraints(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + intel_put_shared_regs_event_constraints(cpuc, event); } 
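/*
 * [Editor's note -- illustrative sketch, not part of the patch above.]
 * The shared-reg scheme introduced here boils down to: the first event to
 * claim an extra MSR locks in its value and takes a reference; later events
 * may share the slot only if they program the exact same value, and a put
 * simply drops the reference.  A minimal, self-contained model of that rule
 * (hypothetical names, kernel-style C, not the patch's actual interface):
 */
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>

struct shared_reg_model {
	raw_spinlock_t lock;	/* protects config */
	u64 config;		/* value currently programmed in the MSR */
	atomic_t ref;		/* number of events using this slot */
};

/* Try to claim the shared MSR for @config; true on success, false on conflict. */
static bool shared_reg_model_get(struct shared_reg_model *er, u64 config)
{
	unsigned long flags;
	bool ok = false;

	raw_spin_lock_irqsave(&er->lock, flags);
	if (!atomic_read(&er->ref) || er->config == config) {
		er->config = config;	/* first user locks in the value */
		atomic_inc(&er->ref);	/* one more user */
		ok = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return ok;
}

/* Drop a reference; the slot becomes reprogrammable once the count hits zero. */
static void shared_reg_model_put(struct shared_reg_model *er)
{
	atomic_dec(&er->ref);
}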
static int intel_pmu_hw_config(struct perf_event *event) @@ -1231,20 +1309,36 @@ static __initconst const struct x86_pmu core_pmu = { .event_constraints = intel_core_event_constraints, }; +static struct intel_shared_regs *allocate_shared_regs(int cpu) +{ + struct intel_shared_regs *regs; + int i; + + regs = kzalloc_node(sizeof(struct intel_shared_regs), + GFP_KERNEL, cpu_to_node(cpu)); + if (regs) { + /* + * initialize the locks to keep lockdep happy + */ + for (i = 0; i < EXTRA_REG_MAX; i++) + raw_spin_lock_init(&regs->regs[i].lock); + + regs->core_id = -1; + } + return regs; +} + static int intel_pmu_cpu_prepare(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - if (!cpu_has_ht_siblings()) + if (!x86_pmu.extra_regs) return NOTIFY_OK; - cpuc->per_core = kzalloc_node(sizeof(struct intel_percore), - GFP_KERNEL, cpu_to_node(cpu)); - if (!cpuc->per_core) + cpuc->shared_regs = allocate_shared_regs(cpu); + if (!cpuc->shared_regs) return NOTIFY_BAD; - raw_spin_lock_init(&cpuc->per_core->lock); - cpuc->per_core->core_id = -1; return NOTIFY_OK; } @@ -1260,32 +1354,34 @@ static void intel_pmu_cpu_starting(int cpu) */ intel_pmu_lbr_reset(); - if (!cpu_has_ht_siblings()) + if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING)) return; for_each_cpu(i, topology_thread_cpumask(cpu)) { - struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core; + struct intel_shared_regs *pc; + pc = per_cpu(cpu_hw_events, i).shared_regs; if (pc && pc->core_id == core_id) { - kfree(cpuc->per_core); - cpuc->per_core = pc; + kfree(cpuc->shared_regs); + cpuc->shared_regs = pc; break; } } - cpuc->per_core->core_id = core_id; - cpuc->per_core->refcnt++; + cpuc->shared_regs->core_id = core_id; + cpuc->shared_regs->refcnt++; } static void intel_pmu_cpu_dying(int cpu) { struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - struct intel_percore *pc = cpuc->per_core; + struct intel_shared_regs *pc; + pc = cpuc->shared_regs; if (pc) { if (pc->core_id == -1 || --pc->refcnt == 0) kfree(pc); - cpuc->per_core = NULL; + cpuc->shared_regs = NULL; } fini_debug_store_on_cpu(cpu); @@ -1436,7 +1532,6 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_nehalem_event_constraints; x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; - x86_pmu.percore_constraints = intel_nehalem_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.extra_regs = intel_nehalem_extra_regs; @@ -1481,10 +1576,10 @@ static __init int intel_pmu_init(void) intel_pmu_lbr_init_nhm(); x86_pmu.event_constraints = intel_westmere_event_constraints; - x86_pmu.percore_constraints = intel_westmere_percore_constraints; x86_pmu.enable_all = intel_pmu_nhm_enable_all; x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints; x86_pmu.extra_regs = intel_westmere_extra_regs; + x86_pmu.er_flags |= ERF_HAS_RSP_1; /* UOPS_ISSUED.STALLED_CYCLES */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; @@ -1502,6 +1597,10 @@ static __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_snb_event_constraints; x86_pmu.pebs_constraints = intel_snb_pebs_events; + x86_pmu.extra_regs = intel_snb_extra_regs; + /* all extra regs are per-cpu when HT is on */ + x86_pmu.er_flags |= ERF_HAS_RSP_1; + x86_pmu.er_flags |= ERF_NO_HT_SHARING; /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e; @@ -1512,11 +1611,19 @@ static __init int intel_pmu_init(void) break; default: - /* - * default 
constraints for v2 and up - */ - x86_pmu.event_constraints = intel_gen_event_constraints; - pr_cont("generic architected perfmon, "); + switch (x86_pmu.version) { + case 1: + x86_pmu.event_constraints = intel_v1_event_constraints; + pr_cont("generic architected perfmon v1, "); + break; + default: + /* + * default constraints for v2 and up + */ + x86_pmu.event_constraints = intel_gen_event_constraints; + pr_cont("generic architected perfmon, "); + break; + } } return 0; } @@ -1528,4 +1635,8 @@ static int intel_pmu_init(void) return 0; } +static struct intel_shared_regs *allocate_shared_regs(int cpu) +{ + return NULL; +} #endif /* CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index bab491b8ee2..1b1ef3addcf 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void) */ perf_prepare_sample(&header, &data, event, &regs); - if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) + if (perf_output_begin(&handle, event, header.size * (top - at))) return 1; for (; at < top; at++) { @@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event, else regs.flags &= ~PERF_EFLAGS_EXACT; - if (perf_event_overflow(event, 1, &data, &regs)) + if (perf_event_overflow(event, &data, &regs)) x86_pmu_stop(event, 0); } diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ead584fb6a7..7809d2bcb20 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -554,13 +554,102 @@ static __initconst const u64 p4_hw_cache_event_ids [ C(RESULT_MISS) ] = -1, }, }, + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, }; +/* + * Because of Netburst being quite restricted in how many + * identical events may run simultaneously, we introduce event aliases, + * ie the different events which have the same functionality but + * utilize non-intersected resources (ESCR/CCCR/counter registers). + * + * This allows us to relax restrictions a bit and run two or more + * identical events together. + * + * Never set any custom internal bits such as P4_CONFIG_HT, + * P4_CONFIG_ALIASABLE or bits for P4_PEBS_METRIC, they are + * either up to date automatically or not applicable at all. + */ +struct p4_event_alias { + u64 original; + u64 alternative; +} p4_event_aliases[] = { + { + /* + * Non-halted cycles can be substituted with non-sleeping cycles (see + * Intel SDM Vol3b for details). We need this alias to be able + * to run nmi-watchdog and 'perf top' (or any other user space tool + * which is interested in running PERF_COUNT_HW_CPU_CYCLES) + * simultaneously. 
+ */ + .original = + p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) | + P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)), + .alternative = + p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_EXECUTION_EVENT) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS0)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS1)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS2)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, NBOGUS3)| + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS0) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS1) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS2) | + P4_ESCR_EMASK_BIT(P4_EVENT_EXECUTION_EVENT, BOGUS3))| + p4_config_pack_cccr(P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT | + P4_CCCR_COMPARE), + }, +}; + +static u64 p4_get_alias_event(u64 config) +{ + u64 config_match; + int i; + + /* + * Only event with special mark is allowed, + * we're to be sure it didn't come as malformed + * RAW event. + */ + if (!(config & P4_CONFIG_ALIASABLE)) + return 0; + + config_match = config & P4_CONFIG_EVENT_ALIAS_MASK; + + for (i = 0; i < ARRAY_SIZE(p4_event_aliases); i++) { + if (config_match == p4_event_aliases[i].original) { + config_match = p4_event_aliases[i].alternative; + break; + } else if (config_match == p4_event_aliases[i].alternative) { + config_match = p4_event_aliases[i].original; + break; + } + } + + if (i >= ARRAY_SIZE(p4_event_aliases)) + return 0; + + return config_match | (config & P4_CONFIG_EVENT_ALIAS_IMMUTABLE_BITS); +} + static u64 p4_general_events[PERF_COUNT_HW_MAX] = { /* non-halted CPU clocks */ [PERF_COUNT_HW_CPU_CYCLES] = p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS) | - P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)), + P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)) | + P4_CONFIG_ALIASABLE, /* * retired instructions @@ -945,7 +1034,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) if (!x86_perf_event_set_period(event)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); } @@ -1120,6 +1209,8 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign struct p4_event_bind *bind; unsigned int i, thread, num; int cntr_idx, escr_idx; + u64 config_alias; + int pass; bitmap_zero(used_mask, X86_PMC_IDX_MAX); bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE); @@ -1128,6 +1219,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign hwc = &cpuc->event_list[i]->hw; thread = p4_ht_thread(cpu); + pass = 0; + +again: + /* + * It's possible to hit a circular lock + * between original and alternative events + * if both are scheduled already. + */ + if (pass > 2) + goto done; + bind = p4_config_get_bind(hwc->config); escr_idx = p4_get_escr_idx(bind->escr_msr[thread]); if (unlikely(escr_idx == -1)) @@ -1141,8 +1243,17 @@ static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign } cntr_idx = p4_next_cntr(thread, used_mask, bind); - if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) - goto done; + if (cntr_idx == -1 || test_bit(escr_idx, escr_mask)) { + /* + * Check whether an event alias is still available. 
+ */ + config_alias = p4_get_alias_event(hwc->config); + if (!config_alias) + goto done; + hwc->config = config_alias; + pass++; + goto again; + } p4_pmu_swap_config_ts(hwc, cpu); if (assign) diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 690bc846183..a621f342768 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/pci.h> #include <linux/of_pci.h> +#include <linux/initrd.h> #include <asm/hpet.h> #include <asm/irq_controller.h> @@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); } +#ifdef CONFIG_BLK_DEV_INITRD +void __init early_init_dt_setup_initrd_arch(unsigned long start, + unsigned long end) +{ + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(end); + initrd_below_start_ok = 1; +} +#endif + void __init add_dtb(u64 data) { initial_dtb = data + offsetof(struct setup_data, data); @@ -123,6 +134,24 @@ static int __init add_bus_probe(void) module_init(add_bus_probe); #ifdef CONFIG_PCI +struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) +{ + struct device_node *np; + + for_each_node_by_type(np, "pci") { + const void *prop; + unsigned int bus_min; + + prop = of_get_property(np, "bus-range", NULL); + if (!prop) + continue; + bus_min = be32_to_cpup(prop); + if (bus->number == bus_min) + return np; + } + return NULL; +} + static int x86_of_pci_irq_enable(struct pci_dev *dev) { struct of_irq oirq; @@ -154,50 +183,8 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev) void __cpuinit x86_of_pci_init(void) { - struct device_node *np; - pcibios_enable_irq = x86_of_pci_irq_enable; pcibios_disable_irq = x86_of_pci_irq_disable; - - for_each_node_by_type(np, "pci") { - const void *prop; - struct pci_bus *bus; - unsigned int bus_min; - struct device_node *child; - - prop = of_get_property(np, "bus-range", NULL); - if (!prop) - continue; - bus_min = be32_to_cpup(prop); - - bus = pci_find_bus(0, bus_min); - if (!bus) { - printk(KERN_ERR "Can't find a node for bus %s.\n", - np->full_name); - continue; - } - - if (bus->self) - bus->self->dev.of_node = np; - else - bus->dev.of_node = np; - - for_each_child_of_node(np, child) { - struct pci_dev *dev; - u32 devfn; - - prop = of_get_property(child, "reg", NULL); - if (!prop) - continue; - - devfn = (be32_to_cpup(prop) >> 8) & 0xff; - dev = pci_get_slot(bus, devfn); - if (!dev) - continue; - dev->dev.of_node = child; - pci_dev_put(dev); - } - } } #endif diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index e71c98d3c0d..19853ad8afc 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -105,34 +105,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack, } /* - * We are returning from the irq stack and go to the previous one. - * If the previous stack is also in the irq stack, then bp in the first - * frame of the irq stack points to the previous, interrupted one. - * Otherwise we have another level of indirection: We first save - * the bp of the previous stack, then we switch the stack to the irq one - * and save a new bp that links to the previous one. 
- * (See save_args()) - */ -static inline unsigned long -fixup_bp_irq_link(unsigned long bp, unsigned long *stack, - unsigned long *irq_stack, unsigned long *irq_stack_end) -{ -#ifdef CONFIG_FRAME_POINTER - struct stack_frame *frame = (struct stack_frame *)bp; - unsigned long next; - - if (!in_irq_stack(stack, irq_stack, irq_stack_end)) { - if (!probe_kernel_address(&frame->next_frame, next)) - return next; - else - WARN_ONCE(1, "Perf: bad frame pointer = %p in " - "callchain\n", &frame->next_frame); - } -#endif - return bp; -} - -/* * x86-64 can have up to three kernel stacks: * process stack * interrupt stack @@ -155,9 +127,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, task = current; if (!stack) { - stack = &dummy; - if (task && task != current) + if (regs) + stack = (unsigned long *)regs->sp; + else if (task && task != current) stack = (unsigned long *)task->thread.sp; + else + stack = &dummy; } if (!bp) @@ -205,8 +180,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, * pointer (index -1 to end) in the IRQ stack: */ stack = (unsigned long *) (irq_stack_end[-1]); - bp = fixup_bp_irq_link(bp, stack, irq_stack, - irq_stack_end); irq_stack_end = NULL; ops->stack(data, "EOI"); continue; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8a445a0c989..d130b20a6b9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -297,27 +297,26 @@ ENDPROC(native_usergs_sysret64) .endm /* save partial stack frame */ - .pushsection .kprobes.text, "ax" -ENTRY(save_args) - XCPT_FRAME + .macro SAVE_ARGS_IRQ cld - /* - * start from rbp in pt_regs and jump over - * return address. - */ - movq_cfi rdi, RDI+8-RBP - movq_cfi rsi, RSI+8-RBP - movq_cfi rdx, RDX+8-RBP - movq_cfi rcx, RCX+8-RBP - movq_cfi rax, RAX+8-RBP - movq_cfi r8, R8+8-RBP - movq_cfi r9, R9+8-RBP - movq_cfi r10, R10+8-RBP - movq_cfi r11, R11+8-RBP - - leaq -RBP+8(%rsp),%rdi /* arg1 for handler */ - movq_cfi rbp, 8 /* push %rbp */ - leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ + /* start from rbp in pt_regs and jump over */ + movq_cfi rdi, RDI-RBP + movq_cfi rsi, RSI-RBP + movq_cfi rdx, RDX-RBP + movq_cfi rcx, RCX-RBP + movq_cfi rax, RAX-RBP + movq_cfi r8, R8-RBP + movq_cfi r9, R9-RBP + movq_cfi r10, R10-RBP + movq_cfi r11, R11-RBP + + /* Save rbp so that we can unwind from get_irq_regs() */ + movq_cfi rbp, 0 + + /* Save previous stack value */ + movq %rsp, %rsi + + leaq -RBP(%rsp),%rdi /* arg1 for handler */ testl $3, CS(%rdi) je 1f SWAPGS @@ -329,19 +328,14 @@ ENTRY(save_args) */ 1: incl PER_CPU_VAR(irq_count) jne 2f - popq_cfi %rax /* move return address... */ mov PER_CPU_VAR(irq_stack_ptr),%rsp EMPTY_FRAME 0 - pushq_cfi %rbp /* backlink for unwinder */ - pushq_cfi %rax /* ... 
to the new stack */ - /* - * We entered an interrupt context - irqs are off: - */ -2: TRACE_IRQS_OFF - ret - CFI_ENDPROC -END(save_args) - .popsection + +2: /* Store previous stack value */ + pushq %rsi + /* We entered an interrupt context - irqs are off: */ + TRACE_IRQS_OFF + .endm ENTRY(save_rest) PARTIAL_FRAME 1 REST_SKIP+8 @@ -473,7 +467,7 @@ ENTRY(system_call_after_swapgs) * and short: */ ENABLE_INTERRUPTS(CLBR_NONE) - SAVE_ARGS 8,1 + SAVE_ARGS 8,0 movq %rax,ORIG_RAX-ARGOFFSET(%rsp) movq %rcx,RIP-ARGOFFSET(%rsp) CFI_REL_OFFSET rip,RIP-ARGOFFSET @@ -508,7 +502,7 @@ sysret_check: TRACE_IRQS_ON movq RIP-ARGOFFSET(%rsp),%rcx CFI_REGISTER rip,rcx - RESTORE_ARGS 0,-ARG_SKIP,1 + RESTORE_ARGS 1,-ARG_SKIP,0 /*CFI_REGISTER rflags,r11*/ movq PER_CPU_VAR(old_rsp), %rsp USERGS_SYSRET64 @@ -791,7 +785,7 @@ END(interrupt) /* reserve pt_regs for scratch regs and rbp */ subq $ORIG_RAX-RBP, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP - call save_args + SAVE_ARGS_IRQ PARTIAL_FRAME 0 call \func .endm @@ -814,15 +808,14 @@ ret_from_intr: DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF decl PER_CPU_VAR(irq_count) - leaveq - CFI_RESTORE rbp + /* Restore saved previous stack */ + popq %rsi + leaq 16(%rsi), %rsp + CFI_DEF_CFA_REGISTER rsp - CFI_ADJUST_CFA_OFFSET -8 + CFI_ADJUST_CFA_OFFSET -16 - /* we did not save rbx, restore only from ARGOFFSET */ - addq $8, %rsp - CFI_ADJUST_CFA_OFFSET -8 exit_intr: GET_THREAD_INFO(%rcx) testl $3,CS-ARGOFFSET(%rsp) @@ -858,7 +851,7 @@ retint_restore_args: /* return to kernel space */ */ TRACE_IRQS_IRETQ restore_args: - RESTORE_ARGS 0,8,0 + RESTORE_ARGS 1,8,1 irq_return: INTERRUPT_RETURN diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 6781765b3a0..0f4b0651cd3 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -4,6 +4,7 @@ #include <linux/sysdev.h> #include <linux/delay.h> #include <linux/errno.h> +#include <linux/i8253.h> #include <linux/slab.h> #include <linux/hpet.h> #include <linux/init.h> @@ -12,8 +13,8 @@ #include <linux/io.h> #include <asm/fixmap.h> -#include <asm/i8253.h> #include <asm/hpet.h> +#include <asm/time.h> #define HPET_MASK CLOCKSOURCE_MASK(32) diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index fb66dc9e36c..f2b96de3c7c 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -3,113 +3,24 @@ * */ #include <linux/clockchips.h> -#include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/jiffies.h> #include <linux/module.h> #include <linux/timex.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/io.h> +#include <linux/i8253.h> -#include <asm/i8253.h> #include <asm/hpet.h> +#include <asm/time.h> #include <asm/smp.h> -DEFINE_RAW_SPINLOCK(i8253_lock); -EXPORT_SYMBOL(i8253_lock); - /* * HPET replaces the PIT, when enabled. So we need to know, which of * the two timers is used */ struct clock_event_device *global_clock_event; -/* - * Initialize the PIT timer. - * - * This is also called after resume to bring the PIT into operation again. 
- */ -static void init_pit_timer(enum clock_event_mode mode, - struct clock_event_device *evt) -{ - raw_spin_lock(&i8253_lock); - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: - /* binary, mode 2, LSB/MSB, ch 0 */ - outb_pit(0x34, PIT_MODE); - outb_pit(LATCH & 0xff , PIT_CH0); /* LSB */ - outb_pit(LATCH >> 8 , PIT_CH0); /* MSB */ - break; - - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: - if (evt->mode == CLOCK_EVT_MODE_PERIODIC || - evt->mode == CLOCK_EVT_MODE_ONESHOT) { - outb_pit(0x30, PIT_MODE); - outb_pit(0, PIT_CH0); - outb_pit(0, PIT_CH0); - } - break; - - case CLOCK_EVT_MODE_ONESHOT: - /* One shot setup */ - outb_pit(0x38, PIT_MODE); - break; - - case CLOCK_EVT_MODE_RESUME: - /* Nothing to do here */ - break; - } - raw_spin_unlock(&i8253_lock); -} - -/* - * Program the next event in oneshot mode - * - * Delta is given in PIT ticks - */ -static int pit_next_event(unsigned long delta, struct clock_event_device *evt) -{ - raw_spin_lock(&i8253_lock); - outb_pit(delta & 0xff , PIT_CH0); /* LSB */ - outb_pit(delta >> 8 , PIT_CH0); /* MSB */ - raw_spin_unlock(&i8253_lock); - - return 0; -} - -/* - * On UP the PIT can serve all of the possible timer functions. On SMP systems - * it can be solely used for the global tick. - * - * The profiling and update capabilities are switched off once the local apic is - * registered. This mechanism replaces the previous #ifdef LOCAL_APIC - - * !using_apic_timer decisions in do_timer_interrupt_hook() - */ -static struct clock_event_device pit_ce = { - .name = "pit", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = init_pit_timer, - .set_next_event = pit_next_event, - .irq = 0, -}; - -/* - * Initialize the conversion factor and the min/max deltas of the clock event - * structure and register the clock event source with the framework. - */ void __init setup_pit_timer(void) { - /* - * Start pit with the boot cpu mask and make it global after the - * IO_APIC has been initialized. 
- */ - pit_ce.cpumask = cpumask_of(smp_processor_id()); - - clockevents_config_and_register(&pit_ce, CLOCK_TICK_RATE, 0xF, 0x7FFF); - global_clock_event = &pit_ce; + clockevent_i8253_init(true); + global_clock_event = &i8253_clockevent; } #ifndef CONFIG_X86_64 @@ -123,7 +34,7 @@ static int __init init_pit_clocksource(void) * - when local APIC timer is active (PIT is switched off) */ if (num_possible_cpus() > 1 || is_hpet_enabled() || - pit_ce.mode != CLOCK_EVT_MODE_PERIODIC) + i8253_clockevent.mode != CLOCK_EVT_MODE_PERIODIC) return 0; return clocksource_i8253_init(); diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 5f9ecff328b..00354d4919a 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -608,7 +608,7 @@ int kgdb_arch_init(void) return register_die_notifier(&kgdb_notifier); } -static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi, +static void kgdb_hw_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { struct task_struct *tsk = current; @@ -638,7 +638,7 @@ void kgdb_arch_late(void) for (i = 0; i < HBP_NUM; i++) { if (breakinfo[i].pev) continue; - breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL); + breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); if (IS_ERR((void * __force)breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 2e4928d45a2..e1ba8cb24e4 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override); * Powermanagement idle function, if any.. */ void (*pm_idle)(void); -#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) +#ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(pm_idle); #endif @@ -399,7 +399,7 @@ void default_idle(void) cpu_relax(); } } -#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) +#ifdef CONFIG_APM_MODULE EXPORT_SYMBOL(default_idle); #endif diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 8d128783af4..a3d0dc59067 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { set_user_gs(regs, 0); regs->fs = 0; - set_fs(USER_DS); regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6c9dd922ac0..ca6f7ab8df3 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip, regs->cs = _cs; regs->ss = _ss; regs->flags = X86_EFLAGS_IF; - set_fs(USER_DS); /* * Free the old FP and other extended state */ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 807c2a2b80f..82528799c5d 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target, return ret; } -static void ptrace_triggered(struct perf_event *bp, int nmi, +static void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { @@ -715,7 +715,8 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, attr.bp_type = HW_BREAKPOINT_W; attr.disabled = 1; - bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk); + bp = register_user_hw_breakpoint(&attr, ptrace_triggered, + NULL, tsk); /* * 
CHECKME: the previous code returned -EIO if the addr wasn't diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 0c016f72769..9242436e993 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -294,6 +294,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), }, }, + { /* Handle reboot issue on Acer Aspire one */ + .callback = set_bios_reboot, + .ident = "Acer Aspire One A110", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), + }, + }, { } }; @@ -411,6 +419,30 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"), }, }, + { /* Handle problems with rebooting on the Latitude E6320. */ + .callback = set_pci_reboot, + .ident = "Dell Latitude E6320", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), + }, + }, + { /* Handle problems with rebooting on the Latitude E5420. */ + .callback = set_pci_reboot, + .ident = "Dell Latitude E5420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"), + }, + }, + { /* Handle problems with rebooting on the Latitude E6420. */ + .callback = set_pci_reboot, + .ident = "Dell Latitude E6420", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"), + }, + }, { } }; diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 33a0c11797d..9f548cb4a95 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused) per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; x86_platform.nmi_init(); + /* + * Wait until the cpu which brought this one up marked it + * online before enabling interrupts. If we don't do that then + * we can end up waking up the softirq thread before this cpu + * reached the active state, which makes the scheduler unhappy + * and schedule the softirq thread on the wrong cpu. This is + * only observable with forced threaded interrupts, but in + * theory it could also happen w/o them. It's just way harder + * to achieve. 
+ */ + while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) + cpu_relax(); + /* enable local interrupts */ local_irq_enable(); @@ -425,7 +438,7 @@ static void impress_friends(void) void __inquire_remote_apic(int apicid) { unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; - char *names[] = { "ID", "VERSION", "SPIV" }; + const char * const names[] = { "ID", "VERSION", "SPIV" }; int timeout; u32 status; diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 55d9bc03f69..fdd0c6430e5 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace) } EXPORT_SYMBOL_GPL(save_stack_trace); -void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs) +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) { dump_trace(current, regs, NULL, 0, &save_stack_ops, trace); if (trace->nr_entries < trace->max_entries) diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 30ac65df7d4..e07a2fc876b 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -36,6 +36,7 @@ #include <asm/bootparam.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> +#include <asm/swiotlb.h> #include <asm/fixmap.h> #include <asm/proto.h> #include <asm/setup.h> diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 00cbb272627..5a64d057be5 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -11,13 +11,13 @@ #include <linux/clockchips.h> #include <linux/interrupt.h> +#include <linux/i8253.h> #include <linux/time.h> #include <linux/mca.h> #include <asm/vsyscall.h> #include <asm/x86_init.h> #include <asm/i8259.h> -#include <asm/i8253.h> #include <asm/timer.h> #include <asm/hpet.h> #include <asm/time.h> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index d6e2477feb1..adc98675cda 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -47,38 +47,40 @@ #define DstDI (5<<1) /* Destination is in ES:(E)DI */ #define DstMem64 (6<<1) /* 64bit memory operand */ #define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */ -#define DstMask (7<<1) +#define DstDX (8<<1) /* Destination is in DX register */ +#define DstMask (0xf<<1) /* Source operand type. */ -#define SrcNone (0<<4) /* No source operand. */ -#define SrcReg (1<<4) /* Register operand. */ -#define SrcMem (2<<4) /* Memory operand. */ -#define SrcMem16 (3<<4) /* Memory operand (16-bit). */ -#define SrcMem32 (4<<4) /* Memory operand (32-bit). */ -#define SrcImm (5<<4) /* Immediate operand. */ -#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ -#define SrcOne (7<<4) /* Implied '1' */ -#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */ -#define SrcImmU (9<<4) /* Immediate operand, unsigned */ -#define SrcSI (0xa<<4) /* Source is in the DS:RSI */ -#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */ -#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */ -#define SrcAcc (0xd<<4) /* Source Accumulator */ -#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */ -#define SrcMask (0xf<<4) +#define SrcNone (0<<5) /* No source operand. */ +#define SrcReg (1<<5) /* Register operand. */ +#define SrcMem (2<<5) /* Memory operand. */ +#define SrcMem16 (3<<5) /* Memory operand (16-bit). */ +#define SrcMem32 (4<<5) /* Memory operand (32-bit). */ +#define SrcImm (5<<5) /* Immediate operand. */ +#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. 
*/ +#define SrcOne (7<<5) /* Implied '1' */ +#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */ +#define SrcImmU (9<<5) /* Immediate operand, unsigned */ +#define SrcSI (0xa<<5) /* Source is in the DS:RSI */ +#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */ +#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */ +#define SrcAcc (0xd<<5) /* Source Accumulator */ +#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */ +#define SrcDX (0xf<<5) /* Source is in DX register */ +#define SrcMask (0xf<<5) /* Generic ModRM decode. */ -#define ModRM (1<<8) +#define ModRM (1<<9) /* Destination is only written; never read. */ -#define Mov (1<<9) -#define BitOp (1<<10) -#define MemAbs (1<<11) /* Memory operand is absolute displacement */ -#define String (1<<12) /* String instruction (rep capable) */ -#define Stack (1<<13) /* Stack instruction (push/pop) */ -#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */ -#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ -#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */ -#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */ -#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */ -#define Sse (1<<17) /* SSE Vector instruction */ +#define Mov (1<<10) +#define BitOp (1<<11) +#define MemAbs (1<<12) /* Memory operand is absolute displacement */ +#define String (1<<13) /* String instruction (rep capable) */ +#define Stack (1<<14) /* Stack instruction (push/pop) */ +#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ +#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ +#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ +#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ +#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ +#define Sse (1<<18) /* SSE Vector instruction */ /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ #define VendorSpecific (1<<22) /* Vendor specific instruction */ @@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = { I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), I(SrcImmByte | Mov | Stack, em_push), I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), - D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */ - D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */ + D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */ + D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */ /* 0x70 - 0x7F */ X16(D(SrcImmByte)), /* 0x80 - 0x87 */ @@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = { /* 0xE8 - 0xEF */ D(SrcImm | Stack), D(SrcImm | ImplicitOps), D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), - D2bvIP(SrcNone | DstAcc, in, check_perm_in), - D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out), + D2bvIP(SrcDX | DstAcc, in, check_perm_in), + D2bvIP(SrcAcc | DstDX, out, check_perm_out), /* 0xF0 - 0xF7 */ N, DI(ImplicitOps, icebp), N, N, DI(ImplicitOps | Priv, hlt), D(ImplicitOps), @@ -3370,7 +3372,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; struct opcode opcode; - struct operand memop = { .type = OP_NONE }; + struct operand memop = { .type = OP_NONE }, *memopp = NULL; c->eip = ctxt->eip; c->fetch.start = c->eip; @@ -3545,9 +3547,6 @@ done_prefixes: if (memop.type == 
OP_MEM && c->ad_bytes != 8) memop.addr.mem.ea = (u32)memop.addr.mem.ea; - if (memop.type == OP_MEM && c->rip_relative) - memop.addr.mem.ea += c->eip; - /* * Decode and fetch the source operand: register, memory * or immediate. @@ -3569,6 +3568,7 @@ done_prefixes: c->op_bytes; srcmem_common: c->src = memop; + memopp = &c->src; break; case SrcImmU16: rc = decode_imm(ctxt, &c->src, 2, false); @@ -3613,6 +3613,12 @@ done_prefixes: memop.bytes = c->op_bytes + 2; goto srcmem_common; break; + case SrcDX: + c->src.type = OP_REG; + c->src.bytes = 2; + c->src.addr.reg = &c->regs[VCPU_REGS_RDX]; + fetch_register_operand(&c->src); + break; } if (rc != X86EMUL_CONTINUE) @@ -3659,6 +3665,7 @@ done_prefixes: case DstMem: case DstMem64: c->dst = memop; + memopp = &c->dst; if ((c->d & DstMask) == DstMem64) c->dst.bytes = 8; else @@ -3682,14 +3689,23 @@ done_prefixes: c->dst.addr.mem.seg = VCPU_SREG_ES; c->dst.val = 0; break; + case DstDX: + c->dst.type = OP_REG; + c->dst.bytes = 2; + c->dst.addr.reg = &c->regs[VCPU_REGS_RDX]; + fetch_register_operand(&c->dst); + break; case ImplicitOps: /* Special instructions do their own operand decoding. */ default: c->dst.type = OP_NONE; /* Disable writeback. */ - return 0; + break; } done: + if (memopp && memopp->type == OP_MEM && c->rip_relative) + memopp->addr.mem.ea += c->eip; + return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; } @@ -4027,7 +4043,6 @@ special_insn: break; case 0xec: /* in al,dx */ case 0xed: /* in (e/r)ax,dx */ - c->src.val = c->regs[VCPU_REGS_RDX]; do_io_in: if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, &c->dst.val)) @@ -4035,7 +4050,6 @@ special_insn: break; case 0xee: /* out dx,al */ case 0xef: /* out dx,(e/r)ax */ - c->dst.val = c->regs[VCPU_REGS_RDX]; do_io_out: ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val, &c->src.val, 1); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index bd14bb4c859..aee38623b76 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) { - return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); + return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); } static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 6c4dc010c4c..9d03ad4dd5e 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, gva_t addr, u32 access) { pt_element_t pte; - pt_element_t __user *ptep_user; + pt_element_t __user *uninitialized_var(ptep_user); gfn_t table_gfn; unsigned index, pt_access, uninitialized_var(pte_access); gpa_t pte_gpa; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4c3fa0f6746..d48ec60ea42 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, struct kvm_vcpu *vcpu) { - vmx_decache_cr3(vcpu); + if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) + vmx_decache_cr3(vcpu); if (!(cr0 & X86_CR0_PG)) { /* From paging/starting to nonpaging */ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index db832fd65ec..13ee258442a 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -71,7 +71,8 @@ #include <asm/stackprotector.h> #include <asm/reboot.h> /* for struct 
machine_ops */ -/*G:010 Welcome to the Guest! +/*G:010 + * Welcome to the Guest! * * The Guest in our tale is a simple creature: identical to the Host but * behaving in simplified but equivalent ways. In particular, the Guest is the @@ -190,15 +191,23 @@ static void lazy_hcall4(unsigned long call, #endif /*G:036 - * When lazy mode is turned off reset the per-cpu lazy mode variable and then - * issue the do-nothing hypercall to flush any stored calls. -:*/ + * When lazy mode is turned off, we issue the do-nothing hypercall to + * flush any stored calls, and call the generic helper to reset the + * per-cpu lazy mode variable. + */ static void lguest_leave_lazy_mmu_mode(void) { hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); paravirt_leave_lazy_mmu(); } +/* + * We also catch the end of context switch; we enter lazy mode for much of + * that too, so again we need to flush here. + * + * (Technically, this is lazy CPU mode, and normally we're in lazy MMU + * mode, but unlike Xen, lguest doesn't care about the difference). + */ static void lguest_end_context_switch(struct task_struct *next) { hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); @@ -391,7 +400,7 @@ static void lguest_load_tr_desc(void) * giant ball of hair. Its entry in the current Intel manual runs to 28 pages. * * This instruction even it has its own Wikipedia entry. The Wikipedia entry - * has been translated into 5 languages. I am not making this up! + * has been translated into 6 languages. I am not making this up! * * We could get funky here and identify ourselves as "GenuineLguest", but * instead we just use the real "cpuid" instruction. Then I pretty much turned @@ -458,7 +467,7 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, /* * PAE systems can mark pages as non-executable. Linux calls this the * NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced - * Virus Protection). We just switch turn if off here, since we don't + * Virus Protection). We just switch it off here, since we don't * support it. */ case 0x80000001: @@ -520,17 +529,16 @@ static unsigned long lguest_read_cr2(void) /* See lguest_set_pte() below. */ static bool cr3_changed = false; +static unsigned long current_cr3; /* * cr3 is the current toplevel pagetable page: the principle is the same as - * cr0. Keep a local copy, and tell the Host when it changes. The only - * difference is that our local copy is in lguest_data because the Host needs - * to set it upon our initial hypercall. + * cr0. Keep a local copy, and tell the Host when it changes. */ static void lguest_write_cr3(unsigned long cr3) { - lguest_data.pgdir = cr3; lazy_hcall1(LHCALL_NEW_PGTABLE, cr3); + current_cr3 = cr3; /* These two page tables are simple, linear, and used during boot */ if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table)) @@ -539,7 +547,7 @@ static void lguest_write_cr3(unsigned long cr3) static unsigned long lguest_read_cr3(void) { - return lguest_data.pgdir; + return current_cr3; } /* cr4 is used to enable and disable PGE, but we don't care. */ @@ -641,7 +649,7 @@ static void lguest_write_cr4(unsigned long val) /* * The Guest calls this after it has set a second-level entry (pte), ie. to map - * a page into a process' address space. Wetell the Host the toplevel and + * a page into a process' address space. We tell the Host the toplevel and * address this corresponds to. The Guest uses one pagetable per process, so * we need to tell the Host which one we're changing (mm->pgd). 
*/ @@ -758,7 +766,7 @@ static void lguest_pmd_clear(pmd_t *pmdp) static void lguest_flush_tlb_single(unsigned long addr) { /* Simply set it to zero: if it was not, it will fault back in. */ - lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0); + lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0); } /* @@ -1140,7 +1148,7 @@ static struct notifier_block paniced = { static __init char *lguest_memory_setup(void) { /* - *The Linux bootloader header contains an "e820" memory map: the + * The Linux bootloader header contains an "e820" memory map: the * Launcher populated the first entry with our memory limit. */ e820_add_region(boot_params.e820_map[0].addr, diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 4f420c2f2d5..6ddfe4fc23c 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S @@ -6,18 +6,22 @@ #include <asm/processor-flags.h> /*G:020 - * Our story starts with the kernel booting into startup_32 in - * arch/x86/kernel/head_32.S. It expects a boot header, which is created by - * the bootloader (the Launcher in our case). + + * Our story starts with the bzImage: booting starts at startup_32 in + * arch/x86/boot/compressed/head_32.S. This merely uncompresses the real + * kernel in place and then jumps into it: startup_32 in + * arch/x86/kernel/head_32.S. Both routines expects a boot header in the %esi + * register, which is created by the bootloader (the Launcher in our case). * * The startup_32 function does very little: it clears the uninitialized global * C variables which we expect to be zero (ie. BSS) and then copies the boot - * header and kernel command line somewhere safe. Finally it checks the - * 'hardware_subarch' field. This was introduced in 2.6.24 for lguest and Xen: - * if it's set to '1' (lguest's assigned number), then it calls us here. + * header and kernel command line somewhere safe, and populates some initial + * page tables. Finally it checks the 'hardware_subarch' field. This was + * introduced in 2.6.24 for lguest and Xen: if it's set to '1' (lguest's + * assigned number), then it calls us here. * * WARNING: be very careful here! We're running at addresses equal to physical - * addesses (around 0), not above PAGE_OFFSET as most code expectes + * addresses (around 0), not above PAGE_OFFSET as most code expects * (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any * data without remembering to subtract __PAGE_OFFSET! * @@ -27,13 +31,18 @@ .section .init.text, "ax", @progbits ENTRY(lguest_entry) /* - * We make the "initialization" hypercall now to tell the Host about - * us, and also find out where it put our page tables. + * We make the "initialization" hypercall now to tell the Host where + * our lguest_data struct is. */ movl $LHCALL_LGUEST_INIT, %eax movl $lguest_data - __PAGE_OFFSET, %ebx int $LGUEST_TRAP_ENTRY + /* Now turn our pagetables on; setup by arch/x86/kernel/head_32.S. */ + movl $LHCALL_NEW_PGTABLE, %eax + movl $(initial_page_table - __PAGE_OFFSET), %ebx + int $LGUEST_TRAP_ENTRY + /* Set up the initial stack so we can run C code. */ movl $(init_thread_union+THREAD_SIZE),%esp @@ -96,12 +105,8 @@ send_interrupts: */ pushl %eax movl $LHCALL_SEND_INTERRUPTS, %eax - /* - * This is a vmcall instruction (same thing that KVM uses). Older - * assembler versions might not know the "vmcall" instruction, so we - * create one manually here. - */ - .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ + /* This is the actual hypercall trap. */ + int $LGUEST_TRAP_ENTRY /* Put eax back the way we found it. 
*/ popl %eax ret diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index f2479f19ddd..b00f6785da7 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -18,8 +18,10 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o lib-y := delay.o lib-y += thunk_$(BITS).o -lib-y += usercopy_$(BITS).o getuser.o putuser.o +lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o lib-y += memcpy_$(BITS).o +lib-$(CONFIG_SMP) += rwlock.o +lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o obj-y += msr.o msr-reg.o msr-reg-export.o @@ -29,7 +31,7 @@ ifeq ($(CONFIG_X86_32),y) lib-y += atomic64_cx8_32.o lib-y += checksum_32.o lib-y += strstr_32.o - lib-y += semaphore_32.o string_32.o + lib-y += string_32.o lib-y += cmpxchg.o ifneq ($(CONFIG_X86_CMPXCHG64),y) lib-y += cmpxchg8b_emu.o atomic64_386_32.o @@ -40,7 +42,6 @@ else lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o lib-y += thunk_64.o clear_page_64.o copy_page_64.o lib-y += memmove_64.o memset_64.o - lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o - lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem_64.o + lib-y += copy_user_64.o copy_user_nocache_64.o lib-y += cmpxchg16b_emu.o endif diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S new file mode 100644 index 00000000000..1cad22139c8 --- /dev/null +++ b/arch/x86/lib/rwlock.S @@ -0,0 +1,44 @@ +/* Slow paths of read/write spinlocks. */ + +#include <linux/linkage.h> +#include <asm/alternative-asm.h> +#include <asm/frame.h> +#include <asm/rwlock.h> + +#ifdef CONFIG_X86_32 +# define __lock_ptr eax +#else +# define __lock_ptr rdi +#endif + +ENTRY(__write_lock_failed) + CFI_STARTPROC + FRAME +0: LOCK_PREFIX + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr) +1: rep; nop + cmpl $WRITE_LOCK_CMP, (%__lock_ptr) + jne 1b + LOCK_PREFIX + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr) + jnz 0b + ENDFRAME + ret + CFI_ENDPROC +END(__write_lock_failed) + +ENTRY(__read_lock_failed) + CFI_STARTPROC + FRAME +0: LOCK_PREFIX + READ_LOCK_SIZE(inc) (%__lock_ptr) +1: rep; nop + READ_LOCK_SIZE(cmp) $1, (%__lock_ptr) + js 1b + LOCK_PREFIX + READ_LOCK_SIZE(dec) (%__lock_ptr) + js 0b + ENDFRAME + ret + CFI_ENDPROC +END(__read_lock_failed) diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S deleted file mode 100644 index 05ea55f7140..00000000000 --- a/arch/x86/lib/rwlock_64.S +++ /dev/null @@ -1,38 +0,0 @@ -/* Slow paths of read/write spinlocks. */ - -#include <linux/linkage.h> -#include <asm/rwlock.h> -#include <asm/alternative-asm.h> -#include <asm/dwarf2.h> - -/* rdi: pointer to rwlock_t */ -ENTRY(__write_lock_failed) - CFI_STARTPROC - LOCK_PREFIX - addl $RW_LOCK_BIAS,(%rdi) -1: rep - nop - cmpl $RW_LOCK_BIAS,(%rdi) - jne 1b - LOCK_PREFIX - subl $RW_LOCK_BIAS,(%rdi) - jnz __write_lock_failed - ret - CFI_ENDPROC -END(__write_lock_failed) - -/* rdi: pointer to rwlock_t */ -ENTRY(__read_lock_failed) - CFI_STARTPROC - LOCK_PREFIX - incl (%rdi) -1: rep - nop - cmpl $1,(%rdi) - js 1b - LOCK_PREFIX - decl (%rdi) - js __read_lock_failed - ret - CFI_ENDPROC -END(__read_lock_failed) diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem.S index 67743977398..5dff5f04246 100644 --- a/arch/x86/lib/rwsem_64.S +++ b/arch/x86/lib/rwsem.S @@ -1,4 +1,51 @@ /* + * x86 semaphore implementation. + * + * (C) Copyright 1999 Linus Torvalds + * + * Portions Copyright 1999 Red Hat, Inc. 
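The new arch/x86/lib/rwlock.S above carries only the contended paths of the biased read/write spinlock: the inline fast path has already adjusted the lock word, so the slow path undoes that adjustment, spins until the lock looks free (writers) or non-negative (readers), and then retries the whole operation. A compact C11-atomics model of the same retry logic, assuming the classic RW_LOCK_BIAS scheme with an illustrative bias value and an empty spin in place of cpu_relax():

#include <stdatomic.h>

#define RW_LOCK_BIAS 0x01000000

/* lock == RW_LOCK_BIAS: free; (0, BIAS): readers; <= 0: writer active */

static void write_lock_failed(atomic_int *lock)
{
	do {
		atomic_fetch_add(lock, RW_LOCK_BIAS);	/* undo the fast path */
		while (atomic_load(lock) != RW_LOCK_BIAS)
			;				/* wait until fully free */
	} while (atomic_fetch_sub(lock, RW_LOCK_BIAS) != RW_LOCK_BIAS);
}

static void read_lock_failed(atomic_int *lock)
{
	do {
		atomic_fetch_add(lock, 1);		/* undo the fast path */
		while (atomic_load(lock) < 1)
			;				/* wait out the writer */
	} while (atomic_fetch_sub(lock, 1) < 1);
}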
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> + */ + +#include <linux/linkage.h> +#include <asm/alternative-asm.h> +#include <asm/dwarf2.h> + +#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg) +#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l) + +#ifdef CONFIG_X86_32 + +/* + * The semaphore operations have a special calling sequence that + * allow us to do a simpler in-line version of them. These routines + * need to convert that sequence back into the C sequence when + * there is contention on the semaphore. + * + * %eax contains the semaphore pointer on entry. Save the C-clobbered + * registers (%eax, %edx and %ecx) except %eax whish is either a return + * value or just clobbered.. + */ + +#define save_common_regs \ + pushl_cfi %ecx; CFI_REL_OFFSET ecx, 0 + +#define restore_common_regs \ + popl_cfi %ecx; CFI_RESTORE ecx + + /* Avoid uglifying the argument copying x86-64 needs to do. */ + .macro movq src, dst + .endm + +#else + +/* * x86-64 rwsem wrappers * * This interfaces the inline asm code to the slow-path @@ -16,12 +63,6 @@ * but %rdi, %rsi, %rcx, %r8-r11 always need saving. */ -#include <linux/linkage.h> -#include <asm/rwlock.h> -#include <asm/alternative-asm.h> -#include <asm/frame.h> -#include <asm/dwarf2.h> - #define save_common_regs \ pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \ pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \ @@ -40,16 +81,18 @@ popq_cfi %rsi; CFI_RESTORE rsi; \ popq_cfi %rdi; CFI_RESTORE rdi +#endif + /* Fix up special calling conventions */ ENTRY(call_rwsem_down_read_failed) CFI_STARTPROC save_common_regs - pushq_cfi %rdx - CFI_REL_OFFSET rdx, 0 + __ASM_SIZE(push,_cfi) %__ASM_REG(dx) + CFI_REL_OFFSET __ASM_REG(dx), 0 movq %rax,%rdi call rwsem_down_read_failed - popq_cfi %rdx - CFI_RESTORE rdx + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) + CFI_RESTORE __ASM_REG(dx) restore_common_regs ret CFI_ENDPROC @@ -67,7 +110,8 @@ ENDPROC(call_rwsem_down_write_failed) ENTRY(call_rwsem_wake) CFI_STARTPROC - decl %edx /* do nothing if still outstanding active readers */ + /* do nothing if still outstanding active readers */ + __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx) jnz 1f save_common_regs movq %rax,%rdi @@ -77,16 +121,15 @@ ENTRY(call_rwsem_wake) CFI_ENDPROC ENDPROC(call_rwsem_wake) -/* Fix up special calling conventions */ ENTRY(call_rwsem_downgrade_wake) CFI_STARTPROC save_common_regs - pushq_cfi %rdx - CFI_REL_OFFSET rdx, 0 + __ASM_SIZE(push,_cfi) %__ASM_REG(dx) + CFI_REL_OFFSET __ASM_REG(dx), 0 movq %rax,%rdi call rwsem_downgrade_wake - popq_cfi %rdx - CFI_RESTORE rdx + __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) + CFI_RESTORE __ASM_REG(dx) restore_common_regs ret CFI_ENDPROC diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S deleted file mode 100644 index 06691daa410..00000000000 --- a/arch/x86/lib/semaphore_32.S +++ /dev/null @@ -1,124 +0,0 @@ -/* - * i386 semaphore implementation. - * - * (C) Copyright 1999 Linus Torvalds - * - * Portions Copyright 1999 Red Hat, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
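The merged rwsem.S above leans on __ASM_SEL-style macros so one source file emits 16/32-bit operands on x86-32 and 32/64-bit operands on x86-64; for example, the call_rwsem_wake hunk becomes decw %dx on one and decl %edx on the other. A small host-side demonstration of how that selection works; the HALF_REG/HALF_SIZE bodies are taken from the hunk above, while the __ASM_SEL fallback and the stringification wrapper exist only for this demo:

#include <stdio.h>

#ifdef __x86_64__
# define __ASM_SEL(a, b)	b	/* 64-bit build: second form */
#else
# define __ASM_SEL(a, b)	a	/* 32-bit build: first form */
#endif

#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)

#define STR_(x)	#x
#define STR(x)	STR_(x)

int main(void)
{
	/* prints "decw %dx" on x86-32 and "decl %edx" on x86-64 */
	printf("%s %%%s\n", STR(__ASM_HALF_SIZE(dec)), STR(__ASM_HALF_REG(dx)));
	return 0;
}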
- * - * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> - */ - -#include <linux/linkage.h> -#include <asm/rwlock.h> -#include <asm/alternative-asm.h> -#include <asm/frame.h> -#include <asm/dwarf2.h> - -/* - * The semaphore operations have a special calling sequence that - * allow us to do a simpler in-line version of them. These routines - * need to convert that sequence back into the C sequence when - * there is contention on the semaphore. - * - * %eax contains the semaphore pointer on entry. Save the C-clobbered - * registers (%eax, %edx and %ecx) except %eax whish is either a return - * value or just clobbered.. - */ - .section .sched.text, "ax" - -/* - * rw spinlock fallbacks - */ -#ifdef CONFIG_SMP -ENTRY(__write_lock_failed) - CFI_STARTPROC - FRAME -2: LOCK_PREFIX - addl $ RW_LOCK_BIAS,(%eax) -1: rep; nop - cmpl $ RW_LOCK_BIAS,(%eax) - jne 1b - LOCK_PREFIX - subl $ RW_LOCK_BIAS,(%eax) - jnz 2b - ENDFRAME - ret - CFI_ENDPROC - ENDPROC(__write_lock_failed) - -ENTRY(__read_lock_failed) - CFI_STARTPROC - FRAME -2: LOCK_PREFIX - incl (%eax) -1: rep; nop - cmpl $1,(%eax) - js 1b - LOCK_PREFIX - decl (%eax) - js 2b - ENDFRAME - ret - CFI_ENDPROC - ENDPROC(__read_lock_failed) - -#endif - -#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM - -/* Fix up special calling conventions */ -ENTRY(call_rwsem_down_read_failed) - CFI_STARTPROC - pushl_cfi %ecx - CFI_REL_OFFSET ecx,0 - pushl_cfi %edx - CFI_REL_OFFSET edx,0 - call rwsem_down_read_failed - popl_cfi %edx - popl_cfi %ecx - ret - CFI_ENDPROC - ENDPROC(call_rwsem_down_read_failed) - -ENTRY(call_rwsem_down_write_failed) - CFI_STARTPROC - pushl_cfi %ecx - CFI_REL_OFFSET ecx,0 - calll rwsem_down_write_failed - popl_cfi %ecx - ret - CFI_ENDPROC - ENDPROC(call_rwsem_down_write_failed) - -ENTRY(call_rwsem_wake) - CFI_STARTPROC - decw %dx /* do nothing if still outstanding active readers */ - jnz 1f - pushl_cfi %ecx - CFI_REL_OFFSET ecx,0 - call rwsem_wake - popl_cfi %ecx -1: ret - CFI_ENDPROC - ENDPROC(call_rwsem_wake) - -/* Fix up special calling conventions */ -ENTRY(call_rwsem_downgrade_wake) - CFI_STARTPROC - pushl_cfi %ecx - CFI_REL_OFFSET ecx,0 - pushl_cfi %edx - CFI_REL_OFFSET edx,0 - call rwsem_downgrade_wake - popl_cfi %edx - popl_cfi %ecx - ret - CFI_ENDPROC - ENDPROC(call_rwsem_downgrade_wake) - -#endif diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index 782b082c9ff..a63efd6bb6a 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S @@ -5,50 +5,41 @@ * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc. * Subject to the GNU public license, v.2. No warranty of any kind. */ +#include <linux/linkage.h> +#include <asm/dwarf2.h> +#include <asm/calling.h> - #include <linux/linkage.h> - #include <asm/dwarf2.h> - #include <asm/calling.h> - #include <asm/rwlock.h> - - /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ - .macro thunk name,func - .globl \name -\name: - CFI_STARTPROC - SAVE_ARGS - call \func - jmp restore - CFI_ENDPROC - .endm - -#ifdef CONFIG_TRACE_IRQFLAGS - /* put return address in rdi (arg1) */ - .macro thunk_ra name,func + /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
*/ + .macro THUNK name, func, put_ret_addr_in_rdi=0 .globl \name \name: CFI_STARTPROC + + /* this one pushes 9 elems, the next one would be %rIP */ SAVE_ARGS - /* SAVE_ARGS pushs 9 elements */ - /* the next element would be the rip */ - movq 9*8(%rsp), %rdi + + .if \put_ret_addr_in_rdi + movq_cfi_restore 9*8, rdi + .endif + call \func jmp restore CFI_ENDPROC .endm - thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller - thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller +#ifdef CONFIG_TRACE_IRQFLAGS + THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1 + THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1 #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC - thunk lockdep_sys_exit_thunk,lockdep_sys_exit + THUNK lockdep_sys_exit_thunk,lockdep_sys_exit #endif - + /* SAVE_ARGS below is used only for the .cfi directives it contains. */ CFI_STARTPROC SAVE_ARGS restore: RESTORE_ARGS - ret + ret CFI_ENDPROC diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c new file mode 100644 index 00000000000..97be9cb5448 --- /dev/null +++ b/arch/x86/lib/usercopy.c @@ -0,0 +1,43 @@ +/* + * User address space access functions. + * + * For licencing details see kernel-base/COPYING + */ + +#include <linux/highmem.h> +#include <linux/module.h> + +/* + * best effort, GUP based copy_from_user() that is NMI-safe + */ +unsigned long +copy_from_user_nmi(void *to, const void __user *from, unsigned long n) +{ + unsigned long offset, addr = (unsigned long)from; + unsigned long size, len = 0; + struct page *page; + void *map; + int ret; + + do { + ret = __get_user_pages_fast(addr, 1, 0, &page); + if (!ret) + break; + + offset = addr & (PAGE_SIZE - 1); + size = min(PAGE_SIZE - offset, n - len); + + map = kmap_atomic(page); + memcpy(to, map+offset, size); + kunmap_atomic(map); + put_page(page); + + len += size; + to += size; + addr += size; + + } while (len < n); + + return len; +} +EXPORT_SYMBOL_GPL(copy_from_user_nmi); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2dbf6bf4c7e..4d09df054e3 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) if (unlikely(error_code & PF_RSVD)) pgtable_bad(regs, error_code, address); - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * If we're in an interrupt, have no user context or are running @@ -1161,11 +1161,11 @@ good_area: if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index d865c4aeec5..bbaaa005bf0 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -28,6 +28,7 @@ #include <linux/poison.h> #include <linux/dma-mapping.h> #include <linux/module.h> +#include <linux/memory.h> #include <linux/memory_hotplug.h> #include <linux/nmi.h> #include <linux/gfp.h> @@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma) } #ifdef CONFIG_X86_UV -#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) - unsigned long memory_block_size_bytes(void) { if (is_uv_system()) { diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c index 704a37ceddd..dab41876cdd 100644 
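The new copy_from_user_nmi() above works in page-sized pieces: each pass pins one page with __get_user_pages_fast(), maps it, copies at most min(PAGE_SIZE - offset, n - len) bytes, and drops the page again, so no single memcpy() straddles a page boundary and the routine can stop cleanly the moment a page cannot be pinned. A userspace model of that chunking loop; pin_page()/unpin_page() are stand-ins for the pinning and kmap steps and cannot fail here, unlike the real code:

#include <string.h>

#define PAGE_SIZE 4096UL

static void *pin_page(unsigned long addr)
{
	return (void *)(addr & ~(PAGE_SIZE - 1));	/* page-aligned base */
}

static void unpin_page(void *page)
{
	(void)page;
}

static unsigned long copy_in_page_chunks(void *to, const void *from, unsigned long n)
{
	unsigned long addr = (unsigned long)from;
	unsigned long len = 0;

	while (len < n) {
		unsigned long offset = addr & (PAGE_SIZE - 1);
		unsigned long size = PAGE_SIZE - offset;
		void *page;

		if (size > n - len)
			size = n - len;	/* last, partial chunk */

		page = pin_page(addr);
		if (!page)
			break;		/* the real helper bails out here */
		memcpy((char *)to + len, (char *)page + offset, size);
		unpin_page(page);

		len += size;
		addr += size;
	}
	return len;	/* bytes actually copied, like the kernel helper */
}

Returning the copied length rather than an error code is what lets the oprofile backtrace hunks further down simply compare the result against sizeof(bufhead).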
--- a/arch/x86/mm/kmemcheck/error.c +++ b/arch/x86/mm/kmemcheck/error.c @@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state, e->trace.entries = e->trace_entries; e->trace.max_entries = ARRAY_SIZE(e->trace_entries); e->trace.skip = 0; - save_stack_trace_regs(&e->trace, regs); + save_stack_trace_regs(regs, &e->trace); /* Round address down to nearest 16 bytes */ shadow_copy = kmemcheck_shadow_lookup(address diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index aa1169392b8..992da5ec5a6 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -8,7 +8,7 @@ #include <linux/range.h> /* Check for already reserved areas */ -static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align) +bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align) { struct memblock_region *r; u64 addr = *addrp, last; @@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) if (addr >= ei_last) continue; *sizep = ei_last - addr; - while (check_with_memblock_reserved_size(&addr, sizep, align)) + while (memblock_x86_check_reserved_size(&addr, sizep, align)) ; if (*sizep) diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index e1d10690921..b0086567271 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c @@ -123,12 +123,11 @@ static int pageattr_test(void) if (print) printk(KERN_INFO "CPA self-test:\n"); - bm = vmalloc((max_pfn_mapped + 7) / 8); + bm = vzalloc((max_pfn_mapped + 7) / 8); if (!bm) { printk(KERN_ERR "CPA Cannot vmalloc bitmap\n"); return -ENOMEM; } - memset(bm, 0, (max_pfn_mapped + 7) / 8); failed += print_split(&sa); srandom32(100); diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c index a5b64ab4cd6..bff89dfe361 100644 --- a/arch/x86/oprofile/backtrace.c +++ b/arch/x86/oprofile/backtrace.c @@ -11,10 +11,11 @@ #include <linux/oprofile.h> #include <linux/sched.h> #include <linux/mm.h> +#include <linux/compat.h> +#include <linux/uaccess.h> + #include <asm/ptrace.h> -#include <asm/uaccess.h> #include <asm/stacktrace.h> -#include <linux/compat.h> static int backtrace_stack(void *data, char *name) { @@ -40,13 +41,13 @@ static struct stacktrace_ops backtrace_ops = { static struct stack_frame_ia32 * dump_user_backtrace_32(struct stack_frame_ia32 *head) { + /* Also check accessibility of one struct frame_head beyond: */ struct stack_frame_ia32 bufhead[2]; struct stack_frame_ia32 *fp; + unsigned long bytes; - /* Also check accessibility of one struct frame_head beyond */ - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) - return NULL; - if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) + bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); + if (bytes != sizeof(bufhead)) return NULL; fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); @@ -87,12 +88,12 @@ x86_backtrace_32(struct pt_regs * const regs, unsigned int depth) static struct stack_frame *dump_user_backtrace(struct stack_frame *head) { + /* Also check accessibility of one struct frame_head beyond: */ struct stack_frame bufhead[2]; + unsigned long bytes; - /* Also check accessibility of one struct stack_frame beyond */ - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) - return NULL; - if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) + bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); + if (bytes != sizeof(bufhead)) return NULL; oprofile_add_trace(bufhead[0].return_address); diff --git 
a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index cf9750004a0..68894fdc034 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy) static int nmi_start(void) { get_online_cpus(); - on_each_cpu(nmi_cpu_start, NULL, 1); ctr_running = 1; + /* make ctr_running visible to the nmi handler: */ + smp_mb(); + on_each_cpu(nmi_cpu_start, NULL, 1); put_online_cpus(); return 0; } @@ -504,15 +506,18 @@ static int nmi_setup(void) nmi_enabled = 0; ctr_running = 0; - barrier(); + /* make variables visible to the nmi handler: */ + smp_mb(); err = register_die_notifier(&profile_exceptions_nb); if (err) goto fail; get_online_cpus(); register_cpu_notifier(&oprofile_cpu_nb); - on_each_cpu(nmi_cpu_setup, NULL, 1); nmi_enabled = 1; + /* make nmi_enabled visible to the nmi handler: */ + smp_mb(); + on_each_cpu(nmi_cpu_setup, NULL, 1); put_online_cpus(); return 0; @@ -531,7 +536,8 @@ static void nmi_shutdown(void) nmi_enabled = 0; ctr_running = 0; put_online_cpus(); - barrier(); + /* make variables visible to the nmi handler: */ + smp_mb(); unregister_die_notifier(&profile_exceptions_nb); msrs = &get_cpu_var(cpu_msrs); model->shutdown(msrs); diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 9fd8a567fe1..9cbb710dc94 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off) return 0; } +/* + * This runs only on the current cpu. We try to find an LVT offset and + * setup the local APIC. For this we must disable preemption. On + * success we initialize all nodes with this offset. This updates then + * the offset in the IBS_CTL per-node msr. The per-core APIC setup of + * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_- + * amd_cpu_shutdown() using the new offset. + */ static int force_ibs_eilvt_setup(void) { int offset; int ret; - /* - * find the next free available EILVT entry, skip offset 0, - * pin search to this cpu - */ preempt_disable(); + /* find the next free available EILVT entry, skip offset 0 */ for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { if (get_eilvt(offset)) break; diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 0972315c386..68c3c139520 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -188,7 +188,7 @@ static bool resource_contains(struct resource *res, resource_size_t point) return false; } -static void coalesce_windows(struct pci_root_info *info, int type) +static void coalesce_windows(struct pci_root_info *info, unsigned long type) { int i, j; struct resource *res1, *res2; diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 8214724ce54..1017c7bee38 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -1,8 +1,13 @@ /* - * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux - * x86 PCI core to support the Xen PCI Frontend + * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and + * initial domain support. We also handle the DSDT _PRT callbacks for GSI's + * used in HVM and initial domain mode (PV does not parse ACPI, so it has no + * concept of GSIs). Under PV we hook under the pnbbios API for IRQs and + * 0xcf8 PCI configuration read/write. 
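The nmi_int.c hunks above reorder nmi_start() and nmi_setup(): ctr_running/nmi_enabled are now set and published with smp_mb() before on_each_cpu() kicks the other processors, so the NMI handler can never observe a stale flag. A C11 sketch of that publish-then-notify ordering; the atomics and the kick_all_cpus() callback are userspace stand-ins, not kernel APIs:

#include <stdatomic.h>

static atomic_int ctr_running;

static void nmi_handler(void)
{
	if (!atomic_load_explicit(&ctr_running, memory_order_acquire))
		return;			/* profiling not started yet */
	/* ... handle the counter overflow ... */
}

static void start_profiling(void (*kick_all_cpus)(void (*handler)(void)))
{
	atomic_store_explicit(&ctr_running, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	kick_all_cpus(nmi_handler);	/* handlers now see ctr_running == 1 */
}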
* * Author: Ryan Wilson <hap9@epoch.ncsc.mil> + * Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> + * Stefano Stabellini <stefano.stabellini@eu.citrix.com> */ #include <linux/module.h> #include <linux/init.h> @@ -19,22 +24,53 @@ #include <xen/events.h> #include <asm/xen/pci.h> +static int xen_pcifront_enable_irq(struct pci_dev *dev) +{ + int rc; + int share = 1; + int pirq; + u8 gsi; + + rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); + if (rc < 0) { + dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", + rc); + return rc; + } + /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/ + pirq = gsi; + + if (gsi < NR_IRQS_LEGACY) + share = 0; + + rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront"); + if (rc < 0) { + dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n", + gsi, pirq, rc); + return rc; + } + + dev->irq = rc; + dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); + return 0; +} + #ifdef CONFIG_ACPI -static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, - int trigger, int polarity) +static int xen_register_pirq(u32 gsi, int gsi_override, int triggering, + bool set_pirq) { - int rc, irq; + int rc, pirq = -1, irq = -1; struct physdev_map_pirq map_irq; int shareable = 0; char *name; - if (!xen_hvm_domain()) - return -1; + if (set_pirq) + pirq = gsi; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; - map_irq.pirq = -1; + map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { @@ -42,7 +78,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, return -1; } - if (trigger == ACPI_EDGE_SENSITIVE) { + if (triggering == ACPI_EDGE_SENSITIVE) { shareable = 0; name = "ioapic-edge"; } else { @@ -50,12 +86,63 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, name = "ioapic-level"; } + if (gsi_override >= 0) + gsi = gsi_override; + irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name); + if (irq < 0) + goto out; - printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); + printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi); +out: + return irq; +} + +static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, + int trigger, int polarity) +{ + if (!xen_hvm_domain()) + return -1; + + return xen_register_pirq(gsi, -1 /* no GSI override */, trigger, + false /* no mapping of GSI to PIRQ */); +} + +#ifdef CONFIG_XEN_DOM0 +static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity) +{ + int rc, irq; + struct physdev_setup_gsi setup_gsi; + + if (!xen_pv_domain()) + return -1; + + printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n", + gsi, triggering, polarity); + + irq = xen_register_pirq(gsi, gsi_override, triggering, true); + + setup_gsi.gsi = gsi; + setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1); + setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 
0 : 1); + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi); + if (rc == -EEXIST) + printk(KERN_INFO "Already setup the GSI :%d\n", gsi); + else if (rc) { + printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n", + gsi, rc); + } return irq; } + +static int acpi_register_gsi_xen(struct device *dev, u32 gsi, + int trigger, int polarity) +{ + return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity); +} +#endif #endif #if defined(CONFIG_PCI_MSI) @@ -65,6 +152,43 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, struct xen_pci_frontend_ops *xen_pci_frontend; EXPORT_SYMBOL_GPL(xen_pci_frontend); +static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int irq, ret, i; + struct msi_desc *msidesc; + int *v; + + v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); + if (!v) + return -ENOMEM; + + if (type == PCI_CAP_ID_MSIX) + ret = xen_pci_frontend_enable_msix(dev, v, nvec); + else + ret = xen_pci_frontend_enable_msi(dev, v); + if (ret) + goto error; + i = 0; + list_for_each_entry(msidesc, &dev->msi_list, list) { + irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, + (type == PCI_CAP_ID_MSIX) ? + "pcifront-msi-x" : + "pcifront-msi", + DOMID_SELF); + if (irq < 0) + goto free; + i++; + } + kfree(v); + return 0; + +error: + dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); +free: + kfree(v); + return ret; +} + #define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \ MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0)) @@ -123,67 +247,6 @@ error: return -ENODEV; } -/* - * For MSI interrupts we have to use drivers/xen/event.s functions to - * allocate an irq_desc and setup the right */ - - -static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - int irq, ret, i; - struct msi_desc *msidesc; - int *v; - - v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL); - if (!v) - return -ENOMEM; - - if (type == PCI_CAP_ID_MSIX) - ret = xen_pci_frontend_enable_msix(dev, v, nvec); - else - ret = xen_pci_frontend_enable_msi(dev, v); - if (ret) - goto error; - i = 0; - list_for_each_entry(msidesc, &dev->msi_list, list) { - irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, - (type == PCI_CAP_ID_MSIX) ? - "pcifront-msi-x" : - "pcifront-msi", - DOMID_SELF); - if (irq < 0) - goto free; - i++; - } - kfree(v); - return 0; - -error: - dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); -free: - kfree(v); - return ret; -} - -static void xen_teardown_msi_irqs(struct pci_dev *dev) -{ - struct msi_desc *msidesc; - - msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); - if (msidesc->msi_attrib.is_msix) - xen_pci_frontend_disable_msix(dev); - else - xen_pci_frontend_disable_msi(dev); - - /* Free the IRQ's and the msidesc using the generic code. 
*/ - default_teardown_msi_irqs(dev); -} - -static void xen_teardown_msi_irq(unsigned int irq) -{ - xen_destroy_irq(irq); -} - #ifdef CONFIG_XEN_DOM0 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { @@ -242,45 +305,28 @@ out: return ret; } #endif -#endif -static int xen_pcifront_enable_irq(struct pci_dev *dev) +static void xen_teardown_msi_irqs(struct pci_dev *dev) { - int rc; - int share = 1; - int pirq; - u8 gsi; - - rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); - if (rc < 0) { - dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", - rc); - return rc; - } - - rc = xen_allocate_pirq_gsi(gsi); - if (rc < 0) { - dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n", - gsi, rc); - return rc; - } - pirq = rc; + struct msi_desc *msidesc; - if (gsi < NR_IRQS_LEGACY) - share = 0; + msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); + if (msidesc->msi_attrib.is_msix) + xen_pci_frontend_disable_msix(dev); + else + xen_pci_frontend_disable_msi(dev); - rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront"); - if (rc < 0) { - dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n", - gsi, pirq, rc); - return rc; - } + /* Free the IRQ's and the msidesc using the generic code. */ + default_teardown_msi_irqs(dev); +} - dev->irq = rc; - dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); - return 0; +static void xen_teardown_msi_irq(unsigned int irq) +{ + xen_destroy_irq(irq); } +#endif + int __init pci_xen_init(void) { if (!xen_pv_domain() || xen_initial_domain()) @@ -327,82 +373,13 @@ int __init pci_xen_hvm_init(void) } #ifdef CONFIG_XEN_DOM0 -static int xen_register_pirq(u32 gsi, int triggering) -{ - int rc, pirq, irq = -1; - struct physdev_map_pirq map_irq; - int shareable = 0; - char *name; - - if (!xen_pv_domain()) - return -1; - - if (triggering == ACPI_EDGE_SENSITIVE) { - shareable = 0; - name = "ioapic-edge"; - } else { - shareable = 1; - name = "ioapic-level"; - } - - pirq = xen_allocate_pirq_gsi(gsi); - if (pirq < 0) - goto out; - - irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name); - if (irq < 0) - goto out; - - printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq); - - map_irq.domid = DOMID_SELF; - map_irq.type = MAP_PIRQ_TYPE_GSI; - map_irq.index = gsi; - map_irq.pirq = pirq; - - rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); - if (rc) { - printk(KERN_WARNING "xen map irq failed %d\n", rc); - return -1; - } - -out: - return irq; -} - -static int xen_register_gsi(u32 gsi, int triggering, int polarity) -{ - int rc, irq; - struct physdev_setup_gsi setup_gsi; - - if (!xen_pv_domain()) - return -1; - - printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n", - gsi, triggering, polarity); - - irq = xen_register_pirq(gsi, triggering); - - setup_gsi.gsi = gsi; - setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1); - setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1); - - rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi); - if (rc == -EEXIST) - printk(KERN_INFO "Already setup the GSI :%d\n", gsi); - else if (rc) { - printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n", - gsi, rc); - } - - return irq; -} - static __init void xen_setup_acpi_sci(void) { int rc; int trigger, polarity; int gsi = acpi_sci_override_gsi; + int irq = -1; + int gsi_override = -1; if (!gsi) return; @@ -415,51 +392,43 @@ static __init void xen_setup_acpi_sci(void) } trigger = trigger ? 
ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; - + printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d " "polarity=%d\n", gsi, trigger, polarity); - gsi = xen_register_gsi(gsi, trigger, polarity); + /* Before we bind the GSI to a Linux IRQ, check whether + * we need to override it with bus_irq (IRQ) value. Usually for + * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so: + * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level) + * but there are oddballs where the IRQ != GSI: + * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level) + * which ends up being: gsi_to_irq[9] == 20 + * (which is what acpi_gsi_to_irq ends up calling when starting the + * the ACPI interpreter and keels over since IRQ 9 has not been + * setup as we had setup IRQ 20 for it). + */ + if (acpi_gsi_to_irq(gsi, &irq) == 0) { + /* Use the provided value if it's valid. */ + if (irq >= 0) + gsi_override = irq; + } + + gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity); printk(KERN_INFO "xen: acpi sci %d\n", gsi); return; } -static int acpi_register_gsi_xen(struct device *dev, u32 gsi, - int trigger, int polarity) +int __init pci_xen_initial_domain(void) { - return xen_register_gsi(gsi, trigger, polarity); -} + int irq; -static int __init pci_xen_initial_domain(void) -{ #ifdef CONFIG_PCI_MSI x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; #endif xen_setup_acpi_sci(); __acpi_register_gsi = acpi_register_gsi_xen; - - return 0; -} - -void __init xen_setup_pirqs(void) -{ - int pirq, irq; - - pci_xen_initial_domain(); - - if (0 == nr_ioapics) { - for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { - pirq = xen_allocate_pirq_gsi(irq); - if (WARN(pirq < 0, - "Could not allocate PIRQ for legacy interrupt\n")) - break; - irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic"); - } - return; - } - /* Pre-allocate legacy irqs */ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) { int trigger, polarity; @@ -467,13 +436,17 @@ void __init xen_setup_pirqs(void) if (acpi_get_override_irq(irq, &trigger, &polarity) == -1) continue; - xen_register_pirq(irq, - trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE); + xen_register_pirq(irq, -1 /* no GSI override */, + trigger ? 
ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE, + true /* Map GSI to PIRQ */); } + if (0 == nr_ioapics) { + for (irq = 0; irq < NR_IRQS_LEGACY; irq++) + xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic"); + } + return 0; } -#endif -#ifdef CONFIG_XEN_DOM0 struct xen_device_domain_owner { domid_t domain; struct pci_dev *dev; diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 11e766deb7b..3ae4128013e 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -89,26 +89,50 @@ early_param("add_efi_memmap", setup_add_efi_memmap); static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { - return efi_call_virt2(get_time, tm, tc); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&rtc_lock, flags); + status = efi_call_virt2(get_time, tm, tc); + spin_unlock_irqrestore(&rtc_lock, flags); + return status; } static efi_status_t virt_efi_set_time(efi_time_t *tm) { - return efi_call_virt1(set_time, tm); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&rtc_lock, flags); + status = efi_call_virt1(set_time, tm); + spin_unlock_irqrestore(&rtc_lock, flags); + return status; } static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) { - return efi_call_virt3(get_wakeup_time, - enabled, pending, tm); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&rtc_lock, flags); + status = efi_call_virt3(get_wakeup_time, + enabled, pending, tm); + spin_unlock_irqrestore(&rtc_lock, flags); + return status; } static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) { - return efi_call_virt2(set_wakeup_time, - enabled, tm); + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&rtc_lock, flags); + status = efi_call_virt2(set_wakeup_time, + enabled, tm); + spin_unlock_irqrestore(&rtc_lock, flags); + return status; } static efi_status_t virt_efi_get_variable(efi_char16_t *name, @@ -208,11 +232,14 @@ static efi_status_t __init phys_efi_set_virtual_address_map( static efi_status_t __init phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) { + unsigned long flags; efi_status_t status; + spin_lock_irqsave(&rtc_lock, flags); efi_call_phys_prelog(); status = efi_call_phys2(efi_phys.get_time, tm, tc); efi_call_phys_epilog(); + spin_unlock_irqrestore(&rtc_lock, flags); return status; } @@ -354,14 +381,31 @@ void __init efi_reserve_boot_services(void) for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { efi_memory_desc_t *md = p; - unsigned long long start = md->phys_addr; - unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; + u64 start = md->phys_addr; + u64 size = md->num_pages << EFI_PAGE_SHIFT; if (md->type != EFI_BOOT_SERVICES_CODE && md->type != EFI_BOOT_SERVICES_DATA) continue; - - memblock_x86_reserve_range(start, start + size, "EFI Boot"); + /* Only reserve where possible: + * - Not within any already allocated areas + * - Not over any memory area (really needed, if above?) 
+ * - Not within any part of the kernel + * - Not the bios reserved area + */ + if ((start+size >= virt_to_phys(_text) + && start <= virt_to_phys(_end)) || + !e820_all_mapped(start, start+size, E820_RAM) || + memblock_x86_check_reserved_size(&start, &size, + 1<<EFI_PAGE_SHIFT)) { + /* Could not reserve, skip it */ + md->num_pages = 0; + memblock_dbg(PFX "Could not reserve boot range " + "[0x%010llx-0x%010llx]\n", + start, start+size-1); + } else + memblock_x86_reserve_range(start, start+size, + "EFI Boot"); } } @@ -378,6 +422,10 @@ static void __init efi_free_boot_services(void) md->type != EFI_BOOT_SERVICES_DATA) continue; + /* Could not reserve boot area */ + if (!size) + continue; + free_bootmem_late(start, size); } } @@ -527,9 +575,6 @@ void __init efi_init(void) x86_platform.set_wallclock = efi_set_rtc_mmss; #endif - /* Setup for EFI runtime service */ - reboot_type = BOOT_EFI; - #if EFI_DEBUG print_efi_memmap(); #endif diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 17c565de3d6..a6575b949b1 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -18,5 +18,5 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o - +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index dd7b88f2ec7..53257421082 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1033,6 +1033,13 @@ static void xen_machine_halt(void) xen_reboot(SHUTDOWN_poweroff); } +static void xen_machine_power_off(void) +{ + if (pm_power_off) + pm_power_off(); + xen_reboot(SHUTDOWN_poweroff); +} + static void xen_crash_shutdown(struct pt_regs *regs) { xen_reboot(SHUTDOWN_crash); @@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void) static const struct machine_ops xen_machine_ops __initconst = { .restart = xen_restart, .halt = xen_machine_halt, - .power_off = xen_machine_halt, + .power_off = xen_machine_power_off, .shutdown = xen_machine_halt, .crash_shutdown = xen_crash_shutdown, .emergency_restart = xen_emergency_restart, @@ -1241,6 +1248,14 @@ asmlinkage void __init xen_start_kernel(void) if (pci_xen) x86_init.pci.arch_init = pci_xen_init; } else { + const struct dom0_vga_console_info *info = + (void *)((char *)xen_start_info + + xen_start_info->console.dom0.info_off); + + xen_init_vga(info, xen_start_info->console.dom0.info_size); + xen_start_info->console.domU.mfn = 0; + xen_start_info->console.domU.evtchn = 0; + /* Make sure ACS will be enabled */ pci_request_acs(); } diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index dc708dcc62f..0ccccb67a99 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -59,6 +59,7 @@ #include <asm/page.h> #include <asm/init.h> #include <asm/pat.h> +#include <asm/smp.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> @@ -1231,7 +1232,11 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, { struct { struct mmuext_op op; +#ifdef CONFIG_SMP + DECLARE_BITMAP(mask, num_processors); +#else DECLARE_BITMAP(mask, NR_CPUS); +#endif } *args; struct multicall_space mcs; @@ -1599,6 +1604,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { pte_t pte; +#ifdef CONFIG_X86_32 + if (pfn > max_pfn_mapped) + max_pfn_mapped = pfn; +#endif + if (!pte_none(pte_page[pteidx])) continue; @@ -1766,7 +1776,9 @@ pgd_t * __init 
xen_setup_kernel_pagetable(pgd_t *pgd, initial_kernel_pmd = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + + xen_start_info->nr_pt_frames * PAGE_SIZE + + 512*1024); kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 8bff7e7c290..1b2b73ff0a6 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args) unsigned argidx = roundup(b->argidx, sizeof(u64)); BUG_ON(preemptible()); - BUG_ON(b->argidx > MC_ARGS); + BUG_ON(b->argidx >= MC_ARGS); if (b->mcidx == MC_BATCH || - (argidx + args) > MC_ARGS) { + (argidx + args) >= MC_ARGS) { mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS); xen_mc_flush(); argidx = roundup(b->argidx, sizeof(u64)); @@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args) ret.args = &b->args[argidx]; b->argidx = argidx + args; - BUG_ON(b->argidx > MC_ARGS); + BUG_ON(b->argidx >= MC_ARGS); return ret; } @@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) struct multicall_space ret = { NULL, NULL }; BUG_ON(preemptible()); - BUG_ON(b->argidx > MC_ARGS); + BUG_ON(b->argidx >= MC_ARGS); if (b->mcidx == 0) return ret; @@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) if (b->entries[b->mcidx - 1].op != op) return ret; - if ((b->argidx + size) > MC_ARGS) + if ((b->argidx + size) >= MC_ARGS) return ret; ret.mc = &b->entries[b->mcidx - 1]; ret.args = &b->args[b->argidx]; b->argidx += size; - BUG_ON(b->argidx > MC_ARGS); + BUG_ON(b->argidx >= MC_ARGS); return ret; } diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c index 25c52f94a27..ffcf2615640 100644 --- a/arch/x86/xen/platform-pci-unplug.c +++ b/arch/x86/xen/platform-pci-unplug.c @@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug); #ifdef CONFIG_XEN_PVHVM static int xen_emul_unplug; -static int __init check_platform_magic(void) +static int check_platform_magic(void) { short magic; char protocol; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index be1a464f6d6..60aeeb56948 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -227,11 +227,7 @@ char * __init xen_memory_setup(void) memcpy(map_raw, map, sizeof(map)); e820.nr_map = 0; -#ifdef CONFIG_X86_32 xen_extra_mem_start = mem_end; -#else - xen_extra_mem_start = max((1ULL << 32), mem_end); -#endif for (i = 0; i < memmap.nr_entries; i++) { unsigned long long end; @@ -266,6 +262,12 @@ char * __init xen_memory_setup(void) if (map[i].size > 0) e820_add_region(map[i].addr, map[i].size, map[i].type); } + /* Align the balloon area so that max_low_pfn does not get set + * to be at the _end_ of the PCI gap at the far end (fee01000). + * Note that xen_extra_mem_start gets set in the loop above to be + * past the last E820 region. 
*/ + if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32))) + xen_extra_mem_start = (1ULL<<32); /* * In domU, the ISA region is normal, usable memory, but we diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 41038c01de4..b4533a86d7e 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void) static void __init xen_smp_prepare_cpus(unsigned int max_cpus) { unsigned cpu; + unsigned int i; xen_init_lock_cpu(0); smp_store_cpu_info(0); cpu_data(0).x86_max_cores = 1; + + for_each_possible_cpu(i) { + zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); + zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); + zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); + } set_cpu_sibling_map(0); if (xen_smp_intr_init(0)) diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c new file mode 100644 index 00000000000..1cd7f4d11e2 --- /dev/null +++ b/arch/x86/xen/vga.c @@ -0,0 +1,67 @@ +#include <linux/screen_info.h> +#include <linux/init.h> + +#include <asm/bootparam.h> +#include <asm/setup.h> + +#include <xen/interface/xen.h> + +#include "xen-ops.h" + +void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) +{ + struct screen_info *screen_info = &boot_params.screen_info; + + /* This is drawn from a dump from vgacon:startup in + * standard Linux. */ + screen_info->orig_video_mode = 3; + screen_info->orig_video_isVGA = 1; + screen_info->orig_video_lines = 25; + screen_info->orig_video_cols = 80; + screen_info->orig_video_ega_bx = 3; + screen_info->orig_video_points = 16; + screen_info->orig_y = screen_info->orig_video_lines - 1; + + switch (info->video_type) { + case XEN_VGATYPE_TEXT_MODE_3: + if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3) + + sizeof(info->u.text_mode_3)) + break; + screen_info->orig_video_lines = info->u.text_mode_3.rows; + screen_info->orig_video_cols = info->u.text_mode_3.columns; + screen_info->orig_x = info->u.text_mode_3.cursor_x; + screen_info->orig_y = info->u.text_mode_3.cursor_y; + screen_info->orig_video_points = + info->u.text_mode_3.font_height; + break; + + case XEN_VGATYPE_VESA_LFB: + if (size < offsetof(struct dom0_vga_console_info, + u.vesa_lfb.gbl_caps)) + break; + screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB; + screen_info->lfb_width = info->u.vesa_lfb.width; + screen_info->lfb_height = info->u.vesa_lfb.height; + screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel; + screen_info->lfb_base = info->u.vesa_lfb.lfb_base; + screen_info->lfb_size = info->u.vesa_lfb.lfb_size; + screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line; + screen_info->red_size = info->u.vesa_lfb.red_size; + screen_info->red_pos = info->u.vesa_lfb.red_pos; + screen_info->green_size = info->u.vesa_lfb.green_size; + screen_info->green_pos = info->u.vesa_lfb.green_pos; + screen_info->blue_size = info->u.vesa_lfb.blue_size; + screen_info->blue_pos = info->u.vesa_lfb.blue_pos; + screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size; + screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos; + if (size >= offsetof(struct dom0_vga_console_info, + u.vesa_lfb.gbl_caps) + + sizeof(info->u.vesa_lfb.gbl_caps)) + screen_info->capabilities = info->u.vesa_lfb.gbl_caps; + if (size >= offsetof(struct dom0_vga_console_info, + u.vesa_lfb.mode_attrs) + + sizeof(info->u.vesa_lfb.mode_attrs)) + screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs; + break; + } +} diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 97dfdc8757b..b095739ccd4 100644 
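xen_init_vga() above treats dom0_vga_console_info as an extensible structure: optional trailing fields such as gbl_caps and mode_attrs are consumed only when the size the hypervisor passed in actually covers them, checked with offsetof() plus the field size. A generic sketch of that capability check; struct ext_info and its fields are invented for the example, only the guarding pattern corresponds to the vga.c code:

#include <stddef.h>
#include <string.h>

struct ext_info {
	int width;
	int height;
	int caps;	/* added in a later revision of the interface */
};

static int get_caps(const void *buf, size_t size, int *caps)
{
	struct ext_info info = { 0 };

	/* the field exists only if the producer's copy is big enough */
	if (size < offsetof(struct ext_info, caps) + sizeof(info.caps))
		return 0;

	memcpy(&info, buf, size < sizeof(info) ? size : sizeof(info));
	*caps = info.caps;
	return 1;
}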
--- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -88,6 +88,17 @@ static inline void xen_uninit_lock_cpu(int cpu) } #endif +struct dom0_vga_console_info; + +#ifdef CONFIG_XEN_DOM0 +void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); +#else +static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, + size_t size) +{ +} +#endif + /* Declare an asm function, along with symbols needed to make it inlineable */ #define DECL_ASM(ret, name, ...) \ diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 5d43c1f8ada..c346ccdce0d 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -80,18 +80,7 @@ config XTENSA_UNALIGNED_USER Say Y here to enable unaligned memory access in user space. -config PREEMPT - bool "Preemptible Kernel" - help - This option reduces the latency of the kernel when reacting to - real-time or interactive events by allowing a low priority process to - be preempted even if it is in kernel mode executing a system call. - Unfortunately the kernel code has some race conditions if both - CONFIG_SMP and CONFIG_PREEMPT are enabled, so this option is - currently disabled if you are building an SMP kernel. - - Say Y here if you are building a kernel for a desktop, embedded - or real-time system. Say N if you are unsure. +source "kernel/Kconfig.preempt" config MATH_EMULATION bool "Math emulation" |
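The xen-ops.h hunk above follows the usual kernel convention for optional features: a real prototype when CONFIG_XEN_DOM0 is set and an empty static inline otherwise, so callers such as the xen_start_kernel() hunk earlier need no #ifdefs of their own. A generic sketch of that real/stub pair; CONFIG_MY_FEATURE and my_feature_init() are hypothetical names used only to show the shape of the pattern:

#include <stddef.h>

#ifdef CONFIG_MY_FEATURE
void my_feature_init(const void *info, size_t size);
#else
static inline void my_feature_init(const void *info, size_t size)
{
	/* feature compiled out: every call site becomes a no-op */
	(void)info;
	(void)size;
}
#endif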