Diffstat (limited to 'arch')
1085 files changed, 21226 insertions, 19604 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index ded747c7b74..f1cf895c040 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -207,9 +207,6 @@ config HAVE_DMA_ATTRS config HAVE_DMA_CONTIGUOUS bool -config USE_GENERIC_SMP_HELPERS - bool - config GENERIC_SMP_IDLE_THREAD bool diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 35a300d4a9f..135c674eaf9 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -1,6 +1,7 @@ config ALPHA bool default y + select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_AOUT select HAVE_IDE select HAVE_OPROFILE @@ -522,7 +523,6 @@ config ARCH_MAY_HAVE_PC_FDC config SMP bool "Symmetric multi-processing support" depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL - select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h index bc2a0daf2d9..aab14a019c2 100644 --- a/arch/alpha/include/asm/pgalloc.h +++ b/arch/alpha/include/asm/pgalloc.h @@ -72,7 +72,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address) if (!pte) return NULL; page = virt_to_page(pte); - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } return page; } diff --git a/arch/alpha/include/uapi/asm/errno.h b/arch/alpha/include/uapi/asm/errno.h index e5f29ca2818..17f92aa76b2 100644 --- a/arch/alpha/include/uapi/asm/errno.h +++ b/arch/alpha/include/uapi/asm/errno.h @@ -43,7 +43,7 @@ #define EUSERS 68 /* Too many users */ #define EDQUOT 69 /* Quota exceeded */ -#define ESTALE 70 /* Stale NFS file handle */ +#define ESTALE 70 /* Stale file handle */ #define EREMOTE 71 /* Object is remote */ #define ENOLCK 77 /* No record locks available */ diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 467de010ea7..e3a1491d507 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -81,6 +81,8 @@ #define SO_SELECT_ERR_QUEUE 45 -#define SO_BUSY_POLL 46 +#define SO_BUSY_POLL 46 + +#define SO_MAX_PACING_RATE 47 #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 5ede5460c80..2ee0c9bfd03 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -125,7 +125,6 @@ config ARC_PLAT_NEEDS_CPU_TO_DMA config SMP bool "Symmetric Multi-Processing (Incomplete)" default n - select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h index 9998dc846eb..e8993a2be6c 100644 --- a/arch/arc/include/asm/mach_desc.h +++ b/arch/arc/include/asm/mach_desc.h @@ -51,22 +51,12 @@ struct machine_desc { /* * Current machine - only accessible during boot. 
*/ -extern struct machine_desc *machine_desc; +extern const struct machine_desc *machine_desc; /* * Machine type table - also only accessible during boot */ -extern struct machine_desc __arch_info_begin[], __arch_info_end[]; -#define for_each_machine_desc(p) \ - for (p = __arch_info_begin; p < __arch_info_end; p++) - -static inline struct machine_desc *default_machine_desc(void) -{ - /* the default machine is the last one linked in */ - if (__arch_info_end - 1 < __arch_info_begin) - return NULL; - return __arch_info_end - 1; -} +extern const struct machine_desc __arch_info_begin[], __arch_info_end[]; /* * Set of macros to define architecture features. @@ -81,7 +71,6 @@ __attribute__((__section__(".arch.info.init"))) = { \ #define MACHINE_END \ }; -extern struct machine_desc *setup_machine_fdt(void *dt); -extern void __init copy_devtree(void); +extern const struct machine_desc *setup_machine_fdt(void *dt); #endif diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h index 36a9f20c21a..81208bfd9dc 100644 --- a/arch/arc/include/asm/pgalloc.h +++ b/arch/arc/include/asm/pgalloc.h @@ -105,11 +105,16 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { pgtable_t pte_pg; + struct page *page; pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte()); - if (pte_pg) { - memzero((void *)pte_pg, PTRS_PER_PTE * 4); - pgtable_page_ctor(virt_to_page(pte_pg)); + if (!pte_pg) + return 0; + memzero((void *)pte_pg, PTRS_PER_PTE * 4); + page = virt_to_page(pte_pg); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return 0; } return pte_pg; diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h deleted file mode 100644 index 692d0d0789a..00000000000 --- a/arch/arc/include/asm/prom.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef _ASM_ARC_PROM_H_ -#define _ASM_ARC_PROM_H_ - -#define HAVE_ARCH_DEVTREE_FIXUPS - -#endif diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c index 2340af0e1d6..b6dc4e21fd3 100644 --- a/arch/arc/kernel/devtree.c +++ b/arch/arc/kernel/devtree.c @@ -14,10 +14,22 @@ #include <linux/memblock.h> #include <linux/of.h> #include <linux/of_fdt.h> -#include <asm/prom.h> #include <asm/clk.h> #include <asm/mach_desc.h> +static const void * __init arch_get_next_mach(const char *const **match) +{ + static const struct machine_desc *mdesc = __arch_info_begin; + const struct machine_desc *m = mdesc; + + if (m >= __arch_info_end) + return NULL; + + mdesc++; + *match = m->dt_compat; + return m; +} + /** * setup_machine_fdt - Machine setup when an dtb was passed to the kernel * @dt: virtual address pointer to dt blob @@ -25,93 +37,24 @@ * If a dtb was passed to the kernel, then use it to choose the correct * machine_desc and to setup the system. 
*/ -struct machine_desc * __init setup_machine_fdt(void *dt) +const struct machine_desc * __init setup_machine_fdt(void *dt) { - struct boot_param_header *devtree = dt; - struct machine_desc *mdesc = NULL, *mdesc_best = NULL; - unsigned int score, mdesc_score = ~1; + const struct machine_desc *mdesc; unsigned long dt_root; - const char *model, *compat; void *clk; - char manufacturer[16]; unsigned long len; - /* check device tree validity */ - if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) + if (!early_init_dt_scan(dt)) return NULL; - initial_boot_params = devtree; - dt_root = of_get_flat_dt_root(); - - /* - * The kernel could be multi-platform enabled, thus could have many - * "baked-in" machine descriptors. Search thru all for the best - * "compatible" string match. - */ - for_each_machine_desc(mdesc) { - score = of_flat_dt_match(dt_root, mdesc->dt_compat); - if (score > 0 && score < mdesc_score) { - mdesc_best = mdesc; - mdesc_score = score; - } - } - if (!mdesc_best) { - const char *prop; - long size; - - pr_err("\n unrecognized device tree list:\n[ "); - - prop = of_get_flat_dt_prop(dt_root, "compatible", &size); - if (prop) { - while (size > 0) { - printk("'%s' ", prop); - size -= strlen(prop) + 1; - prop += strlen(prop) + 1; - } - } - printk("]\n\n"); - + mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach); + if (!mdesc) machine_halt(); - } - - /* compat = "<manufacturer>,<model>" */ - compat = mdesc_best->dt_compat[0]; - - model = strchr(compat, ','); - if (model) - model++; - - strlcpy(manufacturer, compat, model ? model - compat : strlen(compat)); - - pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer); - - /* Retrieve various information from the /chosen node */ - of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); - - /* Initialize {size,address}-cells info */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - - /* Setup memory, calling early_init_dt_add_memory_arch */ - of_scan_flat_dt(early_init_dt_scan_memory, NULL); + dt_root = of_get_flat_dt_root(); clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len); if (clk) arc_set_core_freq(of_read_ulong(clk, len/4)); - return mdesc_best; -} - -/* - * Copy the flattened DT out of .init since unflattening doesn't copy strings - * and the normal DT APIs refs them from orig flat DT - */ -void __init copy_devtree(void) -{ - void *alloc = early_init_dt_alloc_memory_arch( - be32_to_cpu(initial_boot_params->totalsize), 64); - if (alloc) { - memcpy(alloc, initial_boot_params, - be32_to_cpu(initial_boot_params->totalsize)); - initial_boot_params = alloc; - } + return mdesc; } diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c index eb1c2ee5eaf..42b05046fad 100644 --- a/arch/arc/kernel/kprobes.c +++ b/arch/arc/kernel/kprobes.c @@ -327,7 +327,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr) */ /* We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accouting + * we can also use npre/npostfault count for accounting * these specific fault cases. */ kprobes_inc_nmissed_count(cur); diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index d9e15f16633..643eae4436e 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -21,7 +21,6 @@ #include <asm/setup.h> #include <asm/page.h> #include <asm/irq.h> -#include <asm/prom.h> #include <asm/unwind.h> #include <asm/clk.h> #include <asm/mach_desc.h> @@ -31,7 +30,7 @@ int running_on_hw = 1; /* vs. 
on ISS */ char __initdata command_line[COMMAND_LINE_SIZE]; -struct machine_desc *machine_desc; +const struct machine_desc *machine_desc; struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ @@ -345,8 +344,7 @@ void __init setup_arch(char **cmdline_p) setup_arch_memory(); /* copy flat DT out of .init and then unflatten it */ - copy_devtree(); - unflatten_device_tree(); + unflatten_and_copy_device_tree(); /* Can be issue if someone passes cmd line arg "ro" * But that is unlikely so keeping it as it is diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index 81279ec73a6..55e0a85bea7 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c @@ -125,10 +125,3 @@ void __init free_initrd_mem(unsigned long start, unsigned long end) free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif - -#ifdef CONFIG_OF_FLATTREE -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - pr_err("%s(%llx, %llx)\n", __func__, start, end); -} -#endif /* CONFIG_OF_FLATTREE */ diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index aa8300333bc..214b698cefe 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -5,6 +5,8 @@ config ARM select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT if MMU select CLONE_BACKWARDS @@ -51,6 +53,8 @@ config ARM select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND select HAVE_OPROFILE if (HAVE_PERF_EVENTS) select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS select HAVE_UID16 @@ -389,7 +393,6 @@ config ARCH_GEMINI select CLKSRC_MMIO select CPU_FA526 select GENERIC_CLOCKEVENTS - select NEED_MACH_GPIO_H help Support for the Cortina Systems Gemini family SoCs @@ -458,7 +461,7 @@ config ARCH_IOP32X depends on MMU select ARCH_REQUIRE_GPIOLIB select CPU_XSCALE - select NEED_MACH_GPIO_H + select GPIO_IOP select NEED_RET_TO_USER select PCI select PLAT_IOP @@ -471,7 +474,7 @@ config ARCH_IOP33X depends on MMU select ARCH_REQUIRE_GPIOLIB select CPU_XSCALE - select NEED_MACH_GPIO_H + select GPIO_IOP select NEED_RET_TO_USER select PCI select PLAT_IOP @@ -482,6 +485,7 @@ config ARCH_IXP4XX bool "IXP4xx-based" depends on MMU select ARCH_HAS_DMA_SET_COHERENT_MASK + select ARCH_SUPPORTS_BIG_ENDIAN select ARCH_REQUIRE_GPIOLIB select CLKSRC_MMIO select CPU_XSCALE @@ -560,7 +564,6 @@ config ARCH_MMP select GPIO_PXA select IRQ_DOMAIN select MULTI_IRQ_HANDLER - select NEED_MACH_GPIO_H select PINCTRL select PLAT_PXA select SPARSE_IRQ @@ -623,7 +626,6 @@ config ARCH_PXA select GPIO_PXA select HAVE_IDE select MULTI_IRQ_HANDLER - select NEED_MACH_GPIO_H select PLAT_PXA select SPARSE_IRQ help @@ -691,7 +693,6 @@ config ARCH_SA1100 select GENERIC_CLOCKEVENTS select HAVE_IDE select ISA - select NEED_MACH_GPIO_H select NEED_MACH_MEMORY_H select SPARSE_IRQ help @@ -1067,11 +1068,6 @@ config IWMMXT Enable support for iWMMXt context switching at run time if running on a CPU that supports it. -config XSCALE_PMU - bool - depends on CPU_XSCALE - default y - config MULTI_IRQ_HANDLER bool help @@ -1437,7 +1433,6 @@ config SMP depends on GENERIC_CLOCKEVENTS depends on HAVE_SMP depends on MMU || ARM_MPU - select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. 
If @@ -1519,6 +1514,32 @@ config MCPM for (multi-)cluster based systems, such as big.LITTLE based systems. +config BIG_LITTLE + bool "big.LITTLE support (Experimental)" + depends on CPU_V7 && SMP + select MCPM + help + This option enables support selections for the big.LITTLE + system architecture. + +config BL_SWITCHER + bool "big.LITTLE switcher support" + depends on BIG_LITTLE && MCPM && HOTPLUG_CPU + select CPU_PM + select ARM_CPU_SUSPEND + help + The big.LITTLE "switcher" provides the core functionality to + transparently handle transition between a cluster of A15's + and a cluster of A7's in a big.LITTLE system. + +config BL_SWITCHER_DUMMY_IF + tristate "Simple big.LITTLE switcher user interface" + depends on BL_SWITCHER && DEBUG_KERNEL + help + This is a simple and dummy char dev interface to control + the big.LITTLE switcher core code. It is meant for + debugging purposes only. + choice prompt "Memory split" default VMSPLIT_3G @@ -1842,6 +1863,12 @@ config CC_STACKPROTECTOR neutralized via a kernel panic. This feature requires gcc version 4.2 or above. +config SWIOTLB + def_bool y + +config IOMMU_HELPER + def_bool SWIOTLB + config XEN_DOM0 def_bool y depends on XEN @@ -1852,6 +1879,7 @@ config XEN depends on CPU_V7 && !CPU_V6 depends on !GENERIC_ATOMIC64 select ARM_PSCI + select SWIOTLB_XEN help Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index d597c6b8488..5765abf5ce8 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -318,6 +318,7 @@ choice config DEBUG_MSM_UART1 bool "Kernel low-level debugging messages via MSM UART1" depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50 + select DEBUG_MSM_UART help Say Y here if you want the debug print routines to direct their output to the first serial port on MSM devices. @@ -325,6 +326,7 @@ choice config DEBUG_MSM_UART2 bool "Kernel low-level debugging messages via MSM UART2" depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50 + select DEBUG_MSM_UART help Say Y here if you want the debug print routines to direct their output to the second serial port on MSM devices. @@ -332,6 +334,7 @@ choice config DEBUG_MSM_UART3 bool "Kernel low-level debugging messages via MSM UART3" depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50 + select DEBUG_MSM_UART help Say Y here if you want the debug print routines to direct their output to the third serial port on MSM devices. @@ -340,6 +343,7 @@ choice bool "Kernel low-level debugging messages via MSM 8660 UART" depends on ARCH_MSM8X60 select MSM_HAS_DEBUG_UART_HS + select DEBUG_MSM_UART help Say Y here if you want the debug print routines to direct their output to the serial port on MSM 8660 devices. @@ -348,10 +352,20 @@ choice bool "Kernel low-level debugging messages via MSM 8960 UART" depends on ARCH_MSM8960 select MSM_HAS_DEBUG_UART_HS + select DEBUG_MSM_UART help Say Y here if you want the debug print routines to direct their output to the serial port on MSM 8960 devices. + config DEBUG_MSM8974_UART + bool "Kernel low-level debugging messages via MSM 8974 UART" + depends on ARCH_MSM8974 + select MSM_HAS_DEBUG_UART_HS + select DEBUG_MSM_UART + help + Say Y here if you want the debug print routines to direct + their output to the serial port on MSM 8974 devices. + config DEBUG_MVEBU_UART bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)" depends on ARCH_MVEBU @@ -841,6 +855,20 @@ choice options; the platform specific options are deprecated and will be soon removed. 
+ config DEBUG_LL_UART_EFM32 + bool "Kernel low-level debugging via efm32 UART" + depends on ARCH_EFM32 + help + Say Y here if you want the debug print routines to direct + their output to an UART or USART port on efm32 based + machines. Use the following addresses for DEBUG_UART_PHYS: + + 0x4000c000 | USART0 + 0x4000c400 | USART1 + 0x4000c800 | USART2 + 0x4000e000 | UART0 + 0x4000e400 | UART1 + config DEBUG_LL_UART_PL01X bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART" help @@ -887,11 +915,16 @@ config DEBUG_STI_UART bool depends on ARCH_STI +config DEBUG_MSM_UART + bool + depends on ARCH_MSM + config DEBUG_LL_INCLUDE string default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250 default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X default "debug/exynos.S" if DEBUG_EXYNOS_UART + default "debug/efm32.S" if DEBUG_LL_UART_EFM32 default "debug/icedcc.S" if DEBUG_ICEDCC default "debug/imx.S" if DEBUG_IMX1_UART || \ DEBUG_IMX25_UART || \ @@ -902,11 +935,7 @@ config DEBUG_LL_INCLUDE DEBUG_IMX53_UART ||\ DEBUG_IMX6Q_UART || \ DEBUG_IMX6SL_UART - default "debug/msm.S" if DEBUG_MSM_UART1 || \ - DEBUG_MSM_UART2 || \ - DEBUG_MSM_UART3 || \ - DEBUG_MSM8660_UART || \ - DEBUG_MSM8960_UART + default "debug/msm.S" if DEBUG_MSM_UART default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1 default "debug/sti.S" if DEBUG_STI_UART @@ -959,6 +988,7 @@ config DEBUG_UART_PHYS default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2 default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3 default 0x20201000 if DEBUG_BCM2835 + default 0x4000e400 if DEBUG_LL_UART_EFM32 default 0x40090000 if ARCH_LPC32XX default 0x40100000 if DEBUG_PXA_UART1 default 0x42000000 if ARCH_GEMINI @@ -989,6 +1019,7 @@ config DEBUG_UART_PHYS default 0xfff36000 if DEBUG_HIGHBANK_UART default 0xfffff700 if ARCH_IOP33X depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \ + DEBUG_LL_UART_EFM32 || \ DEBUG_UART_8250 || DEBUG_UART_PL01X config DEBUG_UART_VIRT diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 8b667132d7b..c99b1086d83 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -16,6 +16,7 @@ LDFLAGS := LDFLAGS_vmlinux :=-p --no-undefined -X ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) LDFLAGS_vmlinux += --be8 +LDFLAGS_MODULE += --be8 endif OBJCOPYFLAGS :=-O binary -R .comment -S diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 75189f13cf5..066b03480b6 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -135,6 +135,7 @@ start: .word _edata @ zImage end address THUMB( .thumb ) 1: + ARM_BE8( setend be ) @ go BE8 if compiled for BE8 mrs r9, cpsr #ifdef CONFIG_ARM_VIRT_EXT bl __hyp_stub_install @ get into SVC mode, reversibly @@ -699,9 +700,7 @@ __armv4_mmu_cache_on: mrc p15, 0, r0, c1, c0, 0 @ read control reg orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x0030 -#ifdef CONFIG_CPU_ENDIAN_BE8 - orr r0, r0, #1 << 25 @ big-endian page tables -#endif + ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables bl __common_mmu_cache_on mov r0, #0 mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs @@ -728,9 +727,7 @@ __armv7_mmu_cache_on: orr r0, r0, #1 << 22 @ U (v6 unaligned access model) @ (needed for ARM1176) #ifdef CONFIG_MMU -#ifdef CONFIG_CPU_ENDIAN_BE8 - orr r0, r0, #1 << 25 @ big-endian page tables -#endif + ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg orrne r0, r0, #1 @ MMU 
enabled movne r1, #0xfffffffd @ domain 0 = client diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index 98742943617..7e6c64ed966 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -630,7 +630,7 @@ tsc { ti,wires = <4>; ti,x-plate-resistance = <200>; - ti,coordiante-readouts = <5>; + ti,coordinate-readouts = <5>; ti,wire-config = <0x00 0x11 0x22 0x33>; }; diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 03febf85fd2..4718ec4a4db 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -485,3 +485,13 @@ tx-num-evt = <1>; rx-num-evt = <1>; }; + +&tscadc { + status = "okay"; + tsc { + ti,wires = <4>; + ti,x-plate-resistance = <200>; + ti,coordinate-readouts = <5>; + ti,wire-config = <0x00 0x11 0x22 0x33>; + }; +}; diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index fcb9c8e42ab..f6d8ffe98d0 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -670,6 +670,12 @@ /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; }; + + phy_sel: cpsw-phy-sel@44e10650 { + compatible = "ti,am3352-cpsw-phy-sel"; + reg= <0x44e10650 0x4>; + reg-names = "gmii-sel"; + }; }; ocmcram: ocmcram@40300000 { diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi index a49032c6e19..978bab4991d 100644 --- a/arch/arm/boot/dts/atlas6.dtsi +++ b/arch/arm/boot/dts/atlas6.dtsi @@ -558,6 +558,18 @@ sirf,function = "usb1_utmi_drvbus"; }; }; + usb1_dp_dn_pins_a: usb1_dp_dn@0 { + usb1_dp_dn { + sirf,pins = "usb1_dp_dngrp"; + sirf,function = "usb1_dp_dn"; + }; + }; + uart1_route_io_usb1_pins_a: uart1_route_io_usb1@0 { + uart1_route_io_usb1 { + sirf,pins = "uart1_route_io_usb1grp"; + sirf,function = "uart1_route_io_usb1"; + }; + }; warm_rst_pins_a: warm_rst@0 { warm_rst { sirf,pins = "warm_rstgrp"; diff --git a/arch/arm/boot/dts/ecx-2000.dts b/arch/arm/boot/dts/ecx-2000.dts index 139b40cc3a2..2ccbb57fbfa 100644 --- a/arch/arm/boot/dts/ecx-2000.dts +++ b/arch/arm/boot/dts/ecx-2000.dts @@ -85,6 +85,12 @@ <1 10 0xf08>; }; + memory-controller@fff00000 { + compatible = "calxeda,ecx-2000-ddr-ctrl"; + reg = <0xfff00000 0x1000>; + interrupts = <0 91 4>; + }; + intc: interrupt-controller@fff11000 { compatible = "arm,cortex-a15-gic"; #interrupt-cells = <3>; diff --git a/arch/arm/boot/dts/ecx-common.dtsi b/arch/arm/boot/dts/ecx-common.dtsi index bc22557d7a6..b90045a8f8e 100644 --- a/arch/arm/boot/dts/ecx-common.dtsi +++ b/arch/arm/boot/dts/ecx-common.dtsi @@ -53,12 +53,6 @@ status = "disabled"; }; - memory-controller@fff00000 { - compatible = "calxeda,hb-ddr-ctrl"; - reg = <0xfff00000 0x1000>; - interrupts = <0 91 4>; - }; - ipc@fff20000 { compatible = "arm,pl320", "arm,primecell"; reg = <0xfff20000 0x1000>; diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts index 6aad34ad951..ed14aeac056 100644 --- a/arch/arm/boot/dts/highbank.dts +++ b/arch/arm/boot/dts/highbank.dts @@ -86,6 +86,12 @@ soc { ranges = <0x00000000 0x00000000 0xffffffff>; + memory-controller@fff00000 { + compatible = "calxeda,hb-ddr-ctrl"; + reg = <0xfff00000 0x1000>; + interrupts = <0 91 4>; + }; + timer@fff10600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0xfff10600 0x20>; diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index f4dcff3a996..4bcdd3ad15e 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -190,7 +190,7 @@ usbphy0: usbphy@0 { compatible = "usb-nop-xceiv"; - 
clocks = <&clks 124>; + clocks = <&clks 75>; clock-names = "main_clk"; status = "okay"; }; diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi index 7cf78afee7b..daee58944e1 100644 --- a/arch/arm/boot/dts/prima2.dtsi +++ b/arch/arm/boot/dts/prima2.dtsi @@ -388,6 +388,12 @@ sirf,function = "uart0"; }; }; + uart0_noflow_pins_a: uart0@1 { + uart { + sirf,pins = "uart0_nostreamctrlgrp"; + sirf,function = "uart0_nostreamctrl"; + }; + }; uart1_pins_a: uart1@0 { uart { sirf,pins = "uart1grp"; @@ -526,18 +532,42 @@ sirf,function = "usp0"; }; }; + usp0_uart_nostreamctrl_pins_a: usp0@1 { + usp0 { + sirf,pins = + "usp0_uart_nostreamctrl_grp"; + sirf,function = + "usp0_uart_nostreamctrl"; + }; + }; usp1_pins_a: usp1@0 { usp1 { sirf,pins = "usp1grp"; sirf,function = "usp1"; }; }; + usp1_uart_nostreamctrl_pins_a: usp1@1 { + usp1 { + sirf,pins = + "usp1_uart_nostreamctrl_grp"; + sirf,function = + "usp1_uart_nostreamctrl"; + }; + }; usp2_pins_a: usp2@0 { usp2 { sirf,pins = "usp2grp"; sirf,function = "usp2"; }; }; + usp2_uart_nostreamctrl_pins_a: usp2@1 { + usp2 { + sirf,pins = + "usp2_uart_nostreamctrl_grp"; + sirf,function = + "usp2_uart_nostreamctrl"; + }; + }; usb0_utmi_drvbus_pins_a: usb0_utmi_drvbus@0 { usb0_utmi_drvbus { sirf,pins = "usb0_utmi_drvbusgrp"; @@ -550,6 +580,18 @@ sirf,function = "usb1_utmi_drvbus"; }; }; + usb1_dp_dn_pins_a: usb1_dp_dn@0 { + usb1_dp_dn { + sirf,pins = "usb1_dp_dngrp"; + sirf,function = "usb1_dp_dn"; + }; + }; + uart1_route_io_usb1_pins_a: uart1_route_io_usb1@0 { + uart1_route_io_usb1 { + sirf,pins = "uart1_route_io_usb1grp"; + sirf,function = "uart1_route_io_usb1"; + }; + }; warm_rst_pins_a: warm_rst@0 { warm_rst { sirf,pins = "warm_rstgrp"; diff --git a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi b/arch/arm/boot/dts/testcases/tests-interrupts.dtsi new file mode 100644 index 00000000000..c843720bd3e --- /dev/null +++ b/arch/arm/boot/dts/testcases/tests-interrupts.dtsi @@ -0,0 +1,58 @@ + +/ { + testcase-data { + interrupts { + #address-cells = <1>; + #size-cells = <1>; + test_intc0: intc0 { + interrupt-controller; + #interrupt-cells = <1>; + }; + + test_intc1: intc1 { + interrupt-controller; + #interrupt-cells = <3>; + }; + + test_intc2: intc2 { + interrupt-controller; + #interrupt-cells = <2>; + }; + + test_intmap0: intmap0 { + #interrupt-cells = <1>; + #address-cells = <0>; + interrupt-map = <1 &test_intc0 9>, + <2 &test_intc1 10 11 12>, + <3 &test_intc2 13 14>, + <4 &test_intc2 15 16>; + }; + + test_intmap1: intmap1 { + #interrupt-cells = <2>; + interrupt-map = <0x5000 1 2 &test_intc0 15>; + }; + + interrupts0 { + interrupt-parent = <&test_intc0>; + interrupts = <1>, <2>, <3>, <4>; + }; + + interrupts1 { + interrupt-parent = <&test_intmap0>; + interrupts = <1>, <2>, <3>, <4>; + }; + + interrupts-extended0 { + reg = <0x5000 0x100>; + interrupts-extended = <&test_intc0 1>, + <&test_intc1 2 3 4>, + <&test_intc2 5 6>, + <&test_intmap0 1>, + <&test_intmap0 2>, + <&test_intmap0 3>, + <&test_intmap1 1 2>; + }; + }; + }; +}; diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi index a7c5067622e..3f123ecc9dd 100644 --- a/arch/arm/boot/dts/testcases/tests.dtsi +++ b/arch/arm/boot/dts/testcases/tests.dtsi @@ -1 +1,2 @@ /include/ "tests-phandle.dtsi" +/include/ "tests-interrupts.dtsi" diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi index fb1b2ec8eaa..4217096ee67 100644 --- a/arch/arm/boot/dts/twl4030.dtsi +++ b/arch/arm/boot/dts/twl4030.dtsi @@ -19,6 +19,12 @@ interrupts = <11>; }; + 
charger: bci { + compatible = "ti,twl4030-bci"; + interrupts = <9>, <2>; + bci3v1-supply = <&vusb3v1>; + }; + watchdog { compatible = "ti,twl4030-wdt"; }; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index dde75ae8b4b..e01e5a081de 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -185,7 +185,7 @@ mmc@5000 { compatible = "arm,primecell"; reg = < 0x5000 0x1000>; - interrupts = <22 34>; + interrupts-extended = <&vic 22 &sic 2>; }; kmi@6000 { compatible = "arm,pl050", "arm,primecell"; diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index 7e817526906..f43907c40c9 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -41,7 +41,7 @@ mmc@b000 { compatible = "arm,primecell"; reg = <0xb000 0x1000>; - interrupts = <23 34>; + interrupts-extended = <&vic 23 &sic 2>; }; }; }; diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index eaa9cf4705a..4bdc41622c3 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -16,3 +16,5 @@ obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o AFLAGS_mcpm_head.o := -march=armv7-a AFLAGS_vlock.o := -march=armv7-a obj-$(CONFIG_TI_PRIV_EDMA) += edma.o +obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o +obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c new file mode 100644 index 00000000000..5774b6ea7ad --- /dev/null +++ b/arch/arm/common/bL_switcher.c @@ -0,0 +1,822 @@ +/* + * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver + * + * Created by: Nicolas Pitre, March 2012 + * Copyright: (C) 2012-2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/atomic.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/cpu_pm.h> +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/time.h> +#include <linux/clockchips.h> +#include <linux/hrtimer.h> +#include <linux/tick.h> +#include <linux/notifier.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/irqchip/arm-gic.h> +#include <linux/moduleparam.h> + +#include <asm/smp_plat.h> +#include <asm/cputype.h> +#include <asm/suspend.h> +#include <asm/mcpm.h> +#include <asm/bL_switcher.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/power_cpu_migrate.h> + + +/* + * Use our own MPIDR accessors as the generic ones in asm/cputype.h have + * __attribute_const__ and we don't want the compiler to assume any + * constness here as the value _does_ change along some code paths. + */ + +static int read_mpidr(void) +{ + unsigned int id; + asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id)); + return id & MPIDR_HWID_BITMASK; +} + +/* + * Get a global nanosecond time stamp for tracing. + */ +static s64 get_ns(void) +{ + struct timespec ts; + getnstimeofday(&ts); + return timespec_to_ns(&ts); +} + +/* + * bL switcher core code. 
+ */ + +static void bL_do_switch(void *_arg) +{ + unsigned ib_mpidr, ib_cpu, ib_cluster; + long volatile handshake, **handshake_ptr = _arg; + + pr_debug("%s\n", __func__); + + ib_mpidr = cpu_logical_map(smp_processor_id()); + ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0); + ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1); + + /* Advertise our handshake location */ + if (handshake_ptr) { + handshake = 0; + *handshake_ptr = &handshake; + } else + handshake = -1; + + /* + * Our state has been saved at this point. Let's release our + * inbound CPU. + */ + mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume); + sev(); + + /* + * From this point, we must assume that our counterpart CPU might + * have taken over in its parallel world already, as if execution + * just returned from cpu_suspend(). It is therefore important to + * be very careful not to make any change the other guy is not + * expecting. This is why we need stack isolation. + * + * Fancy under cover tasks could be performed here. For now + * we have none. + */ + + /* + * Let's wait until our inbound is alive. + */ + while (!handshake) { + wfe(); + smp_mb(); + } + + /* Let's put ourself down. */ + mcpm_cpu_power_down(); + + /* should never get here */ + BUG(); +} + +/* + * Stack isolation. To ensure 'current' remains valid, we just use another + * piece of our thread's stack space which should be fairly lightly used. + * The selected area starts just above the thread_info structure located + * at the very bottom of the stack, aligned to a cache line, and indexed + * with the cluster number. + */ +#define STACK_SIZE 512 +extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); +static int bL_switchpoint(unsigned long _arg) +{ + unsigned int mpidr = read_mpidr(); + unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1); + void *stack = current_thread_info() + 1; + stack = PTR_ALIGN(stack, L1_CACHE_BYTES); + stack += clusterid * STACK_SIZE + STACK_SIZE; + call_with_stack(bL_do_switch, (void *)_arg, stack); + BUG(); +} + +/* + * Generic switcher interface + */ + +static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS]; +static int bL_switcher_cpu_pairing[NR_CPUS]; + +/* + * bL_switch_to - Switch to a specific cluster for the current CPU + * @new_cluster_id: the ID of the cluster to switch to. + * + * This function must be called on the CPU to be switched. + * Returns 0 on success, else a negative status code. + */ +static int bL_switch_to(unsigned int new_cluster_id) +{ + unsigned int mpidr, this_cpu, that_cpu; + unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster; + struct completion inbound_alive; + struct tick_device *tdev; + enum clock_event_mode tdev_mode; + long volatile *handshake_ptr; + int ipi_nr, ret; + + this_cpu = smp_processor_id(); + ob_mpidr = read_mpidr(); + ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0); + ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1); + BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr); + + if (new_cluster_id == ob_cluster) + return 0; + + that_cpu = bL_switcher_cpu_pairing[this_cpu]; + ib_mpidr = cpu_logical_map(that_cpu); + ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0); + ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1); + + pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n", + this_cpu, ob_mpidr, ib_mpidr); + + this_cpu = smp_processor_id(); + + /* Close the gate for our entry vectors */ + mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL); + mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL); + + /* Install our "inbound alive" notifier. 
*/ + init_completion(&inbound_alive); + ipi_nr = register_ipi_completion(&inbound_alive, this_cpu); + ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]); + mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr); + + /* + * Let's wake up the inbound CPU now in case it requires some delay + * to come online, but leave it gated in our entry vector code. + */ + ret = mcpm_cpu_power_up(ib_cpu, ib_cluster); + if (ret) { + pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret); + return ret; + } + + /* + * Raise a SGI on the inbound CPU to make sure it doesn't stall + * in a possible WFI, such as in bL_power_down(). + */ + gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0); + + /* + * Wait for the inbound to come up. This allows for other + * tasks to be scheduled in the mean time. + */ + wait_for_completion(&inbound_alive); + mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0); + + /* + * From this point we are entering the switch critical zone + * and can't take any interrupts anymore. + */ + local_irq_disable(); + local_fiq_disable(); + trace_cpu_migrate_begin(get_ns(), ob_mpidr); + + /* redirect GIC's SGIs to our counterpart */ + gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]); + + tdev = tick_get_device(this_cpu); + if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu))) + tdev = NULL; + if (tdev) { + tdev_mode = tdev->evtdev->mode; + clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN); + } + + ret = cpu_pm_enter(); + + /* we can not tolerate errors at this point */ + if (ret) + panic("%s: cpu_pm_enter() returned %d\n", __func__, ret); + + /* Swap the physical CPUs in the logical map for this logical CPU. */ + cpu_logical_map(this_cpu) = ib_mpidr; + cpu_logical_map(that_cpu) = ob_mpidr; + + /* Let's do the actual CPU switch. 
*/ + ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint); + if (ret > 0) + panic("%s: cpu_suspend() returned %d\n", __func__, ret); + + /* We are executing on the inbound CPU at this point */ + mpidr = read_mpidr(); + pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr); + BUG_ON(mpidr != ib_mpidr); + + mcpm_cpu_powered_up(); + + ret = cpu_pm_exit(); + + if (tdev) { + clockevents_set_mode(tdev->evtdev, tdev_mode); + clockevents_program_event(tdev->evtdev, + tdev->evtdev->next_event, 1); + } + + trace_cpu_migrate_finish(get_ns(), ib_mpidr); + local_fiq_enable(); + local_irq_enable(); + + *handshake_ptr = 1; + dsb_sev(); + + if (ret) + pr_err("%s exiting with error %d\n", __func__, ret); + return ret; +} + +struct bL_thread { + spinlock_t lock; + struct task_struct *task; + wait_queue_head_t wq; + int wanted_cluster; + struct completion started; + bL_switch_completion_handler completer; + void *completer_cookie; +}; + +static struct bL_thread bL_threads[NR_CPUS]; + +static int bL_switcher_thread(void *arg) +{ + struct bL_thread *t = arg; + struct sched_param param = { .sched_priority = 1 }; + int cluster; + bL_switch_completion_handler completer; + void *completer_cookie; + + sched_setscheduler_nocheck(current, SCHED_FIFO, &param); + complete(&t->started); + + do { + if (signal_pending(current)) + flush_signals(current); + wait_event_interruptible(t->wq, + t->wanted_cluster != -1 || + kthread_should_stop()); + + spin_lock(&t->lock); + cluster = t->wanted_cluster; + completer = t->completer; + completer_cookie = t->completer_cookie; + t->wanted_cluster = -1; + t->completer = NULL; + spin_unlock(&t->lock); + + if (cluster != -1) { + bL_switch_to(cluster); + + if (completer) + completer(completer_cookie); + } + } while (!kthread_should_stop()); + + return 0; +} + +static struct task_struct *bL_switcher_thread_create(int cpu, void *arg) +{ + struct task_struct *task; + + task = kthread_create_on_node(bL_switcher_thread, arg, + cpu_to_node(cpu), "kswitcher_%d", cpu); + if (!IS_ERR(task)) { + kthread_bind(task, cpu); + wake_up_process(task); + } else + pr_err("%s failed for CPU %d\n", __func__, cpu); + return task; +} + +/* + * bL_switch_request_cb - Switch to a specific cluster for the given CPU, + * with completion notification via a callback + * + * @cpu: the CPU to switch + * @new_cluster_id: the ID of the cluster to switch to. + * @completer: switch completion callback. if non-NULL, + * @completer(@completer_cookie) will be called on completion of + * the switch, in non-atomic context. + * @completer_cookie: opaque context argument for @completer. + * + * This function causes a cluster switch on the given CPU by waking up + * the appropriate switcher thread. This function may or may not return + * before the switch has occurred. + * + * If a @completer callback function is supplied, it will be called when + * the switch is complete. This can be used to determine asynchronously + * when the switch is complete, regardless of when bL_switch_request() + * returns. When @completer is supplied, no new switch request is permitted + * for the affected CPU until after the switch is complete, and @completer + * has returned. 
+ */ +int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id, + bL_switch_completion_handler completer, + void *completer_cookie) +{ + struct bL_thread *t; + + if (cpu >= ARRAY_SIZE(bL_threads)) { + pr_err("%s: cpu %d out of bounds\n", __func__, cpu); + return -EINVAL; + } + + t = &bL_threads[cpu]; + + if (IS_ERR(t->task)) + return PTR_ERR(t->task); + if (!t->task) + return -ESRCH; + + spin_lock(&t->lock); + if (t->completer) { + spin_unlock(&t->lock); + return -EBUSY; + } + t->completer = completer; + t->completer_cookie = completer_cookie; + t->wanted_cluster = new_cluster_id; + spin_unlock(&t->lock); + wake_up(&t->wq); + return 0; +} +EXPORT_SYMBOL_GPL(bL_switch_request_cb); + +/* + * Activation and configuration code. + */ + +static DEFINE_MUTEX(bL_switcher_activation_lock); +static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier); +static unsigned int bL_switcher_active; +static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS]; +static cpumask_t bL_switcher_removed_logical_cpus; + +int bL_switcher_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&bL_activation_notifier, nb); +} +EXPORT_SYMBOL_GPL(bL_switcher_register_notifier); + +int bL_switcher_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&bL_activation_notifier, nb); +} +EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier); + +static int bL_activation_notify(unsigned long val) +{ + int ret; + + ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL); + if (ret & NOTIFY_STOP_MASK) + pr_err("%s: notifier chain failed with status 0x%x\n", + __func__, ret); + return notifier_to_errno(ret); +} + +static void bL_switcher_restore_cpus(void) +{ + int i; + + for_each_cpu(i, &bL_switcher_removed_logical_cpus) + cpu_up(i); +} + +static int bL_switcher_halve_cpus(void) +{ + int i, j, cluster_0, gic_id, ret; + unsigned int cpu, cluster, mask; + cpumask_t available_cpus; + + /* First pass to validate what we have */ + mask = 0; + for_each_online_cpu(i) { + cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0); + cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); + if (cluster >= 2) { + pr_err("%s: only dual cluster systems are supported\n", __func__); + return -EINVAL; + } + if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER)) + return -EINVAL; + mask |= (1 << cluster); + } + if (mask != 3) { + pr_err("%s: no CPU pairing possible\n", __func__); + return -EINVAL; + } + + /* + * Now let's do the pairing. We match each CPU with another CPU + * from a different cluster. To get a uniform scheduling behavior + * without fiddling with CPU topology and compute capacity data, + * we'll use logical CPUs initially belonging to the same cluster. + */ + memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing)); + cpumask_copy(&available_cpus, cpu_online_mask); + cluster_0 = -1; + for_each_cpu(i, &available_cpus) { + int match = -1; + cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); + if (cluster_0 == -1) + cluster_0 = cluster; + if (cluster != cluster_0) + continue; + cpumask_clear_cpu(i, &available_cpus); + for_each_cpu(j, &available_cpus) { + cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1); + /* + * Let's remember the last match to create "odd" + * pairings on purpose in order for other code not + * to assume any relation between physical and + * logical CPU numbers. 
+ */ + if (cluster != cluster_0) + match = j; + } + if (match != -1) { + bL_switcher_cpu_pairing[i] = match; + cpumask_clear_cpu(match, &available_cpus); + pr_info("CPU%d paired with CPU%d\n", i, match); + } + } + + /* + * Now we disable the unwanted CPUs i.e. everything that has no + * pairing information (that includes the pairing counterparts). + */ + cpumask_clear(&bL_switcher_removed_logical_cpus); + for_each_online_cpu(i) { + cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0); + cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1); + + /* Let's take note of the GIC ID for this CPU */ + gic_id = gic_get_cpu_id(i); + if (gic_id < 0) { + pr_err("%s: bad GIC ID for CPU %d\n", __func__, i); + bL_switcher_restore_cpus(); + return -EINVAL; + } + bL_gic_id[cpu][cluster] = gic_id; + pr_info("GIC ID for CPU %u cluster %u is %u\n", + cpu, cluster, gic_id); + + if (bL_switcher_cpu_pairing[i] != -1) { + bL_switcher_cpu_original_cluster[i] = cluster; + continue; + } + + ret = cpu_down(i); + if (ret) { + bL_switcher_restore_cpus(); + return ret; + } + cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus); + } + + return 0; +} + +/* Determine the logical CPU a given physical CPU is grouped on. */ +int bL_switcher_get_logical_index(u32 mpidr) +{ + int cpu; + + if (!bL_switcher_active) + return -EUNATCH; + + mpidr &= MPIDR_HWID_BITMASK; + for_each_online_cpu(cpu) { + int pairing = bL_switcher_cpu_pairing[cpu]; + if (pairing == -1) + continue; + if ((mpidr == cpu_logical_map(cpu)) || + (mpidr == cpu_logical_map(pairing))) + return cpu; + } + return -EINVAL; +} + +static void bL_switcher_trace_trigger_cpu(void *__always_unused info) +{ + trace_cpu_migrate_current(get_ns(), read_mpidr()); +} + +int bL_switcher_trace_trigger(void) +{ + int ret; + + preempt_disable(); + + bL_switcher_trace_trigger_cpu(NULL); + ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true); + + preempt_enable(); + + return ret; +} +EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger); + +static int bL_switcher_enable(void) +{ + int cpu, ret; + + mutex_lock(&bL_switcher_activation_lock); + lock_device_hotplug(); + if (bL_switcher_active) { + unlock_device_hotplug(); + mutex_unlock(&bL_switcher_activation_lock); + return 0; + } + + pr_info("big.LITTLE switcher initializing\n"); + + ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE); + if (ret) + goto error; + + ret = bL_switcher_halve_cpus(); + if (ret) + goto error; + + bL_switcher_trace_trigger(); + + for_each_online_cpu(cpu) { + struct bL_thread *t = &bL_threads[cpu]; + spin_lock_init(&t->lock); + init_waitqueue_head(&t->wq); + init_completion(&t->started); + t->wanted_cluster = -1; + t->task = bL_switcher_thread_create(cpu, t); + } + + bL_switcher_active = 1; + bL_activation_notify(BL_NOTIFY_POST_ENABLE); + pr_info("big.LITTLE switcher initialized\n"); + goto out; + +error: + pr_warn("big.LITTLE switcher initialization failed\n"); + bL_activation_notify(BL_NOTIFY_POST_DISABLE); + +out: + unlock_device_hotplug(); + mutex_unlock(&bL_switcher_activation_lock); + return ret; +} + +#ifdef CONFIG_SYSFS + +static void bL_switcher_disable(void) +{ + unsigned int cpu, cluster; + struct bL_thread *t; + struct task_struct *task; + + mutex_lock(&bL_switcher_activation_lock); + lock_device_hotplug(); + + if (!bL_switcher_active) + goto out; + + if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) { + bL_activation_notify(BL_NOTIFY_POST_ENABLE); + goto out; + } + + bL_switcher_active = 0; + + /* + * To deactivate the switcher, we must shut down the switcher + * threads to prevent any other 
requests from being accepted. + * Then, if the final cluster for given logical CPU is not the + * same as the original one, we'll recreate a switcher thread + * just for the purpose of switching the CPU back without any + * possibility for interference from external requests. + */ + for_each_online_cpu(cpu) { + t = &bL_threads[cpu]; + task = t->task; + t->task = NULL; + if (!task || IS_ERR(task)) + continue; + kthread_stop(task); + /* no more switch may happen on this CPU at this point */ + cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1); + if (cluster == bL_switcher_cpu_original_cluster[cpu]) + continue; + init_completion(&t->started); + t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu]; + task = bL_switcher_thread_create(cpu, t); + if (!IS_ERR(task)) { + wait_for_completion(&t->started); + kthread_stop(task); + cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1); + if (cluster == bL_switcher_cpu_original_cluster[cpu]) + continue; + } + /* If execution gets here, we're in trouble. */ + pr_crit("%s: unable to restore original cluster for CPU %d\n", + __func__, cpu); + pr_crit("%s: CPU %d can't be restored\n", + __func__, bL_switcher_cpu_pairing[cpu]); + cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu], + &bL_switcher_removed_logical_cpus); + } + + bL_switcher_restore_cpus(); + bL_switcher_trace_trigger(); + + bL_activation_notify(BL_NOTIFY_POST_DISABLE); + +out: + unlock_device_hotplug(); + mutex_unlock(&bL_switcher_activation_lock); +} + +static ssize_t bL_switcher_active_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", bL_switcher_active); +} + +static ssize_t bL_switcher_active_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + int ret; + + switch (buf[0]) { + case '0': + bL_switcher_disable(); + ret = 0; + break; + case '1': + ret = bL_switcher_enable(); + break; + default: + ret = -EINVAL; + } + + return (ret >= 0) ? count : ret; +} + +static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + int ret = bL_switcher_trace_trigger(); + + return ret ? ret : count; +} + +static struct kobj_attribute bL_switcher_active_attr = + __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store); + +static struct kobj_attribute bL_switcher_trace_trigger_attr = + __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store); + +static struct attribute *bL_switcher_attrs[] = { + &bL_switcher_active_attr.attr, + &bL_switcher_trace_trigger_attr.attr, + NULL, +}; + +static struct attribute_group bL_switcher_attr_group = { + .attrs = bL_switcher_attrs, +}; + +static struct kobject *bL_switcher_kobj; + +static int __init bL_switcher_sysfs_init(void) +{ + int ret; + + bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj); + if (!bL_switcher_kobj) + return -ENOMEM; + ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group); + if (ret) + kobject_put(bL_switcher_kobj); + return ret; +} + +#endif /* CONFIG_SYSFS */ + +bool bL_switcher_get_enabled(void) +{ + mutex_lock(&bL_switcher_activation_lock); + + return bL_switcher_active; +} +EXPORT_SYMBOL_GPL(bL_switcher_get_enabled); + +void bL_switcher_put_enabled(void) +{ + mutex_unlock(&bL_switcher_activation_lock); +} +EXPORT_SYMBOL_GPL(bL_switcher_put_enabled); + +/* + * Veto any CPU hotplug operation on those CPUs we've removed + * while the switcher is active. 
+ * We're just not ready to deal with that given the trickery involved. + */ +static int bL_switcher_hotplug_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + if (bL_switcher_active) { + int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu]; + switch (action & 0xf) { + case CPU_UP_PREPARE: + case CPU_DOWN_PREPARE: + if (pairing == -1) + return NOTIFY_BAD; + } + } + return NOTIFY_DONE; +} + +static bool no_bL_switcher; +core_param(no_bL_switcher, no_bL_switcher, bool, 0644); + +static int __init bL_switcher_init(void) +{ + int ret; + + if (MAX_NR_CLUSTERS != 2) { + pr_err("%s: only dual cluster systems are supported\n", __func__); + return -EINVAL; + } + + cpu_notifier(bL_switcher_hotplug_callback, 0); + + if (!no_bL_switcher) { + ret = bL_switcher_enable(); + if (ret) + return ret; + } + +#ifdef CONFIG_SYSFS + ret = bL_switcher_sysfs_init(); + if (ret) + pr_err("%s: unable to create sysfs entry\n", __func__); +#endif + + return 0; +} + +late_initcall(bL_switcher_init); diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c new file mode 100644 index 00000000000..3f47f1203c6 --- /dev/null +++ b/arch/arm/common/bL_switcher_dummy_if.c @@ -0,0 +1,71 @@ +/* + * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface + * + * Created by: Nicolas Pitre, November 2012 + * Copyright: (C) 2012-2013 Linaro Limited + * + * Dummy interface to user space for debugging purpose only. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <asm/uaccess.h> +#include <asm/bL_switcher.h> + +static ssize_t bL_switcher_write(struct file *file, const char __user *buf, + size_t len, loff_t *pos) +{ + unsigned char val[3]; + unsigned int cpu, cluster; + int ret; + + pr_debug("%s\n", __func__); + + if (len < 3) + return -EINVAL; + + if (copy_from_user(val, buf, 3)) + return -EFAULT; + + /* format: <cpu#>,<cluster#> */ + if (val[0] < '0' || val[0] > '9' || + val[1] != ',' || + val[2] < '0' || val[2] > '1') + return -EINVAL; + + cpu = val[0] - '0'; + cluster = val[2] - '0'; + ret = bL_switch_request(cpu, cluster); + + return ret ? 
: len; +} + +static const struct file_operations bL_switcher_fops = { + .write = bL_switcher_write, + .owner = THIS_MODULE, +}; + +static struct miscdevice bL_switcher_device = { + MISC_DYNAMIC_MINOR, + "b.L_switcher", + &bL_switcher_fops +}; + +static int __init bL_switcher_dummy_if_init(void) +{ + return misc_register(&bL_switcher_device); +} + +static void __exit bL_switcher_dummy_if_exit(void) +{ + misc_deregister(&bL_switcher_device); +} + +module_init(bL_switcher_dummy_if_init); +module_exit(bL_switcher_dummy_if_exit); diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c index 990250965f2..26020a03f65 100644 --- a/arch/arm/common/mcpm_entry.c +++ b/arch/arm/common/mcpm_entry.c @@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); } +extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2]; + +void mcpm_set_early_poke(unsigned cpu, unsigned cluster, + unsigned long poke_phys_addr, unsigned long poke_val) +{ + unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0]; + poke[0] = poke_phys_addr; + poke[1] = poke_val; + __cpuc_flush_dcache_area((void *)poke, 8); + outer_clean_range(__pa(poke), __pa(poke + 2)); +} + static const struct mcpm_platform_ops *platform_ops; int __init mcpm_platform_register(const struct mcpm_platform_ops *ops) @@ -90,6 +102,21 @@ void mcpm_cpu_power_down(void) BUG(); } +int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster) +{ + int ret; + + if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish)) + return -EUNATCH; + + ret = platform_ops->power_down_finish(cpu, cluster); + if (ret) + pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n", + __func__, cpu, cluster, ret); + + return ret; +} + void mcpm_cpu_suspend(u64 expected_residency) { phys_reset_t phys_reset; diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S index 39c96df3477..e02db4b81a6 100644 --- a/arch/arm/common/mcpm_head.S +++ b/arch/arm/common/mcpm_head.S @@ -15,6 +15,7 @@ #include <linux/linkage.h> #include <asm/mcpm.h> +#include <asm/assembler.h> #include "vlock.h" @@ -47,6 +48,7 @@ ENTRY(mcpm_entry_point) + ARM_BE8(setend be) THUMB( adr r12, BSYM(1f) ) THUMB( bx r12 ) THUMB( .thumb ) @@ -71,12 +73,19 @@ ENTRY(mcpm_entry_point) * position independent way. */ adr r5, 3f - ldmia r5, {r6, r7, r8, r11} + ldmia r5, {r0, r6, r7, r8, r11} + add r0, r5, r0 @ r0 = mcpm_entry_early_pokes add r6, r5, r6 @ r6 = mcpm_entry_vectors ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys add r8, r5, r8 @ r8 = mcpm_sync add r11, r5, r11 @ r11 = first_man_locks + @ Perform an early poke, if any + add r0, r0, r4, lsl #3 + ldmia r0, {r0, r1} + teq r0, #0 + strne r1, [r0] + mov r0, #MCPM_SYNC_CLUSTER_SIZE mla r8, r0, r10, r8 @ r8 = sync cluster base @@ -195,7 +204,8 @@ mcpm_entry_gated: .align 2 -3: .word mcpm_entry_vectors - . +3: .word mcpm_entry_early_pokes - . 
+ .word mcpm_entry_vectors - 3b .word mcpm_power_up_setup_phys - 3b .word mcpm_sync - 3b .word first_man_locks - 3b @@ -214,6 +224,10 @@ first_man_locks: ENTRY(mcpm_entry_vectors) .space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER + .type mcpm_entry_early_pokes, #object +ENTRY(mcpm_entry_early_pokes) + .space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER + .type mcpm_power_up_setup_phys, #object ENTRY(mcpm_power_up_setup_phys) .space 4 @ set by mcpm_sync_init() diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c index 1bc34c7567f..177251a4dd9 100644 --- a/arch/arm/common/mcpm_platsmp.c +++ b/arch/arm/common/mcpm_platsmp.c @@ -19,14 +19,23 @@ #include <asm/smp.h> #include <asm/smp_plat.h> +static void cpu_to_pcpu(unsigned int cpu, + unsigned int *pcpu, unsigned int *pcluster) +{ + unsigned int mpidr; + + mpidr = cpu_logical_map(cpu); + *pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + *pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); +} + static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) { - unsigned int mpidr, pcpu, pcluster, ret; + unsigned int pcpu, pcluster, ret; extern void secondary_startup(void); - mpidr = cpu_logical_map(cpu); - pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); - pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + cpu_to_pcpu(cpu, &pcpu, &pcluster); + pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n", __func__, cpu, pcpu, pcluster); @@ -47,6 +56,15 @@ static void mcpm_secondary_init(unsigned int cpu) #ifdef CONFIG_HOTPLUG_CPU +static int mcpm_cpu_kill(unsigned int cpu) +{ + unsigned int pcpu, pcluster; + + cpu_to_pcpu(cpu, &pcpu, &pcluster); + + return !mcpm_cpu_power_down_finish(pcpu, pcluster); +} + static int mcpm_cpu_disable(unsigned int cpu) { /* @@ -73,6 +91,7 @@ static struct smp_operations __initdata mcpm_smp_ops = { .smp_boot_secondary = mcpm_boot_secondary, .smp_secondary_init = mcpm_secondary_init, #ifdef CONFIG_HOTPLUG_CPU + .cpu_kill = mcpm_cpu_kill, .cpu_disable = mcpm_cpu_disable, .cpu_die = mcpm_cpu_die, #endif diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c index e901d0f3e0b..ce922d0ea7a 100644 --- a/arch/arm/common/timer-sp.c +++ b/arch/arm/common/timer-sp.c @@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = { static struct irqaction sp804_timer_irq = { .name = "timer", - .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, + .flags = IRQF_TIMER | IRQF_IRQPOLL, .handler = sp804_timer_interrupt, .dev_id = &sp804_clockevent, }; diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig index 317960f1248..0142ec37e0b 100644 --- a/arch/arm/configs/h3600_defconfig +++ b/arch/arm/configs/h3600_defconfig @@ -1,5 +1,6 @@ -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y +CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y CONFIG_MODULES=y @@ -11,11 +12,11 @@ CONFIG_ARCH_SA1100=y CONFIG_SA1100_H3600=y CONFIG_PCCARD=y CONFIG_PCMCIA_SA1100=y +CONFIG_PREEMPT=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 # CONFIG_CPU_FREQ_STAT is not set CONFIG_FPE_NWFPE=y -CONFIG_PM=y CONFIG_NET=y CONFIG_UNIX=y CONFIG_INET=y @@ -24,13 +25,10 @@ CONFIG_IRDA=m CONFIG_IRLAN=m CONFIG_IRNET=m CONFIG_IRCOMM=m -CONFIG_SA1100_FIR=m # CONFIG_WIRELESS is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_ADV_OPTIONS=y @@ -41,19 +39,15 @@ CONFIG_MTD_SA1100=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 
-# CONFIG_MISC_DEVICES is not set CONFIG_IDE=y CONFIG_BLK_DEV_IDECS=y CONFIG_NETDEVICES=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set -# CONFIG_WLAN is not set -CONFIG_NET_PCMCIA=y CONFIG_PCMCIA_PCNET=y CONFIG_PPP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_DEFLATE=m CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_ASYNC=m +# CONFIG_WLAN is not set # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set @@ -64,8 +58,6 @@ CONFIG_SERIAL_SA1100_CONSOLE=y # CONFIG_HWMON is not set CONFIG_FB=y CONFIG_FB_SA1100=y -# CONFIG_VGA_CONSOLE is not set -# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y CONFIG_MSDOS_FS=m @@ -74,6 +66,4 @@ CONFIG_JFFS2_FS=y CONFIG_CRAMFS=m CONFIG_NFS_FS=y CONFIG_NFSD=m -CONFIG_SMB_FS=m CONFIG_NLS=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig index 002a1ceadce..23591dba47a 100644 --- a/arch/arm/configs/prima2_defconfig +++ b/arch/arm/configs/prima2_defconfig @@ -39,6 +39,7 @@ CONFIG_SPI=y CONFIG_SPI_SIRF=y CONFIG_SPI_SPIDEV=y # CONFIG_HWMON is not set +CONFIG_WATCHDOG=y CONFIG_USB_GADGET=y CONFIG_USB_MASS_STORAGE=m CONFIG_MMC=y diff --git a/arch/arm/configs/vt8500_v6_v7_defconfig b/arch/arm/configs/vt8500_v6_v7_defconfig new file mode 100644 index 00000000000..f0520176acd --- /dev/null +++ b/arch/arm/configs/vt8500_v6_v7_defconfig @@ -0,0 +1,90 @@ +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_ARCH_MULTI_V6=y +CONFIG_ARCH_WM8750=y +CONFIG_ARCH_WM8850=y +CONFIG_ARM_ERRATA_720789=y +CONFIG_ARM_ERRATA_754322=y +CONFIG_ARM_ERRATA_775420=y +CONFIG_HAVE_ARM_ARCH_TIMER=y +CONFIG_AEABI=y +CONFIG_HIGHMEM=y +CONFIG_HIGHPTE=y +CONFIG_ARM_APPENDED_DTB=y +CONFIG_ARM_ATAG_DTB_COMPAT=y +CONFIG_VFP=y +CONFIG_NEON=y +CONFIG_PM_RUNTIME=y +CONFIG_NET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_PROC_DEVICETREE=y +CONFIG_EEPROM_93CX6=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_NETDEVICES=y +# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_FARADAY is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +CONFIG_VIA_VELOCITY=y +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_PHYLIB=y +CONFIG_INPUT_MATRIXKMAP=y +CONFIG_SERIAL_VT8500=y +CONFIG_SERIAL_VT8500_CONSOLE=y +CONFIG_I2C=y +CONFIG_I2C_WMT=y +CONFIG_PINCTRL_SINGLE=y +CONFIG_PINCTRL_WM8750=y +CONFIG_GPIO_GENERIC_PLATFORM=y +CONFIG_POWER_SUPPLY=y +CONFIG_POWER_RESET=y +CONFIG_MFD_SYSCON=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_GPIO=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_GPIO_VBUS=y +CONFIG_USB_ULPI=y +CONFIG_MMC=y +CONFIG_MMC_DEBUG=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_VT8500=y +CONFIG_DMADEVICES=y +CONFIG_COMMON_CLK_DEBUG=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_PWM=y +CONFIG_PWM_VT8500=y +CONFIG_RESET_CONTROLLER=y +CONFIG_GENERIC_PHY=y +CONFIG_EXT4_FS=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y 
+CONFIG_ROOT_NFS=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_KERNEL=y +CONFIG_LOCKUP_DETECTOR=y diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore new file mode 100644 index 00000000000..6231d36b363 --- /dev/null +++ b/arch/arm/crypto/.gitignore @@ -0,0 +1 @@ +aesbs-core.S diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index a2c83851bc9..81cda39860c 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -3,7 +3,17 @@ # obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o +obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o -aes-arm-y := aes-armv4.o aes_glue.o -sha1-arm-y := sha1-armv4-large.o sha1_glue.o +aes-arm-y := aes-armv4.o aes_glue.o +aes-arm-bs-y := aesbs-core.o aesbs-glue.o +sha1-arm-y := sha1-armv4-large.o sha1_glue.o + +quiet_cmd_perl = PERL $@ + cmd_perl = $(PERL) $(<) > $(@) + +$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl + $(call cmd,perl) + +.PRECIOUS: $(obj)/aesbs-core.S diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c index 59f7877ead6..3003fa1f6fb 100644 --- a/arch/arm/crypto/aes_glue.c +++ b/arch/arm/crypto/aes_glue.c @@ -6,22 +6,12 @@ #include <linux/crypto.h> #include <crypto/aes.h> -#define AES_MAXNR 14 +#include "aes_glue.h" -typedef struct { - unsigned int rd_key[4 *(AES_MAXNR + 1)]; - int rounds; -} AES_KEY; - -struct AES_CTX { - AES_KEY enc_key; - AES_KEY dec_key; -}; - -asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx); -asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx); -asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key); -asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key); +EXPORT_SYMBOL(AES_encrypt); +EXPORT_SYMBOL(AES_decrypt); +EXPORT_SYMBOL(private_AES_set_encrypt_key); +EXPORT_SYMBOL(private_AES_set_decrypt_key); static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { @@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, + .cia_setkey = aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h new file mode 100644 index 00000000000..cca3e51eb60 --- /dev/null +++ b/arch/arm/crypto/aes_glue.h @@ -0,0 +1,19 @@ + +#define AES_MAXNR 14 + +struct AES_KEY { + unsigned int rd_key[4 * (AES_MAXNR + 1)]; + int rounds; +}; + +struct AES_CTX { + struct AES_KEY enc_key; + struct AES_KEY dec_key; +}; + +asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx); +asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx); +asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, + const int bits, struct AES_KEY *key); +asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, + const int bits, struct AES_KEY *key); diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped new file mode 100644 index 00000000000..64205d45326 --- /dev/null +++ b/arch/arm/crypto/aesbs-core.S_shipped @@ -0,0 +1,2544 @@ + +@ ==================================================================== +@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +@ project. The module is, however, dual licensed under OpenSSL and +@ CRYPTOGAMS licenses depending on where you obtain it. For further +@ details see http://www.openssl.org/~appro/cryptogams/. 
+@ +@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel +@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is +@ granted. +@ ==================================================================== + +@ Bit-sliced AES for ARM NEON +@ +@ February 2012. +@ +@ This implementation is direct adaptation of bsaes-x86_64 module for +@ ARM NEON. Except that this module is endian-neutral [in sense that +@ it can be compiled for either endianness] by courtesy of vld1.8's +@ neutrality. Initial version doesn't implement interface to OpenSSL, +@ only low-level primitives and unsupported entry points, just enough +@ to collect performance results, which for Cortex-A8 core are: +@ +@ encrypt 19.5 cycles per byte processed with 128-bit key +@ decrypt 22.1 cycles per byte processed with 128-bit key +@ key conv. 440 cycles per 128-bit key/0.18 of 8x block +@ +@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7, +@ which is [much] worse than anticipated (for further details see +@ http://www.openssl.org/~appro/Snapdragon-S4.html). +@ +@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code +@ manages in 20.0 cycles]. +@ +@ When comparing to x86_64 results keep in mind that NEON unit is +@ [mostly] single-issue and thus can't [fully] benefit from +@ instruction-level parallelism. And when comparing to aes-armv4 +@ results keep in mind key schedule conversion overhead (see +@ bsaes-x86_64.pl for further details)... +@ +@ <appro@openssl.org> + +@ April-August 2013 +@ +@ Add CBC, CTR and XTS subroutines, adapt for kernel use. +@ +@ <ard.biesheuvel@linaro.org> + +#ifndef __KERNEL__ +# include "arm_arch.h" + +# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} +# define VFP_ABI_POP vldmia sp!,{d8-d15} +# define VFP_ABI_FRAME 0x40 +#else +# define VFP_ABI_PUSH +# define VFP_ABI_POP +# define VFP_ABI_FRAME 0 +# define BSAES_ASM_EXTENDED_KEY +# define XTS_CHAIN_TWEAK +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +#endif + +#ifdef __thumb__ +# define adrl adr +#endif + +#if __ARM_ARCH__>=7 +.text +.syntax unified @ ARMv7-capable assembler is expected to handle this +#ifdef __thumb2__ +.thumb +#else +.code 32 +#endif + +.fpu neon + +.type _bsaes_decrypt8,%function +.align 4 +_bsaes_decrypt8: + adr r6,_bsaes_decrypt8 + vldmia r4!, {q9} @ round 0 key + add r6,r6,#.LM0ISR-_bsaes_decrypt8 + + vldmia r6!, {q8} @ .LM0ISR + veor q10, q0, q9 @ xor with round0 key + veor q11, q1, q9 + vtbl.8 d0, {q10}, d16 + vtbl.8 d1, {q10}, d17 + veor q12, q2, q9 + vtbl.8 d2, {q11}, d16 + vtbl.8 d3, {q11}, d17 + veor q13, q3, q9 + vtbl.8 d4, {q12}, d16 + vtbl.8 d5, {q12}, d17 + veor q14, q4, q9 + vtbl.8 d6, {q13}, d16 + vtbl.8 d7, {q13}, d17 + veor q15, q5, q9 + vtbl.8 d8, {q14}, d16 + vtbl.8 d9, {q14}, d17 + veor q10, q6, q9 + vtbl.8 d10, {q15}, d16 + vtbl.8 d11, {q15}, d17 + veor q11, q7, q9 + vtbl.8 d12, {q10}, d16 + vtbl.8 d13, {q10}, d17 + vtbl.8 d14, {q11}, d16 + vtbl.8 d15, {q11}, d17 + vmov.i8 q8,#0x55 @ compose .LBS0 + vmov.i8 q9,#0x33 @ compose .LBS1 + vshr.u64 q10, q6, #1 + vshr.u64 q11, q4, #1 + veor q10, q10, q7 + veor q11, q11, q5 + vand q10, q10, q8 + vand q11, q11, q8 + veor q7, q7, q10 + vshl.u64 q10, q10, #1 + veor q5, q5, q11 + vshl.u64 q11, q11, #1 + veor q6, q6, q10 + veor q4, q4, q11 + vshr.u64 q10, q2, #1 + vshr.u64 q11, q0, #1 + veor q10, q10, q3 + veor q11, q11, q1 + vand q10, q10, q8 + vand q11, q11, q8 + veor q3, q3, q10 + vshl.u64 q10, q10, #1 + veor q1, q1, q11 + vshl.u64 q11, q11, #1 + veor q2, q2, q10 + veor q0, q0, q11 + vmov.i8 q8,#0x0f @ compose .LBS2 + vshr.u64 q10, q5, #2 
+ vshr.u64 q11, q4, #2 + veor q10, q10, q7 + veor q11, q11, q6 + vand q10, q10, q9 + vand q11, q11, q9 + veor q7, q7, q10 + vshl.u64 q10, q10, #2 + veor q6, q6, q11 + vshl.u64 q11, q11, #2 + veor q5, q5, q10 + veor q4, q4, q11 + vshr.u64 q10, q1, #2 + vshr.u64 q11, q0, #2 + veor q10, q10, q3 + veor q11, q11, q2 + vand q10, q10, q9 + vand q11, q11, q9 + veor q3, q3, q10 + vshl.u64 q10, q10, #2 + veor q2, q2, q11 + vshl.u64 q11, q11, #2 + veor q1, q1, q10 + veor q0, q0, q11 + vshr.u64 q10, q3, #4 + vshr.u64 q11, q2, #4 + veor q10, q10, q7 + veor q11, q11, q6 + vand q10, q10, q8 + vand q11, q11, q8 + veor q7, q7, q10 + vshl.u64 q10, q10, #4 + veor q6, q6, q11 + vshl.u64 q11, q11, #4 + veor q3, q3, q10 + veor q2, q2, q11 + vshr.u64 q10, q1, #4 + vshr.u64 q11, q0, #4 + veor q10, q10, q5 + veor q11, q11, q4 + vand q10, q10, q8 + vand q11, q11, q8 + veor q5, q5, q10 + vshl.u64 q10, q10, #4 + veor q4, q4, q11 + vshl.u64 q11, q11, #4 + veor q1, q1, q10 + veor q0, q0, q11 + sub r5,r5,#1 + b .Ldec_sbox +.align 4 +.Ldec_loop: + vldmia r4!, {q8-q11} + veor q8, q8, q0 + veor q9, q9, q1 + vtbl.8 d0, {q8}, d24 + vtbl.8 d1, {q8}, d25 + vldmia r4!, {q8} + veor q10, q10, q2 + vtbl.8 d2, {q9}, d24 + vtbl.8 d3, {q9}, d25 + vldmia r4!, {q9} + veor q11, q11, q3 + vtbl.8 d4, {q10}, d24 + vtbl.8 d5, {q10}, d25 + vldmia r4!, {q10} + vtbl.8 d6, {q11}, d24 + vtbl.8 d7, {q11}, d25 + vldmia r4!, {q11} + veor q8, q8, q4 + veor q9, q9, q5 + vtbl.8 d8, {q8}, d24 + vtbl.8 d9, {q8}, d25 + veor q10, q10, q6 + vtbl.8 d10, {q9}, d24 + vtbl.8 d11, {q9}, d25 + veor q11, q11, q7 + vtbl.8 d12, {q10}, d24 + vtbl.8 d13, {q10}, d25 + vtbl.8 d14, {q11}, d24 + vtbl.8 d15, {q11}, d25 +.Ldec_sbox: + veor q1, q1, q4 + veor q3, q3, q4 + + veor q4, q4, q7 + veor q1, q1, q6 + veor q2, q2, q7 + veor q6, q6, q4 + + veor q0, q0, q1 + veor q2, q2, q5 + veor q7, q7, q6 + veor q3, q3, q0 + veor q5, q5, q0 + veor q1, q1, q3 + veor q11, q3, q0 + veor q10, q7, q4 + veor q9, q1, q6 + veor q13, q4, q0 + vmov q8, q10 + veor q12, q5, q2 + + vorr q10, q10, q9 + veor q15, q11, q8 + vand q14, q11, q12 + vorr q11, q11, q12 + veor q12, q12, q9 + vand q8, q8, q9 + veor q9, q6, q2 + vand q15, q15, q12 + vand q13, q13, q9 + veor q9, q3, q7 + veor q12, q1, q5 + veor q11, q11, q13 + veor q10, q10, q13 + vand q13, q9, q12 + vorr q9, q9, q12 + veor q11, q11, q15 + veor q8, q8, q13 + veor q10, q10, q14 + veor q9, q9, q15 + veor q8, q8, q14 + vand q12, q4, q6 + veor q9, q9, q14 + vand q13, q0, q2 + vand q14, q7, q1 + vorr q15, q3, q5 + veor q11, q11, q12 + veor q9, q9, q14 + veor q8, q8, q15 + veor q10, q10, q13 + + @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 + + @ new smaller inversion + + vand q14, q11, q9 + vmov q12, q8 + + veor q13, q10, q14 + veor q15, q8, q14 + veor q14, q8, q14 @ q14=q15 + + vbsl q13, q9, q8 + vbsl q15, q11, q10 + veor q11, q11, q10 + + vbsl q12, q13, q14 + vbsl q8, q14, q13 + + vand q14, q12, q15 + veor q9, q9, q8 + + veor q14, q14, q11 + veor q12, q5, q2 + veor q8, q1, q6 + veor q10, q15, q14 + vand q10, q10, q5 + veor q5, q5, q1 + vand q11, q1, q15 + vand q5, q5, q14 + veor q1, q11, q10 + veor q5, q5, q11 + veor q15, q15, q13 + veor q14, q14, q9 + veor q11, q15, q14 + veor q10, q13, q9 + vand q11, q11, q12 + vand q10, q10, q2 + veor q12, q12, q8 + veor q2, q2, q6 + vand q8, q8, q15 + vand q6, q6, q13 + vand q12, q12, q14 + vand q2, q2, q9 + veor q8, q8, q12 + veor q2, q2, q6 + veor q12, q12, q11 + veor q6, q6, q10 + veor q5, q5, q12 + veor q2, q2, q12 + veor q1, q1, q8 + veor q6, q6, q8 + + veor q12, q3, q0 + veor q8, q7, q4 + veor q11, q15, q14 + 
veor q10, q13, q9 + vand q11, q11, q12 + vand q10, q10, q0 + veor q12, q12, q8 + veor q0, q0, q4 + vand q8, q8, q15 + vand q4, q4, q13 + vand q12, q12, q14 + vand q0, q0, q9 + veor q8, q8, q12 + veor q0, q0, q4 + veor q12, q12, q11 + veor q4, q4, q10 + veor q15, q15, q13 + veor q14, q14, q9 + veor q10, q15, q14 + vand q10, q10, q3 + veor q3, q3, q7 + vand q11, q7, q15 + vand q3, q3, q14 + veor q7, q11, q10 + veor q3, q3, q11 + veor q3, q3, q12 + veor q0, q0, q12 + veor q7, q7, q8 + veor q4, q4, q8 + veor q1, q1, q7 + veor q6, q6, q5 + + veor q4, q4, q1 + veor q2, q2, q7 + veor q5, q5, q7 + veor q4, q4, q2 + veor q7, q7, q0 + veor q4, q4, q5 + veor q3, q3, q6 + veor q6, q6, q1 + veor q3, q3, q4 + + veor q4, q4, q0 + veor q7, q7, q3 + subs r5,r5,#1 + bcc .Ldec_done + @ multiplication by 0x05-0x00-0x04-0x00 + vext.8 q8, q0, q0, #8 + vext.8 q14, q3, q3, #8 + vext.8 q15, q5, q5, #8 + veor q8, q8, q0 + vext.8 q9, q1, q1, #8 + veor q14, q14, q3 + vext.8 q10, q6, q6, #8 + veor q15, q15, q5 + vext.8 q11, q4, q4, #8 + veor q9, q9, q1 + vext.8 q12, q2, q2, #8 + veor q10, q10, q6 + vext.8 q13, q7, q7, #8 + veor q11, q11, q4 + veor q12, q12, q2 + veor q13, q13, q7 + + veor q0, q0, q14 + veor q1, q1, q14 + veor q6, q6, q8 + veor q2, q2, q10 + veor q4, q4, q9 + veor q1, q1, q15 + veor q6, q6, q15 + veor q2, q2, q14 + veor q7, q7, q11 + veor q4, q4, q14 + veor q3, q3, q12 + veor q2, q2, q15 + veor q7, q7, q15 + veor q5, q5, q13 + vext.8 q8, q0, q0, #12 @ x0 <<< 32 + vext.8 q9, q1, q1, #12 + veor q0, q0, q8 @ x0 ^ (x0 <<< 32) + vext.8 q10, q6, q6, #12 + veor q1, q1, q9 + vext.8 q11, q4, q4, #12 + veor q6, q6, q10 + vext.8 q12, q2, q2, #12 + veor q4, q4, q11 + vext.8 q13, q7, q7, #12 + veor q2, q2, q12 + vext.8 q14, q3, q3, #12 + veor q7, q7, q13 + vext.8 q15, q5, q5, #12 + veor q3, q3, q14 + + veor q9, q9, q0 + veor q5, q5, q15 + vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) + veor q10, q10, q1 + veor q8, q8, q5 + veor q9, q9, q5 + vext.8 q1, q1, q1, #8 + veor q13, q13, q2 + veor q0, q0, q8 + veor q14, q14, q7 + veor q1, q1, q9 + vext.8 q8, q2, q2, #8 + veor q12, q12, q4 + vext.8 q9, q7, q7, #8 + veor q15, q15, q3 + vext.8 q2, q4, q4, #8 + veor q11, q11, q6 + vext.8 q7, q5, q5, #8 + veor q12, q12, q5 + vext.8 q4, q3, q3, #8 + veor q11, q11, q5 + vext.8 q3, q6, q6, #8 + veor q5, q9, q13 + veor q11, q11, q2 + veor q7, q7, q15 + veor q6, q4, q14 + veor q4, q8, q12 + veor q2, q3, q10 + vmov q3, q11 + @ vmov q5, q9 + vldmia r6, {q12} @ .LISR + ite eq @ Thumb2 thing, sanity check in ARM + addeq r6,r6,#0x10 + bne .Ldec_loop + vldmia r6, {q12} @ .LISRM0 + b .Ldec_loop +.align 4 +.Ldec_done: + vmov.i8 q8,#0x55 @ compose .LBS0 + vmov.i8 q9,#0x33 @ compose .LBS1 + vshr.u64 q10, q3, #1 + vshr.u64 q11, q2, #1 + veor q10, q10, q5 + veor q11, q11, q7 + vand q10, q10, q8 + vand q11, q11, q8 + veor q5, q5, q10 + vshl.u64 q10, q10, #1 + veor q7, q7, q11 + vshl.u64 q11, q11, #1 + veor q3, q3, q10 + veor q2, q2, q11 + vshr.u64 q10, q6, #1 + vshr.u64 q11, q0, #1 + veor q10, q10, q4 + veor q11, q11, q1 + vand q10, q10, q8 + vand q11, q11, q8 + veor q4, q4, q10 + vshl.u64 q10, q10, #1 + veor q1, q1, q11 + vshl.u64 q11, q11, #1 + veor q6, q6, q10 + veor q0, q0, q11 + vmov.i8 q8,#0x0f @ compose .LBS2 + vshr.u64 q10, q7, #2 + vshr.u64 q11, q2, #2 + veor q10, q10, q5 + veor q11, q11, q3 + vand q10, q10, q9 + vand q11, q11, q9 + veor q5, q5, q10 + vshl.u64 q10, q10, #2 + veor q3, q3, q11 + vshl.u64 q11, q11, #2 + veor q7, q7, q10 + veor q2, q2, q11 + vshr.u64 q10, q1, #2 + vshr.u64 q11, q0, #2 + veor q10, q10, q4 + veor q11, 
q11, q6 + vand q10, q10, q9 + vand q11, q11, q9 + veor q4, q4, q10 + vshl.u64 q10, q10, #2 + veor q6, q6, q11 + vshl.u64 q11, q11, #2 + veor q1, q1, q10 + veor q0, q0, q11 + vshr.u64 q10, q4, #4 + vshr.u64 q11, q6, #4 + veor q10, q10, q5 + veor q11, q11, q3 + vand q10, q10, q8 + vand q11, q11, q8 + veor q5, q5, q10 + vshl.u64 q10, q10, #4 + veor q3, q3, q11 + vshl.u64 q11, q11, #4 + veor q4, q4, q10 + veor q6, q6, q11 + vshr.u64 q10, q1, #4 + vshr.u64 q11, q0, #4 + veor q10, q10, q7 + veor q11, q11, q2 + vand q10, q10, q8 + vand q11, q11, q8 + veor q7, q7, q10 + vshl.u64 q10, q10, #4 + veor q2, q2, q11 + vshl.u64 q11, q11, #4 + veor q1, q1, q10 + veor q0, q0, q11 + vldmia r4, {q8} @ last round key + veor q6, q6, q8 + veor q4, q4, q8 + veor q2, q2, q8 + veor q7, q7, q8 + veor q3, q3, q8 + veor q5, q5, q8 + veor q0, q0, q8 + veor q1, q1, q8 + bx lr +.size _bsaes_decrypt8,.-_bsaes_decrypt8 + +.type _bsaes_const,%object +.align 6 +_bsaes_const: +.LM0ISR: @ InvShiftRows constants + .quad 0x0a0e0206070b0f03, 0x0004080c0d010509 +.LISR: + .quad 0x0504070602010003, 0x0f0e0d0c080b0a09 +.LISRM0: + .quad 0x01040b0e0205080f, 0x0306090c00070a0d +.LM0SR: @ ShiftRows constants + .quad 0x0a0e02060f03070b, 0x0004080c05090d01 +.LSR: + .quad 0x0504070600030201, 0x0f0e0d0c0a09080b +.LSRM0: + .quad 0x0304090e00050a0f, 0x01060b0c0207080d +.LM0: + .quad 0x02060a0e03070b0f, 0x0004080c0105090d +.LREVM0SR: + .quad 0x090d01050c000408, 0x03070b0f060a0e02 +.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>" +.align 6 +.size _bsaes_const,.-_bsaes_const + +.type _bsaes_encrypt8,%function +.align 4 +_bsaes_encrypt8: + adr r6,_bsaes_encrypt8 + vldmia r4!, {q9} @ round 0 key + sub r6,r6,#_bsaes_encrypt8-.LM0SR + + vldmia r6!, {q8} @ .LM0SR +_bsaes_encrypt8_alt: + veor q10, q0, q9 @ xor with round0 key + veor q11, q1, q9 + vtbl.8 d0, {q10}, d16 + vtbl.8 d1, {q10}, d17 + veor q12, q2, q9 + vtbl.8 d2, {q11}, d16 + vtbl.8 d3, {q11}, d17 + veor q13, q3, q9 + vtbl.8 d4, {q12}, d16 + vtbl.8 d5, {q12}, d17 + veor q14, q4, q9 + vtbl.8 d6, {q13}, d16 + vtbl.8 d7, {q13}, d17 + veor q15, q5, q9 + vtbl.8 d8, {q14}, d16 + vtbl.8 d9, {q14}, d17 + veor q10, q6, q9 + vtbl.8 d10, {q15}, d16 + vtbl.8 d11, {q15}, d17 + veor q11, q7, q9 + vtbl.8 d12, {q10}, d16 + vtbl.8 d13, {q10}, d17 + vtbl.8 d14, {q11}, d16 + vtbl.8 d15, {q11}, d17 +_bsaes_encrypt8_bitslice: + vmov.i8 q8,#0x55 @ compose .LBS0 + vmov.i8 q9,#0x33 @ compose .LBS1 + vshr.u64 q10, q6, #1 + vshr.u64 q11, q4, #1 + veor q10, q10, q7 + veor q11, q11, q5 + vand q10, q10, q8 + vand q11, q11, q8 + veor q7, q7, q10 + vshl.u64 q10, q10, #1 + veor q5, q5, q11 + vshl.u64 q11, q11, #1 + veor q6, q6, q10 + veor q4, q4, q11 + vshr.u64 q10, q2, #1 + vshr.u64 q11, q0, #1 + veor q10, q10, q3 + veor q11, q11, q1 + vand q10, q10, q8 + vand q11, q11, q8 + veor q3, q3, q10 + vshl.u64 q10, q10, #1 + veor q1, q1, q11 + vshl.u64 q11, q11, #1 + veor q2, q2, q10 + veor q0, q0, q11 + vmov.i8 q8,#0x0f @ compose .LBS2 + vshr.u64 q10, q5, #2 + vshr.u64 q11, q4, #2 + veor q10, q10, q7 + veor q11, q11, q6 + vand q10, q10, q9 + vand q11, q11, q9 + veor q7, q7, q10 + vshl.u64 q10, q10, #2 + veor q6, q6, q11 + vshl.u64 q11, q11, #2 + veor q5, q5, q10 + veor q4, q4, q11 + vshr.u64 q10, q1, #2 + vshr.u64 q11, q0, #2 + veor q10, q10, q3 + veor q11, q11, q2 + vand q10, q10, q9 + vand q11, q11, q9 + veor q3, q3, q10 + vshl.u64 q10, q10, #2 + veor q2, q2, q11 + vshl.u64 q11, q11, #2 + veor q1, q1, q10 + veor q0, q0, q11 + vshr.u64 q10, q3, #4 + vshr.u64 q11, q2, #4 + veor q10, q10, q7 + veor q11, q11, q6 
+ vand q10, q10, q8 + vand q11, q11, q8 + veor q7, q7, q10 + vshl.u64 q10, q10, #4 + veor q6, q6, q11 + vshl.u64 q11, q11, #4 + veor q3, q3, q10 + veor q2, q2, q11 + vshr.u64 q10, q1, #4 + vshr.u64 q11, q0, #4 + veor q10, q10, q5 + veor q11, q11, q4 + vand q10, q10, q8 + vand q11, q11, q8 + veor q5, q5, q10 + vshl.u64 q10, q10, #4 + veor q4, q4, q11 + vshl.u64 q11, q11, #4 + veor q1, q1, q10 + veor q0, q0, q11 + sub r5,r5,#1 + b .Lenc_sbox +.align 4 +.Lenc_loop: + vldmia r4!, {q8-q11} + veor q8, q8, q0 + veor q9, q9, q1 + vtbl.8 d0, {q8}, d24 + vtbl.8 d1, {q8}, d25 + vldmia r4!, {q8} + veor q10, q10, q2 + vtbl.8 d2, {q9}, d24 + vtbl.8 d3, {q9}, d25 + vldmia r4!, {q9} + veor q11, q11, q3 + vtbl.8 d4, {q10}, d24 + vtbl.8 d5, {q10}, d25 + vldmia r4!, {q10} + vtbl.8 d6, {q11}, d24 + vtbl.8 d7, {q11}, d25 + vldmia r4!, {q11} + veor q8, q8, q4 + veor q9, q9, q5 + vtbl.8 d8, {q8}, d24 + vtbl.8 d9, {q8}, d25 + veor q10, q10, q6 + vtbl.8 d10, {q9}, d24 + vtbl.8 d11, {q9}, d25 + veor q11, q11, q7 + vtbl.8 d12, {q10}, d24 + vtbl.8 d13, {q10}, d25 + vtbl.8 d14, {q11}, d24 + vtbl.8 d15, {q11}, d25 +.Lenc_sbox: + veor q2, q2, q1 + veor q5, q5, q6 + veor q3, q3, q0 + veor q6, q6, q2 + veor q5, q5, q0 + + veor q6, q6, q3 + veor q3, q3, q7 + veor q7, q7, q5 + veor q3, q3, q4 + veor q4, q4, q5 + + veor q2, q2, q7 + veor q3, q3, q1 + veor q1, q1, q5 + veor q11, q7, q4 + veor q10, q1, q2 + veor q9, q5, q3 + veor q13, q2, q4 + vmov q8, q10 + veor q12, q6, q0 + + vorr q10, q10, q9 + veor q15, q11, q8 + vand q14, q11, q12 + vorr q11, q11, q12 + veor q12, q12, q9 + vand q8, q8, q9 + veor q9, q3, q0 + vand q15, q15, q12 + vand q13, q13, q9 + veor q9, q7, q1 + veor q12, q5, q6 + veor q11, q11, q13 + veor q10, q10, q13 + vand q13, q9, q12 + vorr q9, q9, q12 + veor q11, q11, q15 + veor q8, q8, q13 + veor q10, q10, q14 + veor q9, q9, q15 + veor q8, q8, q14 + vand q12, q2, q3 + veor q9, q9, q14 + vand q13, q4, q0 + vand q14, q1, q5 + vorr q15, q7, q6 + veor q11, q11, q12 + veor q9, q9, q14 + veor q8, q8, q15 + veor q10, q10, q13 + + @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3 + + @ new smaller inversion + + vand q14, q11, q9 + vmov q12, q8 + + veor q13, q10, q14 + veor q15, q8, q14 + veor q14, q8, q14 @ q14=q15 + + vbsl q13, q9, q8 + vbsl q15, q11, q10 + veor q11, q11, q10 + + vbsl q12, q13, q14 + vbsl q8, q14, q13 + + vand q14, q12, q15 + veor q9, q9, q8 + + veor q14, q14, q11 + veor q12, q6, q0 + veor q8, q5, q3 + veor q10, q15, q14 + vand q10, q10, q6 + veor q6, q6, q5 + vand q11, q5, q15 + vand q6, q6, q14 + veor q5, q11, q10 + veor q6, q6, q11 + veor q15, q15, q13 + veor q14, q14, q9 + veor q11, q15, q14 + veor q10, q13, q9 + vand q11, q11, q12 + vand q10, q10, q0 + veor q12, q12, q8 + veor q0, q0, q3 + vand q8, q8, q15 + vand q3, q3, q13 + vand q12, q12, q14 + vand q0, q0, q9 + veor q8, q8, q12 + veor q0, q0, q3 + veor q12, q12, q11 + veor q3, q3, q10 + veor q6, q6, q12 + veor q0, q0, q12 + veor q5, q5, q8 + veor q3, q3, q8 + + veor q12, q7, q4 + veor q8, q1, q2 + veor q11, q15, q14 + veor q10, q13, q9 + vand q11, q11, q12 + vand q10, q10, q4 + veor q12, q12, q8 + veor q4, q4, q2 + vand q8, q8, q15 + vand q2, q2, q13 + vand q12, q12, q14 + vand q4, q4, q9 + veor q8, q8, q12 + veor q4, q4, q2 + veor q12, q12, q11 + veor q2, q2, q10 + veor q15, q15, q13 + veor q14, q14, q9 + veor q10, q15, q14 + vand q10, q10, q7 + veor q7, q7, q1 + vand q11, q1, q15 + vand q7, q7, q14 + veor q1, q11, q10 + veor q7, q7, q11 + veor q7, q7, q12 + veor q4, q4, q12 + veor q1, q1, q8 + veor q2, q2, q8 + veor q7, q7, q0 + veor q1, q1, q6 + 
veor q6, q6, q0 + veor q4, q4, q7 + veor q0, q0, q1 + + veor q1, q1, q5 + veor q5, q5, q2 + veor q2, q2, q3 + veor q3, q3, q5 + veor q4, q4, q5 + + veor q6, q6, q3 + subs r5,r5,#1 + bcc .Lenc_done + vext.8 q8, q0, q0, #12 @ x0 <<< 32 + vext.8 q9, q1, q1, #12 + veor q0, q0, q8 @ x0 ^ (x0 <<< 32) + vext.8 q10, q4, q4, #12 + veor q1, q1, q9 + vext.8 q11, q6, q6, #12 + veor q4, q4, q10 + vext.8 q12, q3, q3, #12 + veor q6, q6, q11 + vext.8 q13, q7, q7, #12 + veor q3, q3, q12 + vext.8 q14, q2, q2, #12 + veor q7, q7, q13 + vext.8 q15, q5, q5, #12 + veor q2, q2, q14 + + veor q9, q9, q0 + veor q5, q5, q15 + vext.8 q0, q0, q0, #8 @ (x0 ^ (x0 <<< 32)) <<< 64) + veor q10, q10, q1 + veor q8, q8, q5 + veor q9, q9, q5 + vext.8 q1, q1, q1, #8 + veor q13, q13, q3 + veor q0, q0, q8 + veor q14, q14, q7 + veor q1, q1, q9 + vext.8 q8, q3, q3, #8 + veor q12, q12, q6 + vext.8 q9, q7, q7, #8 + veor q15, q15, q2 + vext.8 q3, q6, q6, #8 + veor q11, q11, q4 + vext.8 q7, q5, q5, #8 + veor q12, q12, q5 + vext.8 q6, q2, q2, #8 + veor q11, q11, q5 + vext.8 q2, q4, q4, #8 + veor q5, q9, q13 + veor q4, q8, q12 + veor q3, q3, q11 + veor q7, q7, q15 + veor q6, q6, q14 + @ vmov q4, q8 + veor q2, q2, q10 + @ vmov q5, q9 + vldmia r6, {q12} @ .LSR + ite eq @ Thumb2 thing, samity check in ARM + addeq r6,r6,#0x10 + bne .Lenc_loop + vldmia r6, {q12} @ .LSRM0 + b .Lenc_loop +.align 4 +.Lenc_done: + vmov.i8 q8,#0x55 @ compose .LBS0 + vmov.i8 q9,#0x33 @ compose .LBS1 + vshr.u64 q10, q2, #1 + vshr.u64 q11, q3, #1 + veor q10, q10, q5 + veor q11, q11, q7 + vand q10, q10, q8 + vand q11, q11, q8 + veor q5, q5, q10 + vshl.u64 q10, q10, #1 + veor q7, q7, q11 + vshl.u64 q11, q11, #1 + veor q2, q2, q10 + veor q3, q3, q11 + vshr.u64 q10, q4, #1 + vshr.u64 q11, q0, #1 + veor q10, q10, q6 + veor q11, q11, q1 + vand q10, q10, q8 + vand q11, q11, q8 + veor q6, q6, q10 + vshl.u64 q10, q10, #1 + veor q1, q1, q11 + vshl.u64 q11, q11, #1 + veor q4, q4, q10 + veor q0, q0, q11 + vmov.i8 q8,#0x0f @ compose .LBS2 + vshr.u64 q10, q7, #2 + vshr.u64 q11, q3, #2 + veor q10, q10, q5 + veor q11, q11, q2 + vand q10, q10, q9 + vand q11, q11, q9 + veor q5, q5, q10 + vshl.u64 q10, q10, #2 + veor q2, q2, q11 + vshl.u64 q11, q11, #2 + veor q7, q7, q10 + veor q3, q3, q11 + vshr.u64 q10, q1, #2 + vshr.u64 q11, q0, #2 + veor q10, q10, q6 + veor q11, q11, q4 + vand q10, q10, q9 + vand q11, q11, q9 + veor q6, q6, q10 + vshl.u64 q10, q10, #2 + veor q4, q4, q11 + vshl.u64 q11, q11, #2 + veor q1, q1, q10 + veor q0, q0, q11 + vshr.u64 q10, q6, #4 + vshr.u64 q11, q4, #4 + veor q10, q10, q5 + veor q11, q11, q2 + vand q10, q10, q8 + vand q11, q11, q8 + veor q5, q5, q10 + vshl.u64 q10, q10, #4 + veor q2, q2, q11 + vshl.u64 q11, q11, #4 + veor q6, q6, q10 + veor q4, q4, q11 + vshr.u64 q10, q1, #4 + vshr.u64 q11, q0, #4 + veor q10, q10, q7 + veor q11, q11, q3 + vand q10, q10, q8 + vand q11, q11, q8 + veor q7, q7, q10 + vshl.u64 q10, q10, #4 + veor q3, q3, q11 + vshl.u64 q11, q11, #4 + veor q1, q1, q10 + veor q0, q0, q11 + vldmia r4, {q8} @ last round key + veor q4, q4, q8 + veor q6, q6, q8 + veor q3, q3, q8 + veor q7, q7, q8 + veor q2, q2, q8 + veor q5, q5, q8 + veor q0, q0, q8 + veor q1, q1, q8 + bx lr +.size _bsaes_encrypt8,.-_bsaes_encrypt8 +.type _bsaes_key_convert,%function +.align 4 +_bsaes_key_convert: + adr r6,_bsaes_key_convert + vld1.8 {q7}, [r4]! @ load round 0 key + sub r6,r6,#_bsaes_key_convert-.LM0 + vld1.8 {q15}, [r4]! 
@ load round 1 key + + vmov.i8 q8, #0x01 @ bit masks + vmov.i8 q9, #0x02 + vmov.i8 q10, #0x04 + vmov.i8 q11, #0x08 + vmov.i8 q12, #0x10 + vmov.i8 q13, #0x20 + vldmia r6, {q14} @ .LM0 + +#ifdef __ARMEL__ + vrev32.8 q7, q7 + vrev32.8 q15, q15 +#endif + sub r5,r5,#1 + vstmia r12!, {q7} @ save round 0 key + b .Lkey_loop + +.align 4 +.Lkey_loop: + vtbl.8 d14,{q15},d28 + vtbl.8 d15,{q15},d29 + vmov.i8 q6, #0x40 + vmov.i8 q15, #0x80 + + vtst.8 q0, q7, q8 + vtst.8 q1, q7, q9 + vtst.8 q2, q7, q10 + vtst.8 q3, q7, q11 + vtst.8 q4, q7, q12 + vtst.8 q5, q7, q13 + vtst.8 q6, q7, q6 + vtst.8 q7, q7, q15 + vld1.8 {q15}, [r4]! @ load next round key + vmvn q0, q0 @ "pnot" + vmvn q1, q1 + vmvn q5, q5 + vmvn q6, q6 +#ifdef __ARMEL__ + vrev32.8 q15, q15 +#endif + subs r5,r5,#1 + vstmia r12!,{q0-q7} @ write bit-sliced round key + bne .Lkey_loop + + vmov.i8 q7,#0x63 @ compose .L63 + @ don't save last round key + bx lr +.size _bsaes_key_convert,.-_bsaes_key_convert +.extern AES_cbc_encrypt +.extern AES_decrypt + +.global bsaes_cbc_encrypt +.type bsaes_cbc_encrypt,%function +.align 5 +bsaes_cbc_encrypt: +#ifndef __KERNEL__ + cmp r2, #128 +#ifndef __thumb__ + blo AES_cbc_encrypt +#else + bhs 1f + b AES_cbc_encrypt +1: +#endif +#endif + + @ it is up to the caller to make sure we are called with enc == 0 + + mov ip, sp + stmdb sp!, {r4-r10, lr} + VFP_ABI_PUSH + ldr r8, [ip] @ IV is 1st arg on the stack + mov r2, r2, lsr#4 @ len in 16 byte blocks + sub sp, #0x10 @ scratch space to carry over the IV + mov r9, sp @ save sp + + ldr r10, [r3, #240] @ get # of rounds +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key + add r12, #96 @ sifze of bit-slices key schedule + + @ populate the key schedule + mov r4, r3 @ pass key + mov r5, r10 @ pass # of rounds + mov sp, r12 @ sp is sp + bl _bsaes_key_convert + vldmia sp, {q6} + vstmia r12, {q15} @ save last round key + veor q7, q7, q6 @ fix up round 0 key + vstmia sp, {q7} +#else + ldr r12, [r3, #244] + eors r12, #1 + beq 0f + + @ populate the key schedule + str r12, [r3, #244] + mov r4, r3 @ pass key + mov r5, r10 @ pass # of rounds + add r12, r3, #248 @ pass key schedule + bl _bsaes_key_convert + add r4, r3, #248 + vldmia r4, {q6} + vstmia r12, {q15} @ save last round key + veor q7, q7, q6 @ fix up round 0 key + vstmia r4, {q7} + +.align 2 +0: +#endif + + vld1.8 {q15}, [r8] @ load IV + b .Lcbc_dec_loop + +.align 4 +.Lcbc_dec_loop: + subs r2, r2, #0x8 + bmi .Lcbc_dec_loop_finish + + vld1.8 {q0-q1}, [r0]! @ load input + vld1.8 {q2-q3}, [r0]! +#ifndef BSAES_ASM_EXTENDED_KEY + mov r4, sp @ pass the key +#else + add r4, r3, #248 +#endif + vld1.8 {q4-q5}, [r0]! + mov r5, r10 + vld1.8 {q6-q7}, [r0] + sub r0, r0, #0x60 + vstmia r9, {q15} @ put aside IV + + bl _bsaes_decrypt8 + + vldmia r9, {q14} @ reload IV + vld1.8 {q8-q9}, [r0]! @ reload input + veor q0, q0, q14 @ ^= IV + vld1.8 {q10-q11}, [r0]! + veor q1, q1, q8 + veor q6, q6, q9 + vld1.8 {q12-q13}, [r0]! + veor q4, q4, q10 + veor q2, q2, q11 + vld1.8 {q14-q15}, [r0]! + veor q7, q7, q12 + vst1.8 {q0-q1}, [r1]! @ write output + veor q3, q3, q13 + vst1.8 {q6}, [r1]! + veor q5, q5, q14 + vst1.8 {q4}, [r1]! + vst1.8 {q2}, [r1]! + vst1.8 {q7}, [r1]! + vst1.8 {q3}, [r1]! + vst1.8 {q5}, [r1]! + + b .Lcbc_dec_loop + +.Lcbc_dec_loop_finish: + adds r2, r2, #8 + beq .Lcbc_dec_done + + vld1.8 {q0}, [r0]! @ load input + cmp r2, #2 + blo .Lcbc_dec_one + vld1.8 {q1}, [r0]! 
+#ifndef BSAES_ASM_EXTENDED_KEY + mov r4, sp @ pass the key +#else + add r4, r3, #248 +#endif + mov r5, r10 + vstmia r9, {q15} @ put aside IV + beq .Lcbc_dec_two + vld1.8 {q2}, [r0]! + cmp r2, #4 + blo .Lcbc_dec_three + vld1.8 {q3}, [r0]! + beq .Lcbc_dec_four + vld1.8 {q4}, [r0]! + cmp r2, #6 + blo .Lcbc_dec_five + vld1.8 {q5}, [r0]! + beq .Lcbc_dec_six + vld1.8 {q6}, [r0]! + sub r0, r0, #0x70 + + bl _bsaes_decrypt8 + + vldmia r9, {q14} @ reload IV + vld1.8 {q8-q9}, [r0]! @ reload input + veor q0, q0, q14 @ ^= IV + vld1.8 {q10-q11}, [r0]! + veor q1, q1, q8 + veor q6, q6, q9 + vld1.8 {q12-q13}, [r0]! + veor q4, q4, q10 + veor q2, q2, q11 + vld1.8 {q15}, [r0]! + veor q7, q7, q12 + vst1.8 {q0-q1}, [r1]! @ write output + veor q3, q3, q13 + vst1.8 {q6}, [r1]! + vst1.8 {q4}, [r1]! + vst1.8 {q2}, [r1]! + vst1.8 {q7}, [r1]! + vst1.8 {q3}, [r1]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_six: + sub r0, r0, #0x60 + bl _bsaes_decrypt8 + vldmia r9,{q14} @ reload IV + vld1.8 {q8-q9}, [r0]! @ reload input + veor q0, q0, q14 @ ^= IV + vld1.8 {q10-q11}, [r0]! + veor q1, q1, q8 + veor q6, q6, q9 + vld1.8 {q12}, [r0]! + veor q4, q4, q10 + veor q2, q2, q11 + vld1.8 {q15}, [r0]! + veor q7, q7, q12 + vst1.8 {q0-q1}, [r1]! @ write output + vst1.8 {q6}, [r1]! + vst1.8 {q4}, [r1]! + vst1.8 {q2}, [r1]! + vst1.8 {q7}, [r1]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_five: + sub r0, r0, #0x50 + bl _bsaes_decrypt8 + vldmia r9, {q14} @ reload IV + vld1.8 {q8-q9}, [r0]! @ reload input + veor q0, q0, q14 @ ^= IV + vld1.8 {q10-q11}, [r0]! + veor q1, q1, q8 + veor q6, q6, q9 + vld1.8 {q15}, [r0]! + veor q4, q4, q10 + vst1.8 {q0-q1}, [r1]! @ write output + veor q2, q2, q11 + vst1.8 {q6}, [r1]! + vst1.8 {q4}, [r1]! + vst1.8 {q2}, [r1]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_four: + sub r0, r0, #0x40 + bl _bsaes_decrypt8 + vldmia r9, {q14} @ reload IV + vld1.8 {q8-q9}, [r0]! @ reload input + veor q0, q0, q14 @ ^= IV + vld1.8 {q10}, [r0]! + veor q1, q1, q8 + veor q6, q6, q9 + vld1.8 {q15}, [r0]! + veor q4, q4, q10 + vst1.8 {q0-q1}, [r1]! @ write output + vst1.8 {q6}, [r1]! + vst1.8 {q4}, [r1]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_three: + sub r0, r0, #0x30 + bl _bsaes_decrypt8 + vldmia r9, {q14} @ reload IV + vld1.8 {q8-q9}, [r0]! @ reload input + veor q0, q0, q14 @ ^= IV + vld1.8 {q15}, [r0]! + veor q1, q1, q8 + veor q6, q6, q9 + vst1.8 {q0-q1}, [r1]! @ write output + vst1.8 {q6}, [r1]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_two: + sub r0, r0, #0x20 + bl _bsaes_decrypt8 + vldmia r9, {q14} @ reload IV + vld1.8 {q8}, [r0]! @ reload input + veor q0, q0, q14 @ ^= IV + vld1.8 {q15}, [r0]! @ reload input + veor q1, q1, q8 + vst1.8 {q0-q1}, [r1]! 
@ write output + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_one: + sub r0, r0, #0x10 + mov r10, r1 @ save original out pointer + mov r1, r9 @ use the iv scratch space as out buffer + mov r2, r3 + vmov q4,q15 @ just in case ensure that IV + vmov q5,q0 @ and input are preserved + bl AES_decrypt + vld1.8 {q0}, [r9,:64] @ load result + veor q0, q0, q4 @ ^= IV + vmov q15, q5 @ q5 holds input + vst1.8 {q0}, [r10] @ write output + +.Lcbc_dec_done: +#ifndef BSAES_ASM_EXTENDED_KEY + vmov.i32 q0, #0 + vmov.i32 q1, #0 +.Lcbc_dec_bzero: @ wipe key schedule [if any] + vstmia sp!, {q0-q1} + cmp sp, r9 + bne .Lcbc_dec_bzero +#endif + + mov sp, r9 + add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb + vst1.8 {q15}, [r8] @ return IV + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} +.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt +.extern AES_encrypt +.global bsaes_ctr32_encrypt_blocks +.type bsaes_ctr32_encrypt_blocks,%function +.align 5 +bsaes_ctr32_encrypt_blocks: + cmp r2, #8 @ use plain AES for + blo .Lctr_enc_short @ small sizes + + mov ip, sp + stmdb sp!, {r4-r10, lr} + VFP_ABI_PUSH + ldr r8, [ip] @ ctr is 1st arg on the stack + sub sp, sp, #0x10 @ scratch space to carry over the ctr + mov r9, sp @ save sp + + ldr r10, [r3, #240] @ get # of rounds +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key + add r12, #96 @ size of bit-sliced key schedule + + @ populate the key schedule + mov r4, r3 @ pass key + mov r5, r10 @ pass # of rounds + mov sp, r12 @ sp is sp + bl _bsaes_key_convert + veor q7,q7,q15 @ fix up last round key + vstmia r12, {q7} @ save last round key + + vld1.8 {q0}, [r8] @ load counter + add r8, r6, #.LREVM0SR-.LM0 @ borrow r8 + vldmia sp, {q4} @ load round0 key +#else + ldr r12, [r3, #244] + eors r12, #1 + beq 0f + + @ populate the key schedule + str r12, [r3, #244] + mov r4, r3 @ pass key + mov r5, r10 @ pass # of rounds + add r12, r3, #248 @ pass key schedule + bl _bsaes_key_convert + veor q7,q7,q15 @ fix up last round key + vstmia r12, {q7} @ save last round key + +.align 2 +0: add r12, r3, #248 + vld1.8 {q0}, [r8] @ load counter + adrl r8, .LREVM0SR @ borrow r8 + vldmia r12, {q4} @ load round0 key + sub sp, #0x10 @ place for adjusted round0 key +#endif + + vmov.i32 q8,#1 @ compose 1<<96 + veor q9,q9,q9 + vrev32.8 q0,q0 + vext.8 q8,q9,q8,#4 + vrev32.8 q4,q4 + vadd.u32 q9,q8,q8 @ compose 2<<96 + vstmia sp, {q4} @ save adjusted round0 key + b .Lctr_enc_loop + +.align 4 +.Lctr_enc_loop: + vadd.u32 q10, q8, q9 @ compose 3<<96 + vadd.u32 q1, q0, q8 @ +1 + vadd.u32 q2, q0, q9 @ +2 + vadd.u32 q3, q0, q10 @ +3 + vadd.u32 q4, q1, q10 + vadd.u32 q5, q2, q10 + vadd.u32 q6, q3, q10 + vadd.u32 q7, q4, q10 + vadd.u32 q10, q5, q10 @ next counter + + @ Borrow prologue from _bsaes_encrypt8 to use the opportunity + @ to flip byte order in 32-bit counter + + vldmia sp, {q9} @ load round0 key +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x10 @ pass next round key +#else + add r4, r3, #264 +#endif + vldmia r8, {q8} @ .LREVM0SR + mov r5, r10 @ pass rounds + vstmia r9, {q10} @ save next counter + sub r6, r8, #.LREVM0SR-.LSR @ pass constants + + bl _bsaes_encrypt8_alt + + subs r2, r2, #8 + blo .Lctr_enc_loop_done + + vld1.8 {q8-q9}, [r0]! @ load input + vld1.8 {q10-q11}, [r0]! + veor q0, q8 + veor q1, q9 + vld1.8 {q12-q13}, [r0]! + veor q4, q10 + veor q6, q11 + vld1.8 {q14-q15}, [r0]! + veor q3, q12 + vst1.8 {q0-q1}, [r1]! @ write output + veor q7, q13 + veor q2, q14 + vst1.8 {q4}, [r1]! + veor q5, q15 + vst1.8 {q6}, [r1]! 
+ vmov.i32 q8, #1 @ compose 1<<96 + vst1.8 {q3}, [r1]! + veor q9, q9, q9 + vst1.8 {q7}, [r1]! + vext.8 q8, q9, q8, #4 + vst1.8 {q2}, [r1]! + vadd.u32 q9,q8,q8 @ compose 2<<96 + vst1.8 {q5}, [r1]! + vldmia r9, {q0} @ load counter + + bne .Lctr_enc_loop + b .Lctr_enc_done + +.align 4 +.Lctr_enc_loop_done: + add r2, r2, #8 + vld1.8 {q8}, [r0]! @ load input + veor q0, q8 + vst1.8 {q0}, [r1]! @ write output + cmp r2, #2 + blo .Lctr_enc_done + vld1.8 {q9}, [r0]! + veor q1, q9 + vst1.8 {q1}, [r1]! + beq .Lctr_enc_done + vld1.8 {q10}, [r0]! + veor q4, q10 + vst1.8 {q4}, [r1]! + cmp r2, #4 + blo .Lctr_enc_done + vld1.8 {q11}, [r0]! + veor q6, q11 + vst1.8 {q6}, [r1]! + beq .Lctr_enc_done + vld1.8 {q12}, [r0]! + veor q3, q12 + vst1.8 {q3}, [r1]! + cmp r2, #6 + blo .Lctr_enc_done + vld1.8 {q13}, [r0]! + veor q7, q13 + vst1.8 {q7}, [r1]! + beq .Lctr_enc_done + vld1.8 {q14}, [r0] + veor q2, q14 + vst1.8 {q2}, [r1]! + +.Lctr_enc_done: + vmov.i32 q0, #0 + vmov.i32 q1, #0 +#ifndef BSAES_ASM_EXTENDED_KEY +.Lctr_enc_bzero: @ wipe key schedule [if any] + vstmia sp!, {q0-q1} + cmp sp, r9 + bne .Lctr_enc_bzero +#else + vstmia sp, {q0-q1} +#endif + + mov sp, r9 + add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} @ return + +.align 4 +.Lctr_enc_short: + ldr ip, [sp] @ ctr pointer is passed on stack + stmdb sp!, {r4-r8, lr} + + mov r4, r0 @ copy arguments + mov r5, r1 + mov r6, r2 + mov r7, r3 + ldr r8, [ip, #12] @ load counter LSW + vld1.8 {q1}, [ip] @ load whole counter value +#ifdef __ARMEL__ + rev r8, r8 +#endif + sub sp, sp, #0x10 + vst1.8 {q1}, [sp,:64] @ copy counter value + sub sp, sp, #0x10 + +.Lctr_enc_short_loop: + add r0, sp, #0x10 @ input counter value + mov r1, sp @ output on the stack + mov r2, r7 @ key + + bl AES_encrypt + + vld1.8 {q0}, [r4]! @ load input + vld1.8 {q1}, [sp,:64] @ load encrypted counter + add r8, r8, #1 +#ifdef __ARMEL__ + rev r0, r8 + str r0, [sp, #0x1c] @ next counter value +#else + str r8, [sp, #0x1c] @ next counter value +#endif + veor q0,q0,q1 + vst1.8 {q0}, [r5]! 
@ store output + subs r6, r6, #1 + bne .Lctr_enc_short_loop + + vmov.i32 q0, #0 + vmov.i32 q1, #0 + vstmia sp!, {q0-q1} + + ldmia sp!, {r4-r8, pc} +.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks +.globl bsaes_xts_encrypt +.type bsaes_xts_encrypt,%function +.align 4 +bsaes_xts_encrypt: + mov ip, sp + stmdb sp!, {r4-r10, lr} @ 0x20 + VFP_ABI_PUSH + mov r6, sp @ future r3 + + mov r7, r0 + mov r8, r1 + mov r9, r2 + mov r10, r3 + + sub r0, sp, #0x10 @ 0x10 + bic r0, #0xf @ align at 16 bytes + mov sp, r0 + +#ifdef XTS_CHAIN_TWEAK + ldr r0, [ip] @ pointer to input tweak +#else + @ generate initial tweak + ldr r0, [ip, #4] @ iv[] + mov r1, sp + ldr r2, [ip, #0] @ key2 + bl AES_encrypt + mov r0,sp @ pointer to initial tweak +#endif + + ldr r1, [r10, #240] @ get # of rounds + mov r3, r6 +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key + @ add r12, #96 @ size of bit-sliced key schedule + sub r12, #48 @ place for tweak[9] + + @ populate the key schedule + mov r4, r10 @ pass key + mov r5, r1 @ pass # of rounds + mov sp, r12 + add r12, #0x90 @ pass key schedule + bl _bsaes_key_convert + veor q7, q7, q15 @ fix up last round key + vstmia r12, {q7} @ save last round key +#else + ldr r12, [r10, #244] + eors r12, #1 + beq 0f + + str r12, [r10, #244] + mov r4, r10 @ pass key + mov r5, r1 @ pass # of rounds + add r12, r10, #248 @ pass key schedule + bl _bsaes_key_convert + veor q7, q7, q15 @ fix up last round key + vstmia r12, {q7} + +.align 2 +0: sub sp, #0x90 @ place for tweak[9] +#endif + + vld1.8 {q8}, [r0] @ initial tweak + adr r2, .Lxts_magic + + subs r9, #0x80 + blo .Lxts_enc_short + b .Lxts_enc_loop + +.align 4 +.Lxts_enc_loop: + vldmia r2, {q5} @ load XTS magic + vshr.s64 q6, q8, #63 + mov r0, sp + vand q6, q6, q5 + vadd.u64 q9, q8, q8 + vst1.64 {q8}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q9, #63 + veor q9, q9, q6 + vand q7, q7, q5 + vadd.u64 q10, q9, q9 + vst1.64 {q9}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q10, #63 + veor q10, q10, q7 + vand q6, q6, q5 + vld1.8 {q0}, [r7]! + vadd.u64 q11, q10, q10 + vst1.64 {q10}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q11, #63 + veor q11, q11, q6 + vand q7, q7, q5 + vld1.8 {q1}, [r7]! + veor q0, q0, q8 + vadd.u64 q12, q11, q11 + vst1.64 {q11}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q12, #63 + veor q12, q12, q7 + vand q6, q6, q5 + vld1.8 {q2}, [r7]! + veor q1, q1, q9 + vadd.u64 q13, q12, q12 + vst1.64 {q12}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q13, #63 + veor q13, q13, q6 + vand q7, q7, q5 + vld1.8 {q3}, [r7]! + veor q2, q2, q10 + vadd.u64 q14, q13, q13 + vst1.64 {q13}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q14, #63 + veor q14, q14, q7 + vand q6, q6, q5 + vld1.8 {q4}, [r7]! + veor q3, q3, q11 + vadd.u64 q15, q14, q14 + vst1.64 {q14}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q15, #63 + veor q15, q15, q6 + vand q7, q7, q5 + vld1.8 {q5}, [r7]! + veor q4, q4, q12 + vadd.u64 q8, q15, q15 + vst1.64 {q15}, [r0,:128]! + vswp d15,d14 + veor q8, q8, q7 + vst1.64 {q8}, [r0,:128] @ next round tweak + + vld1.8 {q6-q7}, [r7]! + veor q5, q5, q13 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q6, q6, q14 + mov r5, r1 @ pass rounds + veor q7, q7, q15 + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12-q13}, [r0,:128]! + veor q1, q1, q9 + veor q8, q4, q10 + vst1.8 {q0-q1}, [r8]! 
+ veor q9, q6, q11 + vld1.64 {q14-q15}, [r0,:128]! + veor q10, q3, q12 + vst1.8 {q8-q9}, [r8]! + veor q11, q7, q13 + veor q12, q2, q14 + vst1.8 {q10-q11}, [r8]! + veor q13, q5, q15 + vst1.8 {q12-q13}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + + subs r9, #0x80 + bpl .Lxts_enc_loop + +.Lxts_enc_short: + adds r9, #0x70 + bmi .Lxts_enc_done + + vldmia r2, {q5} @ load XTS magic + vshr.s64 q7, q8, #63 + mov r0, sp + vand q7, q7, q5 + vadd.u64 q9, q8, q8 + vst1.64 {q8}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q9, #63 + veor q9, q9, q7 + vand q6, q6, q5 + vadd.u64 q10, q9, q9 + vst1.64 {q9}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q10, #63 + veor q10, q10, q6 + vand q7, q7, q5 + vld1.8 {q0}, [r7]! + subs r9, #0x10 + bmi .Lxts_enc_1 + vadd.u64 q11, q10, q10 + vst1.64 {q10}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q11, #63 + veor q11, q11, q7 + vand q6, q6, q5 + vld1.8 {q1}, [r7]! + subs r9, #0x10 + bmi .Lxts_enc_2 + veor q0, q0, q8 + vadd.u64 q12, q11, q11 + vst1.64 {q11}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q12, #63 + veor q12, q12, q6 + vand q7, q7, q5 + vld1.8 {q2}, [r7]! + subs r9, #0x10 + bmi .Lxts_enc_3 + veor q1, q1, q9 + vadd.u64 q13, q12, q12 + vst1.64 {q12}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q13, #63 + veor q13, q13, q7 + vand q6, q6, q5 + vld1.8 {q3}, [r7]! + subs r9, #0x10 + bmi .Lxts_enc_4 + veor q2, q2, q10 + vadd.u64 q14, q13, q13 + vst1.64 {q13}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q14, #63 + veor q14, q14, q6 + vand q7, q7, q5 + vld1.8 {q4}, [r7]! + subs r9, #0x10 + bmi .Lxts_enc_5 + veor q3, q3, q11 + vadd.u64 q15, q14, q14 + vst1.64 {q14}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q15, #63 + veor q15, q15, q7 + vand q6, q6, q5 + vld1.8 {q5}, [r7]! + subs r9, #0x10 + bmi .Lxts_enc_6 + veor q4, q4, q12 + sub r9, #0x10 + vst1.64 {q15}, [r0,:128] @ next round tweak + + vld1.8 {q6}, [r7]! + veor q5, q5, q13 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q6, q6, q14 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12-q13}, [r0,:128]! + veor q1, q1, q9 + veor q8, q4, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q6, q11 + vld1.64 {q14}, [r0,:128]! + veor q10, q3, q12 + vst1.8 {q8-q9}, [r8]! + veor q11, q7, q13 + veor q12, q2, q14 + vst1.8 {q10-q11}, [r8]! + vst1.8 {q12}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_6: + vst1.64 {q14}, [r0,:128] @ next round tweak + + veor q4, q4, q12 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q5, q5, q13 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12-q13}, [r0,:128]! + veor q1, q1, q9 + veor q8, q4, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q6, q11 + veor q10, q3, q12 + vst1.8 {q8-q9}, [r8]! + veor q11, q7, q13 + vst1.8 {q10-q11}, [r8]! 
+ + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_enc_done + +@ put this in range for both ARM and Thumb mode adr instructions +.align 5 +.Lxts_magic: + .quad 1, 0x87 + +.align 5 +.Lxts_enc_5: + vst1.64 {q13}, [r0,:128] @ next round tweak + + veor q3, q3, q11 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q4, q4, q12 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12}, [r0,:128]! + veor q1, q1, q9 + veor q8, q4, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q6, q11 + veor q10, q3, q12 + vst1.8 {q8-q9}, [r8]! + vst1.8 {q10}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_4: + vst1.64 {q12}, [r0,:128] @ next round tweak + + veor q2, q2, q10 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q3, q3, q11 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + veor q1, q1, q9 + veor q8, q4, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q6, q11 + vst1.8 {q8-q9}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_3: + vst1.64 {q11}, [r0,:128] @ next round tweak + + veor q1, q1, q9 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q2, q2, q10 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10}, [r0,:128]! + veor q0, q0, q8 + veor q1, q1, q9 + veor q8, q4, q10 + vst1.8 {q0-q1}, [r8]! + vst1.8 {q8}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_2: + vst1.64 {q10}, [r0,:128] @ next round tweak + + veor q0, q0, q8 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q1, q1, q9 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + veor q0, q0, q8 + veor q1, q1, q9 + vst1.8 {q0-q1}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_1: + mov r0, sp + veor q0, q8 + mov r1, sp + vst1.8 {q0}, [sp,:128] + mov r2, r10 + mov r4, r3 @ preserve fp + + bl AES_encrypt + + vld1.8 {q0}, [sp,:128] + veor q0, q0, q8 + vst1.8 {q0}, [r8]! 
+ mov r3, r4 + + vmov q8, q9 @ next round tweak + +.Lxts_enc_done: +#ifndef XTS_CHAIN_TWEAK + adds r9, #0x10 + beq .Lxts_enc_ret + sub r6, r8, #0x10 + +.Lxts_enc_steal: + ldrb r0, [r7], #1 + ldrb r1, [r8, #-0x10] + strb r0, [r8, #-0x10] + strb r1, [r8], #1 + + subs r9, #1 + bhi .Lxts_enc_steal + + vld1.8 {q0}, [r6] + mov r0, sp + veor q0, q0, q8 + mov r1, sp + vst1.8 {q0}, [sp,:128] + mov r2, r10 + mov r4, r3 @ preserve fp + + bl AES_encrypt + + vld1.8 {q0}, [sp,:128] + veor q0, q0, q8 + vst1.8 {q0}, [r6] + mov r3, r4 +#endif + +.Lxts_enc_ret: + bic r0, r3, #0xf + vmov.i32 q0, #0 + vmov.i32 q1, #0 +#ifdef XTS_CHAIN_TWEAK + ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak +#endif +.Lxts_enc_bzero: @ wipe key schedule [if any] + vstmia sp!, {q0-q1} + cmp sp, r0 + bne .Lxts_enc_bzero + + mov sp, r3 +#ifdef XTS_CHAIN_TWEAK + vst1.8 {q8}, [r1] +#endif + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} @ return + +.size bsaes_xts_encrypt,.-bsaes_xts_encrypt + +.globl bsaes_xts_decrypt +.type bsaes_xts_decrypt,%function +.align 4 +bsaes_xts_decrypt: + mov ip, sp + stmdb sp!, {r4-r10, lr} @ 0x20 + VFP_ABI_PUSH + mov r6, sp @ future r3 + + mov r7, r0 + mov r8, r1 + mov r9, r2 + mov r10, r3 + + sub r0, sp, #0x10 @ 0x10 + bic r0, #0xf @ align at 16 bytes + mov sp, r0 + +#ifdef XTS_CHAIN_TWEAK + ldr r0, [ip] @ pointer to input tweak +#else + @ generate initial tweak + ldr r0, [ip, #4] @ iv[] + mov r1, sp + ldr r2, [ip, #0] @ key2 + bl AES_encrypt + mov r0, sp @ pointer to initial tweak +#endif + + ldr r1, [r10, #240] @ get # of rounds + mov r3, r6 +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key + @ add r12, #96 @ size of bit-sliced key schedule + sub r12, #48 @ place for tweak[9] + + @ populate the key schedule + mov r4, r10 @ pass key + mov r5, r1 @ pass # of rounds + mov sp, r12 + add r12, #0x90 @ pass key schedule + bl _bsaes_key_convert + add r4, sp, #0x90 + vldmia r4, {q6} + vstmia r12, {q15} @ save last round key + veor q7, q7, q6 @ fix up round 0 key + vstmia r4, {q7} +#else + ldr r12, [r10, #244] + eors r12, #1 + beq 0f + + str r12, [r10, #244] + mov r4, r10 @ pass key + mov r5, r1 @ pass # of rounds + add r12, r10, #248 @ pass key schedule + bl _bsaes_key_convert + add r4, r10, #248 + vldmia r4, {q6} + vstmia r12, {q15} @ save last round key + veor q7, q7, q6 @ fix up round 0 key + vstmia r4, {q7} + +.align 2 +0: sub sp, #0x90 @ place for tweak[9] +#endif + vld1.8 {q8}, [r0] @ initial tweak + adr r2, .Lxts_magic + + tst r9, #0xf @ if not multiple of 16 + it ne @ Thumb2 thing, sanity check in ARM + subne r9, #0x10 @ subtract another 16 bytes + subs r9, #0x80 + + blo .Lxts_dec_short + b .Lxts_dec_loop + +.align 4 +.Lxts_dec_loop: + vldmia r2, {q5} @ load XTS magic + vshr.s64 q6, q8, #63 + mov r0, sp + vand q6, q6, q5 + vadd.u64 q9, q8, q8 + vst1.64 {q8}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q9, #63 + veor q9, q9, q6 + vand q7, q7, q5 + vadd.u64 q10, q9, q9 + vst1.64 {q9}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q10, #63 + veor q10, q10, q7 + vand q6, q6, q5 + vld1.8 {q0}, [r7]! + vadd.u64 q11, q10, q10 + vst1.64 {q10}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q11, #63 + veor q11, q11, q6 + vand q7, q7, q5 + vld1.8 {q1}, [r7]! + veor q0, q0, q8 + vadd.u64 q12, q11, q11 + vst1.64 {q11}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q12, #63 + veor q12, q12, q7 + vand q6, q6, q5 + vld1.8 {q2}, [r7]! + veor q1, q1, q9 + vadd.u64 q13, q12, q12 + vst1.64 {q12}, [r0,:128]! 
+ vswp d13,d12 + vshr.s64 q7, q13, #63 + veor q13, q13, q6 + vand q7, q7, q5 + vld1.8 {q3}, [r7]! + veor q2, q2, q10 + vadd.u64 q14, q13, q13 + vst1.64 {q13}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q14, #63 + veor q14, q14, q7 + vand q6, q6, q5 + vld1.8 {q4}, [r7]! + veor q3, q3, q11 + vadd.u64 q15, q14, q14 + vst1.64 {q14}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q15, #63 + veor q15, q15, q6 + vand q7, q7, q5 + vld1.8 {q5}, [r7]! + veor q4, q4, q12 + vadd.u64 q8, q15, q15 + vst1.64 {q15}, [r0,:128]! + vswp d15,d14 + veor q8, q8, q7 + vst1.64 {q8}, [r0,:128] @ next round tweak + + vld1.8 {q6-q7}, [r7]! + veor q5, q5, q13 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q6, q6, q14 + mov r5, r1 @ pass rounds + veor q7, q7, q15 + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12-q13}, [r0,:128]! + veor q1, q1, q9 + veor q8, q6, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q4, q11 + vld1.64 {q14-q15}, [r0,:128]! + veor q10, q2, q12 + vst1.8 {q8-q9}, [r8]! + veor q11, q7, q13 + veor q12, q3, q14 + vst1.8 {q10-q11}, [r8]! + veor q13, q5, q15 + vst1.8 {q12-q13}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + + subs r9, #0x80 + bpl .Lxts_dec_loop + +.Lxts_dec_short: + adds r9, #0x70 + bmi .Lxts_dec_done + + vldmia r2, {q5} @ load XTS magic + vshr.s64 q7, q8, #63 + mov r0, sp + vand q7, q7, q5 + vadd.u64 q9, q8, q8 + vst1.64 {q8}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q9, #63 + veor q9, q9, q7 + vand q6, q6, q5 + vadd.u64 q10, q9, q9 + vst1.64 {q9}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q10, #63 + veor q10, q10, q6 + vand q7, q7, q5 + vld1.8 {q0}, [r7]! + subs r9, #0x10 + bmi .Lxts_dec_1 + vadd.u64 q11, q10, q10 + vst1.64 {q10}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q11, #63 + veor q11, q11, q7 + vand q6, q6, q5 + vld1.8 {q1}, [r7]! + subs r9, #0x10 + bmi .Lxts_dec_2 + veor q0, q0, q8 + vadd.u64 q12, q11, q11 + vst1.64 {q11}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q12, #63 + veor q12, q12, q6 + vand q7, q7, q5 + vld1.8 {q2}, [r7]! + subs r9, #0x10 + bmi .Lxts_dec_3 + veor q1, q1, q9 + vadd.u64 q13, q12, q12 + vst1.64 {q12}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q13, #63 + veor q13, q13, q7 + vand q6, q6, q5 + vld1.8 {q3}, [r7]! + subs r9, #0x10 + bmi .Lxts_dec_4 + veor q2, q2, q10 + vadd.u64 q14, q13, q13 + vst1.64 {q13}, [r0,:128]! + vswp d13,d12 + vshr.s64 q7, q14, #63 + veor q14, q14, q6 + vand q7, q7, q5 + vld1.8 {q4}, [r7]! + subs r9, #0x10 + bmi .Lxts_dec_5 + veor q3, q3, q11 + vadd.u64 q15, q14, q14 + vst1.64 {q14}, [r0,:128]! + vswp d15,d14 + vshr.s64 q6, q15, #63 + veor q15, q15, q7 + vand q6, q6, q5 + vld1.8 {q5}, [r7]! + subs r9, #0x10 + bmi .Lxts_dec_6 + veor q4, q4, q12 + sub r9, #0x10 + vst1.64 {q15}, [r0,:128] @ next round tweak + + vld1.8 {q6}, [r7]! + veor q5, q5, q13 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q6, q6, q14 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12-q13}, [r0,:128]! + veor q1, q1, q9 + veor q8, q6, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q4, q11 + vld1.64 {q14}, [r0,:128]! + veor q10, q2, q12 + vst1.8 {q8-q9}, [r8]! + veor q11, q7, q13 + veor q12, q3, q14 + vst1.8 {q10-q11}, [r8]! + vst1.8 {q12}, [r8]! 
+ + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_6: + vst1.64 {q14}, [r0,:128] @ next round tweak + + veor q4, q4, q12 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q5, q5, q13 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12-q13}, [r0,:128]! + veor q1, q1, q9 + veor q8, q6, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q4, q11 + veor q10, q2, q12 + vst1.8 {q8-q9}, [r8]! + veor q11, q7, q13 + vst1.8 {q10-q11}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_5: + vst1.64 {q13}, [r0,:128] @ next round tweak + + veor q3, q3, q11 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q4, q4, q12 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + vld1.64 {q12}, [r0,:128]! + veor q1, q1, q9 + veor q8, q6, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q4, q11 + veor q10, q2, q12 + vst1.8 {q8-q9}, [r8]! + vst1.8 {q10}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_4: + vst1.64 {q12}, [r0,:128] @ next round tweak + + veor q2, q2, q10 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q3, q3, q11 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10-q11}, [r0,:128]! + veor q0, q0, q8 + veor q1, q1, q9 + veor q8, q6, q10 + vst1.8 {q0-q1}, [r8]! + veor q9, q4, q11 + vst1.8 {q8-q9}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_3: + vst1.64 {q11}, [r0,:128] @ next round tweak + + veor q1, q1, q9 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q2, q2, q10 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + vld1.64 {q10}, [r0,:128]! + veor q0, q0, q8 + veor q1, q1, q9 + veor q8, q6, q10 + vst1.8 {q0-q1}, [r8]! + vst1.8 {q8}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_2: + vst1.64 {q10}, [r0,:128] @ next round tweak + + veor q0, q0, q8 +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, r10, #248 @ pass key schedule +#endif + veor q1, q1, q9 + mov r5, r1 @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {q8-q9}, [r0,:128]! + veor q0, q0, q8 + veor q1, q1, q9 + vst1.8 {q0-q1}, [r8]! + + vld1.64 {q8}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_1: + mov r0, sp + veor q0, q8 + mov r1, sp + vst1.8 {q0}, [sp,:128] + mov r2, r10 + mov r4, r3 @ preserve fp + mov r5, r2 @ preserve magic + + bl AES_decrypt + + vld1.8 {q0}, [sp,:128] + veor q0, q0, q8 + vst1.8 {q0}, [r8]! + mov r3, r4 + mov r2, r5 + + vmov q8, q9 @ next round tweak + +.Lxts_dec_done: +#ifndef XTS_CHAIN_TWEAK + adds r9, #0x10 + beq .Lxts_dec_ret + + @ calculate one round of extra tweak for the stolen ciphertext + vldmia r2, {q5} + vshr.s64 q6, q8, #63 + vand q6, q6, q5 + vadd.u64 q9, q8, q8 + vswp d13,d12 + veor q9, q9, q6 + + @ perform the final decryption with the last tweak value + vld1.8 {q0}, [r7]! 
+ mov r0, sp + veor q0, q0, q9 + mov r1, sp + vst1.8 {q0}, [sp,:128] + mov r2, r10 + mov r4, r3 @ preserve fp + + bl AES_decrypt + + vld1.8 {q0}, [sp,:128] + veor q0, q0, q9 + vst1.8 {q0}, [r8] + + mov r6, r8 +.Lxts_dec_steal: + ldrb r1, [r8] + ldrb r0, [r7], #1 + strb r1, [r8, #0x10] + strb r0, [r8], #1 + + subs r9, #1 + bhi .Lxts_dec_steal + + vld1.8 {q0}, [r6] + mov r0, sp + veor q0, q8 + mov r1, sp + vst1.8 {q0}, [sp,:128] + mov r2, r10 + + bl AES_decrypt + + vld1.8 {q0}, [sp,:128] + veor q0, q0, q8 + vst1.8 {q0}, [r6] + mov r3, r4 +#endif + +.Lxts_dec_ret: + bic r0, r3, #0xf + vmov.i32 q0, #0 + vmov.i32 q1, #0 +#ifdef XTS_CHAIN_TWEAK + ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak +#endif +.Lxts_dec_bzero: @ wipe key schedule [if any] + vstmia sp!, {q0-q1} + cmp sp, r0 + bne .Lxts_dec_bzero + + mov sp, r3 +#ifdef XTS_CHAIN_TWEAK + vst1.8 {q8}, [r1] +#endif + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} @ return + +.size bsaes_xts_decrypt,.-bsaes_xts_decrypt +#endif diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c new file mode 100644 index 00000000000..4522366da75 --- /dev/null +++ b/arch/arm/crypto/aesbs-glue.c @@ -0,0 +1,434 @@ +/* + * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES + * + * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <asm/neon.h> +#include <crypto/aes.h> +#include <crypto/ablk_helper.h> +#include <crypto/algapi.h> +#include <linux/module.h> + +#include "aes_glue.h" + +#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE) + +struct BS_KEY { + struct AES_KEY rk; + int converted; + u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE]; +} __aligned(8); + +asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in); +asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in); + +asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes, + struct BS_KEY *key, u8 iv[]); + +asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks, + struct BS_KEY *key, u8 const iv[]); + +asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes, + struct BS_KEY *key, u8 tweak[]); + +asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes, + struct BS_KEY *key, u8 tweak[]); + +struct aesbs_cbc_ctx { + struct AES_KEY enc; + struct BS_KEY dec; +}; + +struct aesbs_ctr_ctx { + struct BS_KEY enc; +}; + +struct aesbs_xts_ctx { + struct BS_KEY enc; + struct BS_KEY dec; + struct AES_KEY twkey; +}; + +static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + int bits = key_len * 8; + + if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + ctx->dec.rk = ctx->enc; + private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk); + ctx->dec.converted = 0; + return 0; +} + +static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm); + int bits = key_len * 8; + + if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + ctx->enc.converted = 0; + return 0; +} + +static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 
*in_key, + unsigned int key_len) +{ + struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm); + int bits = key_len * 4; + + if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + ctx->dec.rk = ctx->enc.rk; + private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk); + private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey); + ctx->enc.converted = ctx->dec.converted = 0; + return 0; +} + +static int aesbs_cbc_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while (walk.nbytes) { + u32 blocks = walk.nbytes / AES_BLOCK_SIZE; + u8 *src = walk.src.virt.addr; + + if (walk.dst.virt.addr == walk.src.virt.addr) { + u8 *iv = walk.iv; + + do { + crypto_xor(src, iv, AES_BLOCK_SIZE); + AES_encrypt(src, src, &ctx->enc); + iv = src; + src += AES_BLOCK_SIZE; + } while (--blocks); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + } else { + u8 *dst = walk.dst.virt.addr; + + do { + crypto_xor(walk.iv, src, AES_BLOCK_SIZE); + AES_encrypt(walk.iv, dst, &ctx->enc); + memcpy(walk.iv, dst, AES_BLOCK_SIZE); + src += AES_BLOCK_SIZE; + dst += AES_BLOCK_SIZE; + } while (--blocks); + } + err = blkcipher_walk_done(desc, &walk, 0); + } + return err; +} + +static int aesbs_cbc_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE); + + while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) { + kernel_neon_begin(); + bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, &ctx->dec, walk.iv); + kernel_neon_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } + while (walk.nbytes) { + u32 blocks = walk.nbytes / AES_BLOCK_SIZE; + u8 *dst = walk.dst.virt.addr; + u8 *src = walk.src.virt.addr; + u8 bk[2][AES_BLOCK_SIZE]; + u8 *iv = walk.iv; + + do { + if (walk.dst.virt.addr == walk.src.virt.addr) + memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE); + + AES_decrypt(src, dst, &ctx->dec.rk); + crypto_xor(dst, iv, AES_BLOCK_SIZE); + + if (walk.dst.virt.addr == walk.src.virt.addr) + iv = bk[blocks & 1]; + else + iv = src; + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + } while (--blocks); + err = blkcipher_walk_done(desc, &walk, 0); + } + return err; +} + +static void inc_be128_ctr(__be32 ctr[], u32 addend) +{ + int i; + + for (i = 3; i >= 0; i--, addend = 1) { + u32 n = be32_to_cpu(ctr[i]) + addend; + + ctr[i] = cpu_to_be32(n); + if (n >= addend) + break; + } +} + +static int aesbs_ctr_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + u32 blocks; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE); + + while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) { + u32 tail = walk.nbytes % AES_BLOCK_SIZE; + __be32 *ctr = (__be32 *)walk.iv; + u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]); + + /* avoid 32 bit counter overflow in the NEON code */ + if (unlikely(headroom < blocks)) { + blocks = 
headroom + 1; + tail = walk.nbytes - blocks * AES_BLOCK_SIZE; + } + kernel_neon_begin(); + bsaes_ctr32_encrypt_blocks(walk.src.virt.addr, + walk.dst.virt.addr, blocks, + &ctx->enc, walk.iv); + kernel_neon_end(); + inc_be128_ctr(ctr, blocks); + + nbytes -= blocks * AES_BLOCK_SIZE; + if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE) + break; + + err = blkcipher_walk_done(desc, &walk, tail); + } + if (walk.nbytes) { + u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; + u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; + u8 ks[AES_BLOCK_SIZE]; + + AES_encrypt(walk.iv, ks, &ctx->enc.rk); + if (tdst != tsrc) + memcpy(tdst, tsrc, nbytes); + crypto_xor(tdst, ks, nbytes); + err = blkcipher_walk_done(desc, &walk, 0); + } + return err; +} + +static int aesbs_xts_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE); + + /* generate the initial tweak */ + AES_encrypt(walk.iv, walk.iv, &ctx->twkey); + + while (walk.nbytes) { + kernel_neon_begin(); + bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, &ctx->enc, walk.iv); + kernel_neon_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } + return err; +} + +static int aesbs_xts_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE); + + /* generate the initial tweak */ + AES_encrypt(walk.iv, walk.iv, &ctx->twkey); + + while (walk.nbytes) { + kernel_neon_begin(); + bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, + walk.nbytes, &ctx->dec, walk.iv); + kernel_neon_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } + return err; +} + +static struct crypto_alg aesbs_algs[] = { { + .cra_name = "__cbc-aes-neonbs", + .cra_driver_name = "__driver-cbc-aes-neonbs", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aesbs_cbc_ctx), + .cra_alignmask = 7, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aesbs_cbc_set_key, + .encrypt = aesbs_cbc_encrypt, + .decrypt = aesbs_cbc_decrypt, + }, +}, { + .cra_name = "__ctr-aes-neonbs", + .cra_driver_name = "__driver-ctr-aes-neonbs", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct aesbs_ctr_ctx), + .cra_alignmask = 7, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aesbs_ctr_set_key, + .encrypt = aesbs_ctr_encrypt, + .decrypt = aesbs_ctr_encrypt, + }, +}, { + .cra_name = "__xts-aes-neonbs", + .cra_driver_name = "__driver-xts-aes-neonbs", + .cra_priority = 0, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aesbs_xts_ctx), + .cra_alignmask = 7, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + 
.cra_blkcipher = { + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aesbs_xts_set_key, + .encrypt = aesbs_xts_encrypt, + .decrypt = aesbs_xts_decrypt, + }, +}, { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-neonbs", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct async_helper_ctx), + .cra_alignmask = 7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = ablk_init, + .cra_exit = ablk_exit, + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ablk_set_key, + .encrypt = __ablk_encrypt, + .decrypt = ablk_decrypt, + } +}, { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr-aes-neonbs", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct async_helper_ctx), + .cra_alignmask = 7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = ablk_init, + .cra_exit = ablk_exit, + .cra_ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ablk_set_key, + .encrypt = ablk_encrypt, + .decrypt = ablk_decrypt, + } +}, { + .cra_name = "xts(aes)", + .cra_driver_name = "xts-aes-neonbs", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct async_helper_ctx), + .cra_alignmask = 7, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = ablk_init, + .cra_exit = ablk_exit, + .cra_ablkcipher = { + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ablk_set_key, + .encrypt = ablk_encrypt, + .decrypt = ablk_decrypt, + } +} }; + +static int __init aesbs_mod_init(void) +{ + if (!cpu_has_neon()) + return -ENODEV; + + return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs)); +} + +static void __exit aesbs_mod_exit(void) +{ + crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs)); +} + +module_init(aesbs_mod_init); +module_exit(aesbs_mod_exit); + +MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl new file mode 100644 index 00000000000..f3d96d93257 --- /dev/null +++ b/arch/arm/crypto/bsaes-armv7.pl @@ -0,0 +1,2467 @@ +#!/usr/bin/env perl + +# ==================================================================== +# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL +# project. The module is, however, dual licensed under OpenSSL and +# CRYPTOGAMS licenses depending on where you obtain it. For further +# details see http://www.openssl.org/~appro/cryptogams/. +# +# Specific modes and adaptation for Linux kernel by Ard Biesheuvel +# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is +# granted. +# ==================================================================== + +# Bit-sliced AES for ARM NEON +# +# February 2012. +# +# This implementation is direct adaptation of bsaes-x86_64 module for +# ARM NEON. Except that this module is endian-neutral [in sense that +# it can be compiled for either endianness] by courtesy of vld1.8's +# neutrality. 
Initial version doesn't implement interface to OpenSSL, +# only low-level primitives and unsupported entry points, just enough +# to collect performance results, which for Cortex-A8 core are: +# +# encrypt 19.5 cycles per byte processed with 128-bit key +# decrypt 22.1 cycles per byte processed with 128-bit key +# key conv. 440 cycles per 128-bit key/0.18 of 8x block +# +# Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7, +# which is [much] worse than anticipated (for further details see +# http://www.openssl.org/~appro/Snapdragon-S4.html). +# +# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code +# manages in 20.0 cycles]. +# +# When comparing to x86_64 results keep in mind that NEON unit is +# [mostly] single-issue and thus can't [fully] benefit from +# instruction-level parallelism. And when comparing to aes-armv4 +# results keep in mind key schedule conversion overhead (see +# bsaes-x86_64.pl for further details)... +# +# <appro@openssl.org> + +# April-August 2013 +# +# Add CBC, CTR and XTS subroutines, adapt for kernel use. +# +# <ard.biesheuvel@linaro.org> + +while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} +open STDOUT,">$output"; + +my ($inp,$out,$len,$key)=("r0","r1","r2","r3"); +my @XMM=map("q$_",(0..15)); + +{ +my ($key,$rounds,$const)=("r4","r5","r6"); + +sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; } +sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; } + +sub Sbox { +# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb +# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb +my @b=@_[0..7]; +my @t=@_[8..11]; +my @s=@_[12..15]; + &InBasisChange (@b); + &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s); + &OutBasisChange (@b[7,1,4,2,6,5,0,3]); +} + +sub InBasisChange { +# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb +# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb +my @b=@_[0..7]; +$code.=<<___; + veor @b[2], @b[2], @b[1] + veor @b[5], @b[5], @b[6] + veor @b[3], @b[3], @b[0] + veor @b[6], @b[6], @b[2] + veor @b[5], @b[5], @b[0] + + veor @b[6], @b[6], @b[3] + veor @b[3], @b[3], @b[7] + veor @b[7], @b[7], @b[5] + veor @b[3], @b[3], @b[4] + veor @b[4], @b[4], @b[5] + + veor @b[2], @b[2], @b[7] + veor @b[3], @b[3], @b[1] + veor @b[1], @b[1], @b[5] +___ +} + +sub OutBasisChange { +# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb +# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb +my @b=@_[0..7]; +$code.=<<___; + veor @b[0], @b[0], @b[6] + veor @b[1], @b[1], @b[4] + veor @b[4], @b[4], @b[6] + veor @b[2], @b[2], @b[0] + veor @b[6], @b[6], @b[1] + + veor @b[1], @b[1], @b[5] + veor @b[5], @b[5], @b[3] + veor @b[3], @b[3], @b[7] + veor @b[7], @b[7], @b[5] + veor @b[2], @b[2], @b[5] + + veor @b[4], @b[4], @b[7] +___ +} + +sub InvSbox { +# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb +# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb +my @b=@_[0..7]; +my @t=@_[8..11]; +my @s=@_[12..15]; + &InvInBasisChange (@b); + &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s); + &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]); +} + +sub InvInBasisChange { # OutBasisChange in reverse (with twist) +my @b=@_[5,1,2,6,3,7,0,4]; +$code.=<<___ + veor @b[1], @b[1], @b[7] + veor @b[4], @b[4], @b[7] + + veor @b[7], @b[7], @b[5] + veor @b[1], @b[1], @b[3] + veor @b[2], @b[2], @b[5] + veor @b[3], @b[3], @b[7] + + veor @b[6], @b[6], @b[1] + veor @b[2], @b[2], @b[0] + veor @b[5], @b[5], @b[3] + veor @b[4], @b[4], @b[6] + veor @b[0], @b[0], @b[6] + veor @b[1], @b[1], @b[4] +___ +} + +sub InvOutBasisChange { # InBasisChange in reverse +my 
@b=@_[2,5,7,3,6,1,0,4]; +$code.=<<___; + veor @b[1], @b[1], @b[5] + veor @b[2], @b[2], @b[7] + + veor @b[3], @b[3], @b[1] + veor @b[4], @b[4], @b[5] + veor @b[7], @b[7], @b[5] + veor @b[3], @b[3], @b[4] + veor @b[5], @b[5], @b[0] + veor @b[3], @b[3], @b[7] + veor @b[6], @b[6], @b[2] + veor @b[2], @b[2], @b[1] + veor @b[6], @b[6], @b[3] + + veor @b[3], @b[3], @b[0] + veor @b[5], @b[5], @b[6] +___ +} + +sub Mul_GF4 { +#;************************************************************* +#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) * +#;************************************************************* +my ($x0,$x1,$y0,$y1,$t0,$t1)=@_; +$code.=<<___; + veor $t0, $y0, $y1 + vand $t0, $t0, $x0 + veor $x0, $x0, $x1 + vand $t1, $x1, $y0 + vand $x0, $x0, $y1 + veor $x1, $t1, $t0 + veor $x0, $x0, $t1 +___ +} + +sub Mul_GF4_N { # not used, see next subroutine +# multiply and scale by N +my ($x0,$x1,$y0,$y1,$t0)=@_; +$code.=<<___; + veor $t0, $y0, $y1 + vand $t0, $t0, $x0 + veor $x0, $x0, $x1 + vand $x1, $x1, $y0 + vand $x0, $x0, $y1 + veor $x1, $x1, $x0 + veor $x0, $x0, $t0 +___ +} + +sub Mul_GF4_N_GF4 { +# interleaved Mul_GF4_N and Mul_GF4 +my ($x0,$x1,$y0,$y1,$t0, + $x2,$x3,$y2,$y3,$t1)=@_; +$code.=<<___; + veor $t0, $y0, $y1 + veor $t1, $y2, $y3 + vand $t0, $t0, $x0 + vand $t1, $t1, $x2 + veor $x0, $x0, $x1 + veor $x2, $x2, $x3 + vand $x1, $x1, $y0 + vand $x3, $x3, $y2 + vand $x0, $x0, $y1 + vand $x2, $x2, $y3 + veor $x1, $x1, $x0 + veor $x2, $x2, $x3 + veor $x0, $x0, $t0 + veor $x3, $x3, $t1 +___ +} +sub Mul_GF16_2 { +my @x=@_[0..7]; +my @y=@_[8..11]; +my @t=@_[12..15]; +$code.=<<___; + veor @t[0], @x[0], @x[2] + veor @t[1], @x[1], @x[3] +___ + &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2..3]); +$code.=<<___; + veor @y[0], @y[0], @y[2] + veor @y[1], @y[1], @y[3] +___ + Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3], + @x[2], @x[3], @y[2], @y[3], @t[2]); +$code.=<<___; + veor @x[0], @x[0], @t[0] + veor @x[2], @x[2], @t[0] + veor @x[1], @x[1], @t[1] + veor @x[3], @x[3], @t[1] + + veor @t[0], @x[4], @x[6] + veor @t[1], @x[5], @x[7] +___ + &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3], + @x[6], @x[7], @y[2], @y[3], @t[2]); +$code.=<<___; + veor @y[0], @y[0], @y[2] + veor @y[1], @y[1], @y[3] +___ + &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[2..3]); +$code.=<<___; + veor @x[4], @x[4], @t[0] + veor @x[6], @x[6], @t[0] + veor @x[5], @x[5], @t[1] + veor @x[7], @x[7], @t[1] +___ +} +sub Inv_GF256 { +#;******************************************************************** +#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) * +#;******************************************************************** +my @x=@_[0..7]; +my @t=@_[8..11]; +my @s=@_[12..15]; +# direct optimizations from hardware +$code.=<<___; + veor @t[3], @x[4], @x[6] + veor @t[2], @x[5], @x[7] + veor @t[1], @x[1], @x[3] + veor @s[1], @x[7], @x[6] + vmov @t[0], @t[2] + veor @s[0], @x[0], @x[2] + + vorr @t[2], @t[2], @t[1] + veor @s[3], @t[3], @t[0] + vand @s[2], @t[3], @s[0] + vorr @t[3], @t[3], @s[0] + veor @s[0], @s[0], @t[1] + vand @t[0], @t[0], @t[1] + veor @t[1], @x[3], @x[2] + vand @s[3], @s[3], @s[0] + vand @s[1], @s[1], @t[1] + veor @t[1], @x[4], @x[5] + veor @s[0], @x[1], @x[0] + veor @t[3], @t[3], @s[1] + veor @t[2], @t[2], @s[1] + vand @s[1], @t[1], @s[0] + vorr @t[1], @t[1], @s[0] + veor @t[3], @t[3], @s[3] + veor @t[0], @t[0], @s[1] + veor @t[2], @t[2], @s[2] + veor @t[1], @t[1], @s[3] + veor @t[0], @t[0], @s[2] + vand @s[0], @x[7], @x[3] + veor @t[1], @t[1], @s[2] + vand @s[1], @x[6], @x[2] + vand @s[2], @x[5], @x[1] + 
vorr @s[3], @x[4], @x[0] + veor @t[3], @t[3], @s[0] + veor @t[1], @t[1], @s[2] + veor @t[0], @t[0], @s[3] + veor @t[2], @t[2], @s[1] + + @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3 + + @ new smaller inversion + + vand @s[2], @t[3], @t[1] + vmov @s[0], @t[0] + + veor @s[1], @t[2], @s[2] + veor @s[3], @t[0], @s[2] + veor @s[2], @t[0], @s[2] @ @s[2]=@s[3] + + vbsl @s[1], @t[1], @t[0] + vbsl @s[3], @t[3], @t[2] + veor @t[3], @t[3], @t[2] + + vbsl @s[0], @s[1], @s[2] + vbsl @t[0], @s[2], @s[1] + + vand @s[2], @s[0], @s[3] + veor @t[1], @t[1], @t[0] + + veor @s[2], @s[2], @t[3] +___ +# output in s3, s2, s1, t1 + +# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3 + +# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3 + &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]); + +### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb +} + +# AES linear components + +sub ShiftRows { +my @x=@_[0..7]; +my @t=@_[8..11]; +my $mask=pop; +$code.=<<___; + vldmia $key!, {@t[0]-@t[3]} + veor @t[0], @t[0], @x[0] + veor @t[1], @t[1], @x[1] + vtbl.8 `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)` + vldmia $key!, {@t[0]} + veor @t[2], @t[2], @x[2] + vtbl.8 `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)` + vldmia $key!, {@t[1]} + veor @t[3], @t[3], @x[3] + vtbl.8 `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)` + vldmia $key!, {@t[2]} + vtbl.8 `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)` + vldmia $key!, {@t[3]} + veor @t[0], @t[0], @x[4] + veor @t[1], @t[1], @x[5] + vtbl.8 `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)` + veor @t[2], @t[2], @x[6] + vtbl.8 `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)` + veor @t[3], @t[3], @x[7] + vtbl.8 `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)` + vtbl.8 `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)` + vtbl.8 `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)` +___ +} + +sub MixColumns { +# modified to emit output in order suitable for feeding back to aesenc[last] +my @x=@_[0..7]; +my @t=@_[8..15]; +my $inv=@_[16]; # optional +$code.=<<___; + vext.8 @t[0], @x[0], @x[0], #12 @ x0 <<< 32 + vext.8 @t[1], @x[1], @x[1], #12 + veor @x[0], @x[0], @t[0] @ x0 ^ (x0 <<< 32) + vext.8 @t[2], @x[2], @x[2], #12 + veor @x[1], @x[1], @t[1] + vext.8 @t[3], @x[3], @x[3], #12 + veor @x[2], @x[2], @t[2] + vext.8 @t[4], @x[4], @x[4], #12 + veor @x[3], @x[3], @t[3] + vext.8 @t[5], @x[5], @x[5], #12 + veor @x[4], @x[4], @t[4] + vext.8 @t[6], @x[6], @x[6], #12 + veor @x[5], @x[5], @t[5] + vext.8 @t[7], @x[7], @x[7], #12 + veor @x[6], @x[6], @t[6] + + veor @t[1], @t[1], @x[0] + veor @x[7], @x[7], @t[7] + vext.8 @x[0], @x[0], @x[0], #8 @ (x0 ^ (x0 <<< 32)) <<< 64) + veor @t[2], @t[2], @x[1] + veor @t[0], @t[0], @x[7] + veor @t[1], @t[1], @x[7] + vext.8 @x[1], @x[1], @x[1], #8 + veor @t[5], @t[5], @x[4] + veor @x[0], @x[0], @t[0] + veor @t[6], @t[6], @x[5] + veor @x[1], @x[1], @t[1] + vext.8 @t[0], @x[4], @x[4], #8 + veor @t[4], @t[4], @x[3] + vext.8 @t[1], @x[5], @x[5], #8 + veor @t[7], @t[7], @x[6] + vext.8 @x[4], @x[3], @x[3], #8 + veor @t[3], @t[3], @x[2] + vext.8 @x[5], @x[7], @x[7], #8 + veor @t[4], @t[4], @x[7] + vext.8 @x[3], @x[6], @x[6], #8 + veor @t[3], @t[3], @x[7] + vext.8 @x[6], @x[2], @x[2], #8 + veor @x[7], @t[1], @t[5] +___ +$code.=<<___ if (!$inv); + veor @x[2], @t[0], @t[4] + 
veor @x[4], @x[4], @t[3] + veor @x[5], @x[5], @t[7] + veor @x[3], @x[3], @t[6] + @ vmov @x[2], @t[0] + veor @x[6], @x[6], @t[2] + @ vmov @x[7], @t[1] +___ +$code.=<<___ if ($inv); + veor @t[3], @t[3], @x[4] + veor @x[5], @x[5], @t[7] + veor @x[2], @x[3], @t[6] + veor @x[3], @t[0], @t[4] + veor @x[4], @x[6], @t[2] + vmov @x[6], @t[3] + @ vmov @x[7], @t[1] +___ +} + +sub InvMixColumns_orig { +my @x=@_[0..7]; +my @t=@_[8..15]; + +$code.=<<___; + @ multiplication by 0x0e + vext.8 @t[7], @x[7], @x[7], #12 + vmov @t[2], @x[2] + veor @x[2], @x[2], @x[5] @ 2 5 + veor @x[7], @x[7], @x[5] @ 7 5 + vext.8 @t[0], @x[0], @x[0], #12 + vmov @t[5], @x[5] + veor @x[5], @x[5], @x[0] @ 5 0 [1] + veor @x[0], @x[0], @x[1] @ 0 1 + vext.8 @t[1], @x[1], @x[1], #12 + veor @x[1], @x[1], @x[2] @ 1 25 + veor @x[0], @x[0], @x[6] @ 01 6 [2] + vext.8 @t[3], @x[3], @x[3], #12 + veor @x[1], @x[1], @x[3] @ 125 3 [4] + veor @x[2], @x[2], @x[0] @ 25 016 [3] + veor @x[3], @x[3], @x[7] @ 3 75 + veor @x[7], @x[7], @x[6] @ 75 6 [0] + vext.8 @t[6], @x[6], @x[6], #12 + vmov @t[4], @x[4] + veor @x[6], @x[6], @x[4] @ 6 4 + veor @x[4], @x[4], @x[3] @ 4 375 [6] + veor @x[3], @x[3], @x[7] @ 375 756=36 + veor @x[6], @x[6], @t[5] @ 64 5 [7] + veor @x[3], @x[3], @t[2] @ 36 2 + vext.8 @t[5], @t[5], @t[5], #12 + veor @x[3], @x[3], @t[4] @ 362 4 [5] +___ + my @y = @x[7,5,0,2,1,3,4,6]; +$code.=<<___; + @ multiplication by 0x0b + veor @y[1], @y[1], @y[0] + veor @y[0], @y[0], @t[0] + vext.8 @t[2], @t[2], @t[2], #12 + veor @y[1], @y[1], @t[1] + veor @y[0], @y[0], @t[5] + vext.8 @t[4], @t[4], @t[4], #12 + veor @y[1], @y[1], @t[6] + veor @y[0], @y[0], @t[7] + veor @t[7], @t[7], @t[6] @ clobber t[7] + + veor @y[3], @y[3], @t[0] + veor @y[1], @y[1], @y[0] + vext.8 @t[0], @t[0], @t[0], #12 + veor @y[2], @y[2], @t[1] + veor @y[4], @y[4], @t[1] + vext.8 @t[1], @t[1], @t[1], #12 + veor @y[2], @y[2], @t[2] + veor @y[3], @y[3], @t[2] + veor @y[5], @y[5], @t[2] + veor @y[2], @y[2], @t[7] + vext.8 @t[2], @t[2], @t[2], #12 + veor @y[3], @y[3], @t[3] + veor @y[6], @y[6], @t[3] + veor @y[4], @y[4], @t[3] + veor @y[7], @y[7], @t[4] + vext.8 @t[3], @t[3], @t[3], #12 + veor @y[5], @y[5], @t[4] + veor @y[7], @y[7], @t[7] + veor @t[7], @t[7], @t[5] @ clobber t[7] even more + veor @y[3], @y[3], @t[5] + veor @y[4], @y[4], @t[4] + + veor @y[5], @y[5], @t[7] + vext.8 @t[4], @t[4], @t[4], #12 + veor @y[6], @y[6], @t[7] + veor @y[4], @y[4], @t[7] + + veor @t[7], @t[7], @t[5] + vext.8 @t[5], @t[5], @t[5], #12 + + @ multiplication by 0x0d + veor @y[4], @y[4], @y[7] + veor @t[7], @t[7], @t[6] @ restore t[7] + veor @y[7], @y[7], @t[4] + vext.8 @t[6], @t[6], @t[6], #12 + veor @y[2], @y[2], @t[0] + veor @y[7], @y[7], @t[5] + vext.8 @t[7], @t[7], @t[7], #12 + veor @y[2], @y[2], @t[2] + + veor @y[3], @y[3], @y[1] + veor @y[1], @y[1], @t[1] + veor @y[0], @y[0], @t[0] + veor @y[3], @y[3], @t[0] + veor @y[1], @y[1], @t[5] + veor @y[0], @y[0], @t[5] + vext.8 @t[0], @t[0], @t[0], #12 + veor @y[1], @y[1], @t[7] + veor @y[0], @y[0], @t[6] + veor @y[3], @y[3], @y[1] + veor @y[4], @y[4], @t[1] + vext.8 @t[1], @t[1], @t[1], #12 + + veor @y[7], @y[7], @t[7] + veor @y[4], @y[4], @t[2] + veor @y[5], @y[5], @t[2] + veor @y[2], @y[2], @t[6] + veor @t[6], @t[6], @t[3] @ clobber t[6] + vext.8 @t[2], @t[2], @t[2], #12 + veor @y[4], @y[4], @y[7] + veor @y[3], @y[3], @t[6] + + veor @y[6], @y[6], @t[6] + veor @y[5], @y[5], @t[5] + vext.8 @t[5], @t[5], @t[5], #12 + veor @y[6], @y[6], @t[4] + vext.8 @t[4], @t[4], @t[4], #12 + veor @y[5], @y[5], @t[6] + veor @y[6], @y[6], @t[7] + vext.8 @t[7], @t[7], 
@t[7], #12 + veor @t[6], @t[6], @t[3] @ restore t[6] + vext.8 @t[3], @t[3], @t[3], #12 + + @ multiplication by 0x09 + veor @y[4], @y[4], @y[1] + veor @t[1], @t[1], @y[1] @ t[1]=y[1] + veor @t[0], @t[0], @t[5] @ clobber t[0] + vext.8 @t[6], @t[6], @t[6], #12 + veor @t[1], @t[1], @t[5] + veor @y[3], @y[3], @t[0] + veor @t[0], @t[0], @y[0] @ t[0]=y[0] + veor @t[1], @t[1], @t[6] + veor @t[6], @t[6], @t[7] @ clobber t[6] + veor @y[4], @y[4], @t[1] + veor @y[7], @y[7], @t[4] + veor @y[6], @y[6], @t[3] + veor @y[5], @y[5], @t[2] + veor @t[4], @t[4], @y[4] @ t[4]=y[4] + veor @t[3], @t[3], @y[3] @ t[3]=y[3] + veor @t[5], @t[5], @y[5] @ t[5]=y[5] + veor @t[2], @t[2], @y[2] @ t[2]=y[2] + veor @t[3], @t[3], @t[7] + veor @XMM[5], @t[5], @t[6] + veor @XMM[6], @t[6], @y[6] @ t[6]=y[6] + veor @XMM[2], @t[2], @t[6] + veor @XMM[7], @t[7], @y[7] @ t[7]=y[7] + + vmov @XMM[0], @t[0] + vmov @XMM[1], @t[1] + @ vmov @XMM[2], @t[2] + vmov @XMM[3], @t[3] + vmov @XMM[4], @t[4] + @ vmov @XMM[5], @t[5] + @ vmov @XMM[6], @t[6] + @ vmov @XMM[7], @t[7] +___ +} + +sub InvMixColumns { +my @x=@_[0..7]; +my @t=@_[8..15]; + +# Thanks to Jussi Kivilinna for providing pointer to +# +# | 0e 0b 0d 09 | | 02 03 01 01 | | 05 00 04 00 | +# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 | +# | 0d 09 0e 0b | | 01 01 02 03 | | 04 00 05 00 | +# | 0b 0d 09 0e | | 03 01 01 02 | | 00 04 00 05 | + +$code.=<<___; + @ multiplication by 0x05-0x00-0x04-0x00 + vext.8 @t[0], @x[0], @x[0], #8 + vext.8 @t[6], @x[6], @x[6], #8 + vext.8 @t[7], @x[7], @x[7], #8 + veor @t[0], @t[0], @x[0] + vext.8 @t[1], @x[1], @x[1], #8 + veor @t[6], @t[6], @x[6] + vext.8 @t[2], @x[2], @x[2], #8 + veor @t[7], @t[7], @x[7] + vext.8 @t[3], @x[3], @x[3], #8 + veor @t[1], @t[1], @x[1] + vext.8 @t[4], @x[4], @x[4], #8 + veor @t[2], @t[2], @x[2] + vext.8 @t[5], @x[5], @x[5], #8 + veor @t[3], @t[3], @x[3] + veor @t[4], @t[4], @x[4] + veor @t[5], @t[5], @x[5] + + veor @x[0], @x[0], @t[6] + veor @x[1], @x[1], @t[6] + veor @x[2], @x[2], @t[0] + veor @x[4], @x[4], @t[2] + veor @x[3], @x[3], @t[1] + veor @x[1], @x[1], @t[7] + veor @x[2], @x[2], @t[7] + veor @x[4], @x[4], @t[6] + veor @x[5], @x[5], @t[3] + veor @x[3], @x[3], @t[6] + veor @x[6], @x[6], @t[4] + veor @x[4], @x[4], @t[7] + veor @x[5], @x[5], @t[7] + veor @x[7], @x[7], @t[5] +___ + &MixColumns (@x,@t,1); # flipped 2<->3 and 4<->6 +} + +sub swapmove { +my ($a,$b,$n,$mask,$t)=@_; +$code.=<<___; + vshr.u64 $t, $b, #$n + veor $t, $t, $a + vand $t, $t, $mask + veor $a, $a, $t + vshl.u64 $t, $t, #$n + veor $b, $b, $t +___ +} +sub swapmove2x { +my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_; +$code.=<<___; + vshr.u64 $t0, $b0, #$n + vshr.u64 $t1, $b1, #$n + veor $t0, $t0, $a0 + veor $t1, $t1, $a1 + vand $t0, $t0, $mask + vand $t1, $t1, $mask + veor $a0, $a0, $t0 + vshl.u64 $t0, $t0, #$n + veor $a1, $a1, $t1 + vshl.u64 $t1, $t1, #$n + veor $b0, $b0, $t0 + veor $b1, $b1, $t1 +___ +} + +sub bitslice { +my @x=reverse(@_[0..7]); +my ($t0,$t1,$t2,$t3)=@_[8..11]; +$code.=<<___; + vmov.i8 $t0,#0x55 @ compose .LBS0 + vmov.i8 $t1,#0x33 @ compose .LBS1 +___ + &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3); + &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3); +$code.=<<___; + vmov.i8 $t0,#0x0f @ compose .LBS2 +___ + &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3); + &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3); + + &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3); + &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3); +} + +$code.=<<___; +#ifndef __KERNEL__ +# include "arm_arch.h" + +# define VFP_ABI_PUSH vstmdb sp!,{d8-d15} +# define VFP_ABI_POP vldmia sp!,{d8-d15} +# define VFP_ABI_FRAME 0x40 
+#else +# define VFP_ABI_PUSH +# define VFP_ABI_POP +# define VFP_ABI_FRAME 0 +# define BSAES_ASM_EXTENDED_KEY +# define XTS_CHAIN_TWEAK +# define __ARM_ARCH__ __LINUX_ARM_ARCH__ +#endif + +#ifdef __thumb__ +# define adrl adr +#endif + +#if __ARM_ARCH__>=7 +.text +.syntax unified @ ARMv7-capable assembler is expected to handle this +#ifdef __thumb2__ +.thumb +#else +.code 32 +#endif + +.fpu neon + +.type _bsaes_decrypt8,%function +.align 4 +_bsaes_decrypt8: + adr $const,_bsaes_decrypt8 + vldmia $key!, {@XMM[9]} @ round 0 key + add $const,$const,#.LM0ISR-_bsaes_decrypt8 + + vldmia $const!, {@XMM[8]} @ .LM0ISR + veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key + veor @XMM[11], @XMM[1], @XMM[9] + vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])` + veor @XMM[12], @XMM[2], @XMM[9] + vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])` + veor @XMM[13], @XMM[3], @XMM[9] + vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])` + veor @XMM[14], @XMM[4], @XMM[9] + vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])` + veor @XMM[15], @XMM[5], @XMM[9] + vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])` + veor @XMM[10], @XMM[6], @XMM[9] + vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])` + veor @XMM[11], @XMM[7], @XMM[9] + vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])` + vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])` +___ + &bitslice (@XMM[0..7, 8..11]); +$code.=<<___; + sub $rounds,$rounds,#1 + b .Ldec_sbox +.align 4 +.Ldec_loop: +___ + &ShiftRows (@XMM[0..7, 8..12]); +$code.=".Ldec_sbox:\n"; + &InvSbox (@XMM[0..7, 8..15]); +$code.=<<___; + subs $rounds,$rounds,#1 + bcc .Ldec_done +___ + &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]); +$code.=<<___; + vldmia $const, {@XMM[12]} @ .LISR + ite eq @ Thumb2 thing, sanity check in ARM + addeq $const,$const,#0x10 + bne .Ldec_loop + vldmia $const, {@XMM[12]} @ .LISRM0 + b .Ldec_loop +.align 4 +.Ldec_done: +___ + &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]); +$code.=<<___; + vldmia $key, {@XMM[8]} @ last round key + veor @XMM[6], @XMM[6], @XMM[8] + veor @XMM[4], @XMM[4], @XMM[8] + veor @XMM[2], @XMM[2], @XMM[8] + veor @XMM[7], @XMM[7], @XMM[8] + veor @XMM[3], @XMM[3], @XMM[8] + veor @XMM[5], @XMM[5], @XMM[8] + veor @XMM[0], @XMM[0], @XMM[8] + veor @XMM[1], @XMM[1], @XMM[8] + bx lr +.size _bsaes_decrypt8,.-_bsaes_decrypt8 + +.type _bsaes_const,%object +.align 6 +_bsaes_const: +.LM0ISR: @ InvShiftRows constants + .quad 0x0a0e0206070b0f03, 0x0004080c0d010509 +.LISR: + .quad 0x0504070602010003, 0x0f0e0d0c080b0a09 +.LISRM0: + .quad 0x01040b0e0205080f, 0x0306090c00070a0d +.LM0SR: @ ShiftRows constants + .quad 0x0a0e02060f03070b, 0x0004080c05090d01 +.LSR: + .quad 0x0504070600030201, 0x0f0e0d0c0a09080b +.LSRM0: + .quad 0x0304090e00050a0f, 0x01060b0c0207080d +.LM0: + .quad 0x02060a0e03070b0f, 0x0004080c0105090d +.LREVM0SR: + .quad 0x090d01050c000408, 0x03070b0f060a0e02 +.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>" +.align 6 +.size _bsaes_const,.-_bsaes_const + +.type _bsaes_encrypt8,%function +.align 4 +_bsaes_encrypt8: + adr $const,_bsaes_encrypt8 + vldmia $key!, {@XMM[9]} @ round 0 key + sub 
$const,$const,#_bsaes_encrypt8-.LM0SR + + vldmia $const!, {@XMM[8]} @ .LM0SR +_bsaes_encrypt8_alt: + veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key + veor @XMM[11], @XMM[1], @XMM[9] + vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])` + veor @XMM[12], @XMM[2], @XMM[9] + vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])` + veor @XMM[13], @XMM[3], @XMM[9] + vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])` + veor @XMM[14], @XMM[4], @XMM[9] + vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])` + veor @XMM[15], @XMM[5], @XMM[9] + vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])` + veor @XMM[10], @XMM[6], @XMM[9] + vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])` + veor @XMM[11], @XMM[7], @XMM[9] + vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])` + vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])` + vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])` +_bsaes_encrypt8_bitslice: +___ + &bitslice (@XMM[0..7, 8..11]); +$code.=<<___; + sub $rounds,$rounds,#1 + b .Lenc_sbox +.align 4 +.Lenc_loop: +___ + &ShiftRows (@XMM[0..7, 8..12]); +$code.=".Lenc_sbox:\n"; + &Sbox (@XMM[0..7, 8..15]); +$code.=<<___; + subs $rounds,$rounds,#1 + bcc .Lenc_done +___ + &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]); +$code.=<<___; + vldmia $const, {@XMM[12]} @ .LSR + ite eq @ Thumb2 thing, samity check in ARM + addeq $const,$const,#0x10 + bne .Lenc_loop + vldmia $const, {@XMM[12]} @ .LSRM0 + b .Lenc_loop +.align 4 +.Lenc_done: +___ + # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb + &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]); +$code.=<<___; + vldmia $key, {@XMM[8]} @ last round key + veor @XMM[4], @XMM[4], @XMM[8] + veor @XMM[6], @XMM[6], @XMM[8] + veor @XMM[3], @XMM[3], @XMM[8] + veor @XMM[7], @XMM[7], @XMM[8] + veor @XMM[2], @XMM[2], @XMM[8] + veor @XMM[5], @XMM[5], @XMM[8] + veor @XMM[0], @XMM[0], @XMM[8] + veor @XMM[1], @XMM[1], @XMM[8] + bx lr +.size _bsaes_encrypt8,.-_bsaes_encrypt8 +___ +} +{ +my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6"); + +sub bitslice_key { +my @x=reverse(@_[0..7]); +my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12]; + + &swapmove (@x[0,1],1,$bs0,$t2,$t3); +$code.=<<___; + @ &swapmove(@x[2,3],1,$t0,$t2,$t3); + vmov @x[2], @x[0] + vmov @x[3], @x[1] +___ + #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3); + + &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3); +$code.=<<___; + @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3); + vmov @x[4], @x[0] + vmov @x[6], @x[2] + vmov @x[5], @x[1] + vmov @x[7], @x[3] +___ + &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3); + &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3); +} + +$code.=<<___; +.type _bsaes_key_convert,%function +.align 4 +_bsaes_key_convert: + adr $const,_bsaes_key_convert + vld1.8 {@XMM[7]}, [$inp]! @ load round 0 key + sub $const,$const,#_bsaes_key_convert-.LM0 + vld1.8 {@XMM[15]}, [$inp]! 
@ load round 1 key + + vmov.i8 @XMM[8], #0x01 @ bit masks + vmov.i8 @XMM[9], #0x02 + vmov.i8 @XMM[10], #0x04 + vmov.i8 @XMM[11], #0x08 + vmov.i8 @XMM[12], #0x10 + vmov.i8 @XMM[13], #0x20 + vldmia $const, {@XMM[14]} @ .LM0 + +#ifdef __ARMEL__ + vrev32.8 @XMM[7], @XMM[7] + vrev32.8 @XMM[15], @XMM[15] +#endif + sub $rounds,$rounds,#1 + vstmia $out!, {@XMM[7]} @ save round 0 key + b .Lkey_loop + +.align 4 +.Lkey_loop: + vtbl.8 `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])` + vtbl.8 `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])` + vmov.i8 @XMM[6], #0x40 + vmov.i8 @XMM[15], #0x80 + + vtst.8 @XMM[0], @XMM[7], @XMM[8] + vtst.8 @XMM[1], @XMM[7], @XMM[9] + vtst.8 @XMM[2], @XMM[7], @XMM[10] + vtst.8 @XMM[3], @XMM[7], @XMM[11] + vtst.8 @XMM[4], @XMM[7], @XMM[12] + vtst.8 @XMM[5], @XMM[7], @XMM[13] + vtst.8 @XMM[6], @XMM[7], @XMM[6] + vtst.8 @XMM[7], @XMM[7], @XMM[15] + vld1.8 {@XMM[15]}, [$inp]! @ load next round key + vmvn @XMM[0], @XMM[0] @ "pnot" + vmvn @XMM[1], @XMM[1] + vmvn @XMM[5], @XMM[5] + vmvn @XMM[6], @XMM[6] +#ifdef __ARMEL__ + vrev32.8 @XMM[15], @XMM[15] +#endif + subs $rounds,$rounds,#1 + vstmia $out!,{@XMM[0]-@XMM[7]} @ write bit-sliced round key + bne .Lkey_loop + + vmov.i8 @XMM[7],#0x63 @ compose .L63 + @ don't save last round key + bx lr +.size _bsaes_key_convert,.-_bsaes_key_convert +___ +} + +if (0) { # following four functions are unsupported interface + # used for benchmarking... +$code.=<<___; +.globl bsaes_enc_key_convert +.type bsaes_enc_key_convert,%function +.align 4 +bsaes_enc_key_convert: + stmdb sp!,{r4-r6,lr} + vstmdb sp!,{d8-d15} @ ABI specification says so + + ldr r5,[$inp,#240] @ pass rounds + mov r4,$inp @ pass key + mov r12,$out @ pass key schedule + bl _bsaes_key_convert + veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key + vstmia r12, {@XMM[7]} @ save last round key + + vldmia sp!,{d8-d15} + ldmia sp!,{r4-r6,pc} +.size bsaes_enc_key_convert,.-bsaes_enc_key_convert + +.globl bsaes_encrypt_128 +.type bsaes_encrypt_128,%function +.align 4 +bsaes_encrypt_128: + stmdb sp!,{r4-r6,lr} + vstmdb sp!,{d8-d15} @ ABI specification says so +.Lenc128_loop: + vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input + vld1.8 {@XMM[2]-@XMM[3]}, [$inp]! + mov r4,$key @ pass the key + vld1.8 {@XMM[4]-@XMM[5]}, [$inp]! + mov r5,#10 @ pass rounds + vld1.8 {@XMM[6]-@XMM[7]}, [$inp]! + + bl _bsaes_encrypt8 + + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + vst1.8 {@XMM[4]}, [$out]! + vst1.8 {@XMM[6]}, [$out]! + vst1.8 {@XMM[3]}, [$out]! + vst1.8 {@XMM[7]}, [$out]! + vst1.8 {@XMM[2]}, [$out]! + subs $len,$len,#0x80 + vst1.8 {@XMM[5]}, [$out]! + bhi .Lenc128_loop + + vldmia sp!,{d8-d15} + ldmia sp!,{r4-r6,pc} +.size bsaes_encrypt_128,.-bsaes_encrypt_128 + +.globl bsaes_dec_key_convert +.type bsaes_dec_key_convert,%function +.align 4 +bsaes_dec_key_convert: + stmdb sp!,{r4-r6,lr} + vstmdb sp!,{d8-d15} @ ABI specification says so + + ldr r5,[$inp,#240] @ pass rounds + mov r4,$inp @ pass key + mov r12,$out @ pass key schedule + bl _bsaes_key_convert + vldmia $out, {@XMM[6]} + vstmia r12, {@XMM[15]} @ save last round key + veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key + vstmia $out, {@XMM[7]} + + vldmia sp!,{d8-d15} + ldmia sp!,{r4-r6,pc} +.size bsaes_dec_key_convert,.-bsaes_dec_key_convert + +.globl bsaes_decrypt_128 +.type bsaes_decrypt_128,%function +.align 4 +bsaes_decrypt_128: + stmdb sp!,{r4-r6,lr} + vstmdb sp!,{d8-d15} @ ABI specification says so +.Ldec128_loop: + vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input + vld1.8 {@XMM[2]-@XMM[3]}, [$inp]! 
+ mov r4,$key @ pass the key + vld1.8 {@XMM[4]-@XMM[5]}, [$inp]! + mov r5,#10 @ pass rounds + vld1.8 {@XMM[6]-@XMM[7]}, [$inp]! + + bl _bsaes_decrypt8 + + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + vst1.8 {@XMM[6]}, [$out]! + vst1.8 {@XMM[4]}, [$out]! + vst1.8 {@XMM[2]}, [$out]! + vst1.8 {@XMM[7]}, [$out]! + vst1.8 {@XMM[3]}, [$out]! + subs $len,$len,#0x80 + vst1.8 {@XMM[5]}, [$out]! + bhi .Ldec128_loop + + vldmia sp!,{d8-d15} + ldmia sp!,{r4-r6,pc} +.size bsaes_decrypt_128,.-bsaes_decrypt_128 +___ +} +{ +my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10)); +my ($keysched)=("sp"); + +$code.=<<___; +.extern AES_cbc_encrypt +.extern AES_decrypt + +.global bsaes_cbc_encrypt +.type bsaes_cbc_encrypt,%function +.align 5 +bsaes_cbc_encrypt: +#ifndef __KERNEL__ + cmp $len, #128 +#ifndef __thumb__ + blo AES_cbc_encrypt +#else + bhs 1f + b AES_cbc_encrypt +1: +#endif +#endif + + @ it is up to the caller to make sure we are called with enc == 0 + + mov ip, sp + stmdb sp!, {r4-r10, lr} + VFP_ABI_PUSH + ldr $ivp, [ip] @ IV is 1st arg on the stack + mov $len, $len, lsr#4 @ len in 16 byte blocks + sub sp, #0x10 @ scratch space to carry over the IV + mov $fp, sp @ save sp + + ldr $rounds, [$key, #240] @ get # of rounds +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key + add r12, #`128-32` @ sifze of bit-slices key schedule + + @ populate the key schedule + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + mov sp, r12 @ sp is $keysched + bl _bsaes_key_convert + vldmia $keysched, {@XMM[6]} + vstmia r12, {@XMM[15]} @ save last round key + veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key + vstmia $keysched, {@XMM[7]} +#else + ldr r12, [$key, #244] + eors r12, #1 + beq 0f + + @ populate the key schedule + str r12, [$key, #244] + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + add r12, $key, #248 @ pass key schedule + bl _bsaes_key_convert + add r4, $key, #248 + vldmia r4, {@XMM[6]} + vstmia r12, {@XMM[15]} @ save last round key + veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key + vstmia r4, {@XMM[7]} + +.align 2 +0: +#endif + + vld1.8 {@XMM[15]}, [$ivp] @ load IV + b .Lcbc_dec_loop + +.align 4 +.Lcbc_dec_loop: + subs $len, $len, #0x8 + bmi .Lcbc_dec_loop_finish + + vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input + vld1.8 {@XMM[2]-@XMM[3]}, [$inp]! +#ifndef BSAES_ASM_EXTENDED_KEY + mov r4, $keysched @ pass the key +#else + add r4, $key, #248 +#endif + vld1.8 {@XMM[4]-@XMM[5]}, [$inp]! + mov r5, $rounds + vld1.8 {@XMM[6]-@XMM[7]}, [$inp] + sub $inp, $inp, #0x60 + vstmia $fp, {@XMM[15]} @ put aside IV + + bl _bsaes_decrypt8 + + vldmia $fp, {@XMM[14]} @ reload IV + vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input + veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV + vld1.8 {@XMM[10]-@XMM[11]}, [$inp]! + veor @XMM[1], @XMM[1], @XMM[8] + veor @XMM[6], @XMM[6], @XMM[9] + vld1.8 {@XMM[12]-@XMM[13]}, [$inp]! + veor @XMM[4], @XMM[4], @XMM[10] + veor @XMM[2], @XMM[2], @XMM[11] + vld1.8 {@XMM[14]-@XMM[15]}, [$inp]! + veor @XMM[7], @XMM[7], @XMM[12] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + veor @XMM[3], @XMM[3], @XMM[13] + vst1.8 {@XMM[6]}, [$out]! + veor @XMM[5], @XMM[5], @XMM[14] + vst1.8 {@XMM[4]}, [$out]! + vst1.8 {@XMM[2]}, [$out]! + vst1.8 {@XMM[7]}, [$out]! + vst1.8 {@XMM[3]}, [$out]! + vst1.8 {@XMM[5]}, [$out]! + + b .Lcbc_dec_loop + +.Lcbc_dec_loop_finish: + adds $len, $len, #8 + beq .Lcbc_dec_done + + vld1.8 {@XMM[0]}, [$inp]! 
@ load input + cmp $len, #2 + blo .Lcbc_dec_one + vld1.8 {@XMM[1]}, [$inp]! +#ifndef BSAES_ASM_EXTENDED_KEY + mov r4, $keysched @ pass the key +#else + add r4, $key, #248 +#endif + mov r5, $rounds + vstmia $fp, {@XMM[15]} @ put aside IV + beq .Lcbc_dec_two + vld1.8 {@XMM[2]}, [$inp]! + cmp $len, #4 + blo .Lcbc_dec_three + vld1.8 {@XMM[3]}, [$inp]! + beq .Lcbc_dec_four + vld1.8 {@XMM[4]}, [$inp]! + cmp $len, #6 + blo .Lcbc_dec_five + vld1.8 {@XMM[5]}, [$inp]! + beq .Lcbc_dec_six + vld1.8 {@XMM[6]}, [$inp]! + sub $inp, $inp, #0x70 + + bl _bsaes_decrypt8 + + vldmia $fp, {@XMM[14]} @ reload IV + vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input + veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV + vld1.8 {@XMM[10]-@XMM[11]}, [$inp]! + veor @XMM[1], @XMM[1], @XMM[8] + veor @XMM[6], @XMM[6], @XMM[9] + vld1.8 {@XMM[12]-@XMM[13]}, [$inp]! + veor @XMM[4], @XMM[4], @XMM[10] + veor @XMM[2], @XMM[2], @XMM[11] + vld1.8 {@XMM[15]}, [$inp]! + veor @XMM[7], @XMM[7], @XMM[12] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + veor @XMM[3], @XMM[3], @XMM[13] + vst1.8 {@XMM[6]}, [$out]! + vst1.8 {@XMM[4]}, [$out]! + vst1.8 {@XMM[2]}, [$out]! + vst1.8 {@XMM[7]}, [$out]! + vst1.8 {@XMM[3]}, [$out]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_six: + sub $inp, $inp, #0x60 + bl _bsaes_decrypt8 + vldmia $fp,{@XMM[14]} @ reload IV + vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input + veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV + vld1.8 {@XMM[10]-@XMM[11]}, [$inp]! + veor @XMM[1], @XMM[1], @XMM[8] + veor @XMM[6], @XMM[6], @XMM[9] + vld1.8 {@XMM[12]}, [$inp]! + veor @XMM[4], @XMM[4], @XMM[10] + veor @XMM[2], @XMM[2], @XMM[11] + vld1.8 {@XMM[15]}, [$inp]! + veor @XMM[7], @XMM[7], @XMM[12] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + vst1.8 {@XMM[6]}, [$out]! + vst1.8 {@XMM[4]}, [$out]! + vst1.8 {@XMM[2]}, [$out]! + vst1.8 {@XMM[7]}, [$out]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_five: + sub $inp, $inp, #0x50 + bl _bsaes_decrypt8 + vldmia $fp, {@XMM[14]} @ reload IV + vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input + veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV + vld1.8 {@XMM[10]-@XMM[11]}, [$inp]! + veor @XMM[1], @XMM[1], @XMM[8] + veor @XMM[6], @XMM[6], @XMM[9] + vld1.8 {@XMM[15]}, [$inp]! + veor @XMM[4], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + veor @XMM[2], @XMM[2], @XMM[11] + vst1.8 {@XMM[6]}, [$out]! + vst1.8 {@XMM[4]}, [$out]! + vst1.8 {@XMM[2]}, [$out]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_four: + sub $inp, $inp, #0x40 + bl _bsaes_decrypt8 + vldmia $fp, {@XMM[14]} @ reload IV + vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input + veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV + vld1.8 {@XMM[10]}, [$inp]! + veor @XMM[1], @XMM[1], @XMM[8] + veor @XMM[6], @XMM[6], @XMM[9] + vld1.8 {@XMM[15]}, [$inp]! + veor @XMM[4], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + vst1.8 {@XMM[6]}, [$out]! + vst1.8 {@XMM[4]}, [$out]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_three: + sub $inp, $inp, #0x30 + bl _bsaes_decrypt8 + vldmia $fp, {@XMM[14]} @ reload IV + vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input + veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV + vld1.8 {@XMM[15]}, [$inp]! + veor @XMM[1], @XMM[1], @XMM[8] + veor @XMM[6], @XMM[6], @XMM[9] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + vst1.8 {@XMM[6]}, [$out]! + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_two: + sub $inp, $inp, #0x20 + bl _bsaes_decrypt8 + vldmia $fp, {@XMM[14]} @ reload IV + vld1.8 {@XMM[8]}, [$inp]! @ reload input + veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV + vld1.8 {@XMM[15]}, [$inp]! 
@ reload input + veor @XMM[1], @XMM[1], @XMM[8] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + b .Lcbc_dec_done +.align 4 +.Lcbc_dec_one: + sub $inp, $inp, #0x10 + mov $rounds, $out @ save original out pointer + mov $out, $fp @ use the iv scratch space as out buffer + mov r2, $key + vmov @XMM[4],@XMM[15] @ just in case ensure that IV + vmov @XMM[5],@XMM[0] @ and input are preserved + bl AES_decrypt + vld1.8 {@XMM[0]}, [$fp,:64] @ load result + veor @XMM[0], @XMM[0], @XMM[4] @ ^= IV + vmov @XMM[15], @XMM[5] @ @XMM[5] holds input + vst1.8 {@XMM[0]}, [$rounds] @ write output + +.Lcbc_dec_done: +#ifndef BSAES_ASM_EXTENDED_KEY + vmov.i32 q0, #0 + vmov.i32 q1, #0 +.Lcbc_dec_bzero: @ wipe key schedule [if any] + vstmia $keysched!, {q0-q1} + cmp $keysched, $fp + bne .Lcbc_dec_bzero +#endif + + mov sp, $fp + add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb + vst1.8 {@XMM[15]}, [$ivp] @ return IV + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} +.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt +___ +} +{ +my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10))); +my $const = "r6"; # shared with _bsaes_encrypt8_alt +my $keysched = "sp"; + +$code.=<<___; +.extern AES_encrypt +.global bsaes_ctr32_encrypt_blocks +.type bsaes_ctr32_encrypt_blocks,%function +.align 5 +bsaes_ctr32_encrypt_blocks: + cmp $len, #8 @ use plain AES for + blo .Lctr_enc_short @ small sizes + + mov ip, sp + stmdb sp!, {r4-r10, lr} + VFP_ABI_PUSH + ldr $ctr, [ip] @ ctr is 1st arg on the stack + sub sp, sp, #0x10 @ scratch space to carry over the ctr + mov $fp, sp @ save sp + + ldr $rounds, [$key, #240] @ get # of rounds +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key + add r12, #`128-32` @ size of bit-sliced key schedule + + @ populate the key schedule + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + mov sp, r12 @ sp is $keysched + bl _bsaes_key_convert + veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key + vstmia r12, {@XMM[7]} @ save last round key + + vld1.8 {@XMM[0]}, [$ctr] @ load counter + add $ctr, $const, #.LREVM0SR-.LM0 @ borrow $ctr + vldmia $keysched, {@XMM[4]} @ load round0 key +#else + ldr r12, [$key, #244] + eors r12, #1 + beq 0f + + @ populate the key schedule + str r12, [$key, #244] + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + add r12, $key, #248 @ pass key schedule + bl _bsaes_key_convert + veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key + vstmia r12, {@XMM[7]} @ save last round key + +.align 2 +0: add r12, $key, #248 + vld1.8 {@XMM[0]}, [$ctr] @ load counter + adrl $ctr, .LREVM0SR @ borrow $ctr + vldmia r12, {@XMM[4]} @ load round0 key + sub sp, #0x10 @ place for adjusted round0 key +#endif + + vmov.i32 @XMM[8],#1 @ compose 1<<96 + veor @XMM[9],@XMM[9],@XMM[9] + vrev32.8 @XMM[0],@XMM[0] + vext.8 @XMM[8],@XMM[9],@XMM[8],#4 + vrev32.8 @XMM[4],@XMM[4] + vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96 + vstmia $keysched, {@XMM[4]} @ save adjusted round0 key + b .Lctr_enc_loop + +.align 4 +.Lctr_enc_loop: + vadd.u32 @XMM[10], @XMM[8], @XMM[9] @ compose 3<<96 + vadd.u32 @XMM[1], @XMM[0], @XMM[8] @ +1 + vadd.u32 @XMM[2], @XMM[0], @XMM[9] @ +2 + vadd.u32 @XMM[3], @XMM[0], @XMM[10] @ +3 + vadd.u32 @XMM[4], @XMM[1], @XMM[10] + vadd.u32 @XMM[5], @XMM[2], @XMM[10] + vadd.u32 @XMM[6], @XMM[3], @XMM[10] + vadd.u32 @XMM[7], @XMM[4], @XMM[10] + vadd.u32 @XMM[10], @XMM[5], @XMM[10] @ next counter + + @ Borrow prologue from _bsaes_encrypt8 to use the opportunity + @ to flip byte order in 
32-bit counter + + vldmia $keysched, {@XMM[9]} @ load round0 key +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, $keysched, #0x10 @ pass next round key +#else + add r4, $key, #`248+16` +#endif + vldmia $ctr, {@XMM[8]} @ .LREVM0SR + mov r5, $rounds @ pass rounds + vstmia $fp, {@XMM[10]} @ save next counter + sub $const, $ctr, #.LREVM0SR-.LSR @ pass constants + + bl _bsaes_encrypt8_alt + + subs $len, $len, #8 + blo .Lctr_enc_loop_done + + vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ load input + vld1.8 {@XMM[10]-@XMM[11]}, [$inp]! + veor @XMM[0], @XMM[8] + veor @XMM[1], @XMM[9] + vld1.8 {@XMM[12]-@XMM[13]}, [$inp]! + veor @XMM[4], @XMM[10] + veor @XMM[6], @XMM[11] + vld1.8 {@XMM[14]-@XMM[15]}, [$inp]! + veor @XMM[3], @XMM[12] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output + veor @XMM[7], @XMM[13] + veor @XMM[2], @XMM[14] + vst1.8 {@XMM[4]}, [$out]! + veor @XMM[5], @XMM[15] + vst1.8 {@XMM[6]}, [$out]! + vmov.i32 @XMM[8], #1 @ compose 1<<96 + vst1.8 {@XMM[3]}, [$out]! + veor @XMM[9], @XMM[9], @XMM[9] + vst1.8 {@XMM[7]}, [$out]! + vext.8 @XMM[8], @XMM[9], @XMM[8], #4 + vst1.8 {@XMM[2]}, [$out]! + vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96 + vst1.8 {@XMM[5]}, [$out]! + vldmia $fp, {@XMM[0]} @ load counter + + bne .Lctr_enc_loop + b .Lctr_enc_done + +.align 4 +.Lctr_enc_loop_done: + add $len, $len, #8 + vld1.8 {@XMM[8]}, [$inp]! @ load input + veor @XMM[0], @XMM[8] + vst1.8 {@XMM[0]}, [$out]! @ write output + cmp $len, #2 + blo .Lctr_enc_done + vld1.8 {@XMM[9]}, [$inp]! + veor @XMM[1], @XMM[9] + vst1.8 {@XMM[1]}, [$out]! + beq .Lctr_enc_done + vld1.8 {@XMM[10]}, [$inp]! + veor @XMM[4], @XMM[10] + vst1.8 {@XMM[4]}, [$out]! + cmp $len, #4 + blo .Lctr_enc_done + vld1.8 {@XMM[11]}, [$inp]! + veor @XMM[6], @XMM[11] + vst1.8 {@XMM[6]}, [$out]! + beq .Lctr_enc_done + vld1.8 {@XMM[12]}, [$inp]! + veor @XMM[3], @XMM[12] + vst1.8 {@XMM[3]}, [$out]! + cmp $len, #6 + blo .Lctr_enc_done + vld1.8 {@XMM[13]}, [$inp]! + veor @XMM[7], @XMM[13] + vst1.8 {@XMM[7]}, [$out]! + beq .Lctr_enc_done + vld1.8 {@XMM[14]}, [$inp] + veor @XMM[2], @XMM[14] + vst1.8 {@XMM[2]}, [$out]! + +.Lctr_enc_done: + vmov.i32 q0, #0 + vmov.i32 q1, #0 +#ifndef BSAES_ASM_EXTENDED_KEY +.Lctr_enc_bzero: @ wipe key schedule [if any] + vstmia $keysched!, {q0-q1} + cmp $keysched, $fp + bne .Lctr_enc_bzero +#else + vstmia $keysched, {q0-q1} +#endif + + mov sp, $fp + add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} @ return + +.align 4 +.Lctr_enc_short: + ldr ip, [sp] @ ctr pointer is passed on stack + stmdb sp!, {r4-r8, lr} + + mov r4, $inp @ copy arguments + mov r5, $out + mov r6, $len + mov r7, $key + ldr r8, [ip, #12] @ load counter LSW + vld1.8 {@XMM[1]}, [ip] @ load whole counter value +#ifdef __ARMEL__ + rev r8, r8 +#endif + sub sp, sp, #0x10 + vst1.8 {@XMM[1]}, [sp,:64] @ copy counter value + sub sp, sp, #0x10 + +.Lctr_enc_short_loop: + add r0, sp, #0x10 @ input counter value + mov r1, sp @ output on the stack + mov r2, r7 @ key + + bl AES_encrypt + + vld1.8 {@XMM[0]}, [r4]! @ load input + vld1.8 {@XMM[1]}, [sp,:64] @ load encrypted counter + add r8, r8, #1 +#ifdef __ARMEL__ + rev r0, r8 + str r0, [sp, #0x1c] @ next counter value +#else + str r8, [sp, #0x1c] @ next counter value +#endif + veor @XMM[0],@XMM[0],@XMM[1] + vst1.8 {@XMM[0]}, [r5]! 
@ store output + subs r6, r6, #1 + bne .Lctr_enc_short_loop + + vmov.i32 q0, #0 + vmov.i32 q1, #0 + vstmia sp!, {q0-q1} + + ldmia sp!, {r4-r8, pc} +.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks +___ +} +{ +###################################################################### +# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len, +# const AES_KEY *key1, const AES_KEY *key2, +# const unsigned char iv[16]); +# +my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3))); +my $const="r6"; # returned by _bsaes_key_convert +my $twmask=@XMM[5]; +my @T=@XMM[6..7]; + +$code.=<<___; +.globl bsaes_xts_encrypt +.type bsaes_xts_encrypt,%function +.align 4 +bsaes_xts_encrypt: + mov ip, sp + stmdb sp!, {r4-r10, lr} @ 0x20 + VFP_ABI_PUSH + mov r6, sp @ future $fp + + mov $inp, r0 + mov $out, r1 + mov $len, r2 + mov $key, r3 + + sub r0, sp, #0x10 @ 0x10 + bic r0, #0xf @ align at 16 bytes + mov sp, r0 + +#ifdef XTS_CHAIN_TWEAK + ldr r0, [ip] @ pointer to input tweak +#else + @ generate initial tweak + ldr r0, [ip, #4] @ iv[] + mov r1, sp + ldr r2, [ip, #0] @ key2 + bl AES_encrypt + mov r0,sp @ pointer to initial tweak +#endif + + ldr $rounds, [$key, #240] @ get # of rounds + mov $fp, r6 +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key + @ add r12, #`128-32` @ size of bit-sliced key schedule + sub r12, #`32+16` @ place for tweak[9] + + @ populate the key schedule + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + mov sp, r12 + add r12, #0x90 @ pass key schedule + bl _bsaes_key_convert + veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key + vstmia r12, {@XMM[7]} @ save last round key +#else + ldr r12, [$key, #244] + eors r12, #1 + beq 0f + + str r12, [$key, #244] + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + add r12, $key, #248 @ pass key schedule + bl _bsaes_key_convert + veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key + vstmia r12, {@XMM[7]} + +.align 2 +0: sub sp, #0x90 @ place for tweak[9] +#endif + + vld1.8 {@XMM[8]}, [r0] @ initial tweak + adr $magic, .Lxts_magic + + subs $len, #0x80 + blo .Lxts_enc_short + b .Lxts_enc_loop + +.align 4 +.Lxts_enc_loop: + vldmia $magic, {$twmask} @ load XTS magic + vshr.s64 @T[0], @XMM[8], #63 + mov r0, sp + vand @T[0], @T[0], $twmask +___ +for($i=9;$i<16;$i++) { +$code.=<<___; + vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1] + vst1.64 {@XMM[$i-1]}, [r0,:128]! + vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")` + vshr.s64 @T[1], @XMM[$i], #63 + veor @XMM[$i], @XMM[$i], @T[0] + vand @T[1], @T[1], $twmask +___ + @T=reverse(@T); + +$code.=<<___ if ($i>=10); + vld1.8 {@XMM[$i-10]}, [$inp]! +___ +$code.=<<___ if ($i>=11); + veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3] +___ +} +$code.=<<___; + vadd.u64 @XMM[8], @XMM[15], @XMM[15] + vst1.64 {@XMM[15]}, [r0,:128]! + vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")` + veor @XMM[8], @XMM[8], @T[0] + vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak + + vld1.8 {@XMM[6]-@XMM[7]}, [$inp]! + veor @XMM[5], @XMM[5], @XMM[13] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[6], @XMM[6], @XMM[14] + mov r5, $rounds @ pass rounds + veor @XMM[7], @XMM[7], @XMM[15] + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]! 
+ veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[6], @XMM[11] + vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]! + veor @XMM[10], @XMM[3], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + veor @XMM[11], @XMM[7], @XMM[13] + veor @XMM[12], @XMM[2], @XMM[14] + vst1.8 {@XMM[10]-@XMM[11]}, [$out]! + veor @XMM[13], @XMM[5], @XMM[15] + vst1.8 {@XMM[12]-@XMM[13]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + + subs $len, #0x80 + bpl .Lxts_enc_loop + +.Lxts_enc_short: + adds $len, #0x70 + bmi .Lxts_enc_done + + vldmia $magic, {$twmask} @ load XTS magic + vshr.s64 @T[0], @XMM[8], #63 + mov r0, sp + vand @T[0], @T[0], $twmask +___ +for($i=9;$i<16;$i++) { +$code.=<<___; + vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1] + vst1.64 {@XMM[$i-1]}, [r0,:128]! + vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")` + vshr.s64 @T[1], @XMM[$i], #63 + veor @XMM[$i], @XMM[$i], @T[0] + vand @T[1], @T[1], $twmask +___ + @T=reverse(@T); + +$code.=<<___ if ($i>=10); + vld1.8 {@XMM[$i-10]}, [$inp]! + subs $len, #0x10 + bmi .Lxts_enc_`$i-9` +___ +$code.=<<___ if ($i>=11); + veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3] +___ +} +$code.=<<___; + sub $len, #0x10 + vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak + + vld1.8 {@XMM[6]}, [$inp]! + veor @XMM[5], @XMM[5], @XMM[13] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[6], @XMM[6], @XMM[14] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]! + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[6], @XMM[11] + vld1.64 {@XMM[14]}, [r0,:128]! + veor @XMM[10], @XMM[3], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + veor @XMM[11], @XMM[7], @XMM[13] + veor @XMM[12], @XMM[2], @XMM[14] + vst1.8 {@XMM[10]-@XMM[11]}, [$out]! + vst1.8 {@XMM[12]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_6: + vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak + + veor @XMM[4], @XMM[4], @XMM[12] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[5], @XMM[5], @XMM[13] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]! + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[6], @XMM[11] + veor @XMM[10], @XMM[3], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + veor @XMM[11], @XMM[7], @XMM[13] + vst1.8 {@XMM[10]-@XMM[11]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_enc_done + +@ put this in range for both ARM and Thumb mode adr instructions +.align 5 +.Lxts_magic: + .quad 1, 0x87 + +.align 5 +.Lxts_enc_5: + vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak + + veor @XMM[3], @XMM[3], @XMM[11] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[4], @XMM[4], @XMM[12] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! 
+ veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]}, [r0,:128]! + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[6], @XMM[11] + veor @XMM[10], @XMM[3], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + vst1.8 {@XMM[10]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_4: + vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak + + veor @XMM[2], @XMM[2], @XMM[10] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[3], @XMM[3], @XMM[11] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[6], @XMM[11] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_3: + vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak + + veor @XMM[1], @XMM[1], @XMM[9] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[2], @XMM[2], @XMM[10] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]! + vld1.64 {@XMM[10]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[4], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + vst1.8 {@XMM[8]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_2: + vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak + + veor @XMM[0], @XMM[0], @XMM[8] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[1], @XMM[1], @XMM[9] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_encrypt8 + + vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + veor @XMM[1], @XMM[1], @XMM[ 9] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_enc_done +.align 4 +.Lxts_enc_1: + mov r0, sp + veor @XMM[0], @XMM[8] + mov r1, sp + vst1.8 {@XMM[0]}, [sp,:128] + mov r2, $key + mov r4, $fp @ preserve fp + + bl AES_encrypt + + vld1.8 {@XMM[0]}, [sp,:128] + veor @XMM[0], @XMM[0], @XMM[8] + vst1.8 {@XMM[0]}, [$out]! 
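
(Editorial aside, not part of the patch.) The .Lxts_enc_1 tail just above is the one-block fallback: XOR the tweak in, run the block through the non-bit-sliced AES_encrypt, then XOR the tweak back out. The equivalent in C, using OpenSSL's AES_encrypt as the assembly does (buffer handling simplified):

#include <stdint.h>
#include <openssl/aes.h>

static void xts_encrypt_one_block(const AES_KEY *key1, const uint8_t tweak[16],
				  const uint8_t in[16], uint8_t out[16])
{
	uint8_t buf[16];
	int i;

	for (i = 0; i < 16; i++)       /* PP = P ^ T     */
		buf[i] = in[i] ^ tweak[i];
	AES_encrypt(buf, buf, key1);   /* CC = E_K1(PP)  */
	for (i = 0; i < 16; i++)       /* C  = CC ^ T    */
		out[i] = buf[i] ^ tweak[i];
}
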
+ mov $fp, r4 + + vmov @XMM[8], @XMM[9] @ next round tweak + +.Lxts_enc_done: +#ifndef XTS_CHAIN_TWEAK + adds $len, #0x10 + beq .Lxts_enc_ret + sub r6, $out, #0x10 + +.Lxts_enc_steal: + ldrb r0, [$inp], #1 + ldrb r1, [$out, #-0x10] + strb r0, [$out, #-0x10] + strb r1, [$out], #1 + + subs $len, #1 + bhi .Lxts_enc_steal + + vld1.8 {@XMM[0]}, [r6] + mov r0, sp + veor @XMM[0], @XMM[0], @XMM[8] + mov r1, sp + vst1.8 {@XMM[0]}, [sp,:128] + mov r2, $key + mov r4, $fp @ preserve fp + + bl AES_encrypt + + vld1.8 {@XMM[0]}, [sp,:128] + veor @XMM[0], @XMM[0], @XMM[8] + vst1.8 {@XMM[0]}, [r6] + mov $fp, r4 +#endif + +.Lxts_enc_ret: + bic r0, $fp, #0xf + vmov.i32 q0, #0 + vmov.i32 q1, #0 +#ifdef XTS_CHAIN_TWEAK + ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak +#endif +.Lxts_enc_bzero: @ wipe key schedule [if any] + vstmia sp!, {q0-q1} + cmp sp, r0 + bne .Lxts_enc_bzero + + mov sp, $fp +#ifdef XTS_CHAIN_TWEAK + vst1.8 {@XMM[8]}, [r1] +#endif + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} @ return + +.size bsaes_xts_encrypt,.-bsaes_xts_encrypt + +.globl bsaes_xts_decrypt +.type bsaes_xts_decrypt,%function +.align 4 +bsaes_xts_decrypt: + mov ip, sp + stmdb sp!, {r4-r10, lr} @ 0x20 + VFP_ABI_PUSH + mov r6, sp @ future $fp + + mov $inp, r0 + mov $out, r1 + mov $len, r2 + mov $key, r3 + + sub r0, sp, #0x10 @ 0x10 + bic r0, #0xf @ align at 16 bytes + mov sp, r0 + +#ifdef XTS_CHAIN_TWEAK + ldr r0, [ip] @ pointer to input tweak +#else + @ generate initial tweak + ldr r0, [ip, #4] @ iv[] + mov r1, sp + ldr r2, [ip, #0] @ key2 + bl AES_encrypt + mov r0, sp @ pointer to initial tweak +#endif + + ldr $rounds, [$key, #240] @ get # of rounds + mov $fp, r6 +#ifndef BSAES_ASM_EXTENDED_KEY + @ allocate the key schedule on the stack + sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key + @ add r12, #`128-32` @ size of bit-sliced key schedule + sub r12, #`32+16` @ place for tweak[9] + + @ populate the key schedule + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + mov sp, r12 + add r12, #0x90 @ pass key schedule + bl _bsaes_key_convert + add r4, sp, #0x90 + vldmia r4, {@XMM[6]} + vstmia r12, {@XMM[15]} @ save last round key + veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key + vstmia r4, {@XMM[7]} +#else + ldr r12, [$key, #244] + eors r12, #1 + beq 0f + + str r12, [$key, #244] + mov r4, $key @ pass key + mov r5, $rounds @ pass # of rounds + add r12, $key, #248 @ pass key schedule + bl _bsaes_key_convert + add r4, $key, #248 + vldmia r4, {@XMM[6]} + vstmia r12, {@XMM[15]} @ save last round key + veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key + vstmia r4, {@XMM[7]} + +.align 2 +0: sub sp, #0x90 @ place for tweak[9] +#endif + vld1.8 {@XMM[8]}, [r0] @ initial tweak + adr $magic, .Lxts_magic + + tst $len, #0xf @ if not multiple of 16 + it ne @ Thumb2 thing, sanity check in ARM + subne $len, #0x10 @ subtract another 16 bytes + subs $len, #0x80 + + blo .Lxts_dec_short + b .Lxts_dec_loop + +.align 4 +.Lxts_dec_loop: + vldmia $magic, {$twmask} @ load XTS magic + vshr.s64 @T[0], @XMM[8], #63 + mov r0, sp + vand @T[0], @T[0], $twmask +___ +for($i=9;$i<16;$i++) { +$code.=<<___; + vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1] + vst1.64 {@XMM[$i-1]}, [r0,:128]! + vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")` + vshr.s64 @T[1], @XMM[$i], #63 + veor @XMM[$i], @XMM[$i], @T[0] + vand @T[1], @T[1], $twmask +___ + @T=reverse(@T); + +$code.=<<___ if ($i>=10); + vld1.8 {@XMM[$i-10]}, [$inp]! 
+___ +$code.=<<___ if ($i>=11); + veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3] +___ +} +$code.=<<___; + vadd.u64 @XMM[8], @XMM[15], @XMM[15] + vst1.64 {@XMM[15]}, [r0,:128]! + vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")` + veor @XMM[8], @XMM[8], @T[0] + vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak + + vld1.8 {@XMM[6]-@XMM[7]}, [$inp]! + veor @XMM[5], @XMM[5], @XMM[13] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[6], @XMM[6], @XMM[14] + mov r5, $rounds @ pass rounds + veor @XMM[7], @XMM[7], @XMM[15] + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]! + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[6], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[4], @XMM[11] + vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]! + veor @XMM[10], @XMM[2], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + veor @XMM[11], @XMM[7], @XMM[13] + veor @XMM[12], @XMM[3], @XMM[14] + vst1.8 {@XMM[10]-@XMM[11]}, [$out]! + veor @XMM[13], @XMM[5], @XMM[15] + vst1.8 {@XMM[12]-@XMM[13]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + + subs $len, #0x80 + bpl .Lxts_dec_loop + +.Lxts_dec_short: + adds $len, #0x70 + bmi .Lxts_dec_done + + vldmia $magic, {$twmask} @ load XTS magic + vshr.s64 @T[0], @XMM[8], #63 + mov r0, sp + vand @T[0], @T[0], $twmask +___ +for($i=9;$i<16;$i++) { +$code.=<<___; + vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1] + vst1.64 {@XMM[$i-1]}, [r0,:128]! + vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")` + vshr.s64 @T[1], @XMM[$i], #63 + veor @XMM[$i], @XMM[$i], @T[0] + vand @T[1], @T[1], $twmask +___ + @T=reverse(@T); + +$code.=<<___ if ($i>=10); + vld1.8 {@XMM[$i-10]}, [$inp]! + subs $len, #0x10 + bmi .Lxts_dec_`$i-9` +___ +$code.=<<___ if ($i>=11); + veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3] +___ +} +$code.=<<___; + sub $len, #0x10 + vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak + + vld1.8 {@XMM[6]}, [$inp]! + veor @XMM[5], @XMM[5], @XMM[13] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[6], @XMM[6], @XMM[14] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]! + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[6], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[4], @XMM[11] + vld1.64 {@XMM[14]}, [r0,:128]! + veor @XMM[10], @XMM[2], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + veor @XMM[11], @XMM[7], @XMM[13] + veor @XMM[12], @XMM[3], @XMM[14] + vst1.8 {@XMM[10]-@XMM[11]}, [$out]! + vst1.8 {@XMM[12]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_6: + vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak + + veor @XMM[4], @XMM[4], @XMM[12] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[5], @XMM[5], @XMM[13] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]! 
+ veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[6], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[4], @XMM[11] + veor @XMM[10], @XMM[2], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + veor @XMM[11], @XMM[7], @XMM[13] + vst1.8 {@XMM[10]-@XMM[11]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_5: + vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak + + veor @XMM[3], @XMM[3], @XMM[11] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[4], @XMM[4], @XMM[12] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + vld1.64 {@XMM[12]}, [r0,:128]! + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[6], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[4], @XMM[11] + veor @XMM[10], @XMM[2], @XMM[12] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + vst1.8 {@XMM[10]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_4: + vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak + + veor @XMM[2], @XMM[2], @XMM[10] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[3], @XMM[3], @XMM[11] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]! + vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[6], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + veor @XMM[9], @XMM[4], @XMM[11] + vst1.8 {@XMM[8]-@XMM[9]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_3: + vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak + + veor @XMM[1], @XMM[1], @XMM[9] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[2], @XMM[2], @XMM[10] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]! + vld1.64 {@XMM[10]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + veor @XMM[1], @XMM[1], @XMM[ 9] + veor @XMM[8], @XMM[6], @XMM[10] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + vst1.8 {@XMM[8]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_2: + vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak + + veor @XMM[0], @XMM[0], @XMM[8] +#ifndef BSAES_ASM_EXTENDED_KEY + add r4, sp, #0x90 @ pass key schedule +#else + add r4, $key, #248 @ pass key schedule +#endif + veor @XMM[1], @XMM[1], @XMM[9] + mov r5, $rounds @ pass rounds + mov r0, sp + + bl _bsaes_decrypt8 + + vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]! + veor @XMM[0], @XMM[0], @XMM[ 8] + veor @XMM[1], @XMM[1], @XMM[ 9] + vst1.8 {@XMM[0]-@XMM[1]}, [$out]! + + vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak + b .Lxts_dec_done +.align 4 +.Lxts_dec_1: + mov r0, sp + veor @XMM[0], @XMM[8] + mov r1, sp + vst1.8 {@XMM[0]}, [sp,:128] + mov r2, $key + mov r4, $fp @ preserve fp + mov r5, $magic @ preserve magic + + bl AES_decrypt + + vld1.8 {@XMM[0]}, [sp,:128] + veor @XMM[0], @XMM[0], @XMM[8] + vst1.8 {@XMM[0]}, [$out]! 
+ mov $fp, r4 + mov $magic, r5 + + vmov @XMM[8], @XMM[9] @ next round tweak + +.Lxts_dec_done: +#ifndef XTS_CHAIN_TWEAK + adds $len, #0x10 + beq .Lxts_dec_ret + + @ calculate one round of extra tweak for the stolen ciphertext + vldmia $magic, {$twmask} + vshr.s64 @XMM[6], @XMM[8], #63 + vand @XMM[6], @XMM[6], $twmask + vadd.u64 @XMM[9], @XMM[8], @XMM[8] + vswp `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")` + veor @XMM[9], @XMM[9], @XMM[6] + + @ perform the final decryption with the last tweak value + vld1.8 {@XMM[0]}, [$inp]! + mov r0, sp + veor @XMM[0], @XMM[0], @XMM[9] + mov r1, sp + vst1.8 {@XMM[0]}, [sp,:128] + mov r2, $key + mov r4, $fp @ preserve fp + + bl AES_decrypt + + vld1.8 {@XMM[0]}, [sp,:128] + veor @XMM[0], @XMM[0], @XMM[9] + vst1.8 {@XMM[0]}, [$out] + + mov r6, $out +.Lxts_dec_steal: + ldrb r1, [$out] + ldrb r0, [$inp], #1 + strb r1, [$out, #0x10] + strb r0, [$out], #1 + + subs $len, #1 + bhi .Lxts_dec_steal + + vld1.8 {@XMM[0]}, [r6] + mov r0, sp + veor @XMM[0], @XMM[8] + mov r1, sp + vst1.8 {@XMM[0]}, [sp,:128] + mov r2, $key + + bl AES_decrypt + + vld1.8 {@XMM[0]}, [sp,:128] + veor @XMM[0], @XMM[0], @XMM[8] + vst1.8 {@XMM[0]}, [r6] + mov $fp, r4 +#endif + +.Lxts_dec_ret: + bic r0, $fp, #0xf + vmov.i32 q0, #0 + vmov.i32 q1, #0 +#ifdef XTS_CHAIN_TWEAK + ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak +#endif +.Lxts_dec_bzero: @ wipe key schedule [if any] + vstmia sp!, {q0-q1} + cmp sp, r0 + bne .Lxts_dec_bzero + + mov sp, $fp +#ifdef XTS_CHAIN_TWEAK + vst1.8 {@XMM[8]}, [r1] +#endif + VFP_ABI_POP + ldmia sp!, {r4-r10, pc} @ return + +.size bsaes_xts_decrypt,.-bsaes_xts_decrypt +___ +} +$code.=<<___; +#endif +___ + +$code =~ s/\`([^\`]*)\`/eval($1)/gem; + +open SELF,$0; +while(<SELF>) { + next if (/^#!/); + last if (!s/^#/@/ and !/^$/); + print; +} +close SELF; + +print $code; + +close STDOUT; diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 1a7024b4135..c38b58c8020 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -24,6 +24,7 @@ generic-y += sembuf.h generic-y += serial.h generic-y += shmbuf.h generic-y += siginfo.h +generic-y += simd.h generic-y += sizes.h generic-y += socket.h generic-y += sockios.h diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index fcc1b5bf697..5c228516057 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -53,6 +53,13 @@ #define put_byte_3 lsl #0 #endif +/* Select code for any configuration running in BE8 mode */ +#ifdef CONFIG_CPU_ENDIAN_BE8 +#define ARM_BE8(code...) code +#else +#define ARM_BE8(code...) 
+#endif + /* * Data preload for architectures that support it */ diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index da1c77d3932..62d2cb53b06 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -12,6 +12,7 @@ #define __ASM_ARM_ATOMIC_H #include <linux/compiler.h> +#include <linux/prefetch.h> #include <linux/types.h> #include <linux/irqflags.h> #include <asm/barrier.h> @@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v) unsigned long tmp; int result; + prefetchw(&v->counter); __asm__ __volatile__("@ atomic_add\n" "1: ldrex %0, [%3]\n" " add %0, %0, %4\n" @@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v) unsigned long tmp; int result; + prefetchw(&v->counter); __asm__ __volatile__("@ atomic_sub\n" "1: ldrex %0, [%3]\n" " sub %0, %0, %4\n" @@ -114,7 +117,8 @@ static inline int atomic_sub_return(int i, atomic_t *v) static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) { - unsigned long oldval, res; + int oldval; + unsigned long res; smp_mb(); @@ -134,21 +138,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) return oldval; } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) -{ - unsigned long tmp, tmp2; - - __asm__ __volatile__("@ atomic_clear_mask\n" -"1: ldrex %0, [%3]\n" -" bic %0, %0, %4\n" -" strex %1, %0, [%3]\n" -" teq %1, #0\n" -" bne 1b" - : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr) - : "r" (addr), "Ir" (mask) - : "cc"); -} - #else /* ARM_ARCH_6 */ #ifdef CONFIG_SMP @@ -197,15 +186,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) return ret; } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) -{ - unsigned long flags; - - raw_local_irq_save(flags); - *addr &= ~mask; - raw_local_irq_restore(flags); -} - #endif /* __LINUX_ARM_ARCH__ */ #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) @@ -238,15 +218,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) #ifndef CONFIG_GENERIC_ATOMIC64 typedef struct { - u64 __aligned(8) counter; + long long counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } #ifdef CONFIG_ARM_LPAE -static inline u64 atomic64_read(const atomic64_t *v) +static inline long long atomic64_read(const atomic64_t *v) { - u64 result; + long long result; __asm__ __volatile__("@ atomic64_read\n" " ldrd %0, %H0, [%1]" @@ -257,7 +237,7 @@ static inline u64 atomic64_read(const atomic64_t *v) return result; } -static inline void atomic64_set(atomic64_t *v, u64 i) +static inline void atomic64_set(atomic64_t *v, long long i) { __asm__ __volatile__("@ atomic64_set\n" " strd %2, %H2, [%1]" @@ -266,9 +246,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i) ); } #else -static inline u64 atomic64_read(const atomic64_t *v) +static inline long long atomic64_read(const atomic64_t *v) { - u64 result; + long long result; __asm__ __volatile__("@ atomic64_read\n" " ldrexd %0, %H0, [%1]" @@ -279,10 +259,11 @@ static inline u64 atomic64_read(const atomic64_t *v) return result; } -static inline void atomic64_set(atomic64_t *v, u64 i) +static inline void atomic64_set(atomic64_t *v, long long i) { - u64 tmp; + long long tmp; + prefetchw(&v->counter); __asm__ __volatile__("@ atomic64_set\n" "1: ldrexd %0, %H0, [%2]\n" " strexd %0, %3, %H3, [%2]\n" @@ -294,15 +275,16 @@ static inline void atomic64_set(atomic64_t *v, u64 i) } #endif -static inline void atomic64_add(u64 i, atomic64_t *v) +static inline void atomic64_add(long long i, atomic64_t *v) { - u64 result; + long long 
result; unsigned long tmp; + prefetchw(&v->counter); __asm__ __volatile__("@ atomic64_add\n" "1: ldrexd %0, %H0, [%3]\n" -" adds %0, %0, %4\n" -" adc %H0, %H0, %H4\n" +" adds %Q0, %Q0, %Q4\n" +" adc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" @@ -311,17 +293,17 @@ static inline void atomic64_add(u64 i, atomic64_t *v) : "cc"); } -static inline u64 atomic64_add_return(u64 i, atomic64_t *v) +static inline long long atomic64_add_return(long long i, atomic64_t *v) { - u64 result; + long long result; unsigned long tmp; smp_mb(); __asm__ __volatile__("@ atomic64_add_return\n" "1: ldrexd %0, %H0, [%3]\n" -" adds %0, %0, %4\n" -" adc %H0, %H0, %H4\n" +" adds %Q0, %Q0, %Q4\n" +" adc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" @@ -334,15 +316,16 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v) return result; } -static inline void atomic64_sub(u64 i, atomic64_t *v) +static inline void atomic64_sub(long long i, atomic64_t *v) { - u64 result; + long long result; unsigned long tmp; + prefetchw(&v->counter); __asm__ __volatile__("@ atomic64_sub\n" "1: ldrexd %0, %H0, [%3]\n" -" subs %0, %0, %4\n" -" sbc %H0, %H0, %H4\n" +" subs %Q0, %Q0, %Q4\n" +" sbc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" @@ -351,17 +334,17 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) : "cc"); } -static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) +static inline long long atomic64_sub_return(long long i, atomic64_t *v) { - u64 result; + long long result; unsigned long tmp; smp_mb(); __asm__ __volatile__("@ atomic64_sub_return\n" "1: ldrexd %0, %H0, [%3]\n" -" subs %0, %0, %4\n" -" sbc %H0, %H0, %H4\n" +" subs %Q0, %Q0, %Q4\n" +" sbc %R0, %R0, %R4\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" " bne 1b" @@ -374,9 +357,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) return result; } -static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) +static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, + long long new) { - u64 oldval; + long long oldval; unsigned long res; smp_mb(); @@ -398,9 +382,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) return oldval; } -static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) +static inline long long atomic64_xchg(atomic64_t *ptr, long long new) { - u64 result; + long long result; unsigned long tmp; smp_mb(); @@ -419,18 +403,18 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) return result; } -static inline u64 atomic64_dec_if_positive(atomic64_t *v) +static inline long long atomic64_dec_if_positive(atomic64_t *v) { - u64 result; + long long result; unsigned long tmp; smp_mb(); __asm__ __volatile__("@ atomic64_dec_if_positive\n" "1: ldrexd %0, %H0, [%3]\n" -" subs %0, %0, #1\n" -" sbc %H0, %H0, #0\n" -" teq %H0, #0\n" +" subs %Q0, %Q0, #1\n" +" sbc %R0, %R0, #0\n" +" teq %R0, #0\n" " bmi 2f\n" " strexd %1, %0, %H0, [%3]\n" " teq %1, #0\n" @@ -445,9 +429,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v) return result; } -static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) +static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) { - u64 val; + long long val; unsigned long tmp; int ret = 1; @@ -459,8 +443,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) " teqeq %H0, %H5\n" " moveq %1, #0\n" " beq 2f\n" -" adds %0, %0, %6\n" -" adc %H0, %H0, %H6\n" +" adds %Q0, %Q0, %Q6\n" +" adc %R0, %R0, %R6\n" " strexd %2, %0, %H0, 
[%4]\n" " teq %2, #0\n" " bne 1b\n" diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h new file mode 100644 index 00000000000..1714800fa11 --- /dev/null +++ b/arch/arm/include/asm/bL_switcher.h @@ -0,0 +1,77 @@ +/* + * arch/arm/include/asm/bL_switcher.h + * + * Created by: Nicolas Pitre, April 2012 + * Copyright: (C) 2012-2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef ASM_BL_SWITCHER_H +#define ASM_BL_SWITCHER_H + +#include <linux/compiler.h> +#include <linux/types.h> + +typedef void (*bL_switch_completion_handler)(void *cookie); + +int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id, + bL_switch_completion_handler completer, + void *completer_cookie); +static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id) +{ + return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL); +} + +/* + * Register here to be notified about runtime enabling/disabling of + * the switcher. + * + * The notifier chain is called with the switcher activation lock held: + * the switcher will not be enabled or disabled during callbacks. + * Callbacks must not call bL_switcher_{get,put}_enabled(). + */ +#define BL_NOTIFY_PRE_ENABLE 0 +#define BL_NOTIFY_POST_ENABLE 1 +#define BL_NOTIFY_PRE_DISABLE 2 +#define BL_NOTIFY_POST_DISABLE 3 + +#ifdef CONFIG_BL_SWITCHER + +int bL_switcher_register_notifier(struct notifier_block *nb); +int bL_switcher_unregister_notifier(struct notifier_block *nb); + +/* + * Use these functions to temporarily prevent enabling/disabling of + * the switcher. + * bL_switcher_get_enabled() returns true if the switcher is currently + * enabled. Each call to bL_switcher_get_enabled() must be followed + * by a call to bL_switcher_put_enabled(). These functions are not + * recursive. 
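
(Editorial aside, not part of the patch.) A minimal usage sketch of the pairing rule described above; the caller and its pr_debug message are made up for illustration, only the get/put helpers come from this header:

#include <linux/printk.h>
#include <asm/bL_switcher.h>

static void do_work_with_stable_switcher_state(void)
{
	bool enabled = bL_switcher_get_enabled(); /* switcher can no longer be toggled */

	if (enabled)
		pr_debug("bL switcher currently active\n");
	/* ... work that must not race with a switcher enable/disable ... */

	bL_switcher_put_enabled();                /* must balance the get; calls do not nest */
}
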
+ */ +bool bL_switcher_get_enabled(void); +void bL_switcher_put_enabled(void); + +int bL_switcher_trace_trigger(void); +int bL_switcher_get_logical_index(u32 mpidr); + +#else +static inline int bL_switcher_register_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int bL_switcher_unregister_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline bool bL_switcher_get_enabled(void) { return false; } +static inline void bL_switcher_put_enabled(void) { } +static inline int bL_switcher_trace_trigger(void) { return 0; } +static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; } +#endif /* CONFIG_BL_SWITCHER */ + +#endif diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 7af5c6c3653..b274bde2490 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -2,6 +2,8 @@ #define _ASMARM_BUG_H #include <linux/linkage.h> +#include <linux/types.h> +#include <asm/opcodes.h> #ifdef CONFIG_BUG @@ -12,10 +14,10 @@ */ #ifdef CONFIG_THUMB2_KERNEL #define BUG_INSTR_VALUE 0xde02 -#define BUG_INSTR_TYPE ".hword " +#define BUG_INSTR(__value) __inst_thumb16(__value) #else #define BUG_INSTR_VALUE 0xe7f001f2 -#define BUG_INSTR_TYPE ".word " +#define BUG_INSTR(__value) __inst_arm(__value) #endif @@ -33,7 +35,7 @@ #define __BUG(__file, __line, __value) \ do { \ - asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \ + asm volatile("1:\t" BUG_INSTR(__value) "\n" \ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ "2:\t.asciz " #__file "\n" \ ".popsection\n" \ @@ -48,7 +50,7 @@ do { \ #define __BUG(__file, __line, __value) \ do { \ - asm volatile(BUG_INSTR_TYPE #__value); \ + asm volatile(BUG_INSTR(__value) "\n"); \ unreachable(); \ } while (0) #endif /* CONFIG_DEBUG_BUGVERBOSE */ diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 15f2d5bf887..ee753f1749c 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size) #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr)) #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) +/* + * Disabling cache access for one CPU in an ARMv7 SMP system is tricky. + * To do so we must: + * + * - Clear the SCTLR.C bit to prevent further cache allocations + * - Flush the desired level of cache + * - Clear the ACTLR "SMP" bit to disable local coherency + * + * ... and so without any intervening memory access in between those steps, + * not even to the stack. + * + * WARNING -- After this has been called: + * + * - No ldrex/strex (and similar) instructions must be used. + * - The CPU is obviously no longer coherent with the other CPUs. + * - This is unlikely to work as expected if Linux is running non-secure. + * + * Note: + * + * - This is known to apply to several ARMv7 processor implementations, + * however some exceptions may exist. Caveat emptor. + * + * - The clobber list is dictated by the call to v7_flush_dcache_*. + * fp is preserved to the stack explicitly prior disabling the cache + * since adding it to the clobber list is incompatible with having + * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering + * trampoline are inserted by the linker and to keep sp 64-bit aligned. 
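
(Editorial aside, not part of the patch.) A hedged sketch of how the macro defined just below is typically invoked from a CPU power-down path; the platform hook name is hypothetical, only the macro and a cache-level argument such as "louis" or "all" relate to this header:

static void example_cpu_power_down(void)
{
	/* assumed: interrupts are already masked or rerouted away from this CPU */
	v7_exit_coherency_flush(louis);     /* flush to LoUIS and leave SMP coherency */
	/* from here on: no ldrex/strex, no coherent sharing with other CPUs */
	example_power_controller_off();     /* hypothetical platform-specific hook */
}
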
+ */ +#define v7_exit_coherency_flush(level) \ + asm volatile( \ + "stmfd sp!, {fp, ip} \n\t" \ + "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \ + "bic r0, r0, #"__stringify(CR_C)" \n\t" \ + "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \ + "isb \n\t" \ + "bl v7_flush_dcache_"__stringify(level)" \n\t" \ + "clrex \n\t" \ + "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \ + "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \ + "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \ + "isb \n\t" \ + "dsb \n\t" \ + "ldmfd sp!, {fp, ip}" \ + : : : "r0","r1","r2","r3","r4","r5","r6","r7", \ + "r9","r10","lr","memory" ) + #endif diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h index 4f009c10540..df2fbba7efc 100644 --- a/arch/arm/include/asm/cmpxchg.h +++ b/arch/arm/include/asm/cmpxchg.h @@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, return ret; } +static inline unsigned long long __cmpxchg64(unsigned long long *ptr, + unsigned long long old, + unsigned long long new) +{ + unsigned long long oldval; + unsigned long res; + + __asm__ __volatile__( +"1: ldrexd %1, %H1, [%3]\n" +" teq %1, %4\n" +" teqeq %H1, %H4\n" +" bne 2f\n" +" strexd %0, %5, %H5, [%3]\n" +" teq %0, #0\n" +" bne 1b\n" +"2:" + : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr) + : "r" (ptr), "r" (old), "r" (new) + : "cc"); + + return oldval; +} + +static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr, + unsigned long long old, + unsigned long long new) +{ + unsigned long long ret; + + smp_mb(); + ret = __cmpxchg64(ptr, old, new); + smp_mb(); + + return ret; +} + #define cmpxchg_local(ptr,o,n) \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ (unsigned long)(o), \ @@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, sizeof(*(ptr)))) #define cmpxchg64(ptr, o, n) \ - ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ - atomic64_t, \ - counter), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) - -#define cmpxchg64_local(ptr, o, n) \ - ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ - local64_t, \ - a), \ - (unsigned long long)(o), \ - (unsigned long long)(n))) + ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#define cmpxchg64_relaxed(ptr, o, n) \ + ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ + (unsigned long long)(o), \ + (unsigned long long)(n))) + +#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) #endif /* __LINUX_ARM_ARCH__ >= 6 */ diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 9672e978d50..acdde76b39b 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -10,6 +10,7 @@ #define CPUID_TLBTYPE 3 #define CPUID_MPUIR 4 #define CPUID_MPIDR 5 +#define CPUID_REVIDR 6 #ifdef CONFIG_CPU_V7M #define CPUID_EXT_PFR0 0x40 diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 5b579b95150..e701a4d9aa5 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -11,17 +11,28 @@ #include <asm-generic/dma-coherent.h> #include <asm/memory.h> +#include <xen/xen.h> +#include <asm/xen/hypervisor.h> + #define DMA_ERROR_CODE (~0) extern struct dma_map_ops arm_dma_ops; extern struct dma_map_ops arm_coherent_dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) { if (dev && dev->archdata.dma_ops) return dev->archdata.dma_ops; 
return &arm_dma_ops; } +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (xen_initial_domain()) + return xen_dma_ops; + else + return __generic_dma_ops(dev); +} + static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) { BUG_ON(!dev); @@ -64,6 +75,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) { return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); } + #else static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) { @@ -86,6 +98,46 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) } #endif +/* The ARM override for dma_max_pfn() */ +static inline unsigned long dma_max_pfn(struct device *dev) +{ + return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask); +} +#define dma_max_pfn(dev) dma_max_pfn(dev) + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + unsigned int offset = paddr & ~PAGE_MASK; + return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) +{ + unsigned int offset = dev_addr & ~PAGE_MASK; + return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; +} + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ + u64 limit, mask; + + if (!dev->dma_mask) + return 0; + + mask = *dev->dma_mask; + + limit = (mask + 1) & ~mask; + if (limit && size > limit) + return 0; + + if ((addr | (addr + size - 1)) & ~mask) + return 0; + + return 1; +} + +static inline void dma_mark_clean(void *addr, size_t size) { } + /* * DMA errors are defined by all-bits-set in the DMA address. */ diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index 2740c2a2df6..fe3ea776dc3 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -5,7 +5,7 @@ #include <linux/threads.h> #include <asm/irq.h> -#define NR_IPI 6 +#define NR_IPI 8 typedef struct { unsigned int __softirq_pending; diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h index 0cf7a6b842f..ad774f37c47 100644 --- a/arch/arm/include/asm/hardware/coresight.h +++ b/arch/arm/include/asm/hardware/coresight.h @@ -24,8 +24,8 @@ #define TRACER_TIMEOUT 10000 #define etm_writel(t, v, x) \ - (__raw_writel((v), (t)->etm_regs + (x))) -#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x))) + (writel_relaxed((v), (t)->etm_regs + (x))) +#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x))) /* CoreSight Management Registers */ #define CSMR_LOCKACCESS 0xfb0 @@ -142,8 +142,8 @@ #define ETBFF_TRIGFL BIT(10) #define etb_writel(t, v, x) \ - (__raw_writel((v), (t)->etb_regs + (x))) -#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x))) + (writel_relaxed((v), (t)->etb_regs + (x))) +#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x))) #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0) #define etm_unlock(t) \ diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h deleted file mode 100644 index 9eda7dc92ad..00000000000 --- a/arch/arm/include/asm/hardware/iop3xx-gpio.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * arch/arm/include/asm/hardware/iop3xx-gpio.h - * - * IOP3xx GPIO wrappers - * - * Copyright (c) 2008 Arnaud Patard <arnaud.patard@rtp-net.org> - * Based on IXP4XX gpio.h file - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free 
Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef __ASM_ARM_HARDWARE_IOP3XX_GPIO_H -#define __ASM_ARM_HARDWARE_IOP3XX_GPIO_H - -#include <mach/hardware.h> -#include <asm-generic/gpio.h> - -#define __ARM_GPIOLIB_COMPLEX - -#define IOP3XX_N_GPIOS 8 - -static inline int gpio_get_value(unsigned gpio) -{ - if (gpio > IOP3XX_N_GPIOS) - return __gpio_get_value(gpio); - - return gpio_line_get(gpio); -} - -static inline void gpio_set_value(unsigned gpio, int value) -{ - if (gpio > IOP3XX_N_GPIOS) { - __gpio_set_value(gpio, value); - return; - } - gpio_line_set(gpio, value); -} - -static inline int gpio_cansleep(unsigned gpio) -{ - if (gpio < IOP3XX_N_GPIOS) - return 0; - else - return __gpio_cansleep(gpio); -} - -/* - * The GPIOs are not generating any interrupt - * Note : manuals are not clear about this - */ -static inline int gpio_to_irq(int gpio) -{ - return -EINVAL; -} - -static inline int irq_to_gpio(int gpio) -{ - return -EINVAL; -} - -#endif - diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 423744bf18e..2594a95ff19 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -18,16 +18,9 @@ /* * IOP3XX GPIO handling */ -#define GPIO_IN 0 -#define GPIO_OUT 1 -#define GPIO_LOW 0 -#define GPIO_HIGH 1 #define IOP3XX_GPIO_LINE(x) (x) #ifndef __ASSEMBLY__ -extern void gpio_line_config(int line, int direction); -extern int gpio_line_get(int line); -extern void gpio_line_set(int line, int value); extern int init_atu; extern int iop3xx_get_init_atu(void); #endif @@ -168,11 +161,6 @@ extern int iop3xx_get_init_atu(void); /* PERCR0 DOESN'T EXIST - index from 1! */ #define IOP3XX_PERCR0 (volatile u32 *)IOP3XX_REG_ADDR(0x0710) -/* General Purpose I/O */ -#define IOP3XX_GPOE (volatile u32 *)IOP3XX_GPIO_REG(0x0000) -#define IOP3XX_GPID (volatile u32 *)IOP3XX_GPIO_REG(0x0004) -#define IOP3XX_GPOD (volatile u32 *)IOP3XX_GPIO_REG(0x0008) - /* Timers */ #define IOP3XX_TU_TMR0 (volatile u32 *)IOP3XX_TIMER_REG(0x0000) #define IOP3XX_TU_TMR1 (volatile u32 *)IOP3XX_TIMER_REG(0x0004) diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d070741b2b3..3c597c222ef 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -24,9 +24,11 @@ #ifdef __KERNEL__ #include <linux/types.h> +#include <linux/blk_types.h> #include <asm/byteorder.h> #include <asm/memory.h> #include <asm-generic/pci_iomap.h> +#include <xen/xen.h> /* * ISA I/O bus memory addresses are 1:1 with the physical address. 
@@ -372,6 +374,13 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr); #define BIOVEC_MERGEABLE(vec1, vec2) \ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) +struct bio_vec; +extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, + const struct bio_vec *vec2); +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ + (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) + #ifdef CONFIG_MMU #define ARCH_HAS_VALID_PHYS_ADDR_RANGE extern int valid_phys_addr_range(phys_addr_t addr, size_t size); diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 48066ce9ea3..0a9d5dd9329 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -11,6 +11,7 @@ #define __ARM_KGDB_H__ #include <linux/ptrace.h> +#include <asm/opcodes.h> /* * GDB assumes that we're a user process being debugged, so @@ -41,7 +42,7 @@ static inline void arch_kgdb_breakpoint(void) { - asm(".word 0xe7ffdeff"); + asm(__inst_arm(0xe7ffdeff)); } extern void kgdb_handle_bus_error(void); diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index 64e96960de2..1d3153c7eb4 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -57,6 +57,7 @@ * TSC: Trap SMC * TSW: Trap cache operations by set/way * TWI: Trap WFI + * TWE: Trap WFE * TIDCP: Trap L2CTLR/L2ECTLR * BSU_IS: Upgrade barriers to the inner shareable domain * FB: Force broadcast of all maintainance operations @@ -67,7 +68,7 @@ */ #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ - HCR_SWIO | HCR_TIDCP) + HCR_TWE | HCR_SWIO | HCR_TIDCP) #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) /* System Control Register (SCTLR) bits */ @@ -95,12 +96,12 @@ #define TTBCR_IRGN1 (3 << 24) #define TTBCR_EPD1 (1 << 23) #define TTBCR_A1 (1 << 22) -#define TTBCR_T1SZ (3 << 16) +#define TTBCR_T1SZ (7 << 16) #define TTBCR_SH0 (3 << 12) #define TTBCR_ORGN0 (3 << 10) #define TTBCR_IRGN0 (3 << 8) #define TTBCR_EPD0 (1 << 7) -#define TTBCR_T0SZ 3 +#define TTBCR_T0SZ (7 << 0) #define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0) /* Hyp System Trap Register */ @@ -208,6 +209,8 @@ #define HSR_EC_DABT (0x24) #define HSR_EC_DABT_HYP (0x25) +#define HSR_WFI_IS_WFE (1U << 0) + #define HSR_HVC_IMM_MASK ((1UL << 16) - 1) #define HSR_DABT_S1PTW (1U << 7) diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index a2f43ddcc30..661da11f76f 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -39,7 +39,7 @@ #define c6_IFAR 17 /* Instruction Fault Address Register */ #define c7_PAR 18 /* Physical Address Register */ #define c7_PAR_high 19 /* PAR top 32 bits */ -#define c9_L2CTLR 20 /* Cortex A15 L2 Control Register */ +#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */ #define c10_PRRR 21 /* Primary Region Remap Register */ #define c10_NMRR 22 /* Normal Memory Remap Register */ #define c12_VBAR 23 /* Vector Base Address Register */ diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index a464e8d7b6c..0fa90c962ac 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -157,4 +157,55 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; } +static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.cp15[c0_MPIDR]; +} + 
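
(Editorial aside, not part of the patch.) The byte-order helpers added just below are intended for the MMIO emulation path; a simplified sketch of the call pattern for a guest store, with everything apart from the helper itself treated as a placeholder:

#include <linux/string.h>
#include <asm/kvm_emulate.h>

static void example_emulate_mmio_store(struct kvm_vcpu *vcpu, void *mmio_buf,
				       unsigned int len, unsigned long rt)
{
	unsigned long data;

	/* Convert the guest register value into the host's view of the bytes
	 * that should land on the bus, swapping only for big-endian guests. */
	data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
	memcpy(mmio_buf, &data, len);
}
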
+static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) +{ + *vcpu_cpsr(vcpu) |= PSR_E_BIT; +} + +static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) +{ + return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT); +} + +static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, + unsigned long data, + unsigned int len) +{ + if (kvm_vcpu_is_be(vcpu)) { + switch (len) { + case 1: + return data & 0xff; + case 2: + return be16_to_cpu(data & 0xffff); + default: + return be32_to_cpu(data); + } + } + + return data; /* Leave LE untouched */ +} + +static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, + unsigned long data, + unsigned int len) +{ + if (kvm_vcpu_is_be(vcpu)) { + switch (len) { + case 1: + return data & 0xff; + case 2: + return cpu_to_be16(data & 0xffff); + default: + return cpu_to_be32(data); + } + } + + return data; /* Leave LE untouched */ +} + #endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 7d22517d807..8a6f6db14ee 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -38,11 +38,6 @@ #define KVM_VCPU_MAX_FEATURES 1 -/* We don't currently support large pages. */ -#define KVM_HPAGE_GFN_SHIFT(x) 0 -#define KVM_NR_PAGE_SIZES 1 -#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) - #include <kvm/arm_vgic.h> struct kvm_vcpu; @@ -154,6 +149,7 @@ struct kvm_vcpu_stat { struct kvm_vcpu_init; int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init); +int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); struct kvm_one_reg; diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 9b28c41f4ba..77de4a41cc5 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -62,6 +62,12 @@ phys_addr_t kvm_get_idmap_vector(void); int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); +static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd) +{ + *pmd = new_pmd; + flush_pmd_entry(pmd); +} + static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) { *pte = new_pte; @@ -103,9 +109,15 @@ static inline void kvm_set_s2pte_writable(pte_t *pte) pte_val(*pte) |= L_PTE_S2_RDWR; } +static inline void kvm_set_s2pmd_writable(pmd_t *pmd) +{ + pmd_val(*pmd) |= L_PMD_S2_RDWR; +} + struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, + unsigned long size) { /* * If we are going to insert an instruction page and the icache is @@ -120,8 +132,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) * need any kind of flushing (DDI 0406C.b - Page B3-1392). 
*/ if (icache_is_pipt()) { - unsigned long hva = gfn_to_hva(kvm, gfn); - __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); + __cpuc_coherent_user_range(hva, hva + size); } else if (!icache_is_vivt_asid_tagged()) { /* any kind of VIPT cache */ __flush_icache_all(); diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 402a2bc6aa6..17a3fa2979e 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -49,6 +49,7 @@ struct machine_desc { bool (*smp_init)(void); void (*fixup)(struct tag *, char **, struct meminfo *); + void (*init_meminfo)(void); void (*reserve)(void);/* reserve mem blocks */ void (*map_io)(void);/* IO mapping function */ void (*init_early)(void); diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h index fc82a88f5b6..608516ebabf 100644 --- a/arch/arm/include/asm/mcpm.h +++ b/arch/arm/include/asm/mcpm.h @@ -42,6 +42,14 @@ extern void mcpm_entry_point(void); void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); /* + * This sets an early poke i.e a value to be poked into some address + * from very early assembly code before the CPU is ungated. The + * address must be physical, and if 0 then nothing will happen. + */ +void mcpm_set_early_poke(unsigned cpu, unsigned cluster, + unsigned long poke_phys_addr, unsigned long poke_val); + +/* * CPU/cluster power operations API for higher subsystems to use. */ @@ -81,10 +89,40 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster); * * This will return if mcpm_platform_register() has not been called * previously in which case the caller should take appropriate action. + * + * On success, the CPU is not guaranteed to be truly halted until + * mcpm_cpu_power_down_finish() subsequently returns non-zero for the + * specified cpu. Until then, other CPUs should make sure they do not + * trash memory the target CPU might be executing/accessing. */ void mcpm_cpu_power_down(void); /** + * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and + * make sure it is powered off + * + * @cpu: CPU number within given cluster + * @cluster: cluster number for the CPU + * + * Call this function to ensure that a pending powerdown has taken + * effect and the CPU is safely parked before performing non-mcpm + * operations that may affect the CPU (such as kexec trashing the + * kernel text). + * + * It is *not* necessary to call this function if you only need to + * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup + * event. + * + * Do not call this function unless the specified CPU has already + * called mcpm_cpu_power_down() or has committed to doing so. + * + * @return: + * - zero if the CPU is in a safely parked state + * - nonzero otherwise (e.g., timeout) + */ +int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster); + +/** * mcpm_cpu_suspend - bring the calling CPU in a suspended state * * @expected_residency: duration in microseconds the CPU is expected @@ -126,6 +164,7 @@ int mcpm_cpu_powered_up(void); struct mcpm_platform_ops { int (*power_up)(unsigned int cpu, unsigned int cluster); void (*power_down)(void); + int (*power_down_finish)(unsigned int cpu, unsigned int cluster); void (*suspend)(u64); void (*powered_up)(void); }; diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index e750a938fd3..4dd21457ef9 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -172,8 +172,13 @@ * so that all we need to do is modify the 8-bit constant field. 
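
(Editorial aside, not part of the patch.) A worked example of the sentence above, assuming a 3G/1G split and RAM at 0x10000000: the virt-to-phys delta is a multiple of 16MiB, so only bits [31:24] of the stub immediate (emitted with the 0x81000000 placeholder) have to be rewritten at boot:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_offset = 0xC0000000;              /* assumed kernel VA base */
	uint32_t phys_offset = 0x10000000;              /* assumed RAM PA base    */
	uint32_t pv_offset = phys_offset - page_offset; /* 0x50000000 (mod 2^32)  */
	uint32_t virt = 0xC0123000;

	printf("patched immediate byte: 0x%02x\n", pv_offset >> 24);    /* 0x50       */
	printf("virt 0x%08x -> phys 0x%08x\n", virt, virt + pv_offset); /* 0x10123000 */
	return 0;
}
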
*/ #define __PV_BITS_31_24 0x81000000 +#define __PV_BITS_7_0 0x81 + +extern u64 __pv_phys_offset; +extern u64 __pv_offset; +extern void fixup_pv_table(const void *, unsigned long); +extern const void *__pv_table_begin, *__pv_table_end; -extern unsigned long __pv_phys_offset; #define PHYS_OFFSET __pv_phys_offset #define __pv_stub(from,to,instr,type) \ @@ -185,22 +190,58 @@ extern unsigned long __pv_phys_offset; : "=r" (to) \ : "r" (from), "I" (type)) -static inline unsigned long __virt_to_phys(unsigned long x) +#define __pv_stub_mov_hi(t) \ + __asm__ volatile("@ __pv_stub_mov\n" \ + "1: mov %R0, %1\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "=r" (t) \ + : "I" (__PV_BITS_7_0)) + +#define __pv_add_carry_stub(x, y) \ + __asm__ volatile("@ __pv_add_carry_stub\n" \ + "1: adds %Q0, %1, %2\n" \ + " adc %R0, %R0, #0\n" \ + " .pushsection .pv_table,\"a\"\n" \ + " .long 1b\n" \ + " .popsection\n" \ + : "+r" (y) \ + : "r" (x), "I" (__PV_BITS_31_24) \ + : "cc") + +static inline phys_addr_t __virt_to_phys(unsigned long x) { - unsigned long t; - __pv_stub(x, t, "add", __PV_BITS_31_24); + phys_addr_t t; + + if (sizeof(phys_addr_t) == 4) { + __pv_stub(x, t, "add", __PV_BITS_31_24); + } else { + __pv_stub_mov_hi(t); + __pv_add_carry_stub(x, t); + } return t; } -static inline unsigned long __phys_to_virt(unsigned long x) +static inline unsigned long __phys_to_virt(phys_addr_t x) { unsigned long t; __pv_stub(x, t, "sub", __PV_BITS_31_24); return t; } + #else -#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) -#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) + +static inline phys_addr_t __virt_to_phys(unsigned long x) +{ + return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; +} + +static inline unsigned long __phys_to_virt(phys_addr_t x) +{ + return x - PHYS_OFFSET + PAGE_OFFSET; +} + #endif #endif #endif /* __ASSEMBLY__ */ @@ -238,16 +279,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x) static inline void *phys_to_virt(phys_addr_t x) { - return (void *)(__phys_to_virt((unsigned long)(x))); + return (void *)__phys_to_virt(x); } /* * Drivers should NOT use these either. */ #define __pa(x) __virt_to_phys((unsigned long)(x)) -#define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) +#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); + +/* + * These are for systems that have a hardware interconnect supported alias of + * physical memory for idmap purposes. Most cases should leave these + * untouched. 
+ */ +static inline phys_addr_t __virt_to_idmap(unsigned long x) +{ + if (arch_virt_to_idmap) + return arch_virt_to_idmap(x); + else + return __virt_to_phys(x); +} + +#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x)) + /* * Virtual <-> DMA view memory address translations * Again, these are *only* valid on the kernel direct mapped RAM diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 6f18da09668..64fd15159b7 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -16,7 +16,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID #define ASID_BITS 8 #define ASID_MASK ((~0ULL) << ASID_BITS) -#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK) +#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK)) #else #define ASID(mm) (0) #endif diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 943504f53f5..78a77936168 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -102,12 +102,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) #else pte = alloc_pages(PGALLOC_GFP, 0); #endif - if (pte) { - if (!PageHighMem(pte)) - clean_pte_table(page_address(pte)); - pgtable_page_ctor(pte); + if (!pte) + return NULL; + if (!PageHighMem(pte)) + clean_pte_table(page_address(pte)); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; } - return pte; } diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index f97ee02386e..86a659a1952 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h @@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) +/* + * We don't have huge page support for short descriptors, for the moment + * define empty stubs for use by pin_page_for_write. + */ +#define pmd_hugewillfault(pmd) (0) +#define pmd_thp_or_huge(pmd) (0) + #endif /* __ASSEMBLY__ */ #endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 5689c18c85f..4f9503908dc 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -126,6 +126,8 @@ #define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ #define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ +#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ + /* * Hyp-mode PL2 PTE definitions for LPAE. */ @@ -206,6 +208,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) #define __HAVE_ARCH_PMD_WRITE #define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) +#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd)) +#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 413f3876341..c3d5fc124a0 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -22,6 +22,7 @@ #include <asm/hw_breakpoint.h> #include <asm/ptrace.h> #include <asm/types.h> +#include <asm/unified.h> #ifdef __KERNEL__ #define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? 
\ @@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc #define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp +#ifdef CONFIG_SMP +#define __ALT_SMP_ASM(smp, up) \ + "9998: " smp "\n" \ + " .pushsection \".alt.smp.init\", \"a\"\n" \ + " .long 9998b\n" \ + " " up "\n" \ + " .popsection\n" +#else +#define __ALT_SMP_ASM(smp, up) up +#endif + /* * Prefetching support - only ARMv5. */ @@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr) { __asm__ __volatile__( "pld\t%a0" - : - : "p" (ptr) - : "cc"); + :: "p" (ptr)); } +#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) #define ARCH_HAS_PREFETCHW -#define prefetchw(ptr) prefetch(ptr) - -#define ARCH_HAS_SPINLOCK_PREFETCH -#define spin_lock_prefetch(x) do { } while (0) - +static inline void prefetchw(const void *ptr) +{ + __asm__ __volatile__( + ".arch_extension mp\n" + __ALT_SMP_ASM( + WASM(pldw) "\t%a0", + WASM(pld) "\t%a0" + ) + :: "p" (ptr)); +} +#endif #endif #define HAVE_ARCH_PICK_MMAP_LAYOUT diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index 4a2985e2196..b681575ad3d 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -11,8 +11,6 @@ #ifndef __ASMARM_PROM_H #define __ASMARM_PROM_H -#define HAVE_ARCH_DEVTREE_FIXUPS - #ifdef CONFIG_OF extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys); diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index c50f0560950..8d6a089dfb7 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -49,7 +49,7 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -extern int arm_add_memory(phys_addr_t start, phys_addr_t size); +extern int arm_add_memory(u64 start, u64 size); extern void early_print(const char *str, ...); extern void dump_machine_table(void); diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index a8cae71cace..22a3b9b5d4a 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); +extern int register_ipi_completion(struct completion *completion, int cpu); + struct smp_operations { #ifdef CONFIG_SMP /* diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 4f2c28060c9..ef3c6072aa4 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,21 +5,13 @@ #error SMP not supported on pre-ARMv6 CPUs #endif -#include <asm/processor.h> +#include <linux/prefetch.h> /* * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K * extensions, so when running on UP, we have to patch these instructions away. */ -#define ALT_SMP(smp, up) \ - "9998: " smp "\n" \ - " .pushsection \".alt.smp.init\", \"a\"\n" \ - " .long 9998b\n" \ - " " up "\n" \ - " .popsection\n" - #ifdef CONFIG_THUMB2_KERNEL -#define SEV ALT_SMP("sev.w", "nop.w") /* * For Thumb-2, special care is needed to ensure that the conditional WFE * instruction really does assemble to exactly 4 bytes (as required by @@ -31,17 +23,18 @@ * the assembler won't change IT instructions which are explicitly present * in the input. 
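__ALT_SMP_ASM() and the new prefetchw() build on the existing SMP-on-UP patching scheme: every site that emits an SMP-only opcode (pldw here, sev/wfe in spinlock.h) also records its address in .alt.smp.init so a uniprocessor boot can rewrite it with the UP variant. A toy table-driven model of that idea, using placeholder opcode values rather than real ARM encodings:

/* Toy model of the .alt.smp.init mechanism: each patched site records
 * (address, UP replacement opcode), and a uniprocessor boot walks the table
 * and overwrites the SMP-only instruction. Opcode values are placeholders. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define OP_SEV	0x1111u		/* placeholder "sev" */
#define OP_PLDW	0x2222u		/* placeholder "pldw" */
#define OP_NOP	0x0000u		/* placeholder "nop" */
#define OP_PLD	0x3333u		/* placeholder "pld" */

static uint32_t text[] = { OP_SEV, OP_PLDW };	/* pretend kernel text */

struct alt_smp_entry {
	uint32_t *addr;		/* instruction to patch */
	uint32_t up_opcode;	/* what a UP system should run instead */
};

static const struct alt_smp_entry alt_smp_init[] = {
	{ &text[0], OP_NOP },
	{ &text[1], OP_PLD },
};

static void fixup_smp_on_up(void)
{
	size_t i;

	for (i = 0; i < sizeof(alt_smp_init) / sizeof(alt_smp_init[0]); i++)
		*alt_smp_init[i].addr = alt_smp_init[i].up_opcode;
}

int main(void)
{
	int is_smp = 0;		/* pretend we booted on a uniprocessor */

	if (!is_smp)
		fixup_smp_on_up();
	printf("patched text: %#x %#x\n", (unsigned)text[0], (unsigned)text[1]);
	return 0;
}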
*/ -#define WFE(cond) ALT_SMP( \ +#define WFE(cond) __ALT_SMP_ASM( \ "it " cond "\n\t" \ "wfe" cond ".n", \ \ "nop.w" \ ) #else -#define SEV ALT_SMP("sev", "nop") -#define WFE(cond) ALT_SMP("wfe" cond, "nop") +#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop") #endif +#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop)) + static inline void dsb_sev(void) { #if __LINUX_ARM_ARCH__ >= 7 @@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) u32 newval; arch_spinlock_t lockval; + prefetchw(&lock->slock); __asm__ __volatile__( "1: ldrex %0, [%3]\n" " add %1, %0, %4\n" @@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) unsigned long contended, res; u32 slock; + prefetchw(&lock->slock); do { __asm__ __volatile__( " ldrex %0, [%3]\n" @@ -127,10 +122,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) dsb_sev(); } +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) +{ + return lock.tickets.owner == lock.tickets.next; +} + static inline int arch_spin_is_locked(arch_spinlock_t *lock) { - struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); - return tickets.owner != tickets.next; + return !arch_spin_value_unlocked(ACCESS_ONCE(*lock)); } static inline int arch_spin_is_contended(arch_spinlock_t *lock) @@ -152,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; + prefetchw(&rw->lock); __asm__ __volatile__( "1: ldrex %0, [%1]\n" " teq %0, #0\n" @@ -170,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long contended, res; + prefetchw(&rw->lock); do { __asm__ __volatile__( " ldrex %0, [%2]\n" @@ -203,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) } /* write_can_lock - would write_trylock() succeed? */ -#define arch_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0) /* * Read locks are a bit more hairy: @@ -221,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; + prefetchw(&rw->lock); __asm__ __volatile__( "1: ldrex %0, [%2]\n" " adds %0, %0, #1\n" @@ -241,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) smp_mb(); + prefetchw(&rw->lock); __asm__ __volatile__( "1: ldrex %0, [%2]\n" " sub %0, %0, #1\n" @@ -259,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long contended, res; + prefetchw(&rw->lock); do { __asm__ __volatile__( " ldrex %0, [%2]\n" @@ -280,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) } /* read_can_lock - would read_trylock() succeed? 
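The new arch_spin_value_unlocked() works on a snapshot because these are ticket locks: the lock is free exactly when the owner and next tickets match, and arch_spin_is_locked() is now just the negation of that test on an ACCESS_ONCE copy. A plain C11 sketch of the same shape, with busy-waiting standing in for the ldrex/strex and wfe/sev sequences:

/* Ticket-lock sketch behind arch_spin_value_unlocked(): the lock is free
 * exactly when owner == next. C11 atomics stand in for ldrex/strex loops;
 * the real unlock also issues dsb_sev() to wake waiters. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ticket_lock {
	_Atomic uint16_t next;
	_Atomic uint16_t owner;
};

static void ticket_lock(struct ticket_lock *l)
{
	uint16_t ticket = atomic_fetch_add(&l->next, 1);

	while (atomic_load(&l->owner) != ticket)
		;	/* the kernel waits here with wfe() */
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->owner, 1);
}

static int ticket_value_unlocked(struct ticket_lock *l)
{
	return atomic_load(&l->owner) == atomic_load(&l->next);
}

int main(void)
{
	struct ticket_lock l = { 0, 0 };

	printf("free: %d\n", ticket_value_unlocked(&l));
	ticket_lock(&l);
	printf("free after lock: %d\n", ticket_value_unlocked(&l));
	ticket_unlock(&l);
	printf("free after unlock: %d\n", ticket_value_unlocked(&l));
	return 0;
}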
*/ -#define arch_read_can_lock(x) ((x)->lock < 0x80000000) +#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000) #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index b262d2f8b47..47663fcb10a 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -25,7 +25,7 @@ typedef struct { #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } typedef struct { - volatile unsigned int lock; + u32 lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { 0 } diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 38960264040..def9e570199 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -560,37 +560,6 @@ static inline void __flush_bp_all(void) asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); } -#include <asm/cputype.h> -#ifdef CONFIG_ARM_ERRATA_798181 -static inline int erratum_a15_798181(void) -{ - unsigned int midr = read_cpuid_id(); - - /* Cortex-A15 r0p0..r3p2 affected */ - if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) - return 0; - return 1; -} - -static inline void dummy_flush_tlb_a15_erratum(void) -{ - /* - * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. - */ - asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); - dsb(ish); -} -#else -static inline int erratum_a15_798181(void) -{ - return 0; -} - -static inline void dummy_flush_tlb_a15_erratum(void) -{ -} -#endif - /* * flush_pmd_entry * @@ -697,4 +666,21 @@ extern void flush_bp_all(void); #endif +#ifndef __ASSEMBLY__ +#ifdef CONFIG_ARM_ERRATA_798181 +extern void erratum_a15_798181_init(void); +#else +static inline void erratum_a15_798181_init(void) {} +#endif +extern bool (*erratum_a15_798181_handler)(void); + +static inline bool erratum_a15_798181(void) +{ + if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) && + erratum_a15_798181_handler)) + return erratum_a15_798181_handler(); + return false; +} +#endif + #endif diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h index f5989f46b4d..b88beaba6b4 100644 --- a/arch/arm/include/asm/unified.h +++ b/arch/arm/include/asm/unified.h @@ -38,6 +38,8 @@ #ifdef __ASSEMBLY__ #define W(instr) instr.w #define BSYM(sym) sym + 1 +#else +#define WASM(instr) #instr ".w" #endif #else /* !CONFIG_THUMB2_KERNEL */ @@ -50,6 +52,8 @@ #ifdef __ASSEMBLY__ #define W(instr) instr #define BSYM(sym) sym +#else +#define WASM(instr) #instr #endif #endif /* CONFIG_THUMB2_KERNEL */ diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h index d7ab99a0c9e..1317ee40f4d 100644 --- a/arch/arm/include/asm/xen/hypervisor.h +++ b/arch/arm/include/asm/xen/hypervisor.h @@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return PARAVIRT_LAZY_NONE; } +extern struct dma_map_ops *xen_dma_ops; + #endif /* _ASM_ARM_XEN_HYPERVISOR_H */ diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h new file mode 100644 index 00000000000..1109017499e --- /dev/null +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -0,0 +1,50 @@ +#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H +#define _ASM_ARM_XEN_PAGE_COHERENT_H + +#include <asm/page.h> +#include <linux/dma-attrs.h> +#include <linux/dma-mapping.h> + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs 
*attrs) +{ + return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); +} + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + if (__generic_dma_ops(hwdev)->unmap_page) + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); +} + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + if (__generic_dma_ops(hwdev)->sync_single_for_cpu) + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); +} + +static inline void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + if (__generic_dma_ops(hwdev)->sync_single_for_device) + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); +} +#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index 359a7b50b15..75579a9d6f7 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -6,12 +6,12 @@ #include <linux/pfn.h> #include <linux/types.h> +#include <linux/dma-mapping.h> +#include <xen/xen.h> #include <xen/interface/grant_table.h> -#define pfn_to_mfn(pfn) (pfn) #define phys_to_machine_mapping_valid(pfn) (1) -#define mfn_to_pfn(mfn) (mfn) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) #define pte_mfn pte_pfn @@ -32,6 +32,38 @@ typedef struct xpaddr { #define INVALID_P2M_ENTRY (~0UL) +unsigned long __pfn_to_mfn(unsigned long pfn); +unsigned long __mfn_to_pfn(unsigned long mfn); +extern struct rb_root phys_to_mach; + +static inline unsigned long pfn_to_mfn(unsigned long pfn) +{ + unsigned long mfn; + + if (phys_to_mach.rb_node != NULL) { + mfn = __pfn_to_mfn(pfn); + if (mfn != INVALID_P2M_ENTRY) + return mfn; + } + + return pfn; +} + +static inline unsigned long mfn_to_pfn(unsigned long mfn) +{ + unsigned long pfn; + + if (phys_to_mach.rb_node != NULL) { + pfn = __mfn_to_pfn(mfn); + if (pfn != INVALID_P2M_ENTRY) + return pfn; + } + + return mfn; +} + +#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn) + static inline xmaddr_t phys_to_machine(xpaddr_t phys) { unsigned offset = phys.paddr & ~PAGE_MASK; @@ -76,11 +108,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte) return 0; } -static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) -{ - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); - return true; -} +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); +bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, + unsigned long nr_pages); static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) { diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S new file mode 100644 index 00000000000..2265a199280 --- /dev/null +++ b/arch/arm/include/debug/efm32.S @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2013 Pengutronix + * Uwe Kleine-Koenig 
<u.kleine-koenig@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define UARTn_CMD 0x000c +#define UARTn_CMD_TXEN 0x0004 + +#define UARTn_STATUS 0x0010 +#define UARTn_STATUS_TXC 0x0020 +#define UARTn_STATUS_TXBL 0x0040 + +#define UARTn_TXDATA 0x0034 + + .macro addruart, rx, tmp + ldr \rx, =(CONFIG_DEBUG_UART_PHYS) + + /* + * enable TX. The driver might disable it to save energy. We + * don't care about disabling at the end as during debug power + * consumption isn't that important. + */ + ldr \tmp, =(UARTn_CMD_TXEN) + str \tmp, [\rx, #UARTn_CMD] + .endm + + .macro senduart,rd,rx + strb \rd, [\rx, #UARTn_TXDATA] + .endm + + .macro waituart,rd,rx +1001: ldr \rd, [\rx, #UARTn_STATUS] + tst \rd, #UARTn_STATUS_TXBL + beq 1001b + .endm + + .macro busyuart,rd,rx +1001: ldr \rd, [\rx, UARTn_STATUS] + tst \rd, #UARTn_STATUS_TXC + bne 1001b + .endm diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S index 9166e1bc470..9d653d47590 100644 --- a/arch/arm/include/debug/msm.S +++ b/arch/arm/include/debug/msm.S @@ -46,6 +46,11 @@ #define MSM_DEBUG_UART_PHYS 0x16440000 #endif +#ifdef CONFIG_DEBUG_MSM8974_UART +#define MSM_DEBUG_UART_BASE 0xFA71E000 +#define MSM_DEBUG_UART_PHYS 0xF991E000 +#endif + .macro addruart, rp, rv, tmp #ifdef MSM_DEBUG_UART_PHYS ldr \rp, =MSM_DEBUG_UART_PHYS diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S index 37c6895b87e..92ef808a233 100644 --- a/arch/arm/include/debug/pl01x.S +++ b/arch/arm/include/debug/pl01x.S @@ -25,12 +25,14 @@ .macro waituart,rd,rx 1001: ldr \rd, [\rx, #UART01x_FR] + ARM_BE8( rev \rd, \rd ) tst \rd, #UART01x_FR_TXFF bne 1001b .endm .macro busyuart,rd,rx 1001: ldr \rd, [\rx, #UART01x_FR] + ARM_BE8( rev \rd, \rd ) tst \rd, #UART01x_FR_BUSY bne 1001b .endm diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild index 18d76fd5a2a..70a1c9da30c 100644 --- a/arch/arm/include/uapi/asm/Kbuild +++ b/arch/arm/include/uapi/asm/Kbuild @@ -7,6 +7,7 @@ header-y += hwcap.h header-y += ioctls.h header-y += kvm_para.h header-y += mman.h +header-y += perf_regs.h header-y += posix_types.h header-y += ptrace.h header-y += setup.h diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index c1ee007523d..c498b60c050 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -63,7 +63,8 @@ struct kvm_regs { /* Supported Processor Types */ #define KVM_ARM_TARGET_CORTEX_A15 0 -#define KVM_ARM_NUM_TARGETS 1 +#define KVM_ARM_TARGET_CORTEX_A7 1 +#define KVM_ARM_NUM_TARGETS 2 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ #define KVM_ARM_DEVICE_TYPE_SHIFT 0 diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h new file mode 100644 index 00000000000..ce59448458b --- /dev/null +++ b/arch/arm/include/uapi/asm/perf_regs.h @@ -0,0 +1,23 @@ +#ifndef _ASM_ARM_PERF_REGS_H +#define _ASM_ARM_PERF_REGS_H + +enum perf_event_arm_regs { + PERF_REG_ARM_R0, + PERF_REG_ARM_R1, + PERF_REG_ARM_R2, + PERF_REG_ARM_R3, + PERF_REG_ARM_R4, + PERF_REG_ARM_R5, + PERF_REG_ARM_R6, + PERF_REG_ARM_R7, + PERF_REG_ARM_R8, + PERF_REG_ARM_R9, + PERF_REG_ARM_R10, + PERF_REG_ARM_FP, + PERF_REG_ARM_IP, + PERF_REG_ARM_SP, + PERF_REG_ARM_LR, + PERF_REG_ARM_PC, + PERF_REG_ARM_MAX, +}; +#endif /* _ASM_ARM_PERF_REGS_H */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 
5140df5f23a..a30fc9be9e9 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg obj-y := elf.o entry-common.o irq.o opcodes.o \ process.o ptrace.o return_address.o \ - setup.o signal.o stacktrace.o sys_arm.o time.o traps.o + setup.o signal.o sigreturn_codes.o \ + stacktrace.o sys_arm.o time.o traps.o obj-$(CONFIG_ATAGS) += atags_parse.o obj-$(CONFIG_ATAGS_PROC) += atags_proc.o @@ -78,6 +79,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o +obj-$(CONFIG_PERF_EVENTS) += perf_regs.o obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 60d3b738d42..1f031ddd066 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc); #ifdef CONFIG_ARM_PATCH_PHYS_VIRT EXPORT_SYMBOL(__pv_phys_offset); +EXPORT_SYMBOL(__pv_offset); #endif diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c index f35906b3d8c..739c3dfc1da 100644 --- a/arch/arm/kernel/devtree.c +++ b/arch/arm/kernel/devtree.c @@ -174,6 +174,19 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id) return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu); } +static const void * __init arch_get_next_mach(const char *const **match) +{ + static const struct machine_desc *mdesc = __arch_info_begin; + const struct machine_desc *m = mdesc; + + if (m >= __arch_info_end) + return NULL; + + mdesc++; + *match = m->dt_compat; + return m; +} + /** * setup_machine_fdt - Machine setup when an dtb was passed to the kernel * @dt_phys: physical address of dt blob @@ -183,11 +196,7 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id) */ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) { - struct boot_param_header *devtree; const struct machine_desc *mdesc, *mdesc_best = NULL; - unsigned int score, mdesc_score = ~1; - unsigned long dt_root; - const char *model; #ifdef CONFIG_ARCH_MULTIPLATFORM DT_MACHINE_START(GENERIC_DT, "Generic DT based system") @@ -196,32 +205,20 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) mdesc_best = &__mach_desc_GENERIC_DT; #endif - if (!dt_phys) + if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) return NULL; - devtree = phys_to_virt(dt_phys); + mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach); - /* check device tree validity */ - if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) - return NULL; - - /* Search the mdescs for the 'best' compatible value match */ - initial_boot_params = devtree; - dt_root = of_get_flat_dt_root(); - for_each_machine_desc(mdesc) { - score = of_flat_dt_match(dt_root, mdesc->dt_compat); - if (score > 0 && score < mdesc_score) { - mdesc_best = mdesc; - mdesc_score = score; - } - } - if (!mdesc_best) { + if (!mdesc) { const char *prop; long size; + unsigned long dt_root; early_print("\nError: unrecognized/unsupported " "device tree compatible list:\n[ "); + dt_root = of_get_flat_dt_root(); prop = of_get_flat_dt_prop(dt_root, "compatible", &size); while (size > 0) { early_print("'%s' ", prop); @@ -233,22 +230,8 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) dump_machine_table(); /* does not return */ } - model = of_get_flat_dt_prop(dt_root, "model", NULL); - if (!model) - model = 
of_get_flat_dt_prop(dt_root, "compatible", NULL); - if (!model) - model = "<unknown>"; - pr_info("Machine: %s, model: %s\n", mdesc_best->name, model); - - /* Retrieve various information from the /chosen node */ - of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); - /* Initialize {size,address}-cells info */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - /* Setup memory, calling early_init_dt_add_memory_arch */ - of_scan_flat_dt(early_init_dt_scan_memory, NULL); - /* Change machine number to match the mdesc we're using */ - __machine_arch_type = mdesc_best->nr; + __machine_arch_type = mdesc->nr; - return mdesc_best; + return mdesc; } diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 9cbe70c8b0e..b3fb8c9e1ff 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -192,6 +192,7 @@ __dabt_svc: svc_entry mov r2, sp dabt_helper + THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR svc_exit r5 @ return from exception UNWIND(.fnend ) ENDPROC(__dabt_svc) @@ -416,9 +417,8 @@ __und_usr: bne __und_usr_thumb sub r4, r2, #4 @ ARM instr at LR - 4 1: ldrt r0, [r4] -#ifdef CONFIG_CPU_ENDIAN_BE8 - rev r0, r0 @ little endian instruction -#endif + ARM_BE8(rev r0, r0) @ little endian instruction + @ r0 = 32-bit ARM instruction which caused the exception @ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r4 = PC value for the faulting instruction diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index bc6bd9683ba..a2dcafdf1bc 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -393,9 +393,7 @@ ENTRY(vector_swi) #else USER( ldr r10, [lr, #-4] ) @ get SWI instruction #endif -#ifdef CONFIG_CPU_ENDIAN_BE8 - rev r10, r10 @ little endian instruction -#endif + ARM_BE8(rev r10, r10) @ little endian instruction #elif defined(CONFIG_AEABI) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 476de57dcef..7801866e626 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -77,6 +77,7 @@ __HEAD ENTRY(stext) + ARM_BE8(setend be ) @ ensure we are in BE8 mode THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, @@ -352,6 +353,9 @@ ENTRY(secondary_startup) * the processor type - there is no need to check the machine type * as it has already been validated by the primary processor. 
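The setup_machine_fdt() rewrite above drops the open-coded scoring loop and hands matching to of_flat_dt_match_machine(), feeding it machine descriptors one at a time through arch_get_next_mach(). A simplified, self-contained model of that callback split; the scoring used here (position in the board's compatible list) only approximates the flat-DT helper:

/* Simplified model of the of_flat_dt_match_machine()/arch_get_next_mach()
 * split: the core loop only scores compatible strings, and a callback hands
 * it one machine descriptor (plus its dt_compat list) at a time. */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct mdesc {
	const char *name;
	const char *const *dt_compat;
};

static const char *const board_a_compat[] = { "vendor,board-a", NULL };
static const char *const generic_compat[] = { "vendor,soc", NULL };

static const struct mdesc machines[] = {
	{ "Board A", board_a_compat },
	{ "Generic DT based system", generic_compat },
};

/* arch_get_next_mach() analogue: a static cursor over the machine table. */
static const void *get_next_mach(const char *const **match)
{
	static size_t i;

	if (i >= sizeof(machines) / sizeof(machines[0]))
		return NULL;
	*match = machines[i].dt_compat;
	return &machines[i++];
}

/* of_flat_dt_match_machine() analogue: lowest score (best match) wins. */
static const struct mdesc *match_machine(const char *const *board_compat)
{
	const struct mdesc *best = NULL;
	int best_score = 1000;
	const char *const *compat;
	const void *m;

	while ((m = get_next_mach(&compat)) != NULL) {
		for (; *compat; compat++) {
			const char *const *c;
			int score = 0;

			for (c = board_compat; *c; c++, score++) {
				if (!strcmp(*c, *compat) && score < best_score) {
					best_score = score;
					best = m;
				}
			}
		}
	}
	return best;
}

int main(void)
{
	const char *const board[] = { "vendor,board-a", "vendor,soc", NULL };
	const struct mdesc *mdesc = match_machine(board);

	printf("matched: %s\n", mdesc ? mdesc->name : "(none)");
	return 0;
}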
*/ + + ARM_BE8(setend be) @ ensure we are in BE8 mode + #ifdef CONFIG_ARM_VIRT_EXT bl __hyp_stub_install_secondary #endif @@ -555,6 +559,14 @@ ENTRY(fixup_smp) ldmfd sp!, {r4 - r6, pc} ENDPROC(fixup_smp) +#ifdef __ARMEB__ +#define LOW_OFFSET 0x4 +#define HIGH_OFFSET 0x0 +#else +#define LOW_OFFSET 0x0 +#define HIGH_OFFSET 0x4 +#endif + #ifdef CONFIG_ARM_PATCH_PHYS_VIRT /* __fixup_pv_table - patch the stub instructions with the delta between @@ -565,17 +577,20 @@ ENDPROC(fixup_smp) __HEAD __fixup_pv_table: adr r0, 1f - ldmia r0, {r3-r5, r7} - sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET + ldmia r0, {r3-r7} + mvn ip, #0 + subs r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET add r4, r4, r3 @ adjust table start address add r5, r5, r3 @ adjust table end address - add r7, r7, r3 @ adjust __pv_phys_offset address - str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset + add r6, r6, r3 @ adjust __pv_phys_offset address + add r7, r7, r3 @ adjust __pv_offset address + str r8, [r6, #LOW_OFFSET] @ save computed PHYS_OFFSET to __pv_phys_offset + strcc ip, [r7, #HIGH_OFFSET] @ save to __pv_offset high bits mov r6, r3, lsr #24 @ constant for add/sub instructions teq r3, r6, lsl #24 @ must be 16MiB aligned THUMB( it ne @ cross section branch ) bne __error - str r6, [r7, #4] @ save to __pv_offset + str r3, [r7, #LOW_OFFSET] @ save to __pv_offset low bits b __fixup_a_pv_table ENDPROC(__fixup_pv_table) @@ -584,10 +599,19 @@ ENDPROC(__fixup_pv_table) .long __pv_table_begin .long __pv_table_end 2: .long __pv_phys_offset + .long __pv_offset .text __fixup_a_pv_table: + adr r0, 3f + ldr r6, [r0] + add r6, r6, r3 + ldr r0, [r6, #HIGH_OFFSET] @ pv_offset high word + ldr r6, [r6, #LOW_OFFSET] @ pv_offset low word + mov r6, r6, lsr #24 + cmn r0, #1 #ifdef CONFIG_THUMB2_KERNEL + moveq r0, #0x200000 @ set bit 21, mov to mvn instruction lsls r6, #24 beq 2f clz r7, r6 @@ -601,18 +625,42 @@ __fixup_a_pv_table: b 2f 1: add r7, r3 ldrh ip, [r7, #2] - and ip, 0x8f00 - orr ip, r6 @ mask in offset bits 31-24 +ARM_BE8(rev16 ip, ip) + tst ip, #0x4000 + and ip, #0x8f00 + orrne ip, r6 @ mask in offset bits 31-24 + orreq ip, r0 @ mask in offset bits 7-0 +ARM_BE8(rev16 ip, ip) strh ip, [r7, #2] + bne 2f + ldrh ip, [r7] +ARM_BE8(rev16 ip, ip) + bic ip, #0x20 + orr ip, ip, r0, lsr #16 +ARM_BE8(rev16 ip, ip) + strh ip, [r7] 2: cmp r4, r5 ldrcc r7, [r4], #4 @ use branch for delay slot bcc 1b bx lr #else + moveq r0, #0x400000 @ set bit 22, mov to mvn instruction b 2f 1: ldr ip, [r7, r3] +#ifdef CONFIG_CPU_ENDIAN_BE8 + @ in BE8, we load data in BE, but instructions still in LE + bic ip, ip, #0xff000000 + tst ip, #0x000f0000 @ check the rotation field + orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24 + biceq ip, ip, #0x00004000 @ clear bit 22 + orreq ip, ip, r0, lsl #24 @ mask in offset bits 7-0 +#else bic ip, ip, #0x000000ff - orr ip, ip, r6 @ mask in offset bits 31-24 + tst ip, #0xf00 @ check the rotation field + orrne ip, ip, r6 @ mask in offset bits 31-24 + biceq ip, ip, #0x400000 @ clear bit 22 + orreq ip, ip, r0 @ mask in offset bits 7-0 +#endif str ip, [r7, r3] 2: cmp r4, r5 ldrcc r7, [r4], #4 @ use branch for delay slot @@ -621,28 +669,30 @@ __fixup_a_pv_table: #endif ENDPROC(__fixup_a_pv_table) + .align +3: .long __pv_offset + ENTRY(fixup_pv_table) stmfd sp!, {r4 - r7, lr} - ldr r2, 2f @ get address of __pv_phys_offset mov r3, #0 @ no offset mov r4, r0 @ r0 = table start add r5, r0, r1 @ r1 = table size - ldr r6, [r2, #4] @ get __pv_offset bl __fixup_a_pv_table ldmfd sp!, {r4 - r7, pc} ENDPROC(fixup_pv_table) - .align -2: .long 
__pv_phys_offset - .data .globl __pv_phys_offset .type __pv_phys_offset, %object __pv_phys_offset: - .long 0 - .size __pv_phys_offset, . - __pv_phys_offset + .quad 0 + .size __pv_phys_offset, . -__pv_phys_offset + + .globl __pv_offset + .type __pv_offset, %object __pv_offset: - .long 0 + .quad 0 + .size __pv_offset, . -__pv_offset #endif #include "head-common.S" diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 7b95de60135..3d446605cbf 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp) /* Breakpoint */ ctrl_base = ARM_BASE_BCR; val_base = ARM_BASE_BVR; - slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; } else { /* Watchpoint */ ctrl_base = ARM_BASE_WCR; val_base = ARM_BASE_WVR; - slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; } @@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { /* Breakpoint */ base = ARM_BASE_BCR; - slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; } else { /* Watchpoint */ base = ARM_BASE_WCR; - slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; } @@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; - slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); for (i = 0; i < core_num_wrps; ++i) { rcu_read_lock(); @@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc) struct perf_event *wp, **slots; struct arch_hw_breakpoint *info; - slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); for (i = 0; i < core_num_wrps; ++i) { rcu_read_lock(); @@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; - slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + slots = this_cpu_ptr(bp_on_reg); /* The exception entry code places the amended lr in the PC. 
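The __fixup_a_pv_table changes above extend the existing boot-time patching: every stub's address sits in .pv_table and the early boot code rewrites the 8-bit immediate in each instruction, now also switching mov to mvn for the high word when the 64-bit offset is negative. A stripped-down user-space model of the table walk, ignoring the rotation field and the mov/mvn case:

/* User-space model of the pv-table walk in __fixup_a_pv_table: every stub
 * instruction's address is listed in .pv_table and the boot code rewrites
 * the 8-bit immediate in place. The "instructions" here are plain words with
 * the immediate in the low byte. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint32_t insn[] = {	/* stand-ins for add/sub rd, rn, #imm stubs */
	0xe2800000,
	0xe2400000,
};

static uint32_t *const pv_table[] = { &insn[0], &insn[1] };

static void fixup_pv_table_model(uint32_t pv_offset)
{
	uint32_t imm = pv_offset >> 24;	/* offset must be 16MiB aligned */
	size_t i;

	for (i = 0; i < sizeof(pv_table) / sizeof(pv_table[0]); i++) {
		*pv_table[i] &= ~0xffu;		/* clear the old immediate */
		*pv_table[i] |= imm;		/* patch in bits 31..24 of the offset */
	}
}

int main(void)
{
	/* assumed PHYS_OFFSET 0x80000000 minus assumed PAGE_OFFSET 0xc0000000 */
	fixup_pv_table_model(0x80000000u - 0xc0000000u);

	printf("patched: %#x %#x\n", (unsigned)insn[0], (unsigned)insn[1]);
	return 0;
}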
*/ addr = regs->ARM_pc; diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 170e9f34003..a7b621ece23 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { - __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; } static void __kprobes set_current_kprobe(struct kprobe *p) { - __get_cpu_var(current_kprobe) = p; + __this_cpu_write(current_kprobe, p); } static void __kprobes @@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) continue; if (ri->rp && ri->rp->handler) { - __get_cpu_var(current_kprobe) = &ri->rp->kp; + __this_cpu_write(current_kprobe, &ri->rp->kp); get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; ri->rp->handler(ri, regs); - __get_cpu_var(current_kprobe) = NULL; + __this_cpu_write(current_kprobe, NULL); } orig_ret_address = (unsigned long)ri->ret_addr; diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 084dc889698..45e47815727 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -24,6 +24,7 @@ #include <asm/sections.h> #include <asm/smp_plat.h> #include <asm/unwind.h> +#include <asm/opcodes.h> #ifdef CONFIG_XIP_KERNEL /* @@ -40,7 +41,7 @@ void *module_alloc(unsigned long size) { return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL_EXEC, -1, + GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, __builtin_return_address(0)); } #endif @@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, Elf32_Sym *sym; const char *symname; s32 offset; + u32 tmp; #ifdef CONFIG_THUMB2_KERNEL u32 upper, lower, sign, j1, j2; #endif @@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, case R_ARM_PC24: case R_ARM_CALL: case R_ARM_JUMP24: - offset = (*(u32 *)loc & 0x00ffffff) << 2; + offset = __mem_to_opcode_arm(*(u32 *)loc); + offset = (offset & 0x00ffffff) << 2; if (offset & 0x02000000) offset -= 0x04000000; @@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, } offset >>= 2; + offset &= 0x00ffffff; - *(u32 *)loc &= 0xff000000; - *(u32 *)loc |= offset & 0x00ffffff; + *(u32 *)loc &= __opcode_to_mem_arm(0xff000000); + *(u32 *)loc |= __opcode_to_mem_arm(offset); break; case R_ARM_V4BX: @@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, * other bits to re-code instruction as * MOV PC,Rm. 
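The R_ARM_PC24/R_ARM_CALL/R_ARM_JUMP24 handling above now round-trips the instruction through __mem_to_opcode_arm()/__opcode_to_mem_arm(), but the arithmetic on the 24-bit branch field is unchanged: a signed word offset relative to the PC, which ARM reads as the instruction address plus 8. A small decode/re-encode sketch of that field; the sample opcode is just a branch-to-self used to exercise the math:

/* Decode and re-encode the 24-bit branch field of an ARM B/BL instruction. */
#include <stdint.h>
#include <stdio.h>

static int32_t b24_target_offset(uint32_t opcode)
{
	int32_t offset = (int32_t)((opcode & 0x00ffffff) << 2);

	if (offset & 0x02000000)	/* sign bit of the 26-bit byte offset */
		offset -= 0x04000000;
	return offset + 8;		/* relative to the instruction itself */
}

static uint32_t b24_encode(uint32_t opcode, int32_t target_offset)
{
	uint32_t field = ((uint32_t)(target_offset - 8) >> 2) & 0x00ffffff;

	return (opcode & 0xff000000) | field;
}

int main(void)
{
	uint32_t bl = 0xebfffffe;	/* bl to itself */
	int32_t off = b24_target_offset(bl);

	printf("offset %d bytes, re-encoded %#x\n",
	       (int)off, (unsigned)b24_encode(bl, off));
	return 0;
}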
*/ - *(u32 *)loc &= 0xf000000f; - *(u32 *)loc |= 0x01a0f000; + *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f); + *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000); break; case R_ARM_PREL31: @@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, case R_ARM_MOVW_ABS_NC: case R_ARM_MOVT_ABS: - offset = *(u32 *)loc; + offset = tmp = __mem_to_opcode_arm(*(u32 *)loc); offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff); offset = (offset ^ 0x8000) - 0x8000; @@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS) offset >>= 16; - *(u32 *)loc &= 0xfff0f000; - *(u32 *)loc |= ((offset & 0xf000) << 4) | - (offset & 0x0fff); + tmp &= 0xfff0f000; + tmp |= ((offset & 0xf000) << 4) | + (offset & 0x0fff); + + *(u32 *)loc = __opcode_to_mem_arm(tmp); break; #ifdef CONFIG_THUMB2_KERNEL case R_ARM_THM_CALL: case R_ARM_THM_JUMP24: - upper = *(u16 *)loc; - lower = *(u16 *)(loc + 2); + upper = __mem_to_opcode_thumb16(*(u16 *)loc); + lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2)); /* * 25 bit signed address range (Thumb-2 BL and B.W @@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, sign = (offset >> 24) & 1; j1 = sign ^ (~(offset >> 23) & 1); j2 = sign ^ (~(offset >> 22) & 1); - *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) | + upper = (u16)((upper & 0xf800) | (sign << 10) | ((offset >> 12) & 0x03ff)); - *(u16 *)(loc + 2) = (u16)((lower & 0xd000) | - (j1 << 13) | (j2 << 11) | - ((offset >> 1) & 0x07ff)); + lower = (u16)((lower & 0xd000) | + (j1 << 13) | (j2 << 11) | + ((offset >> 1) & 0x07ff)); + + *(u16 *)loc = __opcode_to_mem_thumb16(upper); + *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower); break; case R_ARM_THM_MOVW_ABS_NC: case R_ARM_THM_MOVT_ABS: - upper = *(u16 *)loc; - lower = *(u16 *)(loc + 2); + upper = __mem_to_opcode_thumb16(*(u16 *)loc); + lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2)); /* * MOVT/MOVW instructions encoding in Thumb-2: @@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS) offset >>= 16; - *(u16 *)loc = (u16)((upper & 0xfbf0) | - ((offset & 0xf000) >> 12) | - ((offset & 0x0800) >> 1)); - *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) | - ((offset & 0x0700) << 4) | - (offset & 0x00ff)); + upper = (u16)((upper & 0xfbf0) | + ((offset & 0xf000) >> 12) | + ((offset & 0x0800) >> 1)); + lower = (u16)((lower & 0x8f00) | + ((offset & 0x0700) << 4) | + (offset & 0x00ff)); + *(u16 *)loc = __opcode_to_mem_thumb16(upper); + *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower); break; #endif diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index e186ee1e63f..bc3f2efa0d8 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events, struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); - struct pmu *leader_pmu = event->group_leader->pmu; if (is_software_event(event)) return 1; - if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) + if (event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 8d6147b2001..d85055cd24b 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -68,7 +68,7 
@@ EXPORT_SYMBOL_GPL(perf_num_counters); static struct pmu_hw_events *cpu_pmu_get_cpu_events(void) { - return &__get_cpu_var(cpu_hw_events); + return this_cpu_ptr(&cpu_hw_events); } static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c new file mode 100644 index 00000000000..6e4379c67cb --- /dev/null +++ b/arch/arm/kernel/perf_regs.c @@ -0,0 +1,30 @@ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/perf_event.h> +#include <linux/bug.h> +#include <asm/perf_regs.h> +#include <asm/ptrace.h> + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX)) + return 0; + + return regs->uregs[idx]; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_32; +} diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 5d65438685d..6a1b8a81b1a 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup); #endif extern void paging_init(const struct machine_desc *desc); +extern void early_paging_init(const struct machine_desc *, + struct proc_info_list *); extern void sanity_check_meminfo(void); extern enum reboot_mode reboot_mode; extern void setup_dma_zone(const struct machine_desc *desc); @@ -599,6 +601,8 @@ static void __init setup_processor(void) elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif + erratum_a15_798181_init(); + feat_v6_fixup(); cacheid_init(); @@ -619,9 +623,10 @@ void __init dump_machine_table(void) /* can't use cpu_relax() here as it may require MMU setup */; } -int __init arm_add_memory(phys_addr_t start, phys_addr_t size) +int __init arm_add_memory(u64 start, u64 size) { struct membank *bank = &meminfo.bank[meminfo.nr_banks]; + u64 aligned_start; if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " @@ -634,10 +639,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) * Size is appropriately rounded down, start is rounded up. */ size -= start & ~PAGE_MASK; - bank->start = PAGE_ALIGN(start); + aligned_start = PAGE_ALIGN(start); -#ifndef CONFIG_ARM_LPAE - if (bank->start + size < bank->start) { +#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT + if (aligned_start > ULONG_MAX) { + printk(KERN_CRIT "Ignoring memory at 0x%08llx outside " + "32-bit physical address space\n", (long long)start); + return -EINVAL; + } + + if (aligned_start + size > ULONG_MAX) { printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " "32-bit physical address space\n", (long long)start); /* @@ -645,10 +656,11 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB. * This means we lose a page after masking. 
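The arm_add_memory() rework takes start and size as u64 so an out-of-range bank can be detected instead of silently wrapping: the start is page-aligned upward, a bank entirely above 4GiB is rejected, and one crossing 4GiB is truncated, which is where the lost page mentioned above comes from. A self-contained model of that bounds logic, with the 32-bit limit written out explicitly:

/* Model of the new arm_add_memory() bounds handling on a 32-bit build. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define LIMIT_32BIT	0xffffffffULL	/* ULONG_MAX on a 32-bit kernel */

static int add_memory_bank(uint64_t start, uint64_t size,
			   uint64_t *bank_start, uint64_t *bank_size)
{
	uint64_t aligned_start = (start + PAGE_SIZE - 1) & PAGE_MASK;

	size -= start & ~PAGE_MASK;	/* size rounds down as start rounds up */

	if (aligned_start > LIMIT_32BIT)	/* whole bank above 4GiB */
		return -1;

	if (aligned_start + size > LIMIT_32BIT)		/* bank crosses 4GiB */
		size = LIMIT_32BIT - aligned_start;	/* loses a page below */

	*bank_start = aligned_start;
	*bank_size = size & PAGE_MASK;
	return 0;
}

int main(void)
{
	uint64_t s, sz;

	if (!add_memory_bank(0x80000100ULL, 0x100000000ULL, &s, &sz))
		printf("bank at %#llx, size %#llx\n",
		       (unsigned long long)s, (unsigned long long)sz);
	return 0;
}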
*/ - size = ULONG_MAX - bank->start; + size = ULONG_MAX - aligned_start; } #endif + bank->start = aligned_start; bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1); /* @@ -669,8 +681,8 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) static int __init early_mem(char *p) { static int usermem __initdata = 0; - phys_addr_t size; - phys_addr_t start; + u64 size; + u64 start; char *endp; /* @@ -878,6 +890,8 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); + + early_paging_init(mdesc, lookup_processor_type(read_cpuid_id())); sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index ab330422527..04d63880037 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -21,29 +21,7 @@ #include <asm/unistd.h> #include <asm/vfp.h> -/* - * For ARM syscalls, we encode the syscall number into the instruction. - */ -#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) -#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) - -/* - * With EABI, the syscall number has to be loaded into r7. - */ -#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE)) -#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) - -/* - * For Thumb syscalls, we pass the syscall number via r7. We therefore - * need two 16-bit instructions. - */ -#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE)) -#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE)) - -static const unsigned long sigreturn_codes[7] = { - MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, - MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN, -}; +extern const unsigned long sigreturn_codes[7]; static unsigned long signal_return_offset; @@ -375,12 +353,18 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, */ thumb = handler & 1; - if (thumb) { - cpsr |= PSR_T_BIT; #if __LINUX_ARM_ARCH__ >= 7 - /* clear the If-Then Thumb-2 execution state */ - cpsr &= ~PSR_IT_MASK; + /* + * Clear the If-Then Thumb-2 execution state + * ARM spec requires this to be all 000s in ARM mode + * Snapdragon S4/Krait misbehaves on a Thumb=>ARM + * signal transition without this. + */ + cpsr &= ~PSR_IT_MASK; #endif + + if (thumb) { + cpsr |= PSR_T_BIT; } else cpsr &= ~PSR_T_BIT; } diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S new file mode 100644 index 00000000000..3c5d0f2170f --- /dev/null +++ b/arch/arm/kernel/sigreturn_codes.S @@ -0,0 +1,80 @@ +/* + * sigreturn_codes.S - code sinpets for sigreturn syscalls + * + * Created by: Victor Kamensky, 2013-08-13 + * Copyright: (C) 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <asm/unistd.h> + +/* + * For ARM syscalls, we encode the syscall number into the instruction. + * With EABI, the syscall number has to be loaded into r7. 
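The constants deleted from signal.c are hand-assembled instructions, and the new sigreturn_codes.S expresses the same trampolines as real assembler so the toolchain produces correct encodings for either endianness. A small program that reproduces the old encodings; the syscall numbers (119 and 173) and the OABI base (0x900000) are assumed from the ARM unistd layout:

/* Reconstruct the sigreturn trampoline encodings removed from signal.c. */
#include <stdint.h>
#include <stdio.h>

#define NR_SYSCALL_BASE		0		/* EABI build assumed */
#define NR_OABI_SYSCALL_BASE	0x900000
#define NR_sigreturn		(NR_SYSCALL_BASE + 119)
#define NR_rt_sigreturn		(NR_SYSCALL_BASE + 173)

int main(void)
{
	/* EABI: "mov r7, #nr"; OABI: "swi #(nr | OABI base)". */
	uint32_t mov_r7 = 0xe3a07000 | (NR_sigreturn - NR_SYSCALL_BASE);
	uint32_t swi_sys = 0xef000000 | NR_sigreturn | NR_OABI_SYSCALL_BASE;
	/* Thumb: "movs r7, #nr" and "swi 0" packed as two halfwords. */
	uint32_t swi_thumb = (0xdf00u << 16) | 0x2700 |
			     (NR_sigreturn - NR_SYSCALL_BASE);

	printf("arm:   %08x %08x\n", (unsigned)mov_r7, (unsigned)swi_sys);
	printf("thumb: %08x\n", (unsigned)swi_thumb);
	printf("rt variants substitute %d for %d\n",
	       NR_rt_sigreturn, NR_sigreturn);
	return 0;
}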
As result + * ARM syscall sequence snippet will have move and svc in .arm encoding + * + * For Thumb syscalls, we pass the syscall number via r7. We therefore + * need two 16-bit instructions in .thumb encoding + * + * Please note sigreturn_codes code are not executed in place. Instead + * they just copied by kernel into appropriate places. Code inside of + * arch/arm/kernel/signal.c is very sensitive to layout of these code + * snippets. + */ + +#if __LINUX_ARM_ARCH__ <= 4 + /* + * Note we manually set minimally required arch that supports + * required thumb opcodes for early arch versions. It is OK + * for this file to be used in combination with other + * lower arch variants, since these code snippets are only + * used as input data. + */ + .arch armv4t +#endif + + .section .rodata + .global sigreturn_codes + .type sigreturn_codes, #object + + .arm + +sigreturn_codes: + + /* ARM sigreturn syscall code snippet */ + mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) + swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE) + + /* Thumb sigreturn syscall code snippet */ + .thumb + movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE) + swi #0 + + /* ARM sigreturn_rt syscall code snippet */ + .arm + mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) + swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE) + + /* Thumb sigreturn_rt syscall code snippet */ + .thumb + movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) + swi #0 + + /* + * Note on addtional space: setup_return in signal.c + * algorithm uses two words copy regardless whether + * it is thumb case or not, so we need additional + * word after real last entry. + */ + .arm + .space 4 + + .size sigreturn_codes, . - sigreturn_codes diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index db1536b8b30..b907d9b790a 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -55,6 +55,7 @@ * specific registers and some other data for resume. * r0 = suspend function arg0 * r1 = suspend function + * r2 = MPIDR value the resuming CPU will use */ ENTRY(__cpu_suspend) stmfd sp!, {r4 - r11, lr} @@ -67,23 +68,18 @@ ENTRY(__cpu_suspend) mov r5, sp @ current virtual SP add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn sub sp, sp, r4 @ allocate CPU state on stack - stmfd sp!, {r0, r1} @ save suspend func arg and pointer - add r0, sp, #8 @ save pointer to save block - mov r1, r4 @ size of save block - mov r2, r5 @ virtual SP ldr r3, =sleep_save_sp + stmfd sp!, {r0, r1} @ save suspend func arg and pointer ldr r3, [r3, #SLEEP_SAVE_SP_VIRT] - ALT_SMP(mrc p15, 0, r9, c0, c0, 5) - ALT_UP_B(1f) - ldr r8, =mpidr_hash - /* - * This ldmia relies on the memory layout of the mpidr_hash - * struct mpidr_hash. 
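Passing the MPIDR into __cpu_suspend matters because the resume path has to locate this CPU's slot in sleep_save_sp before it has a usable stack, so the affinity fields are reduced to a dense index. A deliberately simplified two-level version of that mapping (cluster in Aff1, CPU in Aff0, four CPUs per cluster assumed), not the kernel's exact compute_mpidr_hash packing:

/* Map an MPIDR onto a dense per-CPU save-slot index (simplified). */
#include <stdint.h>
#include <stdio.h>

#define CPUS_PER_CLUSTER	4	/* assumed example topology */

static unsigned int mpidr_to_slot(uint32_t mpidr)
{
	unsigned int cpu = mpidr & 0xff;		/* Aff0 */
	unsigned int cluster = (mpidr >> 8) & 0xff;	/* Aff1 */

	return cluster * CPUS_PER_CLUSTER + cpu;
}

int main(void)
{
	uint32_t cpu0 = 0x80000000u;	/* cluster 0, cpu 0 */
	uint32_t cpu5 = 0x80000101u;	/* cluster 1, cpu 1 */

	printf("slots: %u %u\n", mpidr_to_slot(cpu0), mpidr_to_slot(cpu5));
	return 0;
}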
- */ - ldmia r8, {r4-r7} @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts - compute_mpidr_hash lr, r5, r6, r7, r9, r4 - add r3, r3, lr, lsl #2 -1: + ALT_SMP(ldr r0, =mpidr_hash) + ALT_UP_B(1f) + /* This ldmia relies on the memory layout of the mpidr_hash struct */ + ldmia r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts + compute_mpidr_hash r0, r6, r7, r8, r2, r1 + add r3, r3, r0, lsl #2 +1: mov r2, r5 @ virtual SP + mov r1, r4 @ size of save block + add r0, sp, #8 @ pointer to save block bl __cpu_suspend_save adr lr, BSYM(cpu_suspend_abort) ldmfd sp!, {r0, pc} @ call suspend fn @@ -130,6 +126,7 @@ ENDPROC(cpu_resume_after_mmu) .data .align ENTRY(cpu_resume) +ARM_BE8(setend be) @ ensure we are in BE mode mov r1, #0 ALT_SMP(mrc p15, 0, r0, c0, c0, 5) ALT_UP_B(1f) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 72024ea8a3a..dc894ab3622 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -25,6 +25,7 @@ #include <linux/clockchips.h> #include <linux/completion.h> #include <linux/cpufreq.h> +#include <linux/irq_work.h> #include <linux/atomic.h> #include <asm/smp.h> @@ -66,6 +67,8 @@ enum ipi_msg_type { IPI_CALL_FUNC, IPI_CALL_FUNC_SINGLE, IPI_CPU_STOP, + IPI_IRQ_WORK, + IPI_COMPLETION, }; static DECLARE_COMPLETION(cpu_running); @@ -80,7 +83,7 @@ void __init smp_set_ops(struct smp_operations *ops) static unsigned long get_arch_pgd(pgd_t *pgd) { - phys_addr_t pgdir = virt_to_phys(pgd); + phys_addr_t pgdir = virt_to_idmap(pgd); BUG_ON(pgdir & ARCH_PGD_MASK); return pgdir >> ARCH_PGD_SHIFT; } @@ -448,6 +451,14 @@ void arch_send_call_function_single_ipi(int cpu) smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); } +#ifdef CONFIG_IRQ_WORK +void arch_irq_work_raise(void) +{ + if (is_smp()) + smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); +} +#endif + static const char *ipi_types[NR_IPI] = { #define S(x,s) [x] = s S(IPI_WAKEUP, "CPU wakeup interrupts"), @@ -456,6 +467,8 @@ static const char *ipi_types[NR_IPI] = { S(IPI_CALL_FUNC, "Function call interrupts"), S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), S(IPI_CPU_STOP, "CPU stop interrupts"), + S(IPI_IRQ_WORK, "IRQ work interrupts"), + S(IPI_COMPLETION, "completion interrupts"), }; void show_ipi_list(struct seq_file *p, int prec) @@ -515,6 +528,19 @@ static void ipi_cpu_stop(unsigned int cpu) cpu_relax(); } +static DEFINE_PER_CPU(struct completion *, cpu_completion); + +int register_ipi_completion(struct completion *completion, int cpu) +{ + per_cpu(cpu_completion, cpu) = completion; + return IPI_COMPLETION; +} + +static void ipi_complete(unsigned int cpu) +{ + complete(per_cpu(cpu_completion, cpu)); +} + /* * Main handler for inter-processor interrupts */ @@ -565,6 +591,20 @@ void handle_IPI(int ipinr, struct pt_regs *regs) irq_exit(); break; +#ifdef CONFIG_IRQ_WORK + case IPI_IRQ_WORK: + irq_enter(); + irq_work_run(); + irq_exit(); + break; +#endif + + case IPI_COMPLETION: + irq_enter(); + ipi_complete(cpu); + irq_exit(); + break; + default: printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index 5bc1a63284e..1aafa0d785e 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c @@ -28,7 +28,7 @@ */ unsigned int __init scu_get_core_count(void __iomem *scu_base) { - unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG); + unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG); return (ncores & 0x03) + 1; } @@ -42,19 +42,19 @@ void scu_enable(void __iomem *scu_base) #ifdef 
CONFIG_ARM_ERRATA_764369 /* Cortex-A9 only */ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) { - scu_ctrl = __raw_readl(scu_base + 0x30); + scu_ctrl = readl_relaxed(scu_base + 0x30); if (!(scu_ctrl & 1)) - __raw_writel(scu_ctrl | 0x1, scu_base + 0x30); + writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30); } #endif - scu_ctrl = __raw_readl(scu_base + SCU_CTRL); + scu_ctrl = readl_relaxed(scu_base + SCU_CTRL); /* already enabled? */ if (scu_ctrl & 1) return; scu_ctrl |= 1; - __raw_writel(scu_ctrl, scu_base + SCU_CTRL); + writel_relaxed(scu_ctrl, scu_base + SCU_CTRL); /* * Ensure that the data accessed by CPU0 before the SCU was @@ -80,9 +80,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode) if (mode > 3 || mode == 1 || cpu > 3) return -EINVAL; - val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03; + val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03; val |= mode; - __raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu); + writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu); return 0; } diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 83ccca303df..95d063620b7 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c @@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored) local_flush_bp_all(); } +#ifdef CONFIG_ARM_ERRATA_798181 +bool (*erratum_a15_798181_handler)(void); + +static bool erratum_a15_798181_partial(void) +{ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(ish); + return false; +} + +static bool erratum_a15_798181_broadcast(void) +{ + asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); + dsb(ish); + return true; +} + +void erratum_a15_798181_init(void) +{ + unsigned int midr = read_cpuid_id(); + unsigned int revidr = read_cpuid(CPUID_REVIDR); + + /* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */ + if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 || + (revidr & 0x210) == 0x210) { + return; + } + if (revidr & 0x10) + erratum_a15_798181_handler = erratum_a15_798181_partial; + else + erratum_a15_798181_handler = erratum_a15_798181_broadcast; +} +#endif + static void ipi_flush_tlb_a15_erratum(void *arg) { dmb(); @@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void) if (!erratum_a15_798181()) return; - dummy_flush_tlb_a15_erratum(); smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1); } @@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) if (!erratum_a15_798181()) return; - dummy_flush_tlb_a15_erratum(); this_cpu = get_cpu(); a15_erratum_get_cpumask(this_cpu, mm, &mask); smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 2985c9f0905..6591e26fc13 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode, case CLOCK_EVT_MODE_PERIODIC: ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_PERIODIC; - __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), + writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), twd_base + TWD_TIMER_LOAD); break; case CLOCK_EVT_MODE_ONESHOT: @@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode, ctrl = 0; } - __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL); + writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL); } static int twd_set_next_event(unsigned long evt, struct clock_event_device *unused) { - unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL); + unsigned long ctrl = readl_relaxed(twd_base + 
TWD_TIMER_CONTROL); ctrl |= TWD_TIMER_CONTROL_ENABLE; - __raw_writel(evt, twd_base + TWD_TIMER_COUNTER); - __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL); + writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER); + writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL); return 0; } @@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt, */ static int twd_timer_ack(void) { - if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) { - __raw_writel(1, twd_base + TWD_TIMER_INTSTAT); + if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) { + writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT); return 1; } @@ -211,15 +211,15 @@ static void twd_calibrate_rate(void) waitjiffies += 5; /* enable, no interrupt or reload */ - __raw_writel(0x1, twd_base + TWD_TIMER_CONTROL); + writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL); /* maximum value */ - __raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER); + writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER); while (get_jiffies_64() < waitjiffies) udelay(10); - count = __raw_readl(twd_base + TWD_TIMER_COUNTER); + count = readl_relaxed(twd_base + TWD_TIMER_COUNTER); twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); @@ -277,7 +277,7 @@ static void twd_timer_setup(void) * bother with the below. */ if (per_cpu(percpu_setup_called, cpu)) { - __raw_writel(0, twd_base + TWD_TIMER_CONTROL); + writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); clockevents_register_device(clk); enable_percpu_irq(clk->irq, 0); return; @@ -290,7 +290,7 @@ static void twd_timer_setup(void) * The following is done once per CPU the first time .setup() is * called. */ - __raw_writel(0, twd_base + TWD_TIMER_CONTROL); + writel_relaxed(0, twd_base + TWD_TIMER_CONTROL); clk->name = "local_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index 41cf3cbf756..2835d35234c 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -10,7 +10,7 @@ #include <asm/suspend.h> #include <asm/tlbflush.h> -extern int __cpu_suspend(unsigned long, int (*)(unsigned long)); +extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid); extern void cpu_resume_mmu(void); #ifdef CONFIG_MMU @@ -21,6 +21,7 @@ extern void cpu_resume_mmu(void); int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { struct mm_struct *mm = current->active_mm; + u32 __mpidr = cpu_logical_map(smp_processor_id()); int ret; if (!idmap_pgd) @@ -32,7 +33,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) * resume (indicated by a zero return code), we need to switch * back to the correct page tables. 
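The __raw_readl/__raw_writel to readl_relaxed/writel_relaxed conversions in the SCU and TWD code matter for the big-endian (BE8) support elsewhere in this series: the relaxed accessors treat the device register as little-endian and byte-swap on a big-endian kernel, while the __raw_* forms perform a native-endian access. A host-side illustration of the difference, with a 4-byte buffer standing in for a 32-bit MMIO register:

/* Native-endian load versus explicit little-endian load. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t raw_read32(const void *addr)	/* __raw_readl-style */
{
	uint32_t v;

	memcpy(&v, addr, sizeof(v));	/* native-endian load */
	return v;
}

static uint32_t read32_le(const void *addr)	/* readl_relaxed-style */
{
	const uint8_t *p = addr;

	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t reg[4] = { 0x78, 0x56, 0x34, 0x12 };	/* device holds 0x12345678 LE */

	printf("raw: %#x, relaxed: %#x\n",
	       (unsigned)raw_read32(reg), (unsigned)read32_le(reg));
	return 0;
}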
*/ - ret = __cpu_suspend(arg, fn); + ret = __cpu_suspend(arg, fn, __mpidr); if (ret == 0) { cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); @@ -44,7 +45,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) #else int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { - return __cpu_suspend(arg, fn); + u32 __mpidr = cpu_logical_map(smp_processor_id()); + return __cpu_suspend(arg, fn, __mpidr); } #define idmap_pgd NULL #endif diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 8fcda140358..6125f259b7b 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -34,6 +34,7 @@ #include <asm/unwind.h> #include <asm/tls.h> #include <asm/system_misc.h> +#include <asm/opcodes.h> static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; @@ -341,15 +342,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs, int is_valid_bugaddr(unsigned long pc) { #ifdef CONFIG_THUMB2_KERNEL - unsigned short bkpt; + u16 bkpt; + u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE); #else - unsigned long bkpt; + u32 bkpt; + u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE); #endif if (probe_kernel_address((unsigned *)pc, bkpt)) return 0; - return bkpt == BUG_INSTR_VALUE; + return bkpt == insn; } #endif @@ -402,25 +405,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) if (processor_mode(regs) == SVC_MODE) { #ifdef CONFIG_THUMB2_KERNEL if (thumb_mode(regs)) { - instr = ((u16 *)pc)[0]; + instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]); if (is_wide_instruction(instr)) { - instr <<= 16; - instr |= ((u16 *)pc)[1]; + u16 inst2; + inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]); + instr = __opcode_thumb32_compose(instr, inst2); } } else #endif - instr = *(u32 *) pc; + instr = __mem_to_opcode_arm(*(u32 *) pc); } else if (thumb_mode(regs)) { if (get_user(instr, (u16 __user *)pc)) goto die_sig; + instr = __mem_to_opcode_thumb16(instr); if (is_wide_instruction(instr)) { unsigned int instr2; if (get_user(instr2, (u16 __user *)pc+1)) goto die_sig; - instr <<= 16; - instr |= instr2; + instr2 = __mem_to_opcode_thumb16(instr2); + instr = __opcode_thumb32_compose(instr, instr2); } } else if (get_user(instr, (u32 __user *)pc)) { + instr = __mem_to_opcode_arm(instr); goto die_sig; } diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index ebf5015508b..466bd299b1a 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig @@ -20,6 +20,7 @@ config KVM bool "Kernel-based Virtual Machine (KVM) support" select PREEMPT_NOTIFIERS select ANON_INODES + select HAVE_KVM_CPU_RELAX_INTERCEPT select KVM_MMIO select KVM_ARM_HOST depends on ARM_VIRT_EXT && ARM_LPAE diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index d99bee4950e..789bca9e64a 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile @@ -19,6 +19,6 @@ kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o obj-y += kvm-arm.o init.o interrupts.o obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o -obj-y += coproc.o coproc_a15.o mmio.o psci.o perf.o +obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 9c697db2787..2a700e00528 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -65,7 +65,7 @@ static bool vgic_present; static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) { BUG_ON(preemptible()); - __get_cpu_var(kvm_arm_running_vcpu) = vcpu; + 
__this_cpu_write(kvm_arm_running_vcpu, vcpu); } /** @@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) struct kvm_vcpu *kvm_arm_get_running_vcpu(void) { BUG_ON(preemptible()); - return __get_cpu_var(kvm_arm_running_vcpu); + return __this_cpu_read(kvm_arm_running_vcpu); } /** @@ -152,12 +152,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) { return 0; } @@ -797,6 +798,19 @@ long kvm_arch_vm_ioctl(struct file *filp, return -EFAULT; return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); } + case KVM_ARM_PREFERRED_TARGET: { + int err; + struct kvm_vcpu_init init; + + err = kvm_vcpu_preferred_target(&init); + if (err) + return err; + + if (copy_to_user(argp, &init, sizeof(init))) + return -EFAULT; + + return 0; + } default: return -EINVAL; } @@ -815,7 +829,7 @@ static void cpu_init_hyp_mode(void *dummy) boot_pgd_ptr = kvm_mmu_get_boot_httbr(); pgd_ptr = kvm_mmu_get_httbr(); - stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); + stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); hyp_stack_ptr = stack_page + PAGE_SIZE; vector_ptr = (unsigned long)__kvm_hyp_vector; diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index db9cf692d4d..78c0885d650 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -71,6 +71,98 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } +static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) +{ + /* + * Compute guest MPIDR. We build a virtual cluster out of the + * vcpu_id, but we read the 'U' bit from the underlying + * hardware directly. + */ + vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | + ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | + (vcpu->vcpu_id & 3)); +} + +/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */ +static bool access_actlr(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r) +{ + if (p->is_write) + return ignore_write(vcpu, p); + + *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; + return true; +} + +/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. 
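reset_mpidr() above builds a purely virtual topology: four vcpus per cluster, with only the hardware U/MT bits read from the host. A standalone sketch of that computation; the two constants are copied from the 32-bit asm/cputype.h definitions and the host MPIDR is an invented value:

#include <stdio.h>
#include <stdint.h>

#define MPIDR_SMP_BITMASK	(0x3U << 30)	/* U and MT bits */
#define MPIDR_LEVEL_BITS	8

/* Rough equivalent of the guest MPIDR computation: Aff1 = cluster, Aff0 = cpu. */
static uint32_t guest_mpidr(uint32_t hw_mpidr, uint32_t vcpu_id)
{
	return (hw_mpidr & MPIDR_SMP_BITMASK) |
	       ((vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
	       (vcpu_id & 3);
}

int main(void)
{
	uint32_t hw = 0x80000001;	/* example host MPIDR */

	for (uint32_t id = 0; id < 6; id++)
		printf("vcpu %u -> guest MPIDR 0x%08x\n",
		       (unsigned)id, (unsigned)guest_mpidr(hw, id));
	return 0;
}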
*/ +static bool access_cbar(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r) +{ + if (p->is_write) + return write_to_read_only(vcpu, p); + return read_zero(vcpu, p); +} + +/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */ +static bool access_l2ctlr(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r) +{ + if (p->is_write) + return ignore_write(vcpu, p); + + *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; + return true; +} + +static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) +{ + u32 l2ctlr, ncores; + + asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); + l2ctlr &= ~(3 << 24); + ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; + /* How many cores in the current cluster and the next ones */ + ncores -= (vcpu->vcpu_id & ~3); + /* Cap it to the maximum number of cores in a single cluster */ + ncores = min(ncores, 3U); + l2ctlr |= (ncores & 3) << 24; + + vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; +} + +static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) +{ + u32 actlr; + + /* ACTLR contains SMP bit: make sure you create all cpus first! */ + asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); + /* Make the SMP bit consistent with the guest configuration */ + if (atomic_read(&vcpu->kvm->online_vcpus) > 1) + actlr |= 1U << 6; + else + actlr &= ~(1U << 6); + + vcpu->arch.cp15[c1_ACTLR] = actlr; +} + +/* + * TRM entries: A7:4.3.50, A15:4.3.49 + * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). + */ +static bool access_l2ectlr(struct kvm_vcpu *vcpu, + const struct coproc_params *p, + const struct coproc_reg *r) +{ + if (p->is_write) + return ignore_write(vcpu, p); + + *vcpu_reg(vcpu, p->Rt1) = 0; + return true; +} + /* See note at ARM ARM B1.14.4 */ static bool access_dcsw(struct kvm_vcpu *vcpu, const struct coproc_params *p, @@ -153,10 +245,22 @@ static bool pm_fake(struct kvm_vcpu *vcpu, * registers preceding 32-bit ones. */ static const struct coproc_reg cp15_regs[] = { + /* MPIDR: we use VMPIDR for guest access. */ + { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, + NULL, reset_mpidr, c0_MPIDR }, + /* CSSELR: swapped by interrupt.S. */ { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, NULL, reset_unknown, c0_CSSELR }, + /* ACTLR: trapped by HCR.TAC bit. */ + { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, + access_actlr, reset_actlr, c1_ACTLR }, + + /* CPACR: swapped by interrupt.S. */ + { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, + NULL, reset_val, c1_CPACR, 0x00000000 }, + /* TTBR0/TTBR1: swapped by interrupt.S. */ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, @@ -195,6 +299,13 @@ static const struct coproc_reg cp15_regs[] = { { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, /* + * L2CTLR access (guest wants to know #CPUs). + */ + { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, + access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, + { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, + + /* * Dummy performance monitor implementation. */ { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, @@ -234,6 +345,9 @@ static const struct coproc_reg cp15_regs[] = { /* CNTKCTL: swapped by interrupt.S. */ { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32, NULL, reset_val, c14_CNTKCTL, 0x00000000 }, + + /* The Configuration Base Address Register. 
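reset_l2ctlr() above is the one place the guest learns its core count: bits [25:24] of L2CTLR report the number of cores in the vcpu's virtual cluster minus one, capped at four cores per cluster. Roughly the same arithmetic as a standalone program, with an invented register value and vcpu count:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_l2ctlr(uint32_t l2ctlr, unsigned int online_vcpus,
			    unsigned int vcpu_id)
{
	uint32_t ncores = online_vcpus - 1;

	l2ctlr &= ~(3U << 24);
	/* cores in this vcpu's cluster and the following ones ... */
	ncores -= (vcpu_id & ~3U);
	/* ... capped at the four cores a single cluster can hold */
	if (ncores > 3)
		ncores = 3;
	return l2ctlr | ((ncores & 3) << 24);
}

int main(void)
{
	/* with 6 vcpus: vcpus 0-3 report 4 cores, vcpus 4-5 report 2 */
	for (unsigned int id = 0; id < 6; id++)
		printf("vcpu %u -> L2CTLR 0x%08x\n", id, fake_l2ctlr(0, 6, id));
	return 0;
}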
*/ + { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, }; /* Target specific emulation tables */ @@ -241,6 +355,12 @@ static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) { + unsigned int i; + + for (i = 1; i < table->num; i++) + BUG_ON(cmp_reg(&table->table[i-1], + &table->table[i]) >= 0); + target_tables[table->target] = table; } diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index cf93472b9dd..bb0cac1410c 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c @@ -17,101 +17,12 @@ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/kvm_host.h> -#include <asm/cputype.h> -#include <asm/kvm_arm.h> -#include <asm/kvm_host.h> -#include <asm/kvm_emulate.h> #include <asm/kvm_coproc.h> +#include <asm/kvm_emulate.h> #include <linux/init.h> -static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - /* - * Compute guest MPIDR: - * (Even if we present only one VCPU to the guest on an SMP - * host we don't set the U bit in the MPIDR, or vice versa, as - * revealing the underlying hardware properties is likely to - * be the best choice). - */ - vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK) - | (vcpu->vcpu_id & MPIDR_LEVEL_MASK); -} - #include "coproc.h" -/* A15 TRM 4.3.28: RO WI */ -static bool access_actlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; - return true; -} - -/* A15 TRM 4.3.60: R/O. */ -static bool access_cbar(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return write_to_read_only(vcpu, p); - return read_zero(vcpu, p); -} - -/* A15 TRM 4.3.48: R/O WI. */ -static bool access_l2ctlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; - return true; -} - -static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 l2ctlr, ncores; - - asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); - l2ctlr &= ~(3 << 24); - ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; - l2ctlr |= (ncores & 3) << 24; - - vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; -} - -static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 actlr; - - /* ACTLR contains SMP bit: make sure you create all cpus first! */ - asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); - /* Make the SMP bit consistent with the guest configuration */ - if (atomic_read(&vcpu->kvm->online_vcpus) > 1) - actlr |= 1U << 6; - else - actlr &= ~(1U << 6); - - vcpu->arch.cp15[c1_ACTLR] = actlr; -} - -/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */ -static bool access_l2ectlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = 0; - return true; -} - /* * A15-specific CP15 registers. * CRn denotes the primary register number, but is copied to the CRm in the @@ -121,29 +32,9 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, * registers preceding 32-bit ones. */ static const struct coproc_reg a15_regs[] = { - /* MPIDR: we use VMPIDR for guest access. 
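Moving the sanity check into kvm_register_target_coproc_table() means every target table, not just the A15 one, is verified to be strictly sorted at registration time. The invariant itself is easy to demonstrate standalone (struct reg_key and the sample entries below are simplifications, not the real struct coproc_reg):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct reg_key { int CRn, CRm, Op1, Op2; };

/* Lexicographic comparison on (CRn, CRm, Op1, Op2), as the table ordering requires. */
static int cmp_key(const struct reg_key *a, const struct reg_key *b)
{
	if (a->CRn != b->CRn) return a->CRn - b->CRn;
	if (a->CRm != b->CRm) return a->CRm - b->CRm;
	if (a->Op1 != b->Op1) return a->Op1 - b->Op1;
	return a->Op2 - b->Op2;
}

int main(void)
{
	static const struct reg_key table[] = {
		{ 0, 0, 0, 5 },		/* MPIDR  */
		{ 1, 0, 0, 0 },		/* SCTLR  */
		{ 1, 0, 0, 1 },		/* ACTLR  */
		{ 9, 0, 1, 2 },		/* L2CTLR */
	};
	size_t n = sizeof(table) / sizeof(table[0]);

	/* same shape as the registration-time BUG_ON() loop */
	for (size_t i = 1; i < n; i++)
		assert(cmp_key(&table[i - 1], &table[i]) < 0);

	printf("table of %zu entries is strictly ascending\n", n);
	return 0;
}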
*/ - { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, - NULL, reset_mpidr, c0_MPIDR }, - /* SCTLR: swapped by interrupt.S. */ { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, NULL, reset_val, c1_SCTLR, 0x00C50078 }, - /* ACTLR: trapped by HCR.TAC bit. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, - access_actlr, reset_actlr, c1_ACTLR }, - /* CPACR: swapped by interrupt.S. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_val, c1_CPACR, 0x00000000 }, - - /* - * L2CTLR access (guest wants to know #CPUs). - */ - { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, - access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, - { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, - - /* The Configuration Base Address Register. */ - { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, }; static struct kvm_coproc_target_table a15_target_table = { @@ -154,12 +45,6 @@ static struct kvm_coproc_target_table a15_target_table = { static int __init coproc_a15_init(void) { - unsigned int i; - - for (i = 1; i < ARRAY_SIZE(a15_regs); i++) - BUG_ON(cmp_reg(&a15_regs[i-1], - &a15_regs[i]) >= 0); - kvm_register_target_coproc_table(&a15_target_table); return 0; } diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c new file mode 100644 index 00000000000..1df76733158 --- /dev/null +++ b/arch/arm/kvm/coproc_a7.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Copyright (C) 2013 - ARM Ltd + * + * Authors: Rusty Russell <rusty@rustcorp.au> + * Christoffer Dall <c.dall@virtualopensystems.com> + * Jonathan Austin <jonathan.austin@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ +#include <linux/kvm_host.h> +#include <asm/kvm_coproc.h> +#include <asm/kvm_emulate.h> +#include <linux/init.h> + +#include "coproc.h" + +/* + * Cortex-A7 specific CP15 registers. + * CRn denotes the primary register number, but is copied to the CRm in the + * user space API for 64-bit register access in line with the terminology used + * in the ARM ARM. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit + * registers preceding 32-bit ones. + */ +static const struct coproc_reg a7_regs[] = { + /* SCTLR: swapped by interrupt.S. 
*/ + { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, + NULL, reset_val, c1_SCTLR, 0x00C50878 }, +}; + +static struct kvm_coproc_target_table a7_target_table = { + .target = KVM_ARM_TARGET_CORTEX_A7, + .table = a7_regs, + .num = ARRAY_SIZE(a7_regs), +}; + +static int __init coproc_a7_init(void) +{ + kvm_register_target_coproc_table(&a7_target_table); + return 0; +} +late_initcall(coproc_a7_init); diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index bdede9e7da5..d6c00528367 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -354,7 +354,7 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; if (is_pabt) { - /* Set DFAR and DFSR */ + /* Set IFAR and IFSR */ vcpu->arch.cp15[c6_IFAR] = addr; is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); /* Always give debug fault for now - should give guest a clue */ diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 152d0361218..20f8d97904a 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -190,6 +190,8 @@ int __attribute_const__ kvm_target_cpu(void) return -EINVAL; switch (part_number) { + case ARM_CPU_PART_CORTEX_A7: + return KVM_ARM_TARGET_CORTEX_A7; case ARM_CPU_PART_CORTEX_A15: return KVM_ARM_TARGET_CORTEX_A15; default: @@ -202,7 +204,7 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, { unsigned int i; - /* We can only do a cortex A15 for now. */ + /* We can only cope with guest==host and only on A15/A7 (for now). */ if (init->target != kvm_target_cpu()) return -EINVAL; @@ -222,6 +224,26 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, return kvm_reset_vcpu(vcpu); } +int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init) +{ + int target = kvm_target_cpu(); + + if (target < 0) + return -ENODEV; + + memset(init, 0, sizeof(*init)); + + /* + * For now, we don't return any features. + * In future, we might use features to return target + * specific features available for the preferred + * target type. + */ + init->target = (__u32)target; + + return 0; +} + int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index df4c82d47ad..a92079011a8 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -73,23 +73,29 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) } /** - * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest + * kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests * @vcpu: the vcpu pointer * @run: the kvm_run structure pointer * - * Simply sets the wait_for_interrupts flag on the vcpu structure, which will - * halt execution of world-switches and schedule other host processes until - * there is an incoming IRQ or FIQ to the VM. + * WFE: Yield the CPU and come back to this vcpu when the scheduler + * decides to. + * WFI: Simply call kvm_vcpu_block(), which will halt execution of + * world-switches and schedule other host processes until there is an + * incoming IRQ or FIQ to the VM. 
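kvm_vcpu_preferred_target() above backs the new KVM_ARM_PREFERRED_TARGET vm ioctl, and the intended user-space flow is query-then-init. A minimal sketch, assuming uapi headers recent enough to define the ioctl and with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_vcpu_init init;
	int kvm, vm, vcpu;

	kvm = open("/dev/kvm", O_RDWR);
	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	if (kvm < 0 || vm < 0 || vcpu < 0) {
		perror("kvm setup");
		return 1;
	}

	/* ask the kernel which CPU model it prefers to emulate on this host ... */
	if (ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init) < 0) {
		perror("KVM_ARM_PREFERRED_TARGET");
		return 1;
	}

	/* ... and initialise the vcpu as exactly that target */
	if (ioctl(vcpu, KVM_ARM_VCPU_INIT, &init) < 0) {
		perror("KVM_ARM_VCPU_INIT");
		return 1;
	}

	printf("vcpu initialised as target %u\n", (unsigned)init.target);
	return 0;
}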
*/ -static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) +static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) { trace_kvm_wfi(*vcpu_pc(vcpu)); - kvm_vcpu_block(vcpu); + if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) + kvm_vcpu_on_spin(vcpu); + else + kvm_vcpu_block(vcpu); + return 1; } static exit_handle_fn arm_exit_handlers[] = { - [HSR_EC_WFI] = kvm_handle_wfi, + [HSR_EC_WFI] = kvm_handle_wfx, [HSR_EC_CP15_32] = kvm_handle_cp15_32, [HSR_EC_CP15_64] = kvm_handle_cp15_64, [HSR_EC_CP14_MR] = kvm_handle_cp14_access, diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 0c25d9487d5..4cb5a93182e 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -23,6 +23,68 @@ #include "trace.h" +static void mmio_write_buf(char *buf, unsigned int len, unsigned long data) +{ + void *datap = NULL; + union { + u8 byte; + u16 hword; + u32 word; + u64 dword; + } tmp; + + switch (len) { + case 1: + tmp.byte = data; + datap = &tmp.byte; + break; + case 2: + tmp.hword = data; + datap = &tmp.hword; + break; + case 4: + tmp.word = data; + datap = &tmp.word; + break; + case 8: + tmp.dword = data; + datap = &tmp.dword; + break; + } + + memcpy(buf, datap, len); +} + +static unsigned long mmio_read_buf(char *buf, unsigned int len) +{ + unsigned long data = 0; + union { + u16 hword; + u32 word; + u64 dword; + } tmp; + + switch (len) { + case 1: + data = buf[0]; + break; + case 2: + memcpy(&tmp.hword, buf, len); + data = tmp.hword; + break; + case 4: + memcpy(&tmp.word, buf, len); + data = tmp.word; + break; + case 8: + memcpy(&tmp.dword, buf, len); + data = tmp.dword; + break; + } + + return data; +} + /** * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation * @vcpu: The VCPU pointer @@ -33,28 +95,27 @@ */ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) { - unsigned long *dest; + unsigned long data; unsigned int len; int mask; if (!run->mmio.is_write) { - dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); - *dest = 0; - len = run->mmio.len; if (len > sizeof(unsigned long)) return -EINVAL; - memcpy(dest, run->mmio.data, len); - - trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, - *((u64 *)run->mmio.data)); + data = mmio_read_buf(run->mmio.data, len); if (vcpu->arch.mmio_decode.sign_extend && len < sizeof(unsigned long)) { mask = 1U << ((len * 8) - 1); - *dest = (*dest ^ mask) - mask; + data = (data ^ mask) - mask; } + + trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, + data); + data = vcpu_data_host_to_guest(vcpu, data, len); + *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data; } return 0; @@ -105,6 +166,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, phys_addr_t fault_ipa) { struct kvm_exit_mmio mmio; + unsigned long data; unsigned long rt; int ret; @@ -125,13 +187,15 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, } rt = vcpu->arch.mmio_decode.rt; + data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), mmio.len); + trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE : KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len, fault_ipa, - (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0); + (mmio.is_write) ? 
data : 0); if (mmio.is_write) - memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len); + mmio_write_buf(mmio.data, mmio.len, data); if (vgic_handle_mmio(vcpu, run, &mmio)) return 1; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index b0de86b56c1..371958370de 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -19,6 +19,7 @@ #include <linux/mman.h> #include <linux/kvm_host.h> #include <linux/io.h> +#include <linux/hugetlb.h> #include <trace/events/kvm.h> #include <asm/pgalloc.h> #include <asm/cacheflush.h> @@ -41,6 +42,8 @@ static unsigned long hyp_idmap_start; static unsigned long hyp_idmap_end; static phys_addr_t hyp_idmap_vector; +#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) + static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { /* @@ -93,19 +96,29 @@ static bool page_empty(void *ptr) static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) { - pmd_t *pmd_table = pmd_offset(pud, 0); - pud_clear(pud); - kvm_tlb_flush_vmid_ipa(kvm, addr); - pmd_free(NULL, pmd_table); + if (pud_huge(*pud)) { + pud_clear(pud); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { + pmd_t *pmd_table = pmd_offset(pud, 0); + pud_clear(pud); + kvm_tlb_flush_vmid_ipa(kvm, addr); + pmd_free(NULL, pmd_table); + } put_page(virt_to_page(pud)); } static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) { - pte_t *pte_table = pte_offset_kernel(pmd, 0); - pmd_clear(pmd); - kvm_tlb_flush_vmid_ipa(kvm, addr); - pte_free_kernel(NULL, pte_table); + if (kvm_pmd_huge(*pmd)) { + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + } else { + pte_t *pte_table = pte_offset_kernel(pmd, 0); + pmd_clear(pmd); + kvm_tlb_flush_vmid_ipa(kvm, addr); + pte_free_kernel(NULL, pte_table); + } put_page(virt_to_page(pmd)); } @@ -136,18 +149,32 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, continue; } + if (pud_huge(*pud)) { + /* + * If we are dealing with a huge pud, just clear it and + * move on. 
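Outside the kernel, the read side of the new MMIO handling above boils down to two steps: widen the byte buffer with memcpy at the access size, then sign-extend with the (x ^ mask) - mask identity. A standalone sketch (the buffer contents are invented and assume a little-endian host):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static unsigned long read_and_extend(const unsigned char *buf,
				     unsigned int len, int sign_extend)
{
	unsigned long data = 0;
	uint16_t h;
	uint32_t w;

	switch (len) {
	case 1: data = buf[0]; break;
	case 2: memcpy(&h, buf, 2); data = h; break;
	case 4: memcpy(&w, buf, 4); data = w; break;
	}

	if (sign_extend && len < sizeof(unsigned long)) {
		unsigned long mask = 1UL << (len * 8 - 1);

		/* two's-complement sign extension without a signed cast */
		data = (data ^ mask) - mask;
	}
	return data;
}

int main(void)
{
	unsigned char buf[4] = { 0xf0, 0xff, 0x00, 0x00 };	/* halfword -16 */

	printf("zero-extended halfword: %lu\n", read_and_extend(buf, 2, 0));
	printf("sign-extended halfword: %ld\n", (long)read_and_extend(buf, 2, 1));
	return 0;
}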
+ */ + clear_pud_entry(kvm, pud, addr); + addr = pud_addr_end(addr, end); + continue; + } + pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { addr = pmd_addr_end(addr, end); continue; } - pte = pte_offset_kernel(pmd, addr); - clear_pte_entry(kvm, pte, addr); - next = addr + PAGE_SIZE; + if (!kvm_pmd_huge(*pmd)) { + pte = pte_offset_kernel(pmd, addr); + clear_pte_entry(kvm, pte, addr); + next = addr + PAGE_SIZE; + } - /* If we emptied the pte, walk back up the ladder */ - if (page_empty(pte)) { + /* + * If the pmd entry is to be cleared, walk back up the ladder + */ + if (kvm_pmd_huge(*pmd) || page_empty(pte)) { clear_pmd_entry(kvm, pmd, addr); next = pmd_addr_end(addr, end); if (page_empty(pmd) && !page_empty(pud)) { @@ -420,29 +447,71 @@ void kvm_free_stage2_pgd(struct kvm *kvm) kvm->arch.pgd = NULL; } - -static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, - phys_addr_t addr, const pte_t *new_pte, bool iomap) +static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte, old_pte; - /* Create 2nd stage page table mapping - Level 1 */ pgd = kvm->arch.pgd + pgd_index(addr); pud = pud_offset(pgd, addr); if (pud_none(*pud)) { if (!cache) - return 0; /* ignore calls from kvm_set_spte_hva */ + return NULL; pmd = mmu_memory_cache_alloc(cache); pud_populate(NULL, pud, pmd); get_page(virt_to_page(pud)); } - pmd = pmd_offset(pud, addr); + return pmd_offset(pud, addr); +} + +static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + *cache, phys_addr_t addr, const pmd_t *new_pmd) +{ + pmd_t *pmd, old_pmd; + + pmd = stage2_get_pmd(kvm, cache, addr); + VM_BUG_ON(!pmd); + + /* + * Mapping in huge pages should only happen through a fault. If a + * page is merged into a transparent huge page, the individual + * subpages of that huge page should be unmapped through MMU + * notifiers before we get here. + * + * Merging of CompoundPages is not supported; they should become + * splitting first, unmapped, merged, and mapped back in on-demand. + */ + VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); + + old_pmd = *pmd; + kvm_set_pmd(pmd, *new_pmd); + if (pmd_present(old_pmd)) + kvm_tlb_flush_vmid_ipa(kvm, addr); + else + get_page(virt_to_page(pmd)); + return 0; +} + +static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, bool iomap) +{ + pmd_t *pmd; + pte_t *pte, old_pte; - /* Create 2nd stage page table mapping - Level 2 */ + /* Create stage-2 page table mapping - Level 1 */ + pmd = stage2_get_pmd(kvm, cache, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* Create stage-2 page mappings - Level 2 */ if (pmd_none(*pmd)) { if (!cache) return 0; /* ignore calls from kvm_set_spte_hva */ @@ -507,16 +576,60 @@ out: return ret; } +static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap) +{ + pfn_t pfn = *pfnp; + gfn_t gfn = *ipap >> PAGE_SHIFT; + + if (PageTransCompound(pfn_to_page(pfn))) { + unsigned long mask; + /* + * The address we faulted on is backed by a transparent huge + * page. However, because we map the compound huge page and + * not the individual tail page, we need to transfer the + * refcount to the head page. We have to be careful that the + * THP doesn't start to split while we are adjusting the + * refcounts. 
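To picture what the stage2_set_pmd_huge() path above buys: with 4 KiB pages under LPAE one PMD entry spans 2 MiB, so a single block descriptor stands in for 512 individual page mappings. A small sketch with an invented fault address (the shift values are the usual LPAE ones, restated here as assumptions):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* one PMD entry maps 2 MiB */
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	uint64_t fault_ipa = 0x40312345ULL;	/* example guest physical address */

	/* a huge (block) mapping covers the whole 2 MiB around the fault ... */
	printf("block mapping: base 0x%llx, offset 0x%llx, %llu pages covered\n",
	       (unsigned long long)(fault_ipa & PMD_MASK),
	       (unsigned long long)(fault_ipa & ~PMD_MASK),
	       (unsigned long long)(PMD_SIZE >> PAGE_SHIFT));

	/* ... while a normal stage-2 pte maps just one 4 KiB page */
	printf("page mapping:  base 0x%llx\n",
	       (unsigned long long)(fault_ipa & ~((1ULL << PAGE_SHIFT) - 1)));
	return 0;
}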
+ * + * We are sure this doesn't happen, because mmu_notifier_retry + * was successful and we are holding the mmu_lock, so if this + * THP is trying to split, it will be blocked in the mmu + * notifier before touching any of the pages, specifically + * before being able to call __split_huge_page_refcount(). + * + * We can therefore safely transfer the refcount from PG_tail + * to PG_head and switch the pfn from a tail page to the head + * page accordingly. + */ + mask = PTRS_PER_PMD - 1; + VM_BUG_ON((gfn & mask) != (pfn & mask)); + if (pfn & mask) { + *ipap &= PMD_MASK; + kvm_release_pfn_clean(pfn); + pfn &= ~mask; + kvm_get_pfn(pfn); + *pfnp = pfn; + } + + return true; + } + + return false; +} + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - gfn_t gfn, struct kvm_memory_slot *memslot, + struct kvm_memory_slot *memslot, unsigned long fault_status) { - pte_t new_pte; - pfn_t pfn; int ret; - bool write_fault, writable; + bool write_fault, writable, hugetlb = false, force_pte = false; unsigned long mmu_seq; + gfn_t gfn = fault_ipa >> PAGE_SHIFT; + unsigned long hva = gfn_to_hva(vcpu->kvm, gfn); + struct kvm *kvm = vcpu->kvm; struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + struct vm_area_struct *vma; + pfn_t pfn; write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); if (fault_status == FSC_PERM && !write_fault) { @@ -524,6 +637,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, return -EFAULT; } + /* Let's check if we will get back a huge page backed by hugetlbfs */ + down_read(&current->mm->mmap_sem); + vma = find_vma_intersection(current->mm, hva, hva + 1); + if (is_vm_hugetlb_page(vma)) { + hugetlb = true; + gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; + } else { + /* + * Pages belonging to VMAs not aligned to the PMD mapping + * granularity cannot be mapped using block descriptors even + * if the pages belong to a THP for the process, because the + * stage-2 block descriptor will cover more than a single THP + * and we loose atomicity for unmapping, updates, and splits + * of the THP or other pages in the stage-2 block range.
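transparent_hugepage_adjust(), whose tail is shown above, only has to do a little arithmetic once the safety argument holds: check that the fault address and the pfn sit at the same offset inside the 2 MiB region, then round both down so the block mapping goes through the THP's head page. A standalone sketch with invented values (PTRS_PER_PMD = 512 assumes 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PMD	512
#define PMD_MASK	(~((uint64_t)PTRS_PER_PMD * (1ULL << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t ipa = 0x40312000ULL;	/* faulting guest physical address */
	uint64_t pfn = 0x89F12ULL;	/* pfn of the tail page backing it */
	uint64_t mask = PTRS_PER_PMD - 1;

	if (((ipa >> PAGE_SHIFT) & mask) != (pfn & mask)) {
		printf("offsets disagree: not a plausible THP backing\n");
		return 1;
	}

	if (pfn & mask) {
		ipa &= PMD_MASK;	/* map the whole 2 MiB block ... */
		pfn &= ~mask;		/* ... through the head page's pfn */
	}

	printf("adjusted: IPA 0x%llx, head pfn 0x%llx\n",
	       (unsigned long long)ipa, (unsigned long long)pfn);
	return 0;
}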
+ */ + if (vma->vm_start & ~PMD_MASK) + force_pte = true; + } + up_read(&current->mm->mmap_sem); + /* We need minimum second+third level pages */ ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS); if (ret) @@ -541,26 +674,40 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, */ smp_rmb(); - pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable); + pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); if (is_error_pfn(pfn)) return -EFAULT; - new_pte = pfn_pte(pfn, PAGE_S2); - coherent_icache_guest_page(vcpu->kvm, gfn); - - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) + spin_lock(&kvm->mmu_lock); + if (mmu_notifier_retry(kvm, mmu_seq)) goto out_unlock; - if (writable) { - kvm_set_s2pte_writable(&new_pte); - kvm_set_pfn_dirty(pfn); + if (!hugetlb && !force_pte) + hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); + + if (hugetlb) { + pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2); + new_pmd = pmd_mkhuge(new_pmd); + if (writable) { + kvm_set_s2pmd_writable(&new_pmd); + kvm_set_pfn_dirty(pfn); + } + coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE); + ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); + } else { + pte_t new_pte = pfn_pte(pfn, PAGE_S2); + if (writable) { + kvm_set_s2pte_writable(&new_pte); + kvm_set_pfn_dirty(pfn); + } + coherent_icache_guest_page(kvm, hva, PAGE_SIZE); + ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false); } - stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); + out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); + spin_unlock(&kvm->mmu_lock); kvm_release_pfn_clean(pfn); - return 0; + return ret; } /** @@ -629,7 +776,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) memslot = gfn_to_memslot(vcpu->kvm, gfn); - ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status); + ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status); if (ret == 0) ret = 1; out_unlock: diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 86a693a02ba..0881bf169fb 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c @@ -18,6 +18,7 @@ #include <linux/kvm_host.h> #include <linux/wait.h> +#include <asm/cputype.h> #include <asm/kvm_emulate.h> #include <asm/kvm_psci.h> @@ -34,22 +35,30 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) { struct kvm *kvm = source_vcpu->kvm; - struct kvm_vcpu *vcpu; + struct kvm_vcpu *vcpu = NULL, *tmp; wait_queue_head_t *wq; unsigned long cpu_id; + unsigned long mpidr; phys_addr_t target_pc; + int i; cpu_id = *vcpu_reg(source_vcpu, 1); if (vcpu_mode_is_32bit(source_vcpu)) cpu_id &= ~((u32) 0); - if (cpu_id >= atomic_read(&kvm->online_vcpus)) + kvm_for_each_vcpu(i, tmp, kvm) { + mpidr = kvm_vcpu_get_mpidr(tmp); + if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) { + vcpu = tmp; + break; + } + } + + if (!vcpu) return KVM_PSCI_RET_INVAL; target_pc = *vcpu_reg(source_vcpu, 2); - vcpu = kvm_get_vcpu(kvm, cpu_id); - wq = kvm_arch_vcpu_wq(vcpu); if (!waitqueue_active(wq)) return KVM_PSCI_RET_INVAL; @@ -62,6 +71,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) vcpu_set_thumb(vcpu); } + /* Propagate caller endianness */ + if (kvm_vcpu_is_be(source_vcpu)) + kvm_vcpu_set_be(vcpu); + *vcpu_pc(vcpu) = target_pc; vcpu->arch.pause = false; smp_mb(); /* Make sure the above is visible */ diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index c02ba4af599..f558c073c02 100644 ---
a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -30,16 +30,14 @@ #include <kvm/arm_arch_timer.h> /****************************************************************************** - * Cortex-A15 Reset Values + * Cortex-A15 and Cortex-A7 Reset Values */ -static const int a15_max_cpu_idx = 3; - -static struct kvm_regs a15_regs_reset = { +static struct kvm_regs cortexa_regs_reset = { .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, }; -static const struct kvm_irq_level a15_vtimer_irq = { +static const struct kvm_irq_level cortexa_vtimer_irq = { { .irq = 27 }, .level = 1, }; @@ -62,12 +60,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) const struct kvm_irq_level *cpu_vtimer_irq; switch (vcpu->arch.target) { + case KVM_ARM_TARGET_CORTEX_A7: case KVM_ARM_TARGET_CORTEX_A15: - if (vcpu->vcpu_id > a15_max_cpu_idx) - return -EINVAL; - reset_regs = &a15_regs_reset; + reset_regs = &cortexa_regs_reset; vcpu->arch.midr = read_cpuid_id(); - cpu_vtimer_irq = &a15_vtimer_irq; + cpu_vtimer_irq = &cortexa_vtimer_irq; break; default: return -ENODEV; diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index d6408d1ee54..e0c68d5bb7d 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -10,6 +10,11 @@ UNWIND( .fnstart ) and r3, r0, #31 @ Get bit offset mov r0, r0, lsr #5 add r1, r1, r0, lsl #2 @ Get word offset +#if __LINUX_ARM_ARCH__ >= 7 + .arch_extension mp + ALT_SMP(W(pldw) [r1]) + ALT_UP(W(nop)) +#endif mov r3, r2, lsl r3 1: ldrex r2, [r1] \instr r2, r2, r3 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 025f742dd4d..3e58d710013 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -18,6 +18,7 @@ #include <linux/hardirq.h> /* for in_atomic() */ #include <linux/gfp.h> #include <linux/highmem.h> +#include <linux/hugetlb.h> #include <asm/current.h> #include <asm/page.h> @@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp) return 0; pmd = pmd_offset(pud, addr); - if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd))) + if (unlikely(pmd_none(*pmd))) + return 0; + + /* + * A pmd can be bad if it refers to a HugeTLB or THP page. + * + * Both THP and HugeTLB pages have the same pmd layout + * and should not be manipulated by the pte functions. + * + * Lock the page table for the destination and check + * to see that it's still huge and whether or not we will + * need to fault on write, or if we have a splitting THP. 
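With the PSCI change above, CPU_ON no longer treats its argument as a vcpu index; it is matched against each vcpu's MPIDR affinity bits. A reduced, standalone version of that lookup (struct toy_vcpu and the sample MPIDRs are invented; the mask value mirrors the 32-bit MPIDR_HWID_BITMASK):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define MPIDR_HWID_BITMASK	0xFFFFFF	/* affinity fields only */

struct toy_vcpu { int index; uint32_t mpidr; };

static const struct toy_vcpu *find_vcpu(const struct toy_vcpu *v, size_t n,
					uint32_t cpu_id)
{
	for (size_t i = 0; i < n; i++)
		if ((v[i].mpidr & MPIDR_HWID_BITMASK) ==
		    (cpu_id & MPIDR_HWID_BITMASK))
			return &v[i];
	return NULL;	/* corresponds to the invalid-parameter return in the real code */
}

int main(void)
{
	static const struct toy_vcpu vcpus[] = {
		{ 0, 0x80000000 }, { 1, 0x80000001 },
		{ 2, 0x80000100 }, { 3, 0x80000101 },	/* second virtual cluster */
	};
	uint32_t target = 0x101;	/* Aff1 = 1, Aff0 = 1 */
	const struct toy_vcpu *v = find_vcpu(vcpus, 4, target);

	printf("CPU_ON 0x%x -> vcpu %d\n", (unsigned)target, v ? v->index : -1);
	return 0;
}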
+ */ + if (unlikely(pmd_thp_or_huge(*pmd))) { + ptl = &current->mm->page_table_lock; + spin_lock(ptl); + if (unlikely(!pmd_thp_or_huge(*pmd) + || pmd_hugewillfault(*pmd) + || pmd_trans_splitting(*pmd))) { + spin_unlock(ptl); + return 0; + } + + *ptep = NULL; + *ptlp = ptl; + return 1; + } + + if (unlikely(pmd_bad(*pmd))) return 0; pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); @@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) from += tocopy; n -= tocopy; - pte_unmap_unlock(pte, ptl); + if (pte) + pte_unmap_unlock(pte, ptl); + else + spin_unlock(ptl); } if (!atomic) up_read(&current->mm->mmap_sem); @@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n) addr += tocopy; n -= tocopy; - pte_unmap_unlock(pte, ptl); + if (pte) + pte_unmap_unlock(pte, ptl); + else + spin_unlock(ptl); } up_read(&current->mm->mmap_sem); diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile index 3b0a9538093..90aab2d5a07 100644 --- a/arch/arm/mach-at91/Makefile +++ b/arch/arm/mach-at91/Makefile @@ -2,7 +2,7 @@ # Makefile for the linux kernel. # -obj-y := irq.o gpio.o setup.o +obj-y := irq.o gpio.o setup.o sysirq_mask.o obj-m := obj-n := obj- := @@ -98,7 +98,6 @@ obj-y += leds.o # Power Management obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_AT91_SLOW_CLOCK) += pm_slowclock.o -obj-$(CONFIG_CPU_IDLE) += cpuidle.o ifeq ($(CONFIG_PM_DEBUG),y) CFLAGS_pm.o += -DDEBUG diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index 4aad93d54d6..25805f2f601 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c @@ -27,6 +27,7 @@ #include "generic.h" #include "clock.h" #include "sam9_smc.h" +#include "pm.h" /* -------------------------------------------------------------------- * Clocks @@ -327,6 +328,7 @@ static void __init at91rm9200_ioremap_registers(void) { at91rm9200_ioremap_st(AT91RM9200_BASE_ST); at91_ioremap_ramc(0, AT91RM9200_BASE_MC, 256); + at91_pm_set_standby(at91rm9200_standby); } static void __init at91rm9200_initialize(void) diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c index 5de6074b4f4..d6a1fa85371 100644 --- a/arch/arm/mach-at91/at91sam9260.c +++ b/arch/arm/mach-at91/at91sam9260.c @@ -28,6 +28,7 @@ #include "generic.h" #include "clock.h" #include "sam9_smc.h" +#include "pm.h" /* -------------------------------------------------------------------- * Clocks @@ -342,6 +343,7 @@ static void __init at91sam9260_ioremap_registers(void) at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT); at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC); at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX); + at91_pm_set_standby(at91sam9_sdram_standby); } static void __init at91sam9260_initialize(void) @@ -349,6 +351,8 @@ static void __init at91sam9260_initialize(void) arm_pm_idle = at91sam9_idle; arm_pm_restart = at91sam9_alt_restart; + at91_sysirq_mask_rtt(AT91SAM9260_BASE_RTT); + /* Register GPIO subsystem */ at91_gpio_init(at91sam9260_gpio, 3); } diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c index 0e0793241ab..23ba1d8a153 100644 --- a/arch/arm/mach-at91/at91sam9261.c +++ b/arch/arm/mach-at91/at91sam9261.c @@ -27,6 +27,7 @@ #include "generic.h" #include "clock.h" #include "sam9_smc.h" +#include "pm.h" /* -------------------------------------------------------------------- * Clocks @@ -284,6 +285,7 @@ static void __init at91sam9261_ioremap_registers(void) at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT); at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
at91_ioremap_matrix(AT91SAM9261_BASE_MATRIX); + at91_pm_set_standby(at91sam9_sdram_standby); } static void __init at91sam9261_initialize(void) @@ -291,6 +293,8 @@ static void __init at91sam9261_initialize(void) arm_pm_idle = at91sam9_idle; arm_pm_restart = at91sam9_alt_restart; + at91_sysirq_mask_rtt(AT91SAM9261_BASE_RTT); + /* Register GPIO subsystem */ at91_gpio_init(at91sam9261_gpio, 3); } diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 629ea5fc95c..b2a34740146 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -465,7 +465,7 @@ void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static u64 lcdc_dmamask = DMA_BIT_MASK(32); -static struct atmel_lcdfb_info lcdc_data; +static struct atmel_lcdfb_pdata lcdc_data; static struct resource lcdc_resources[] = { [0] = { @@ -498,7 +498,7 @@ static struct platform_device at91_lcdc_device = { .num_resources = ARRAY_SIZE(lcdc_resources), }; -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) +void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) { if (!data) { return; @@ -559,7 +559,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) platform_device_register(&at91_lcdc_device); } #else -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {} +void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {} #endif diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c index 6ce7d185089..7eccb0fc57b 100644 --- a/arch/arm/mach-at91/at91sam9263.c +++ b/arch/arm/mach-at91/at91sam9263.c @@ -26,6 +26,7 @@ #include "generic.h" #include "clock.h" #include "sam9_smc.h" +#include "pm.h" /* -------------------------------------------------------------------- * Clocks @@ -321,6 +322,7 @@ static void __init at91sam9263_ioremap_registers(void) at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0); at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1); at91_ioremap_matrix(AT91SAM9263_BASE_MATRIX); + at91_pm_set_standby(at91sam9_sdram_standby); } static void __init at91sam9263_initialize(void) @@ -328,6 +330,9 @@ static void __init at91sam9263_initialize(void) arm_pm_idle = at91sam9_idle; arm_pm_restart = at91sam9_alt_restart; + at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT0); + at91_sysirq_mask_rtt(AT91SAM9263_BASE_RTT1); + /* Register GPIO subsystem */ at91_gpio_init(at91sam9263_gpio, 5); } diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index 858c8aac2da..4aeadddbc18 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -832,7 +832,7 @@ void __init at91_add_device_can(struct at91_can_data *data) {} #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static u64 lcdc_dmamask = DMA_BIT_MASK(32); -static struct atmel_lcdfb_info lcdc_data; +static struct atmel_lcdfb_pdata lcdc_data; static struct resource lcdc_resources[] = { [0] = { @@ -859,7 +859,7 @@ static struct platform_device at91_lcdc_device = { .num_resources = ARRAY_SIZE(lcdc_resources), }; -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) +void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) { if (!data) return; @@ -891,7 +891,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) platform_device_register(&at91_lcdc_device); } #else -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {} 
+void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {} #endif diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 474ee04d24b..9405aa08b10 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c @@ -26,6 +26,7 @@ #include "generic.h" #include "clock.h" #include "sam9_smc.h" +#include "pm.h" /* -------------------------------------------------------------------- * Clocks @@ -370,6 +371,7 @@ static void __init at91sam9g45_ioremap_registers(void) at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT); at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC); at91_ioremap_matrix(AT91SAM9G45_BASE_MATRIX); + at91_pm_set_standby(at91_ddr_standby); } static void __init at91sam9g45_initialize(void) @@ -377,6 +379,9 @@ static void __init at91sam9g45_initialize(void) arm_pm_idle = at91sam9_idle; arm_pm_restart = at91sam9g45_restart; + at91_sysirq_mask_rtc(AT91SAM9G45_BASE_RTC); + at91_sysirq_mask_rtt(AT91SAM9G45_BASE_RTT); + /* Register GPIO subsystem */ at91_gpio_init(at91sam9g45_gpio, 5); } diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index acb703e1333..cb36fa872d3 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -965,7 +965,7 @@ void __init at91_add_device_isi(struct isi_platform_data *data, #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static u64 lcdc_dmamask = DMA_BIT_MASK(32); -static struct atmel_lcdfb_info lcdc_data; +static struct atmel_lcdfb_pdata lcdc_data; static struct resource lcdc_resources[] = { [0] = { @@ -991,7 +991,7 @@ static struct platform_device at91_lcdc_device = { .num_resources = ARRAY_SIZE(lcdc_resources), }; -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) +void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) { if (!data) return; @@ -1037,7 +1037,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) platform_device_register(&at91_lcdc_device); } #else -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {} +void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {} #endif diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c index 2d895a29773..388ec3aec4b 100644 --- a/arch/arm/mach-at91/at91sam9n12.c +++ b/arch/arm/mach-at91/at91sam9n12.c @@ -224,7 +224,13 @@ static void __init at91sam9n12_map_io(void) at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE); } +static void __init at91sam9n12_initialize(void) +{ + at91_sysirq_mask_rtc(AT91SAM9N12_BASE_RTC); +} + AT91_SOC_START(at91sam9n12) .map_io = at91sam9n12_map_io, .register_clocks = at91sam9n12_register_clocks, + .init = at91sam9n12_initialize, AT91_SOC_END diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c index d4ec0d9a987..0750ffb7e6b 100644 --- a/arch/arm/mach-at91/at91sam9rl.c +++ b/arch/arm/mach-at91/at91sam9rl.c @@ -27,6 +27,7 @@ #include "generic.h" #include "clock.h" #include "sam9_smc.h" +#include "pm.h" /* -------------------------------------------------------------------- * Clocks @@ -287,6 +288,7 @@ static void __init at91sam9rl_ioremap_registers(void) at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT); at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC); at91_ioremap_matrix(AT91SAM9RL_BASE_MATRIX); + at91_pm_set_standby(at91sam9_sdram_standby); } static void __init at91sam9rl_initialize(void) @@ -294,6 +296,9 @@ static void __init at91sam9rl_initialize(void) arm_pm_idle = at91sam9_idle; arm_pm_restart = 
at91sam9_alt_restart; + at91_sysirq_mask_rtc(AT91SAM9RL_BASE_RTC); + at91_sysirq_mask_rtt(AT91SAM9RL_BASE_RTT); + /* Register GPIO subsystem */ at91_gpio_init(at91sam9rl_gpio, 4); } diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index 352468f265a..a698bdab2cc 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -498,7 +498,7 @@ void __init at91_add_device_ac97(struct ac97c_platform_data *data) {} #if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE) static u64 lcdc_dmamask = DMA_BIT_MASK(32); -static struct atmel_lcdfb_info lcdc_data; +static struct atmel_lcdfb_pdata lcdc_data; static struct resource lcdc_resources[] = { [0] = { @@ -525,7 +525,7 @@ static struct platform_device at91_lcdc_device = { .num_resources = ARRAY_SIZE(lcdc_resources), }; -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) +void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) { if (!data) { return; @@ -557,7 +557,7 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) platform_device_register(&at91_lcdc_device); } #else -void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {} +void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data) {} #endif diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index 916e5a14291..e8a2e075a1b 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c @@ -322,6 +322,11 @@ static void __init at91sam9x5_map_io(void) at91_init_sram(0, AT91SAM9X5_SRAM_BASE, AT91SAM9X5_SRAM_SIZE); } +static void __init at91sam9x5_initialize(void) +{ + at91_sysirq_mask_rtc(AT91SAM9X5_BASE_RTC); +} + /* -------------------------------------------------------------------- * Interrupt initialization * -------------------------------------------------------------------- */ @@ -329,4 +334,5 @@ static void __init at91sam9x5_map_io(void) AT91_SOC_START(at91sam9x5) .map_io = at91sam9x5_map_io, .register_clocks = at91sam9x5_register_clocks, + .init = at91sam9x5_initialize, AT91_SOC_END diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c index 0b153c87521..f4f8735315d 100644 --- a/arch/arm/mach-at91/board-sam9260ek.c +++ b/arch/arm/mach-at91/board-sam9260ek.c @@ -28,7 +28,7 @@ #include <linux/spi/spi.h> #include <linux/spi/at73c213.h> #include <linux/clk.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/gpio_keys.h> #include <linux/input.h> diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index d3437624ca4..473546b9408 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -389,7 +389,7 @@ static struct fb_monspecs at91fb_default_stn_monspecs = { | ATMEL_LCDC_IFWIDTH_4 \ | ATMEL_LCDC_SCANMOD_SINGLE) -static void at91_lcdc_stn_power_control(int on) +static void at91_lcdc_stn_power_control(struct atmel_lcdfb_pdata *pdata, int on) { /* backlight */ if (on) { /* power up */ @@ -401,7 +401,7 @@ static void at91_lcdc_stn_power_control(int on) } } -static struct atmel_lcdfb_info __initdata ek_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = { .default_bpp = 1, .default_dmacon = ATMEL_LCDC_DMAEN, .default_lcdcon2 = AT91SAM9261_DEFAULT_STN_LCDCON2, @@ -445,7 +445,7 @@ static struct fb_monspecs at91fb_default_tft_monspecs = { | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) -static void at91_lcdc_tft_power_control(int on) +static 
void at91_lcdc_tft_power_control(struct atmel_lcdfb_pdata *pdata, int on) { if (on) at91_set_gpio_value(AT91_PIN_PA12, 0); /* power up */ @@ -453,7 +453,7 @@ static void at91_lcdc_tft_power_control(int on) at91_set_gpio_value(AT91_PIN_PA12, 1); /* power down */ } -static struct atmel_lcdfb_info __initdata ek_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN, @@ -465,7 +465,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { #endif #else -static struct atmel_lcdfb_info __initdata ek_lcdc_data; +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data; #endif diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 3284df05df1..2f931915c80 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c @@ -27,7 +27,7 @@ #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/fb.h> #include <linux/gpio_keys.h> #include <linux/input.h> @@ -275,13 +275,13 @@ static struct fb_monspecs at91fb_default_monspecs = { | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) -static void at91_lcdc_power_control(int on) +static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on) { at91_set_gpio_value(AT91_PIN_PA30, on); } /* Driver datas */ -static struct atmel_lcdfb_info __initdata ek_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN, @@ -292,7 +292,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { }; #else -static struct atmel_lcdfb_info __initdata ek_lcdc_data; +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data; #endif diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c index 2a94896a137..ef39078c8ce 100644 --- a/arch/arm/mach-at91/board-sam9m10g45ek.c +++ b/arch/arm/mach-at91/board-sam9m10g45ek.c @@ -284,7 +284,7 @@ static struct fb_monspecs at91fb_default_monspecs = { | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) /* Driver datas */ -static struct atmel_lcdfb_info __initdata ek_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 32, .default_dmacon = ATMEL_LCDC_DMAEN, @@ -295,7 +295,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { }; #else -static struct atmel_lcdfb_info __initdata ek_lcdc_data; +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data; #endif diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c index aa265dcf212..604eecf6cd7 100644 --- a/arch/arm/mach-at91/board-sam9rlek.c +++ b/arch/arm/mach-at91/board-sam9rlek.c @@ -170,7 +170,7 @@ static struct fb_monspecs at91fb_default_monspecs = { | ATMEL_LCDC_DISTYPE_TFT \ | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE) -static void at91_lcdc_power_control(int on) +static void at91_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on) { if (on) at91_set_gpio_value(AT91_PIN_PC1, 0); /* power up */ @@ -179,7 +179,7 @@ static void at91_lcdc_power_control(int on) } /* Driver datas */ -static struct atmel_lcdfb_info __initdata ek_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data = { .lcdcon_is_backlight = true, .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN, @@ -191,7 +191,7 @@ static struct atmel_lcdfb_info __initdata ek_lcdc_data = { 
}; #else -static struct atmel_lcdfb_info __initdata ek_lcdc_data; +static struct atmel_lcdfb_pdata __initdata ek_lcdc_data; #endif diff --git a/arch/arm/mach-at91/board.h b/arch/arm/mach-at91/board.h index 4a234fb2ab3..6c08b341167 100644 --- a/arch/arm/mach-at91/board.h +++ b/arch/arm/mach-at91/board.h @@ -107,8 +107,8 @@ extern void __init at91_add_device_pwm(u32 mask); extern void __init at91_add_device_ssc(unsigned id, unsigned pins); /* LCD Controller */ -struct atmel_lcdfb_info; -extern void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data); +struct atmel_lcdfb_pdata; +extern void __init at91_add_device_lcdc(struct atmel_lcdfb_pdata *data); /* AC97 */ extern void __init at91_add_device_ac97(struct ac97c_platform_data *data); diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c deleted file mode 100644 index 4ec6a6d9b9b..00000000000 --- a/arch/arm/mach-at91/cpuidle.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * based on arch/arm/mach-kirkwood/cpuidle.c - * - * CPU idle support for AT91 SoC - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - * - * The cpu idle uses wait-for-interrupt and RAM self refresh in order - * to implement two idle states - - * #1 wait-for-interrupt - * #2 wait-for-interrupt and RAM self refresh - */ - -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/cpuidle.h> -#include <linux/io.h> -#include <linux/export.h> -#include <asm/proc-fns.h> -#include <asm/cpuidle.h> -#include <mach/cpu.h> - -#include "pm.h" - -#define AT91_MAX_STATES 2 - -/* Actual code that puts the SoC in different idle states */ -static int at91_enter_idle(struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index) -{ - if (cpu_is_at91rm9200()) - at91rm9200_standby(); - else if (cpu_is_at91sam9g45()) - at91sam9g45_standby(); - else if (cpu_is_at91sam9263()) - at91sam9263_standby(); - else - at91sam9_standby(); - - return index; -} - -static struct cpuidle_driver at91_idle_driver = { - .name = "at91_idle", - .owner = THIS_MODULE, - .states[0] = ARM_CPUIDLE_WFI_STATE, - .states[1] = { - .enter = at91_enter_idle, - .exit_latency = 10, - .target_residency = 10000, - .flags = CPUIDLE_FLAG_TIME_VALID, - .name = "RAM_SR", - .desc = "WFI and DDR Self Refresh", - }, - .state_count = AT91_MAX_STATES, -}; - -/* Initialize CPU idle by registering the idle states */ -static int __init at91_init_cpuidle(void) -{ - return cpuidle_register(&at91_idle_driver, NULL); -} - -device_initcall(at91_init_cpuidle); diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index dc6e2f5f804..26dee3ce939 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h @@ -34,6 +34,8 @@ extern int __init at91_aic_of_init(struct device_node *node, struct device_node *parent); extern int __init at91_aic5_of_init(struct device_node *node, struct device_node *parent); +extern void __init at91_sysirq_mask_rtc(u32 rtc_base); +extern void __init at91_sysirq_mask_rtt(u32 rtt_base); /* Timer */ diff --git a/arch/arm/mach-at91/include/mach/at91sam9n12.h b/arch/arm/mach-at91/include/mach/at91sam9n12.h index d374b87c045..0151bcf6163 100644 --- a/arch/arm/mach-at91/include/mach/at91sam9n12.h +++ b/arch/arm/mach-at91/include/mach/at91sam9n12.h @@ -49,6 +49,11 @@ #define AT91SAM9N12_BASE_USART3 0xf8028000 /* + * System Peripherals + */ +#define AT91SAM9N12_BASE_RTC 0xfffffeb0 + +/* * 
Internal Memory. */ #define AT91SAM9N12_SRAM_BASE 0x00300000 /* Internal SRAM base address */ diff --git a/arch/arm/mach-at91/include/mach/at91sam9x5.h b/arch/arm/mach-at91/include/mach/at91sam9x5.h index c75ee19b58d..2fc76c49e97 100644 --- a/arch/arm/mach-at91/include/mach/at91sam9x5.h +++ b/arch/arm/mach-at91/include/mach/at91sam9x5.h @@ -55,6 +55,11 @@ #define AT91SAM9X5_BASE_USART2 0xf8024000 /* + * System Peripherals + */ +#define AT91SAM9X5_BASE_RTC 0xfffffeb0 + +/* * Internal Memory. */ #define AT91SAM9X5_SRAM_BASE 0x00300000 /* Internal SRAM base address */ diff --git a/arch/arm/mach-at91/include/mach/sama5d3.h b/arch/arm/mach-at91/include/mach/sama5d3.h index 31096a8aaf1..25613d8c6dc 100644 --- a/arch/arm/mach-at91/include/mach/sama5d3.h +++ b/arch/arm/mach-at91/include/mach/sama5d3.h @@ -73,6 +73,11 @@ #define SAMA5D3_BASE_USART3 0xf8024000 /* + * System Peripherals + */ +#define SAMA5D3_BASE_RTC 0xfffffeb0 + +/* * Internal Memory */ #define SAMA5D3_SRAM_BASE 0x00300000 /* Internal SRAM base address */ diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 15afb5d9271..9986542e806 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -39,6 +39,8 @@ #include "at91_rstc.h" #include "at91_shdwc.h" +static void (*at91_pm_standby)(void); + static void __init show_reset_status(void) { static char reset[] __initdata = "reset"; @@ -266,14 +268,8 @@ static int at91_pm_enter(suspend_state_t state) * For ARM 926 based chips, this requirement is weaker * as at91sam9 can access a RAM in self-refresh mode. */ - if (cpu_is_at91rm9200()) - at91rm9200_standby(); - else if (cpu_is_at91sam9g45()) - at91sam9g45_standby(); - else if (cpu_is_at91sam9263()) - at91sam9263_standby(); - else - at91sam9_standby(); + if (at91_pm_standby) + at91_pm_standby(); break; case PM_SUSPEND_ON: @@ -314,6 +310,18 @@ static const struct platform_suspend_ops at91_pm_ops = { .end = at91_pm_end, }; +static struct platform_device at91_cpuidle_device = { + .name = "cpuidle-at91", +}; + +void at91_pm_set_standby(void (*at91_standby)(void)) +{ + if (at91_standby) { + at91_cpuidle_device.dev.platform_data = at91_standby; + at91_pm_standby = at91_standby; + } +} + static int __init at91_pm_init(void) { #ifdef CONFIG_AT91_SLOW_CLOCK @@ -325,6 +333,9 @@ static int __init at91_pm_init(void) /* AT91RM9200 SDRAM low-power mode cannot be used with self-refresh. */ if (cpu_is_at91rm9200()) at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, 0); + + if (at91_cpuidle_device.dev.platform_data) + platform_device_register(&at91_cpuidle_device); suspend_set_ops(&at91_pm_ops); diff --git a/arch/arm/mach-at91/pm.h b/arch/arm/mach-at91/pm.h index 2f5908f0b8c..3ed190ce062 100644 --- a/arch/arm/mach-at91/pm.h +++ b/arch/arm/mach-at91/pm.h @@ -11,9 +11,13 @@ #ifndef __ARCH_ARM_MACH_AT91_PM #define __ARCH_ARM_MACH_AT91_PM +#include <asm/proc-fns.h> + #include <mach/at91_ramc.h> #include <mach/at91rm9200_sdramc.h> +extern void at91_pm_set_standby(void (*at91_standby)(void)); + /* * The AT91RM9200 goes into self-refresh mode with this command, and will * terminate self-refresh automatically on the next SDRAM access. @@ -45,16 +49,18 @@ static inline void at91rm9200_standby(void) /* We manage both DDRAM/SDRAM controllers, we need more than one value to * remember. */ -static inline void at91sam9g45_standby(void) +static inline void at91_ddr_standby(void) { /* Those two values allow us to delay self-refresh activation * to the maximum. 
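Both standby helpers in this header follow the same pattern, now with the second RAM controller strictly optional: save each present controller's LPR, switch it to self-refresh, idle, then restore. A simulated standalone version (registers are plain variables here, and cpu_do_idle() is a stub):

#include <stdio.h>
#include <stdint.h>

#define LPCB_MASK		0x3
#define LPCB_SELF_REFRESH	0x1

/* simulated LPR registers; a NULL second base models single-controller parts */
static uint32_t lpr_reg[2];
static uint32_t *ramc_base[2] = { &lpr_reg[0], NULL };

static void cpu_do_idle(void)
{
	/* would be a WFI on real hardware */
}

static void sdram_standby(void)
{
	uint32_t saved0, saved1 = 0;

	saved0 = *ramc_base[0];
	if (ramc_base[1])
		saved1 = *ramc_base[1];

	/* put every present controller into self-refresh ... */
	*ramc_base[0] = (saved0 & ~LPCB_MASK) | LPCB_SELF_REFRESH;
	if (ramc_base[1])
		*ramc_base[1] = (saved1 & ~LPCB_MASK) | LPCB_SELF_REFRESH;

	cpu_do_idle();

	/* ... and restore the saved low-power configuration on wake-up */
	*ramc_base[0] = saved0;
	if (ramc_base[1])
		*ramc_base[1] = saved1;
}

int main(void)
{
	lpr_reg[0] = 0x2;	/* arbitrary initial LPR value */
	sdram_standby();
	printf("controller 0 LPR restored to 0x%x\n", (unsigned)lpr_reg[0]);
	return 0;
}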
*/ - u32 lpr0, lpr1; - u32 saved_lpr0, saved_lpr1; + u32 lpr0, lpr1 = 0; + u32 saved_lpr0, saved_lpr1 = 0; - saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR); - lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB; - lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH; + if (at91_ramc_base[1]) { + saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR); + lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB; + lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH; + } saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR); lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB; @@ -62,25 +68,29 @@ static inline void at91sam9g45_standby(void) /* self-refresh mode now */ at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0); - at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1); + if (at91_ramc_base[1]) + at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1); cpu_do_idle(); at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0); - at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); + if (at91_ramc_base[1]) + at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); } /* We manage both DDRAM/SDRAM controllers, we need more than one value to * remember. */ -static inline void at91sam9263_standby(void) +static inline void at91sam9_sdram_standby(void) { - u32 lpr0, lpr1; - u32 saved_lpr0, saved_lpr1; + u32 lpr0, lpr1 = 0; + u32 saved_lpr0, saved_lpr1 = 0; - saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR); - lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB; - lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH; + if (at91_ramc_base[1]) { + saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR); + lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB; + lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH; + } saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR); lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB; @@ -88,27 +98,14 @@ static inline void at91sam9263_standby(void) /* self-refresh mode now */ at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0); - at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1); + if (at91_ramc_base[1]) + at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1); cpu_do_idle(); at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0); - at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1); -} - -static inline void at91sam9_standby(void) -{ - u32 saved_lpr, lpr; - - saved_lpr = at91_ramc_read(0, AT91_SDRAMC_LPR); - - lpr = saved_lpr & ~AT91_SDRAMC_LPCB; - at91_ramc_write(0, AT91_SDRAMC_LPR, lpr | - AT91_SDRAMC_LPCB_SELF_REFRESH); - - cpu_do_idle(); - - at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr); + if (at91_ramc_base[1]) + at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1); } #endif diff --git a/arch/arm/mach-at91/sama5d3.c b/arch/arm/mach-at91/sama5d3.c index 401279715ab..3ea86428ee0 100644 --- a/arch/arm/mach-at91/sama5d3.c +++ b/arch/arm/mach-at91/sama5d3.c @@ -371,7 +371,13 @@ static void __init sama5d3_map_io(void) at91_init_sram(0, SAMA5D3_SRAM_BASE, SAMA5D3_SRAM_SIZE); } +static void __init sama5d3_initialize(void) +{ + at91_sysirq_mask_rtc(SAMA5D3_BASE_RTC); +} + AT91_SOC_START(sama5d3) .map_io = sama5d3_map_io, .register_clocks = sama5d3_register_clocks, + .init = sama5d3_initialize, AT91_SOC_END diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c index b17fbcf4d9e..094b3459c28 100644 --- a/arch/arm/mach-at91/setup.c +++ b/arch/arm/mach-at91/setup.c @@ -23,6 +23,7 @@ #include "at91_shdwc.h" #include "soc.h" #include "generic.h" +#include "pm.h" struct at91_init_soc __initdata at91_boot_soc; @@ -376,15 +377,16 @@ static void at91_dt_rstc(void) } static struct of_device_id ramc_ids[] = { - { .compatible = "atmel,at91rm9200-sdramc" }, - { .compatible = "atmel,at91sam9260-sdramc" }, - { .compatible = "atmel,at91sam9g45-ddramc" }, + { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby 
}, + { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, + { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, { /*sentinel*/ } }; static void at91_dt_ramc(void) { struct device_node *np; + const struct of_device_id *of_id; np = of_find_matching_node(NULL, ramc_ids); if (!np) @@ -396,6 +398,12 @@ static void at91_dt_ramc(void) /* the controller may have 2 banks */ at91_ramc_base[1] = of_iomap(np, 1); + of_id = of_match_node(ramc_ids, np); + if (!of_id) + pr_warn("AT91: ramc no standby function available\n"); + else + at91_pm_set_standby(of_id->data); + of_node_put(np); } diff --git a/arch/arm/mach-at91/sysirq_mask.c b/arch/arm/mach-at91/sysirq_mask.c new file mode 100644 index 00000000000..2ba694f9626 --- /dev/null +++ b/arch/arm/mach-at91/sysirq_mask.c @@ -0,0 +1,71 @@ +/* + * sysirq_mask.c - System-interrupt masking + * + * Copyright (C) 2013 Johan Hovold <jhovold@gmail.com> + * + * Functions to disable system interrupts from backup-powered peripherals. + * + * The RTC and RTT-peripherals are generally powered by backup power (VDDBU) + * and are not reset on wake-up, user, watchdog or software reset. This means + * that their interrupts may be enabled during early boot (e.g. after a user + * reset). + * + * As the RTC and RTT share the system-interrupt line with the PIT, an + * interrupt occurring before a handler has been installed would lead to the + * system interrupt being disabled and prevent the system from booting. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/io.h> +#include <mach/at91_rtt.h> + +#include "generic.h" + +#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ +#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */ + +void __init at91_sysirq_mask_rtc(u32 rtc_base) +{ + void __iomem *base; + u32 mask; + + base = ioremap(rtc_base, 64); + if (!base) + return; + + mask = readl_relaxed(base + AT91_RTC_IMR); + if (mask) { + pr_info("AT91: Disabling rtc irq\n"); + writel_relaxed(mask, base + AT91_RTC_IDR); + (void)readl_relaxed(base + AT91_RTC_IMR); /* flush */ + } + + iounmap(base); +} + +void __init at91_sysirq_mask_rtt(u32 rtt_base) +{ + void __iomem *base; + void __iomem *reg; + u32 mode; + + base = ioremap(rtt_base, 16); + if (!base) + return; + + reg = base + AT91_RTT_MR; + + mode = readl_relaxed(reg); + if (mode & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)) { + pr_info("AT91: Disabling rtt irq\n"); + mode &= ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); + writel_relaxed(mode, reg); + (void)readl_relaxed(reg); /* flush */ + } + + iounmap(base); +} diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index e026b19b23e..a075b3e0c5c 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -40,7 +40,6 @@ config ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" select ARCH_DAVINCI_DA8XX select ARCH_HAS_CPUFREQ - select CPU_FREQ_TABLE select CP_INTC config ARCH_DAVINCI_DA8XX diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index 40f15f133c5..d1f45af7a53 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -17,7 +17,7 @@ #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/i2c/pcf857x.h> -#include <linux/i2c/at24.h> +#include 
<linux/platform_data/at24.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/spi/spi.h> diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index df16cb88a26..e0af0eccde8 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -18,7 +18,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/platform_data/pca953x.h> #include <linux/input.h> #include <linux/input/tps6507x-ts.h> diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index f4a6c18912e..e08a8684ead 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -18,7 +18,7 @@ #include <linux/i2c.h> #include <linux/io.h> #include <linux/clk.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/leds.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 9cc32c283b8..987605b7855 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -15,7 +15,7 @@ #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/i2c/pcf857x.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 44b20191a9f..13d0801fd6b 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -22,7 +22,7 @@ #include <linux/gpio.h> #include <linux/platform_device.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/i2c/pcf857x.h> #include <media/tvp514x.h> diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index cd0f58730c2..7aa105b1fd0 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -15,7 +15,7 @@ #include <linux/mtd/partitions.h> #include <linux/regulator/machine.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/etherdevice.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index d8436014810..41c7c961579 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c @@ -26,7 +26,7 @@ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> diff --git a/arch/arm/mach-davinci/sram.c b/arch/arm/mach-davinci/sram.c index f18928b073f..8540dddf1fb 100644 --- a/arch/arm/mach-davinci/sram.c +++ b/arch/arm/mach-davinci/sram.c @@ -25,7 +25,6 @@ struct gen_pool *sram_get_gen_pool(void) void *sram_alloc(size_t len, dma_addr_t *dma) { - unsigned long vaddr; dma_addr_t dma_base = davinci_soc_info.sram_dma; if (dma) @@ -33,13 +32,7 @@ void *sram_alloc(size_t len, dma_addr_t *dma) if (!sram_pool || (dma && !dma_base)) return NULL; - vaddr = gen_pool_alloc(sram_pool, len); - if (!vaddr) - return NULL; - - if (dma) - *dma 
= gen_pool_virt_to_phys(sram_pool, vaddr); - return (void *)vaddr; + return gen_pool_dma_alloc(sram_pool, len, dma); } EXPORT_SYMBOL(sram_alloc); diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index a4e7ba82881..61d2906ccef 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -28,6 +28,7 @@ #include <linux/of_address.h> #include <linux/irqchip/arm-gic.h> #include <linux/irqchip/chained_irq.h> +#include <linux/platform_device.h> #include <asm/proc-fns.h> #include <asm/exception.h> @@ -292,6 +293,16 @@ void exynos5_restart(enum reboot_mode mode, const char *cmd) __raw_writel(val, addr); } +static struct platform_device exynos_cpuidle = { + .name = "exynos_cpuidle", + .id = -1, +}; + +void __init exynos_cpuidle_init(void) +{ + platform_device_register(&exynos_cpuidle); +} + void __init exynos_init_late(void) { if (of_machine_is_compatible("samsung,exynos5440")) diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h index f0fa2050d08..ff9b6a9419b 100644 --- a/arch/arm/mach-exynos/common.h +++ b/arch/arm/mach-exynos/common.h @@ -21,6 +21,7 @@ struct map_desc; void exynos_init_io(void); void exynos4_restart(enum reboot_mode mode, const char *cmd); void exynos5_restart(enum reboot_mode mode, const char *cmd); +void exynos_cpuidle_init(void); void exynos_init_late(void); void exynos_firmware_init(void); diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c index ac139226d63..ddbfe8709fe 100644 --- a/arch/arm/mach-exynos/cpuidle.c +++ b/arch/arm/mach-exynos/cpuidle.c @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/export.h> #include <linux/time.h> +#include <linux/platform_device.h> #include <asm/proc-fns.h> #include <asm/smp_scu.h> @@ -192,7 +193,7 @@ static void __init exynos5_core_down_clk(void) __raw_writel(tmp, EXYNOS5_PWR_CTRL2); } -static int __init exynos4_init_cpuidle(void) +static int exynos_cpuidle_probe(struct platform_device *pdev) { int cpu_id, ret; struct cpuidle_device *device; @@ -205,7 +206,7 @@ static int __init exynos4_init_cpuidle(void) ret = cpuidle_register_driver(&exynos4_idle_driver); if (ret) { - printk(KERN_ERR "CPUidle failed to register driver\n"); + dev_err(&pdev->dev, "failed to register cpuidle driver\n"); return ret; } @@ -219,11 +220,20 @@ static int __init exynos4_init_cpuidle(void) ret = cpuidle_register_device(device); if (ret) { - printk(KERN_ERR "CPUidle register device failed\n"); + dev_err(&pdev->dev, "failed to register cpuidle device\n"); return ret; } } return 0; } -device_initcall(exynos4_init_cpuidle); + +static struct platform_driver exynos_cpuidle_driver = { + .probe = exynos_cpuidle_probe, + .driver = { + .name = "exynos_cpuidle", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(exynos_cpuidle_driver); diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c index 4b8f6e2ca16..4603e6bd424 100644 --- a/arch/arm/mach-exynos/mach-exynos4-dt.c +++ b/arch/arm/mach-exynos/mach-exynos4-dt.c @@ -21,6 +21,8 @@ static void __init exynos4_dt_machine_init(void) { + exynos_cpuidle_init(); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c index 7976ab33319..1fe075a70c1 100644 --- a/arch/arm/mach-exynos/mach-exynos5-dt.c +++ b/arch/arm/mach-exynos/mach-exynos5-dt.c @@ -43,6 +43,8 @@ static void __init exynos5_dt_machine_init(void) } } + exynos_cpuidle_init(); + of_platform_populate(NULL, 
of_default_bus_match_table, NULL, NULL); } diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c index 1fd2cf097e3..eb1fa5c8472 100644 --- a/arch/arm/mach-footbridge/netwinder-hw.c +++ b/arch/arm/mach-footbridge/netwinder-hw.c @@ -692,14 +692,14 @@ static void netwinder_led_set(struct led_classdev *cdev, unsigned long flags; u32 reg; - spin_lock_irqsave(&nw_gpio_lock, flags); + raw_spin_lock_irqsave(&nw_gpio_lock, flags); reg = nw_gpio_read(); if (b != LED_OFF) reg &= ~led->mask; else reg |= led->mask; nw_gpio_modify_op(led->mask, reg); - spin_unlock_irqrestore(&nw_gpio_lock, flags); + raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); } static enum led_brightness netwinder_led_get(struct led_classdev *cdev) @@ -709,9 +709,9 @@ static enum led_brightness netwinder_led_get(struct led_classdev *cdev) unsigned long flags; u32 reg; - spin_lock_irqsave(&nw_gpio_lock, flags); + raw_spin_lock_irqsave(&nw_gpio_lock, flags); reg = nw_gpio_read(); - spin_unlock_irqrestore(&nw_gpio_lock, flags); + raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); return (reg & led->mask) ? LED_OFF : LED_FULL; } diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c index 70bfa571b24..f8cb5710d6e 100644 --- a/arch/arm/mach-gemini/gpio.c +++ b/arch/arm/mach-gemini/gpio.c @@ -21,9 +21,9 @@ #include <mach/hardware.h> #include <mach/irqs.h> -#include <mach/gpio.h> #define GPIO_BASE(x) IO_ADDRESS(GEMINI_GPIO_BASE(x)) +#define irq_to_gpio(x) ((x) - GPIO_IRQ_BASE) /* GPIO registers definition */ #define GPIO_DATA_OUT 0x0 diff --git a/arch/arm/mach-gemini/include/mach/gpio.h b/arch/arm/mach-gemini/include/mach/gpio.h deleted file mode 100644 index 40a0527bada..00000000000 --- a/arch/arm/mach-gemini/include/mach/gpio.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Gemini gpiolib specific defines - * - * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef __MACH_GPIO_H__ -#define __MACH_GPIO_H__ - -#include <mach/irqs.h> - -#define gpio_to_irq(x) ((x) + GPIO_IRQ_BASE) -#define irq_to_gpio(x) ((x) - GPIO_IRQ_BASE) - -#endif /* __MACH_GPIO_H__ */ diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig index fe98df44579..0aded64a9eb 100644 --- a/arch/arm/mach-highbank/Kconfig +++ b/arch/arm/mach-highbank/Kconfig @@ -4,11 +4,12 @@ config ARCH_HIGHBANK select ARCH_HAS_CPUFREQ select ARCH_HAS_HOLES_MEMORYMODEL select ARCH_HAS_OPP + select ARCH_SUPPORTS_BIG_ENDIAN select ARCH_WANT_OPTIONAL_GPIOLIB select ARM_AMBA - select ARM_ERRATA_764369 + select ARM_ERRATA_764369 if SMP select ARM_ERRATA_775420 - select ARM_ERRATA_798181 + select ARM_ERRATA_798181 if SMP select ARM_GIC select ARM_PSCI select ARM_TIMER_SP804 diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index bbe1f5bb799..1789e2b3190 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -102,8 +102,8 @@ obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o ifeq ($(CONFIG_PM),y) obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o -# i.MX6SL reuses pm-imx6q.c -obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o +# i.MX6SL reuses i.MX6Q code +obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o endif # i.MX5 based machines diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index d756d91fd74..04cfd0fcb0e 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c @@ -122,13 +122,14 @@ static struct clk_div_table clk_enet_ref_table[] = { { .val = 1, .div = 10, }, { .val = 2, .div = 5, }, { .val = 3, .div = 4, }, + { /* sentinel */ } }; static struct clk_div_table post_div_table[] = { { .val = 2, .div = 1, }, { .val = 1, .div = 2, }, { .val = 0, .div = 4, }, - { } + { /* sentinel */ } }; static struct clk_div_table video_div_table[] = { @@ -136,7 +137,7 @@ static struct clk_div_table video_div_table[] = { { .val = 1, .div = 2, }, { .val = 2, .div = 1, }, { .val = 3, .div = 4, }, - { } + { /* sentinel */ } }; static void __init imx6q_clocks_init(struct device_node *ccm_node) @@ -298,7 +299,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3); clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); - clk[can_root] = imx_clk_divider("can_root", "pll3_usb_otg", base + 0x20, 2, 6); + clk[can_root] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6); clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3); clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3); diff --git a/arch/arm/mach-imx/clk-pllv3.c b/arch/arm/mach-imx/clk-pllv3.c index f6640b6a7b3..61364050fcc 100644 --- a/arch/arm/mach-imx/clk-pllv3.c +++ b/arch/arm/mach-imx/clk-pllv3.c @@ -12,6 +12,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/jiffies.h> @@ -45,33 +46,49 @@ struct clk_pllv3 { #define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw) +static int clk_pllv3_wait_lock(struct clk_pllv3 *pll) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(10); + u32 val = readl_relaxed(pll->base) & BM_PLL_POWER; + + /* No need to wait for lock when pll is not powered 
up */ + if ((pll->powerup_set && !val) || (!pll->powerup_set && val)) + return 0; + + /* Wait for PLL to lock */ + do { + if (readl_relaxed(pll->base) & BM_PLL_LOCK) + break; + if (time_after(jiffies, timeout)) + break; + usleep_range(50, 500); + } while (1); + + return readl_relaxed(pll->base) & BM_PLL_LOCK ? 0 : -ETIMEDOUT; +} + static int clk_pllv3_prepare(struct clk_hw *hw) { struct clk_pllv3 *pll = to_clk_pllv3(hw); - unsigned long timeout; u32 val; + int ret; val = readl_relaxed(pll->base); - val &= ~BM_PLL_BYPASS; if (pll->powerup_set) val |= BM_PLL_POWER; else val &= ~BM_PLL_POWER; writel_relaxed(val, pll->base); - timeout = jiffies + msecs_to_jiffies(10); - /* Wait for PLL to lock */ - do { - if (readl_relaxed(pll->base) & BM_PLL_LOCK) - break; - if (time_after(jiffies, timeout)) - break; - } while (1); + ret = clk_pllv3_wait_lock(pll); + if (ret) + return ret; - if (readl_relaxed(pll->base) & BM_PLL_LOCK) - return 0; - else - return -ETIMEDOUT; + val = readl_relaxed(pll->base); + val &= ~BM_PLL_BYPASS; + writel_relaxed(val, pll->base); + + return 0; } static void clk_pllv3_unprepare(struct clk_hw *hw) @@ -146,7 +163,7 @@ static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate, val |= div; writel_relaxed(val, pll->base); - return 0; + return clk_pllv3_wait_lock(pll); } static const struct clk_ops clk_pllv3_ops = { @@ -202,7 +219,7 @@ static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate, val |= div; writel_relaxed(val, pll->base); - return 0; + return clk_pllv3_wait_lock(pll); } static const struct clk_ops clk_pllv3_sys_ops = { @@ -276,7 +293,7 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate, writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET); writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET); - return 0; + return clk_pllv3_wait_lock(pll); } static const struct clk_ops clk_pllv3_av_ops = { diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 7cbe22d0c6e..24a7899e36a 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -127,11 +127,6 @@ static inline void imx_smp_prepare(void) {} static inline void imx_scu_standby_enable(void) {} #endif void imx_src_init(void); -#ifdef CONFIG_HAVE_IMX_SRC -void imx_src_prepare_restart(void); -#else -static inline void imx_src_prepare_restart(void) {} -#endif void imx_gpc_init(void); void imx_gpc_pre_suspend(void); void imx_gpc_post_resume(void); diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 0f9f24116da..d0cfb225ec9 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c @@ -22,7 +22,7 @@ #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/phy.h> #include <linux/reboot.h> #include <linux/regmap.h> @@ -176,7 +176,7 @@ static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev) val = readl_relaxed(base + OCOTP_CFG3); val >>= OCOTP_CFG3_SPEED_SHIFT; if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ) - if (opp_disable(cpu_dev, 1200000000)) + if (dev_pm_opp_disable(cpu_dev, 1200000000)) pr_warn("failed to disable 1.2 GHz OPP\n"); put_node: diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c index 19bb6441a7d..c5f95674e9b 100644 --- a/arch/arm/mach-imx/mach-pca100.c +++ b/arch/arm/mach-imx/mach-pca100.c @@ -20,7 +20,7 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> 
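The only change to these DaVinci, i.MX, Kirkwood and OMAP board files is the relocation of the AT24 EEPROM platform-data header from <linux/i2c/at24.h> to <linux/platform_data/at24.h>; the structure it declares stays the same. A minimal sketch (not part of this patch) of the usual consumer under the new path, with an illustrative 24c32 on bus 0 at address 0x50:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/platform_data/at24.h>

/* 24c32: 32 Kbit EEPROM, 32-byte pages, 16-bit addressing */
static struct at24_platform_data board_eeprom = {
	.byte_len	= 4096,
	.page_size	= 32,
	.flags		= AT24_FLAG_ADDR16,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c32", 0x50),
		.platform_data	= &board_eeprom,
	},
};

/* registered from the board init, e.g.:
 *	i2c_register_board_info(0, board_i2c_devices,
 *				ARRAY_SIZE(board_i2c_devices));
 */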
#include <linux/dma-mapping.h> #include <linux/spi/spi.h> #include <linux/spi/eeprom.h> diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c index 45303bd6290..639a3dfb009 100644 --- a/arch/arm/mach-imx/mach-pcm037.c +++ b/arch/arm/mach-imx/mach-pcm037.c @@ -23,7 +23,7 @@ #include <linux/smsc911x.h> #include <linux/interrupt.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/irq.h> diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c index e805ac273e9..592ddbe031a 100644 --- a/arch/arm/mach-imx/mach-pcm038.c +++ b/arch/arm/mach-imx/mach-pcm038.c @@ -18,7 +18,7 @@ */ #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/io.h> #include <linux/mtd/plat-ram.h> #include <linux/mtd/physmap.h> diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c index b726cb1c5fd..ac504b67326 100644 --- a/arch/arm/mach-imx/mach-pcm043.c +++ b/arch/arm/mach-imx/mach-pcm043.c @@ -24,7 +24,7 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c index 0910761e828..8825d1217d1 100644 --- a/arch/arm/mach-imx/mach-vpr200.c +++ b/arch/arm/mach-imx/mach-vpr200.c @@ -29,7 +29,7 @@ #include <asm/mach/time.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/mfd/mc13xxx.h> #include "common.h" diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index 4754373e7e7..45f7f4e0a44 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c @@ -115,21 +115,6 @@ void imx_set_cpu_arg(int cpu, u32 arg) writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); } -void imx_src_prepare_restart(void) -{ - u32 val; - - /* clear enable bits of secondary cores */ - spin_lock(&scr_lock); - val = readl_relaxed(src_base + SRC_SCR); - val &= ~(0x7 << BP_SRC_SCR_CORE1_ENABLE); - writel_relaxed(val, src_base + SRC_SCR); - spin_unlock(&scr_lock); - - /* clear persistent entry register of primary core */ - writel_relaxed(0, src_base + SRC_GPR1); -} - void __init imx_src_init(void) { struct device_node *np; diff --git a/arch/arm/mach-imx/system.c b/arch/arm/mach-imx/system.c index e6edcd38b28..5e3027d3692 100644 --- a/arch/arm/mach-imx/system.c +++ b/arch/arm/mach-imx/system.c @@ -42,9 +42,6 @@ void mxc_restart(enum reboot_mode mode, const char *cmd) { unsigned int wcr_enable; - if (cpu_is_imx6q() || cpu_is_imx6dl()) - imx_src_prepare_restart(); - if (wdog_clk) clk_enable(wdog_clk); @@ -55,7 +52,14 @@ void mxc_restart(enum reboot_mode mode, const char *cmd) /* Assert SRS signal */ __raw_writew(wcr_enable, wdog_base); - /* write twice to ensure the request will not get ignored */ + /* + * Due to imx6q errata ERR004346 (WDOG: WDOG SRS bit requires to be + * written twice), we add another two writes to ensure there must be at + * least two writes happen in the same one 32kHz clock period. We save + * the target check here, since the writes shouldn't be a huge burden + * for other platforms. + */ + __raw_writew(wcr_enable, wdog_base); __raw_writew(wcr_enable, wdog_base); /* wait for reset to assert... 
*/ diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index 1df6e7602ca..4fc0a195de0 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c @@ -198,7 +198,8 @@ static struct mmci_platform_data mmc_data = { static void cp_clcd_enable(struct clcd_fb *fb) { struct fb_var_screeninfo *var = &fb->fb.var; - u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2; + u32 val = CM_CTRL_STATIC1 | CM_CTRL_STATIC2 + | CM_CTRL_LCDEN0 | CM_CTRL_LCDEN1; if (var->bits_per_pixel <= 8 || (var->bits_per_pixel == 16 && var->green.length == 5)) diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index c9c5a33bc80..c5e01b24d9f 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -808,22 +808,6 @@ static u8 __init pci_v3_swizzle(struct pci_dev *dev, u8 *pinp) return pci_common_swizzle(dev, pinp); } -static int __init pci_v3_map_irq_dt(const struct pci_dev *dev, u8 slot, u8 pin) -{ - struct of_irq oirq; - int ret; - - ret = of_irq_map_pci(dev, &oirq); - if (ret) { - dev_err(&dev->dev, "of_irq_map_pci() %d\n", ret); - /* Proper return code 0 == NO_IRQ */ - return 0; - } - - return irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); -} - static struct hw_pci pci_v3 __initdata = { .swizzle = pci_v3_swizzle, .setup = pci_v3_setup, @@ -914,7 +898,7 @@ static int __init pci_v3_probe(struct platform_device *pdev) return -EINVAL; } - pci_v3.map_irq = pci_v3_map_irq_dt; + pci_v3.map_irq = of_irq_parse_and_map_pci; pci_common_init_dev(&pdev->dev, &pci_v3); return 0; diff --git a/arch/arm/mach-iop32x/em7210.c b/arch/arm/mach-iop32x/em7210.c index 31fbb6c61b2..177cd073a83 100644 --- a/arch/arm/mach-iop32x/em7210.c +++ b/arch/arm/mach-iop32x/em7210.c @@ -32,6 +32,7 @@ #include <asm/mach/time.h> #include <asm/mach-types.h> #include <mach/time.h> +#include "gpio-iop32x.h" static void __init em7210_timer_init(void) { @@ -183,6 +184,7 @@ void em7210_power_off(void) static void __init em7210_init_machine(void) { + register_iop32x_gpio(); platform_device_register(&em7210_serial_device); platform_device_register(&iop3xx_i2c0_device); platform_device_register(&iop3xx_i2c1_device); diff --git a/arch/arm/mach-iop32x/glantank.c b/arch/arm/mach-iop32x/glantank.c index ac304705fe6..547b2342d61 100644 --- a/arch/arm/mach-iop32x/glantank.c +++ b/arch/arm/mach-iop32x/glantank.c @@ -34,6 +34,7 @@ #include <asm/mach-types.h> #include <asm/page.h> #include <mach/time.h> +#include "gpio-iop32x.h" /* * GLAN Tank timer tick configuration. 
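From here on, each iop32x board gains an include of the new local gpio-iop32x.h (added a little further down) and a register_iop32x_gpio() call in its machine init, so the memory-mapped GPIO block is handled by the generic "gpio-iop" platform driver instead of the removed mach/gpio.h accessors. A hedged sketch of the resulting board-side pattern; the GPIO number and label are illustrative, and real board code also guards the initcall with a machine_is_*() check:

#include <linux/gpio.h>
#include <linux/init.h>
#include "gpio-iop32x.h"

static void __init board_init_machine(void)
{
	/* register the "gpio-iop" platform device over the GPIO registers */
	register_iop32x_gpio();
	/* ... i2c, flash and serial registration as before ... */
}

/* gpiolib consumers live in an initcall, mirroring the N2100 conversion below */
static int __init board_gpio_init(void)
{
	int ret;

	ret = gpio_request(13, "board led");	/* 13: illustrative line */
	if (ret)
		return ret;
	return gpio_direction_output(13, 1);
}
device_initcall(board_gpio_init);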
@@ -187,6 +188,7 @@ static void glantank_power_off(void) static void __init glantank_init_machine(void) { + register_iop32x_gpio(); platform_device_register(&iop3xx_i2c0_device); platform_device_register(&iop3xx_i2c1_device); platform_device_register(&glantank_flash_device); diff --git a/arch/arm/mach-iop32x/gpio-iop32x.h b/arch/arm/mach-iop32x/gpio-iop32x.h new file mode 100644 index 00000000000..3c7309c0202 --- /dev/null +++ b/arch/arm/mach-iop32x/gpio-iop32x.h @@ -0,0 +1,10 @@ +static struct resource iop32x_gpio_res[] = { + DEFINE_RES_MEM((IOP3XX_PERIPHERAL_PHYS_BASE + 0x07c4), 0x10), +}; + +static inline void register_iop32x_gpio(void) +{ + platform_device_register_simple("gpio-iop", 0, + iop32x_gpio_res, + ARRAY_SIZE(iop32x_gpio_res)); +} diff --git a/arch/arm/mach-iop32x/include/mach/gpio.h b/arch/arm/mach-iop32x/include/mach/gpio.h deleted file mode 100644 index 708f4ec9db1..00000000000 --- a/arch/arm/mach-iop32x/include/mach/gpio.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_ARCH_IOP32X_GPIO_H -#define __ASM_ARCH_IOP32X_GPIO_H - -#include <asm/hardware/iop3xx-gpio.h> - -#endif diff --git a/arch/arm/mach-iop32x/include/mach/iop32x.h b/arch/arm/mach-iop32x/include/mach/iop32x.h index 941f363aca5..56ec864ec31 100644 --- a/arch/arm/mach-iop32x/include/mach/iop32x.h +++ b/arch/arm/mach-iop32x/include/mach/iop32x.h @@ -19,7 +19,6 @@ * Peripherals that are shared between the iop32x and iop33x but * located at different addresses. */ -#define IOP3XX_GPIO_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07c4 + (reg)) #define IOP3XX_TIMER_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07e0 + (reg)) #include <asm/hardware/iop3xx.h> diff --git a/arch/arm/mach-iop32x/iq31244.c b/arch/arm/mach-iop32x/iq31244.c index f2cd2966212..0e1392b20d1 100644 --- a/arch/arm/mach-iop32x/iq31244.c +++ b/arch/arm/mach-iop32x/iq31244.c @@ -37,6 +37,7 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <mach/time.h> +#include "gpio-iop32x.h" /* * Until March of 2007 iq31244 platforms and ep80219 platforms shared the @@ -283,6 +284,7 @@ void ep80219_power_off(void) static void __init iq31244_init_machine(void) { + register_iop32x_gpio(); platform_device_register(&iop3xx_i2c0_device); platform_device_register(&iop3xx_i2c1_device); platform_device_register(&iq31244_flash_device); diff --git a/arch/arm/mach-iop32x/iq80321.c b/arch/arm/mach-iop32x/iq80321.c index 015435de90d..66782ff1f46 100644 --- a/arch/arm/mach-iop32x/iq80321.c +++ b/arch/arm/mach-iop32x/iq80321.c @@ -33,6 +33,7 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <mach/time.h> +#include "gpio-iop32x.h" /* * IQ80321 timer tick configuration. @@ -170,6 +171,7 @@ static struct platform_device iq80321_serial_device = { static void __init iq80321_init_machine(void) { + register_iop32x_gpio(); platform_device_register(&iop3xx_i2c0_device); platform_device_register(&iop3xx_i2c1_device); platform_device_register(&iq80321_flash_device); diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 069144300b7..c1cd80ecc21 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -30,6 +30,7 @@ #include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/io.h> +#include <linux/gpio.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/mach/arch.h> @@ -40,6 +41,7 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <mach/time.h> +#include "gpio-iop32x.h" /* * N2100 timer tick configuration. 
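The N2100 hunks that follow (and the ixp4xx DSM-G600 and NAS100d conversions later in this patch) keep polling the power button from a kernel timer; the DSM-G600 and NAS100d comments note the line cannot raise an interrupt, so only the accessors change, from the old gpio_line_*() helpers to gpiolib calls. A condensed sketch of that polling pattern, assuming a hypothetical PWR_BTN_GPIO and omitting the hold-down debounce counter the real boards use:

#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/reboot.h>
#include <linux/timer.h>

#define PWR_BTN_GPIO	14	/* hypothetical line number */

static struct timer_list pwr_btn_timer;

static void pwr_btn_poll(unsigned long unused)
{
	if (gpio_get_value(PWR_BTN_GPIO) == 0) {
		ctrl_alt_del();		/* pressed: request an orderly reboot */
		return;
	}
	mod_timer(&pwr_btn_timer, jiffies + msecs_to_jiffies(500));
}

static int __init pwr_btn_init(void)
{
	int ret;

	ret = gpio_request(PWR_BTN_GPIO, "power button");
	if (ret)
		return ret;
	ret = gpio_direction_input(PWR_BTN_GPIO);
	if (ret)
		return ret;

	init_timer(&pwr_btn_timer);
	pwr_btn_timer.function = pwr_btn_poll;
	mod_timer(&pwr_btn_timer, jiffies + msecs_to_jiffies(500));
	return 0;
}
device_initcall(pwr_btn_init);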
@@ -288,8 +290,14 @@ static void n2100_power_off(void) static void n2100_restart(enum reboot_mode mode, const char *cmd) { - gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW); - gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT); + int ret; + + ret = gpio_direction_output(N2100_HARDWARE_RESET, 0); + if (ret) { + pr_crit("could not drive reset GPIO low\n"); + return; + } + /* Wait for reset to happen */ while (1) ; } @@ -299,7 +307,7 @@ static struct timer_list power_button_poll_timer; static void power_button_poll(unsigned long dummy) { - if (gpio_line_get(N2100_POWER_BUTTON) == 0) { + if (gpio_get_value(N2100_POWER_BUTTON) == 0) { ctrl_alt_del(); return; } @@ -308,9 +316,37 @@ static void power_button_poll(unsigned long dummy) add_timer(&power_button_poll_timer); } +static int __init n2100_request_gpios(void) +{ + int ret; + + if (!machine_is_n2100()) + return 0; + + ret = gpio_request(N2100_HARDWARE_RESET, "reset"); + if (ret) + pr_err("could not request reset GPIO\n"); + + ret = gpio_request(N2100_POWER_BUTTON, "power"); + if (ret) + pr_err("could not request power GPIO\n"); + else { + ret = gpio_direction_input(N2100_POWER_BUTTON); + if (ret) + pr_err("could not set power GPIO as input\n"); + } + /* Set up power button poll timer */ + init_timer(&power_button_poll_timer); + power_button_poll_timer.function = power_button_poll; + power_button_poll_timer.expires = jiffies + (HZ / 10); + add_timer(&power_button_poll_timer); + return 0; +} +device_initcall(n2100_request_gpios); static void __init n2100_init_machine(void) { + register_iop32x_gpio(); platform_device_register(&iop3xx_i2c0_device); platform_device_register(&n2100_flash_device); platform_device_register(&n2100_serial_device); @@ -321,11 +357,6 @@ static void __init n2100_init_machine(void) ARRAY_SIZE(n2100_i2c_devices)); pm_power_off = n2100_power_off; - - init_timer(&power_button_poll_timer); - power_button_poll_timer.function = power_button_poll; - power_button_poll_timer.expires = jiffies + (HZ / 10); - add_timer(&power_button_poll_timer); } MACHINE_START(N2100, "Thecus N2100") diff --git a/arch/arm/mach-iop33x/include/mach/gpio.h b/arch/arm/mach-iop33x/include/mach/gpio.h deleted file mode 100644 index ddd55bba9bb..00000000000 --- a/arch/arm/mach-iop33x/include/mach/gpio.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_ARCH_IOP33X_GPIO_H -#define __ASM_ARCH_IOP33X_GPIO_H - -#include <asm/hardware/iop3xx-gpio.h> - -#endif diff --git a/arch/arm/mach-iop33x/include/mach/iop33x.h b/arch/arm/mach-iop33x/include/mach/iop33x.h index a89c0a234bf..c9512265309 100644 --- a/arch/arm/mach-iop33x/include/mach/iop33x.h +++ b/arch/arm/mach-iop33x/include/mach/iop33x.h @@ -18,7 +18,6 @@ * Peripherals that are shared between the iop32x and iop33x but * located at different addresses. 
*/ -#define IOP3XX_GPIO_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x1780 + (reg)) #define IOP3XX_TIMER_REG(reg) (IOP3XX_PERIPHERAL_VIRT_BASE + 0x07d0 + (reg)) #include <asm/hardware/iop3xx.h> diff --git a/arch/arm/mach-iop33x/iq80331.c b/arch/arm/mach-iop33x/iq80331.c index c43304a10fa..e2cb65cfbe2 100644 --- a/arch/arm/mach-iop33x/iq80331.c +++ b/arch/arm/mach-iop33x/iq80331.c @@ -122,8 +122,15 @@ static struct platform_device iq80331_flash_device = { .resource = &iq80331_flash_resource, }; +static struct resource iq80331_gpio_res[] = { + DEFINE_RES_MEM((IOP3XX_PERIPHERAL_PHYS_BASE + 0x1780), 0x10), +}; + static void __init iq80331_init_machine(void) { + platform_device_register_simple("gpio-iop", 0, + iq80331_gpio_res, + ARRAY_SIZE(iq80331_gpio_res)); platform_device_register(&iop3xx_i2c0_device); platform_device_register(&iop3xx_i2c1_device); platform_device_register(&iop33x_uart0_device); diff --git a/arch/arm/mach-iop33x/iq80332.c b/arch/arm/mach-iop33x/iq80332.c index 8192987e78e..0b6269d94f8 100644 --- a/arch/arm/mach-iop33x/iq80332.c +++ b/arch/arm/mach-iop33x/iq80332.c @@ -122,8 +122,15 @@ static struct platform_device iq80332_flash_device = { .resource = &iq80332_flash_resource, }; +static struct resource iq80332_gpio_res[] = { + DEFINE_RES_MEM((IOP3XX_PERIPHERAL_PHYS_BASE + 0x1780), 0x10), +}; + static void __init iq80332_init_machine(void) { + platform_device_register_simple("gpio-iop", 0, + iq80332_gpio_res, + ARRAY_SIZE(iq80332_gpio_res)); platform_device_register(&iop3xx_i2c0_device); platform_device_register(&iop3xx_i2c1_device); platform_device_register(&iop33x_uart0_device); diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig index 30e1ebe3a89..c342dc4e8a4 100644 --- a/arch/arm/mach-ixp4xx/Kconfig +++ b/arch/arm/mach-ixp4xx/Kconfig @@ -1,9 +1,5 @@ if ARCH_IXP4XX -config ARCH_SUPPORTS_BIG_ENDIAN - bool - default y - menu "Intel IXP4xx Implementation Options" comment "IXP4xx Platforms" diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 5327decde5a..9edaf4734fa 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -81,6 +81,44 @@ void __init ixp4xx_map_io(void) iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc)); } +/* + * GPIO-functions + */ +/* + * The following converted to the real HW bits the gpio_line_config + */ +/* GPIO pin types */ +#define IXP4XX_GPIO_OUT 0x1 +#define IXP4XX_GPIO_IN 0x2 + +/* GPIO signal types */ +#define IXP4XX_GPIO_LOW 0 +#define IXP4XX_GPIO_HIGH 1 + +/* GPIO Clocks */ +#define IXP4XX_GPIO_CLK_0 14 +#define IXP4XX_GPIO_CLK_1 15 + +static void gpio_line_config(u8 line, u32 direction) +{ + if (direction == IXP4XX_GPIO_IN) + *IXP4XX_GPIO_GPOER |= (1 << line); + else + *IXP4XX_GPIO_GPOER &= ~(1 << line); +} + +static void gpio_line_get(u8 line, int *value) +{ + *value = (*IXP4XX_GPIO_GPINR >> line) & 0x1; +} + +static void gpio_line_set(u8 line, int value) +{ + if (value == IXP4XX_GPIO_HIGH) + *IXP4XX_GPIO_GPOUTR |= (1 << line); + else if (value == IXP4XX_GPIO_LOW) + *IXP4XX_GPIO_GPOUTR &= ~(1 << line); +} /************************************************************************* * IXP4xx chipset IRQ handling @@ -117,17 +155,6 @@ static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) return -EINVAL; } -int irq_to_gpio(unsigned int irq) -{ - int gpio = (irq < 32) ? 
irq2gpio[irq] : -EINVAL; - - if (gpio == -1) - return -EINVAL; - - return gpio; -} -EXPORT_SYMBOL(irq_to_gpio); - static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type) { int line = irq2gpio[d->irq]; diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index 63de1b3fd06..736dc692d54 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c @@ -26,6 +26,7 @@ #include <linux/reboot.h> #include <linux/i2c.h> #include <linux/i2c-gpio.h> +#include <linux/gpio.h> #include <mach/hardware.h> @@ -161,11 +162,8 @@ static struct platform_device *dsmg600_devices[] __initdata = { static void dsmg600_power_off(void) { - /* enable the pwr cntl gpio */ - gpio_line_config(DSMG600_PO_GPIO, IXP4XX_GPIO_OUT); - - /* poweroff */ - gpio_line_set(DSMG600_PO_GPIO, IXP4XX_GPIO_HIGH); + /* enable the pwr cntl and drive it high */ + gpio_direction_output(DSMG600_PO_GPIO, 1); } /* This is used to make sure the power-button pusher is serious. The button @@ -202,7 +200,7 @@ static void dsmg600_power_handler(unsigned long data) ctrl_alt_del(); /* Change the state of the power LED to "blink" */ - gpio_line_set(DSMG600_LED_PWR_GPIO, IXP4XX_GPIO_LOW); + gpio_set_value(DSMG600_LED_PWR_GPIO, 0); } else { power_button_countdown = PBUTTON_HOLDDOWN_COUNT; } @@ -228,6 +226,40 @@ static void __init dsmg600_timer_init(void) ixp4xx_timer_init(); } +static int __init dsmg600_gpio_init(void) +{ + if (!machine_is_dsmg600()) + return 0; + + gpio_request(DSMG600_RB_GPIO, "reset button"); + if (request_irq(gpio_to_irq(DSMG600_RB_GPIO), &dsmg600_reset_handler, + IRQF_DISABLED | IRQF_TRIGGER_LOW, + "DSM-G600 reset button", NULL) < 0) { + + printk(KERN_DEBUG "Reset Button IRQ %d not available\n", + gpio_to_irq(DSMG600_RB_GPIO)); + } + + /* + * The power button on the D-Link DSM-G600 is on GPIO 15, but + * it cannot handle interrupts on that GPIO line. So we'll + * have to poll it with a kernel timer. + */ + + /* Make sure that the power button GPIO is set up as an input */ + gpio_request(DSMG600_PB_GPIO, "power button"); + gpio_direction_input(DSMG600_PB_GPIO); + /* Request poweroff GPIO line */ + gpio_request(DSMG600_PO_GPIO, "power off button"); + + /* Set the initial value for the power button IRQ handler */ + power_button_countdown = PBUTTON_HOLDDOWN_COUNT; + + mod_timer(&dsmg600_power_timer, jiffies + msecs_to_jiffies(500)); + return 0; +} +device_initcall(dsmg600_gpio_init); + static void __init dsmg600_init(void) { ixp4xx_sys_init(); @@ -251,27 +283,6 @@ static void __init dsmg600_init(void) platform_add_devices(dsmg600_devices, ARRAY_SIZE(dsmg600_devices)); pm_power_off = dsmg600_power_off; - - if (request_irq(gpio_to_irq(DSMG600_RB_GPIO), &dsmg600_reset_handler, - IRQF_DISABLED | IRQF_TRIGGER_LOW, - "DSM-G600 reset button", NULL) < 0) { - - printk(KERN_DEBUG "Reset Button IRQ %d not available\n", - gpio_to_irq(DSMG600_RB_GPIO)); - } - - /* The power button on the D-Link DSM-G600 is on GPIO 15, but - * it cannot handle interrupts on that GPIO line. So we'll - * have to poll it with a kernel timer. 
- */ - - /* Make sure that the power button GPIO is set up as an input */ - gpio_line_config(DSMG600_PB_GPIO, IXP4XX_GPIO_IN); - - /* Set the initial value for the power button IRQ handler */ - power_button_countdown = PBUTTON_HOLDDOWN_COUNT; - - mod_timer(&dsmg600_power_timer, jiffies + msecs_to_jiffies(500)); } MACHINE_START(DSMG600, "D-Link DSM-G600 RevA") diff --git a/arch/arm/mach-ixp4xx/include/mach/platform.h b/arch/arm/mach-ixp4xx/include/mach/platform.h index 4c4c6a6f452..75c4c6572ad 100644 --- a/arch/arm/mach-ixp4xx/include/mach/platform.h +++ b/arch/arm/mach-ixp4xx/include/mach/platform.h @@ -131,44 +131,5 @@ struct pci_sys_data; extern int ixp4xx_setup(int nr, struct pci_sys_data *sys); extern struct pci_ops ixp4xx_ops; -/* - * GPIO-functions - */ -/* - * The following converted to the real HW bits the gpio_line_config - */ -/* GPIO pin types */ -#define IXP4XX_GPIO_OUT 0x1 -#define IXP4XX_GPIO_IN 0x2 - -/* GPIO signal types */ -#define IXP4XX_GPIO_LOW 0 -#define IXP4XX_GPIO_HIGH 1 - -/* GPIO Clocks */ -#define IXP4XX_GPIO_CLK_0 14 -#define IXP4XX_GPIO_CLK_1 15 - -static inline void gpio_line_config(u8 line, u32 direction) -{ - if (direction == IXP4XX_GPIO_IN) - *IXP4XX_GPIO_GPOER |= (1 << line); - else - *IXP4XX_GPIO_GPOER &= ~(1 << line); -} - -static inline void gpio_line_get(u8 line, int *value) -{ - *value = (*IXP4XX_GPIO_GPINR >> line) & 0x1; -} - -static inline void gpio_line_set(u8 line, int value) -{ - if (value == IXP4XX_GPIO_HIGH) - *IXP4XX_GPIO_GPOUTR |= (1 << line); - else if (value == IXP4XX_GPIO_LOW) - *IXP4XX_GPIO_GPOUTR &= ~(1 << line); -} - #endif // __ASSEMBLY__ diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index 22d688b7d51..e7b8befa872 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c @@ -20,6 +20,7 @@ #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/delay.h> +#include <linux/gpio.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/memory.h> @@ -80,10 +81,10 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) if (ctrl & NAND_CTRL_CHANGE) { if (ctrl & NAND_NCE) { - gpio_line_set(IXDP425_NAND_NCE_PIN, IXP4XX_GPIO_LOW); + gpio_set_value(IXDP425_NAND_NCE_PIN, 0); udelay(5); } else - gpio_line_set(IXDP425_NAND_NCE_PIN, IXP4XX_GPIO_HIGH); + gpio_set_value(IXDP425_NAND_NCE_PIN, 1); offset = (ctrl & NAND_CLE) ? IXDP425_NAND_CMD_BYTE : 0; offset |= (ctrl & NAND_ALE) ? IXDP425_NAND_ADDR_BYTE : 0; @@ -227,7 +228,8 @@ static void __init ixdp425_init(void) ixdp425_flash_nand_resource.start = IXP4XX_EXP_BUS_BASE(3), ixdp425_flash_nand_resource.end = IXP4XX_EXP_BUS_BASE(3) + 0x10 - 1; - gpio_line_config(IXDP425_NAND_NCE_PIN, IXP4XX_GPIO_OUT); + gpio_request(IXDP425_NAND_NCE_PIN, "NAND NCE pin"); + gpio_direction_output(IXDP425_NAND_NCE_PIN, 0); /* Configure expansion bus for NAND Flash */ *IXP4XX_EXP_CS3 = IXP4XX_EXP_BUS_CS_EN | diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c index ed667ce9f57..507cb523353 100644 --- a/arch/arm/mach-ixp4xx/nas100d-setup.c +++ b/arch/arm/mach-ixp4xx/nas100d-setup.c @@ -184,11 +184,8 @@ static void nas100d_power_off(void) { /* This causes the box to drop the power and go dead. 
*/ - /* enable the pwr cntl gpio */ - gpio_line_config(NAS100D_PO_GPIO, IXP4XX_GPIO_OUT); - - /* do the deed */ - gpio_line_set(NAS100D_PO_GPIO, IXP4XX_GPIO_HIGH); + /* enable the pwr cntl gpio and assert power off */ + gpio_direction_output(NAS100D_PO_GPIO, 1); } /* This is used to make sure the power-button pusher is serious. The button @@ -225,7 +222,7 @@ static void nas100d_power_handler(unsigned long data) ctrl_alt_del(); /* Change the state of the power LED to "blink" */ - gpio_line_set(NAS100D_LED_PWR_GPIO, IXP4XX_GPIO_LOW); + gpio_set_value(NAS100D_LED_PWR_GPIO, 0); } else { power_button_countdown = PBUTTON_HOLDDOWN_COUNT; } @@ -242,6 +239,33 @@ static irqreturn_t nas100d_reset_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static int __init nas100d_gpio_init(void) +{ + if (!machine_is_nas100d()) + return 0; + + /* + * The power button on the Iomega NAS100d is on GPIO 14, but + * it cannot handle interrupts on that GPIO line. So we'll + * have to poll it with a kernel timer. + */ + + /* Request the power off GPIO */ + gpio_request(NAS100D_PO_GPIO, "power off"); + + /* Make sure that the power button GPIO is set up as an input */ + gpio_request(NAS100D_PB_GPIO, "power button"); + gpio_direction_input(NAS100D_PB_GPIO); + + /* Set the initial value for the power button IRQ handler */ + power_button_countdown = PBUTTON_HOLDDOWN_COUNT; + + mod_timer(&nas100d_power_timer, jiffies + msecs_to_jiffies(500)); + + return 0; +} +device_initcall(nas100d_gpio_init); + static void __init nas100d_init(void) { uint8_t __iomem *f; @@ -278,19 +302,6 @@ static void __init nas100d_init(void) gpio_to_irq(NAS100D_RB_GPIO)); } - /* The power button on the Iomega NAS100d is on GPIO 14, but - * it cannot handle interrupts on that GPIO line. So we'll - * have to poll it with a kernel timer. - */ - - /* Make sure that the power button GPIO is set up as an input */ - gpio_line_config(NAS100D_PB_GPIO, IXP4XX_GPIO_IN); - - /* Set the initial value for the power button IRQ handler */ - power_button_countdown = PBUTTON_HOLDDOWN_COUNT; - - mod_timer(&nas100d_power_timer, jiffies + msecs_to_jiffies(500)); - /* * Map in a portion of the flash and read the MAC address. * Since it is stored in BE in the flash itself, we need to diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c index 7e55236c26e..ba5f1cda2a9 100644 --- a/arch/arm/mach-ixp4xx/nslu2-setup.c +++ b/arch/arm/mach-ixp4xx/nslu2-setup.c @@ -197,11 +197,8 @@ static void nslu2_power_off(void) { /* This causes the box to drop the power and go dead. */ - /* enable the pwr cntl gpio */ - gpio_line_config(NSLU2_PO_GPIO, IXP4XX_GPIO_OUT); - - /* do the deed */ - gpio_line_set(NSLU2_PO_GPIO, IXP4XX_GPIO_HIGH); + /* enable the pwr cntl gpio and assert power off */ + gpio_direction_output(NSLU2_PO_GPIO, 1); } static irqreturn_t nslu2_power_handler(int irq, void *dev_id) @@ -223,6 +220,16 @@ static irqreturn_t nslu2_reset_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static int __init nslu2_gpio_init(void) +{ + if (!machine_is_nslu2()) + return 0; + + /* Request the power off GPIO */ + return gpio_request(NSLU2_PO_GPIO, "power off"); +} +device_initcall(nslu2_gpio_init); + static void __init nslu2_timer_init(void) { /* The xtal on this machine is non-standard. 
*/ diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c index c12296157d4..5cf0683577e 100644 --- a/arch/arm/mach-keystone/platsmp.c +++ b/arch/arm/mach-keystone/platsmp.c @@ -17,7 +17,6 @@ #include <linux/io.h> #include <asm/smp_plat.h> -#include <asm/prom.h> #include "keystone.h" diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c index 489495976fc..8e3e4331c38 100644 --- a/arch/arm/mach-kirkwood/lacie_v2-common.c +++ b/arch/arm/mach-kirkwood/lacie_v2-common.c @@ -12,7 +12,7 @@ #include <linux/spi/flash.h> #include <linux/spi/spi.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/gpio.h> #include <asm/mach/time.h> #include <mach/kirkwood.h> diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h deleted file mode 100644 index 13219ebf512..00000000000 --- a/arch/arm/mach-mmp/include/mach/gpio.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_MACH_GPIO_H -#define __ASM_MACH_GPIO_H - -#include <asm-generic/gpio.h> - -#include <mach/cputype.h> - -#endif /* __ASM_MACH_GPIO_H */ diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c index 702232996c8..cfadd974f5c 100644 --- a/arch/arm/mach-mmp/ttc_dkb.c +++ b/arch/arm/mach-mmp/ttc_dkb.c @@ -191,7 +191,6 @@ static struct pxa3xx_nand_platform_data dkb_nand_info = { #define SCLK_SOURCE_SELECT(x) (x << 30) /* 0x0 ~ 0x3 */ /* link config */ #define CFG_DUMBMODE(mode) (mode << 28) /* 0x0 ~ 0x6*/ -#define CFG_GRA_SWAPRB(x) (x << 0) /* 1: rbswap enabled */ static struct mmp_mach_path_config dkb_disp_config[] = { [0] = { .name = "mmp-parallel", @@ -199,8 +198,7 @@ static struct mmp_mach_path_config dkb_disp_config[] = { .output_type = PATH_OUT_PARALLEL, .path_config = CFG_IOPADMODE(0x1) | SCLK_SOURCE_SELECT(0x1), - .link_config = CFG_DUMBMODE(0x2) - | CFG_GRA_SWAPRB(0x1), + .link_config = CFG_DUMBMODE(0x2), }, }; diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index 9eb63d72460..5e269d7263c 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ b/arch/arm/mach-mvebu/Kconfig @@ -1,5 +1,6 @@ config ARCH_MVEBU bool "Marvell SOCs with Device Tree support" if ARCH_MULTI_V7 + select ARCH_SUPPORTS_BIG_ENDIAN select CLKSRC_MMIO select COMMON_CLK select GENERIC_CLOCKEVENTS diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S index 5476669ba90..ee7598fe75d 100644 --- a/arch/arm/mach-mvebu/coherency_ll.S +++ b/arch/arm/mach-mvebu/coherency_ll.S @@ -20,6 +20,8 @@ #define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0 #define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4 +#include <asm/assembler.h> + .text /* * r0: Coherency fabric base register address @@ -29,6 +31,7 @@ ENTRY(ll_set_cpu_coherent) /* Create bit by cpu index */ mov r3, #(1 << 24) lsl r1, r3, r1 +ARM_BE8(rev r1, r1) /* Add CPU to SMP group - Atomic */ add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S index 8a1b0c96e9e..3dd80df428f 100644 --- a/arch/arm/mach-mvebu/headsmp.S +++ b/arch/arm/mach-mvebu/headsmp.S @@ -21,12 +21,16 @@ #include <linux/linkage.h> #include <linux/init.h> +#include <asm/assembler.h> + /* * Armada XP specific entry point for secondary CPUs. 
* We add the CPU to the coherency fabric and then jump to secondary * startup */ ENTRY(armada_xp_secondary_startup) + ARM_BE8(setend be ) @ go BE8 if entered LE + /* Get coherency fabric base physical address */ adr r0, 1f ldr r1, [r0] diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c index a7ce6928668..d68909b095f 100644 --- a/arch/arm/mach-omap1/board-osk.c +++ b/arch/arm/mach-omap1/board-osk.c @@ -300,7 +300,7 @@ static struct omap_lcd_config osk_lcd_config __initdata = { #ifdef CONFIG_OMAP_OSK_MISTRAL #include <linux/input.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index e15ac005ef1..1f25f3e99c0 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -40,7 +40,7 @@ omap-4-5-common = omap4-common.o omap-wakeupgen.o obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common) $(smp-y) sleep44xx.o obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common) $(smp-y) sleep44xx.o obj-$(CONFIG_SOC_AM43XX) += $(omap-4-5-common) -obj-$(CONFIG_SOC_DRA7XX) += $(omap-4-5-common) $(smp-y) +obj-$(CONFIG_SOC_DRA7XX) += $(omap-4-5-common) $(smp-y) sleep44xx.o plus_sec := $(call as-instr,.arch_extension sec,+sec) AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec) diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index 33d159e2386..8dd0ec858cf 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -25,7 +25,7 @@ #include <linux/gpio.h> #include <linux/platform_data/gpio-omap.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/i2c/twl.h> #include <linux/regulator/fixed.h> #include <linux/regulator/machine.h> diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c index fc20a61f6b2..ac82512b9c8 100644 --- a/arch/arm/mach-omap2/board-flash.c +++ b/arch/arm/mach-omap2/board-flash.c @@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs, board_nand_data.nr_parts = nr_parts; board_nand_data.devsize = nand_type; - board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT; + board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW; gpmc_nand_init(&board_nand_data, gpmc_t); } #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */ diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c index 87e41a8b8d4..f7808349a73 100644 --- a/arch/arm/mach-omap2/board-h4.c +++ b/arch/arm/mach-omap2/board-h4.c @@ -20,7 +20,7 @@ #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/i2c.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/input.h> #include <linux/err.h> #include <linux/clk.h> diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 8b9cd0690ce..d6ed819ff15 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -25,7 +25,7 @@ #include <linux/gpio.h> #include <linux/input.h> #include <linux/gpio_keys.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/cpu.h> #include <linux/mtd/mtd.h> @@ -510,17 +510,17 @@ static int __init beagle_opp_init(void) mpu_dev = get_cpu_device(0); iva_dev = omap_device_get_by_hwmod_name("iva"); - if (IS_ERR(mpu_dev) || IS_ERR(iva_dev)) { + if (!mpu_dev || IS_ERR(iva_dev)) { pr_err("%s: Aiee.. no mpu/dsp devices? 
%p %p\n", __func__, mpu_dev, iva_dev); return -ENODEV; } /* Enable MPU 1GHz and lower opps */ - r = opp_enable(mpu_dev, 800000000); + r = dev_pm_opp_enable(mpu_dev, 800000000); /* TODO: MPU 1GHz needs SR and ABB */ /* Enable IVA 800MHz and lower opps */ - r |= opp_enable(iva_dev, 660000000); + r |= dev_pm_opp_enable(iva_dev, 660000000); /* TODO: DSP 800MHz needs SR and ABB */ if (r) { pr_err("%s: failed to enable higher opp %d\n", @@ -529,8 +529,8 @@ static int __init beagle_opp_init(void) * Cleanup - disable the higher freqs - we dont care * about the results */ - opp_disable(mpu_dev, 800000000); - opp_disable(iva_dev, 660000000); + dev_pm_opp_disable(mpu_dev, 800000000); + dev_pm_opp_disable(iva_dev, 660000000); } } return 0; diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c index ba8342fef79..119efaf5808 100644 --- a/arch/arm/mach-omap2/board-omap3stalker.c +++ b/arch/arm/mach-omap2/board-omap3stalker.c @@ -32,7 +32,7 @@ #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/smsc911x.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/usb/phy.h> #include <asm/mach-types.h> diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 5c0d0e12042..f093af17f5e 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -213,29 +213,11 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = { } }; -static int rx51_lp5523_setup(void) -{ - return gpio_request_one(RX51_LP5523_CHIP_EN_GPIO, GPIOF_DIR_OUT, - "lp5523_enable"); -} - -static void rx51_lp5523_release(void) -{ - gpio_free(RX51_LP5523_CHIP_EN_GPIO); -} - -static void rx51_lp5523_enable(bool state) -{ - gpio_set_value(RX51_LP5523_CHIP_EN_GPIO, !!state); -} - static struct lp55xx_platform_data rx51_lp5523_platform_data = { .led_config = rx51_lp5523_led_config, .num_channels = ARRAY_SIZE(rx51_lp5523_led_config), .clock_mode = LP55XX_CLOCK_AUTO, - .setup_resources = rx51_lp5523_setup, - .release_resources = rx51_lp5523_release, - .enable = rx51_lp5523_enable, + .enable_gpio = RX51_LP5523_CHIP_EN_GPIO, }; #endif diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c index 03a2829beb8..3b05aea56d1 100644 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ b/arch/arm/mach-omap2/cclock3xxx_data.c @@ -381,6 +381,42 @@ static struct clk_hw_omap dpll4_ck_hw = { DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops); +static const struct clk_div_table dpll4_mx_ck_div_table[] = { + { .div = 1, .val = 1 }, + { .div = 2, .val = 2 }, + { .div = 3, .val = 3 }, + { .div = 4, .val = 4 }, + { .div = 5, .val = 5 }, + { .div = 6, .val = 6 }, + { .div = 7, .val = 7 }, + { .div = 8, .val = 8 }, + { .div = 9, .val = 9 }, + { .div = 10, .val = 10 }, + { .div = 11, .val = 11 }, + { .div = 12, .val = 12 }, + { .div = 13, .val = 13 }, + { .div = 14, .val = 14 }, + { .div = 15, .val = 15 }, + { .div = 16, .val = 16 }, + { .div = 17, .val = 17 }, + { .div = 18, .val = 18 }, + { .div = 19, .val = 19 }, + { .div = 20, .val = 20 }, + { .div = 21, .val = 21 }, + { .div = 22, .val = 22 }, + { .div = 23, .val = 23 }, + { .div = 24, .val = 24 }, + { .div = 25, .val = 25 }, + { .div = 26, .val = 26 }, + { .div = 27, .val = 27 }, + { .div = 28, .val = 28 }, + { .div = 29, .val = 29 }, + { .div = 30, .val = 30 }, + { .div = 31, .val = 31 }, + { .div = 32, .val = 32 }, + { .div = 0 }, +}; + DEFINE_CLK_DIVIDER(dpll4_m5_ck, 
"dpll4_ck", &dpll4_ck, 0x0, OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL), OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH, @@ -524,10 +560,10 @@ static const struct clksel_rate clkout2_src_54m_rates[] = { { .div = 0 } }; -DEFINE_CLK_DIVIDER(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0, +DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0, OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); + 0, dpll4_mx_ck_div_table, NULL); static struct clk dpll4_m3x2_ck; @@ -847,10 +883,10 @@ static struct clk dpll3_m3x2_ck_3630 = { DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1); -DEFINE_CLK_DIVIDER(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0, +DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0, OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); + 0, dpll4_mx_ck_div_table, NULL); static struct clk dpll4_m4x2_ck; @@ -869,7 +905,8 @@ static struct clk_hw_omap dpll4_m4x2_ck_hw = { .clkdm_name = "dpll4_clkdm", }; -DEFINE_STRUCT_CLK(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names, dpll4_m5x2_ck_ops); +DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names, + dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); static struct clk dpll4_m4x2_ck_3630 = { .name = "dpll4_m4x2_ck", @@ -877,6 +914,7 @@ static struct clk dpll4_m4x2_ck_3630 = { .parent_names = dpll4_m4x2_ck_parent_names, .num_parents = ARRAY_SIZE(dpll4_m4x2_ck_parent_names), .ops = &dpll4_m5x2_ck_3630_ops, + .flags = CLK_SET_RATE_PARENT, }; DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0, @@ -968,8 +1006,9 @@ static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = { .clkdm_name = "dss_clkdm", }; -DEFINE_STRUCT_CLK(dss1_alwon_fck_3430es1, dss1_alwon_fck_3430es1_parent_names, - aes2_ick_ops); +DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1, + dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops, + CLK_SET_RATE_PARENT); static struct clk dss1_alwon_fck_3430es2; @@ -983,8 +1022,9 @@ static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = { .clkdm_name = "dss_clkdm", }; -DEFINE_STRUCT_CLK(dss1_alwon_fck_3430es2, dss1_alwon_fck_3430es1_parent_names, - aes2_ick_ops); +DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2, + dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops, + CLK_SET_RATE_PARENT); static struct clk dss2_alwon_fck; diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index b237950eb8a..ec0dc0b1755 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c @@ -830,7 +830,8 @@ DEFINE_CLK_GATE(dss_tv_clk, "extalt_clkin_ck", &extalt_clkin_ck, 0x0, OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_OPTFCLKEN_TV_CLK_SHIFT, 0x0, NULL); -DEFINE_CLK_GATE(dss_dss_clk, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, 0x0, +DEFINE_CLK_GATE(dss_dss_clk, "dpll_per_m5x2_ck", &dpll_per_m5x2_ck, + CLK_SET_RATE_PARENT, OMAP4430_CM_DSS_DSS_CLKCTRL, OMAP4430_OPTFCLKEN_DSSCLK_SHIFT, 0x0, NULL); diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c index ef990118d32..2757504a13c 100644 --- a/arch/arm/mach-omap2/gpmc-smsc911x.c +++ b/arch/arm/mach-omap2/gpmc-smsc911x.c @@ -83,7 +83,7 @@ void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *gpmc_cfg) pdev = platform_device_register_resndata(NULL, "smsc911x", gpmc_cfg->id, gpmc_smsc911x_resources, ARRAY_SIZE(gpmc_smsc911x_resources), &gpmc_smsc911x_config, sizeof(gpmc_smsc911x_config)); - if (!pdev) { + if (IS_ERR(pdev)) { 
pr_err("Unable to register platform device\n"); gpio_free(gpmc_cfg->gpio_reset); goto free2; diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 51525faa0ae..81de5625195 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1341,14 +1341,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np, #ifdef CONFIG_MTD_NAND -static const char * const nand_ecc_opts[] = { - [OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw", - [OMAP_ECC_HAMMING_CODE_HW] = "hw", - [OMAP_ECC_HAMMING_CODE_HW_ROMCODE] = "hw-romcode", - [OMAP_ECC_BCH4_CODE_HW] = "bch4", - [OMAP_ECC_BCH8_CODE_HW] = "bch8", -}; - static const char * const nand_xfer_types[] = { [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled", [NAND_OMAP_POLLED] = "polled", @@ -1378,13 +1370,41 @@ static int gpmc_probe_nand_child(struct platform_device *pdev, gpmc_nand_data->cs = val; gpmc_nand_data->of_node = child; - if (!of_property_read_string(child, "ti,nand-ecc-opt", &s)) - for (val = 0; val < ARRAY_SIZE(nand_ecc_opts); val++) - if (!strcasecmp(s, nand_ecc_opts[val])) { - gpmc_nand_data->ecc_opt = val; - break; - } + /* Detect availability of ELM module */ + gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0); + if (gpmc_nand_data->elm_of_node == NULL) + gpmc_nand_data->elm_of_node = + of_parse_phandle(child, "elm_id", 0); + if (gpmc_nand_data->elm_of_node == NULL) + pr_warn("%s: ti,elm-id property not found\n", __func__); + + /* select ecc-scheme for NAND */ + if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) { + pr_err("%s: ti,nand-ecc-opt not found\n", __func__); + return -ENODEV; + } + if (!strcmp(s, "ham1") || !strcmp(s, "sw") || + !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) + gpmc_nand_data->ecc_opt = + OMAP_ECC_HAM1_CODE_HW; + else if (!strcmp(s, "bch4")) + if (gpmc_nand_data->elm_of_node) + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH4_CODE_HW; + else + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH4_CODE_HW_DETECTION_SW; + else if (!strcmp(s, "bch8")) + if (gpmc_nand_data->elm_of_node) + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH8_CODE_HW; + else + gpmc_nand_data->ecc_opt = + OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; + else + pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__); + /* select data transfer mode for NAND controller */ if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++) if (!strcasecmp(s, nand_xfer_types[val])) { diff --git a/arch/arm/mach-omap2/omap-pm.h b/arch/arm/mach-omap2/omap-pm.h index 67faa7b8fe9..1d777e63e05 100644 --- a/arch/arm/mach-omap2/omap-pm.h +++ b/arch/arm/mach-omap2/omap-pm.h @@ -17,7 +17,7 @@ #include <linux/device.h> #include <linux/cpufreq.h> #include <linux/clk.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> /* * agent_id values for use with omap_pm_set_min_bus_tput(): diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index b69dd9abb50..53f0735817b 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -621,6 +621,7 @@ static int _od_suspend_noirq(struct device *dev) if (!ret && !pm_runtime_status_suspended(dev)) { if (pm_generic_runtime_suspend(dev) == 0) { + pm_runtime_set_suspended(dev); omap_device_idle(pdev); od->flags |= OMAP_DEVICE_SUSPENDED; } @@ -634,10 +635,18 @@ static int _od_resume_noirq(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct omap_device *od = to_omap_device(pdev); - if ((od->flags & OMAP_DEVICE_SUSPENDED) && - !pm_runtime_status_suspended(dev)) { 
+ if (od->flags & OMAP_DEVICE_SUSPENDED) { od->flags &= ~OMAP_DEVICE_SUSPENDED; omap_device_enable(pdev); + /* + * XXX: we run before core runtime pm has resumed itself. At + * this point in time, we just restore the runtime pm state and + * considering symmetric operations in resume, we donot expect + * to fail. If we failed, something changed in core runtime_pm + * framework OR some device driver messed things up, hence, WARN + */ + WARN(pm_runtime_set_active(dev), + "Could not set %s runtime state active\n", dev_name(dev)); pm_generic_runtime_resume(dev); } diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c index 82fd8c72f75..a358a07e18f 100644 --- a/arch/arm/mach-omap2/opp.c +++ b/arch/arm/mach-omap2/opp.c @@ -18,7 +18,7 @@ */ #include <linux/module.h> #include <linux/of.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/cpu.h> #include "omap_device.h" @@ -85,14 +85,14 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def, dev = &oh->od->pdev->dev; } - r = opp_add(dev, opp_def->freq, opp_def->u_volt); + r = dev_pm_opp_add(dev, opp_def->freq, opp_def->u_volt); if (r) { dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n", __func__, opp_def->freq, opp_def->hwmod_name, i, r); } else { if (!opp_def->default_available) - r = opp_disable(dev, opp_def->freq); + r = dev_pm_opp_disable(dev, opp_def->freq); if (r) dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n", __func__, opp_def->freq, diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 360b2daf54d..e1b41416fbf 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -13,7 +13,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/err.h> -#include <linux/opp.h> +#include <linux/pm_opp.h> #include <linux/export.h> #include <linux/suspend.h> #include <linux/cpu.h> @@ -131,7 +131,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name, { struct voltagedomain *voltdm; struct clk *clk; - struct opp *opp; + struct dev_pm_opp *opp; unsigned long freq, bootup_volt; struct device *dev; @@ -172,7 +172,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name, clk_put(clk); rcu_read_lock(); - opp = opp_find_freq_ceil(dev, &freq); + opp = dev_pm_opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) { rcu_read_unlock(); pr_err("%s: unable to find boot up OPP for vdd_%s\n", @@ -180,7 +180,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name, goto exit; } - bootup_volt = opp_get_voltage(opp); + bootup_volt = dev_pm_opp_get_voltage(opp); rcu_read_unlock(); if (!bootup_volt) { pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n", diff --git a/arch/arm/mach-omap2/prm44xx_54xx.h b/arch/arm/mach-omap2/prm44xx_54xx.h index a085d9cc1f5..7a976065e13 100644 --- a/arch/arm/mach-omap2/prm44xx_54xx.h +++ b/arch/arm/mach-omap2/prm44xx_54xx.h @@ -42,7 +42,8 @@ extern u32 omap4_prm_vcvp_read(u8 offset); extern void omap4_prm_vcvp_write(u32 val, u8 offset); extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset); -#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) +#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ + defined(CONFIG_SOC_DRA7XX) void omap44xx_prm_reconfigure_io_chain(void); #else static inline void omap44xx_prm_reconfigure_io_chain(void) diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index a8427115ee0..96100dbf5a2 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -615,14 +615,12 @@ endmenu 
config PXA25x bool select CPU_XSCALE - select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA21x/25x/26x variants config PXA27x bool select CPU_XSCALE - select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA27x variants @@ -635,7 +633,6 @@ config CPU_PXA26x config PXA3xx bool select CPU_XSC3 - select CPU_FREQ_TABLE if CPU_FREQ help Select code specific to PXA3xx variants diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index f9423493ed3..584439bfa59 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -310,6 +310,7 @@ static struct platform_pwm_backlight_data cm_x300_backlight_data = { .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 10000, + .enable_gpio = -1, }; static struct platform_device cm_x300_backlight_device = { diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c index 2d4a7b4d5d7..3aa264640c9 100644 --- a/arch/arm/mach-pxa/colibri-pxa270-income.c +++ b/arch/arm/mach-pxa/colibri-pxa270-income.c @@ -189,6 +189,7 @@ static struct platform_pwm_backlight_data income_backlight_data = { .max_brightness = 0x3ff, .dft_brightness = 0x1ff, .pwm_period_ns = 1000000, + .enable_gpio = -1, }; static struct platform_device income_backlight = { diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index fe2eb8394df..ab93441e596 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -54,6 +54,7 @@ static struct platform_pwm_backlight_data ezx_backlight_data = { .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, + .enable_gpio = -1, }; static struct platform_device ezx_backlight_device = { diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index 133109ec733..a7c30eb0c8d 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -561,6 +561,7 @@ static struct platform_pwm_backlight_data backlight_data = { .max_brightness = 200, .dft_brightness = 100, .pwm_period_ns = 30923, + .enable_gpio = -1, }; static struct platform_device backlight = { diff --git a/arch/arm/mach-pxa/include/mach/gpio.h b/arch/arm/mach-pxa/include/mach/gpio.h deleted file mode 100644 index 0248e433bc9..00000000000 --- a/arch/arm/mach-pxa/include/mach/gpio.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * arch/arm/mach-pxa/include/mach/gpio.h - * - * PXA GPIO wrappers for arch-neutral GPIO calls - * - * Written by Philipp Zabel <philipp.zabel@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef __ASM_ARCH_PXA_GPIO_H -#define __ASM_ARCH_PXA_GPIO_H - -#include <asm-generic/gpio.h> - -#include <mach/irqs.h> -#include <mach/hardware.h> - -#endif diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index 1255ee00f3d..9f6ec167902 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -269,6 +269,7 @@ static struct platform_pwm_backlight_data lpd270_backlight_data = { .max_brightness = 1, .dft_brightness = 1, .pwm_period_ns = 78770, + .enable_gpio = -1, }; static struct platform_device lpd270_backlight_device = { diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index f44532fc648..fab30d666cc 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -378,6 +378,7 @@ static struct platform_pwm_backlight_data backlight_data = { .max_brightness = 272, .dft_brightness = 100, .pwm_period_ns = 30923, + .enable_gpio = -1, .init = magician_backlight_init, .notify = magician_backlight_notify, .exit = magician_backlight_exit, diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index dd70343c870..08ccc0718f3 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -338,6 +338,7 @@ static struct platform_pwm_backlight_data mainstone_backlight_data = { .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, + .enable_gpio = -1, }; static struct platform_device mainstone_backlight_device = { diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index acc9d3cc076..f70583fee59 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -186,6 +186,7 @@ static struct platform_pwm_backlight_data mioa701_backlight_data = { .max_brightness = 100, .dft_brightness = 50, .pwm_period_ns = 4000 * 1024, /* Fl = 250kHz */ + .enable_gpio = -1, }; /* diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 17d4c53017c..e54a296fb81 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c @@ -322,6 +322,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = { .max_brightness = 0xfe, .dft_brightness = 0x7e, .pwm_period_ns = 3500 * 1024, + .enable_gpio = -1, .init = palm27x_backlight_init, .notify = palm27x_backlight_notify, .exit = palm27x_backlight_exit, diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c index 100b176f7e8..7691c974ca4 100644 --- a/arch/arm/mach-pxa/palmtc.c +++ b/arch/arm/mach-pxa/palmtc.c @@ -166,45 +166,12 @@ static inline void palmtc_keys_init(void) {} * Backlight ******************************************************************************/ #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE) -static int palmtc_backlight_init(struct device *dev) -{ - int ret; - - ret = gpio_request(GPIO_NR_PALMTC_BL_POWER, "BL POWER"); - if (ret) - goto err; - ret = gpio_direction_output(GPIO_NR_PALMTC_BL_POWER, 1); - if (ret) - goto err2; - - return 0; - -err2: - gpio_free(GPIO_NR_PALMTC_BL_POWER); -err: - return ret; -} - -static int palmtc_backlight_notify(struct device *dev, int brightness) -{ - /* backlight is on when GPIO16 AF0 is high */ - gpio_set_value(GPIO_NR_PALMTC_BL_POWER, brightness); - return brightness; -} - -static void palmtc_backlight_exit(struct device *dev) -{ - 
gpio_free(GPIO_NR_PALMTC_BL_POWER); -} - static struct platform_pwm_backlight_data palmtc_backlight_data = { .pwm_id = 1, .max_brightness = PALMTC_MAX_INTENSITY, .dft_brightness = PALMTC_MAX_INTENSITY, .pwm_period_ns = PALMTC_PERIOD_NS, - .init = palmtc_backlight_init, - .notify = palmtc_backlight_notify, - .exit = palmtc_backlight_exit, + .enable_gpio = GPIO_NR_PALMTC_BL_POWER, }; static struct platform_device palmtc_backlight = { diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c index 0742721ced2..956fd24ee6f 100644 --- a/arch/arm/mach-pxa/palmte2.c +++ b/arch/arm/mach-pxa/palmte2.c @@ -165,6 +165,7 @@ static struct platform_pwm_backlight_data palmte2_backlight_data = { .max_brightness = PALMTE2_MAX_INTENSITY, .dft_brightness = PALMTE2_MAX_INTENSITY, .pwm_period_ns = PALMTE2_PERIOD_NS, + .enable_gpio = -1, .init = palmte2_backlight_init, .notify = palmte2_backlight_notify, .exit = palmte2_backlight_exit, diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c index 3133ba82c50..9a4e470f162 100644 --- a/arch/arm/mach-pxa/pcm990-baseboard.c +++ b/arch/arm/mach-pxa/pcm990-baseboard.c @@ -153,6 +153,7 @@ static struct platform_pwm_backlight_data pcm990_backlight_data = { .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, + .enable_gpio = -1, }; static struct platform_device pcm990_backlight_device = { diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index 969b0ba7fa7..8386dc30b3e 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -539,6 +539,7 @@ static struct platform_pwm_backlight_data raumfeld_pwm_backlight_data = { .dft_brightness = 100, /* 10000 ns = 10 ms ^= 100 kHz */ .pwm_period_ns = 10000, + .enable_gpio = -1, }; static struct platform_device raumfeld_pwm_backlight_device = { diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c index 62aea3e835f..01de542432a 100644 --- a/arch/arm/mach-pxa/stargate2.c +++ b/arch/arm/mach-pxa/stargate2.c @@ -27,7 +27,7 @@ #include <linux/i2c/pxa-i2c.h> #include <linux/i2c/pcf857x.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/smc91x.h> #include <linux/gpio.h> #include <linux/leds.h> diff --git a/arch/arm/mach-pxa/tavorevb.c b/arch/arm/mach-pxa/tavorevb.c index 4680efe5534..a71da84e784 100644 --- a/arch/arm/mach-pxa/tavorevb.c +++ b/arch/arm/mach-pxa/tavorevb.c @@ -175,6 +175,7 @@ static struct platform_pwm_backlight_data tavorevb_backlight_data[] = { .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 100000, + .enable_gpio = -1, }, [1] = { /* secondary backlight */ @@ -182,6 +183,7 @@ static struct platform_pwm_backlight_data tavorevb_backlight_data[] = { .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 100000, + .enable_gpio = -1, }, }; diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c index 9c363c081d3..29905b127ad 100644 --- a/arch/arm/mach-pxa/viper.c +++ b/arch/arm/mach-pxa/viper.c @@ -401,6 +401,7 @@ static struct platform_pwm_backlight_data viper_backlight_data = { .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 1000000, + .enable_gpio = -1, .init = viper_backlight_init, .notify = viper_backlight_notify, .exit = viper_backlight_exit, diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index 2513d8f4931..e1a121b36cf 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -206,6 +206,7 @@ static struct platform_pwm_backlight_data z2_backlight_data[] = { .max_brightness = 1023, 
.dft_brightness = 0, .pwm_period_ns = 1260320, + .enable_gpio = -1, }, [1] = { /* LCD Backlight */ @@ -213,6 +214,7 @@ static struct platform_pwm_backlight_data z2_backlight_data[] = { .max_brightness = 1023, .dft_brightness = 512, .pwm_period_ns = 1260320, + .enable_gpio = -1, }, }; diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c index 36cf7cf95ec..77daea478e8 100644 --- a/arch/arm/mach-pxa/zylonite.c +++ b/arch/arm/mach-pxa/zylonite.c @@ -125,6 +125,7 @@ static struct platform_pwm_backlight_data zylonite_backlight_data = { .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 10000, + .enable_gpio = -1, }; static struct platform_device zylonite_backlight_device = { diff --git a/arch/arm/mach-s3c24xx/mach-h1940.c b/arch/arm/mach-s3c24xx/mach-h1940.c index 74dd47988b4..952b6a040d1 100644 --- a/arch/arm/mach-s3c24xx/mach-h1940.c +++ b/arch/arm/mach-s3c24xx/mach-h1940.c @@ -504,6 +504,7 @@ static struct platform_pwm_backlight_data backlight_data = { .dft_brightness = 50, /* tcnt = 0x31 */ .pwm_period_ns = 36296, + .enable_gpio = -1, .init = h1940_backlight_init, .notify = h1940_backlight_notify, .exit = h1940_backlight_exit, diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c index a83db46320b..4a18d49a63e 100644 --- a/arch/arm/mach-s3c24xx/mach-mini2440.c +++ b/arch/arm/mach-s3c24xx/mach-mini2440.c @@ -24,7 +24,7 @@ #include <linux/io.h> #include <linux/serial_core.h> #include <linux/dm9000.h> -#include <linux/i2c/at24.h> +#include <linux/platform_data/at24.h> #include <linux/platform_device.h> #include <linux/gpio_keys.h> #include <linux/i2c.h> diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c index 206b1f7546d..034b7fe45c4 100644 --- a/arch/arm/mach-s3c24xx/mach-rx1950.c +++ b/arch/arm/mach-s3c24xx/mach-rx1950.c @@ -522,6 +522,7 @@ static struct platform_pwm_backlight_data rx1950_backlight_data = { .max_brightness = 24, .dft_brightness = 4, .pwm_period_ns = 48000, + .enable_gpio = -1, .init = rx1950_backlight_init, .notify = rx1950_backlight_notify, .exit = rx1950_backlight_exit, diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c index 1a911df9e45..758e31b2655 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c @@ -114,6 +114,7 @@ static struct platform_pwm_backlight_data crag6410_backlight_data = { .max_brightness = 1000, .dft_brightness = 600, .pwm_period_ns = 100000, /* about 1kHz */ + .enable_gpio = -1, }; static struct platform_device crag6410_backlight_device = { @@ -310,10 +311,6 @@ static struct regulator_consumer_supply wallvdd_consumers[] = { REGULATOR_SUPPLY("SPKVDDL", "spi0.1"), REGULATOR_SUPPLY("SPKVDDR", "spi0.1"), - REGULATOR_SUPPLY("SPKVDDL", "wm5102-codec"), - REGULATOR_SUPPLY("SPKVDDR", "wm5102-codec"), - REGULATOR_SUPPLY("SPKVDDL", "wm5110-codec"), - REGULATOR_SUPPLY("SPKVDDR", "wm5110-codec"), REGULATOR_SUPPLY("DC1VDD", "0-0034"), REGULATOR_SUPPLY("DC2VDD", "0-0034"), @@ -653,14 +650,6 @@ static struct regulator_consumer_supply pvdd_1v8_consumers[] = { REGULATOR_SUPPLY("DBVDD3", "spi0.1"), REGULATOR_SUPPLY("LDOVDD", "spi0.1"), REGULATOR_SUPPLY("CPVDD", "spi0.1"), - - REGULATOR_SUPPLY("DBVDD2", "wm5102-codec"), - REGULATOR_SUPPLY("DBVDD3", "wm5102-codec"), - REGULATOR_SUPPLY("CPVDD", "wm5102-codec"), - - REGULATOR_SUPPLY("DBVDD2", "wm5110-codec"), - REGULATOR_SUPPLY("DBVDD3", "wm5110-codec"), - REGULATOR_SUPPLY("CPVDD", "wm5110-codec"), }; static struct regulator_init_data 
pvdd_1v8 = { diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c index e8064044ef7..614a03a92cf 100644 --- a/arch/arm/mach-s3c64xx/mach-hmt.c +++ b/arch/arm/mach-s3c64xx/mach-hmt.c @@ -114,6 +114,7 @@ static struct platform_pwm_backlight_data hmt_backlight_data = { .max_brightness = 100 * 256, .dft_brightness = 40 * 256, .pwm_period_ns = 1000000000 / (100 * 256 * 20), + .enable_gpio = -1, .init = hmt_bl_init, .notify = hmt_bl_notify, .exit = hmt_bl_exit, diff --git a/arch/arm/mach-s3c64xx/mach-smartq.c b/arch/arm/mach-s3c64xx/mach-smartq.c index 0f47237be3b..a6b338fd047 100644 --- a/arch/arm/mach-s3c64xx/mach-smartq.c +++ b/arch/arm/mach-s3c64xx/mach-smartq.c @@ -151,6 +151,7 @@ static struct platform_pwm_backlight_data smartq_backlight_data = { .max_brightness = 1000, .dft_brightness = 600, .pwm_period_ns = 1000000000 / (1000 * 20), + .enable_gpio = -1, .init = smartq_bl_init, }; diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index 2a7b32ca5c9..d5ea938cc9a 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c @@ -625,6 +625,7 @@ static struct samsung_bl_gpio_info smdk6410_bl_gpio_info = { static struct platform_pwm_backlight_data smdk6410_bl_data = { .pwm_id = 1, + .enable_gpio = -1, }; static struct s3c_hsotg_plat smdk6410_hsotg_pdata; diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c index 0b00304c1e9..9efdcc03df3 100644 --- a/arch/arm/mach-s5p64x0/mach-smdk6440.c +++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c @@ -223,6 +223,7 @@ static struct samsung_bl_gpio_info smdk6440_bl_gpio_info = { static struct platform_pwm_backlight_data smdk6440_bl_data = { .pwm_id = 1, + .enable_gpio = -1, }; static void __init smdk6440_map_io(void) diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c index 5949296e88f..c3cacc067ef 100644 --- a/arch/arm/mach-s5p64x0/mach-smdk6450.c +++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c @@ -242,6 +242,7 @@ static struct samsung_bl_gpio_info smdk6450_bl_gpio_info = { static struct platform_pwm_backlight_data smdk6450_bl_data = { .pwm_id = 1, + .enable_gpio = -1, }; static void __init smdk6450_map_io(void) diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c index 7c57a221785..9e256b9fc93 100644 --- a/arch/arm/mach-s5pc100/mach-smdkc100.c +++ b/arch/arm/mach-s5pc100/mach-smdkc100.c @@ -216,6 +216,7 @@ static struct samsung_bl_gpio_info smdkc100_bl_gpio_info = { static struct platform_pwm_backlight_data smdkc100_bl_data = { .pwm_id = 0, + .enable_gpio = -1, }; static void __init smdkc100_map_io(void) diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c index 6d72bb992e3..f52cc15c2d8 100644 --- a/arch/arm/mach-s5pv210/mach-smdkv210.c +++ b/arch/arm/mach-s5pv210/mach-smdkv210.c @@ -279,6 +279,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = { static struct platform_pwm_backlight_data smdkv210_bl_data = { .pwm_id = 3, .pwm_period_ns = 1000, + .enable_gpio = -1, }; static void __init smdkv210_map_io(void) diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index e838ba27e44..c9808c68415 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -512,6 +512,9 @@ static void __init assabet_map_io(void) * Its called GPCLKR0 in my SA1110 manual. 
*/ Ser1SDCR0 |= SDCR0_SUS; + MSC1 = (MSC1 & ~0xffff) | + MSC_NonBrst | MSC_32BitStMem | + MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0); if (!machine_has_neponset()) sa1100_register_uart_fns(&assabet_port_fns); diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index f25b6119e02..d4ea142c4ed 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -42,74 +42,31 @@ EXPORT_SYMBOL(reset_status); /* * This table is setup for a 3.6864MHz Crystal. */ -static const unsigned short cclk_frequency_100khz[NR_FREQS] = { - 590, /* 59.0 MHz */ - 737, /* 73.7 MHz */ - 885, /* 88.5 MHz */ - 1032, /* 103.2 MHz */ - 1180, /* 118.0 MHz */ - 1327, /* 132.7 MHz */ - 1475, /* 147.5 MHz */ - 1622, /* 162.2 MHz */ - 1769, /* 176.9 MHz */ - 1917, /* 191.7 MHz */ - 2064, /* 206.4 MHz */ - 2212, /* 221.2 MHz */ - 2359, /* 235.9 MHz */ - 2507, /* 250.7 MHz */ - 2654, /* 265.4 MHz */ - 2802 /* 280.2 MHz */ +struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = { + { .frequency = 59000, /* 59.0 MHz */}, + { .frequency = 73700, /* 73.7 MHz */}, + { .frequency = 88500, /* 88.5 MHz */}, + { .frequency = 103200, /* 103.2 MHz */}, + { .frequency = 118000, /* 118.0 MHz */}, + { .frequency = 132700, /* 132.7 MHz */}, + { .frequency = 147500, /* 147.5 MHz */}, + { .frequency = 162200, /* 162.2 MHz */}, + { .frequency = 176900, /* 176.9 MHz */}, + { .frequency = 191700, /* 191.7 MHz */}, + { .frequency = 206400, /* 206.4 MHz */}, + { .frequency = 221200, /* 221.2 MHz */}, + { .frequency = 235900, /* 235.9 MHz */}, + { .frequency = 250700, /* 250.7 MHz */}, + { .frequency = 265400, /* 265.4 MHz */}, + { .frequency = 280200, /* 280.2 MHz */}, + { .frequency = CPUFREQ_TABLE_END, }, }; -/* rounds up(!) */ -unsigned int sa11x0_freq_to_ppcr(unsigned int khz) -{ - int i; - - khz /= 100; - - for (i = 0; i < NR_FREQS; i++) - if (cclk_frequency_100khz[i] >= khz) - break; - - return i; -} - -unsigned int sa11x0_ppcr_to_freq(unsigned int idx) -{ - unsigned int freq = 0; - if (idx < NR_FREQS) - freq = cclk_frequency_100khz[idx] * 100; - return freq; -} - - -/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on - * this platform, anyway. 
- */ -int sa11x0_verify_speed(struct cpufreq_policy *policy) -{ - unsigned int tmp; - if (policy->cpu) - return -EINVAL; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - - /* make sure that at least one frequency is within the policy */ - tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100; - if (tmp > policy->max) - policy->max = tmp; - - cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - - return 0; -} - unsigned int sa11x0_getspeed(unsigned int cpu) { if (cpu) return 0; - return cclk_frequency_100khz[PPCR & 0xf] * 100; + return sa11x0_freq_table[PPCR & 0xf].frequency; } /* diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h index 9a33695c949..0d92e119b36 100644 --- a/arch/arm/mach-sa1100/generic.h +++ b/arch/arm/mach-sa1100/generic.h @@ -3,6 +3,7 @@ * * Author: Nicolas Pitre */ +#include <linux/cpufreq.h> #include <linux/reboot.h> extern void sa1100_timer_init(void); @@ -19,12 +20,8 @@ extern void sa11x0_init_late(void); extern void sa1110_mb_enable(void); extern void sa1110_mb_disable(void); -struct cpufreq_policy; - -extern unsigned int sa11x0_freq_to_ppcr(unsigned int khz); -extern int sa11x0_verify_speed(struct cpufreq_policy *policy); +extern struct cpufreq_frequency_table sa11x0_freq_table[]; extern unsigned int sa11x0_getspeed(unsigned int cpu); -extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx); struct flash_platform_data; struct resource; diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h deleted file mode 100644 index 6a9eecf3137..00000000000 --- a/arch/arm/mach-sa1100/include/mach/gpio.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * arch/arm/mach-sa1100/include/mach/gpio.h - * - * SA1100 GPIO wrappers for arch-neutral GPIO calls - * - * Written by Philipp Zabel <philipp.zabel@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef __ASM_ARCH_SA1100_GPIO_H -#define __ASM_ARCH_SA1100_GPIO_H - -#include <linux/io.h> -#include <mach/hardware.h> -#include <asm/irq.h> -#include <asm-generic/gpio.h> - -#define __ARM_GPIOLIB_COMPLEX - -static inline int gpio_get_value(unsigned gpio) -{ - if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX)) - return GPLR & GPIO_GPIO(gpio); - else - return __gpio_get_value(gpio); -} - -static inline void gpio_set_value(unsigned gpio, int value) -{ - if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX)) - if (value) - GPSR = GPIO_GPIO(gpio); - else - GPCR = GPIO_GPIO(gpio); - else - __gpio_set_value(gpio, value); -} - -#define gpio_cansleep __gpio_cansleep - -#endif diff --git a/arch/arm/mach-sa1100/include/mach/h3xxx.h b/arch/arm/mach-sa1100/include/mach/h3xxx.h index 7d9df16f04a..c810620db53 100644 --- a/arch/arm/mach-sa1100/include/mach/h3xxx.h +++ b/arch/arm/mach-sa1100/include/mach/h3xxx.h @@ -13,6 +13,8 @@ #ifndef _INCLUDE_H3XXX_H_ #define _INCLUDE_H3XXX_H_ +#include "hardware.h" /* Gives GPIO_MAX */ + /* Physical memory regions corresponding to chip selects */ #define H3600_EGPIO_PHYS (SA1100_CS5_PHYS + 0x01000000) #define H3600_BANK_2_PHYS SA1100_CS2_PHYS diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c index bcbc94540e4..41e476e571d 100644 --- a/arch/arm/mach-sa1100/simpad.c +++ b/arch/arm/mach-sa1100/simpad.c @@ -19,6 +19,7 @@ #include <mach/hardware.h> #include <asm/setup.h> +#include <asm/irq.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 8bc8e4c5884..958e3cbf0ac 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -423,6 +423,7 @@ static struct platform_pwm_backlight_data pwm_backlight_data = { .max_brightness = 255, .dft_brightness = 255, .pwm_period_ns = 33333, /* 30kHz */ + .enable_gpio = -1, }; static struct platform_device pwm_backlight_device = { diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig index 835833e3c4f..d71654bc8d5 100644 --- a/arch/arm/mach-sti/Kconfig +++ b/arch/arm/mach-sti/Kconfig @@ -12,7 +12,7 @@ menuconfig ARCH_STI select HAVE_ARM_SCU if SMP select ARCH_REQUIRE_GPIOLIB select ARM_ERRATA_754322 - select ARM_ERRATA_764369 + select ARM_ERRATA_764369 if SMP select ARM_ERRATA_775420 select PL310_ERRATA_753970 if CACHE_PL310 select PL310_ERRATA_769419 if CACHE_PL310 @@ -30,7 +30,7 @@ config SOC_STIH415 default y help This enables support for STMicroelectronics Digital Consumer - Electronics family StiH415 parts, primarily targetted at set-top-box + Electronics family StiH415 parts, primarily targeted at set-top-box and other digital audio/video applications using Flattned Device Trees. @@ -39,7 +39,7 @@ config SOC_STIH416 default y help This enables support for STMicroelectronics Digital Consumer - Electronics family StiH416 parts, primarily targetted at set-top-box + Electronics family StiH416 parts, primarily targeted at set-top-box and other digital audio/video applications using Flattened Device Trees. 
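The repeated ".enable_gpio = -1" additions in the PXA, Samsung (S3C/S5P) and shmobile board hunks above all track the same pwm-backlight change: the driver now takes an optional enable GPIO through its platform data, and boards without such a line must pass -1 explicitly. A minimal sketch of the resulting board data for a hypothetical machine (only the field names are taken from the hunks above; the board and values are made up):

#include <linux/platform_device.h>
#include <linux/pwm_backlight.h>

/* Hypothetical board: PWM-dimmed backlight, no separate enable GPIO */
static struct platform_pwm_backlight_data example_backlight_data = {
	.pwm_id		= 0,
	.max_brightness	= 255,
	.dft_brightness	= 128,
	.pwm_period_ns	= 30000,	/* ~33 kHz PWM */
	.enable_gpio	= -1,		/* tell the driver there is no enable line */
};

static struct platform_device example_backlight_device = {
	.name	= "pwm-backlight",
	.dev	= {
		.platform_data	= &example_backlight_data,
	},
};

Boards that do have a dedicated enable line drop their init/notify/exit GPIO callbacks and hand the GPIO number to the driver instead, as the palmtc.c hunk above does with GPIO_NR_PALMTC_BL_POWER.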
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig index 0bf04a0bca9..09e740f58b2 100644 --- a/arch/arm/mach-tegra/Kconfig +++ b/arch/arm/mach-tegra/Kconfig @@ -51,7 +51,7 @@ config ARCH_TEGRA_3x_SOC config ARCH_TEGRA_114_SOC bool "Enable support for Tegra114 family" - select ARM_ERRATA_798181 + select ARM_ERRATA_798181 if SMP select ARM_L1_CACHE_SHIFT_6 select HAVE_ARM_ARCH_TIMER select PINCTRL_TEGRA114 diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c index d7aa52ea6cf..bc471973cf0 100644 --- a/arch/arm/mach-tegra/apbio.c +++ b/arch/arm/mach-tegra/apbio.c @@ -114,7 +114,7 @@ static int do_dma_transfer(unsigned long apb_add, dma_desc->callback = apb_dma_complete; dma_desc->callback_param = NULL; - INIT_COMPLETION(tegra_apb_wait); + reinit_completion(&tegra_apb_wait); dmaengine_submit(dma_desc); dma_async_issue_pending(tegra_apb_dma_chan); diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index ce553d557c3..73368176c6e 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -90,9 +90,9 @@ static void __init tegra_init_cache(void) static void __init tegra_init_early(void) { - tegra_cpu_reset_handler_init(); tegra_apb_io_init(); tegra_init_fuse(); + tegra_cpu_reset_handler_init(); tegra_init_cache(); tegra_powergate_init(); tegra_hotplug_init(); diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c index b5db207dfd1..9a5f9fb352c 100644 --- a/arch/arm/mach-u300/timer.c +++ b/arch/arm/mach-u300/timer.c @@ -358,8 +358,7 @@ static struct delay_timer u300_delay_timer; */ static void __init u300_timer_init_of(struct device_node *np) { - struct resource irq_res; - int irq; + unsigned int irq; struct clk *clk; unsigned long rate; @@ -368,11 +367,11 @@ static void __init u300_timer_init_of(struct device_node *np) panic("could not ioremap system timer\n"); /* Get the IRQ for the GP1 timer */ - irq = of_irq_to_resource(np, 2, &irq_res); - if (irq <= 0) + irq = irq_of_parse_and_map(np, 2); + if (!irq) panic("no IRQ for system timer\n"); - pr_info("U300 GP1 timer @ base: %p, IRQ: %d\n", u300_timer_base, irq); + pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq); /* Clock the interrupt controller */ clk = of_clk_get(np, 0); diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index c67f8ad5ccd..0034d2cd697 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -29,7 +29,6 @@ if ARCH_U8500 config UX500_SOC_DB8500 bool - select CPU_FREQ_TABLE if CPU_FREQ select MFD_DB8500_PRCMU select PINCTRL_DB8500 select PINCTRL_DB8540 diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig index d7e7422527c..4a70be485ff 100644 --- a/arch/arm/mach-vexpress/Kconfig +++ b/arch/arm/mach-vexpress/Kconfig @@ -1,6 +1,7 @@ config ARCH_VEXPRESS bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7 select ARCH_REQUIRE_GPIOLIB + select ARCH_SUPPORTS_BIG_ENDIAN select ARM_AMBA select ARM_GIC select ARM_TIMER_SP804 @@ -64,10 +65,22 @@ config ARCH_VEXPRESS_DCSCB This is needed to provide CPU and cluster power management on RTSM implementing big.LITTLE. +config ARCH_VEXPRESS_SPC + bool "Versatile Express Serial Power Controller (SPC)" + select ARCH_HAS_CPUFREQ + select ARCH_HAS_OPP + select PM_OPP + help + The TC2 (A15x2 A7x3) versatile express core tile integrates a logic + block called Serial Power Controller (SPC) that provides the interface + between the dual cluster test-chip and the M3 microcontroller that + carries out power management. 
+ config ARCH_VEXPRESS_TC2_PM bool "Versatile Express TC2 power management" depends on MCPM select ARM_CCI + select ARCH_VEXPRESS_SPC help Support for CPU and cluster power management on Versatile Express with a TC2 (A15x2 A7x3) big.LITTLE core tile. diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile index 505e64ab3ea..0997e0b7494 100644 --- a/arch/arm/mach-vexpress/Makefile +++ b/arch/arm/mach-vexpress/Makefile @@ -8,7 +8,8 @@ obj-y := v2m.o obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o obj-$(CONFIG_ARCH_VEXPRESS_DCSCB) += dcscb.o dcscb_setup.o CFLAGS_dcscb.o += -march=armv7-a -obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o spc.o +obj-$(CONFIG_ARCH_VEXPRESS_SPC) += spc.o +obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += tc2_pm.o CFLAGS_tc2_pm.o += -march=armv7-a obj-$(CONFIG_SMP) += platsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c index 3a6384c6c43..14d49968873 100644 --- a/arch/arm/mach-vexpress/dcscb.c +++ b/arch/arm/mach-vexpress/dcscb.c @@ -133,38 +133,8 @@ static void dcscb_power_down(void) if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { arch_spin_unlock(&dcscb_lock); - /* - * Flush all cache levels for this cluster. - * - * To do so we do: - * - Clear the SCTLR.C bit to prevent further cache allocations - * - Flush the whole cache - * - Clear the ACTLR "SMP" bit to disable local coherency - * - * Let's do it in the safest possible way i.e. with - * no memory access within the following sequence - * including to the stack. - * - * Note: fp is preserved to the stack explicitly prior doing - * this since adding it to the clobber list is incompatible - * with having CONFIG_FRAME_POINTER=y. - */ - asm volatile( - "str fp, [sp, #-4]! \n\t" - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" - "bic r0, r0, #"__stringify(CR_C)" \n\t" - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" - "isb \n\t" - "bl v7_flush_dcache_all \n\t" - "clrex \n\t" - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" - "isb \n\t" - "dsb \n\t" - "ldr fp, [sp], #4" - : : : "r0","r1","r2","r3","r4","r5","r6","r7", - "r9","r10","lr","memory"); + /* Flush all cache levels for this cluster. */ + v7_exit_coherency_flush(all); /* * This is a harmless no-op. On platforms with a real @@ -183,26 +153,8 @@ static void dcscb_power_down(void) } else { arch_spin_unlock(&dcscb_lock); - /* - * Flush the local CPU cache. - * Let's do it in the safest possible way as above. - */ - asm volatile( - "str fp, [sp, #-4]! \n\t" - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" - "bic r0, r0, #"__stringify(CR_C)" \n\t" - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" - "isb \n\t" - "bl v7_flush_dcache_louis \n\t" - "clrex \n\t" - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" - "isb \n\t" - "dsb \n\t" - "ldr fp, [sp], #4" - : : : "r0","r1","r2","r3","r4","r5","r6","r7", - "r9","r10","lr","memory"); + /* Disable and flush the local CPU cache. */ + v7_exit_coherency_flush(louis); } __mcpm_cpu_down(cpu, cluster); diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index eefb029197c..033d34dcbd3 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -17,14 +17,31 @@ * GNU General Public License for more details. 
*/ +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/cpu.h> +#include <linux/delay.h> #include <linux/err.h> +#include <linux/interrupt.h> #include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> #include <linux/slab.h> +#include <linux/semaphore.h> #include <asm/cacheflush.h> #define SPCLOG "vexpress-spc: " +#define PERF_LVL_A15 0x00 +#define PERF_REQ_A15 0x04 +#define PERF_LVL_A7 0x08 +#define PERF_REQ_A7 0x0c +#define COMMS 0x10 +#define COMMS_REQ 0x14 +#define PWC_STATUS 0x18 +#define PWC_FLAG 0x1c + /* SPC wake-up IRQs status and mask */ #define WAKE_INT_MASK 0x24 #define WAKE_INT_RAW 0x28 @@ -36,12 +53,45 @@ #define A15_BX_ADDR0 0x68 #define A7_BX_ADDR0 0x78 +/* SPC system config interface registers */ +#define SYSCFG_WDATA 0x70 +#define SYSCFG_RDATA 0x74 + +/* A15/A7 OPP virtual register base */ +#define A15_PERFVAL_BASE 0xC10 +#define A7_PERFVAL_BASE 0xC30 + +/* Config interface control bits */ +#define SYSCFG_START (1 << 31) +#define SYSCFG_SCC (6 << 20) +#define SYSCFG_STAT (14 << 20) + /* wake-up interrupt masks */ #define GBL_WAKEUP_INT_MSK (0x3 << 10) /* TC2 static dual-cluster configuration */ #define MAX_CLUSTERS 2 +/* + * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS + * operation, the operation could start just before jiffie is about + * to be incremented. So setting timeout value of 20ms = 2jiffies@100Hz + */ +#define TIMEOUT_US 20000 + +#define MAX_OPPS 8 +#define CA15_DVFS 0 +#define CA7_DVFS 1 +#define SPC_SYS_CFG 2 +#define STAT_COMPLETE(type) ((1 << 0) << (type << 2)) +#define STAT_ERR(type) ((1 << 1) << (type << 2)) +#define RESPONSE_MASK(type) (STAT_COMPLETE(type) | STAT_ERR(type)) + +struct ve_spc_opp { + unsigned long freq; + unsigned long u_volt; +}; + struct ve_spc_drvdata { void __iomem *baseaddr; /* @@ -49,6 +99,12 @@ struct ve_spc_drvdata { * It corresponds to A15 processors MPIDR[15:8] bitfield */ u32 a15_clusid; + uint32_t cur_rsp_mask; + uint32_t cur_rsp_stat; + struct semaphore sem; + struct completion done; + struct ve_spc_opp *opps[MAX_CLUSTERS]; + int num_opps[MAX_CLUSTERS]; }; static struct ve_spc_drvdata *info; @@ -157,8 +213,197 @@ void ve_spc_powerdown(u32 cluster, bool enable) writel_relaxed(enable, info->baseaddr + pwdrn_reg); } -int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid) +static int ve_spc_get_performance(int cluster, u32 *freq) +{ + struct ve_spc_opp *opps = info->opps[cluster]; + u32 perf_cfg_reg = 0; + u32 perf; + + perf_cfg_reg = cluster_is_a15(cluster) ? 
PERF_LVL_A15 : PERF_LVL_A7; + + perf = readl_relaxed(info->baseaddr + perf_cfg_reg); + if (perf >= info->num_opps[cluster]) + return -EINVAL; + + opps += perf; + *freq = opps->freq; + + return 0; +} + +/* find closest match to given frequency in OPP table */ +static int ve_spc_round_performance(int cluster, u32 freq) +{ + int idx, max_opp = info->num_opps[cluster]; + struct ve_spc_opp *opps = info->opps[cluster]; + u32 fmin = 0, fmax = ~0, ftmp; + + freq /= 1000; /* OPP entries in kHz */ + for (idx = 0; idx < max_opp; idx++, opps++) { + ftmp = opps->freq; + if (ftmp >= freq) { + if (ftmp <= fmax) + fmax = ftmp; + } else { + if (ftmp >= fmin) + fmin = ftmp; + } + } + if (fmax != ~0) + return fmax * 1000; + else + return fmin * 1000; +} + +static int ve_spc_find_performance_index(int cluster, u32 freq) +{ + int idx, max_opp = info->num_opps[cluster]; + struct ve_spc_opp *opps = info->opps[cluster]; + + for (idx = 0; idx < max_opp; idx++, opps++) + if (opps->freq == freq) + break; + return (idx == max_opp) ? -EINVAL : idx; +} + +static int ve_spc_waitforcompletion(int req_type) +{ + int ret = wait_for_completion_interruptible_timeout( + &info->done, usecs_to_jiffies(TIMEOUT_US)); + if (ret == 0) + ret = -ETIMEDOUT; + else if (ret > 0) + ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO; + return ret; +} + +static int ve_spc_set_performance(int cluster, u32 freq) +{ + u32 perf_cfg_reg, perf_stat_reg; + int ret, perf, req_type; + + if (cluster_is_a15(cluster)) { + req_type = CA15_DVFS; + perf_cfg_reg = PERF_LVL_A15; + perf_stat_reg = PERF_REQ_A15; + } else { + req_type = CA7_DVFS; + perf_cfg_reg = PERF_LVL_A7; + perf_stat_reg = PERF_REQ_A7; + } + + perf = ve_spc_find_performance_index(cluster, freq); + + if (perf < 0) + return perf; + + if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) + return -ETIME; + + init_completion(&info->done); + info->cur_rsp_mask = RESPONSE_MASK(req_type); + + writel(perf, info->baseaddr + perf_cfg_reg); + ret = ve_spc_waitforcompletion(req_type); + + info->cur_rsp_mask = 0; + up(&info->sem); + + return ret; +} + +static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data) +{ + int ret; + + if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) + return -ETIME; + + init_completion(&info->done); + info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG); + + /* Set the control value */ + writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS); + ret = ve_spc_waitforcompletion(SPC_SYS_CFG); + + if (ret == 0) + *data = readl(info->baseaddr + SYSCFG_RDATA); + + info->cur_rsp_mask = 0; + up(&info->sem); + + return ret; +} + +static irqreturn_t ve_spc_irq_handler(int irq, void *data) +{ + struct ve_spc_drvdata *drv_data = data; + uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS); + + if (info->cur_rsp_mask & status) { + info->cur_rsp_stat = status; + complete(&drv_data->done); + } + + return IRQ_HANDLED; +} + +/* + * +--------------------------+ + * | 31 20 | 19 0 | + * +--------------------------+ + * | u_volt | freq(kHz) | + * +--------------------------+ + */ +#define MULT_FACTOR 20 +#define VOLT_SHIFT 20 +#define FREQ_MASK (0xFFFFF) +static int ve_spc_populate_opps(uint32_t cluster) { + uint32_t data = 0, off, ret, idx; + struct ve_spc_opp *opps; + + opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL); + if (!opps) + return -ENOMEM; + + info->opps[cluster] = opps; + + off = cluster_is_a15(cluster) ? 
A15_PERFVAL_BASE : A7_PERFVAL_BASE; + for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) { + ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data); + if (!ret) { + opps->freq = (data & FREQ_MASK) * MULT_FACTOR; + opps->u_volt = data >> VOLT_SHIFT; + } else { + break; + } + } + info->num_opps[cluster] = idx; + + return ret; +} + +static int ve_init_opp_table(struct device *cpu_dev) +{ + int cluster = topology_physical_package_id(cpu_dev->id); + int idx, ret = 0, max_opp = info->num_opps[cluster]; + struct ve_spc_opp *opps = info->opps[cluster]; + + for (idx = 0; idx < max_opp; idx++, opps++) { + ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); + if (ret) { + dev_warn(cpu_dev, "failed to add opp %lu %lu\n", + opps->freq, opps->u_volt); + return ret; + } + } + return ret; +} + +int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq) +{ + int ret; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { pr_err(SPCLOG "unable to allocate mem\n"); @@ -168,6 +413,25 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid) info->baseaddr = baseaddr; info->a15_clusid = a15_clusid; + if (irq <= 0) { + pr_err(SPCLOG "Invalid IRQ %d\n", irq); + kfree(info); + return -EINVAL; + } + + init_completion(&info->done); + + readl_relaxed(info->baseaddr + PWC_STATUS); + + ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH + | IRQF_ONESHOT, "vexpress-spc", info); + if (ret) { + pr_err(SPCLOG "IRQ %d request failed\n", irq); + kfree(info); + return -ENODEV; + } + + sema_init(&info->sem, 1); /* * Multi-cluster systems may need this data when non-coherent, during * cluster power-up/power-down. Make sure driver info reaches main @@ -178,3 +442,103 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid) return 0; } + +struct clk_spc { + struct clk_hw hw; + int cluster; +}; + +#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw) +static unsigned long spc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_spc *spc = to_clk_spc(hw); + u32 freq; + + if (ve_spc_get_performance(spc->cluster, &freq)) + return -EIO; + + return freq * 1000; +} + +static long spc_round_rate(struct clk_hw *hw, unsigned long drate, + unsigned long *parent_rate) +{ + struct clk_spc *spc = to_clk_spc(hw); + + return ve_spc_round_performance(spc->cluster, drate); +} + +static int spc_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_spc *spc = to_clk_spc(hw); + + return ve_spc_set_performance(spc->cluster, rate / 1000); +} + +static struct clk_ops clk_spc_ops = { + .recalc_rate = spc_recalc_rate, + .round_rate = spc_round_rate, + .set_rate = spc_set_rate, +}; + +static struct clk *ve_spc_clk_register(struct device *cpu_dev) +{ + struct clk_init_data init; + struct clk_spc *spc; + + spc = kzalloc(sizeof(*spc), GFP_KERNEL); + if (!spc) { + pr_err("could not allocate spc clk\n"); + return ERR_PTR(-ENOMEM); + } + + spc->hw.init = &init; + spc->cluster = topology_physical_package_id(cpu_dev->id); + + init.name = dev_name(cpu_dev); + init.ops = &clk_spc_ops; + init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE; + init.num_parents = 0; + + return devm_clk_register(cpu_dev, &spc->hw); +} + +static int __init ve_spc_clk_init(void) +{ + int cpu; + struct clk *clk; + + if (!info) + return 0; /* Continue only if SPC is initialised */ + + if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) { + pr_err("failed to build OPP table\n"); + return -ENODEV; + } + + for_each_possible_cpu(cpu) { + struct device *cpu_dev = 
get_cpu_device(cpu); + if (!cpu_dev) { + pr_warn("failed to get cpu%d device\n", cpu); + continue; + } + clk = ve_spc_clk_register(cpu_dev); + if (IS_ERR(clk)) { + pr_warn("failed to register cpu%d clock\n", cpu); + continue; + } + if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) { + pr_warn("failed to register cpu%d clock lookup\n", cpu); + continue; + } + + if (ve_init_opp_table(cpu_dev)) + pr_warn("failed to initialise cpu%d opp table\n", cpu); + } + + platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); + return 0; +} +module_init(ve_spc_clk_init); diff --git a/arch/arm/mach-vexpress/spc.h b/arch/arm/mach-vexpress/spc.h index 5f7e4a446a1..dbd44c3720f 100644 --- a/arch/arm/mach-vexpress/spc.h +++ b/arch/arm/mach-vexpress/spc.h @@ -15,7 +15,7 @@ #ifndef __SPC_H_ #define __SPC_H_ -int __init ve_spc_init(void __iomem *base, u32 a15_clusid); +int __init ve_spc_init(void __iomem *base, u32 a15_clusid, int irq); void ve_spc_global_wakeup_irq(bool set); void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set); void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr); diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c index e6eb4819291..05a364c5077 100644 --- a/arch/arm/mach-vexpress/tc2_pm.c +++ b/arch/arm/mach-vexpress/tc2_pm.c @@ -16,6 +16,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/irqchip/arm-gic.h> @@ -156,32 +157,7 @@ static void tc2_pm_down(u64 residency) : : "r" (0x400) ); } - /* - * We need to disable and flush the whole (L1 and L2) cache. - * Let's do it in the safest possible way i.e. with - * no memory access within the following sequence - * including the stack. - * - * Note: fp is preserved to the stack explicitly prior doing - * this since adding it to the clobber list is incompatible - * with having CONFIG_FRAME_POINTER=y. - */ - asm volatile( - "str fp, [sp, #-4]! \n\t" - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" - "bic r0, r0, #"__stringify(CR_C)" \n\t" - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" - "isb \n\t" - "bl v7_flush_dcache_all \n\t" - "clrex \n\t" - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" - "isb \n\t" - "dsb \n\t" - "ldr fp, [sp], #4" - : : : "r0","r1","r2","r3","r4","r5","r6","r7", - "r9","r10","lr","memory"); + v7_exit_coherency_flush(all); cci_disable_port_by_cpu(mpidr); @@ -197,26 +173,7 @@ static void tc2_pm_down(u64 residency) arch_spin_unlock(&tc2_pm_lock); - /* - * We need to disable and flush only the L1 cache. - * Let's do it in the safest possible way as above. - */ - asm volatile( - "str fp, [sp, #-4]! 
\n\t" - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" - "bic r0, r0, #"__stringify(CR_C)" \n\t" - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" - "isb \n\t" - "bl v7_flush_dcache_louis \n\t" - "clrex \n\t" - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" - "isb \n\t" - "dsb \n\t" - "ldr fp, [sp], #4" - : : : "r0","r1","r2","r3","r4","r5","r6","r7", - "r9","r10","lr","memory"); + v7_exit_coherency_flush(louis); } __mcpm_cpu_down(cpu, cluster); @@ -311,7 +268,7 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level) static int __init tc2_pm_init(void) { - int ret; + int ret, irq; void __iomem *scc; u32 a15_cluster_id, a7_cluster_id, sys_info; struct device_node *np; @@ -336,13 +293,15 @@ static int __init tc2_pm_init(void) tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf; tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf; + irq = irq_of_parse_and_map(np, 0); + /* * A subset of the SCC registers is also used to communicate * with the SPC (power controller). We need to be able to * drive it very early in the boot process to power up * processors, so we initialize the SPC driver here. */ - ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id); + ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq); if (ret) return ret; diff --git a/arch/arm/mach-w90x900/include/mach/gpio.h b/arch/arm/mach-w90x900/include/mach/gpio.h deleted file mode 100644 index 5385a420327..00000000000 --- a/arch/arm/mach-w90x900/include/mach/gpio.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * linux/arch/arm/mach-w90p910/include/mach/gpio.h - * - * Generic w90p910 GPIO handling - * - * Wan ZongShun <mcuos.com@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __ASM_ARCH_W90P910_GPIO_H -#define __ASM_ARCH_W90P910_GPIO_H - -#include <mach/hardware.h> -#include <asm/irq.h> - -static inline int gpio_to_irq(unsigned gpio) -{ - return gpio; -} -#define gpio_to_irq gpio_to_irq - -static inline int irq_to_gpio(unsigned irq) -{ - return irq; -} - -#endif diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c index 5f252569c68..9a7bd137c8f 100644 --- a/arch/arm/mach-zynq/common.c +++ b/arch/arm/mach-zynq/common.c @@ -44,6 +44,10 @@ static struct of_device_id zynq_of_bus_ids[] __initdata = { {} }; +static struct platform_device zynq_cpuidle_device = { + .name = "cpuidle-zynq", +}; + /** * zynq_init_machine - System specific initialization, intended to be * called from board specific initialization. @@ -56,6 +60,8 @@ static void __init zynq_init_machine(void) l2x0_of_init(0x02060000, 0xF0F0FFFF); of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL); + + platform_device_register(&zynq_cpuidle_device); } static void __init zynq_timer_init(void) diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index cd2c88e7a8f..1f8fed94c2a 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -952,3 +952,9 @@ config ARCH_HAS_BARRIERS help This option allows the use of custom mandatory barriers included via the mach/barriers.h file. + +config ARCH_SUPPORTS_BIG_ENDIAN + bool + help + This option specifies the architecture can support big endian + operation. 
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 80741992a9f..3815a8262af 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -38,9 +38,8 @@ ENTRY(v6_early_abort) bne do_DataAbort bic r1, r1, #1 << 11 @ clear bit 11 of FSR ldr r3, [r4] @ read aborted ARM instruction -#ifdef CONFIG_CPU_ENDIAN_BE8 - rev r3, r3 -#endif + ARM_BE8(rev r3, r3) + do_ldrd_abort tmp=ip, insn=r3 tst r3, #1 << 20 @ L = 0 -> write orreq r1, r1, #1 << 11 @ yes. diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 6f4585b8907..924036473b1 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -25,6 +25,7 @@ #include <asm/cp15.h> #include <asm/system_info.h> #include <asm/unaligned.h> +#include <asm/opcodes.h> #include "fault.h" @@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (thumb_mode(regs)) { u16 *ptr = (u16 *)(instrptr & ~1); fault = probe_kernel_address(ptr, tinstr); + tinstr = __mem_to_opcode_thumb16(tinstr); if (!fault) { if (cpu_architecture() >= CPU_ARCH_ARMv7 && IS_T32(tinstr)) { /* Thumb-2 32-bit */ u16 tinst2 = 0; fault = probe_kernel_address(ptr + 1, tinst2); - instr = (tinstr << 16) | tinst2; + tinst2 = __mem_to_opcode_thumb16(tinst2); + instr = __opcode_thumb32_compose(tinstr, tinst2); thumb2_32b = 1; } else { isize = 2; instr = thumb2arm(tinstr); } } - } else + } else { fault = probe_kernel_address(instrptr, instr); + instr = __mem_to_opcode_arm(instr); + } if (fault) { type = TYPE_FAULT; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1272ed202dd..79f8b39801a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops); static u64 get_coherent_dma_mask(struct device *dev) { - u64 mask = (u64)arm_dma_limit; + u64 mask = (u64)DMA_BIT_MASK(32); if (dev) { mask = dev->coherent_dma_mask; @@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev) return 0; } - if ((~mask) & (u64)arm_dma_limit) { - dev_warn(dev, "coherent DMA mask %#llx is smaller " - "than system GFP_DMA mask %#llx\n", - mask, (u64)arm_dma_limit); + /* + * If the mask allows for more memory than we can address, + * and we actually have that much memory, then fail the + * allocation. + */ + if (sizeof(mask) != sizeof(dma_addr_t) && + mask > (dma_addr_t)~0 && + dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) { + dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", + mask); + dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n"); + return 0; + } + + /* + * Now check that the mask, when translated to a PFN, + * fits within the allowable addresses which we can + * allocate. 
+ */ + if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) { + dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n", + mask, + dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1, + arm_dma_pfn_limit + 1); return 0; } } @@ -687,7 +707,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { - pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); + pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); void *memory; if (dma_alloc_from_coherent(dev, size, handle, &memory)) @@ -700,7 +720,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, static void *arm_coherent_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { - pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); + pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); void *memory; if (dma_alloc_from_coherent(dev, size, handle, &memory)) @@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, */ int dma_supported(struct device *dev, u64 mask) { - if (mask < (u64)arm_dma_limit) + unsigned long limit; + + /* + * If the mask allows for more memory than we can address, + * and we actually have that much memory, then we must + * indicate that DMA to this device is not supported. + */ + if (sizeof(mask) != sizeof(dma_addr_t) && + mask > (dma_addr_t)~0 && + dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) return 0; + + /* + * Translate the device's DMA mask to a PFN limit. This + * PFN number includes the page which we can DMA to. + */ + limit = dma_to_pfn(dev, mask); + + if (limit < arm_dma_pfn_limit) + return 0; + return 1; } EXPORT_SYMBOL(dma_supported); diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c index 9d285626bc7..312e15e6d00 100644 --- a/arch/arm/mm/extable.c +++ b/arch/arm/mm/extable.c @@ -9,8 +9,13 @@ int fixup_exception(struct pt_regs *regs) const struct exception_table_entry *fixup; fixup = search_exception_tables(instruction_pointer(regs)); - if (fixup) + if (fixup) { regs->ARM_pc = fixup->fixup; +#ifdef CONFIG_THUMB2_KERNEL + /* Clear the IT state to avoid nasty surprises in the fixup */ + regs->ARM_cpsr &= ~PSR_IT_MASK; +#endif + } return fixup != NULL; } diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 2a5907b5c8d..ff379ac115d 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -65,7 +65,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, return ret; } -#if USE_SPLIT_PTLOCKS +#if USE_SPLIT_PTE_PTLOCKS /* * If we are using split PTE locks, then we need to take the page * lock here. 
Otherwise we are using shared mm->page_table_lock @@ -84,10 +84,10 @@ static inline void do_pte_unlock(spinlock_t *ptl) { spin_unlock(ptl); } -#else /* !USE_SPLIT_PTLOCKS */ +#else /* !USE_SPLIT_PTE_PTLOCKS */ static inline void do_pte_lock(spinlock_t *ptl) {} static inline void do_pte_unlock(spinlock_t *ptl) {} -#endif /* USE_SPLIT_PTLOCKS */ +#endif /* USE_SPLIT_PTE_PTLOCKS */ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 83cb3ac2709..8e0e52eb76b 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -10,6 +10,7 @@ #include <asm/system_info.h> pgd_t *idmap_pgd; +phys_addr_t (*arch_virt_to_idmap) (unsigned long x); #ifdef CONFIG_ARM_LPAE static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, @@ -67,8 +68,9 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start, unsigned long addr, end; unsigned long next; - addr = virt_to_phys(text_start); - end = virt_to_phys(text_end); + addr = virt_to_idmap(text_start); + end = virt_to_idmap(text_end); + pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end); prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; @@ -90,8 +92,6 @@ static int __init init_static_idmap(void) if (!idmap_pgd) return -ENOMEM; - pr_info("Setting up static identity map for 0x%p - 0x%p\n", - __idmap_text_start, __idmap_text_end); identity_mapping_add(idmap_pgd, __idmap_text_start, __idmap_text_end, 0); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 18ec4c504ab..3e8f106ee5f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -76,14 +76,6 @@ static int __init parse_tag_initrd2(const struct tag *tag) __tagtable(ATAG_INITRD2, parse_tag_initrd2); -#ifdef CONFIG_OF_FLATTREE -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - phys_initrd_start = start; - phys_initrd_size = end - start; -} -#endif /* CONFIG_OF_FLATTREE */ - /* * This keeps memory configuration data used by a couple memory * initialization functions, as well as show_mem() for the skipping @@ -217,6 +209,7 @@ EXPORT_SYMBOL(arm_dma_zone_size); * so a successful GFP_DMA allocation will always satisfy this. */ phys_addr_t arm_dma_limit; +unsigned long arm_dma_pfn_limit; static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, unsigned long dma_size) @@ -239,6 +232,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc) arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; } else arm_dma_limit = 0xffffffff; + arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT; #endif } @@ -350,6 +344,11 @@ void __init arm_memblock_init(struct meminfo *mi, memblock_reserve(__pa(_stext), _end - _stext); #endif #ifdef CONFIG_BLK_DEV_INITRD + /* FDT scan will populate initrd_start */ + if (initrd_start) { + phys_initrd_start = __virt_to_phys(initrd_start); + phys_initrd_size = initrd_end - initrd_start; + } if (phys_initrd_size && !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", @@ -421,12 +420,10 @@ void __init bootmem_init(void) * This doesn't seem to be used by the Linux memory manager any * more, but is used by ll_rw_block. If we can get rid of it, we * also get rid of some of the stuff above as well. - * - * Note: max_low_pfn and max_pfn reflect the number of _pages_ in - * the system, not the maximum PFN. 
*/ - max_low_pfn = max_low - PHYS_PFN_OFFSET; - max_pfn = max_high - PHYS_PFN_OFFSET; + min_low_pfn = min; + max_low_pfn = max_low; + max_pfn = max_high; } /* @@ -532,7 +529,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end) static void __init free_highpages(void) { #ifdef CONFIG_HIGHMEM - unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET; + unsigned long max_low = max_low_pfn; struct memblock_region *mem, *res; /* set highmem page free */ diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index d5a4e9ad8f0..d5a982d15a8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm); #ifdef CONFIG_ZONE_DMA extern phys_addr_t arm_dma_limit; +extern unsigned long arm_dma_pfn_limit; #else #define arm_dma_limit ((phys_addr_t)~0) +#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT) #endif extern phys_addr_t arm_lowmem_limit; diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 0c6356255fe..d27158c38eb 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size) } /* - * We don't use supersection mappings for mmap() on /dev/mem, which - * means that we can't map the memory area above the 4G barrier into - * userspace. + * Do not allow /dev/mem mappings beyond the supported physical range. */ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) { - return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); + return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT)); } #ifdef CONFIG_STRICT_DEVMEM diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b1d17eeb59b..78eeeca78f5 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -28,6 +28,8 @@ #include <asm/highmem.h> #include <asm/system_info.h> #include <asm/traps.h> +#include <asm/procinfo.h> +#include <asm/memory.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> @@ -1315,6 +1317,86 @@ static void __init map_lowmem(void) } } +#ifdef CONFIG_ARM_LPAE +/* + * early_paging_init() recreates boot time page table setup, allowing machines + * to switch over to a high (>4G) address space on LPAE systems + */ +void __init early_paging_init(const struct machine_desc *mdesc, + struct proc_info_list *procinfo) +{ + pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags; + unsigned long map_start, map_end; + pgd_t *pgd0, *pgdk; + pud_t *pud0, *pudk, *pud_start; + pmd_t *pmd0, *pmdk; + phys_addr_t phys; + int i; + + if (!(mdesc->init_meminfo)) + return; + + /* remap kernel code and data */ + map_start = init_mm.start_code; + map_end = init_mm.brk; + + /* get a handle on things... */ + pgd0 = pgd_offset_k(0); + pud_start = pud0 = pud_offset(pgd0, 0); + pmd0 = pmd_offset(pud0, 0); + + pgdk = pgd_offset_k(map_start); + pudk = pud_offset(pgdk, map_start); + pmdk = pmd_offset(pudk, map_start); + + mdesc->init_meminfo(); + + /* Run the patch stub to update the constants */ + fixup_pv_table(&__pv_table_begin, + (&__pv_table_end - &__pv_table_begin) << 2); + + /* + * Cache cleaning operations for self-modifying code + * We should clean the entries by MVA but running a + * for loop over every pv_table entry pointer would + * just complicate the code. 
+ */ + flush_cache_louis(); + dsb(); + isb(); + + /* remap level 1 table */ + for (i = 0; i < PTRS_PER_PGD; pud0++, i++) { + set_pud(pud0, + __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); + pmd0 += PTRS_PER_PMD; + } + + /* remap pmds for kernel mapping */ + phys = __pa(map_start) & PMD_MASK; + do { + *pmdk++ = __pmd(phys | pmdprot); + phys += PMD_SIZE; + } while (phys < map_end); + + flush_cache_all(); + cpu_switch_mm(pgd0, &init_mm); + cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET); + local_flush_bp_all(); + local_flush_tlb_all(); +} + +#else + +void __init early_paging_init(const struct machine_desc *mdesc, + struct proc_info_list *procinfo) +{ + if (mdesc->init_meminfo) + mdesc->init_meminfo(); +} + +#endif + /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 34d4ab217ba..5c668b7a31f 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -296,6 +296,15 @@ void __init sanity_check_meminfo(void) } /* + * early_paging_init() recreates boot time page table setup, allowing machines + * to switch over to a high (>4G) address space on LPAE systems + */ +void __init early_paging_init(const struct machine_desc *mdesc, + struct proc_info_list *procinfo) +{ +} + +/* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 1128064fddc..45dc29f85d5 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -220,9 +220,7 @@ __v6_setup: #endif /* CONFIG_MMU */ adr r5, v6_crval ldmia r5, {r5, r6} -#ifdef CONFIG_CPU_ENDIAN_BE8 - orr r6, r6, #1 << 25 @ big-endian page tables -#endif + ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables mrc p15, 0, r0, c1, c0, 0 @ read control register bic r0, r0, r5 @ clear bits them orr r0, r0, r6 @ set them diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index c63d9bdee51..60920f62fdf 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -367,9 +367,7 @@ __v7_setup: #endif adr r5, v7_crval ldmia r5, {r5, r6} -#ifdef CONFIG_CPU_ENDIAN_BE8 - orr r6, r6, #1 << 25 @ big-endian page tables -#endif + ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables #ifdef CONFIG_SWP_EMULATE orr r5, r5, #(1 << 10) @ set SW bit in "clear" bic r6, r6, #(1 << 10) @ clear it in "mmuset" diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 99b44e0e8d8..9ed155ad0f9 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -19,6 +19,7 @@ #include <linux/if_vlan.h> #include <asm/cacheflush.h> #include <asm/hwcap.h> +#include <asm/opcodes.h> #include "bpf_jit_32.h" @@ -113,8 +114,11 @@ static u32 jit_udiv(u32 dividend, u32 divisor) static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx) { + inst |= (cond << 28); + inst = __opcode_to_mem_arm(inst); + if (ctx->target != NULL) - ctx->target[ctx->idx] = inst | (cond << 28); + ctx->target[ctx->idx] = inst; ctx->idx++; } diff --git a/arch/arm/plat-iop/Makefile b/arch/arm/plat-iop/Makefile index a99dc15a70f..224e56c6049 100644 --- a/arch/arm/plat-iop/Makefile +++ b/arch/arm/plat-iop/Makefile @@ -5,7 +5,6 @@ obj-y := # IOP32X -obj-$(CONFIG_ARCH_IOP32X) += gpio.o obj-$(CONFIG_ARCH_IOP32X) += i2c.o obj-$(CONFIG_ARCH_IOP32X) += pci.o obj-$(CONFIG_ARCH_IOP32X) += setup.o @@ -16,7 +15,6 @@ obj-$(CONFIG_ARCH_IOP32X) += pmu.o obj-$(CONFIG_ARCH_IOP32X) += restart.o # IOP33X 
-obj-$(CONFIG_ARCH_IOP33X) += gpio.o obj-$(CONFIG_ARCH_IOP33X) += i2c.o obj-$(CONFIG_ARCH_IOP33X) += pci.o obj-$(CONFIG_ARCH_IOP33X) += setup.o diff --git a/arch/arm/plat-iop/gpio.c b/arch/arm/plat-iop/gpio.c deleted file mode 100644 index 697de6dc493..00000000000 --- a/arch/arm/plat-iop/gpio.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * arch/arm/plat-iop/gpio.c - * GPIO handling for Intel IOP3xx processors. - * - * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - */ - -#include <linux/device.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/gpio.h> -#include <linux/export.h> -#include <asm/hardware/iop3xx.h> -#include <mach/gpio.h> - -void gpio_line_config(int line, int direction) -{ - unsigned long flags; - - local_irq_save(flags); - if (direction == GPIO_IN) { - *IOP3XX_GPOE |= 1 << line; - } else if (direction == GPIO_OUT) { - *IOP3XX_GPOE &= ~(1 << line); - } - local_irq_restore(flags); -} -EXPORT_SYMBOL(gpio_line_config); - -int gpio_line_get(int line) -{ - return !!(*IOP3XX_GPID & (1 << line)); -} -EXPORT_SYMBOL(gpio_line_get); - -void gpio_line_set(int line, int value) -{ - unsigned long flags; - - local_irq_save(flags); - if (value == GPIO_LOW) { - *IOP3XX_GPOD &= ~(1 << line); - } else if (value == GPIO_HIGH) { - *IOP3XX_GPOD |= 1 << line; - } - local_irq_restore(flags); -} -EXPORT_SYMBOL(gpio_line_set); - -static int iop3xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) -{ - gpio_line_config(gpio, GPIO_IN); - return 0; -} - -static int iop3xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level) -{ - gpio_line_set(gpio, level); - gpio_line_config(gpio, GPIO_OUT); - return 0; -} - -static int iop3xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio) -{ - return gpio_line_get(gpio); -} - -static void iop3xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) -{ - gpio_line_set(gpio, value); -} - -static struct gpio_chip iop3xx_chip = { - .label = "iop3xx", - .direction_input = iop3xx_gpio_direction_input, - .get = iop3xx_gpio_get_value, - .direction_output = iop3xx_gpio_direction_output, - .set = iop3xx_gpio_set_value, - .base = 0, - .ngpio = IOP3XX_N_GPIOS, -}; - -static int __init iop3xx_gpio_setup(void) -{ - return gpiochip_add(&iop3xx_chip); -} -arch_initcall(iop3xx_gpio_setup); diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c index d51f9565567..be4ad0b21c0 100644 --- a/arch/arm/plat-samsung/dev-backlight.c +++ b/arch/arm/plat-samsung/dev-backlight.c @@ -70,6 +70,7 @@ static struct samsung_bl_drvdata samsung_dfl_bl_data __initdata = { .max_brightness = 255, .dft_brightness = 255, .pwm_period_ns = 78770, + .enable_gpio = -1, .init = samsung_bl_init, .exit = samsung_bl_exit, }, @@ -121,6 +122,10 @@ void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info, samsung_bl_data->lth_brightness = bl_data->lth_brightness; if (bl_data->pwm_period_ns) samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns; + if (bl_data->enable_gpio >= 0) + samsung_bl_data->enable_gpio = bl_data->enable_gpio; + if (bl_data->enable_gpio_flags) + samsung_bl_data->enable_gpio_flags = bl_data->enable_gpio_flags; if (bl_data->init) samsung_bl_data->init = bl_data->init; if (bl_data->notify) diff --git 
a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S index 2677bc3762d..40f27e52de7 100644 --- a/arch/arm/plat-versatile/headsmp.S +++ b/arch/arm/plat-versatile/headsmp.S @@ -10,6 +10,7 @@ */ #include <linux/linkage.h> #include <linux/init.h> +#include <asm/assembler.h> /* * Realview/Versatile Express specific entry point for secondary CPUs. @@ -17,6 +18,7 @@ * until we're ready for them to initialise. */ ENTRY(versatile_secondary_startup) + ARM_BE8(setend be) mrc p15, 0, r0, c0, c0, 5 bic r0, #0xff000000 adr r4, 1f diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 52b8f40b1c7..2f37e1d6cb4 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -642,9 +642,9 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp, static int vfp_hotplug(struct notifier_block *b, unsigned long action, void *hcpu) { - if (action == CPU_DYING || action == CPU_DYING_FROZEN) { - vfp_force_reload((long)hcpu, current_thread_info()); - } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + if (action == CPU_DYING || action == CPU_DYING_FROZEN) + vfp_current_hw_state[(long)hcpu] = NULL; + else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) vfp_enable(NULL); return NOTIFY_OK; } diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile index 43841033afd..12969523414 100644 --- a/arch/arm/xen/Makefile +++ b/arch/arm/xen/Makefile @@ -1 +1 @@ -obj-y := enlighten.o hypercall.o grant-table.o +obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c new file mode 100644 index 00000000000..b0e77de9914 --- /dev/null +++ b/arch/arm/xen/mm.c @@ -0,0 +1,65 @@ +#include <linux/bootmem.h> +#include <linux/gfp.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> +#include <linux/swiotlb.h> + +#include <xen/xen.h> +#include <xen/interface/memory.h> +#include <xen/swiotlb-xen.h> + +#include <asm/cacheflush.h> +#include <asm/xen/page.h> +#include <asm/xen/hypercall.h> +#include <asm/xen/interface.h> + +int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, + unsigned int address_bits, + dma_addr_t *dma_handle) +{ + if (!xen_initial_domain()) + return -EINVAL; + + /* we assume that dom0 is mapped 1:1 for now */ + *dma_handle = pstart; + return 0; +} +EXPORT_SYMBOL_GPL(xen_create_contiguous_region); + +void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) +{ + return; +} +EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); + +struct dma_map_ops *xen_dma_ops; +EXPORT_SYMBOL_GPL(xen_dma_ops); + +static struct dma_map_ops xen_swiotlb_dma_ops = { + .mapping_error = xen_swiotlb_dma_mapping_error, + .alloc = xen_swiotlb_alloc_coherent, + .free = xen_swiotlb_free_coherent, + .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, + .sync_single_for_device = xen_swiotlb_sync_single_for_device, + .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, + .sync_sg_for_device = xen_swiotlb_sync_sg_for_device, + .map_sg = xen_swiotlb_map_sg_attrs, + .unmap_sg = xen_swiotlb_unmap_sg_attrs, + .map_page = xen_swiotlb_map_page, + .unmap_page = xen_swiotlb_unmap_page, + .dma_supported = xen_swiotlb_dma_supported, + .set_dma_mask = xen_swiotlb_set_dma_mask, +}; + +int __init xen_mm_init(void) +{ + if (!xen_initial_domain()) + return 0; + xen_swiotlb_init(1, false); + xen_dma_ops = &xen_swiotlb_dma_ops; + return 0; +} +arch_initcall(xen_mm_init); diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c 
new file mode 100644 index 00000000000..23732cdff55 --- /dev/null +++ b/arch/arm/xen/p2m.c @@ -0,0 +1,208 @@ +#include <linux/bootmem.h> +#include <linux/gfp.h> +#include <linux/export.h> +#include <linux/rwlock.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> +#include <linux/swiotlb.h> + +#include <xen/xen.h> +#include <xen/interface/memory.h> +#include <xen/swiotlb-xen.h> + +#include <asm/cacheflush.h> +#include <asm/xen/page.h> +#include <asm/xen/hypercall.h> +#include <asm/xen/interface.h> + +struct xen_p2m_entry { + unsigned long pfn; + unsigned long mfn; + unsigned long nr_pages; + struct rb_node rbnode_mach; + struct rb_node rbnode_phys; +}; + +rwlock_t p2m_lock; +struct rb_root phys_to_mach = RB_ROOT; +static struct rb_root mach_to_phys = RB_ROOT; + +static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) +{ + struct rb_node **link = &phys_to_mach.rb_node; + struct rb_node *parent = NULL; + struct xen_p2m_entry *entry; + int rc = 0; + + while (*link) { + parent = *link; + entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); + + if (new->mfn == entry->mfn) + goto err_out; + if (new->pfn == entry->pfn) + goto err_out; + + if (new->pfn < entry->pfn) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + rb_link_node(&new->rbnode_phys, parent, link); + rb_insert_color(&new->rbnode_phys, &phys_to_mach); + goto out; + +err_out: + rc = -EINVAL; + pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n", + __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); +out: + return rc; +} + +unsigned long __pfn_to_mfn(unsigned long pfn) +{ + struct rb_node *n = phys_to_mach.rb_node; + struct xen_p2m_entry *entry; + unsigned long irqflags; + + read_lock_irqsave(&p2m_lock, irqflags); + while (n) { + entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); + if (entry->pfn <= pfn && + entry->pfn + entry->nr_pages > pfn) { + read_unlock_irqrestore(&p2m_lock, irqflags); + return entry->mfn + (pfn - entry->pfn); + } + if (pfn < entry->pfn) + n = n->rb_left; + else + n = n->rb_right; + } + read_unlock_irqrestore(&p2m_lock, irqflags); + + return INVALID_P2M_ENTRY; +} +EXPORT_SYMBOL_GPL(__pfn_to_mfn); + +static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new) +{ + struct rb_node **link = &mach_to_phys.rb_node; + struct rb_node *parent = NULL; + struct xen_p2m_entry *entry; + int rc = 0; + + while (*link) { + parent = *link; + entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach); + + if (new->mfn == entry->mfn) + goto err_out; + if (new->pfn == entry->pfn) + goto err_out; + + if (new->mfn < entry->mfn) + link = &(*link)->rb_left; + else + link = &(*link)->rb_right; + } + rb_link_node(&new->rbnode_mach, parent, link); + rb_insert_color(&new->rbnode_mach, &mach_to_phys); + goto out; + +err_out: + rc = -EINVAL; + pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n", + __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); +out: + return rc; +} + +unsigned long __mfn_to_pfn(unsigned long mfn) +{ + struct rb_node *n = mach_to_phys.rb_node; + struct xen_p2m_entry *entry; + unsigned long irqflags; + + read_lock_irqsave(&p2m_lock, irqflags); + while (n) { + entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach); + if (entry->mfn <= mfn && + entry->mfn + entry->nr_pages > mfn) { + read_unlock_irqrestore(&p2m_lock, irqflags); + return entry->pfn + (mfn - entry->mfn); + } + if (mfn < entry->mfn) + n = n->rb_left; + else + n = n->rb_right; + } + 
read_unlock_irqrestore(&p2m_lock, irqflags); + + return INVALID_P2M_ENTRY; +} +EXPORT_SYMBOL_GPL(__mfn_to_pfn); + +bool __set_phys_to_machine_multi(unsigned long pfn, + unsigned long mfn, unsigned long nr_pages) +{ + int rc; + unsigned long irqflags; + struct xen_p2m_entry *p2m_entry; + struct rb_node *n = phys_to_mach.rb_node; + + if (mfn == INVALID_P2M_ENTRY) { + write_lock_irqsave(&p2m_lock, irqflags); + while (n) { + p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); + if (p2m_entry->pfn <= pfn && + p2m_entry->pfn + p2m_entry->nr_pages > pfn) { + rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys); + rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach); + write_unlock_irqrestore(&p2m_lock, irqflags); + kfree(p2m_entry); + return true; + } + if (pfn < p2m_entry->pfn) + n = n->rb_left; + else + n = n->rb_right; + } + write_unlock_irqrestore(&p2m_lock, irqflags); + return true; + } + + p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT); + if (!p2m_entry) { + pr_warn("cannot allocate xen_p2m_entry\n"); + return false; + } + p2m_entry->pfn = pfn; + p2m_entry->nr_pages = nr_pages; + p2m_entry->mfn = mfn; + + write_lock_irqsave(&p2m_lock, irqflags); + if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) || + (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) { + write_unlock_irqrestore(&p2m_lock, irqflags); + return false; + } + write_unlock_irqrestore(&p2m_lock, irqflags); + return true; +} +EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi); + +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + return __set_phys_to_machine_multi(pfn, mfn, 1); +} +EXPORT_SYMBOL_GPL(__set_phys_to_machine); + +int p2m_init(void) +{ + rwlock_init(&p2m_lock); + return 0; +} +arch_initcall(p2m_init); diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index bb0bf1bfc05..88c8b6c1341 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -143,7 +143,6 @@ config CPU_BIG_ENDIAN config SMP bool "Symmetric Multi-Processing" - select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you say N here, the kernel will run on single and @@ -221,6 +220,7 @@ config XEN_DOM0 config XEN bool "Xen guest support on ARM64 (EXPERIMENTAL)" depends on ARM64 && OF + select SWIOTLB_XEN help Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64. 
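For reference, the new arch/arm/xen/p2m.c above exposes a small pfn/mfn translation API backed by two rb-trees (phys_to_mach and mach_to_phys). A minimal usage sketch, with a hypothetical caller name and not part of the patch, assuming the helpers are declared via asm/xen/page.h:

	#include <linux/printk.h>
	#include <asm/xen/page.h>	/* assumed to declare __pfn_to_mfn(), __set_phys_to_machine(), INVALID_P2M_ENTRY */

	/* Hypothetical example: record a pfn -> mfn mapping, translate it, then drop it. */
	static void p2m_usage_sketch(unsigned long pfn, unsigned long mfn)
	{
		/* Insert a single-page entry into both rb-trees. */
		if (!__set_phys_to_machine(pfn, mfn))
			pr_warn("p2m sketch: could not record pfn 0x%lx\n", pfn);

		/* Translate back; INVALID_P2M_ENTRY means no mapping is known for this pfn. */
		if (__pfn_to_mfn(pfn) == INVALID_P2M_ENTRY)
			pr_warn("p2m sketch: pfn 0x%lx has no machine frame\n", pfn);

		/* Passing INVALID_P2M_ENTRY as the mfn removes the entry again. */
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}

The swiotlb-xen and grant-table paths are the intended real consumers; the sketch only illustrates the insert/lookup/remove cycle implemented by the rb-tree code above.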
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi index bfdc5783492..d37d7369e26 100644 --- a/arch/arm64/boot/dts/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm-storm.dtsi @@ -103,6 +103,81 @@ #size-cells = <2>; ranges; + clocks { + #address-cells = <2>; + #size-cells = <2>; + ranges; + refclk: refclk { + compatible = "fixed-clock"; + #clock-cells = <1>; + clock-frequency = <100000000>; + clock-output-names = "refclk"; + }; + + pcppll: pcppll@17000100 { + compatible = "apm,xgene-pcppll-clock"; + #clock-cells = <1>; + clocks = <&refclk 0>; + clock-names = "pcppll"; + reg = <0x0 0x17000100 0x0 0x1000>; + clock-output-names = "pcppll"; + type = <0>; + }; + + socpll: socpll@17000120 { + compatible = "apm,xgene-socpll-clock"; + #clock-cells = <1>; + clocks = <&refclk 0>; + clock-names = "socpll"; + reg = <0x0 0x17000120 0x0 0x1000>; + clock-output-names = "socpll"; + type = <1>; + }; + + socplldiv2: socplldiv2 { + compatible = "fixed-factor-clock"; + #clock-cells = <1>; + clocks = <&socpll 0>; + clock-names = "socplldiv2"; + clock-mult = <1>; + clock-div = <2>; + clock-output-names = "socplldiv2"; + }; + + qmlclk: qmlclk { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + clock-names = "qmlclk"; + reg = <0x0 0x1703C000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "qmlclk"; + }; + + ethclk: ethclk { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&socplldiv2 0>; + clock-names = "ethclk"; + reg = <0x0 0x17000000 0x0 0x1000>; + reg-names = "div-reg"; + divider-offset = <0x238>; + divider-width = <0x9>; + divider-shift = <0x0>; + clock-output-names = "ethclk"; + }; + + eth8clk: eth8clk { + compatible = "apm,xgene-device-clock"; + #clock-cells = <1>; + clocks = <&ethclk 0>; + clock-names = "eth8clk"; + reg = <0x0 0x1702C000 0x0 0x1000>; + reg-names = "csr-reg"; + clock-output-names = "eth8clk"; + }; + }; + serial0: serial@1c020000 { device_type = "serial"; compatible = "ns16550"; diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 83636446857..01de5aaa3ed 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -126,20 +126,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) return oldval; } -static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) -{ - unsigned long tmp, tmp2; - - asm volatile("// atomic_clear_mask\n" -"1: ldxr %0, %2\n" -" bic %0, %0, %3\n" -" stxr %w1, %0, %2\n" -" cbnz %w1, 1b" - : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr) - : "Ir" (mask) - : "cc"); -} - #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) static inline int __atomic_add_unless(atomic_t *v, int a, int u) diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 8d1810001ae..fd0c0c0e447 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -23,11 +23,15 @@ #include <asm-generic/dma-coherent.h> +#include <xen/xen.h> +#include <asm/xen/hypervisor.h> + #define ARCH_HAS_DMA_GET_REQUIRED_MASK +#define DMA_ERROR_CODE (~(dma_addr_t)0) extern struct dma_map_ops *dma_ops; -static inline struct dma_map_ops *get_dma_ops(struct device *dev) +static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) { if (unlikely(!dev) || !dev->archdata.dma_ops) return dma_ops; @@ -35,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) return dev->archdata.dma_ops; } +static inline struct dma_map_ops 
*get_dma_ops(struct device *dev) +{ + if (xen_initial_domain()) + return xen_dma_ops; + else + return __generic_dma_ops(dev); +} + #include <asm-generic/dma-mapping-common.h> static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index b56e5b5df88..4cc813eddac 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -22,11 +22,14 @@ #ifdef __KERNEL__ #include <linux/types.h> +#include <linux/blk_types.h> #include <asm/byteorder.h> #include <asm/barrier.h> #include <asm/pgtable.h> +#include <xen/xen.h> + /* * Generic IO read/write. These perform native-endian accesses. */ @@ -263,5 +266,12 @@ extern int devmem_is_allowed(unsigned long pfn); */ #define xlate_dev_kmem_ptr(p) p +struct bio_vec; +extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, + const struct bio_vec *vec2); +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ + (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) + #endif /* __KERNEL__ */ #endif /* __ASM_IO_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index a5f28e2720c..c98ef4771c7 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -63,6 +63,7 @@ * TAC: Trap ACTLR * TSC: Trap SMC * TSW: Trap cache operations by set/way + * TWE: Trap WFE * TWI: Trap WFI * TIDCP: Trap L2CTLR/L2ECTLR * BSU_IS: Upgrade barriers to the inner shareable domain @@ -72,8 +73,9 @@ * FMO: Override CPSR.F and enable signaling with VF * SWIO: Turn set/way invalidates into set/way clean+invalidate */ -#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ - HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ +#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ + HCR_BSU_IS | HCR_FB | HCR_TAC | \ + HCR_AMO | HCR_IMO | HCR_FMO | \ HCR_SWIO | HCR_TIDCP | HCR_RW) #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) @@ -242,4 +244,6 @@ #define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10 +#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0) + #endif /* __ARM64_KVM_ARM_H__ */ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index eec07387521..dd8ecfc3f99 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -177,4 +177,65 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; } +static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) +{ + return vcpu_sys_reg(vcpu, MPIDR_EL1); +} + +static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) +{ + if (vcpu_mode_is_32bit(vcpu)) + *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT; + else + vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25); +} + +static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) +{ + if (vcpu_mode_is_32bit(vcpu)) + return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT); + + return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); +} + +static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, + unsigned long data, + unsigned int len) +{ + if (kvm_vcpu_is_be(vcpu)) { + switch (len) { + case 1: + return data & 0xff; + case 2: + return be16_to_cpu(data & 0xffff); + case 4: + return be32_to_cpu(data & 0xffffffff); + default: + return be64_to_cpu(data); + } + } + + return data; /* Leave LE untouched */ +} + +static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, + unsigned long data, + unsigned int len) +{ + if 
(kvm_vcpu_is_be(vcpu)) { + switch (len) { + case 1: + return data & 0xff; + case 2: + return cpu_to_be16(data & 0xffff); + case 4: + return cpu_to_be32(data & 0xffffffff); + default: + return cpu_to_be64(data); + } + } + + return data; /* Leave LE untouched */ +} + #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 0859a4ddd1e..5d85a02d123 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -36,11 +36,6 @@ #define KVM_VCPU_MAX_FEATURES 2 -/* We don't currently support large pages. */ -#define KVM_HPAGE_GFN_SHIFT(x) 0 -#define KVM_NR_PAGE_SIZES 1 -#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) - struct kvm_vcpu; int kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); @@ -151,6 +146,7 @@ struct kvm_vcpu_stat { struct kvm_vcpu_init; int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init); +int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); struct kvm_one_reg; diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index efe609c6a3c..680f74e6749 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -91,6 +91,7 @@ int kvm_mmu_init(void); void kvm_clear_hyp_idmap(void); #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) +#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) static inline bool kvm_is_write_fault(unsigned long esr) { @@ -116,13 +117,18 @@ static inline void kvm_set_s2pte_writable(pte_t *pte) pte_val(*pte) |= PTE_S2_RDWR; } +static inline void kvm_set_s2pmd_writable(pmd_t *pmd) +{ + pmd_val(*pmd) |= PMD_S2_RDWR; +} + struct kvm; -static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva, + unsigned long size) { if (!icache_is_aliasing()) { /* PIPT */ - unsigned long hva = gfn_to_hva(kvm, gfn); - flush_icache_range(hva, hva + PAGE_SIZE); + flush_icache_range(hva, hva + size); } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ /* any kind of VIPT cache */ __flush_icache_all(); diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index f214069ec5d..9bea6e74a00 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -63,9 +63,12 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) struct page *pte; pte = alloc_pages(PGALLOC_GFP, 0); - if (pte) - pgtable_page_ctor(pte); - + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index d57e66845c8..755f8614332 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -85,6 +85,8 @@ #define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */ #define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */ +#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */ + /* * Memory Attribute override for Stage-2 (MemAttr[3:0]) */ diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h deleted file mode 100644 index 68b90e68295..00000000000 --- a/arch/arm64/include/asm/prom.h +++ /dev/null @@ -1 +0,0 @@ -/* Empty for now */ diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h new file mode 100644 index 
00000000000..2820f1a6eeb --- /dev/null +++ b/arch/arm64/include/asm/xen/page-coherent.h @@ -0,0 +1,47 @@ +#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H +#define _ASM_ARM64_XEN_PAGE_COHERENT_H + +#include <asm/page.h> +#include <linux/dma-attrs.h> +#include <linux/dma-mapping.h> + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) +{ + return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); +} + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); +} + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); +} + +static inline void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); +} +#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index cbfacf7fb43..6a0a9b132d7 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -27,7 +27,6 @@ #include <linux/uaccess.h> #include <asm/debug-monitors.h> -#include <asm/local.h> #include <asm/cputype.h> #include <asm/system_misc.h> @@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable); * Keep track of debug users on each core. * The ref counts are per-cpu so we use a local_t type. 
*/ -static DEFINE_PER_CPU(local_t, mde_ref_count); -static DEFINE_PER_CPU(local_t, kde_ref_count); +static DEFINE_PER_CPU(int, mde_ref_count); +static DEFINE_PER_CPU(int, kde_ref_count); void enable_debug_monitors(enum debug_el el) { @@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el) WARN_ON(preemptible()); - if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1) + if (this_cpu_inc_return(mde_ref_count) == 1) enable = DBG_MDSCR_MDE; if (el == DBG_ACTIVE_EL1 && - local_inc_return(&__get_cpu_var(kde_ref_count)) == 1) + this_cpu_inc_return(kde_ref_count) == 1) enable |= DBG_MDSCR_KDE; if (enable && debug_enabled) { @@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el) WARN_ON(preemptible()); - if (local_dec_and_test(&__get_cpu_var(mde_ref_count))) + if (this_cpu_dec_return(mde_ref_count) == 0) disable = ~DBG_MDSCR_MDE; if (el == DBG_ACTIVE_EL1 && - local_dec_and_test(&__get_cpu_var(kde_ref_count))) + this_cpu_dec_return(kde_ref_count) == 0) disable &= ~DBG_MDSCR_KDE; if (disable) { diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 329218ca9ff..ff516f6691e 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp) /* Breakpoint */ ctrl_reg = AARCH64_DBG_REG_BCR; val_reg = AARCH64_DBG_REG_BVR; - slots = __get_cpu_var(bp_on_reg); + slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; reg_enable = !debug_info->bps_disabled; } else { /* Watchpoint */ ctrl_reg = AARCH64_DBG_REG_WCR; val_reg = AARCH64_DBG_REG_WVR; - slots = __get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; reg_enable = !debug_info->wps_disabled; } @@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { /* Breakpoint */ base = AARCH64_DBG_REG_BCR; - slots = __get_cpu_var(bp_on_reg); + slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; } else { /* Watchpoint */ base = AARCH64_DBG_REG_WCR; - slots = __get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; } @@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable) switch (reg) { case AARCH64_DBG_REG_BCR: - slots = __get_cpu_var(bp_on_reg); + slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; break; case AARCH64_DBG_REG_WCR: - slots = __get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; break; default: @@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr, struct debug_info *debug_info; struct arch_hw_breakpoint_ctrl ctrl; - slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + slots = this_cpu_ptr(bp_on_reg); addr = instruction_pointer(regs); debug_info = &current->thread.debug; @@ -596,7 +596,7 @@ unlock: user_enable_single_step(current); } else { toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0); - kernel_step = &__get_cpu_var(stepping_kernel_bp); + kernel_step = this_cpu_ptr(&stepping_kernel_bp); if (*kernel_step != ARM_KERNEL_STEP_NONE) return 0; @@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; - slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + slots = this_cpu_ptr(wp_on_reg); debug_info = &current->thread.debug; for (i = 0; i < core_num_wrps; ++i) { @@ -698,7 +698,7 @@ unlock: user_enable_single_step(current); } else { 
toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0); - kernel_step = &__get_cpu_var(stepping_kernel_bp); + kernel_step = this_cpu_ptr(&stepping_kernel_bp); if (*kernel_step != ARM_KERNEL_STEP_NONE) return 0; @@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs) struct debug_info *debug_info = &current->thread.debug; int handled_exception = 0, *kernel_step; - kernel_step = &__get_cpu_var(stepping_kernel_bp); + kernel_step = this_cpu_ptr(&stepping_kernel_bp); /* * Called from single-step exception handler. diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 2c28a6cf93e..e2ad0d87721 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -29,7 +29,7 @@ void *module_alloc(unsigned long size) { return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL_EXEC, -1, + GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, __builtin_return_address(0)); } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 5d14470452a..0e63c98d224 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) */ regs = get_irq_regs(); - cpuc = &__get_cpu_var(cpu_hw_events); + cpuc = this_cpu_ptr(&cpu_hw_events); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; @@ -1258,7 +1258,7 @@ device_initcall(register_pmu_driver); static struct pmu_hw_events *armpmu_get_cpu_events(void) { - return &__get_cpu_var(cpu_hw_events); + return this_cpu_ptr(&cpu_hw_events); } static void __init cpu_pmu_init(struct arm_pmu *armpmu) diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 47905598d79..0bc5e4cbc01 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -140,70 +140,18 @@ static void __init setup_processor(void) static void __init setup_machine_fdt(phys_addr_t dt_phys) { - struct boot_param_header *devtree; - unsigned long dt_root; - - /* Check we have a non-NULL DT pointer */ - if (!dt_phys) { - early_print("\n" - "Error: NULL or invalid device tree blob\n" - "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" - "\nPlease check your bootloader.\n"); - - while (true) - cpu_relax(); - - } - - devtree = phys_to_virt(dt_phys); - - /* Check device tree validity */ - if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) { + if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) { early_print("\n" "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" - "Expected 0x%x, found 0x%x\n" + "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" "\nPlease check your bootloader.\n", - dt_phys, devtree, OF_DT_HEADER, - be32_to_cpu(devtree->magic)); + dt_phys, phys_to_virt(dt_phys)); while (true) cpu_relax(); } - initial_boot_params = devtree; - dt_root = of_get_flat_dt_root(); - - machine_name = of_get_flat_dt_prop(dt_root, "model", NULL); - if (!machine_name) - machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL); - if (!machine_name) - machine_name = "<unknown>"; - pr_info("Machine: %s\n", machine_name); - - /* Retrieve various information from the /chosen node */ - of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); - /* Initialize {size,address}-cells info */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - /* Setup memory, calling early_init_dt_add_memory_arch */ - of_scan_flat_dt(early_init_dt_scan_memory, NULL); -} - 
-void __init early_init_dt_add_memory_arch(u64 base, u64 size) -{ - base &= PAGE_MASK; - size &= PAGE_MASK; - if (base + size < PHYS_OFFSET) { - pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", - base, base + size); - return; - } - if (base < PHYS_OFFSET) { - pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", - base, PHYS_OFFSET); - size -= PHYS_OFFSET - base; - base = PHYS_OFFSET; - } - memblock_add(base, size); + machine_name = of_flat_dt_get_machine_name(); } /* diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index e51bbe79f5b..b3fc9f5ec6d 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -122,7 +122,7 @@ static inline int get_sigset_t(sigset_t *set, return 0; } -int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err; diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 21e90820bd2..4480ab339a0 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -21,6 +21,7 @@ config KVM select MMU_NOTIFIER select PREEMPT_NOTIFIERS select ANON_INODES + select HAVE_KVM_CPU_RELAX_INTERCEPT select KVM_MMIO select KVM_ARM_HOST select KVM_ARM_VGIC diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 2c3ff67a8ec..3f0731e5327 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -248,6 +248,26 @@ int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, return kvm_reset_vcpu(vcpu); } +int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init) +{ + int target = kvm_target_cpu(); + + if (target < 0) + return -ENODEV; + + memset(init, 0, sizeof(*init)); + + /* + * For now, we don't return any features. + * In future, we might use features to return target + * specific features available for the preferred + * target type. + */ + init->target = (__u32)target; + + return 0; +} + int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -EINVAL; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 9beaca03343..8da56067c30 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -47,21 +47,29 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) } /** - * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest + * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event + * instruction executed by a guest + * * @vcpu: the vcpu pointer * - * Simply call kvm_vcpu_block(), which will halt execution of + * WFE: Yield the CPU and come back to this vcpu when the scheduler + * decides to. + * WFI: Simply call kvm_vcpu_block(), which will halt execution of * world-switches and schedule other host processes until there is an * incoming IRQ or FIQ to the VM. 
*/ -static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) +static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) { - kvm_vcpu_block(vcpu); + if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE) + kvm_vcpu_on_spin(vcpu); + else + kvm_vcpu_block(vcpu); + return 1; } static exit_handle_fn arm_exit_handlers[] = { - [ESR_EL2_EC_WFI] = kvm_handle_wfi, + [ESR_EL2_EC_WFI] = kvm_handle_wfx, [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index de2de5db628..0cb8742de4f 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -31,7 +31,6 @@ #include <linux/sort.h> #include <linux/of_fdt.h> -#include <asm/prom.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/sizes.h> @@ -39,17 +38,9 @@ #include "mm.h" -static unsigned long phys_initrd_start __initdata = 0; -static unsigned long phys_initrd_size __initdata = 0; - phys_addr_t memstart_addr __read_mostly = 0; -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - phys_initrd_start = start; - phys_initrd_size = end - start; -} - +#ifdef CONFIG_BLK_DEV_INITRD static int __init early_initrd(char *p) { unsigned long start, size; @@ -59,12 +50,13 @@ static int __init early_initrd(char *p) if (*endp == ',') { size = memparse(endp + 1, NULL); - phys_initrd_start = start; - phys_initrd_size = size; + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(start + size); } return 0; } early_param("initrd", early_initrd); +#endif #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) @@ -137,13 +129,8 @@ void __init arm64_memblock_init(void) /* Register the kernel text, kernel data and initrd with memblock */ memblock_reserve(__pa(_text), _end - _text); #ifdef CONFIG_BLK_DEV_INITRD - if (phys_initrd_size) { - memblock_reserve(phys_initrd_start, phys_initrd_size); - - /* Now convert initrd to virtual addresses */ - initrd_start = __phys_to_virt(phys_initrd_start); - initrd_end = initrd_start + phys_initrd_size; - } + if (initrd_start) + memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); #endif /* diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile index be240404ba9..74a8d87e542 100644 --- a/arch/arm64/xen/Makefile +++ b/arch/arm64/xen/Makefile @@ -1,2 +1,2 @@ -xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o) +xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o) obj-y := xen-arm.o hypercall.o diff --git a/arch/avr32/boards/atngw100/evklcd10x.c b/arch/avr32/boards/atngw100/evklcd10x.c index 20388750d56..64919b0da7a 100644 --- a/arch/avr32/boards/atngw100/evklcd10x.c +++ b/arch/avr32/boards/atngw100/evklcd10x.c @@ -58,7 +58,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = { .dclkmax = 28330000, }; -static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = { .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT @@ -96,7 +96,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = { .dclkmax = 7000000, }; -static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = { .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT @@ -134,7 
+134,7 @@ static struct fb_monspecs __initdata atevklcd10x_default_monspecs = { .dclkmax = 6400000, }; -static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata atevklcd10x_lcdc_data = { .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT @@ -145,7 +145,7 @@ static struct atmel_lcdfb_info __initdata atevklcd10x_lcdc_data = { }; #endif -static void atevklcd10x_lcdc_power_control(int on) +static void atevklcd10x_lcdc_power_control(struct atmel_lcdfb_pdata *pdata, int on) { gpio_set_value(GPIO_PIN_PB(15), on); } diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c index 7de083d19b7..1ba09e4c02b 100644 --- a/arch/avr32/boards/atngw100/mrmt.c +++ b/arch/avr32/boards/atngw100/mrmt.c @@ -83,7 +83,7 @@ static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .dclkmax = 9260000, }; -static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT @@ -126,7 +126,7 @@ static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .dclkmax = 9260000, }; -static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { +static struct atmel_lcdfb_pdata __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT diff --git a/arch/avr32/boards/atstk1000/atstk1000.h b/arch/avr32/boards/atstk1000/atstk1000.h index 9392d325286..653cc09e536 100644 --- a/arch/avr32/boards/atstk1000/atstk1000.h +++ b/arch/avr32/boards/atstk1000/atstk1000.h @@ -10,7 +10,7 @@ #ifndef __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H #define __ARCH_AVR32_BOARDS_ATSTK1000_ATSTK1000_H -extern struct atmel_lcdfb_info atstk1000_lcdc_data; +extern struct atmel_lcdfb_pdata atstk1000_lcdc_data; void atstk1000_setup_j2_leds(void); diff --git a/arch/avr32/boards/atstk1000/setup.c b/arch/avr32/boards/atstk1000/setup.c index 2d6b560115d..b6b88f5e0b4 100644 --- a/arch/avr32/boards/atstk1000/setup.c +++ b/arch/avr32/boards/atstk1000/setup.c @@ -55,7 +55,7 @@ static struct fb_monspecs __initdata atstk1000_default_monspecs = { .dclkmax = 30000000, }; -struct atmel_lcdfb_info __initdata atstk1000_lcdc_data = { +struct atmel_lcdfb_pdata __initdata atstk1000_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c index 27bd6fbe21c..7b1f2cd8540 100644 --- a/arch/avr32/boards/favr-32/setup.c +++ b/arch/avr32/boards/favr-32/setup.c @@ -125,7 +125,7 @@ static struct fb_monspecs __initdata favr32_default_monspecs = { .dclkmax = 28000000, }; -struct atmel_lcdfb_info __initdata favr32_lcdc_data = { +struct atmel_lcdfb_pdata __initdata favr32_lcdc_data = { .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT diff --git a/arch/avr32/boards/hammerhead/setup.c b/arch/avr32/boards/hammerhead/setup.c index 9d1efd1cd42..dc0e317f2ec 100644 --- a/arch/avr32/boards/hammerhead/setup.c +++ b/arch/avr32/boards/hammerhead/setup.c @@ -77,7 +77,7 @@ static struct fb_monspecs __initdata hammerhead_hda350t_monspecs = { .dclkmax = 10000000, }; -struct atmel_lcdfb_info __initdata hammerhead_lcdc_data = { +struct atmel_lcdfb_pdata __initdata 
hammerhead_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT diff --git a/arch/avr32/boards/merisc/display.c b/arch/avr32/boards/merisc/display.c index 85a543cd4ab..e7683ee7ed4 100644 --- a/arch/avr32/boards/merisc/display.c +++ b/arch/avr32/boards/merisc/display.c @@ -45,7 +45,7 @@ static struct fb_monspecs merisc_fb_monspecs = { .dclkmax = 30000000, }; -struct atmel_lcdfb_info merisc_lcdc_data = { +struct atmel_lcdfb_pdata merisc_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT diff --git a/arch/avr32/boards/mimc200/setup.c b/arch/avr32/boards/mimc200/setup.c index 05358aa5ef7..1cb8e9cc5cf 100644 --- a/arch/avr32/boards/mimc200/setup.c +++ b/arch/avr32/boards/mimc200/setup.c @@ -8,7 +8,7 @@ * published by the Free Software Foundation. */ -extern struct atmel_lcdfb_info mimc200_lcdc_data; +extern struct atmel_lcdfb_pdata mimc200_lcdc_data; #include <linux/clk.h> #include <linux/etherdevice.h> @@ -71,7 +71,7 @@ static struct fb_monspecs __initdata mimc200_default_monspecs = { .dclkmax = 25200000, }; -struct atmel_lcdfb_info __initdata mimc200_lcdc_data = { +struct atmel_lcdfb_pdata __initdata mimc200_lcdc_data = { .default_bpp = 16, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h index bc7e8ae479e..1aba19d68c5 100644 --- a/arch/avr32/include/asm/pgalloc.h +++ b/arch/avr32/include/asm/pgalloc.h @@ -68,7 +68,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, return NULL; page = virt_to_page(pg); - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + quicklist_free(QUICK_PT, NULL, pg); + return NULL; + } return page; } diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h index 11c4259c62f..43993642143 100644 --- a/arch/avr32/include/uapi/asm/socket.h +++ b/arch/avr32/include/uapi/asm/socket.h @@ -76,4 +76,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index a68f3cf7c3c..a1f4d1e91b5 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -1439,7 +1439,7 @@ fail: * LCDC * -------------------------------------------------------------------- */ #if defined(CONFIG_CPU_AT32AP7000) || defined(CONFIG_CPU_AT32AP7002) -static struct atmel_lcdfb_info atmel_lcdfb0_data; +static struct atmel_lcdfb_pdata atmel_lcdfb0_data; static struct resource atmel_lcdfb0_resource[] = { { .start = 0xff000000, @@ -1467,12 +1467,12 @@ static struct clk atmel_lcdfb0_pixclk = { }; struct platform_device *__init -at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data, +at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data, unsigned long fbmem_start, unsigned long fbmem_len, u64 pin_mask) { struct platform_device *pdev; - struct atmel_lcdfb_info *info; + struct atmel_lcdfb_pdata *info; struct fb_monspecs *monspecs; struct fb_videomode *modedb; unsigned int modedb_size; @@ -1529,7 +1529,7 @@ at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data, } info = pdev->dev.platform_data; - memcpy(info, data, sizeof(struct atmel_lcdfb_info)); + memcpy(info, data, sizeof(struct atmel_lcdfb_pdata)); info->default_monspecs = monspecs; pdev->name = "at32ap-lcdfb"; diff --git 
a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h index d485b039135..f1a316d52c7 100644 --- a/arch/avr32/mach-at32ap/include/mach/board.h +++ b/arch/avr32/mach-at32ap/include/mach/board.h @@ -44,9 +44,9 @@ struct platform_device * at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n); void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n); -struct atmel_lcdfb_info; +struct atmel_lcdfb_pdata; struct platform_device * -at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_info *data, +at32_add_device_lcdc(unsigned int id, struct atmel_lcdfb_pdata *data, unsigned long fbmem_start, unsigned long fbmem_len, u64 pin_mask); diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index f78c9a2c7e2..9ceccef9c64 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -34,7 +34,6 @@ config BLACKFIN select ARCH_WANT_IPC_PARSE_VERSION select GENERIC_ATOMIC64 select GENERIC_IRQ_PROBE - select USE_GENERIC_SMP_HELPERS if SMP select HAVE_NMI_WATCHDOG if NMI_WATCHDOG select GENERIC_SMP_IDLE_THREAD select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS @@ -52,6 +51,9 @@ config GENERIC_BUG config ZONE_DMA def_bool y +config GENERIC_GPIO + def_bool y + config FORCE_MAX_ZONEORDER int default "14" @@ -317,6 +319,14 @@ config BF53x depends on (BF531 || BF532 || BF533 || BF534 || BF536 || BF537) default y +config GPIO_ADI + def_bool y + depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561) + +config PINCTRL + def_bool y + depends on BF54x || BF60x + config MEM_MT48LC64M4A2FB_7E bool depends on (BFIN533_STAMP) @@ -1429,7 +1439,6 @@ source "drivers/cpufreq/Kconfig" config BFIN_CPU_FREQ bool depends on CPU_FREQ - select CPU_FREQ_TABLE default y config CPU_VOLTAGE diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig index 13eb73231a9..4ca39ab6b2b 100644 --- a/arch/blackfin/configs/BF609-EZKIT_defconfig +++ b/arch/blackfin/configs/BF609-EZKIT_defconfig @@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y CONFIG_I2C_BLACKFIN_TWI=y CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 CONFIG_SPI=y -CONFIG_SPI_BFIN6XX=y +CONFIG_SPI_BFIN_V3=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y # CONFIG_HWMON is not set diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h index 98d0133346b..99d338ca2ea 100644 --- a/arch/blackfin/include/asm/gpio.h +++ b/arch/blackfin/include/asm/gpio.h @@ -25,8 +25,12 @@ #ifndef __ASSEMBLY__ +#ifndef CONFIG_PINCTRL + #include <linux/compiler.h> -#include <linux/gpio.h> +#include <asm/blackfin.h> +#include <asm/portmux.h> +#include <asm/irq_handler.h> /*********************************************************** * @@ -45,7 +49,6 @@ * MODIFICATION HISTORY : **************************************************************/ -#if !BFIN_GPIO_PINT void set_gpio_dir(unsigned, unsigned short); void set_gpio_inen(unsigned, unsigned short); void set_gpio_polar(unsigned, unsigned short); @@ -115,7 +118,6 @@ struct gpio_port_t { unsigned short dummy16; unsigned short inen; }; -#endif #ifdef BFIN_SPECIAL_GPIO_BANKS void bfin_special_gpio_free(unsigned gpio); @@ -127,25 +129,21 @@ void bfin_special_gpio_pm_hibernate_suspend(void); #endif #ifdef CONFIG_PM -int bfin_pm_standby_ctrl(unsigned ctrl); +void bfin_gpio_pm_hibernate_restore(void); +void bfin_gpio_pm_hibernate_suspend(void); +int bfin_gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl); +int bfin_gpio_pm_standby_ctrl(unsigned ctrl); static inline int bfin_pm_standby_setup(void) { - return 
bfin_pm_standby_ctrl(1); + return bfin_gpio_pm_standby_ctrl(1); } static inline void bfin_pm_standby_restore(void) { - bfin_pm_standby_ctrl(0); + bfin_gpio_pm_standby_ctrl(0); } -void bfin_gpio_pm_hibernate_restore(void); -void bfin_gpio_pm_hibernate_suspend(void); -void bfin_pint_suspend(void); -void bfin_pint_resume(void); - -# if !BFIN_GPIO_PINT -int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl); struct gpio_port_s { unsigned short data; @@ -161,7 +159,6 @@ struct gpio_port_s { unsigned short reserved; unsigned short mux; }; -# endif #endif /*CONFIG_PM*/ /*********************************************************** @@ -178,36 +175,29 @@ struct gpio_port_s { ************************************************************* * MODIFICATION HISTORY : **************************************************************/ - -int bfin_gpio_request(unsigned gpio, const char *label); -void bfin_gpio_free(unsigned gpio); int bfin_gpio_irq_request(unsigned gpio, const char *label); void bfin_gpio_irq_free(unsigned gpio); -int bfin_gpio_direction_input(unsigned gpio); -int bfin_gpio_direction_output(unsigned gpio, int value); -int bfin_gpio_get_value(unsigned gpio); -void bfin_gpio_set_value(unsigned gpio, int value); +void bfin_gpio_irq_prepare(unsigned gpio); + +static inline int irq_to_gpio(unsigned irq) +{ + return irq - GPIO_IRQ_BASE; +} +#endif /* CONFIG_PINCTRL */ #include <asm/irq.h> #include <asm/errno.h> -#ifdef CONFIG_GPIOLIB #include <asm-generic/gpio.h> /* cansleep wrappers */ static inline int gpio_get_value(unsigned int gpio) { - if (gpio < MAX_BLACKFIN_GPIOS) - return bfin_gpio_get_value(gpio); - else - return __gpio_get_value(gpio); + return __gpio_get_value(gpio); } static inline void gpio_set_value(unsigned int gpio, int value) { - if (gpio < MAX_BLACKFIN_GPIOS) - bfin_gpio_set_value(gpio, value); - else - __gpio_set_value(gpio, value); + __gpio_set_value(gpio, value); } static inline int gpio_cansleep(unsigned int gpio) @@ -219,113 +209,6 @@ static inline int gpio_to_irq(unsigned gpio) { return __gpio_to_irq(gpio); } - -#else /* !CONFIG_GPIOLIB */ - -static inline int gpio_request(unsigned gpio, const char *label) -{ - return bfin_gpio_request(gpio, label); -} - -static inline void gpio_free(unsigned gpio) -{ - return bfin_gpio_free(gpio); -} - -static inline int gpio_direction_input(unsigned gpio) -{ - return bfin_gpio_direction_input(gpio); -} - -static inline int gpio_direction_output(unsigned gpio, int value) -{ - return bfin_gpio_direction_output(gpio, value); -} - -static inline int gpio_set_debounce(unsigned gpio, unsigned debounce) -{ - return -EINVAL; -} - -static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) -{ - int err; - - err = bfin_gpio_request(gpio, label); - if (err) - return err; - - if (flags & GPIOF_DIR_IN) - err = bfin_gpio_direction_input(gpio); - else - err = bfin_gpio_direction_output(gpio, - (flags & GPIOF_INIT_HIGH) ? 
1 : 0); - - if (err) - bfin_gpio_free(gpio); - - return err; -} - -static inline int gpio_request_array(const struct gpio *array, size_t num) -{ - int i, err; - - for (i = 0; i < num; i++, array++) { - err = gpio_request_one(array->gpio, array->flags, array->label); - if (err) - goto err_free; - } - return 0; - -err_free: - while (i--) - bfin_gpio_free((--array)->gpio); - return err; -} - -static inline void gpio_free_array(const struct gpio *array, size_t num) -{ - while (num--) - bfin_gpio_free((array++)->gpio); -} - -static inline int __gpio_get_value(unsigned gpio) -{ - return bfin_gpio_get_value(gpio); -} - -static inline void __gpio_set_value(unsigned gpio, int value) -{ - return bfin_gpio_set_value(gpio, value); -} - -static inline int gpio_get_value(unsigned gpio) -{ - return __gpio_get_value(gpio); -} - -static inline void gpio_set_value(unsigned gpio, int value) -{ - return __gpio_set_value(gpio, value); -} - -static inline int gpio_to_irq(unsigned gpio) -{ - if (likely(gpio < MAX_BLACKFIN_GPIOS)) - return gpio + GPIO_IRQ_BASE; - - return -EINVAL; -} - -#include <asm-generic/gpio.h> /* cansleep wrappers */ -#endif /* !CONFIG_GPIOLIB */ - -static inline int irq_to_gpio(unsigned irq) -{ - return (irq - GPIO_IRQ_BASE); -} - #endif /* __ASSEMBLY__ */ #endif /* __ARCH_BLACKFIN_GPIO_H__ */ diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h index 4ae1144a457..2fd04f10cc2 100644 --- a/arch/blackfin/include/asm/irq.h +++ b/arch/blackfin/include/asm/irq.h @@ -23,8 +23,7 @@ /* * pm save bfin pint registers */ -struct bfin_pm_pint_save { - u32 mask_set; +struct adi_pm_pint_save { u32 assign; u32 edge_set; u32 invert_set; diff --git a/arch/blackfin/include/asm/irq_handler.h b/arch/blackfin/include/asm/irq_handler.h index 4fbf83575db..4b2a992794d 100644 --- a/arch/blackfin/include/asm/irq_handler.h +++ b/arch/blackfin/include/asm/irq_handler.h @@ -12,11 +12,11 @@ #include <mach/irq.h> /* init functions only */ -extern int __init init_arch_irq(void); +extern int init_arch_irq(void); extern void init_exception_vectors(void); -extern void __init program_IAR(void); +extern void program_IAR(void); #ifdef init_mach_irq -extern void __init init_mach_irq(void); +extern void init_mach_irq(void); #else # define init_mach_irq() #endif diff --git a/arch/blackfin/include/asm/portmux.h b/arch/blackfin/include/asm/portmux.h index 9b1e2c37b32..7aa20436e79 100644 --- a/arch/blackfin/include/asm/portmux.h +++ b/arch/blackfin/include/asm/portmux.h @@ -17,14 +17,29 @@ #define P_MAYSHARE 0x2000 #define P_DONTCARE 0x1000 - +#ifdef CONFIG_PINCTRL +#include <asm/irq_handler.h> + +#define gpio_pint_regs bfin_pint_regs +#define adi_internal_set_wake bfin_internal_set_wake + +#define peripheral_request(per, label) 0 +#define peripheral_free(per) +#define peripheral_request_list(per, label) \ + (pdev ? (IS_ERR(devm_pinctrl_get_select_default(&pdev->dev)) \ + ? 
-EINVAL : 0) : 0) +#define peripheral_free_list(per) +#else int peripheral_request(unsigned short per, const char *label); void peripheral_free(unsigned short per); int peripheral_request_list(const unsigned short per[], const char *label); void peripheral_free_list(const unsigned short per[]); +#endif -#include <asm/gpio.h> +#include <linux/err.h> +#include <linux/pinctrl/pinctrl.h> #include <mach/portmux.h> +#include <linux/gpio.h> #ifndef P_SPORT2_TFS #define P_SPORT2_TFS P_UNDEF diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index 735f24e0742..703dc7cf2ec 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile @@ -7,7 +7,7 @@ extra-y := vmlinux.lds obj-y := \ entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \ - fixed_code.o reboot.o bfin_gpio.o bfin_dma.o \ + fixed_code.o reboot.o bfin_dma.o \ exception.o dumpstack.o ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y) @@ -16,6 +16,7 @@ else obj-y += time.o endif +obj-$(CONFIG_GPIO_ADI) += bfin_gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c index ed978f1c5cb..a017359c182 100644 --- a/arch/blackfin/kernel/bfin_gpio.c +++ b/arch/blackfin/kernel/bfin_gpio.c @@ -11,11 +11,8 @@ #include <linux/err.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <asm/blackfin.h> -#include <asm/gpio.h> -#include <asm/portmux.h> +#include <linux/gpio.h> #include <linux/irq.h> -#include <asm/irq_handler.h> #if ANOMALY_05000311 || ANOMALY_05000323 enum { @@ -58,19 +55,6 @@ static struct gpio_port_t * const gpio_array[] = { (struct gpio_port_t *) FIO0_FLAG_D, (struct gpio_port_t *) FIO1_FLAG_D, (struct gpio_port_t *) FIO2_FLAG_D, -#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - (struct gpio_port_t *)PORTA_FER, - (struct gpio_port_t *)PORTB_FER, - (struct gpio_port_t *)PORTC_FER, - (struct gpio_port_t *)PORTD_FER, - (struct gpio_port_t *)PORTE_FER, - (struct gpio_port_t *)PORTF_FER, - (struct gpio_port_t *)PORTG_FER, -# if defined(CONFIG_BF54x) - (struct gpio_port_t *)PORTH_FER, - (struct gpio_port_t *)PORTI_FER, - (struct gpio_port_t *)PORTJ_FER, -# endif #else # error no gpio arrays defined #endif @@ -169,12 +153,6 @@ DECLARE_RESERVED_MAP(gpio_irq, GPIO_BANK_NUM); inline int check_gpio(unsigned gpio) { -#if defined(CONFIG_BF54x) - if (gpio == GPIO_PB15 || gpio == GPIO_PC14 || gpio == GPIO_PC15 - || gpio == GPIO_PH14 || gpio == GPIO_PH15 - || gpio == GPIO_PJ14 || gpio == GPIO_PJ15) - return -EINVAL; -#endif if (gpio >= MAX_BLACKFIN_GPIOS) return -EINVAL; return 0; @@ -212,12 +190,6 @@ static void port_setup(unsigned gpio, unsigned short usage) else *port_fer[gpio_bank(gpio)] |= gpio_bit(gpio); SSYNC(); -#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - if (usage == GPIO_USAGE) - gpio_array[gpio_bank(gpio)]->port_fer &= ~gpio_bit(gpio); - else - gpio_array[gpio_bank(gpio)]->port_fer |= gpio_bit(gpio); - SSYNC(); #endif } @@ -255,7 +227,7 @@ static int portmux_group_check(unsigned short per) u16 ident = P_IDENT(per); u16 function = P_FUNCT2MUX(per); s8 offset = port_mux[ident]; - u16 m, pmux, pfunc; + u16 m, pmux, pfunc, mask; if (offset < 0) return 0; @@ -270,10 +242,12 @@ static int portmux_group_check(unsigned short per) continue; if (offset == 1) - pfunc = (pmux >> offset) & 3; + mask = 3; else - pfunc = (pmux >> offset) & 1; - if (pfunc != function) { + mask 
= 1; + + pfunc = (pmux >> offset) & mask; + if (pfunc != (function & mask)) { pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n", ident, function, m, pfunc); return -EINVAL; @@ -288,43 +262,21 @@ static void portmux_setup(unsigned short per) u16 ident = P_IDENT(per); u16 function = P_FUNCT2MUX(per); s8 offset = port_mux[ident]; - u16 pmux; + u16 pmux, mask; if (offset == -1) return; pmux = bfin_read_PORT_MUX(); - if (offset != 1) - pmux &= ~(1 << offset); + if (offset == 1) + mask = 3; else - pmux &= ~(3 << 1); - pmux |= (function << offset); - bfin_write_PORT_MUX(pmux); -} -#elif defined(CONFIG_BF54x) || defined(CONFIG_BF60x) -inline void portmux_setup(unsigned short per) -{ - u16 ident = P_IDENT(per); - u16 function = P_FUNCT2MUX(per); - u32 pmux; + mask = 1; - pmux = gpio_array[gpio_bank(ident)]->port_mux; + pmux &= ~(mask << offset); + pmux |= ((function & mask) << offset); - pmux &= ~(0x3 << (2 * gpio_sub_n(ident))); - pmux |= (function & 0x3) << (2 * gpio_sub_n(ident)); - - gpio_array[gpio_bank(ident)]->port_mux = pmux; -} - -inline u16 get_portmux(unsigned short per) -{ - u16 ident = P_IDENT(per); - u32 pmux = gpio_array[gpio_bank(ident)]->port_mux; - return (pmux >> (2 * gpio_sub_n(ident)) & 0x3); -} -static int portmux_group_check(unsigned short per) -{ - return 0; + bfin_write_PORT_MUX(pmux); } #elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) static int portmux_group_check(unsigned short per) @@ -379,7 +331,6 @@ static int portmux_group_check(unsigned short per) } #endif -#if !(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) /*********************************************************** * * FUNCTIONS: Blackfin General Purpose Ports Access Functions @@ -572,7 +523,7 @@ static const unsigned int sic_iwr_irqs[] = { ************************************************************* * MODIFICATION HISTORY : **************************************************************/ -int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl) +int bfin_gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl) { unsigned long flags; @@ -591,7 +542,7 @@ int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl) return 0; } -int bfin_pm_standby_ctrl(unsigned ctrl) +int bfin_gpio_pm_standby_ctrl(unsigned ctrl) { u16 bank, mask, i; @@ -682,53 +633,6 @@ void bfin_gpio_pm_hibernate_restore(void) #endif -#else /* CONFIG_BF54x || CONFIG_BF60x */ -#ifdef CONFIG_PM - -int bfin_pm_standby_ctrl(unsigned ctrl) -{ - return 0; -} - -void bfin_gpio_pm_hibernate_suspend(void) -{ - int i, bank; - - for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { - bank = gpio_bank(i); - - gpio_bank_saved[bank].fer = gpio_array[bank]->port_fer; - gpio_bank_saved[bank].mux = gpio_array[bank]->port_mux; - gpio_bank_saved[bank].data = gpio_array[bank]->data; - gpio_bank_saved[bank].inen = gpio_array[bank]->inen; - gpio_bank_saved[bank].dir = gpio_array[bank]->dir_set; - } -} - -void bfin_gpio_pm_hibernate_restore(void) -{ - int i, bank; - - for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { - bank = gpio_bank(i); - - gpio_array[bank]->port_mux = gpio_bank_saved[bank].mux; - gpio_array[bank]->port_fer = gpio_bank_saved[bank].fer; - gpio_array[bank]->inen = gpio_bank_saved[bank].inen; - gpio_array[bank]->data_set = gpio_bank_saved[bank].data - & gpio_bank_saved[bank].dir; - gpio_array[bank]->dir_set = gpio_bank_saved[bank].dir; - } -} -#endif - -unsigned short get_gpio_dir(unsigned gpio) -{ - return (0x01 & (gpio_array[gpio_bank(gpio)]->dir_clear >> gpio_sub_n(gpio))); -} -EXPORT_SYMBOL(get_gpio_dir); - -#endif 
/* CONFIG_BF54x || CONFIG_BF60x */ /*********************************************************** * @@ -785,11 +689,7 @@ int peripheral_request(unsigned short per, const char *label) * be requested and used by several drivers */ -#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - if (!((per & P_MAYSHARE) && get_portmux(per) == P_FUNCT2MUX(per))) { -#else if (!(per & P_MAYSHARE)) { -#endif /* * Allow that the identical pin function can * be requested from the same driver twice @@ -938,12 +838,9 @@ int bfin_gpio_request(unsigned gpio, const char *label) if (unlikely(is_reserved(gpio_irq, gpio, 1))) { printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved as gpio-irq!" " (Documentation/blackfin/bfin-gpio-notes.txt)\n", gpio); - } -#if !(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) - else { /* Reset POLAR setting when acquiring a gpio for the first time */ + } else { /* Reset POLAR setting when acquiring a gpio for the first time */ set_gpio_polar(gpio, 0); } -#endif reserve(gpio, gpio); set_label(gpio, label); @@ -1112,11 +1009,7 @@ void bfin_gpio_irq_free(unsigned gpio) static inline void __bfin_gpio_direction_input(unsigned gpio) { -#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - gpio_array[gpio_bank(gpio)]->dir_clear = gpio_bit(gpio); -#else gpio_array[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio); -#endif gpio_array[gpio_bank(gpio)]->inen |= gpio_bit(gpio); } @@ -1140,17 +1033,7 @@ EXPORT_SYMBOL(bfin_gpio_direction_input); void bfin_gpio_irq_prepare(unsigned gpio) { -#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - unsigned long flags; -#endif - port_setup(gpio, GPIO_USAGE); - -#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - flags = hard_local_irq_save(); - __bfin_gpio_direction_input(gpio); - hard_local_irq_restore(flags); -#endif } void bfin_gpio_set_value(unsigned gpio, int arg) @@ -1175,11 +1058,7 @@ int bfin_gpio_direction_output(unsigned gpio, int value) gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); gpio_set_value(gpio, value); -#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - gpio_array[gpio_bank(gpio)]->dir_set = gpio_bit(gpio); -#else gpio_array[gpio_bank(gpio)]->dir |= gpio_bit(gpio); -#endif AWA_DUMMY_READ(dir); hard_local_irq_restore(flags); @@ -1190,9 +1069,6 @@ EXPORT_SYMBOL(bfin_gpio_direction_output); int bfin_gpio_get_value(unsigned gpio) { -#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) - return (1 & (gpio_array[gpio_bank(gpio)]->data >> gpio_sub_n(gpio))); -#else unsigned long flags; if (unlikely(get_gpio_edge(gpio))) { @@ -1205,7 +1081,6 @@ int bfin_gpio_get_value(unsigned gpio) return ret; } else return get_gpio_data(gpio); -#endif } EXPORT_SYMBOL(bfin_gpio_get_value); diff --git a/arch/blackfin/mach-bf548/Kconfig b/arch/blackfin/mach-bf548/Kconfig index 94acb586832..334ec7b1218 100644 --- a/arch/blackfin/mach-bf548/Kconfig +++ b/arch/blackfin/mach-bf548/Kconfig @@ -377,40 +377,6 @@ config IRQ_PINT3 endmenu -comment "Pin Interrupt to Port Assignment" -menu "Assignment" - -config PINTx_REASSIGN - bool "Reprogram PINT Assignment" - default y - help - The interrupt assignment registers controls the pin-to-interrupt - assignment in a byte-wide manner. Each option allows you to select - a set of pins (High/Low Byte) of an specific Port being mapped - to one of the four PIN Interrupts IRQ_PINTx. - - You shouldn't change any of these unless you know exactly what you're doing. - Please consult the Blackfin BF54x Processor Hardware Reference Manual. 
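/*
 * The PINTx_ASSIGN options being dropped here hard-coded the byte-wide
 * pin-to-interrupt routing at build time.  With the pinctrl-adi2 conversion
 * the same routing travels as per-bank platform data instead, as the
 * BF548-EZKIT hunks further down show.  A minimal sketch of one bank,
 * assuming <linux/platform_data/pinctrl-adi2.h> and the BF54x GPIO_PA0 /
 * GPIO_BANKSIZE constants; "example_gpa_pdata" and its values are
 * illustrative only, not taken from a real board file:
 */
static struct adi_pinctrl_gpio_platform_data example_gpa_pdata = {
	.port_pin_base	= GPIO_PA0,	/* first pin handled by this bank */
	.port_width	= GPIO_BANKSIZE,
	.pint_id	= 0,		/* deliver the bank's IRQs through PINT0 */
	.pint_assign	= true,		/* use the upper 16 bits of that PINT */
	.pint_map	= 0,		/* byte-wide mapping mask inside the PINT */
};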
- -config PINT0_ASSIGN - hex "PINT0_ASSIGN" - depends on PINTx_REASSIGN - default 0x00000101 -config PINT1_ASSIGN - hex "PINT1_ASSIGN" - depends on PINTx_REASSIGN - default 0x01010000 -config PINT2_ASSIGN - hex "PINT2_ASSIGN" - depends on PINTx_REASSIGN - default 0x07000101 -config PINT3_ASSIGN - hex "PINT3_ASSIGN" - depends on PINTx_REASSIGN - default 0x02020303 - -endmenu - endmenu endif diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 372eb54944e..d495000b81a 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -17,6 +17,9 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> +#include <linux/pinctrl/machine.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/platform_data/pinctrl-adi2.h> #include <asm/bfin5xx_spi.h> #include <asm/dma.h> #include <asm/gpio.h> @@ -241,6 +244,13 @@ static struct resource bfin_uart0_resources[] = { .end = UART0_RBR+2, .flags = IORESOURCE_MEM, }, +#ifdef CONFIG_EARLY_PRINTK + { + .start = PORTE_FER, + .end = PORTE_FER+2, + .flags = IORESOURCE_REG, + }, +#endif { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, @@ -289,6 +299,13 @@ static struct resource bfin_uart1_resources[] = { .end = UART1_RBR+2, .flags = IORESOURCE_MEM, }, +#ifdef CONFIG_EARLY_PRINTK + { + .start = PORTH_FER, + .end = PORTH_FER+2, + .flags = IORESOURCE_REG, + }, +#endif { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, @@ -353,6 +370,13 @@ static struct resource bfin_uart2_resources[] = { .end = UART2_RBR+2, .flags = IORESOURCE_MEM, }, +#ifdef CONFIG_EARLY_PRINTK + { + .start = PORTB_FER, + .end = PORTB_FER+2, + .flags = IORESOURCE_REG, + }, +#endif { .start = IRQ_UART2_TX, .end = IRQ_UART2_TX, @@ -401,6 +425,13 @@ static struct resource bfin_uart3_resources[] = { .end = UART3_RBR+2, .flags = IORESOURCE_MEM, }, +#ifdef CONFIG_EARLY_PRINTK + { + .start = PORTB_FER, + .end = PORTB_FER+2, + .flags = IORESOURCE_REG, + }, +#endif { .start = IRQ_UART3_TX, .end = IRQ_UART3_TX, @@ -1058,6 +1089,411 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = { }; #endif +#ifdef CONFIG_PINCTRL_ADI2 + +# define ADI_PINT_DEVNAME "adi-gpio-pint" +# define ADI_GPIO_DEVNAME "adi-gpio" +# define ADI_PINCTRL_DEVNAME "pinctrl-adi2" + +static struct platform_device bfin_pinctrl_device = { + .name = ADI_PINCTRL_DEVNAME, + .id = 0, +}; + +static struct resource bfin_pint0_resources[] = { + { + .start = PINT0_MASK_SET, + .end = PINT0_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT0, + .end = IRQ_PINT0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint0_device = { + .name = ADI_PINT_DEVNAME, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_pint0_resources), + .resource = bfin_pint0_resources, +}; + +static struct resource bfin_pint1_resources[] = { + { + .start = PINT1_MASK_SET, + .end = PINT1_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT1, + .end = IRQ_PINT1, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint1_device = { + .name = ADI_PINT_DEVNAME, + .id = 1, + .num_resources = ARRAY_SIZE(bfin_pint1_resources), + .resource = bfin_pint1_resources, +}; + +static struct resource bfin_pint2_resources[] = { + { + .start = PINT2_MASK_SET, + .end = PINT2_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT2, + .end = IRQ_PINT2, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint2_device = { + .name = ADI_PINT_DEVNAME, + .id = 2, + .num_resources 
= ARRAY_SIZE(bfin_pint2_resources), + .resource = bfin_pint2_resources, +}; + +static struct resource bfin_pint3_resources[] = { + { + .start = PINT3_MASK_SET, + .end = PINT3_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT3, + .end = IRQ_PINT3, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint3_device = { + .name = ADI_PINT_DEVNAME, + .id = 3, + .num_resources = ARRAY_SIZE(bfin_pint3_resources), + .resource = bfin_pint3_resources, +}; + +static struct resource bfin_gpa_resources[] = { + { + .start = PORTA_FER, + .end = PORTA_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { /* optional */ + .start = IRQ_PA0, + .end = IRQ_PA0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpa_pdata = { + .port_gpio_base = GPIO_PA0, /* Optional */ + .port_pin_base = GPIO_PA0, + .port_width = GPIO_BANKSIZE, + .pint_id = 0, /* PINT0 */ + .pint_assign = true, /* PINT upper 16 bit */ + .pint_map = 0, /* mapping mask in PINT */ +}; + +static struct platform_device bfin_gpa_device = { + .name = ADI_GPIO_DEVNAME, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_gpa_resources), + .resource = bfin_gpa_resources, + .dev = { + .platform_data = &bfin_gpa_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpb_resources[] = { + { + .start = PORTB_FER, + .end = PORTB_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PB0, + .end = IRQ_PB0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpb_pdata = { + .port_gpio_base = GPIO_PB0, + .port_pin_base = GPIO_PB0, + .port_width = 15, + .pint_id = 0, + .pint_assign = true, + .pint_map = 1, +}; + +static struct platform_device bfin_gpb_device = { + .name = ADI_GPIO_DEVNAME, + .id = 1, + .num_resources = ARRAY_SIZE(bfin_gpb_resources), + .resource = bfin_gpb_resources, + .dev = { + .platform_data = &bfin_gpb_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpc_resources[] = { + { + .start = PORTC_FER, + .end = PORTC_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PC0, + .end = IRQ_PC0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpc_pdata = { + .port_gpio_base = GPIO_PC0, + .port_pin_base = GPIO_PC0, + .port_width = 14, + .pint_id = 2, + .pint_assign = true, + .pint_map = 0, +}; + +static struct platform_device bfin_gpc_device = { + .name = ADI_GPIO_DEVNAME, + .id = 2, + .num_resources = ARRAY_SIZE(bfin_gpc_resources), + .resource = bfin_gpc_resources, + .dev = { + .platform_data = &bfin_gpc_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpd_resources[] = { + { + .start = PORTD_FER, + .end = PORTD_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PD0, + .end = IRQ_PD0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpd_pdata = { + .port_gpio_base = GPIO_PD0, + .port_pin_base = GPIO_PD0, + .port_width = GPIO_BANKSIZE, + .pint_id = 2, + .pint_assign = false, + .pint_map = 1, +}; + +static struct platform_device bfin_gpd_device = { + .name = ADI_GPIO_DEVNAME, + .id = 3, + .num_resources = ARRAY_SIZE(bfin_gpd_resources), + .resource = bfin_gpd_resources, + .dev = { + .platform_data = &bfin_gpd_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpe_resources[] = { + { + .start = PORTE_FER, + .end = PORTE_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PE0, + .end = IRQ_PE0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data 
bfin_gpe_pdata = { + .port_gpio_base = GPIO_PE0, + .port_pin_base = GPIO_PE0, + .port_width = GPIO_BANKSIZE, + .pint_id = 3, + .pint_assign = true, + .pint_map = 2, +}; + +static struct platform_device bfin_gpe_device = { + .name = ADI_GPIO_DEVNAME, + .id = 4, + .num_resources = ARRAY_SIZE(bfin_gpe_resources), + .resource = bfin_gpe_resources, + .dev = { + .platform_data = &bfin_gpe_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpf_resources[] = { + { + .start = PORTF_FER, + .end = PORTF_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PF0, + .end = IRQ_PF0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpf_pdata = { + .port_gpio_base = GPIO_PF0, + .port_pin_base = GPIO_PF0, + .port_width = GPIO_BANKSIZE, + .pint_id = 3, + .pint_assign = false, + .pint_map = 3, +}; + +static struct platform_device bfin_gpf_device = { + .name = ADI_GPIO_DEVNAME, + .id = 5, + .num_resources = ARRAY_SIZE(bfin_gpf_resources), + .resource = bfin_gpf_resources, + .dev = { + .platform_data = &bfin_gpf_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpg_resources[] = { + { + .start = PORTG_FER, + .end = PORTG_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PG0, + .end = IRQ_PG0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpg_pdata = { + .port_gpio_base = GPIO_PG0, + .port_pin_base = GPIO_PG0, + .port_width = GPIO_BANKSIZE, + .pint_id = -1, +}; + +static struct platform_device bfin_gpg_device = { + .name = ADI_GPIO_DEVNAME, + .id = 6, + .num_resources = ARRAY_SIZE(bfin_gpg_resources), + .resource = bfin_gpg_resources, + .dev = { + .platform_data = &bfin_gpg_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gph_resources[] = { + { + .start = PORTH_FER, + .end = PORTH_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PH0, + .end = IRQ_PH0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gph_pdata = { + .port_gpio_base = GPIO_PH0, + .port_pin_base = GPIO_PH0, + .port_width = 14, + .pint_id = -1, +}; + +static struct platform_device bfin_gph_device = { + .name = ADI_GPIO_DEVNAME, + .id = 7, + .num_resources = ARRAY_SIZE(bfin_gph_resources), + .resource = bfin_gph_resources, + .dev = { + .platform_data = &bfin_gph_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpi_resources[] = { + { + .start = PORTI_FER, + .end = PORTI_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PI0, + .end = IRQ_PI0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpi_pdata = { + .port_gpio_base = GPIO_PI0, + .port_pin_base = GPIO_PI0, + .port_width = GPIO_BANKSIZE, + .pint_id = -1, +}; + +static struct platform_device bfin_gpi_device = { + .name = ADI_GPIO_DEVNAME, + .id = 8, + .num_resources = ARRAY_SIZE(bfin_gpi_resources), + .resource = bfin_gpi_resources, + .dev = { + .platform_data = &bfin_gpi_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpj_resources[] = { + { + .start = PORTJ_FER, + .end = PORTJ_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PJ0, + .end = IRQ_PJ0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpj_pdata = { + .port_gpio_base = GPIO_PJ0, + .port_pin_base = GPIO_PJ0, + .port_width = 14, + .pint_id = -1, +}; + +static struct platform_device bfin_gpj_device = { + .name = ADI_GPIO_DEVNAME, + .id = 9, + .num_resources = ARRAY_SIZE(bfin_gpj_resources), 
+ .resource = bfin_gpj_resources, + .dev = { + .platform_data = &bfin_gpj_pdata, /* Passed to driver */ + }, +}; + +#endif + static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) @@ -1066,7 +1502,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ - .chip_select = 1, /* SPI_SSEL1*/ + .chip_select = MAX_CTRL_CS + GPIO_PE4, /* SPI_SSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, @@ -1078,7 +1514,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 1, - .chip_select = 4, + .chip_select = MAX_CTRL_CS + GPIO_PG6, /* SPI_SSEL2 */ }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) @@ -1088,7 +1524,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .irq = IRQ_PB4, /* old boards (<=Rev 1.3) use IRQ_PJ11 */ .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, - .chip_select = 2, + .chip_select = MAX_CTRL_CS + GPIO_PE5, /* SPI_SSEL2 */ }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) @@ -1096,7 +1532,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, - .chip_select = 1, + .chip_select = MAX_CTRL_CS + GPIO_PE4, /* SPI_SSEL1 */ }, #endif #if defined(CONFIG_INPUT_ADXL34X_SPI) || defined(CONFIG_INPUT_ADXL34X_SPI_MODULE) @@ -1106,7 +1542,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .irq = IRQ_PC5, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 1, - .chip_select = 2, + .chip_select = MAX_CTRL_CS + GPIO_PG6, /* SPI_SSEL2 */ .mode = SPI_MODE_3, }, #endif @@ -1152,7 +1588,7 @@ static struct resource bfin_spi1_resource[] = { /* SPI controller data */ static struct bfin5xx_spi_master bf54x_spi_master_info0 = { - .num_chipselect = 4, + .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; @@ -1168,7 +1604,7 @@ static struct platform_device bf54x_spi_master0 = { }; static struct bfin5xx_spi_master bf54x_spi_master_info1 = { - .num_chipselect = 4, + .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0}, }; @@ -1508,6 +1944,23 @@ static struct platform_device bfin_ac97 = { static struct platform_device *ezkit_devices[] __initdata = { &bfin_dpmc, +#if defined(CONFIG_PINCTRL_ADI2) + &bfin_pinctrl_device, + &bfin_pint0_device, + &bfin_pint1_device, + &bfin_pint2_device, + &bfin_pint3_device, + &bfin_gpa_device, + &bfin_gpb_device, + &bfin_gpc_device, + &bfin_gpd_device, + &bfin_gpe_device, + &bfin_gpf_device, + &bfin_gpg_device, + &bfin_gph_device, + &bfin_gpi_device, + &bfin_gpj_device, +#endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, @@ -1644,10 +2097,66 @@ static struct platform_device *ezkit_devices[] __initdata = { #endif }; +/* Pin control settings */ +static struct pinctrl_map __initdata bfin_pinmux_map[] = { + /* 
per-device maps */ + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.0", "pinctrl-adi2.0", NULL, "uart0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.1", "pinctrl-adi2.0", NULL, "uart1"), +#ifdef CONFIG_BFIN_UART1_CTSRTS + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.1", "pinctrl-adi2.0", NULL, "uart1_ctsrts"), +#endif + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.2", "pinctrl-adi2.0", NULL, "uart2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.3", "pinctrl-adi2.0", NULL, "uart3"), +#ifdef CONFIG_BFIN_UART3_CTSRTS + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.3", "pinctrl-adi2.0", NULL, "uart3_ctsrts"), +#endif + PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.0", "pinctrl-adi2.0", NULL, "uart0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.1", "pinctrl-adi2.0", NULL, "uart1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.2", "pinctrl-adi2.0", NULL, "uart2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.3", "pinctrl-adi2.0", NULL, "uart3"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-sdh.0", "pinctrl-adi2.0", NULL, "rsi0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi.0", "pinctrl-adi2.0", NULL, "spi0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi.1", "pinctrl-adi2.0", NULL, "spi1"), + PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.0", "pinctrl-adi2.0", NULL, "twi0"), +#if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */ + PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.1", "pinctrl-adi2.0", NULL, "twi1"), +#endif + PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"), + PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.1", "pinctrl-adi2.0", NULL, "sport1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.1", "pinctrl-adi2.0", NULL, "sport1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.2", "pinctrl-adi2.0", NULL, "sport2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.2", "pinctrl-adi2.0", NULL, "sport2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.2", "pinctrl-adi2.0", NULL, "sport2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.3", "pinctrl-adi2.0", NULL, "sport3"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.3", "pinctrl-adi2.0", NULL, "sport3"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.3", "pinctrl-adi2.0", NULL, "sport3"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.0", "pinctrl-adi2.0", NULL, "sport0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.1", "pinctrl-adi2.0", NULL, "sport1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.2", "pinctrl-adi2.0", NULL, "sport2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-sport-uart.3", "pinctrl-adi2.0", NULL, "sport3"), + PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi"), +#ifdef CONFIG_BF548_ATAPI_ALTERNATIVE_PORT + PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"), +#endif + PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"), + PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"), +}; + static int __init ezkit_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); + /* Initialize pinmuxing */ + pinctrl_register_mappings(bfin_pinmux_map, + ARRAY_SIZE(bfin_pinmux_map)); + i2c_register_board_info(0, bfin_i2c_board_info0, 
ARRAY_SIZE(bfin_i2c_board_info0)); #if !defined(CONFIG_BF542) /* The BF542 only has 1 TWI */ @@ -1679,21 +2188,6 @@ static struct platform_device *ezkit_early_devices[] __initdata = { &bfin_uart3_device, #endif #endif - -#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) -#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART - &bfin_sport0_uart_device, -#endif -#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART - &bfin_sport1_uart_device, -#endif -#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART - &bfin_sport2_uart_device, -#endif -#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART - &bfin_sport3_uart_device, -#endif -#endif }; void __init native_machine_early_platform_add_devices(void) diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h index be9edb28f96..006da1edcf8 100644 --- a/arch/blackfin/mach-bf548/include/mach/gpio.h +++ b/arch/blackfin/mach-bf548/include/mach/gpio.h @@ -194,14 +194,6 @@ struct gpio_port_t { unsigned int port_mux; }; -struct gpio_port_s { - unsigned short fer; - unsigned short data; - unsigned short dir; - unsigned short inen; - unsigned int mux; -}; - #endif #include <mach-common/ports-a.h> diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h index 10dc142c518..cf7cb725cfa 100644 --- a/arch/blackfin/mach-bf548/include/mach/irq.h +++ b/arch/blackfin/mach-bf548/include/mach/irq.h @@ -433,7 +433,7 @@ #include <linux/types.h> /* - * bfin pint registers layout + * gpio pint registers layout */ struct bfin_pint_regs { u32 mask_set; diff --git a/arch/blackfin/mach-bf548/include/mach/portmux.h b/arch/blackfin/mach-bf548/include/mach/portmux.h index e2224620273..d9f8632d7d0 100644 --- a/arch/blackfin/mach-bf548/include/mach/portmux.h +++ b/arch/blackfin/mach-bf548/include/mach/portmux.h @@ -7,8 +7,6 @@ #ifndef _MACH_PORTMUX_H_ #define _MACH_PORTMUX_H_ -#define MAX_RESOURCES MAX_BLACKFIN_GPIOS - #define P_SPORT2_TFS (P_DEFINED | P_IDENT(GPIO_PA0) | P_FUNCT(0)) #define P_SPORT2_DTSEC (P_DEFINED | P_IDENT(GPIO_PA1) | P_FUNCT(0)) #define P_SPORT2_DTPRI (P_DEFINED | P_IDENT(GPIO_PA2) | P_FUNCT(0)) diff --git a/arch/blackfin/mach-bf609/Kconfig b/arch/blackfin/mach-bf609/Kconfig index 2bcbf94b1ed..b0fca44110b 100644 --- a/arch/blackfin/mach-bf609/Kconfig +++ b/arch/blackfin/mach-bf609/Kconfig @@ -9,48 +9,6 @@ source "arch/blackfin/mach-bf609/boards/Kconfig" menu "BF609 Specific Configuration" -comment "Pin Interrupt to Port Assignment" -menu "Assignment" - -config PINTx_REASSIGN - bool "Reprogram PINT Assignment" - default y - help - The interrupt assignment registers controls the pin-to-interrupt - assignment in a byte-wide manner. Each option allows you to select - a set of pins (High/Low Byte) of an specific Port being mapped - to one of the four PIN Interrupts IRQ_PINTx. - - You shouldn't change any of these unless you know exactly what you're doing. - Please consult the Blackfin BF60x Processor Hardware Reference Manual. 
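/*
 * On BF609 the PINT assignment likewise moves out of Kconfig: each GPIO bank
 * gets adi-gpio platform data, and pin multiplexing is requested through
 * board-level pinctrl maps rather than peripheral_request_list().  A minimal
 * sketch of that registration, reusing names that appear in the BF609-EZKIT
 * hunks below ("bfin-uart.0", "pinctrl-adi2.0", "uart0"); the one-entry map
 * and "example_board_init" are illustrative, not a complete board table:
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

static struct pinctrl_map example_pinmux_map[] __initdata = {
	/* route the pinctrl-adi2 "uart0" pin group to the bfin-uart.0 device */
	PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.0", "pinctrl-adi2.0", NULL, "uart0"),
};

static int __init example_board_init(void)
{
	/* register the map once at board init, before the consumer devices probe */
	return pinctrl_register_mappings(example_pinmux_map,
					 ARRAY_SIZE(example_pinmux_map));
}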
- -config PINT0_ASSIGN - hex "PINT0_ASSIGN" - depends on PINTx_REASSIGN - default 0x00000101 -config PINT1_ASSIGN - hex "PINT1_ASSIGN" - depends on PINTx_REASSIGN - default 0x00000101 -config PINT2_ASSIGN - hex "PINT2_ASSIGN" - depends on PINTx_REASSIGN - default 0x00000101 -config PINT3_ASSIGN - hex "PINT3_ASSIGN" - depends on PINTx_REASSIGN - default 0x00000101 -config PINT4_ASSIGN - hex "PINT3_ASSIGN" - depends on PINTx_REASSIGN - default 0x00000101 -config PINT5_ASSIGN - hex "PINT3_ASSIGN" - depends on PINTx_REASSIGN - default 0x00000101 - -endmenu - config SEC_IRQ_PRIORITY_LEVELS int "SEC interrupt priority levels" default 7 diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index d56a55ad83a..82beedd953f 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -17,6 +17,9 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> +#include <linux/pinctrl/machine.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/platform_data/pinctrl-adi2.h> #include <asm/bfin_spi3.h> #include <asm/dma.h> #include <asm/gpio.h> @@ -106,8 +109,6 @@ static struct platform_device bfin_rotary_device = { #include <linux/stmmac.h> #include <linux/phy.h> -static unsigned short pins[] = P_RMII0; - static struct stmmac_mdio_bus_data phy_private_data = { .phy_mask = 1, }; @@ -212,6 +213,18 @@ static struct resource bfin_uart0_resources[] = { .end = UART0_RXDIV+4, .flags = IORESOURCE_MEM, }, +#ifdef CONFIG_EARLY_PRINTK + { + .start = PORTD_FER, + .end = PORTD_FER+2, + .flags = IORESOURCE_REG, + }, + { + .start = PORTD_MUX, + .end = PORTD_MUX+3, + .flags = IORESOURCE_REG, + }, +#endif { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, @@ -276,6 +289,13 @@ static struct resource bfin_uart1_resources[] = { .end = UART1_RXDIV+4, .flags = IORESOURCE_MEM, }, +#ifdef CONFIG_EARLY_PRINTK + { + .start = PORTG_FER_SET, + .end = PORTG_FER_SET+2, + .flags = IORESOURCE_REG, + }, +#endif { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, @@ -674,17 +694,12 @@ static struct mtd_partition ezkit_partitions[] = { }, }; -int bf609_nor_flash_init(struct platform_device *dev) +int bf609_nor_flash_init(struct platform_device *pdev) { #define CONFIG_SMC_GCTL_VAL 0x00000010 - const unsigned short pins[] = { - P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12, - P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21, - P_A22, P_A23, P_A24, P_A25, P_NORCK, 0, - }; - - peripheral_request_list(pins, "smc0"); + if (!devm_pinctrl_get_select_default(&pdev->dev)) + return -EBUSY; bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); bfin_write32(SMC_B0CTL, 0x01002011); bfin_write32(SMC_B0TIM, 0x08170977); @@ -692,16 +707,9 @@ int bf609_nor_flash_init(struct platform_device *dev) return 0; } -void bf609_nor_flash_exit(struct platform_device *dev) +void bf609_nor_flash_exit(struct platform_device *pdev) { - const unsigned short pins[] = { - P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12, - P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21, - P_A22, P_A23, P_A24, P_A25, P_NORCK, 0, - }; - - peripheral_free_list(pins); - + devm_pinctrl_put(pdev->dev.pins->p); bfin_write32(SMC_GCTL, 0); } @@ -1319,6 +1327,356 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = { }; #endif +#ifdef CONFIG_PINCTRL_ADI2 + +# define ADI_PINT_DEVNAME "adi-gpio-pint" +# define ADI_GPIO_DEVNAME "adi-gpio" +# define ADI_PINCTRL_DEVNAME "pinctrl-adi2" + +static struct platform_device bfin_pinctrl_device = { + 
.name = ADI_PINCTRL_DEVNAME, + .id = 0, +}; + +static struct resource bfin_pint0_resources[] = { + { + .start = PINT0_MASK_SET, + .end = PINT0_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT0, + .end = IRQ_PINT0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint0_device = { + .name = ADI_PINT_DEVNAME, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_pint0_resources), + .resource = bfin_pint0_resources, +}; + +static struct resource bfin_pint1_resources[] = { + { + .start = PINT1_MASK_SET, + .end = PINT1_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT1, + .end = IRQ_PINT1, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint1_device = { + .name = ADI_PINT_DEVNAME, + .id = 1, + .num_resources = ARRAY_SIZE(bfin_pint1_resources), + .resource = bfin_pint1_resources, +}; + +static struct resource bfin_pint2_resources[] = { + { + .start = PINT2_MASK_SET, + .end = PINT2_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT2, + .end = IRQ_PINT2, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint2_device = { + .name = ADI_PINT_DEVNAME, + .id = 2, + .num_resources = ARRAY_SIZE(bfin_pint2_resources), + .resource = bfin_pint2_resources, +}; + +static struct resource bfin_pint3_resources[] = { + { + .start = PINT3_MASK_SET, + .end = PINT3_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT3, + .end = IRQ_PINT3, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint3_device = { + .name = ADI_PINT_DEVNAME, + .id = 3, + .num_resources = ARRAY_SIZE(bfin_pint3_resources), + .resource = bfin_pint3_resources, +}; + +static struct resource bfin_pint4_resources[] = { + { + .start = PINT4_MASK_SET, + .end = PINT4_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT4, + .end = IRQ_PINT4, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint4_device = { + .name = ADI_PINT_DEVNAME, + .id = 4, + .num_resources = ARRAY_SIZE(bfin_pint4_resources), + .resource = bfin_pint4_resources, +}; + +static struct resource bfin_pint5_resources[] = { + { + .start = PINT5_MASK_SET, + .end = PINT5_LATCH + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PINT5, + .end = IRQ_PINT5, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_pint5_device = { + .name = ADI_PINT_DEVNAME, + .id = 5, + .num_resources = ARRAY_SIZE(bfin_pint5_resources), + .resource = bfin_pint5_resources, +}; + +static struct resource bfin_gpa_resources[] = { + { + .start = PORTA_FER, + .end = PORTA_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { /* optional */ + .start = IRQ_PA0, + .end = IRQ_PA0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpa_pdata = { + .port_pin_base = GPIO_PA0, + .port_width = GPIO_BANKSIZE, + .pint_id = 0, /* PINT0 */ + .pint_assign = true, /* PINT upper 16 bit */ + .pint_map = 0, /* mapping mask in PINT */ +}; + +static struct platform_device bfin_gpa_device = { + .name = ADI_GPIO_DEVNAME, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_gpa_resources), + .resource = bfin_gpa_resources, + .dev = { + .platform_data = &bfin_gpa_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpb_resources[] = { + { + .start = PORTB_FER, + .end = PORTB_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PB0, + .end = IRQ_PB0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpb_pdata = { + .port_pin_base = GPIO_PB0, + 
.port_width = GPIO_BANKSIZE, + .pint_id = 0, + .pint_assign = false, + .pint_map = 1, +}; + +static struct platform_device bfin_gpb_device = { + .name = ADI_GPIO_DEVNAME, + .id = 1, + .num_resources = ARRAY_SIZE(bfin_gpb_resources), + .resource = bfin_gpb_resources, + .dev = { + .platform_data = &bfin_gpb_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpc_resources[] = { + { + .start = PORTC_FER, + .end = PORTC_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PC0, + .end = IRQ_PC0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpc_pdata = { + .port_pin_base = GPIO_PC0, + .port_width = GPIO_BANKSIZE, + .pint_id = 1, + .pint_assign = false, + .pint_map = 1, +}; + +static struct platform_device bfin_gpc_device = { + .name = ADI_GPIO_DEVNAME, + .id = 2, + .num_resources = ARRAY_SIZE(bfin_gpc_resources), + .resource = bfin_gpc_resources, + .dev = { + .platform_data = &bfin_gpc_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpd_resources[] = { + { + .start = PORTD_FER, + .end = PORTD_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PD0, + .end = IRQ_PD0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpd_pdata = { + .port_pin_base = GPIO_PD0, + .port_width = GPIO_BANKSIZE, + .pint_id = 2, + .pint_assign = false, + .pint_map = 1, +}; + +static struct platform_device bfin_gpd_device = { + .name = ADI_GPIO_DEVNAME, + .id = 3, + .num_resources = ARRAY_SIZE(bfin_gpd_resources), + .resource = bfin_gpd_resources, + .dev = { + .platform_data = &bfin_gpd_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpe_resources[] = { + { + .start = PORTE_FER, + .end = PORTE_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PE0, + .end = IRQ_PE0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpe_pdata = { + .port_pin_base = GPIO_PE0, + .port_width = GPIO_BANKSIZE, + .pint_id = 3, + .pint_assign = false, + .pint_map = 1, +}; + +static struct platform_device bfin_gpe_device = { + .name = ADI_GPIO_DEVNAME, + .id = 4, + .num_resources = ARRAY_SIZE(bfin_gpe_resources), + .resource = bfin_gpe_resources, + .dev = { + .platform_data = &bfin_gpe_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpf_resources[] = { + { + .start = PORTF_FER, + .end = PORTF_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PF0, + .end = IRQ_PF0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpf_pdata = { + .port_pin_base = GPIO_PF0, + .port_width = GPIO_BANKSIZE, + .pint_id = 4, + .pint_assign = false, + .pint_map = 1, +}; + +static struct platform_device bfin_gpf_device = { + .name = ADI_GPIO_DEVNAME, + .id = 5, + .num_resources = ARRAY_SIZE(bfin_gpf_resources), + .resource = bfin_gpf_resources, + .dev = { + .platform_data = &bfin_gpf_pdata, /* Passed to driver */ + }, +}; + +static struct resource bfin_gpg_resources[] = { + { + .start = PORTG_FER, + .end = PORTG_MUX + 3, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_PG0, + .end = IRQ_PG0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct adi_pinctrl_gpio_platform_data bfin_gpg_pdata = { + .port_pin_base = GPIO_PG0, + .port_width = GPIO_BANKSIZE, + .pint_id = 5, + .pint_assign = false, + .pint_map = 1, +}; + +static struct platform_device bfin_gpg_device = { + .name = ADI_GPIO_DEVNAME, + .id = 6, + .num_resources = ARRAY_SIZE(bfin_gpg_resources), + .resource = bfin_gpg_resources, + .dev = { + 
.platform_data = &bfin_gpg_pdata, /* Passed to driver */ + }, +}; + +#endif + #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/input.h> #include <linux/gpio_keys.h> @@ -1349,7 +1707,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ - .chip_select = 1, /* SPI_SSEL1*/ + .chip_select = MAX_CTRL_CS + GPIO_PD11, /* SPI_SSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, @@ -1362,7 +1720,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .irq = IRQ_PD9, .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, - .chip_select = 4, + .chip_select = MAX_CTRL_CS + GPIO_PC15, /* SPI_SSEL4 */ }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) @@ -1370,7 +1728,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, - .chip_select = 1, + .chip_select = MAX_CTRL_CS + GPIO_PD11, /* SPI_SSEL1*/ .controller_data = &spidev_chip_info, }, #endif @@ -1565,6 +1923,22 @@ static struct platform_device bfin_dpmc = { static struct platform_device *ezkit_devices[] __initdata = { &bfin_dpmc, +#if defined(CONFIG_PINCTRL_ADI2) + &bfin_pinctrl_device, + &bfin_pint0_device, + &bfin_pint1_device, + &bfin_pint2_device, + &bfin_pint3_device, + &bfin_pint4_device, + &bfin_pint5_device, + &bfin_gpa_device, + &bfin_gpb_device, + &bfin_gpc_device, + &bfin_gpd_device, + &bfin_gpe_device, + &bfin_gpf_device, + &bfin_gpg_device, +#endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, @@ -1681,20 +2055,52 @@ static struct platform_device *ezkit_devices[] __initdata = { }; +/* Pin control settings */ +static struct pinctrl_map __initdata bfin_pinmux_map[] = { + /* per-device maps */ + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.0", "pinctrl-adi2.0", NULL, "uart0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-uart.1", "pinctrl-adi2.0", NULL, "uart1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.0", "pinctrl-adi2.0", NULL, "uart0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_sir.1", "pinctrl-adi2.0", NULL, "uart1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-sdh.0", "pinctrl-adi2.0", NULL, "rsi0"), + PIN_MAP_MUX_GROUP_DEFAULT("stmmaceth.0", "pinctrl-adi2.0", NULL, "eth0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi3.0", "pinctrl-adi2.0", NULL, "spi0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-spi3.1", "pinctrl-adi2.0", NULL, "spi1"), + PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.0", "pinctrl-adi2.0", NULL, "twi0"), + PIN_MAP_MUX_GROUP_DEFAULT("i2c-bfin-twi.1", "pinctrl-adi2.0", NULL, "twi1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), + PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"), + PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), +#if defined(CONFIG_VIDEO_MT9M114) || defined(CONFIG_VIDEO_MT9M114_MODULE) + PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"), +#elif defined(CONFIG_VIDEO_VS6624) || defined(CONFIG_VIDEO_VS6624_MODULE) + PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), 
+#else + PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"), +#endif + PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.1", "pinctrl-adi2.0", NULL, "sport1"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.2", "pinctrl-adi2.0", NULL, "sport2"), + PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.2", "pinctrl-adi2.0", NULL, "sport2"), +}; + static int __init ezkit_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); + /* Initialize pinmuxing */ + pinctrl_register_mappings(bfin_pinmux_map, + ARRAY_SIZE(bfin_pinmux_map)); + i2c_register_board_info(0, bfin_i2c_board_info0, ARRAY_SIZE(bfin_i2c_board_info0)); i2c_register_board_info(1, bfin_i2c_board_info1, ARRAY_SIZE(bfin_i2c_board_info1)); -#if defined(CONFIG_STMMAC_ETH) || defined(CONFIG_STMMAC_ETH_MODULE) - if (!peripheral_request_list(pins, "emac0")) - printk(KERN_ERR "%s(): request emac pins failed\n", __func__); -#endif - platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); @@ -1713,18 +2119,6 @@ static struct platform_device *ezkit_early_devices[] __initdata = { &bfin_uart1_device, #endif #endif - -#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) -#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART - &bfin_sport0_uart_device, -#endif -#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART - &bfin_sport1_uart_device, -#endif -#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART - &bfin_sport2_uart_device, -#endif -#endif }; void __init native_machine_early_platform_add_devices(void) diff --git a/arch/blackfin/mach-bf609/include/mach/gpio.h b/arch/blackfin/mach-bf609/include/mach/gpio.h index c32c8cc8db2..07182513e79 100644 --- a/arch/blackfin/mach-bf609/include/mach/gpio.h +++ b/arch/blackfin/mach-bf609/include/mach/gpio.h @@ -152,14 +152,6 @@ struct gpio_port_t { unsigned long revid; }; -struct gpio_port_s { - unsigned short fer; - unsigned short data; - unsigned short dir; - unsigned short inen; - unsigned int mux; -}; - #endif #include <mach-common/ports-a.h> diff --git a/arch/blackfin/mach-bf609/include/mach/irq.h b/arch/blackfin/mach-bf609/include/mach/irq.h index fa0843d5d77..d1cb6a86f80 100644 --- a/arch/blackfin/mach-bf609/include/mach/irq.h +++ b/arch/blackfin/mach-bf609/include/mach/irq.h @@ -298,7 +298,7 @@ extern u8 sec_int_priority[]; /* - * bfin pint registers layout + * gpio pint registers layout */ struct bfin_pint_regs { u32 mask_set; diff --git a/arch/blackfin/mach-bf609/include/mach/portmux.h b/arch/blackfin/mach-bf609/include/mach/portmux.h index 2e1a51c2509..c48bb71a55c 100644 --- a/arch/blackfin/mach-bf609/include/mach/portmux.h +++ b/arch/blackfin/mach-bf609/include/mach/portmux.h @@ -7,8 +7,6 @@ #ifndef _MACH_PORTMUX_H_ #define _MACH_PORTMUX_H_ -#define MAX_RESOURCES MAX_BLACKFIN_GPIOS - /* EMAC RMII Port Mux */ #define P_MII0_MDC (P_DEFINED | P_IDENT(GPIO_PC6) | P_FUNCT(0)) #define P_MII0_MDIO (P_DEFINED | P_IDENT(GPIO_PC7) | P_FUNCT(0)) @@ -21,6 +19,7 @@ #define P_MII0_CRS (P_DEFINED | P_IDENT(GPIO_PC5) | P_FUNCT(0)) #define P_MII0_ERxER (P_DEFINED | P_IDENT(GPIO_PC4) | P_FUNCT(0)) #define P_MII0_TxCLK (P_DEFINED | P_IDENT(GPIO_PB14) | P_FUNCT(0)) +#define P_MII0_PTPPPS (P_DEFINED | P_IDENT(GPIO_PB15) | P_FUNCT(0)) #define P_RMII0 {\ P_MII0_ETxD0, \ @@ -32,6 +31,7 @@ P_MII0_TxCLK, \ P_MII0_PHYINT, \ P_MII0_CRS, \ + 
P_MII0_PTPPPS, \ P_MII0_MDC, \ P_MII0_MDIO, 0} @@ -46,6 +46,7 @@ #define P_MII1_CRS (P_DEFINED | P_IDENT(GPIO_PE13) | P_FUNCT(0)) #define P_MII1_ERxER (P_DEFINED | P_IDENT(GPIO_PE14) | P_FUNCT(0)) #define P_MII1_TxCLK (P_DEFINED | P_IDENT(GPIO_PG6) | P_FUNCT(0)) +#define P_MII1_PTPPPS (P_DEFINED | P_IDENT(GPIO_PC9) | P_FUNCT(0)) #define P_RMII1 {\ P_MII1_ETxD0, \ @@ -57,6 +58,7 @@ P_MII1_TxCLK, \ P_MII1_PHYINT, \ P_MII1_CRS, \ + P_MII1_PTPPPS, \ P_MII1_MDC, \ P_MII1_MDIO, 0} diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index d143fd8d2bc..ca75613231c 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -704,10 +704,9 @@ static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) __irq_set_handler_locked(irq, handle); } -static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS); -extern void bfin_gpio_irq_prepare(unsigned gpio); +#ifdef CONFIG_GPIO_ADI -#if !BFIN_GPIO_PINT +static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS); static void bfin_gpio_ack_irq(struct irq_data *d) { @@ -821,15 +820,6 @@ static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -#ifdef CONFIG_PM -static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state) -{ - return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state); -} -#else -# define bfin_gpio_set_wake NULL -#endif - static void bfin_demux_gpio_block(unsigned int irq) { unsigned int gpio, mask; @@ -896,279 +886,40 @@ void bfin_demux_gpio_irq(unsigned int inta_irq, bfin_demux_gpio_block(irq); } -#else - -#define NR_PINT_BITS 32 -#define IRQ_NOT_AVAIL 0xFF - -#define PINT_2_BANK(x) ((x) >> 5) -#define PINT_2_BIT(x) ((x) & 0x1F) -#define PINT_BIT(x) (1 << (PINT_2_BIT(x))) - -static unsigned char irq2pint_lut[NR_PINTS]; -static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS]; - -static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = { - (struct bfin_pint_regs *)PINT0_MASK_SET, - (struct bfin_pint_regs *)PINT1_MASK_SET, - (struct bfin_pint_regs *)PINT2_MASK_SET, - (struct bfin_pint_regs *)PINT3_MASK_SET, -#ifdef CONFIG_BF60x - (struct bfin_pint_regs *)PINT4_MASK_SET, - (struct bfin_pint_regs *)PINT5_MASK_SET, -#endif -}; - -inline unsigned int get_irq_base(u32 bank, u8 bmap) -{ - unsigned int irq_base; - -#ifndef CONFIG_BF60x - if (bank < 2) { /*PA-PB */ - irq_base = IRQ_PA0 + bmap * 16; - } else { /*PC-PJ */ - irq_base = IRQ_PC0 + bmap * 16; - } -#else - irq_base = IRQ_PA0 + bank * 16 + bmap * 16; -#endif - return irq_base; -} - - /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */ -void init_pint_lut(void) -{ - u16 bank, bit, irq_base, bit_pos; - u32 pint_assign; - u8 bmap; - - memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut)); - - for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) { - - pint_assign = pint[bank]->assign; - - for (bit = 0; bit < NR_PINT_BITS; bit++) { - - bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF; - - irq_base = get_irq_base(bank, bmap); - - irq_base += (bit % 8) + ((bit / 8) & 1 ? 
8 : 0); - bit_pos = bit + bank * NR_PINT_BITS; - - pint2irq_lut[bit_pos] = irq_base - SYS_IRQS; - irq2pint_lut[irq_base - SYS_IRQS] = bit_pos; - } - } -} - -static void bfin_gpio_ack_irq(struct irq_data *d) -{ - u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; - u32 pintbit = PINT_BIT(pint_val); - u32 bank = PINT_2_BANK(pint_val); - - if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { - if (pint[bank]->invert_set & pintbit) - pint[bank]->invert_clear = pintbit; - else - pint[bank]->invert_set = pintbit; - } - pint[bank]->request = pintbit; - -} - -static void bfin_gpio_mask_ack_irq(struct irq_data *d) -{ - u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; - u32 pintbit = PINT_BIT(pint_val); - u32 bank = PINT_2_BANK(pint_val); - - if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { - if (pint[bank]->invert_set & pintbit) - pint[bank]->invert_clear = pintbit; - else - pint[bank]->invert_set = pintbit; - } - - pint[bank]->request = pintbit; - pint[bank]->mask_clear = pintbit; -} - -static void bfin_gpio_mask_irq(struct irq_data *d) -{ - u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; - - pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val); -} - -static void bfin_gpio_unmask_irq(struct irq_data *d) -{ - u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; - u32 pintbit = PINT_BIT(pint_val); - u32 bank = PINT_2_BANK(pint_val); - - pint[bank]->mask_set = pintbit; -} - -static unsigned int bfin_gpio_irq_startup(struct irq_data *d) -{ - unsigned int irq = d->irq; - u32 gpionr = irq_to_gpio(irq); - u32 pint_val = irq2pint_lut[irq - SYS_IRQS]; - - if (pint_val == IRQ_NOT_AVAIL) { - printk(KERN_ERR - "GPIO IRQ %d :Not in PINT Assign table " - "Reconfigure Interrupt to Port Assignemt\n", irq); - return -ENODEV; - } - - if (__test_and_set_bit(gpionr, gpio_enabled)) - bfin_gpio_irq_prepare(gpionr); - - bfin_gpio_unmask_irq(d); - - return 0; -} - -static void bfin_gpio_irq_shutdown(struct irq_data *d) -{ - u32 gpionr = irq_to_gpio(d->irq); - - bfin_gpio_mask_irq(d); - __clear_bit(gpionr, gpio_enabled); - bfin_gpio_irq_free(gpionr); -} - -static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type) -{ - unsigned int irq = d->irq; - int ret; - char buf[16]; - u32 gpionr = irq_to_gpio(irq); - u32 pint_val = irq2pint_lut[irq - SYS_IRQS]; - u32 pintbit = PINT_BIT(pint_val); - u32 bank = PINT_2_BANK(pint_val); - - if (pint_val == IRQ_NOT_AVAIL) - return -ENODEV; - - if (type == IRQ_TYPE_PROBE) { - /* only probe unenabled GPIO interrupt lines */ - if (test_bit(gpionr, gpio_enabled)) - return 0; - type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; - } - - if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | - IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { - - snprintf(buf, 16, "gpio-irq%d", irq); - ret = bfin_gpio_irq_request(gpionr, buf); - if (ret) - return ret; - - if (__test_and_set_bit(gpionr, gpio_enabled)) - bfin_gpio_irq_prepare(gpionr); - - } else { - __clear_bit(gpionr, gpio_enabled); - return 0; - } - - if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW))) - pint[bank]->invert_set = pintbit; /* low or falling edge denoted by one */ - else - pint[bank]->invert_clear = pintbit; /* high or rising edge denoted by zero */ - - if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) - == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { - if (gpio_get_value(gpionr)) - pint[bank]->invert_set = pintbit; - else - pint[bank]->invert_clear = pintbit; - } - - if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { - pint[bank]->edge_set = pintbit; - bfin_set_irq_handler(irq, 
handle_edge_irq); - } else { - pint[bank]->edge_clear = pintbit; - bfin_set_irq_handler(irq, handle_level_irq); - } - - return 0; -} - #ifdef CONFIG_PM -static struct bfin_pm_pint_save save_pint_reg[NR_PINT_SYS_IRQS]; -static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS]; static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state) { - u32 pint_irq; - u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS]; - u32 bank = PINT_2_BANK(pint_val); - - switch (bank) { - case 0: - pint_irq = IRQ_PINT0; - break; - case 2: - pint_irq = IRQ_PINT2; - break; - case 3: - pint_irq = IRQ_PINT3; - break; - case 1: - pint_irq = IRQ_PINT1; - break; -#ifdef CONFIG_BF60x - case 4: - pint_irq = IRQ_PINT4; - break; - case 5: - pint_irq = IRQ_PINT5; - break; -#endif - default: - return -EINVAL; - } + return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state); +} -#ifndef SEC_GCTL - bfin_internal_set_wake(pint_irq, state); -#endif +#else - return 0; -} +# define bfin_gpio_set_wake NULL -void bfin_pint_suspend(void) -{ - u32 bank; +#endif - for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) { - save_pint_reg[bank].mask_set = pint[bank]->mask_set; - save_pint_reg[bank].assign = pint[bank]->assign; - save_pint_reg[bank].edge_set = pint[bank]->edge_set; - save_pint_reg[bank].invert_set = pint[bank]->invert_set; - } -} +static struct irq_chip bfin_gpio_irqchip = { + .name = "GPIO", + .irq_ack = bfin_gpio_ack_irq, + .irq_mask = bfin_gpio_mask_irq, + .irq_mask_ack = bfin_gpio_mask_ack_irq, + .irq_unmask = bfin_gpio_unmask_irq, + .irq_disable = bfin_gpio_mask_irq, + .irq_enable = bfin_gpio_unmask_irq, + .irq_set_type = bfin_gpio_irq_type, + .irq_startup = bfin_gpio_irq_startup, + .irq_shutdown = bfin_gpio_irq_shutdown, + .irq_set_wake = bfin_gpio_set_wake, +}; -void bfin_pint_resume(void) -{ - u32 bank; +#endif - for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) { - pint[bank]->mask_set = save_pint_reg[bank].mask_set; - pint[bank]->assign = save_pint_reg[bank].assign; - pint[bank]->edge_set = save_pint_reg[bank].edge_set; - pint[bank]->invert_set = save_pint_reg[bank].invert_set; - } -} +#ifdef CONFIG_PM #ifdef SEC_GCTL +static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS]; + static int sec_suspend(void) { u32 bank; @@ -1195,92 +946,10 @@ static struct syscore_ops sec_pm_syscore_ops = { .suspend = sec_suspend, .resume = sec_resume, }; - -#endif -#else -# define bfin_gpio_set_wake NULL -#endif - -void bfin_demux_gpio_irq(unsigned int inta_irq, - struct irq_desc *desc) -{ - u32 bank, pint_val; - u32 request, irq; - u32 level_mask; - int umask = 0; - struct irq_chip *chip = irq_desc_get_chip(desc); - - if (chip->irq_mask_ack) { - chip->irq_mask_ack(&desc->irq_data); - } else { - chip->irq_mask(&desc->irq_data); - if (chip->irq_ack) - chip->irq_ack(&desc->irq_data); - } - - switch (inta_irq) { - case IRQ_PINT0: - bank = 0; - break; - case IRQ_PINT2: - bank = 2; - break; - case IRQ_PINT3: - bank = 3; - break; - case IRQ_PINT1: - bank = 1; - break; -#ifdef CONFIG_BF60x - case IRQ_PINT4: - bank = 4; - break; - case IRQ_PINT5: - bank = 5; - break; #endif - default: - return; - } - - pint_val = bank * NR_PINT_BITS; - - request = pint[bank]->request; - - level_mask = pint[bank]->edge_set & request; - - while (request) { - if (request & 1) { - irq = pint2irq_lut[pint_val] + SYS_IRQS; - if (level_mask & PINT_BIT(pint_val)) { - umask = 1; - chip->irq_unmask(&desc->irq_data); - } - bfin_handle_irq(irq); - } - pint_val++; - request >>= 1; - } - if (!umask) - chip->irq_unmask(&desc->irq_data); -} #endif -static struct irq_chip bfin_gpio_irqchip = { - 
.name = "GPIO", - .irq_ack = bfin_gpio_ack_irq, - .irq_mask = bfin_gpio_mask_irq, - .irq_mask_ack = bfin_gpio_mask_ack_irq, - .irq_unmask = bfin_gpio_unmask_irq, - .irq_disable = bfin_gpio_mask_irq, - .irq_enable = bfin_gpio_unmask_irq, - .irq_set_type = bfin_gpio_irq_type, - .irq_startup = bfin_gpio_irq_startup, - .irq_shutdown = bfin_gpio_irq_shutdown, - .irq_set_wake = bfin_gpio_set_wake, -}; - void init_exception_vectors(void) { /* cannot program in software: @@ -1331,17 +1000,6 @@ int __init init_arch_irq(void) local_irq_disable(); -#if BFIN_GPIO_PINT -# ifdef CONFIG_PINTx_REASSIGN - pint[0]->assign = CONFIG_PINT0_ASSIGN; - pint[1]->assign = CONFIG_PINT1_ASSIGN; - pint[2]->assign = CONFIG_PINT2_ASSIGN; - pint[3]->assign = CONFIG_PINT3_ASSIGN; -# endif - /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */ - init_pint_lut(); -#endif - for (irq = 0; irq <= SYS_IRQS; irq++) { if (irq <= IRQ_CORETMR) irq_set_chip(irq, &bfin_core_irqchip); @@ -1349,12 +1007,8 @@ int __init init_arch_irq(void) irq_set_chip(irq, &bfin_internal_irqchip); switch (irq) { -#if BFIN_GPIO_PINT - case IRQ_PINT0: - case IRQ_PINT1: - case IRQ_PINT2: - case IRQ_PINT3: -#elif defined(BF537_FAMILY) +#if !BFIN_GPIO_PINT +#if defined(BF537_FAMILY) case IRQ_PH_INTA_MAC_RX: case IRQ_PF_INTA_PG_INTA: #elif defined(BF533_FAMILY) @@ -1372,6 +1026,7 @@ int __init init_arch_irq(void) #endif irq_set_chained_handler(irq, bfin_demux_gpio_irq); break; +#endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) case IRQ_MAC_ERROR: irq_set_chained_handler(irq, @@ -1419,10 +1074,12 @@ int __init init_arch_irq(void) handle_level_irq); #endif /* if configured as edge, then will be changed to do_edge_IRQ */ +#ifdef CONFIG_GPIO_ADI for (irq = GPIO_IRQ_BASE; irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) irq_set_chip_and_handler(irq, &bfin_gpio_irqchip, handle_level_irq); +#endif bfin_write_IMASK(0); CSYNC(); ilat = bfin_read_ILAT(); @@ -1525,19 +1182,6 @@ int __init init_arch_irq(void) local_irq_disable(); -#if BFIN_GPIO_PINT -# ifdef CONFIG_PINTx_REASSIGN - pint[0]->assign = CONFIG_PINT0_ASSIGN; - pint[1]->assign = CONFIG_PINT1_ASSIGN; - pint[2]->assign = CONFIG_PINT2_ASSIGN; - pint[3]->assign = CONFIG_PINT3_ASSIGN; - pint[4]->assign = CONFIG_PINT4_ASSIGN; - pint[5]->assign = CONFIG_PINT5_ASSIGN; -# endif - /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! 
*/ - init_pint_lut(); -#endif - for (irq = 0; irq <= SYS_IRQS; irq++) { if (irq <= IRQ_CORETMR) { irq_set_chip_and_handler(irq, &bfin_core_irqchip, @@ -1546,9 +1190,6 @@ int __init init_arch_irq(void) if (irq == IRQ_CORETMR) irq_set_handler(irq, handle_percpu_irq); #endif - } else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) { - irq_set_chip(irq, &bfin_sec_irqchip); - irq_set_chained_handler(irq, bfin_demux_gpio_irq); } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) { irq_set_chip_and_handler(irq, &bfin_sec_irqchip, handle_percpu_irq); @@ -1563,10 +1204,6 @@ int __init init_arch_irq(void) __irq_set_preflow_handler(irq, bfin_sec_preflow_handler); } } - for (irq = GPIO_IRQ_BASE; - irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) - irq_set_chip_and_handler(irq, &bfin_gpio_irqchip, - handle_level_irq); bfin_write_IMASK(0); CSYNC(); diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c index 87bfe549ad3..1387a94bcfd 100644 --- a/arch/blackfin/mach-common/pm.c +++ b/arch/blackfin/mach-common/pm.c @@ -27,7 +27,7 @@ struct bfin_cpu_pm_fns *bfin_cpu_pm; void bfin_pm_suspend_standby_enter(void) { -#ifndef CONFIG_BF60x +#if !BFIN_GPIO_PINT bfin_pm_standby_setup(); #endif @@ -41,7 +41,7 @@ void bfin_pm_suspend_standby_enter(void) # endif #endif -#ifndef CONFIG_BF60x +#if !BFIN_GPIO_PINT bfin_pm_standby_restore(); #endif @@ -128,6 +128,7 @@ static void flushinv_all_dcache(void) if ((status & 0x3) != 0x3) continue; + /* construct the address using the tag */ addr = (status & 0xFFFFC800) | (subbank << 12) | (set << 5); @@ -140,11 +141,14 @@ static void flushinv_all_dcache(void) int bfin_pm_suspend_mem_enter(void) { - int wakeup, ret; + int ret; +#ifndef CONFIG_BF60x + int wakeup; +#endif unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH + L1_DATA_B_LENGTH + L1_SCRATCH_LENGTH, - GFP_KERNEL); + GFP_ATOMIC); if (memptr == NULL) { panic("bf53x_suspend_l1_mem malloc failed"); @@ -170,10 +174,8 @@ int bfin_pm_suspend_mem_enter(void) return ret; } +#ifdef CONFIG_GPIO_ADI bfin_gpio_pm_hibernate_suspend(); - -#if BFIN_GPIO_PINT - bfin_pint_suspend(); #endif #if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK) @@ -194,11 +196,9 @@ int bfin_pm_suspend_mem_enter(void) _enable_icplb(); _enable_dcplb(); -#if BFIN_GPIO_PINT - bfin_pint_resume(); -#endif - +#ifdef CONFIG_GPIO_ADI bfin_gpio_pm_hibernate_restore(); +#endif blackfin_dma_resume(); kfree(memptr); diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 82f301c117a..2bbae078381 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -146,6 +146,7 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) platform_clear_ipi(cpu, IRQ_SUPPLE_1); + smp_rmb(); bfin_ipi_data = &__get_cpu_var(bfin_ipi); while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) { msg = 0; @@ -161,18 +162,20 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) case BFIN_IPI_CALL_FUNC: generic_smp_call_function_interrupt(); break; - case BFIN_IPI_CALL_FUNC_SINGLE: generic_smp_call_function_single_interrupt(); break; - case BFIN_IPI_CPU_STOP: ipi_cpu_stop(cpu); break; + default: + goto out; } atomic_dec(&bfin_ipi_data->count); } while (msg < BITS_PER_LONG); + } +out: return IRQ_HANDLED; } @@ -198,10 +201,11 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) bfin_ipi_data = &per_cpu(bfin_ipi, cpu); atomic_set_mask((1 << msg), &bfin_ipi_data->bits); atomic_inc(&bfin_ipi_data->count); - 
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); } - local_irq_restore(flags); + smp_wmb(); + for_each_cpu(cpu, cpumask) + platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); } void arch_send_call_function_single_ipi(int cpu) diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 957dd00ea56..77ea09b8bce 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -36,9 +36,6 @@ config GENERIC_HWEIGHT config GENERIC_BUG def_bool y -config COMMON_CLKDEV - def_bool y - config C6X_BIG_KERNEL bool "Build a big kernel" help @@ -105,10 +102,6 @@ menu "Processor type and features" source "arch/c6x/platforms/Kconfig" -config TMS320C6X_CACHES_ON - bool "L2 cache support" - default y - config KERNEL_RAM_BASE_ADDRESS hex "Virtual address of memory base" default 0xe0000000 if SOC_TMS320C6455 diff --git a/arch/c6x/include/asm/prom.h b/arch/c6x/include/asm/prom.h deleted file mode 100644 index b4ec95f0751..00000000000 --- a/arch/c6x/include/asm/prom.h +++ /dev/null @@ -1 +0,0 @@ -/* dummy prom.h; here to make linux/of.h's #includes happy */ diff --git a/arch/c6x/include/asm/setup.h b/arch/c6x/include/asm/setup.h index ecead15872a..696804475f5 100644 --- a/arch/c6x/include/asm/setup.h +++ b/arch/c6x/include/asm/setup.h @@ -14,8 +14,6 @@ #include <uapi/asm/setup.h> #ifndef __ASSEMBLY__ -extern char c6x_command_line[COMMAND_LINE_SIZE]; - extern int c6x_add_memory(phys_addr_t start, unsigned long size); extern unsigned long ram_start; diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c index 9e15ab9199b..fa3e5741514 100644 --- a/arch/c6x/kernel/devicetree.c +++ b/arch/c6x/kernel/devicetree.c @@ -10,37 +10,8 @@ * */ #include <linux/init.h> -#include <linux/of.h> -#include <linux/of_fdt.h> -#include <linux/initrd.h> #include <linux/memblock.h> -void __init early_init_devtree(void *params) -{ - /* Setup flat device-tree pointer */ - initial_boot_params = params; - - /* Retrieve various informations from the /chosen node of the - * device-tree, including the platform type, initrd location and - * size and more ... 
- */ - of_scan_flat_dt(early_init_dt_scan_chosen, c6x_command_line); - - /* Scan memory nodes and rebuild MEMBLOCKs */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - of_scan_flat_dt(early_init_dt_scan_memory, NULL); -} - - -#ifdef CONFIG_BLK_DEV_INITRD -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; -} -#endif - void __init early_init_dt_add_memory_arch(u64 base, u64 size) { c6x_add_memory(base, size); diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c index f4e72bd8c10..731db4b9014 100644 --- a/arch/c6x/kernel/setup.c +++ b/arch/c6x/kernel/setup.c @@ -68,13 +68,6 @@ unsigned long ram_end; static unsigned long dma_start __initdata; static unsigned long dma_size __initdata; -char c6x_command_line[COMMAND_LINE_SIZE]; - -#if defined(CONFIG_CMDLINE_BOOL) -static const char default_command_line[COMMAND_LINE_SIZE] __section(.cmdline) = - CONFIG_CMDLINE; -#endif - struct cpuinfo_c6x { const char *cpu_name; const char *cpu_voltage; @@ -294,10 +287,8 @@ notrace void __init machine_init(unsigned long dt_ptr) fdt = dtb; /* Do some early initialization based on the flat device tree */ - early_init_devtree(fdt); + early_init_dt_scan(fdt); - /* parse_early_param needs a boot_command_line */ - strlcpy(boot_command_line, c6x_command_line, COMMAND_LINE_SIZE); parse_early_param(); } @@ -309,7 +300,7 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Initializing kernel\n"); /* Initialize command line */ - *cmdline_p = c6x_command_line; + *cmdline_p = boot_command_line; memory_end = ram_end; memory_end &= ~(PAGE_SIZE - 1); diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S index 279d8072512..5a6e141d164 100644 --- a/arch/c6x/kernel/vmlinux.lds.S +++ b/arch/c6x/kernel/vmlinux.lds.S @@ -37,12 +37,6 @@ SECTIONS _vectors_end = .; } - . = ALIGN(0x1000); - .cmdline : - { - *(.cmdline) - } - /* * This section contains data which may be shared with other * cores. It needs to be a fixed offset from PAGE_OFFSET diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 02380bed189..9c957c81c68 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -130,13 +130,11 @@ config SVINTO_SIM config ETRAXFS bool "ETRAX-FS-V32" - select CPU_FREQ_TABLE if CPU_FREQ help Support CRIS V32. config CRIS_MACH_ARTPEC3 bool "ARTPEC-3" - select CPU_FREQ_TABLE if CPU_FREQ help Support Axis ARTPEC-3. 
diff --git a/arch/cris/include/asm/io.h b/arch/cris/include/asm/io.h index 5d3047e5563..4353cf239a1 100644 --- a/arch/cris/include/asm/io.h +++ b/arch/cris/include/asm/io.h @@ -3,6 +3,7 @@ #include <asm/page.h> /* for __va, __pa */ #include <arch/io.h> +#include <asm-generic/iomap.h> #include <linux/kernel.h> struct cris_io_operations diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h index 146da904cdd..f666734926d 100644 --- a/arch/cris/include/asm/pci.h +++ b/arch/cris/include/asm/pci.h @@ -11,7 +11,6 @@ #define pcibios_assign_all_busses(void) 1 -extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0x1000 #define PCIBIOS_MIN_MEM 0x10000000 diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h index 6da975db112..235ece437dd 100644 --- a/arch/cris/include/asm/pgalloc.h +++ b/arch/cris/include/asm/pgalloc.h @@ -32,7 +32,12 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres { struct page *pte; pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); - pgtable_page_ctor(pte); + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h index eb723e51554..13829aaaeec 100644 --- a/arch/cris/include/uapi/asm/socket.h +++ b/arch/cris/include/uapi/asm/socket.h @@ -78,6 +78,8 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index f0cb1c34116..5d429976242 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -76,5 +76,7 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/mb93090-mb00/pci-frv.h b/arch/frv/mb93090-mb00/pci-frv.h index 76c4e73d643..a7e487fe76e 100644 --- a/arch/frv/mb93090-mb00/pci-frv.h +++ b/arch/frv/mb93090-mb00/pci-frv.h @@ -30,7 +30,6 @@ void pcibios_resource_survey(void); /* pci-vdk.c */ -extern int __nongpreldata pcibios_last_bus; extern struct pci_ops *__nongpreldata pci_root_ops; /* pci-irq.c */ diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c index deb67843693..efa5d65b000 100644 --- a/arch/frv/mb93090-mb00/pci-vdk.c +++ b/arch/frv/mb93090-mb00/pci-vdk.c @@ -25,7 +25,6 @@ unsigned int __nongpreldata pci_probe = 1; -int __nongpreldata pcibios_last_bus = -1; struct pci_ops *__nongpreldata pci_root_ops; /* @@ -220,37 +219,6 @@ static struct pci_ops * __init pci_check_direct(void) } /* - * Discover remaining PCI buses in case there are peer host bridges. - * We use the number of last PCI bus provided by the PCI BIOS. - */ -static void __init pcibios_fixup_peer_bridges(void) -{ - struct pci_bus bus; - struct pci_dev dev; - int n; - u16 l; - - if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) - return; - printk("PCI: Peer bridge fixup\n"); - for (n=0; n <= pcibios_last_bus; n++) { - if (pci_find_bus(0, n)) - continue; - bus.number = n; - bus.ops = pci_root_ops; - dev.bus = &bus; - for(dev.devfn=0; dev.devfn<256; dev.devfn += 8) - if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) && - l != 0x0000 && l != 0xffff) { - printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l); - printk("PCI: Discovered peer bus %02x\n", n); - pci_scan_bus(n, pci_root_ops, NULL); - break; - } - } -} - -/* * Exceptions for specific devices. Usually work-arounds for fatal design flaws. 
*/ @@ -418,7 +386,6 @@ int __init pcibios_init(void) pci_scan_root_bus(NULL, 0, pci_root_ops, NULL, &resources); pcibios_irq_init(); - pcibios_fixup_peer_bridges(); pcibios_fixup_irqs(); pcibios_resource_survey(); @@ -432,9 +399,6 @@ char * __init pcibios_setup(char *str) if (!strcmp(str, "off")) { pci_probe = 0; return NULL; - } else if (!strncmp(str, "lastbus=", 8)) { - pcibios_last_bus = simple_strtol(str+8, NULL, 0); - return NULL; } return str; } diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c index f6084bc524e..41907d25ed3 100644 --- a/arch/frv/mm/pgalloc.c +++ b/arch/frv/mm/pgalloc.c @@ -37,11 +37,15 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) #else page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); #endif - if (page) { - clear_highpage(page); - pgtable_page_ctor(page); - flush_dcache_page(page); + if (!page) + return NULL; + + clear_highpage(page); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; } + flush_dcache_page(page); return page; } diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig deleted file mode 100644 index 24b1dc2564f..00000000000 --- a/arch/h8300/Kconfig +++ /dev/null @@ -1,108 +0,0 @@ -config H8300 - bool - default y - select HAVE_IDE - select GENERIC_ATOMIC64 - select HAVE_UID16 - select VIRT_TO_BUS - select ARCH_WANT_IPC_PARSE_VERSION - select GENERIC_IRQ_SHOW - select GENERIC_CPU_DEVICES - select MODULES_USE_ELF_RELA - select OLD_SIGSUSPEND3 - select OLD_SIGACTION - select HAVE_UNDERSCORE_SYMBOL_PREFIX - -config MMU - bool - default n - -config SWAP - bool - default n - -config ZONE_DMA - bool - default y - -config FPU - bool - default n - -config RWSEM_GENERIC_SPINLOCK - bool - default y - -config RWSEM_XCHGADD_ALGORITHM - bool - default n - -config ARCH_HAS_ILOG2_U32 - bool - default n - -config ARCH_HAS_ILOG2_U64 - bool - default n - -config GENERIC_HWEIGHT - bool - default y - -config GENERIC_CALIBRATE_DELAY - bool - default y - -config GENERIC_BUG - bool - depends on BUG - -config TIME_LOW_RES - bool - default y - -config NO_IOPORT - def_bool y - -config NO_DMA - def_bool y - -config ISA - bool - default y - -config PCI - bool - default n - -config HZ - int - default 100 - -source "init/Kconfig" - -source "kernel/Kconfig.freezer" - -source "arch/h8300/Kconfig.cpu" - -menu "Executable file formats" - -source "fs/Kconfig.binfmt" - -endmenu - -source "net/Kconfig" - -source "drivers/Kconfig" - -source "arch/h8300/Kconfig.ide" - -source "fs/Kconfig" - -source "arch/h8300/Kconfig.debug" - -source "security/Kconfig" - -source "crypto/Kconfig" - -source "lib/Kconfig" diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu deleted file mode 100644 index cdee771460e..00000000000 --- a/arch/h8300/Kconfig.cpu +++ /dev/null @@ -1,171 +0,0 @@ -menu "Processor type and features" - -choice - prompt "H8/300 platform" - default H8300H_GENERIC - -config H8300H_GENERIC - bool "H8/300H Generic" - help - H8/300H CPU Generic Hardware Support - -config H8300H_AKI3068NET - bool "AE-3068/69" - select H83068 - help - AKI-H8/3068F / AKI-H8/3069F Flashmicom LAN Board Support - More Information. (Japanese Only) - <http://akizukidenshi.com/catalog/default.aspx> - AE-3068/69 Evaluation Board Support - More Information. - <http://www.microtronique.com/ae3069lan.htm> - -config H8300H_H8MAX - bool "H8MAX" - select H83068 - help - H8MAX Evaluation Board Support - More Information. 
(Japanese Only) - <http://strawberry-linux.com/h8/index.html> - -config H8300H_SIM - bool "H8/300H Simulator" - select H83007 - help - GDB Simulator Support - More Information. - <http://sourceware.org/sid/> - -config H8S_GENERIC - bool "H8S Generic" - help - H8S CPU Generic Hardware Support - -config H8S_EDOSK2674 - bool "EDOSK-2674" - select H8S2678 - help - Renesas EDOSK-2674 Evaluation Board Support - More Information. - <http://www.azpower.com/H8-uClinux/index.html> - <http://www.renesas.eu/products/tools/introductory_evaluation_tools/evaluation_development_os_kits/edosk2674r/edosk2674r_software_tools_root.jsp> - -config H8S_SIM - bool "H8S Simulator" - help - GDB Simulator Support - More Information. - <http://sourceware.org/sid/> - -endchoice - -choice - prompt "CPU Selection" - -config H83002 - bool "H8/3001,3002,3003" - depends on BROKEN - select CPU_H8300H - -config H83007 - bool "H8/3006,3007" - select CPU_H8300H - -config H83048 - bool "H8/3044,3045,3046,3047,3048,3052" - depends on BROKEN - select CPU_H8300H - -config H83068 - bool "H8/3065,3066,3067,3068,3069" - select CPU_H8300H - -config H8S2678 - bool "H8S/2670,2673,2674R,2675,2676" - select CPU_H8S - -endchoice - -config CPU_CLOCK - int "CPU Clock Frequency (/1KHz)" - default "20000" - help - CPU Clock Frequency divide to 1000 - -choice - prompt "Kernel executes from" - ---help--- - Choose the memory type that the kernel will be running in. - -config RAMKERNEL - bool "RAM" - help - The kernel will be resident in RAM when running. - -config ROMKERNEL - bool "ROM" - help - The kernel will be resident in FLASH/ROM when running. -endchoice - - -config CPU_H8300H - bool - depends on (H83002 || H83007 || H83048 || H83068) - default y - -config CPU_H8S - bool - depends on H8S2678 - default y - -choice - prompt "Timer" -config H8300_TIMER8 - bool "8bit timer (2ch cascade)" - depends on (H83007 || H83068 || H8S2678) - -config H8300_TIMER16 - bool "16bit timer" - depends on (H83007 || H83068) - -config H8300_ITU - bool "ITU" - depends on (H83002 || H83048) - -config H8300_TPU - bool "TPU" - depends on H8S2678 -endchoice - -if H8300_TIMER8 -choice - prompt "Timer Channel" -config H8300_TIMER8_CH0 - bool "Channel 0" -config H8300_TIMER8_CH2 - bool "Channel 2" - depends on CPU_H8300H -endchoice -endif - -config H8300_TIMER16_CH - int "16bit timer channel (0 - 2)" - depends on H8300_TIMER16 - range 0 2 - -config H8300_ITU_CH - int "ITU channel" - depends on H8300_ITU - range 0 4 - -config H8300_TPU_CH - int "TPU channel" - depends on H8300_TPU - range 0 4 - -source "kernel/Kconfig.preempt" - -source "mm/Kconfig" - -endmenu diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug deleted file mode 100644 index e8d1b236ad8..00000000000 --- a/arch/h8300/Kconfig.debug +++ /dev/null @@ -1,68 +0,0 @@ -menu "Kernel hacking" - -source "lib/Kconfig.debug" - -config FULLDEBUG - bool "Full Symbolic/Source Debugging support" - help - Enable debugging symbols on kernel build. - -config HIGHPROFILE - bool "Use fast second timer for profiling" - help - Use a fast secondary clock to produce profiling information. - -config NO_KERNEL_MSG - bool "Suppress Kernel BUG Messages" - help - Do not output any debug BUG messages within the kernel. 
- -config GDB_MAGICPRINT - bool "Message Output for GDB MagicPrint service" - depends on (H8300H_SIM || H8S_SIM) - help - kernel messages output using MagicPrint service from GDB - -config SYSCALL_PRINT - bool "SystemCall trace print" - help - output history of systemcall - -config GDB_DEBUG - bool "Use gdb stub" - depends on (!H8300H_SIM && !H8S_SIM) - help - gdb stub exception support - -config SH_STANDARD_BIOS - bool "Use gdb protocol serial console" - depends on (!H8300H_SIM && !H8S_SIM) - help - serial console output using GDB protocol. - Require eCos/RedBoot - -config DEFAULT_CMDLINE - bool "Use builtin commandline" - default n - help - builtin kernel commandline enabled. - -config KERNEL_COMMAND - string "Buildin command string" - depends on DEFAULT_CMDLINE - help - builtin kernel commandline strings. - -config BLKDEV_RESERVE - bool "BLKDEV Reserved Memory" - default n - help - Reserved BLKDEV area. - -config BLKDEV_RESERVE_ADDRESS - hex 'start address' - depends on BLKDEV_RESERVE - help - BLKDEV start address. - -endmenu diff --git a/arch/h8300/Kconfig.ide b/arch/h8300/Kconfig.ide deleted file mode 100644 index a38a63054ac..00000000000 --- a/arch/h8300/Kconfig.ide +++ /dev/null @@ -1,44 +0,0 @@ -# uClinux H8/300 Target Board Selection Menu (IDE) - -if (H8300H_AKI3068NET) -menu "IDE Extra configuration" - -config H8300_IDE_BASE - hex "IDE register base address" - depends on IDE - default 0 - help - IDE registers base address - -config H8300_IDE_ALT - hex "IDE register alternate address" - depends on IDE - default 0 - help - IDE alternate registers address - -config H8300_IDE_IRQ - int "IDE IRQ no" - depends on IDE - default 0 - help - IDE use IRQ no -endmenu -endif - -if (H8300H_H8MAX) -config H8300_IDE_BASE - hex - depends on IDE - default 0x200000 - -config H8300_IDE_ALT - hex - depends on IDE - default 0x60000c - -config H8300_IDE_IRQ - int - depends on IDE - default 5 -endif diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile deleted file mode 100644 index a556447877b..00000000000 --- a/arch/h8300/Makefile +++ /dev/null @@ -1,71 +0,0 @@ -# -# arch/h8300/Makefile -# -# This file is subject to the terms and conditions of the GNU General Public -# License. See the file "COPYING" in the main directory of this archive -# for more details. 
-# -# (C) Copyright 2002,2003 Yoshinori Sato <ysato@users.sourceforge.jp> -# - -platform-$(CONFIG_CPU_H8300H) := h8300h -platform-$(CONFIG_CPU_H8S) := h8s -PLATFORM := $(platform-y) - -board-$(CONFIG_H8300H_GENERIC) := generic -board-$(CONFIG_H8300H_AKI3068NET) := aki3068net -board-$(CONFIG_H8300H_H8MAX) := h8max -board-$(CONFIG_H8300H_SIM) := generic -board-$(CONFIG_H8S_GENERIC) := generic -board-$(CONFIG_H8S_EDOSK2674) := edosk2674 -board-$(CONFIG_H8S_SIM) := generic -BOARD := $(board-y) - -model-$(CONFIG_RAMKERNEL) := ram -model-$(CONFIG_ROMKERNEL) := rom -MODEL := $(model-y) - -cflags-$(CONFIG_CPU_H8300H) := -mh -ldflags-$(CONFIG_CPU_H8300H) := -mh8300helf -cflags-$(CONFIG_CPU_H8S) := -ms -ldflags-$(CONFIG_CPU_H8S) := -mh8300self - -KBUILD_CFLAGS += $(cflags-y) -KBUILD_CFLAGS += -mint32 -fno-builtin -KBUILD_CFLAGS += -g -KBUILD_CFLAGS += -D__linux__ -KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\" -KBUILD_AFLAGS += -DPLATFORM=$(PLATFORM) -DMODEL=$(MODEL) $(cflags-y) -LDFLAGS += $(ldflags-y) - -CROSS_COMPILE = h8300-elf- -LIBGCC := $(shell $(CROSS-COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) - -head-y := arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/crt0_$(MODEL).o - -core-y += arch/$(ARCH)/kernel/ \ - arch/$(ARCH)/mm/ -ifdef PLATFORM -core-y += arch/$(ARCH)/platform/$(PLATFORM)/ \ - arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/ -endif - -libs-y += arch/$(ARCH)/lib/ $(LIBGCC) - -boot := arch/h8300/boot - -export MODEL PLATFORM BOARD - -archmrproper: - -archclean: - $(Q)$(MAKE) $(clean)=$(boot) - -vmlinux.srec vmlinux.bin zImage: vmlinux - $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ - -define archhelp - @echo 'vmlinux.bin - Create raw binary' - @echo 'vmlinux.srec - Create srec binary' - @echo 'zImage - Compressed kernel image' -endef diff --git a/arch/h8300/README b/arch/h8300/README deleted file mode 100644 index efa805fda19..00000000000 --- a/arch/h8300/README +++ /dev/null @@ -1,38 +0,0 @@ -linux-2.6 for H8/300 README -Yoshinori Sato <ysato@users.sourceforge.jp> - -* Supported CPU -H8/300H and H8S - -* Supported Target -1.simulator of GDB - require patches. - -2.AE 3068/AE 3069 - more information - MICROTRONIQUE <http://www.microtronique.com/> - Akizuki Denshi Tsusho Ltd. <http://akizukidenshi.com/> (Japanese Only) - -3.H8MAX - see http://ip-sol.jp/h8max/ (Japanese Only) - -4.EDOSK2674 - see http://www.eu.renesas.com/products/mpumcu/tool/edk/support/edosk2674.html - http://www.uclinux.org/pub/uClinux/ports/h8/HITACHI-EDOSK2674-HOWTO - http://www.azpower.com/H8-uClinux/ - -* Toolchain Version -gcc-3.1 or higher and patch -see arch/h8300/tools_patch/README -binutils-2.12 or higher -gdb-5.2 or higher -The environment that can compile a h8300-elf binary is necessary. - -* Userland Develop environment -used h8300-elf toolchains. -see http://www.uclinux.org/pub/uClinux/ports/h8/ - -* A few words of thanks -Porting to H8/300 serieses is support of Information-technology Promotion Agency, Japan. -I thank support. -and All developer/user. 
diff --git a/arch/h8300/boot/Makefile b/arch/h8300/boot/Makefile deleted file mode 100644 index 0bb62e064ee..00000000000 --- a/arch/h8300/boot/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -# arch/h8300/boot/Makefile - -targets := vmlinux.srec vmlinux.bin zImage -subdir- := compressed - -OBJCOPYFLAGS_vmlinux.srec := -Osrec -OBJCOPYFLAGS_vmlinux.bin := -Obinary -OBJCOPYFLAGS_zImage := -O binary -R .note -R .comment -R .stab -R .stabstr -S - -$(obj)/vmlinux.srec $(obj)/vmlinux.bin: vmlinux FORCE - $(call if_changed,objcopy) - @echo ' Kernel: $@ is ready' - -$(obj)/zImage: $(obj)/compressed/vmlinux FORCE - $(call if_changed,objcopy) - @echo 'Kernel: $@ is ready' - -$(obj)/compressed/vmlinux: FORCE - $(Q)$(MAKE) $(build)=$(obj)/compressed $@ - -CLEAN_FILES += arch/$(ARCH)/vmlinux.bin arch/$(ARCH)/vmlinux.srec - diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile deleted file mode 100644 index a6c98fe3bbc..00000000000 --- a/arch/h8300/boot/compressed/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# linux/arch/sh/boot/compressed/Makefile -# -# create a compressed vmlinux image from the original vmlinux -# - -targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o -asflags-y := -traditional - -OBJECTS = $(obj)/head.o $(obj)/misc.o - -# -# IMAGE_OFFSET is the load offset of the compression loader -# Assign dummy values if these 2 variables are not defined, -# in order to suppress error message. -# -CONFIG_MEMORY_START ?= 0x00400000 -CONFIG_BOOT_LINK_OFFSET ?= 0x00140000 -IMAGE_OFFSET := $(shell printf "0x%08x" $$(($(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET)))) - -LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup $(obj)/vmlinux.lds - -$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE - $(call if_changed,ld) - @: - -$(obj)/vmlinux.bin: vmlinux FORCE - $(call if_changed,objcopy) - -$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE - $(call if_changed,gzip) - -LDFLAGS_piggy.o := -r --format binary --oformat elf32-h8300 -T -OBJCOPYFLAGS := -O binary - -$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE - $(call if_changed,ld) diff --git a/arch/h8300/boot/compressed/head.S b/arch/h8300/boot/compressed/head.S deleted file mode 100644 index 10e9a2d1cc6..00000000000 --- a/arch/h8300/boot/compressed/head.S +++ /dev/null @@ -1,47 +0,0 @@ -/* - * linux/arch/h8300/boot/compressed/head.S - * - * Copyright (C) 2006 Yoshinori Sato - */ - - .h8300h -#include <linux/linkage.h> - -#define SRAM_START 0xff4000 - - .section .text..startup - .global startup -startup: - mov.l #SRAM_START+0x8000, sp - mov.l #__sbss, er0 - mov.l #__ebss, er1 - sub.l er0, er1 - shlr er1 - shlr er1 - sub.l er2, er2 -1: - mov.l er2, @er0 - adds #4, er0 - dec.l #1, er1 - bne 1b - jsr @_decompress_kernel - jmp @0x400000 - - .align 9 -fake_headers_as_bzImage: - .word 0 - .ascii "HdrS" ; header signature - .word 0x0202 ; header version number (>= 0x0105) - ; or else old loadlin-1.5 will fail) - .word 0 ; default_switch - .word 0 ; SETUPSEG - .word 0x1000 - .word 0 ; pointing to kernel version string - .byte 0 ; = 0, old one (LILO, Loadlin, - ; 0xTV: T=0 for LILO - ; V = version - .byte 1 ; Load flags bzImage=1 - .word 0x8000 ; size to move, when setup is not - .long 0x100000 ; 0x100000 = default for big kernel - .long 0 ; address of loaded ramdisk image - .long 0 ; its size in bytes diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c deleted file mode 100644 index 4a1e3dd4394..00000000000 --- a/arch/h8300/boot/compressed/misc.c +++ /dev/null @@ -1,180 +0,0 @@ -/* 
- * arch/h8300/boot/compressed/misc.c - * - * This is a collection of several routines from gzip-1.0.3 - * adapted for Linux. - * - * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 - * - * Adapted for h8300 by Yoshinori Sato 2006 - */ - -#include <asm/uaccess.h> - -/* - * gzip declarations - */ - -#define OF(args) args -#define STATIC static - -#undef memset -#undef memcpy -#define memzero(s, n) memset ((s), 0, (n)) - -typedef unsigned char uch; -typedef unsigned short ush; -typedef unsigned long ulg; - -#define WSIZE 0x8000 /* Window size must be at least 32k, */ - /* and a power of two */ - -static uch *inbuf; /* input buffer */ -static uch window[WSIZE]; /* Sliding window buffer */ - -static unsigned insize = 0; /* valid bytes in inbuf */ -static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ -static unsigned outcnt = 0; /* bytes in output buffer */ - -/* gzip flag byte */ -#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ -#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ -#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ -#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ -#define COMMENT 0x10 /* bit 4 set: file comment present */ -#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ -#define RESERVED 0xC0 /* bit 6,7: reserved */ - -#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) - -/* Diagnostic functions */ -#ifdef DEBUG -# define Assert(cond,msg) {if(!(cond)) error(msg);} -# define Trace(x) fprintf x -# define Tracev(x) {if (verbose) fprintf x ;} -# define Tracevv(x) {if (verbose>1) fprintf x ;} -# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} -# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} -#else -# define Assert(cond,msg) -# define Trace(x) -# define Tracev(x) -# define Tracevv(x) -# define Tracec(c,x) -# define Tracecv(c,x) -#endif - -static int fill_inbuf(void); -static void flush_window(void); -static void error(char *m); - -extern char input_data[]; -extern int input_len; - -static long bytes_out = 0; -static uch *output_data; -static unsigned long output_ptr = 0; - -static void error(char *m); - -int puts(const char *); - -extern int _end; -static unsigned long free_mem_ptr; -static unsigned long free_mem_end_ptr; - -#define HEAP_SIZE 0x10000 - -#include "../../../../lib/inflate.c" - -#define SCR *((volatile unsigned char *)0xffff8a) -#define TDR *((volatile unsigned char *)0xffff8b) -#define SSR *((volatile unsigned char *)0xffff8c) - -int puts(const char *s) -{ - return 0; -} - -void* memset(void* s, int c, size_t n) -{ - int i; - char *ss = (char*)s; - - for (i=0;i<n;i++) ss[i] = c; - return s; -} - -void* memcpy(void* __dest, __const void* __src, - size_t __n) -{ - int i; - char *d = (char *)__dest, *s = (char *)__src; - - for (i=0;i<__n;i++) d[i] = s[i]; - return __dest; -} - -/* =========================================================================== - * Fill the input buffer. This is called only when the buffer is empty - * and at least one byte is really needed. - */ -static int fill_inbuf(void) -{ - if (insize != 0) { - error("ran out of input data"); - } - - inbuf = input_data; - insize = input_len; - inptr = 1; - return inbuf[0]; -} - -/* =========================================================================== - * Write the output window window[0..outcnt-1] and update crc and bytes_out. - * (Used for the decompressed data only.) 
- */ -static void flush_window(void) -{ - ulg c = crc; /* temporary variable */ - unsigned n; - uch *in, *out, ch; - - in = window; - out = &output_data[output_ptr]; - for (n = 0; n < outcnt; n++) { - ch = *out++ = *in++; - c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); - } - crc = c; - bytes_out += (ulg)outcnt; - output_ptr += (ulg)outcnt; - outcnt = 0; -} - -static void error(char *x) -{ - puts("\n\n"); - puts(x); - puts("\n\n -- System halted"); - - while(1); /* Halt */ -} - -#define STACK_SIZE (4096) -long user_stack [STACK_SIZE]; -long* stack_start = &user_stack[STACK_SIZE]; - -void decompress_kernel(void) -{ - output_data = 0; - output_ptr = (unsigned long)0x400000; - free_mem_ptr = (unsigned long)&_end; - free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - - makecrc(); - puts("Uncompressing Linux... "); - gunzip(); - puts("Ok, booting the kernel.\n"); -} diff --git a/arch/h8300/boot/compressed/vmlinux.lds b/arch/h8300/boot/compressed/vmlinux.lds deleted file mode 100644 index a0a3a0ed54e..00000000000 --- a/arch/h8300/boot/compressed/vmlinux.lds +++ /dev/null @@ -1,32 +0,0 @@ -SECTIONS -{ - .text : - { - __stext = . ; - __text = .; - *(.text..startup) - *(.text) - __etext = . ; - } - - .rodata : - { - *(.rodata) - } - .data : - - { - __sdata = . ; - ___data_start = . ; - *(.data.*) - } - .bss : - { - . = ALIGN(0x4) ; - __sbss = . ; - *(.bss*) - . = ALIGN(0x4) ; - __ebss = . ; - __end = . ; - } -} diff --git a/arch/h8300/boot/compressed/vmlinux.scr b/arch/h8300/boot/compressed/vmlinux.scr deleted file mode 100644 index a0f6962736e..00000000000 --- a/arch/h8300/boot/compressed/vmlinux.scr +++ /dev/null @@ -1,9 +0,0 @@ -SECTIONS -{ - .data : { - _input_len = .; - LONG(_input_data_end - _input_data) _input_data = .; - *(.data) - _input_data_end = .; - } -} diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig deleted file mode 100644 index 042425a0264..00000000000 --- a/arch/h8300/defconfig +++ /dev/null @@ -1,42 +0,0 @@ -CONFIG_EXPERIMENTAL=y -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_LOG_BUF_SHIFT=14 -CONFIG_EXPERT=y -# CONFIG_UID16 is not set -# CONFIG_SYSCTL_SYSCALL is not set -# CONFIG_KALLSYMS is not set -# CONFIG_HOTPLUG is not set -# CONFIG_BASE_FULL is not set -# CONFIG_FUTEX is not set -# CONFIG_EPOLL is not set -# CONFIG_SIGNALFD is not set -# CONFIG_TIMERFD is not set -# CONFIG_EVENTFD is not set -# CONFIG_VM_EVENT_COUNTERS is not set -# CONFIG_COMPAT_BRK is not set -CONFIG_SLOB=y -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set -CONFIG_H83007=y -CONFIG_BINFMT_FLAT=y -CONFIG_BINFMT_ZFLAT=y -CONFIG_BINFMT_MISC=y -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_REDBOOT_PARTS=y -CONFIG_MTD_CHAR=y -CONFIG_MTD_RAM=y -CONFIG_MTD_ROM=y -CONFIG_MTD_UCLINUX=y -# CONFIG_BLK_DEV is not set -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_HWMON is not set -# CONFIG_USB_SUPPORT is not set -# CONFIG_DNOTIFY is not set -CONFIG_ROMFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set -# CONFIG_ENABLE_MUST_CHECK is not set -# CONFIG_CRC32 is not set diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild deleted file mode 100644 index 7e0e7213a48..00000000000 --- a/arch/h8300/include/asm/Kbuild +++ /dev/null @@ -1,9 +0,0 @@ - -generic-y += clkdev.h -generic-y += exec.h -generic-y += linkage.h -generic-y += mmu.h -generic-y += module.h -generic-y += trace_clock.h -generic-y += xor.h -generic-y += preempt.h diff --git a/arch/h8300/include/asm/asm-offsets.h 
b/arch/h8300/include/asm/asm-offsets.h deleted file mode 100644 index d370ee36a18..00000000000 --- a/arch/h8300/include/asm/asm-offsets.h +++ /dev/null @@ -1 +0,0 @@ -#include <generated/asm-offsets.h> diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h deleted file mode 100644 index 40901e353c2..00000000000 --- a/arch/h8300/include/asm/atomic.h +++ /dev/null @@ -1,146 +0,0 @@ -#ifndef __ARCH_H8300_ATOMIC__ -#define __ARCH_H8300_ATOMIC__ - -#include <linux/types.h> -#include <asm/cmpxchg.h> - -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc.. - */ - -#define ATOMIC_INIT(i) { (i) } - -#define atomic_read(v) (*(volatile int *)&(v)->counter) -#define atomic_set(v, i) (((v)->counter) = i) - -#include <linux/kernel.h> - -static __inline__ int atomic_add_return(int i, atomic_t *v) -{ - unsigned long flags; - int ret; - local_irq_save(flags); - ret = v->counter += i; - local_irq_restore(flags); - return ret; -} - -#define atomic_add(i, v) atomic_add_return(i, v) -#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) - -static __inline__ int atomic_sub_return(int i, atomic_t *v) -{ - unsigned long flags; - int ret; - local_irq_save(flags); - ret = v->counter -= i; - local_irq_restore(flags); - return ret; -} - -#define atomic_sub(i, v) atomic_sub_return(i, v) -#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0) - -static __inline__ int atomic_inc_return(atomic_t *v) -{ - unsigned long flags; - int ret; - local_irq_save(flags); - v->counter++; - ret = v->counter; - local_irq_restore(flags); - return ret; -} - -#define atomic_inc(v) atomic_inc_return(v) - -/* - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) - -static __inline__ int atomic_dec_return(atomic_t *v) -{ - unsigned long flags; - int ret; - local_irq_save(flags); - --v->counter; - ret = v->counter; - local_irq_restore(flags); - return ret; -} - -#define atomic_dec(v) atomic_dec_return(v) - -static __inline__ int atomic_dec_and_test(atomic_t *v) -{ - unsigned long flags; - int ret; - local_irq_save(flags); - --v->counter; - ret = v->counter; - local_irq_restore(flags); - return ret == 0; -} - -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -{ - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (likely(ret == old)) - v->counter = new; - local_irq_restore(flags); - return ret; -} - -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int ret; - unsigned long flags; - - local_irq_save(flags); - ret = v->counter; - if (ret != u) - v->counter += a; - local_irq_restore(flags); - return ret; -} - -static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc ccr,r1l\n\t" - "orc #0x80,ccr\n\t" - "mov.l %0,er0\n\t" - "and.l %1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r1l,ccr" - : "=m" (*v) : "g" (~(mask)) :"er0","er1"); -} - -static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) -{ - __asm__ __volatile__("stc ccr,r1l\n\t" - "orc #0x80,ccr\n\t" - "mov.l %0,er0\n\t" - "or.l %1,er0\n\t" - "mov.l er0,%0\n\t" - "ldc r1l,ccr" - : "=m" (*v) : "g" (mask) :"er0","er1"); -} - -/* Atomic operations are already serializing */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - -#endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/arch/h8300/include/asm/barrier.h b/arch/h8300/include/asm/barrier.h deleted file mode 100644 index 9e0aa9fc195..00000000000 --- a/arch/h8300/include/asm/barrier.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _H8300_BARRIER_H -#define _H8300_BARRIER_H - -#define nop() asm volatile ("nop"::) - -/* - * Force strict CPU ordering. - * Not really required on H8... - */ -#define mb() asm volatile ("" : : :"memory") -#define rmb() asm volatile ("" : : :"memory") -#define wmb() asm volatile ("" : : :"memory") -#define set_mb(var, value) do { xchg(&var, value); } while (0) - -#define read_barrier_depends() do { } while (0) - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#endif /* _H8300_BARRIER_H */ diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h deleted file mode 100644 index eb34e0cd33d..00000000000 --- a/arch/h8300/include/asm/bitops.h +++ /dev/null @@ -1,211 +0,0 @@ -#ifndef _H8300_BITOPS_H -#define _H8300_BITOPS_H - -/* - * Copyright 1992, Linus Torvalds. - * Copyright 2002, Yoshinori Sato - */ - -#include <linux/compiler.h> - -#ifdef __KERNEL__ - -#ifndef _LINUX_BITOPS_H -#error only <linux/bitops.h> can be included directly -#endif - -/* - * Function prototypes to keep gcc -Wall happy - */ - -/* - * ffz = Find First Zero in word. Undefined if no zero exists, - * so code should check against ~0UL first.. 
- */ -static __inline__ unsigned long ffz(unsigned long word) -{ - unsigned long result; - - result = -1; - __asm__("1:\n\t" - "shlr.l %2\n\t" - "adds #1,%0\n\t" - "bcs 1b" - : "=r" (result) - : "0" (result),"r" (word)); - return result; -} - -#define H8300_GEN_BITOP_CONST(OP,BIT) \ - case BIT: \ - __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \ - break; - -#define H8300_GEN_BITOP(FNAME,OP) \ -static __inline__ void FNAME(int nr, volatile unsigned long* addr) \ -{ \ - volatile unsigned char *b_addr; \ - b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ - if (__builtin_constant_p(nr)) { \ - switch(nr & 7) { \ - H8300_GEN_BITOP_CONST(OP,0) \ - H8300_GEN_BITOP_CONST(OP,1) \ - H8300_GEN_BITOP_CONST(OP,2) \ - H8300_GEN_BITOP_CONST(OP,3) \ - H8300_GEN_BITOP_CONST(OP,4) \ - H8300_GEN_BITOP_CONST(OP,5) \ - H8300_GEN_BITOP_CONST(OP,6) \ - H8300_GEN_BITOP_CONST(OP,7) \ - } \ - } else { \ - __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \ - } \ -} - -/* - * clear_bit() doesn't provide any barrier for the compiler. - */ -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - -H8300_GEN_BITOP(set_bit ,"bset") -H8300_GEN_BITOP(clear_bit ,"bclr") -H8300_GEN_BITOP(change_bit,"bnot") -#define __set_bit(nr,addr) set_bit((nr),(addr)) -#define __clear_bit(nr,addr) clear_bit((nr),(addr)) -#define __change_bit(nr,addr) change_bit((nr),(addr)) - -#undef H8300_GEN_BITOP -#undef H8300_GEN_BITOP_CONST - -static __inline__ int test_bit(int nr, const unsigned long* addr) -{ - return (*((volatile unsigned char *)addr + - ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0; -} - -#define __test_bit(nr, addr) test_bit(nr, addr) - -#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT) \ - case BIT: \ - __asm__("stc ccr,%w1\n\t" \ - "orc #0x80,ccr\n\t" \ - "bld #" #BIT ",@%4\n\t" \ - OP " #" #BIT ",@%4\n\t" \ - "rotxl.l %0\n\t" \ - "ldc %w1,ccr" \ - : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ - : "0" (retval),"r" (b_addr) \ - : "memory"); \ - break; - -#define H8300_GEN_TEST_BITOP_CONST(OP,BIT) \ - case BIT: \ - __asm__("bld #" #BIT ",@%3\n\t" \ - OP " #" #BIT ",@%3\n\t" \ - "rotxl.l %0\n\t" \ - : "=r"(retval),"=m"(*b_addr) \ - : "0" (retval),"r" (b_addr) \ - : "memory"); \ - break; - -#define H8300_GEN_TEST_BITOP(FNNAME,OP) \ -static __inline__ int FNNAME(int nr, volatile void * addr) \ -{ \ - int retval = 0; \ - char ccrsave; \ - volatile unsigned char *b_addr; \ - b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ - if (__builtin_constant_p(nr)) { \ - switch(nr & 7) { \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,0) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,1) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,2) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,3) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,4) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,5) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,6) \ - H8300_GEN_TEST_BITOP_CONST_INT(OP,7) \ - } \ - } else { \ - __asm__("stc ccr,%w1\n\t" \ - "orc #0x80,ccr\n\t" \ - "btst %w5,@%4\n\t" \ - OP " %w5,@%4\n\t" \ - "beq 1f\n\t" \ - "inc.l #1,%0\n" \ - "1:\n\t" \ - "ldc %w1,ccr" \ - : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr) \ - : "0" (retval),"r" (b_addr),"r"(nr) \ - : "memory"); \ - } \ - return retval; \ -} \ - \ -static __inline__ int __ ## FNNAME(int nr, volatile void * addr) \ -{ \ - int retval = 0; \ - volatile unsigned char *b_addr; \ - b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3); \ - if (__builtin_constant_p(nr)) { \ - switch(nr & 7) { \ - H8300_GEN_TEST_BITOP_CONST(OP,0) \ - H8300_GEN_TEST_BITOP_CONST(OP,1) \ - H8300_GEN_TEST_BITOP_CONST(OP,2) \ 
- H8300_GEN_TEST_BITOP_CONST(OP,3) \ - H8300_GEN_TEST_BITOP_CONST(OP,4) \ - H8300_GEN_TEST_BITOP_CONST(OP,5) \ - H8300_GEN_TEST_BITOP_CONST(OP,6) \ - H8300_GEN_TEST_BITOP_CONST(OP,7) \ - } \ - } else { \ - __asm__("btst %w4,@%3\n\t" \ - OP " %w4,@%3\n\t" \ - "beq 1f\n\t" \ - "inc.l #1,%0\n" \ - "1:" \ - : "=r"(retval),"=m"(*b_addr) \ - : "0" (retval),"r" (b_addr),"r"(nr) \ - : "memory"); \ - } \ - return retval; \ -} - -H8300_GEN_TEST_BITOP(test_and_set_bit, "bset") -H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr") -H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") -#undef H8300_GEN_TEST_BITOP_CONST -#undef H8300_GEN_TEST_BITOP_CONST_INT -#undef H8300_GEN_TEST_BITOP - -#include <asm-generic/bitops/ffs.h> - -static __inline__ unsigned long __ffs(unsigned long word) -{ - unsigned long result; - - result = -1; - __asm__("1:\n\t" - "shlr.l %2\n\t" - "adds #1,%0\n\t" - "bcc 1b" - : "=r" (result) - : "0"(result),"r"(word)); - return result; -} - -#include <asm-generic/bitops/find.h> -#include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/hweight.h> -#include <asm-generic/bitops/lock.h> -#include <asm-generic/bitops/le.h> -#include <asm-generic/bitops/ext2-atomic.h> - -#endif /* __KERNEL__ */ - -#include <asm-generic/bitops/fls.h> -#include <asm-generic/bitops/__fls.h> -#include <asm-generic/bitops/fls64.h> - -#endif /* _H8300_BITOPS_H */ diff --git a/arch/h8300/include/asm/bootinfo.h b/arch/h8300/include/asm/bootinfo.h deleted file mode 100644 index 5bed7e7aac0..00000000000 --- a/arch/h8300/include/asm/bootinfo.h +++ /dev/null @@ -1,2 +0,0 @@ - -/* Nothing for h8300 */ diff --git a/arch/h8300/include/asm/bug.h b/arch/h8300/include/asm/bug.h deleted file mode 100644 index 1e1be811993..00000000000 --- a/arch/h8300/include/asm/bug.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _H8300_BUG_H -#define _H8300_BUG_H - -/* always true */ -#define is_valid_bugaddr(addr) (1) - -#include <asm-generic/bug.h> - -struct pt_regs; -extern void die(const char *str, struct pt_regs *fp, unsigned long err); - -#endif diff --git a/arch/h8300/include/asm/bugs.h b/arch/h8300/include/asm/bugs.h deleted file mode 100644 index 1cb4afba6eb..00000000000 --- a/arch/h8300/include/asm/bugs.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * include/asm-h8300/bugs.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -static void check_bugs(void) -{ -} diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h deleted file mode 100644 index 05887a1d80e..00000000000 --- a/arch/h8300/include/asm/cache.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ARCH_H8300_CACHE_H -#define __ARCH_H8300_CACHE_H - -/* bytes per L1 cache line */ -#define L1_CACHE_SHIFT 2 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) - -/* m68k-elf-gcc 2.95.2 doesn't like these */ - -#define __cacheline_aligned -#define ____cacheline_aligned - -#endif diff --git a/arch/h8300/include/asm/cachectl.h b/arch/h8300/include/asm/cachectl.h deleted file mode 100644 index c464022d8e2..00000000000 --- a/arch/h8300/include/asm/cachectl.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _H8300_CACHECTL_H -#define _H8300_CACHECTL_H - -/* Definitions for the cacheflush system call. 
*/ - -#define FLUSH_SCOPE_LINE 0 /* Flush a cache line */ -#define FLUSH_SCOPE_PAGE 0 /* Flush a page */ -#define FLUSH_SCOPE_ALL 0 /* Flush the whole cache -- superuser only */ - -#define FLUSH_CACHE_DATA 0 /* Writeback and flush data cache */ -#define FLUSH_CACHE_INSN 0 /* Flush instruction cache */ -#define FLUSH_CACHE_BOTH 0 /* Flush both caches */ - -#endif /* _H8300_CACHECTL_H */ diff --git a/arch/h8300/include/asm/cacheflush.h b/arch/h8300/include/asm/cacheflush.h deleted file mode 100644 index 4cf2df20c1c..00000000000 --- a/arch/h8300/include/asm/cacheflush.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * (C) Copyright 2002, Yoshinori Sato <ysato@users.sourceforge.jp> - */ - -#ifndef _ASM_H8300_CACHEFLUSH_H -#define _ASM_H8300_CACHEFLUSH_H - -/* - * Cache handling functions - * No Cache memory all dummy functions - */ - -#define flush_cache_all() -#define flush_cache_mm(mm) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma,a,b) -#define flush_cache_page(vma,p,pfn) -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 -#define flush_dcache_page(page) -#define flush_dcache_mmap_lock(mapping) -#define flush_dcache_mmap_unlock(mapping) -#define flush_icache() -#define flush_icache_page(vma,page) -#define flush_icache_range(start,len) -#define flush_cache_vmap(start, end) -#define flush_cache_vunmap(start, end) -#define cache_push_v(vaddr,len) -#define cache_push(paddr,len) -#define cache_clear(paddr,len) - -#define flush_dcache_range(a,b) - -#define flush_icache_user_range(vma,page,addr,len) - -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) - -#endif /* _ASM_H8300_CACHEFLUSH_H */ diff --git a/arch/h8300/include/asm/checksum.h b/arch/h8300/include/asm/checksum.h deleted file mode 100644 index 98724e12508..00000000000 --- a/arch/h8300/include/asm/checksum.h +++ /dev/null @@ -1,102 +0,0 @@ -#ifndef _H8300_CHECKSUM_H -#define _H8300_CHECKSUM_H - -/* - * computes the checksum of a memory block at buff, length len, - * and adds in "sum" (32-bit) - * - * returns a 32-bit number suitable for feeding into itself - * or csum_tcpudp_magic - * - * this function must be called with even lengths, except - * for the last fragment, which may be odd - * - * it's best to have buff aligned on a 32-bit boundary - */ -__wsum csum_partial(const void *buff, int len, __wsum sum); - -/* - * the same as csum_partial, but copies from src while it - * checksums - * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ - -__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); - - -/* - * the same as csum_partial_copy, but copies from user space. 
- * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ - -extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *csum_err); - -__sum16 ip_fast_csum(const void *iph, unsigned int ihl); - - -/* - * Fold a partial checksum - */ - -static inline __sum16 csum_fold(__wsum sum) -{ - __asm__("mov.l %0,er0\n\t" - "add.w e0,r0\n\t" - "xor.w e0,e0\n\t" - "rotxl.w e0\n\t" - "add.w e0,r0\n\t" - "sub.w e0,e0\n\t" - "mov.l er0,%0" - : "=r"(sum) - : "0"(sum) - : "er0"); - return (__force __sum16)~sum; -} - - -/* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ - -static inline __wsum -csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) -{ - __asm__ ("sub.l er0,er0\n\t" - "add.l %2,%0\n\t" - "addx #0,r0l\n\t" - "add.l %3,%0\n\t" - "addx #0,r0l\n\t" - "add.l %4,%0\n\t" - "addx #0,r0l\n\t" - "add.l er0,%0\n\t" - "bcc 1f\n\t" - "inc.l #1,%0\n" - "1:" - : "=&r" (sum) - : "0" (sum), "r" (daddr), "r" (saddr), "r" (len + proto) - :"er0"); - return sum; -} - -static inline __sum16 -csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, - unsigned short proto, __wsum sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); -} - -/* - * this routine is used for miscellaneous IP-like checksums, mainly - * in icmp.c - */ - -extern __sum16 ip_compute_csum(const void *buff, int len); - -#endif /* _H8300_CHECKSUM_H */ diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h deleted file mode 100644 index cdb203ef681..00000000000 --- a/arch/h8300/include/asm/cmpxchg.h +++ /dev/null @@ -1,60 +0,0 @@ -#ifndef __ARCH_H8300_CMPXCHG__ -#define __ARCH_H8300_CMPXCHG__ - -#include <linux/irqflags.h> - -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - -struct __xchg_dummy { unsigned long a[100]; }; -#define __xg(x) ((volatile struct __xchg_dummy *)(x)) - -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) -{ - unsigned long tmp, flags; - - local_irq_save(flags); - - switch (size) { - case 1: - __asm__ __volatile__ - ("mov.b %2,%0\n\t" - "mov.b %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - case 2: - __asm__ __volatile__ - ("mov.w %2,%0\n\t" - "mov.w %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - case 4: - __asm__ __volatile__ - ("mov.l %2,%0\n\t" - "mov.l %1,%2" - : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory"); - break; - default: - tmp = 0; - } - local_irq_restore(flags); - return tmp; -} - -#include <asm-generic/cmpxchg-local.h> - -/* - * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make - * them available. 
- */ -#define cmpxchg_local(ptr, o, n) \ - ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ - (unsigned long)(n), sizeof(*(ptr)))) -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) - -#ifndef CONFIG_SMP -#include <asm-generic/cmpxchg.h> -#endif - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -#endif /* __ARCH_H8300_CMPXCHG__ */ diff --git a/arch/h8300/include/asm/cputime.h b/arch/h8300/include/asm/cputime.h deleted file mode 100644 index 092e187c7b0..00000000000 --- a/arch/h8300/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __H8300_CPUTIME_H -#define __H8300_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __H8300_CPUTIME_H */ diff --git a/arch/h8300/include/asm/current.h b/arch/h8300/include/asm/current.h deleted file mode 100644 index 57d74ee55a1..00000000000 --- a/arch/h8300/include/asm/current.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _H8300_CURRENT_H -#define _H8300_CURRENT_H -/* - * current.h - * (C) Copyright 2000, Lineo, David McCullough <davidm@lineo.com> - * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) - * - * rather than dedicate a register (as the m68k source does), we - * just keep a global, we should probably just change it all to be - * current and lose _current_task. - */ - -#include <linux/thread_info.h> -#include <asm/thread_info.h> - -struct task_struct; - -static inline struct task_struct *get_current(void) -{ - return(current_thread_info()->task); -} - -#define current get_current() - -#endif /* _H8300_CURRENT_H */ diff --git a/arch/h8300/include/asm/dbg.h b/arch/h8300/include/asm/dbg.h deleted file mode 100644 index 2c6d1cbcf73..00000000000 --- a/arch/h8300/include/asm/dbg.h +++ /dev/null @@ -1,2 +0,0 @@ -#define DEBUG 1 -#define BREAK asm volatile ("trap #3") diff --git a/arch/h8300/include/asm/delay.h b/arch/h8300/include/asm/delay.h deleted file mode 100644 index 743beba70f8..00000000000 --- a/arch/h8300/include/asm/delay.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _H8300_DELAY_H -#define _H8300_DELAY_H - -#include <asm/param.h> - -/* - * Copyright (C) 2002 Yoshinori Sato <ysato@sourceforge.jp> - * - * Delay routines, using a pre-computed "loops_per_second" value. - */ - -static inline void __delay(unsigned long loops) -{ - __asm__ __volatile__ ("1:\n\t" - "dec.l #1,%0\n\t" - "bne 1b" - :"=r" (loops):"0"(loops)); -} - -/* - * Use only for very small delays ( < 1 msec). Should probably use a - * lookup table, really, as the multiplications take much too long with - * short delays. 
This is a "reasonable" implementation, though (and the - * first constant multiplications gets optimized away if the delay is - * a constant) - */ - -extern unsigned long loops_per_jiffy; - -static inline void udelay(unsigned long usecs) -{ - usecs *= 4295; /* 2**32 / 1000000 */ - usecs /= (loops_per_jiffy*HZ); - if (usecs) - __delay(usecs); -} - -#endif /* _H8300_DELAY_H */ diff --git a/arch/h8300/include/asm/device.h b/arch/h8300/include/asm/device.h deleted file mode 100644 index d8f9872b0e2..00000000000 --- a/arch/h8300/include/asm/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include <asm-generic/device.h> - diff --git a/arch/h8300/include/asm/div64.h b/arch/h8300/include/asm/div64.h deleted file mode 100644 index 6cd978cefb2..00000000000 --- a/arch/h8300/include/asm/div64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/div64.h> diff --git a/arch/h8300/include/asm/dma.h b/arch/h8300/include/asm/dma.h deleted file mode 100644 index 3edbaaaedf5..00000000000 --- a/arch/h8300/include/asm/dma.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _H8300_DMA_H -#define _H8300_DMA_H - - -/* - * Set number of channels of DMA on ColdFire for different implementations. - */ -#define MAX_DMA_CHANNELS 0 -#define MAX_DMA_ADDRESS PAGE_OFFSET - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -#endif /* _H8300_DMA_H */ diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h deleted file mode 100644 index 6db71248a82..00000000000 --- a/arch/h8300/include/asm/elf.h +++ /dev/null @@ -1,101 +0,0 @@ -#ifndef __ASMH8300_ELF_H -#define __ASMH8300_ELF_H - -/* - * ELF register definitions.. - */ - -#include <asm/ptrace.h> -#include <asm/user.h> - -typedef unsigned long elf_greg_t; - -#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; -typedef unsigned long elf_fpregset_t; - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ((x)->e_machine == EM_H8_300) - -/* - * These are used to set parameters in the core dumps. - */ -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2MSB -#define ELF_ARCH EM_H8_300 -#if defined(__H8300H__) -#define ELF_CORE_EFLAGS 0x810000 -#endif -#if defined(__H8300S__) -#define ELF_CORE_EFLAGS 0x820000 -#endif - -#define ELF_PLAT_INIT(_r) _r->er1 = 0 - -#define ELF_EXEC_PAGESIZE 4096 - -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE 0xD0000000UL - -/* This yields a mask that user programs can use to figure out what - instruction set this cpu supports. */ - -#define ELF_HWCAP (0) - -/* This yields a string that ld.so will use to load implementation - specific libraries for optimization. This is more specific in - intent than poking at uname or /proc/cpuinfo. 
*/ - -#define ELF_PLATFORM (NULL) - -#define R_H8_NONE 0 -#define R_H8_DIR32 1 -#define R_H8_DIR32_28 2 -#define R_H8_DIR32_24 3 -#define R_H8_DIR32_16 4 -#define R_H8_DIR32U 6 -#define R_H8_DIR32U_28 7 -#define R_H8_DIR32U_24 8 -#define R_H8_DIR32U_20 9 -#define R_H8_DIR32U_16 10 -#define R_H8_DIR24 11 -#define R_H8_DIR24_20 12 -#define R_H8_DIR24_16 13 -#define R_H8_DIR24U 14 -#define R_H8_DIR24U_20 15 -#define R_H8_DIR24U_16 16 -#define R_H8_DIR16 17 -#define R_H8_DIR16U 18 -#define R_H8_DIR16S_32 19 -#define R_H8_DIR16S_28 20 -#define R_H8_DIR16S_24 21 -#define R_H8_DIR16S_20 22 -#define R_H8_DIR16S 23 -#define R_H8_DIR8 24 -#define R_H8_DIR8U 25 -#define R_H8_DIR8Z_32 26 -#define R_H8_DIR8Z_28 27 -#define R_H8_DIR8Z_24 28 -#define R_H8_DIR8Z_20 29 -#define R_H8_DIR8Z_16 30 -#define R_H8_PCREL16 31 -#define R_H8_PCREL8 32 -#define R_H8_BPOS 33 -#define R_H8_PCREL32 34 -#define R_H8_GOT32O 35 -#define R_H8_GOT16O 36 -#define R_H8_DIR16A8 59 -#define R_H8_DIR16R8 60 -#define R_H8_DIR24A8 61 -#define R_H8_DIR24R8 62 -#define R_H8_DIR32A16 63 -#define R_H8_ABS32 65 -#define R_H8_ABS32A16 127 - -#endif diff --git a/arch/h8300/include/asm/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42..00000000000 --- a/arch/h8300/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/h8300/include/asm/fb.h b/arch/h8300/include/asm/fb.h deleted file mode 100644 index c7df3803099..00000000000 --- a/arch/h8300/include/asm/fb.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ -#include <linux/fb.h> - -#define fb_pgprotect(...) do {} while (0) - -static inline int fb_is_primary_device(struct fb_info *info) -{ - return 0; -} - -#endif /* _ASM_FB_H_ */ diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h deleted file mode 100644 index bd12b31b90e..00000000000 --- a/arch/h8300/include/asm/flat.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * include/asm-h8300/flat.h -- uClinux flat-format executables - */ - -#ifndef __H8300_FLAT_H__ -#define __H8300_FLAT_H__ - -#define flat_argvp_envp_on_stack() 1 -#define flat_old_ram_flag(flags) 1 -#define flat_reloc_valid(reloc, size) ((reloc) <= (size)) -#define flat_set_persistent(relval, p) 0 - -/* - * on the H8 a couple of the relocations have an instruction in the - * top byte. As there can only be 24bits of address space, we just - * always preserve that 8bits at the top, when it isn't an instruction - * is is 0 (davidm@snapgear.com) - */ - -#define flat_get_relocate_addr(rel) (rel) -#define flat_get_addr_from_rp(rp, relval, flags, persistent) \ - (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 
0xffffffff: 0x00ffffff)) -#define flat_put_addr_at_rp(rp, addr, rel) \ - put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp) - -#endif /* __H8300_FLAT_H__ */ diff --git a/arch/h8300/include/asm/fpu.h b/arch/h8300/include/asm/fpu.h deleted file mode 100644 index 4fc416e80be..00000000000 --- a/arch/h8300/include/asm/fpu.h +++ /dev/null @@ -1 +0,0 @@ -/* Nothing do */ diff --git a/arch/h8300/include/asm/ftrace.h b/arch/h8300/include/asm/ftrace.h deleted file mode 100644 index 40a8c178f10..00000000000 --- a/arch/h8300/include/asm/ftrace.h +++ /dev/null @@ -1 +0,0 @@ -/* empty */ diff --git a/arch/h8300/include/asm/futex.h b/arch/h8300/include/asm/futex.h deleted file mode 100644 index 6a332a9f099..00000000000 --- a/arch/h8300/include/asm/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include <asm-generic/futex.h> - -#endif diff --git a/arch/h8300/include/asm/gpio-internal.h b/arch/h8300/include/asm/gpio-internal.h deleted file mode 100644 index a714f0c0efb..00000000000 --- a/arch/h8300/include/asm/gpio-internal.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef _H8300_GPIO_H -#define _H8300_GPIO_H - -#define H8300_GPIO_P1 0 -#define H8300_GPIO_P2 1 -#define H8300_GPIO_P3 2 -#define H8300_GPIO_P4 3 -#define H8300_GPIO_P5 4 -#define H8300_GPIO_P6 5 -#define H8300_GPIO_P7 6 -#define H8300_GPIO_P8 7 -#define H8300_GPIO_P9 8 -#define H8300_GPIO_PA 9 -#define H8300_GPIO_PB 10 -#define H8300_GPIO_PC 11 -#define H8300_GPIO_PD 12 -#define H8300_GPIO_PE 13 -#define H8300_GPIO_PF 14 -#define H8300_GPIO_PG 15 -#define H8300_GPIO_PH 16 - -#define H8300_GPIO_B7 0x80 -#define H8300_GPIO_B6 0x40 -#define H8300_GPIO_B5 0x20 -#define H8300_GPIO_B4 0x10 -#define H8300_GPIO_B3 0x08 -#define H8300_GPIO_B2 0x04 -#define H8300_GPIO_B1 0x02 -#define H8300_GPIO_B0 0x01 - -#define H8300_GPIO_INPUT 0 -#define H8300_GPIO_OUTPUT 1 - -#define H8300_GPIO_RESERVE(port, bits) \ - h8300_reserved_gpio(port, bits) - -#define H8300_GPIO_FREE(port, bits) \ - h8300_free_gpio(port, bits) - -#define H8300_GPIO_DDR(port, bit, dir) \ - h8300_set_gpio_dir(((port) << 8) | (bit), dir) - -#define H8300_GPIO_GETDIR(port, bit) \ - h8300_get_gpio_dir(((port) << 8) | (bit)) - -extern int h8300_reserved_gpio(int port, int bits); -extern int h8300_free_gpio(int port, int bits); -extern int h8300_set_gpio_dir(int port_bit, int dir); -extern int h8300_get_gpio_dir(int port_bit); -extern int h8300_init_gpio(void); - -#endif diff --git a/arch/h8300/include/asm/hardirq.h b/arch/h8300/include/asm/hardirq.h deleted file mode 100644 index c2e1aa0f0d1..00000000000 --- a/arch/h8300/include/asm/hardirq.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef __H8300_HARDIRQ_H -#define __H8300_HARDIRQ_H - -#include <asm/irq.h> - -#define HARDIRQ_BITS 8 - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! 
-#endif - -#include <asm-generic/hardirq.h> - -#endif diff --git a/arch/h8300/include/asm/hw_irq.h b/arch/h8300/include/asm/hw_irq.h deleted file mode 100644 index d75a5a1119e..00000000000 --- a/arch/h8300/include/asm/hw_irq.h +++ /dev/null @@ -1 +0,0 @@ -/* Do Nothing */ diff --git a/arch/h8300/include/asm/io.h b/arch/h8300/include/asm/io.h deleted file mode 100644 index c1a8df22080..00000000000 --- a/arch/h8300/include/asm/io.h +++ /dev/null @@ -1,358 +0,0 @@ -#ifndef _H8300_IO_H -#define _H8300_IO_H - -#ifdef __KERNEL__ - -#include <asm/virtconvert.h> - -#if defined(CONFIG_H83007) || defined(CONFIG_H83068) -#include <asm/regs306x.h> -#elif defined(CONFIG_H8S2678) -#include <asm/regs267x.h> -#else -#error UNKNOWN CPU TYPE -#endif - - -/* - * These are for ISA/PCI shared memory _only_ and should never be used - * on any other type of memory, including Zorro memory. They are meant to - * access the bus in the bus byte order which is little-endian!. - * - * readX/writeX() are used to access memory mapped devices. On some - * architectures the memory mapped IO stuff needs to be accessed - * differently. On the m68k architecture, we just read/write the - * memory location directly. - */ -/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates - * two accesses to memory, which may be undesirable for some devices. - */ - -/* - * swap functions are sometimes needed to interface little-endian hardware - */ - -static inline unsigned short _swapw(volatile unsigned short v) -{ -#ifndef H8300_IO_NOSWAP - unsigned short r; - __asm__("xor.b %w0,%x0\n\t" - "xor.b %x0,%w0\n\t" - "xor.b %w0,%x0" - :"=r"(r) - :"0"(v)); - return r; -#else - return v; -#endif -} - -static inline unsigned long _swapl(volatile unsigned long v) -{ -#ifndef H8300_IO_NOSWAP - unsigned long r; - __asm__("xor.b %w0,%x0\n\t" - "xor.b %x0,%w0\n\t" - "xor.b %w0,%x0\n\t" - "xor.w %e0,%f0\n\t" - "xor.w %f0,%e0\n\t" - "xor.w %e0,%f0\n\t" - "xor.b %w0,%x0\n\t" - "xor.b %x0,%w0\n\t" - "xor.b %w0,%x0" - :"=r"(r) - :"0"(v)); - return r; -#else - return v; -#endif -} - -#define readb(addr) \ - ({ unsigned char __v = \ - *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \ - __v; }) -#define readw(addr) \ - ({ unsigned short __v = \ - *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \ - __v; }) -#define readl(addr) \ - ({ unsigned long __v = \ - *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \ - __v; }) - -#define writeb(b,addr) (void)((*(volatile unsigned char *) \ - ((unsigned long)(addr) & 0x00ffffff)) = (b)) -#define writew(b,addr) (void)((*(volatile unsigned short *) \ - ((unsigned long)(addr) & 0x00ffffff)) = (b)) -#define writel(b,addr) (void)((*(volatile unsigned long *) \ - ((unsigned long)(addr) & 0x00ffffff)) = (b)) -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) - -#define __raw_readb readb -#define __raw_readw readw -#define __raw_readl readl -#define __raw_writeb writeb -#define __raw_writew writew -#define __raw_writel writel - -static inline int h8300_buswidth(unsigned int addr) -{ - return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0; -} - -static inline void io_outsb(unsigned int addr, const void *buf, int len) -{ - volatile unsigned char *ap_b = (volatile unsigned char *) addr; - volatile unsigned short *ap_w = (volatile unsigned short *) addr; - unsigned char *bp = (unsigned char *) buf; - - if(h8300_buswidth(addr) && (addr & 1)) { - while (len--) - *ap_w = 
*bp++; - } else { - while (len--) - *ap_b = *bp++; - } -} - -static inline void io_outsw(unsigned int addr, const void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *ap = _swapw(*bp++); -} - -static inline void io_outsl(unsigned int addr, const void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *ap = _swapl(*bp++); -} - -static inline void io_outsw_noswap(unsigned int addr, const void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *ap = *bp++; -} - -static inline void io_outsl_noswap(unsigned int addr, const void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *ap = *bp++; -} - -static inline void io_insb(unsigned int addr, void *buf, int len) -{ - volatile unsigned char *ap_b; - volatile unsigned short *ap_w; - unsigned char *bp = (unsigned char *) buf; - - if(h8300_buswidth(addr)) { - ap_w = (volatile unsigned short *)(addr & ~1); - while (len--) - *bp++ = *ap_w & 0xff; - } else { - ap_b = (volatile unsigned char *)addr; - while (len--) - *bp++ = *ap_b; - } -} - -static inline void io_insw(unsigned int addr, void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *bp++ = _swapw(*ap); -} - -static inline void io_insl(unsigned int addr, void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *bp++ = _swapl(*ap); -} - -static inline void io_insw_noswap(unsigned int addr, void *buf, int len) -{ - volatile unsigned short *ap = (volatile unsigned short *) addr; - unsigned short *bp = (unsigned short *) buf; - while (len--) - *bp++ = *ap; -} - -static inline void io_insl_noswap(unsigned int addr, void *buf, int len) -{ - volatile unsigned long *ap = (volatile unsigned long *) addr; - unsigned long *bp = (unsigned long *) buf; - while (len--) - *bp++ = *ap; -} - -/* - * make the short names macros so specific devices - * can override them as required - */ - -#define memset_io(a,b,c) memset((void *)(a),(b),(c)) -#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) -#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) - -#define mmiowb() - -#define inb(addr) ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr)) -#define inw(addr) _swapw(readw(addr)) -#define inl(addr) _swapl(readl(addr)) -#define outb(x,addr) ((void)((h8300_buswidth(addr) && \ - ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr))) -#define outw(x,addr) ((void) writew(_swapw(x),addr)) -#define outl(x,addr) ((void) writel(_swapl(x),addr)) - -#define inb_p(addr) inb(addr) -#define inw_p(addr) inw(addr) -#define inl_p(addr) inl(addr) -#define outb_p(x,addr) outb(x,addr) -#define outw_p(x,addr) outw(x,addr) -#define outl_p(x,addr) outl(x,addr) - -#define outsb(a,b,l) io_outsb(a,b,l) -#define outsw(a,b,l) io_outsw(a,b,l) -#define outsl(a,b,l) io_outsl(a,b,l) - -#define insb(a,b,l) io_insb(a,b,l) -#define insw(a,b,l) io_insw(a,b,l) -#define insl(a,b,l) io_insl(a,b,l) - -#define IO_SPACE_LIMIT 0xffffff - - -/* Values for nocacheflag and cmode */ -#define IOMAP_FULL_CACHING 0 -#define IOMAP_NOCACHE_SER 1 -#define IOMAP_NOCACHE_NONSER 2 
-#define IOMAP_WRITETHROUGH 3 - -extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag); -extern void __iounmap(void *addr, unsigned long size); - -static inline void *ioremap(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_NOCACHE_SER); -} -static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_WRITETHROUGH); -} -static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size) -{ - return __ioremap(physaddr, size, IOMAP_FULL_CACHING); -} - -extern void iounmap(void *addr); - -/* H8/300 internal I/O functions */ -static __inline__ unsigned char ctrl_inb(unsigned long addr) -{ - return *(volatile unsigned char*)addr; -} - -static __inline__ unsigned short ctrl_inw(unsigned long addr) -{ - return *(volatile unsigned short*)addr; -} - -static __inline__ unsigned long ctrl_inl(unsigned long addr) -{ - return *(volatile unsigned long*)addr; -} - -static __inline__ void ctrl_outb(unsigned char b, unsigned long addr) -{ - *(volatile unsigned char*)addr = b; -} - -static __inline__ void ctrl_outw(unsigned short b, unsigned long addr) -{ - *(volatile unsigned short*)addr = b; -} - -static __inline__ void ctrl_outl(unsigned long b, unsigned long addr) -{ - *(volatile unsigned long*)addr = b; -} - -static __inline__ void ctrl_bclr(int b, unsigned long addr) -{ - if (__builtin_constant_p(b)) - switch (b) { - case 0: __asm__("bclr #0,@%0"::"r"(addr)); break; - case 1: __asm__("bclr #1,@%0"::"r"(addr)); break; - case 2: __asm__("bclr #2,@%0"::"r"(addr)); break; - case 3: __asm__("bclr #3,@%0"::"r"(addr)); break; - case 4: __asm__("bclr #4,@%0"::"r"(addr)); break; - case 5: __asm__("bclr #5,@%0"::"r"(addr)); break; - case 6: __asm__("bclr #6,@%0"::"r"(addr)); break; - case 7: __asm__("bclr #7,@%0"::"r"(addr)); break; - } - else - __asm__("bclr %w0,@%1"::"r"(b), "r"(addr)); -} - -static __inline__ void ctrl_bset(int b, unsigned long addr) -{ - if (__builtin_constant_p(b)) - switch (b) { - case 0: __asm__("bset #0,@%0"::"r"(addr)); break; - case 1: __asm__("bset #1,@%0"::"r"(addr)); break; - case 2: __asm__("bset #2,@%0"::"r"(addr)); break; - case 3: __asm__("bset #3,@%0"::"r"(addr)); break; - case 4: __asm__("bset #4,@%0"::"r"(addr)); break; - case 5: __asm__("bset #5,@%0"::"r"(addr)); break; - case 6: __asm__("bset #6,@%0"::"r"(addr)); break; - case 7: __asm__("bset #7,@%0"::"r"(addr)); break; - } - else - __asm__("bset %w0,@%1"::"r"(b), "r"(addr)); -} - -/* Pages to physical address... */ -#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT) -#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT) - -/* - * Macros used for converting between virtual and physical mappings. 
- */ -#define phys_to_virt(vaddr) ((void *) (vaddr)) -#define virt_to_phys(vaddr) ((unsigned long) (vaddr)) - -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt - -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -#endif /* __KERNEL__ */ - -#endif /* _H8300_IO_H */ diff --git a/arch/h8300/include/asm/irq.h b/arch/h8300/include/asm/irq.h deleted file mode 100644 index 13d7c601cd0..00000000000 --- a/arch/h8300/include/asm/irq.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef _H8300_IRQ_H_ -#define _H8300_IRQ_H_ - -#include <asm/ptrace.h> - -#if defined(CONFIG_CPU_H8300H) -#define NR_IRQS 64 -#define EXT_IRQ0 12 -#define EXT_IRQ1 13 -#define EXT_IRQ2 14 -#define EXT_IRQ3 15 -#define EXT_IRQ4 16 -#define EXT_IRQ5 17 -#define EXT_IRQ6 18 -#define EXT_IRQ7 19 -#define EXT_IRQS 5 -#define IER_REGS *(volatile unsigned char *)IER -#endif -#if defined(CONFIG_CPU_H8S) -#define NR_IRQS 128 -#define EXT_IRQ0 16 -#define EXT_IRQ1 17 -#define EXT_IRQ2 18 -#define EXT_IRQ3 19 -#define EXT_IRQ4 20 -#define EXT_IRQ5 21 -#define EXT_IRQ6 22 -#define EXT_IRQ7 23 -#define EXT_IRQ8 24 -#define EXT_IRQ9 25 -#define EXT_IRQ10 26 -#define EXT_IRQ11 27 -#define EXT_IRQ12 28 -#define EXT_IRQ13 29 -#define EXT_IRQ14 30 -#define EXT_IRQ15 31 -#define EXT_IRQS 15 - -#define IER_REGS *(volatile unsigned short *)IER -#endif - -static __inline__ int irq_canonicalize(int irq) -{ - return irq; -} - -typedef void (*h8300_vector)(void); - -#endif /* _H8300_IRQ_H_ */ diff --git a/arch/h8300/include/asm/irq_regs.h b/arch/h8300/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b7027..00000000000 --- a/arch/h8300/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/arch/h8300/include/asm/irqflags.h b/arch/h8300/include/asm/irqflags.h deleted file mode 100644 index 9617cd57aeb..00000000000 --- a/arch/h8300/include/asm/irqflags.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef _H8300_IRQFLAGS_H -#define _H8300_IRQFLAGS_H - -static inline unsigned long arch_local_save_flags(void) -{ - unsigned long flags; - asm volatile ("stc ccr,%w0" : "=r" (flags)); - return flags; -} - -static inline void arch_local_irq_disable(void) -{ - asm volatile ("orc #0x80,ccr" : : : "memory"); -} - -static inline void arch_local_irq_enable(void) -{ - asm volatile ("andc #0x7f,ccr" : : : "memory"); -} - -static inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags = arch_local_save_flags(); - arch_local_irq_disable(); - return flags; -} - -static inline void arch_local_irq_restore(unsigned long flags) -{ - asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory"); -} - -static inline bool arch_irqs_disabled_flags(unsigned long flags) -{ - return (flags & 0x80) == 0x80; -} - -static inline bool arch_irqs_disabled(void) -{ - return arch_irqs_disabled_flags(arch_local_save_flags()); -} - -#endif /* _H8300_IRQFLAGS_H */ diff --git a/arch/h8300/include/asm/kdebug.h b/arch/h8300/include/asm/kdebug.h deleted file mode 100644 index 6ece1b03766..00000000000 --- a/arch/h8300/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kdebug.h> diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h deleted file mode 100644 index be12a716011..00000000000 --- a/arch/h8300/include/asm/kmap_types.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_H8300_KMAP_TYPES_H -#define 
_ASM_H8300_KMAP_TYPES_H - -#include <asm-generic/kmap_types.h> - -#endif diff --git a/arch/h8300/include/asm/local.h b/arch/h8300/include/asm/local.h deleted file mode 100644 index fdd4efe437c..00000000000 --- a/arch/h8300/include/asm/local.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_LOCAL_H_ -#define _H8300_LOCAL_H_ - -#include <asm-generic/local.h> - -#endif diff --git a/arch/h8300/include/asm/local64.h b/arch/h8300/include/asm/local64.h deleted file mode 100644 index 36c93b5cc23..00000000000 --- a/arch/h8300/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local64.h> diff --git a/arch/h8300/include/asm/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h deleted file mode 100644 index ab9d9646d24..00000000000 --- a/arch/h8300/include/asm/mc146818rtc.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Machine dependent access functions for RTC registers. - */ -#ifndef _H8300_MC146818RTC_H -#define _H8300_MC146818RTC_H - -/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */ - -#endif /* _H8300_MC146818RTC_H */ diff --git a/arch/h8300/include/asm/mmu_context.h b/arch/h8300/include/asm/mmu_context.h deleted file mode 100644 index f44b730da54..00000000000 --- a/arch/h8300/include/asm/mmu_context.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef __H8300_MMU_CONTEXT_H -#define __H8300_MMU_CONTEXT_H - -#include <asm/setup.h> -#include <asm/page.h> -#include <asm/pgalloc.h> -#include <asm-generic/mm_hooks.h> - -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -} - -static inline int -init_new_context(struct task_struct *tsk, struct mm_struct *mm) -{ - // mm->context = virt_to_phys(mm->pgd); - return(0); -} - -#define destroy_context(mm) do { } while(0) -#define deactivate_mm(tsk,mm) do { } while(0) - -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) -{ -} - -static inline void activate_mm(struct mm_struct *prev_mm, - struct mm_struct *next_mm) -{ -} - -#endif diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc1..00000000000 --- a/arch/h8300/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. 
(see asm-generic/mutex-xchg.h for details) - */ - -#include <asm-generic/mutex-dec.h> diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h deleted file mode 100644 index 837381a2df4..00000000000 --- a/arch/h8300/include/asm/page.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef _H8300_PAGE_H -#define _H8300_PAGE_H - -/* PAGE_SHIFT determines the page size */ - -#define PAGE_SHIFT (12) -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - -#include <asm/setup.h> - -#ifndef __ASSEMBLY__ - -#define get_user_page(vaddr) __get_free_page(GFP_KERNEL) -#define free_user_page(page, addr) free_page(addr) - -#define clear_page(page) memset((page), 0, PAGE_SIZE) -#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) -#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE - -/* - * These are used to make use of C type-checking.. - */ -typedef struct { unsigned long pte; } pte_t; -typedef struct { unsigned long pmd[16]; } pmd_t; -typedef struct { unsigned long pgd; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; -typedef struct page *pgtable_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((&x)->pmd[0]) -#define pgd_val(x) ((x).pgd) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgd(x) ((pgd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -extern unsigned long memory_start; -extern unsigned long memory_end; - -#endif /* !__ASSEMBLY__ */ - -#include <asm/page_offset.h> - -#define PAGE_OFFSET (PAGE_OFFSET_RAW) - -#ifndef __ASSEMBLY__ - -#define __pa(vaddr) virt_to_phys(vaddr) -#define __va(paddr) phys_to_virt((unsigned long)paddr) - -#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) -#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) - -#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) -#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) -#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) -#define pfn_valid(page) (page < max_mapnr) - -#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) - -#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ - ((void *)(kaddr) < (void *)memory_end)) - -#endif /* __ASSEMBLY__ */ - -#include <asm-generic/memory_model.h> -#include <asm-generic/getorder.h> - -#endif /* _H8300_PAGE_H */ diff --git a/arch/h8300/include/asm/page_offset.h b/arch/h8300/include/asm/page_offset.h deleted file mode 100644 index f8706463008..00000000000 --- a/arch/h8300/include/asm/page_offset.h +++ /dev/null @@ -1,3 +0,0 @@ - -#define PAGE_OFFSET_RAW 0x00000000 - diff --git a/arch/h8300/include/asm/param.h b/arch/h8300/include/asm/param.h deleted file mode 100644 index c3909e7ff17..00000000000 --- a/arch/h8300/include/asm/param.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _H8300_PARAM_H -#define _H8300_PARAM_H - -#include <uapi/asm/param.h> - -#define HZ CONFIG_HZ -#define USER_HZ HZ -#define CLOCKS_PER_SEC (USER_HZ) -#endif /* _H8300_PARAM_H */ diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h deleted file mode 100644 index 0b2acaa3dd8..00000000000 --- a/arch/h8300/include/asm/pci.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef _ASM_H8300_PCI_H -#define _ASM_H8300_PCI_H - 
-/* - * asm-h8300/pci.h - H8/300 specific PCI declarations. - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - */ - -#define pcibios_assign_all_busses() 0 - -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - -#define PCI_DMA_BUS_IS_PHYS (1) - -#endif /* _ASM_H8300_PCI_H */ diff --git a/arch/h8300/include/asm/percpu.h b/arch/h8300/include/asm/percpu.h deleted file mode 100644 index 72c03e3666d..00000000000 --- a/arch/h8300/include/asm/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ARCH_H8300_PERCPU__ -#define __ARCH_H8300_PERCPU__ - -#include <asm-generic/percpu.h> - -#endif /* __ARCH_H8300_PERCPU__ */ diff --git a/arch/h8300/include/asm/pgalloc.h b/arch/h8300/include/asm/pgalloc.h deleted file mode 100644 index c2e89a286d2..00000000000 --- a/arch/h8300/include/asm/pgalloc.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _H8300_PGALLOC_H -#define _H8300_PGALLOC_H - -#include <asm/setup.h> - -#define check_pgt_cache() do { } while (0) - -#endif /* _H8300_PGALLOC_H */ diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h deleted file mode 100644 index 7ca20f894dd..00000000000 --- a/arch/h8300/include/asm/pgtable.h +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef _H8300_PGTABLE_H -#define _H8300_PGTABLE_H - -#include <asm-generic/4level-fixup.h> - -#include <linux/slab.h> -#include <asm/processor.h> -#include <asm/page.h> -#include <asm/io.h> - -#define pgd_present(pgd) (1) /* pages are always present on NO_MM */ -#define pgd_none(pgd) (0) -#define pgd_bad(pgd) (0) -#define pgd_clear(pgdp) -#define kern_addr_valid(addr) (1) -#define pmd_offset(a, b) ((void *)0) -#define pmd_none(pmd) (1) -#define pgd_offset_k(adrdress) ((pgd_t *)0) -#define pte_offset_kernel(dir, address) ((pte_t *)0) - -#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_COPY __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_READONLY __pgprot(0) /* these mean nothing to NO_MM */ -#define PAGE_KERNEL __pgprot(0) /* these mean nothing to NO_MM */ - -extern void paging_init(void); -#define swapper_pg_dir ((pgd_t *) 0) - -#define __swp_type(x) (0) -#define __swp_offset(x) (0) -#define __swp_entry(typ,off) ((swp_entry_t) { ((typ) | ((off) << 7)) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -static inline int pte_file(pte_t pte) { return 0; } - -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -#define ZERO_PAGE(vaddr) (virt_to_page(0)) - -/* - * These would be in other places but having them here reduces the diffs. - */ -extern unsigned int kobjsize(const void *objp); -extern int is_in_rom(unsigned long); - -/* - * No page table caches to initialise - */ -#define pgtable_cache_init() do { } while (0) - -/* - * All 32bit addresses are effectively valid for vmalloc... - * Sort of meaningless for non-VM targets. - */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff - -/* - * All 32bit addresses are effectively valid for vmalloc... - * Sort of meaningless for non-VM targets. 
- */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff - -#define arch_enter_lazy_cpu_mode() do {} while (0) - -#include <asm-generic/pgtable.h> - -#endif /* _H8300_PGTABLE_H */ diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h deleted file mode 100644 index 4b0ca49bb46..00000000000 --- a/arch/h8300/include/asm/processor.h +++ /dev/null @@ -1,139 +0,0 @@ -/* - * include/asm-h8300/processor.h - * - * Copyright (C) 2002 Yoshinori Sato - * - * Based on: linux/asm-m68nommu/processor.h - * - * Copyright (C) 1995 Hamish Macdonald - */ - -#ifndef __ASM_H8300_PROCESSOR_H -#define __ASM_H8300_PROCESSOR_H - -/* - * Default implementation of macro that returns current - * instruction pointer ("program counter"). - */ -#define current_text_addr() ({ __label__ _l; _l: &&_l;}) - -#include <linux/compiler.h> -#include <asm/segment.h> -#include <asm/fpu.h> -#include <asm/ptrace.h> -#include <asm/current.h> - -static inline unsigned long rdusp(void) { - extern unsigned int sw_usp; - return(sw_usp); -} - -static inline void wrusp(unsigned long usp) { - extern unsigned int sw_usp; - sw_usp = usp; -} - -/* - * User space process size: 3.75GB. This is hardcoded into a few places, - * so don't change it unless you know what you are doing. - */ -#define TASK_SIZE (0xFFFFFFFFUL) - -#ifdef __KERNEL__ -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP -#endif - -/* - * This decides where the kernel will search for a free chunk of vm - * space during mmap's. We won't be using it - */ -#define TASK_UNMAPPED_BASE 0 - -struct thread_struct { - unsigned long ksp; /* kernel stack pointer */ - unsigned long usp; /* user stack pointer */ - unsigned long ccr; /* saved status register */ - unsigned long esp0; /* points to SR of stack frame */ - struct { - unsigned short *addr; - unsigned short inst; - } breakinfo; -}; - -#define INIT_THREAD { \ - .ksp = sizeof(init_stack) + (unsigned long)init_stack, \ - .usp = 0, \ - .ccr = PS_S, \ - .esp0 = 0, \ - .breakinfo = { \ - .addr = (unsigned short *)-1, \ - .inst = 0 \ - } \ -} - -/* - * Do necessary setup to start up a newly executed thread. - * - * pass the data segment into user programs if it exists, - * it can't hurt anything as far as I can tell - */ -#if defined(__H8300H__) -#define start_thread(_regs, _pc, _usp) \ -do { \ - (_regs)->pc = (_pc); \ - (_regs)->ccr = 0x00; /* clear all flags */ \ - (_regs)->er5 = current->mm->start_data; /* GOT base */ \ - wrusp((unsigned long)(_usp) - sizeof(unsigned long)*3); \ -} while(0) -#endif -#if defined(__H8300S__) -#define start_thread(_regs, _pc, _usp) \ -do { \ - (_regs)->pc = (_pc); \ - (_regs)->ccr = 0x00; /* clear kernel flag */ \ - (_regs)->exr = 0x78; /* enable all interrupts */ \ - (_regs)->er5 = current->mm->start_data; /* GOT base */ \ - /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \ - wrusp(((unsigned long)(_usp)) - 14); \ -} while(0) -#endif - -/* Forward declaration, a strange C thing */ -struct task_struct; - -/* Free all resources held by a thread. */ -static inline void release_thread(struct task_struct *dead_task) -{ -} - -/* - * Free current thread data structures etc.. - */ -static inline void exit_thread(void) -{ -} - -/* - * Return saved PC of a blocked thread. 
- */ -unsigned long thread_saved_pc(struct task_struct *tsk); -unsigned long get_wchan(struct task_struct *p); - -#define KSTK_EIP(tsk) \ - ({ \ - unsigned long eip = 0; \ - if ((tsk)->thread.esp0 > PAGE_SIZE && \ - MAP_NR((tsk)->thread.esp0) < max_mapnr) \ - eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \ - eip; }) -#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) - -#define cpu_relax() barrier() - -#define HARD_RESET_NOW() ({ \ - local_irq_disable(); \ - asm("jmp @@0"); \ -}) - -#endif diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h deleted file mode 100644 index c1826b95c5c..00000000000 --- a/arch/h8300/include/asm/ptrace.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _H8300_PTRACE_H -#define _H8300_PTRACE_H - -#include <uapi/asm/ptrace.h> - -#ifndef __ASSEMBLY__ -#if defined(CONFIG_CPU_H8S) -#endif -#ifndef PS_S -#define PS_S (0x10) -#endif - -#if defined(__H8300H__) -#define H8300_REGS_NO 11 -#endif -#if defined(__H8300S__) -#define H8300_REGS_NO 12 -#endif - -/* Find the stack offset for a register, relative to thread.esp0. */ -#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) - -#define arch_has_single_step() (1) - -#define user_mode(regs) (!((regs)->ccr & PS_S)) -#define instruction_pointer(regs) ((regs)->pc) -#define profile_pc(regs) instruction_pointer(regs) -#define current_pt_regs() ((struct pt_regs *) \ - (THREAD_SIZE + (unsigned long)current_thread_info()) - 1) -#define signal_pt_regs() ((struct pt_regs *)current->thread.esp0) -#define current_user_stack_pointer() rdusp() -#endif /* __ASSEMBLY__ */ -#endif /* _H8300_PTRACE_H */ diff --git a/arch/h8300/include/asm/regs267x.h b/arch/h8300/include/asm/regs267x.h deleted file mode 100644 index 1bff731a9f7..00000000000 --- a/arch/h8300/include/asm/regs267x.h +++ /dev/null @@ -1,336 +0,0 @@ -/* internal Peripherals Register address define */ -/* CPU: H8/306x */ - -#if !defined(__REGS_H8S267x__) -#define __REGS_H8S267x__ - -#if defined(__KERNEL__) - -#define DASTCR 0xFEE01A -#define DADR0 0xFFFFA4 -#define DADR1 0xFFFFA5 -#define DACR01 0xFFFFA6 -#define DADR2 0xFFFFA8 -#define DADR3 0xFFFFA9 -#define DACR23 0xFFFFAA - -#define ADDRA 0xFFFF90 -#define ADDRAH 0xFFFF90 -#define ADDRAL 0xFFFF91 -#define ADDRB 0xFFFF92 -#define ADDRBH 0xFFFF92 -#define ADDRBL 0xFFFF93 -#define ADDRC 0xFFFF94 -#define ADDRCH 0xFFFF94 -#define ADDRCL 0xFFFF95 -#define ADDRD 0xFFFF96 -#define ADDRDH 0xFFFF96 -#define ADDRDL 0xFFFF97 -#define ADDRE 0xFFFF98 -#define ADDREH 0xFFFF98 -#define ADDREL 0xFFFF99 -#define ADDRF 0xFFFF9A -#define ADDRFH 0xFFFF9A -#define ADDRFL 0xFFFF9B -#define ADDRG 0xFFFF9C -#define ADDRGH 0xFFFF9C -#define ADDRGL 0xFFFF9D -#define ADDRH 0xFFFF9E -#define ADDRHH 0xFFFF9E -#define ADDRHL 0xFFFF9F - -#define ADCSR 0xFFFFA0 -#define ADCR 0xFFFFA1 - -#define ABWCR 0xFFFEC0 -#define ASTCR 0xFFFEC1 -#define WTCRAH 0xFFFEC2 -#define WTCRAL 0xFFFEC3 -#define WTCRBH 0xFFFEC4 -#define WTCRBL 0xFFFEC5 -#define RDNCR 0xFFFEC6 -#define CSACRH 0xFFFEC8 -#define CSACRL 0xFFFEC9 -#define BROMCRH 0xFFFECA -#define BROMCRL 0xFFFECB -#define BCR 0xFFFECC -#define DRAMCR 0xFFFED0 -#define DRACCR 0xFFFED2 -#define REFCR 0xFFFED4 -#define RTCNT 0xFFFED6 -#define RTCOR 0xFFFED7 - -#define MAR0AH 0xFFFEE0 -#define MAR0AL 0xFFFEE2 -#define IOAR0A 0xFFFEE4 -#define ETCR0A 0xFFFEE6 -#define MAR0BH 0xFFFEE8 -#define MAR0BL 0xFFFEEA -#define IOAR0B 0xFFFEEC -#define ETCR0B 0xFFFEEE -#define MAR1AH 0xFFFEF0 -#define MAR1AL 0xFFFEF2 -#define IOAR1A 0xFFFEF4 -#define ETCR1A 0xFFFEF6 -#define MAR1BH 
0xFFFEF8 -#define MAR1BL 0xFFFEFA -#define IOAR1B 0xFFFEFC -#define ETCR1B 0xFFFEFE -#define DMAWER 0xFFFF20 -#define DMATCR 0xFFFF21 -#define DMACR0A 0xFFFF22 -#define DMACR0B 0xFFFF23 -#define DMACR1A 0xFFFF24 -#define DMACR1B 0xFFFF25 -#define DMABCRH 0xFFFF26 -#define DMABCRL 0xFFFF27 - -#define EDSAR0 0xFFFDC0 -#define EDDAR0 0xFFFDC4 -#define EDTCR0 0xFFFDC8 -#define EDMDR0 0xFFFDCC -#define EDMDR0H 0xFFFDCC -#define EDMDR0L 0xFFFDCD -#define EDACR0 0xFFFDCE -#define EDSAR1 0xFFFDD0 -#define EDDAR1 0xFFFDD4 -#define EDTCR1 0xFFFDD8 -#define EDMDR1 0xFFFDDC -#define EDMDR1H 0xFFFDDC -#define EDMDR1L 0xFFFDDD -#define EDACR1 0xFFFDDE -#define EDSAR2 0xFFFDE0 -#define EDDAR2 0xFFFDE4 -#define EDTCR2 0xFFFDE8 -#define EDMDR2 0xFFFDEC -#define EDMDR2H 0xFFFDEC -#define EDMDR2L 0xFFFDED -#define EDACR2 0xFFFDEE -#define EDSAR3 0xFFFDF0 -#define EDDAR3 0xFFFDF4 -#define EDTCR3 0xFFFDF8 -#define EDMDR3 0xFFFDFC -#define EDMDR3H 0xFFFDFC -#define EDMDR3L 0xFFFDFD -#define EDACR3 0xFFFDFE - -#define IPRA 0xFFFE00 -#define IPRB 0xFFFE02 -#define IPRC 0xFFFE04 -#define IPRD 0xFFFE06 -#define IPRE 0xFFFE08 -#define IPRF 0xFFFE0A -#define IPRG 0xFFFE0C -#define IPRH 0xFFFE0E -#define IPRI 0xFFFE10 -#define IPRJ 0xFFFE12 -#define IPRK 0xFFFE14 -#define ITSR 0xFFFE16 -#define SSIER 0xFFFE18 -#define ISCRH 0xFFFE1A -#define ISCRL 0xFFFE1C - -#define INTCR 0xFFFF31 -#define IER 0xFFFF32 -#define IERH 0xFFFF32 -#define IERL 0xFFFF33 -#define ISR 0xFFFF34 -#define ISRH 0xFFFF34 -#define ISRL 0xFFFF35 - -#define P1DDR 0xFFFE20 -#define P2DDR 0xFFFE21 -#define P3DDR 0xFFFE22 -#define P4DDR 0xFFFE23 -#define P5DDR 0xFFFE24 -#define P6DDR 0xFFFE25 -#define P7DDR 0xFFFE26 -#define P8DDR 0xFFFE27 -#define P9DDR 0xFFFE28 -#define PADDR 0xFFFE29 -#define PBDDR 0xFFFE2A -#define PCDDR 0xFFFE2B -#define PDDDR 0xFFFE2C -#define PEDDR 0xFFFE2D -#define PFDDR 0xFFFE2E -#define PGDDR 0xFFFE2F -#define PHDDR 0xFFFF74 - -#define PFCR0 0xFFFE32 -#define PFCR1 0xFFFE33 -#define PFCR2 0xFFFE34 - -#define PAPCR 0xFFFE36 -#define PBPCR 0xFFFE37 -#define PCPCR 0xFFFE38 -#define PDPCR 0xFFFE39 -#define PEPCR 0xFFFE3A - -#define P3ODR 0xFFFE3C -#define PAODR 0xFFFE3D - -#define P1DR 0xFFFF60 -#define P2DR 0xFFFF61 -#define P3DR 0xFFFF62 -#define P4DR 0xFFFF63 -#define P5DR 0xFFFF64 -#define P6DR 0xFFFF65 -#define P7DR 0xFFFF66 -#define P8DR 0xFFFF67 -#define P9DR 0xFFFF68 -#define PADR 0xFFFF69 -#define PBDR 0xFFFF6A -#define PCDR 0xFFFF6B -#define PDDR 0xFFFF6C -#define PEDR 0xFFFF6D -#define PFDR 0xFFFF6E -#define PGDR 0xFFFF6F -#define PHDR 0xFFFF72 - -#define PORT1 0xFFFF50 -#define PORT2 0xFFFF51 -#define PORT3 0xFFFF52 -#define PORT4 0xFFFF53 -#define PORT5 0xFFFF54 -#define PORT6 0xFFFF55 -#define PORT7 0xFFFF56 -#define PORT8 0xFFFF57 -#define PORT9 0xFFFF58 -#define PORTA 0xFFFF59 -#define PORTB 0xFFFF5A -#define PORTC 0xFFFF5B -#define PORTD 0xFFFF5C -#define PORTE 0xFFFF5D -#define PORTF 0xFFFF5E -#define PORTG 0xFFFF5F -#define PORTH 0xFFFF70 - -#define PCR 0xFFFF46 -#define PMR 0xFFFF47 -#define NDERH 0xFFFF48 -#define NDERL 0xFFFF49 -#define PODRH 0xFFFF4A -#define PODRL 0xFFFF4B -#define NDRH1 0xFFFF4C -#define NDRL1 0xFFFF4D -#define NDRH2 0xFFFF4E -#define NDRL2 0xFFFF4F - -#define SMR0 0xFFFF78 -#define BRR0 0xFFFF79 -#define SCR0 0xFFFF7A -#define TDR0 0xFFFF7B -#define SSR0 0xFFFF7C -#define RDR0 0xFFFF7D -#define SCMR0 0xFFFF7E -#define SMR1 0xFFFF80 -#define BRR1 0xFFFF81 -#define SCR1 0xFFFF82 -#define TDR1 0xFFFF83 -#define SSR1 0xFFFF84 -#define RDR1 0xFFFF85 -#define SCMR1 0xFFFF86 -#define SMR2 
0xFFFF88 -#define BRR2 0xFFFF89 -#define SCR2 0xFFFF8A -#define TDR2 0xFFFF8B -#define SSR2 0xFFFF8C -#define RDR2 0xFFFF8D -#define SCMR2 0xFFFF8E - -#define IRCR0 0xFFFE1E -#define SEMR 0xFFFDA8 - -#define MDCR 0xFFFF3E -#define SYSCR 0xFFFF3D -#define MSTPCRH 0xFFFF40 -#define MSTPCRL 0xFFFF41 -#define FLMCR1 0xFFFFC8 -#define FLMCR2 0xFFFFC9 -#define EBR1 0xFFFFCA -#define EBR2 0xFFFFCB -#define CTGARC_RAMCR 0xFFFECE -#define SBYCR 0xFFFF3A -#define SCKCR 0xFFFF3B -#define PLLCR 0xFFFF45 - -#define TSTR 0xFFFFC0 -#define TSNC 0XFFFFC1 - -#define TCR0 0xFFFFD0 -#define TMDR0 0xFFFFD1 -#define TIORH0 0xFFFFD2 -#define TIORL0 0xFFFFD3 -#define TIER0 0xFFFFD4 -#define TSR0 0xFFFFD5 -#define TCNT0 0xFFFFD6 -#define GRA0 0xFFFFD8 -#define GRB0 0xFFFFDA -#define GRC0 0xFFFFDC -#define GRD0 0xFFFFDE -#define TCR1 0xFFFFE0 -#define TMDR1 0xFFFFE1 -#define TIORH1 0xFFFFE2 -#define TIORL1 0xFFFFE3 -#define TIER1 0xFFFFE4 -#define TSR1 0xFFFFE5 -#define TCNT1 0xFFFFE6 -#define GRA1 0xFFFFE8 -#define GRB1 0xFFFFEA -#define TCR2 0xFFFFF0 -#define TMDR2 0xFFFFF1 -#define TIORH2 0xFFFFF2 -#define TIORL2 0xFFFFF3 -#define TIER2 0xFFFFF4 -#define TSR2 0xFFFFF5 -#define TCNT2 0xFFFFF6 -#define GRA2 0xFFFFF8 -#define GRB2 0xFFFFFA -#define TCR3 0xFFFE80 -#define TMDR3 0xFFFE81 -#define TIORH3 0xFFFE82 -#define TIORL3 0xFFFE83 -#define TIER3 0xFFFE84 -#define TSR3 0xFFFE85 -#define TCNT3 0xFFFE86 -#define GRA3 0xFFFE88 -#define GRB3 0xFFFE8A -#define GRC3 0xFFFE8C -#define GRD3 0xFFFE8E -#define TCR4 0xFFFE90 -#define TMDR4 0xFFFE91 -#define TIORH4 0xFFFE92 -#define TIORL4 0xFFFE93 -#define TIER4 0xFFFE94 -#define TSR4 0xFFFE95 -#define TCNT4 0xFFFE96 -#define GRA4 0xFFFE98 -#define GRB4 0xFFFE9A -#define TCR5 0xFFFEA0 -#define TMDR5 0xFFFEA1 -#define TIORH5 0xFFFEA2 -#define TIORL5 0xFFFEA3 -#define TIER5 0xFFFEA4 -#define TSR5 0xFFFEA5 -#define TCNT5 0xFFFEA6 -#define GRA5 0xFFFEA8 -#define GRB5 0xFFFEAA - -#define _8TCR0 0xFFFFB0 -#define _8TCR1 0xFFFFB1 -#define _8TCSR0 0xFFFFB2 -#define _8TCSR1 0xFFFFB3 -#define _8TCORA0 0xFFFFB4 -#define _8TCORA1 0xFFFFB5 -#define _8TCORB0 0xFFFFB6 -#define _8TCORB1 0xFFFFB7 -#define _8TCNT0 0xFFFFB8 -#define _8TCNT1 0xFFFFB9 - -#define TCSR 0xFFFFBC -#define TCNT 0xFFFFBD -#define RSTCSRW 0xFFFFBE -#define RSTCSRR 0xFFFFBF - -#endif /* __KERNEL__ */ -#endif /* __REGS_H8S267x__ */ diff --git a/arch/h8300/include/asm/regs306x.h b/arch/h8300/include/asm/regs306x.h deleted file mode 100644 index 027dd633fa2..00000000000 --- a/arch/h8300/include/asm/regs306x.h +++ /dev/null @@ -1,212 +0,0 @@ -/* internal Peripherals Register address define */ -/* CPU: H8/306x */ - -#if !defined(__REGS_H8306x__) -#define __REGS_H8306x__ - -#if defined(__KERNEL__) - -#define DASTCR 0xFEE01A -#define DADR0 0xFEE09C -#define DADR1 0xFEE09D -#define DACR 0xFEE09E - -#define ADDRAH 0xFFFFE0 -#define ADDRAL 0xFFFFE1 -#define ADDRBH 0xFFFFE2 -#define ADDRBL 0xFFFFE3 -#define ADDRCH 0xFFFFE4 -#define ADDRCL 0xFFFFE5 -#define ADDRDH 0xFFFFE6 -#define ADDRDL 0xFFFFE7 -#define ADCSR 0xFFFFE8 -#define ADCR 0xFFFFE9 - -#define BRCR 0xFEE013 -#define ADRCR 0xFEE01E -#define CSCR 0xFEE01F -#define ABWCR 0xFEE020 -#define ASTCR 0xFEE021 -#define WCRH 0xFEE022 -#define WCRL 0xFEE023 -#define BCR 0xFEE024 -#define DRCRA 0xFEE026 -#define DRCRB 0xFEE027 -#define RTMCSR 0xFEE028 -#define RTCNT 0xFEE029 -#define RTCOR 0xFEE02A - -#define MAR0AR 0xFFFF20 -#define MAR0AE 0xFFFF21 -#define MAR0AH 0xFFFF22 -#define MAR0AL 0xFFFF23 -#define ETCR0AL 0xFFFF24 -#define ETCR0AH 0xFFFF25 -#define IOAR0A 0xFFFF26 
-#define DTCR0A 0xFFFF27 -#define MAR0BR 0xFFFF28 -#define MAR0BE 0xFFFF29 -#define MAR0BH 0xFFFF2A -#define MAR0BL 0xFFFF2B -#define ETCR0BL 0xFFFF2C -#define ETCR0BH 0xFFFF2D -#define IOAR0B 0xFFFF2E -#define DTCR0B 0xFFFF2F -#define MAR1AR 0xFFFF30 -#define MAR1AE 0xFFFF31 -#define MAR1AH 0xFFFF32 -#define MAR1AL 0xFFFF33 -#define ETCR1AL 0xFFFF34 -#define ETCR1AH 0xFFFF35 -#define IOAR1A 0xFFFF36 -#define DTCR1A 0xFFFF37 -#define MAR1BR 0xFFFF38 -#define MAR1BE 0xFFFF39 -#define MAR1BH 0xFFFF3A -#define MAR1BL 0xFFFF3B -#define ETCR1BL 0xFFFF3C -#define ETCR1BH 0xFFFF3D -#define IOAR1B 0xFFFF3E -#define DTCR1B 0xFFFF3F - -#define ISCR 0xFEE014 -#define IER 0xFEE015 -#define ISR 0xFEE016 -#define IPRA 0xFEE018 -#define IPRB 0xFEE019 - -#define P1DDR 0xFEE000 -#define P2DDR 0xFEE001 -#define P3DDR 0xFEE002 -#define P4DDR 0xFEE003 -#define P5DDR 0xFEE004 -#define P6DDR 0xFEE005 -/*#define P7DDR 0xFEE006*/ -#define P8DDR 0xFEE007 -#define P9DDR 0xFEE008 -#define PADDR 0xFEE009 -#define PBDDR 0xFEE00A - -#define P1DR 0xFFFFD0 -#define P2DR 0xFFFFD1 -#define P3DR 0xFFFFD2 -#define P4DR 0xFFFFD3 -#define P5DR 0xFFFFD4 -#define P6DR 0xFFFFD5 -/*#define P7DR 0xFFFFD6*/ -#define P8DR 0xFFFFD7 -#define P9DR 0xFFFFD8 -#define PADR 0xFFFFD9 -#define PBDR 0xFFFFDA - -#define P2CR 0xFEE03C -#define P4CR 0xFEE03E -#define P5CR 0xFEE03F - -#define SMR0 0xFFFFB0 -#define BRR0 0xFFFFB1 -#define SCR0 0xFFFFB2 -#define TDR0 0xFFFFB3 -#define SSR0 0xFFFFB4 -#define RDR0 0xFFFFB5 -#define SCMR0 0xFFFFB6 -#define SMR1 0xFFFFB8 -#define BRR1 0xFFFFB9 -#define SCR1 0xFFFFBA -#define TDR1 0xFFFFBB -#define SSR1 0xFFFFBC -#define RDR1 0xFFFFBD -#define SCMR1 0xFFFFBE -#define SMR2 0xFFFFC0 -#define BRR2 0xFFFFC1 -#define SCR2 0xFFFFC2 -#define TDR2 0xFFFFC3 -#define SSR2 0xFFFFC4 -#define RDR2 0xFFFFC5 -#define SCMR2 0xFFFFC6 - -#define MDCR 0xFEE011 -#define SYSCR 0xFEE012 -#define DIVCR 0xFEE01B -#define MSTCRH 0xFEE01C -#define MSTCRL 0xFEE01D -#define FLMCR1 0xFEE030 -#define FLMCR2 0xFEE031 -#define EBR1 0xFEE032 -#define EBR2 0xFEE033 -#define RAMCR 0xFEE077 - -#define TSTR 0xFFFF60 -#define TSNC 0XFFFF61 -#define TMDR 0xFFFF62 -#define TOLR 0xFFFF63 -#define TISRA 0xFFFF64 -#define TISRB 0xFFFF65 -#define TISRC 0xFFFF66 -#define TCR0 0xFFFF68 -#define TIOR0 0xFFFF69 -#define TCNT0H 0xFFFF6A -#define TCNT0L 0xFFFF6B -#define GRA0H 0xFFFF6C -#define GRA0L 0xFFFF6D -#define GRB0H 0xFFFF6E -#define GRB0L 0xFFFF6F -#define TCR1 0xFFFF70 -#define TIOR1 0xFFFF71 -#define TCNT1H 0xFFFF72 -#define TCNT1L 0xFFFF73 -#define GRA1H 0xFFFF74 -#define GRA1L 0xFFFF75 -#define GRB1H 0xFFFF76 -#define GRB1L 0xFFFF77 -#define TCR3 0xFFFF78 -#define TIOR3 0xFFFF79 -#define TCNT3H 0xFFFF7A -#define TCNT3L 0xFFFF7B -#define GRA3H 0xFFFF7C -#define GRA3L 0xFFFF7D -#define GRB3H 0xFFFF7E -#define GRB3L 0xFFFF7F - -#define _8TCR0 0xFFFF80 -#define _8TCR1 0xFFFF81 -#define _8TCSR0 0xFFFF82 -#define _8TCSR1 0xFFFF83 -#define TCORA0 0xFFFF84 -#define TCORA1 0xFFFF85 -#define TCORB0 0xFFFF86 -#define TCORB1 0xFFFF87 -#define _8TCNT0 0xFFFF88 -#define _8TCNT1 0xFFFF89 - -#define _8TCR2 0xFFFF90 -#define _8TCR3 0xFFFF91 -#define _8TCSR2 0xFFFF92 -#define _8TCSR3 0xFFFF93 -#define TCORA2 0xFFFF94 -#define TCORA3 0xFFFF95 -#define TCORB2 0xFFFF96 -#define TCORB3 0xFFFF97 -#define _8TCNT2 0xFFFF98 -#define _8TCNT3 0xFFFF99 - -#define TCSR 0xFFFF8C -#define TCNT 0xFFFF8D -#define RSTCSR 0xFFFF8F - -#define TPMR 0xFFFFA0 -#define TPCR 0xFFFFA1 -#define NDERB 0xFFFFA2 -#define NDERA 0xFFFFA3 -#define NDRB1 0xFFFFA4 -#define NDRA1 0xFFFFA5 
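(Editorial aside, not part of the patch: the regs267x.h/regs306x.h tables removed above are plain memory-mapped register addresses. A minimal hypothetical sketch of how such defines are typically consumed follows, using two addresses quoted from the H8/306x list; the helper and function names are invented, and the code is only meaningful when run on the target MCU, not on a host.)

#include <stdint.h>

/* Port 1 data direction and data registers, addresses as listed in the
 * removed regs306x.h. */
#define P1DDR 0xFEE000
#define P1DR  0xFFFFD0

/* Invented helper: a volatile byte store at an absolute I/O address,
 * which is the only way these register defines are ever used. */
static inline void reg_write8(uintptr_t addr, uint8_t val)
{
        *(volatile uint8_t *)addr = val;
}

/* Example: configure every port 1 pin as an output and drive it high. */
static void port1_all_high(void)
{
        reg_write8(P1DDR, 0xff);        /* all pins as outputs */
        reg_write8(P1DR, 0xff);         /* all output latches high */
}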
-#define NDRB2 0xFFFFA6 -#define NDRA2 0xFFFFA7 - -#define TCSR 0xFFFF8C -#define TCNT 0xFFFF8D -#define RSTCSRW 0xFFFF8E -#define RSTCSRR 0xFFFF8F - -#endif /* __KERNEL__ */ -#endif /* __REGS_H8306x__ */ diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h deleted file mode 100644 index 82130eda0e5..00000000000 --- a/arch/h8300/include/asm/scatterlist.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_SCATTERLIST_H -#define _H8300_SCATTERLIST_H - -#include <asm-generic/scatterlist.h> - -#endif /* !(_H8300_SCATTERLIST_H) */ diff --git a/arch/h8300/include/asm/sections.h b/arch/h8300/include/asm/sections.h deleted file mode 100644 index a81743e8b74..00000000000 --- a/arch/h8300/include/asm/sections.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_SECTIONS_H_ -#define _H8300_SECTIONS_H_ - -#include <asm-generic/sections.h> - -#endif diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h deleted file mode 100644 index b79a82d0f99..00000000000 --- a/arch/h8300/include/asm/segment.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef _H8300_SEGMENT_H -#define _H8300_SEGMENT_H - -/* define constants */ -#define USER_DATA (1) -#ifndef __USER_DS -#define __USER_DS (USER_DATA) -#endif -#define USER_PROGRAM (2) -#define SUPER_DATA (3) -#ifndef __KERNEL_DS -#define __KERNEL_DS (SUPER_DATA) -#endif -#define SUPER_PROGRAM (4) - -#ifndef __ASSEMBLY__ - -typedef struct { - unsigned long seg; -} mm_segment_t; - -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) -#define USER_DS MAKE_MM_SEG(__USER_DS) -#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS) - -/* - * Get/set the SFC/DFC registers for MOVES instructions - */ - -static inline mm_segment_t get_fs(void) -{ - return USER_DS; -} - -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - -static inline void set_fs(mm_segment_t val) -{ -} - -#define segment_eq(a,b) ((a).seg == (b).seg) - -#endif /* __ASSEMBLY__ */ - -#endif /* _H8300_SEGMENT_H */ diff --git a/arch/h8300/include/asm/sh_bios.h b/arch/h8300/include/asm/sh_bios.h deleted file mode 100644 index b6bb6e58295..00000000000 --- a/arch/h8300/include/asm/sh_bios.h +++ /dev/null @@ -1,29 +0,0 @@ -/* eCos HAL interface header */ - -#ifndef SH_BIOS_H -#define SH_BIOS_H - -#define HAL_IF_VECTOR_TABLE 0xfffe20 -#define CALL_IF_SET_CONSOLE_COMM 13 -#define QUERY_CURRENT -1 -#define MANGLER -3 - -/* Checking for GDB stub active */ -/* suggestion Jonathan Larmour */ -static int sh_bios_in_gdb_mode(void) -{ - static int gdb_active = -1; - if (gdb_active == -1) { - int (*set_console_comm)(int); - set_console_comm = ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM]; - gdb_active = (set_console_comm(QUERY_CURRENT) == MANGLER); - } - return gdb_active; -} - -static void sh_bios_gdb_detach(void) -{ - -} - -#endif diff --git a/arch/h8300/include/asm/shm.h b/arch/h8300/include/asm/shm.h deleted file mode 100644 index ed6623c0545..00000000000 --- a/arch/h8300/include/asm/shm.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _H8300_SHM_H -#define _H8300_SHM_H - - -/* format of page table entries that correspond to shared memory pages - currently out in swap space (see also mm/swap.c): - bits 0-1 (PAGE_PRESENT) is = 0 - bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE - bits 31..9 are used like this: - bits 15..9 (SHM_ID) the id of the shared memory segment - bits 30..16 (SHM_IDX) the index of the page within the shared memory segment - (actually only bits 25..16 get used since SHMMAX is so low) - bit 31 (SHM_READ_ONLY) flag whether 
the page belongs to a read-only attach -*/ -/* on the m68k both bits 0 and 1 must be zero */ -/* format on the sun3 is similar, but bits 30, 31 are set to zero and all - others are reduced by 2. --m */ - -#ifndef CONFIG_SUN3 -#define SHM_ID_SHIFT 9 -#else -#define SHM_ID_SHIFT 7 -#endif -#define _SHM_ID_BITS 7 -#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1) - -#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS) -#define _SHM_IDX_BITS 15 -#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1) - -#endif /* _H8300_SHM_H */ diff --git a/arch/h8300/include/asm/shmparam.h b/arch/h8300/include/asm/shmparam.h deleted file mode 100644 index d1863953ec6..00000000000 --- a/arch/h8300/include/asm/shmparam.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_SHMPARAM_H -#define _H8300_SHMPARAM_H - -#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ - -#endif /* _H8300_SHMPARAM_H */ diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h deleted file mode 100644 index 6341e36386f..00000000000 --- a/arch/h8300/include/asm/signal.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _H8300_SIGNAL_H -#define _H8300_SIGNAL_H - -#include <uapi/asm/signal.h> - -/* Most things should be clean enough to redefine this at will, if care - is taken to make libc match. */ - -#define _NSIG 64 -#define _NSIG_BPW 32 -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - -#define __ARCH_HAS_SA_RESTORER - -#include <asm/sigcontext.h> -#undef __HAVE_ARCH_SIG_BITOPS - -#endif /* _H8300_SIGNAL_H */ diff --git a/arch/h8300/include/asm/smp.h b/arch/h8300/include/asm/smp.h deleted file mode 100644 index 9e9bd7e5892..00000000000 --- a/arch/h8300/include/asm/smp.h +++ /dev/null @@ -1 +0,0 @@ -/* nothing required here yet */ diff --git a/arch/h8300/include/asm/spinlock.h b/arch/h8300/include/asm/spinlock.h deleted file mode 100644 index d5407fa173e..00000000000 --- a/arch/h8300/include/asm/spinlock.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __H8300_SPINLOCK_H -#define __H8300_SPINLOCK_H - -#error "H8/300 doesn't do SMP yet" - -#endif diff --git a/arch/h8300/include/asm/string.h b/arch/h8300/include/asm/string.h deleted file mode 100644 index ca5034897d8..00000000000 --- a/arch/h8300/include/asm/string.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _H8300_STRING_H_ -#define _H8300_STRING_H_ - -#ifdef __KERNEL__ /* only set these up for kernel code */ - -#include <asm/setup.h> -#include <asm/page.h> - -#define __HAVE_ARCH_MEMSET -extern void * memset(void * s, int c, size_t count); - -#define __HAVE_ARCH_MEMCPY -extern void * memcpy(void *d, const void *s, size_t count); - -#else /* KERNEL */ - -/* - * let user libraries deal with these, - * IMHO the kernel has no place defining these functions for user apps - */ - -#define __HAVE_ARCH_STRCPY 1 -#define __HAVE_ARCH_STRNCPY 1 -#define __HAVE_ARCH_STRCAT 1 -#define __HAVE_ARCH_STRNCAT 1 -#define __HAVE_ARCH_STRCMP 1 -#define __HAVE_ARCH_STRNCMP 1 -#define __HAVE_ARCH_STRNICMP 1 -#define __HAVE_ARCH_STRCHR 1 -#define __HAVE_ARCH_STRRCHR 1 -#define __HAVE_ARCH_STRSTR 1 -#define __HAVE_ARCH_STRLEN 1 -#define __HAVE_ARCH_STRNLEN 1 -#define __HAVE_ARCH_MEMSET 1 -#define __HAVE_ARCH_MEMCPY 1 -#define __HAVE_ARCH_MEMMOVE 1 -#define __HAVE_ARCH_MEMSCAN 1 -#define __HAVE_ARCH_MEMCMP 1 -#define __HAVE_ARCH_MEMCHR 1 -#define __HAVE_ARCH_STRTOK 1 - -#endif /* KERNEL */ - -#endif /* _M68K_STRING_H_ */ diff --git a/arch/h8300/include/asm/switch_to.h b/arch/h8300/include/asm/switch_to.h 
deleted file mode 100644 index cdd8731ce48..00000000000 --- a/arch/h8300/include/asm/switch_to.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef _H8300_SWITCH_TO_H -#define _H8300_SWITCH_TO_H - -/* - * switch_to(n) should switch tasks to task ptr, first checking that - * ptr isn't the current task, in which case it does nothing. This - * also clears the TS-flag if the task we switched to has used the - * math co-processor latest. - */ -/* - * switch_to() saves the extra registers, that are not saved - * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and - * a0-a1. Some of these are used by schedule() and its predecessors - * and so we might get see unexpected behaviors when a task returns - * with unexpected register values. - * - * syscall stores these registers itself and none of them are used - * by syscall after the function in the syscall has been called. - * - * Beware that resume now expects *next to be in d1 and the offset of - * tss to be in a1. This saves a few instructions as we no longer have - * to push them onto the stack and read them back right after. - * - * 02/17/96 - Jes Sorensen (jds@kom.auc.dk) - * - * Changed 96/09/19 by Andreas Schwab - * pass prev in a0, next in a1, offset of tss in d1, and whether - * the mm structures are shared in d2 (to avoid atc flushing). - * - * H8/300 Porting 2002/09/04 Yoshinori Sato - */ - -asmlinkage void resume(void); -#define switch_to(prev,next,last) { \ - void *_last; \ - __asm__ __volatile__( \ - "mov.l %1, er0\n\t" \ - "mov.l %2, er1\n\t" \ - "mov.l %3, er2\n\t" \ - "jsr @_resume\n\t" \ - "mov.l er2,%0\n\t" \ - : "=r" (_last) \ - : "r" (&(prev->thread)), \ - "r" (&(next->thread)), \ - "g" (prev) \ - : "cc", "er0", "er1", "er2", "er3"); \ - (last) = _last; \ -} - -#endif /* _H8300_SWITCH_TO_H */ diff --git a/arch/h8300/include/asm/target_time.h b/arch/h8300/include/asm/target_time.h deleted file mode 100644 index 9f2a9aa1fe6..00000000000 --- a/arch/h8300/include/asm/target_time.h +++ /dev/null @@ -1,4 +0,0 @@ -extern int platform_timer_setup(void (*timer_int)(int, void *, struct pt_regs *)); -extern void platform_timer_eoi(void); -extern void platform_gettod(unsigned int *year, unsigned int *mon, unsigned int *day, - unsigned int *hour, unsigned int *min, unsigned int *sec); diff --git a/arch/h8300/include/asm/termios.h b/arch/h8300/include/asm/termios.h deleted file mode 100644 index 93a63df5624..00000000000 --- a/arch/h8300/include/asm/termios.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef _H8300_TERMIOS_H -#define _H8300_TERMIOS_H - -#include <uapi/asm/termios.h> - -/* intr=^C quit=^| erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" - -/* - * Translate a "termio" structure into a "termios". Ugh. 
- */ -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - unsigned short tmp; \ - get_user(tmp, &(termio)->c_iflag); \ - (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ - get_user(tmp, &(termio)->c_oflag); \ - (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ - get_user(tmp, &(termio)->c_cflag); \ - (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ - get_user(tmp, &(termio)->c_lflag); \ - (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ - get_user((termios)->c_line, &(termio)->c_line); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif /* _H8300_TERMIOS_H */ diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h deleted file mode 100644 index ec2f7777c65..00000000000 --- a/arch/h8300/include/asm/thread_info.h +++ /dev/null @@ -1,103 +0,0 @@ -/* thread_info.h: h8300 low-level thread information - * adapted from the i386 and PPC versions by Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Copyright (C) 2002 David Howells (dhowells@redhat.com) - * - Incorporating suggestions made by Linus Torvalds and Dave Miller - */ - -#ifndef _ASM_THREAD_INFO_H -#define _ASM_THREAD_INFO_H - -#include <asm/page.h> - -#ifdef __KERNEL__ - -#ifndef __ASSEMBLY__ - -/* - * low level task data. - * If you change this, change the TI_* offsets below to match. - */ -struct thread_info { - struct task_struct *task; /* main task structure */ - struct exec_domain *exec_domain; /* execution domain */ - unsigned long flags; /* low level flags */ - int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - struct restart_block restart_block; -}; - -/* - * macros/functions for gaining access to the thread information structure - */ -#define INIT_THREAD_INFO(tsk) \ -{ \ - .task = &tsk, \ - .exec_domain = &default_exec_domain, \ - .flags = 0, \ - .cpu = 0, \ - .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ -} - -#define init_thread_info (init_thread_union.thread_info) -#define init_stack (init_thread_union.stack) - - -/* - * Size of kernel stack for each process. This must be a power of 2... 
- */ -#define THREAD_SIZE_ORDER 1 -#define THREAD_SIZE 8192 /* 2 pages */ - - -/* how to get the thread information struct from C */ -static inline struct thread_info *current_thread_info(void) -{ - struct thread_info *ti; - __asm__( - "mov.l sp, %0 \n\t" - "and.l %1, %0" - : "=&r"(ti) - : "i" (~(THREAD_SIZE-1)) - ); - return ti; -} - -#endif /* __ASSEMBLY__ */ - -/* - * Offsets in thread_info structure, used in assembly code - */ -#define TI_TASK 0 -#define TI_EXECDOMAIN 4 -#define TI_FLAGS 8 -#define TI_CPU 12 -#define TI_PRE_COUNT 16 - -#define PREEMPT_ACTIVE 0x4000000 - -/* - * thread information flag bit numbers - */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_SIGPENDING 1 /* signal pending */ -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_MEMDIE 4 /* is terminating due to OOM killer */ -#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ -#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */ - -/* as above, but as bit values */ -#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) -#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) -#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) - -#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_RESUME) - -#endif /* __KERNEL__ */ - -#endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/h8300/include/asm/timer.h b/arch/h8300/include/asm/timer.h deleted file mode 100644 index def80464d38..00000000000 --- a/arch/h8300/include/asm/timer.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __H8300_TIMER_H -#define __H8300_TIMER_H - -void h8300_timer_tick(void); -void h8300_timer_setup(void); -void h8300_gettod(unsigned int *year, unsigned int *mon, unsigned int *day, - unsigned int *hour, unsigned int *min, unsigned int *sec); - -#define TIMER_FREQ (CONFIG_CPU_CLOCK*10000) /* Timer input freq. */ - -#define calc_param(cnt, div, rate, limit) \ -do { \ - cnt = TIMER_FREQ / HZ; \ - for (div = 0; div < ARRAY_SIZE(divide_rate); div++) { \ - if (rate[div] == 0) \ - continue; \ - if ((cnt / rate[div]) > limit) \ - break; \ - } \ - if (div == ARRAY_SIZE(divide_rate)) \ - panic("Timer counter overflow"); \ - cnt /= divide_rate[div]; \ -} while(0) - -#endif diff --git a/arch/h8300/include/asm/timex.h b/arch/h8300/include/asm/timex.h deleted file mode 100644 index 23e67013439..00000000000 --- a/arch/h8300/include/asm/timex.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * linux/include/asm-h8300/timex.h - * - * H8/300 architecture timex specifications - */ -#ifndef _ASM_H8300_TIMEX_H -#define _ASM_H8300_TIMEX_H - -#define CLOCK_TICK_RATE (CONFIG_CPU_CLOCK*1000/8192) /* Timer input freq. 
*/ - -typedef unsigned long cycles_t; -extern short h8300_timer_count; - -static inline cycles_t get_cycles(void) -{ - return 0; -} - -#endif diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h deleted file mode 100644 index 7f0743051ad..00000000000 --- a/arch/h8300/include/asm/tlb.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __H8300_TLB_H__ -#define __H8300_TLB_H__ - -#define tlb_flush(tlb) do { } while(0) - -#include <asm-generic/tlb.h> - -#endif diff --git a/arch/h8300/include/asm/tlbflush.h b/arch/h8300/include/asm/tlbflush.h deleted file mode 100644 index 41c148a9208..00000000000 --- a/arch/h8300/include/asm/tlbflush.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef _H8300_TLBFLUSH_H -#define _H8300_TLBFLUSH_H - -/* - * Copyright (C) 2000 Lineo, David McCullough <davidm@uclinux.org> - * Copyright (C) 2000-2002, Greg Ungerer <gerg@snapgear.com> - */ - -#include <asm/setup.h> - -/* - * flush all user-space atc entries. - */ -static inline void __flush_tlb(void) -{ - BUG(); -} - -static inline void __flush_tlb_one(unsigned long addr) -{ - BUG(); -} - -#define flush_tlb() __flush_tlb() - -/* - * flush all atc entries (both kernel and user-space entries). - */ -static inline void flush_tlb_all(void) -{ - BUG(); -} - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - BUG(); -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) -{ - BUG(); -} - -static inline void flush_tlb_range(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - BUG(); -} - -static inline void flush_tlb_kernel_page(unsigned long addr) -{ - BUG(); -} - -#endif /* _H8300_TLBFLUSH_H */ diff --git a/arch/h8300/include/asm/topology.h b/arch/h8300/include/asm/topology.h deleted file mode 100644 index fdc121924d4..00000000000 --- a/arch/h8300/include/asm/topology.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_H8300_TOPOLOGY_H -#define _ASM_H8300_TOPOLOGY_H - -#include <asm-generic/topology.h> - -#endif /* _ASM_H8300_TOPOLOGY_H */ diff --git a/arch/h8300/include/asm/traps.h b/arch/h8300/include/asm/traps.h deleted file mode 100644 index 41cf6be02f6..00000000000 --- a/arch/h8300/include/asm/traps.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * linux/include/asm-h8300/traps.h - * - * Copyright (C) 2003 Yoshinori Sato <ysato@users.sourceforge.jp> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. 
- */ - -#ifndef _H8300_TRAPS_H -#define _H8300_TRAPS_H - -extern void system_call(void); -extern void interrupt_entry(void); -extern void trace_break(void); - -#define JMP_OP 0x5a000000 -#define JSR_OP 0x5e000000 -#define VECTOR(address) ((JMP_OP)|((unsigned long)address)) -#define REDIRECT(address) ((JSR_OP)|((unsigned long)address)) - -#define TRACE_VEC 5 - -#define TRAP0_VEC 8 -#define TRAP1_VEC 9 -#define TRAP2_VEC 10 -#define TRAP3_VEC 11 - -#if defined(__H8300H__) -#define NR_TRAPS 12 -#endif -#if defined(__H8300S__) -#define NR_TRAPS 16 -#endif - -#endif /* _H8300_TRAPS_H */ diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h deleted file mode 100644 index c012707f603..00000000000 --- a/arch/h8300/include/asm/types.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _H8300_TYPES_H -#define _H8300_TYPES_H - -#include <uapi/asm/types.h> - - -#define BITS_PER_LONG 32 - -#endif /* _H8300_TYPES_H */ diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h deleted file mode 100644 index 8725d1ad427..00000000000 --- a/arch/h8300/include/asm/uaccess.h +++ /dev/null @@ -1,163 +0,0 @@ -#ifndef __H8300_UACCESS_H -#define __H8300_UACCESS_H - -/* - * User space memory access functions - */ -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/string.h> - -#include <asm/segment.h> - -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - -/* We let the MMU do all checking */ -#define access_ok(type, addr, size) __access_ok((unsigned long)addr,size) -static inline int __access_ok(unsigned long addr, unsigned long size) -{ -#define RANGE_CHECK_OK(addr, size, lower, upper) \ - (((addr) >= (lower)) && (((addr) + (size)) < (upper))) - - extern unsigned long _ramend; - return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend)); -} - -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -/* Returns 0 if exception not found and fixup otherwise. */ -extern unsigned long search_exception_table(unsigned long); - - -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - */ - -#define put_user(x, ptr) \ -({ \ - int __pu_err = 0; \ - typeof(*(ptr)) __pu_val = (x); \ - switch (sizeof (*(ptr))) { \ - case 1: \ - case 2: \ - case 4: \ - *(ptr) = (__pu_val); \ - break; \ - case 8: \ - memcpy(ptr, &__pu_val, sizeof (*(ptr))); \ - break; \ - default: \ - __pu_err = __put_user_bad(); \ - break; \ - } \ - __pu_err; \ -}) -#define __put_user(x, ptr) put_user(x, ptr) - -extern int __put_user_bad(void); - -/* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. - */ - -#define __ptr(x) ((unsigned long *)(x)) - -/* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. 
- */ - -#define get_user(x, ptr) \ -({ \ - int __gu_err = 0; \ - typeof(*(ptr)) __gu_val = *ptr; \ - switch (sizeof(*(ptr))) { \ - case 1: \ - case 2: \ - case 4: \ - case 8: \ - break; \ - default: \ - __gu_err = __get_user_bad(); \ - break; \ - } \ - (x) = __gu_val; \ - __gu_err; \ -}) -#define __get_user(x, ptr) get_user(x, ptr) - -extern int __get_user_bad(void); - -#define copy_from_user(to, from, n) (memcpy(to, from, n), 0) -#define copy_to_user(to, from, n) (memcpy(to, from, n), 0) - -#define __copy_from_user(to, from, n) copy_from_user(to, from, n) -#define __copy_to_user(to, from, n) copy_to_user(to, from, n) -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - -#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; }) - -#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; }) - -/* - * Copy a null terminated string from userspace. - */ - -static inline long -strncpy_from_user(char *dst, const char *src, long count) -{ - char *tmp; - strncpy(dst, src, count); - for (tmp = dst; *tmp && count > 0; tmp++, count--) - ; - return(tmp - dst); /* DAVIDM should we count a NUL ? check getname */ -} - -/* - * Return the size of a string (including the ending 0) - * - * Return 0 on exception, a value greater than N if too long - */ -static inline long strnlen_user(const char *src, long n) -{ - return(strlen(src) + 1); /* DAVIDM make safer */ -} - -#define strlen_user(str) strnlen_user(str, 32767) - -/* - * Zero Userspace - */ - -static inline unsigned long -clear_user(void *to, unsigned long n) -{ - memset(to, 0, n); - return 0; -} - -#define __clear_user clear_user - -#endif /* _H8300_UACCESS_H */ diff --git a/arch/h8300/include/asm/ucontext.h b/arch/h8300/include/asm/ucontext.h deleted file mode 100644 index 0bcf8f85fab..00000000000 --- a/arch/h8300/include/asm/ucontext.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _H8300_UCONTEXT_H -#define _H8300_UCONTEXT_H - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct sigcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif diff --git a/arch/h8300/include/asm/unaligned.h b/arch/h8300/include/asm/unaligned.h deleted file mode 100644 index b8d06c70c2d..00000000000 --- a/arch/h8300/include/asm/unaligned.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ASM_H8300_UNALIGNED_H -#define _ASM_H8300_UNALIGNED_H - -#include <linux/unaligned/be_memmove.h> -#include <linux/unaligned/le_byteshift.h> -#include <linux/unaligned/generic.h> - -#define get_unaligned __get_unaligned_be -#define put_unaligned __put_unaligned_be - -#endif /* _ASM_H8300_UNALIGNED_H */ diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h deleted file mode 100644 index ab671ecf519..00000000000 --- a/arch/h8300/include/asm/unistd.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef _ASM_H8300_UNISTD_H_ -#define _ASM_H8300_UNISTD_H_ - -#include <uapi/asm/unistd.h> - - -#define NR_syscalls 321 - -#define __ARCH_WANT_OLD_READDIR -#define __ARCH_WANT_OLD_STAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_GETHOSTNAME -#define __ARCH_WANT_SYS_IPC -#define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK -#define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME -#define __ARCH_WANT_SYS_WAITPID -#define __ARCH_WANT_SYS_SOCKETCALL -#define __ARCH_WANT_SYS_FADVISE64 -#define __ARCH_WANT_SYS_GETPGRP 
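(Editorial aside, not part of the patch: because this port has no MMU, the uaccess.h removed above reduces access_ok() to a bounds check against the end of RAM, the linker symbol _ramend, and reduces copy_to_user()/copy_from_user() to plain memcpy(). A standalone sketch of that bounds check follows; the RAM size and the test addresses are assumed purely for illustration.)

#include <stdio.h>

/* The check used by the removed __access_ok(): an access of `size` bytes
 * at `addr` is allowed only if it starts at or above `lower` and ends
 * below `upper` (in the kernel, `upper` is the address of _ramend). */
#define RANGE_CHECK_OK(addr, size, lower, upper) \
        (((addr) >= (lower)) && (((addr) + (size)) < (upper)))

int main(void)
{
        unsigned long ramend = 0x00400000UL;    /* assumed 4 MiB RAM top */

        /* A 256-byte buffer well inside RAM is accepted (prints 1). */
        printf("%d\n", RANGE_CHECK_OK(0x00001000UL, 0x100UL, 0UL, ramend));
        /* A 256-byte buffer running past the end of RAM is rejected (prints 0). */
        printf("%d\n", RANGE_CHECK_OK(0x003FFF80UL, 0x100UL, 0UL, ramend));
        return 0;
}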
-#define __ARCH_WANT_SYS_LLSEEK -#define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLD_GETRLIMIT -#define __ARCH_WANT_SYS_OLD_MMAP -#define __ARCH_WANT_SYS_OLD_SELECT -#define __ARCH_WANT_SYS_OLDUMOUNT -#define __ARCH_WANT_SYS_SIGPENDING -#define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_FORK -#define __ARCH_WANT_SYS_VFORK -#define __ARCH_WANT_SYS_CLONE - -#endif /* _ASM_H8300_UNISTD_H_ */ diff --git a/arch/h8300/include/asm/user.h b/arch/h8300/include/asm/user.h deleted file mode 100644 index 14a9e18950f..00000000000 --- a/arch/h8300/include/asm/user.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _H8300_USER_H -#define _H8300_USER_H - -#include <asm/page.h> - -/* Core file format: The core file is written in such a way that gdb - can understand it and provide useful information to the user (under - linux we use the 'trad-core' bfd). There are quite a number of - obstacles to being able to view the contents of the floating point - registers, and until these are solved you will not be able to view the - contents of them. Actually, you can read in the core file and look at - the contents of the user struct to find out what the floating point - registers contain. - The actual file contents are as follows: - UPAGE: 1 page consisting of a user struct that tells gdb what is present - in the file. Directly after this is a copy of the task_struct, which - is currently not used by gdb, but it may come in useful at some point. - All of the registers are stored as part of the upage. The upage should - always be only one page. - DATA: The data area is stored. We use current->end_text to - current->brk to pick up all of the user variables, plus any memory - that may have been malloced. No attempt is made to determine if a page - is demand-zero or if a page is totally unused, we just cover the entire - range. All of the addresses are rounded in such a way that an integral - number of pages is written. - STACK: We need the stack information in order to get a meaningful - backtrace. We need to write the data from (esp) to - current->start_stack, so we round each of these off in order to be able - to write an integer number of pages. - The minimum core file size is 3 pages, or 12288 bytes. -*/ - -/* This is the old layout of "struct pt_regs" as of Linux 1.x, and - is still the layout used by user (the new pt_regs doesn't have - all registers). */ -struct user_regs_struct { - long er1,er2,er3,er4,er5,er6; - long er0; - long usp; - long orig_er0; - short ccr; - long pc; -}; - - -/* When the kernel dumps core, it starts by dumping the user struct - - this will be used by gdb to figure out where the data and stack segments - are within the file, and what virtual addresses to use. */ -struct user{ -/* We start with the registers, to mimic the way that "memory" is returned - from the ptrace(3,...) function. */ - struct user_regs_struct regs; /* Where the registers are actually stored */ -/* ptrace does not yet supply these. Someday.... */ -/* The rest of this junk is to help gdb figure out what goes where */ - unsigned long int u_tsize; /* Text segment size (pages). */ - unsigned long int u_dsize; /* Data segment size (pages). */ - unsigned long int u_ssize; /* Stack segment size (pages). */ - unsigned long start_code; /* Starting virtual address of text. */ - unsigned long start_stack; /* Starting virtual address of stack area. - This is actually the bottom of the stack, - the top of the stack is always found in the - esp register. */ - long int signal; /* Signal that caused the core dump. 
*/ - int reserved; /* No longer used */ - unsigned long u_ar0; /* Used by gdb to help find the values for */ - /* the registers. */ - unsigned long magic; /* To uniquely identify a core file */ - char u_comm[32]; /* User command that was responsible */ -}; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif diff --git a/arch/h8300/include/asm/virtconvert.h b/arch/h8300/include/asm/virtconvert.h deleted file mode 100644 index 19cfd62b11c..00000000000 --- a/arch/h8300/include/asm/virtconvert.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __H8300_VIRT_CONVERT__ -#define __H8300_VIRT_CONVERT__ - -/* - * Macros used for converting between virtual and physical mappings. - */ - -#ifdef __KERNEL__ - -#include <asm/setup.h> -#include <asm/page.h> - -#define phys_to_virt(vaddr) ((void *) (vaddr)) -#define virt_to_phys(vaddr) ((unsigned long) (vaddr)) - -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt - -#endif -#endif diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild deleted file mode 100644 index 040178cdb3e..00000000000 --- a/arch/h8300/include/uapi/asm/Kbuild +++ /dev/null @@ -1,34 +0,0 @@ -# UAPI Header export list -include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += byteorder.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += param.h -header-y += poll.h -header-y += posix_types.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += types.h -header-y += unistd.h diff --git a/arch/h8300/include/uapi/asm/auxvec.h b/arch/h8300/include/uapi/asm/auxvec.h deleted file mode 100644 index 1d36fe38b08..00000000000 --- a/arch/h8300/include/uapi/asm/auxvec.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef __ASMH8300_AUXVEC_H -#define __ASMH8300_AUXVEC_H - -#endif diff --git a/arch/h8300/include/uapi/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b..00000000000 --- a/arch/h8300/include/uapi/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/bitsperlong.h> diff --git a/arch/h8300/include/uapi/asm/byteorder.h b/arch/h8300/include/uapi/asm/byteorder.h deleted file mode 100644 index 13539da99ef..00000000000 --- a/arch/h8300/include/uapi/asm/byteorder.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_BYTEORDER_H -#define _H8300_BYTEORDER_H - -#include <linux/byteorder/big_endian.h> - -#endif /* _H8300_BYTEORDER_H */ diff --git a/arch/h8300/include/uapi/asm/errno.h b/arch/h8300/include/uapi/asm/errno.h deleted file mode 100644 index 0c2f5641fdc..00000000000 --- a/arch/h8300/include/uapi/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_ERRNO_H -#define _H8300_ERRNO_H - -#include <asm-generic/errno.h> - -#endif /* _H8300_ERRNO_H */ diff --git a/arch/h8300/include/uapi/asm/fcntl.h b/arch/h8300/include/uapi/asm/fcntl.h deleted file mode 100644 index 1952cb2e3b0..00000000000 --- a/arch/h8300/include/uapi/asm/fcntl.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _H8300_FCNTL_H -#define _H8300_FCNTL_H - -#define 
O_DIRECTORY 040000 /* must be a directory */ -#define O_NOFOLLOW 0100000 /* don't follow links */ -#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ -#define O_LARGEFILE 0400000 - -#include <asm-generic/fcntl.h> - -#endif /* _H8300_FCNTL_H */ diff --git a/arch/h8300/include/uapi/asm/ioctl.h b/arch/h8300/include/uapi/asm/ioctl.h deleted file mode 100644 index b279fe06dfe..00000000000 --- a/arch/h8300/include/uapi/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/arch/h8300/include/uapi/asm/ioctls.h b/arch/h8300/include/uapi/asm/ioctls.h deleted file mode 100644 index 30eaed2facd..00000000000 --- a/arch/h8300/include/uapi/asm/ioctls.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ARCH_H8300_IOCTLS_H__ -#define __ARCH_H8300_IOCTLS_H__ - -#define FIOQSIZE 0x545E - -#include <asm-generic/ioctls.h> - -#endif /* __ARCH_H8300_IOCTLS_H__ */ diff --git a/arch/h8300/include/uapi/asm/ipcbuf.h b/arch/h8300/include/uapi/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d..00000000000 --- a/arch/h8300/include/uapi/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ipcbuf.h> diff --git a/arch/h8300/include/uapi/asm/kvm_para.h b/arch/h8300/include/uapi/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b95..00000000000 --- a/arch/h8300/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kvm_para.h> diff --git a/arch/h8300/include/uapi/asm/mman.h b/arch/h8300/include/uapi/asm/mman.h deleted file mode 100644 index 8eebf89f5ab..00000000000 --- a/arch/h8300/include/uapi/asm/mman.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/mman.h> diff --git a/arch/h8300/include/uapi/asm/msgbuf.h b/arch/h8300/include/uapi/asm/msgbuf.h deleted file mode 100644 index 6b148cd09aa..00000000000 --- a/arch/h8300/include/uapi/asm/msgbuf.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _H8300_MSGBUF_H -#define _H8300_MSGBUF_H - -/* - * The msqid64_ds structure for H8/300 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - unsigned long __unused1; - __kernel_time_t msg_rtime; /* last msgrcv time */ - unsigned long __unused2; - __kernel_time_t msg_ctime; /* last change time */ - unsigned long __unused3; - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused4; - unsigned long __unused5; -}; - -#endif /* _H8300_MSGBUF_H */ diff --git a/arch/h8300/include/uapi/asm/param.h b/arch/h8300/include/uapi/asm/param.h deleted file mode 100644 index 3dd18ae15f0..00000000000 --- a/arch/h8300/include/uapi/asm/param.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _UAPI_H8300_PARAM_H -#define _UAPI_H8300_PARAM_H - -#ifndef __KERNEL__ -#define HZ 100 -#endif - -#define EXEC_PAGESIZE 4096 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - -#endif /* _UAPI_H8300_PARAM_H */ diff --git a/arch/h8300/include/uapi/asm/poll.h b/arch/h8300/include/uapi/asm/poll.h deleted file mode 100644 index f61540c22d9..00000000000 --- a/arch/h8300/include/uapi/asm/poll.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __H8300_POLL_H -#define __H8300_POLL_H - -#define POLLWRNORM POLLOUT -#define POLLWRBAND 256 - -#include <asm-generic/poll.h> - -#undef POLLREMOVE - -#endif diff --git a/arch/h8300/include/uapi/asm/posix_types.h b/arch/h8300/include/uapi/asm/posix_types.h deleted file mode 100644 index 91e62ba4c7b..00000000000 --- a/arch/h8300/include/uapi/asm/posix_types.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef __ARCH_H8300_POSIX_TYPES_H -#define __ARCH_H8300_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc. Also, we cannot - * assume GCC is being used. - */ - -typedef unsigned short __kernel_mode_t; -#define __kernel_mode_t __kernel_mode_t - -typedef unsigned short __kernel_ipc_pid_t; -#define __kernel_ipc_pid_t __kernel_ipc_pid_t - -typedef unsigned short __kernel_uid_t; -typedef unsigned short __kernel_gid_t; -#define __kernel_uid_t __kernel_uid_t - -typedef unsigned short __kernel_old_uid_t; -typedef unsigned short __kernel_old_gid_t; -#define __kernel_old_uid_t __kernel_old_uid_t - -#include <asm-generic/posix_types.h> - -#endif diff --git a/arch/h8300/include/uapi/asm/ptrace.h b/arch/h8300/include/uapi/asm/ptrace.h deleted file mode 100644 index ef39ec5977b..00000000000 --- a/arch/h8300/include/uapi/asm/ptrace.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _UAPI_H8300_PTRACE_H -#define _UAPI_H8300_PTRACE_H - -#ifndef __ASSEMBLY__ - -#define PT_ER1 0 -#define PT_ER2 1 -#define PT_ER3 2 -#define PT_ER4 3 -#define PT_ER5 4 -#define PT_ER6 5 -#define PT_ER0 6 -#define PT_ORIG_ER0 7 -#define PT_CCR 8 -#define PT_PC 9 -#define PT_USP 10 -#define PT_EXR 12 - -/* this struct defines the way the registers are stored on the - stack during a system call. 
*/ - -struct pt_regs { - long retpc; - long er4; - long er5; - long er6; - long er3; - long er2; - long er1; - long orig_er0; - unsigned short ccr; - long er0; - long vector; -#if defined(CONFIG_CPU_H8S) - unsigned short exr; -#endif - unsigned long pc; -} __attribute__((aligned(2),packed)); - -#define PTRACE_GETREGS 12 -#define PTRACE_SETREGS 13 - -#endif /* __ASSEMBLY__ */ -#endif /* _UAPI_H8300_PTRACE_H */ diff --git a/arch/h8300/include/uapi/asm/resource.h b/arch/h8300/include/uapi/asm/resource.h deleted file mode 100644 index 46c5f439160..00000000000 --- a/arch/h8300/include/uapi/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_RESOURCE_H -#define _H8300_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif /* _H8300_RESOURCE_H */ diff --git a/arch/h8300/include/uapi/asm/sembuf.h b/arch/h8300/include/uapi/asm/sembuf.h deleted file mode 100644 index e04a3ec0cb9..00000000000 --- a/arch/h8300/include/uapi/asm/sembuf.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _H8300_SEMBUF_H -#define _H8300_SEMBUF_H - -/* - * The semid64_ds structure for m68k architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - unsigned long __unused1; - __kernel_time_t sem_ctime; /* last change time */ - unsigned long __unused2; - unsigned long sem_nsems; /* no. of semaphores in array */ - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _H8300_SEMBUF_H */ diff --git a/arch/h8300/include/uapi/asm/setup.h b/arch/h8300/include/uapi/asm/setup.h deleted file mode 100644 index e2c600e9673..00000000000 --- a/arch/h8300/include/uapi/asm/setup.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __H8300_SETUP_H -#define __H8300_SETUP_H - -#define COMMAND_LINE_SIZE 512 - -#endif diff --git a/arch/h8300/include/uapi/asm/shmbuf.h b/arch/h8300/include/uapi/asm/shmbuf.h deleted file mode 100644 index 64e77993a7a..00000000000 --- a/arch/h8300/include/uapi/asm/shmbuf.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _H8300_SHMBUF_H -#define _H8300_SHMBUF_H - -/* - * The shmid64_ds structure for m68k architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - unsigned long __unused1; - __kernel_time_t shm_dtime; /* last detach time */ - unsigned long __unused2; - __kernel_time_t shm_ctime; /* last change time */ - unsigned long __unused3; - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. 
of current attaches */ - unsigned long __unused4; - unsigned long __unused5; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _H8300_SHMBUF_H */ diff --git a/arch/h8300/include/uapi/asm/sigcontext.h b/arch/h8300/include/uapi/asm/sigcontext.h deleted file mode 100644 index e4b81505f8f..00000000000 --- a/arch/h8300/include/uapi/asm/sigcontext.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _ASM_H8300_SIGCONTEXT_H -#define _ASM_H8300_SIGCONTEXT_H - -struct sigcontext { - unsigned long sc_mask; /* old sigmask */ - unsigned long sc_usp; /* old user stack pointer */ - unsigned long sc_er0; - unsigned long sc_er1; - unsigned long sc_er2; - unsigned long sc_er3; - unsigned long sc_er4; - unsigned long sc_er5; - unsigned long sc_er6; - unsigned short sc_ccr; - unsigned long sc_pc; -}; - -#endif diff --git a/arch/h8300/include/uapi/asm/siginfo.h b/arch/h8300/include/uapi/asm/siginfo.h deleted file mode 100644 index bc8fbea931a..00000000000 --- a/arch/h8300/include/uapi/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_SIGINFO_H -#define _H8300_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/h8300/include/uapi/asm/signal.h b/arch/h8300/include/uapi/asm/signal.h deleted file mode 100644 index af3a6c37fee..00000000000 --- a/arch/h8300/include/uapi/asm/signal.h +++ /dev/null @@ -1,115 +0,0 @@ -#ifndef _UAPI_H8300_SIGNAL_H -#define _UAPI_H8300_SIGNAL_H - -#include <linux/types.h> - -/* Avoid too many header ordering problems. */ -struct siginfo; - -#ifndef __KERNEL__ -/* Here we must cater to libcs that poke about in kernel headers. */ - -#define NSIG 32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGIOT 6 -#define SIGBUS 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGUSR1 10 -#define SIGSEGV 11 -#define SIGUSR2 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGSTKFLT 16 -#define SIGCHLD 17 -#define SIGCONT 18 -#define SIGSTOP 19 -#define SIGTSTP 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGURG 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGIO 29 -#define SIGPOLL SIGIO -/* -#define SIGLOST 29 -*/ -#define SIGPWR 30 -#define SIGSYS 31 -#define SIGUNUSED 31 - -/* These should not be considered constants from userland. */ -#define SIGRTMIN 32 -#define SIGRTMAX _NSIG - -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. 
- */ -#define SA_NOCLDSTOP 0x00000001 -#define SA_NOCLDWAIT 0x00000002 /* not supported yet */ -#define SA_SIGINFO 0x00000004 -#define SA_ONSTACK 0x08000000 -#define SA_RESTART 0x10000000 -#define SA_NODEFER 0x40000000 -#define SA_RESETHAND 0x80000000 - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - -#define SA_RESTORER 0x04000000 - -#define MINSIGSTKSZ 2048 -#define SIGSTKSZ 8192 - -#include <asm-generic/signal-defs.h> - -#ifndef __KERNEL__ -/* Here we must cater to libcs that poke about in kernel headers. */ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - - -#endif /* _UAPI_H8300_SIGNAL_H */ diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h deleted file mode 100644 index 9490758c5e2..00000000000 --- a/arch/h8300/include/uapi/asm/socket.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef _ASM_SOCKET_H -#define _ASM_SOCKET_H - -#include <asm/sockios.h> - -/* For setsockoptions(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -#define SO_REUSEPORT 15 -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 -#define SO_GET_FILTER SO_ATTACH_FILTER - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#define SO_LOCK_FILTER 44 - -#define SO_SELECT_ERR_QUEUE 45 - -#define SO_BUSY_POLL 46 - -#endif /* _ASM_SOCKET_H */ diff --git a/arch/h8300/include/uapi/asm/sockios.h b/arch/h8300/include/uapi/asm/sockios.h deleted file mode 100644 index e9c7ec810c2..00000000000 --- a/arch/h8300/include/uapi/asm/sockios.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ARCH_H8300_SOCKIOS__ -#define __ARCH_H8300_SOCKIOS__ - -/* Socket-level I/O control calls. 
*/ -#define FIOSETOWN 0x8901 -#define SIOCSPGRP 0x8902 -#define FIOGETOWN 0x8903 -#define SIOCGPGRP 0x8904 -#define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ - -#endif /* __ARCH_H8300_SOCKIOS__ */ diff --git a/arch/h8300/include/uapi/asm/stat.h b/arch/h8300/include/uapi/asm/stat.h deleted file mode 100644 index 62c3cc24dfe..00000000000 --- a/arch/h8300/include/uapi/asm/stat.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef _H8300_STAT_H -#define _H8300_STAT_H - -struct __old_kernel_stat { - unsigned short st_dev; - unsigned short st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned long st_size; - unsigned long st_atime; - unsigned long st_mtime; - unsigned long st_ctime; -}; - -struct stat { - unsigned short st_dev; - unsigned short __pad1; - unsigned long st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned short __pad2; - unsigned long st_size; - unsigned long st_blksize; - unsigned long st_blocks; - unsigned long st_atime; - unsigned long __unused1; - unsigned long st_mtime; - unsigned long __unused2; - unsigned long st_ctime; - unsigned long __unused3; - unsigned long __unused4; - unsigned long __unused5; -}; - -/* This matches struct stat64 in glibc2.1, hence the absolutely - * insane amounts of padding around dev_t's. - */ -struct stat64 { - unsigned long long st_dev; - unsigned char __pad1[2]; - -#define STAT64_HAS_BROKEN_ST_INO 1 - unsigned long __st_ino; - - unsigned int st_mode; - unsigned int st_nlink; - - unsigned long st_uid; - unsigned long st_gid; - - unsigned long long st_rdev; - unsigned char __pad3[2]; - - long long st_size; - unsigned long st_blksize; - - unsigned long __pad4; /* future possible st_blocks high bits */ - unsigned long st_blocks; /* Number 512-byte blocks allocated. 
*/ - - unsigned long st_atime; - unsigned long st_atime_nsec; - - unsigned long st_mtime; - unsigned long st_mtime_nsec; - - unsigned long st_ctime; - unsigned long st_ctime_nsec; - - unsigned long long st_ino; -}; - -#endif /* _H8300_STAT_H */ diff --git a/arch/h8300/include/uapi/asm/statfs.h b/arch/h8300/include/uapi/asm/statfs.h deleted file mode 100644 index b96efa712aa..00000000000 --- a/arch/h8300/include/uapi/asm/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _H8300_STATFS_H -#define _H8300_STATFS_H - -#include <asm-generic/statfs.h> - -#endif /* _H8300_STATFS_H */ diff --git a/arch/h8300/include/uapi/asm/swab.h b/arch/h8300/include/uapi/asm/swab.h deleted file mode 100644 index 39abbf52807..00000000000 --- a/arch/h8300/include/uapi/asm/swab.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _H8300_SWAB_H -#define _H8300_SWAB_H - -#include <linux/types.h> - -#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) -# define __SWAB_64_THRU_32__ -#endif - -#endif /* _H8300_SWAB_H */ diff --git a/arch/h8300/include/uapi/asm/termbits.h b/arch/h8300/include/uapi/asm/termbits.h deleted file mode 100644 index 3287a6244d7..00000000000 --- a/arch/h8300/include/uapi/asm/termbits.h +++ /dev/null @@ -1,201 +0,0 @@ -#ifndef __ARCH_H8300_TERMBITS_H__ -#define __ARCH_H8300_TERMBITS_H__ - -#include <linux/posix_types.h> - -typedef unsigned char cc_t; -typedef unsigned int speed_t; -typedef unsigned int tcflag_t; - -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ -}; - -struct termios2 { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -struct ktermios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -/* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - - -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IUCLC 0001000 -#define IXON 0002000 -#define IXANY 0004000 -#define IXOFF 0010000 -#define IMAXBEL 0020000 -#define IUTF8 0040000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define OLCUC 0000002 -#define ONLCR 0000004 -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 -#define OFILL 0000100 -#define OFDEL 0000200 -#define NLDLY 0000400 -#define NL0 0000000 -#define NL1 0000400 -#define CRDLY 0003000 -#define CR0 0000000 -#define CR1 0001000 -#define CR2 0002000 -#define CR3 0003000 -#define TABDLY 
0014000 -#define TAB0 0000000 -#define TAB1 0004000 -#define TAB2 0010000 -#define TAB3 0014000 -#define XTABS 0014000 -#define BSDLY 0020000 -#define BS0 0000000 -#define BS1 0020000 -#define VTDLY 0040000 -#define VT0 0000000 -#define VT1 0040000 -#define FFDLY 0100000 -#define FF0 0000000 -#define FF1 0100000 - -/* c_cflag bit meaning */ -#define CBAUD 0010017 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CSIZE 0000060 -#define CS5 0000000 -#define CS6 0000020 -#define CS7 0000040 -#define CS8 0000060 -#define CSTOPB 0000100 -#define CREAD 0000200 -#define PARENB 0000400 -#define PARODD 0001000 -#define HUPCL 0002000 -#define CLOCAL 0004000 -#define CBAUDEX 0010000 -#define BOTHER 0010000 -#define B57600 0010001 -#define B115200 0010002 -#define B230400 0010003 -#define B460800 0010004 -#define B500000 0010005 -#define B576000 0010006 -#define B921600 0010007 -#define B1000000 0010010 -#define B1152000 0010011 -#define B1500000 0010012 -#define B2000000 0010013 -#define B2500000 0010014 -#define B3000000 0010015 -#define B3500000 0010016 -#define B4000000 0010017 -#define CIBAUD 002003600000 /* input baud rate */ -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -#define IBSHIFT 16 /* shift from CBAUD to CIBAUD */ - -/* c_lflag bits */ -#define ISIG 0000001 -#define ICANON 0000002 -#define XCASE 0000004 -#define ECHO 0000010 -#define ECHOE 0000020 -#define ECHOK 0000040 -#define ECHONL 0000100 -#define NOFLSH 0000200 -#define TOSTOP 0000400 -#define ECHOCTL 0001000 -#define ECHOPRT 0002000 -#define ECHOKE 0004000 -#define FLUSHO 0010000 -#define PENDIN 0040000 -#define IEXTEN 0100000 -#define EXTPROC 0200000 - - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* tcsetattr uses these */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 - -#endif /* __ARCH_H8300_TERMBITS_H__ */ diff --git a/arch/h8300/include/uapi/asm/termios.h b/arch/h8300/include/uapi/asm/termios.h deleted file mode 100644 index 5a67d7e3884..00000000000 --- a/arch/h8300/include/uapi/asm/termios.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _UAPI_H8300_TERMIOS_H -#define _UAPI_H8300_TERMIOS_H - -#include <asm/termbits.h> -#include <asm/ioctls.h> - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG -#define TIOCM_OUT1 
0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - - -#endif /* _UAPI_H8300_TERMIOS_H */ diff --git a/arch/h8300/include/uapi/asm/types.h b/arch/h8300/include/uapi/asm/types.h deleted file mode 100644 index 9ec9d4c5ac4..00000000000 --- a/arch/h8300/include/uapi/asm/types.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/int-ll64.h> diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h deleted file mode 100644 index 8cb5d429f84..00000000000 --- a/arch/h8300/include/uapi/asm/unistd.h +++ /dev/null @@ -1,330 +0,0 @@ -#ifndef _UAPI_ASM_H8300_UNISTD_H_ -#define _UAPI_ASM_H8300_UNISTD_H_ - -/* - * This file contains the system call numbers. - */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_lchown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl 110 -#define __NR_vhangup 111 -#define __NR_idle 112 -#define __NR_vm86old 113 
-#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_modify_ldt 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_vm86 166 -#define __NR_query_module 167 -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_chown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 -#define __NR_getpmsg 188 /* some people actually want streams */ -#define __NR_putpmsg 189 /* some people actually want streams */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_lchown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_chown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 -#define __NR_pivot_root 217 -#define __NR_mincore 218 -#define __NR_madvise 219 -#define __NR_madvise1 219 -#define __NR_getdents64 220 -#define __NR_fcntl64 221 -/* 223 is unused */ -#define __NR_gettid 224 -#define __NR_readahead 225 -#define __NR_setxattr 226 -#define __NR_lsetxattr 227 -#define __NR_fsetxattr 228 -#define __NR_getxattr 229 -#define __NR_lgetxattr 230 -#define __NR_fgetxattr 231 -#define __NR_listxattr 232 -#define __NR_llistxattr 233 -#define __NR_flistxattr 234 -#define __NR_removexattr 235 -#define 
__NR_lremovexattr 236 -#define __NR_fremovexattr 237 -#define __NR_tkill 238 -#define __NR_sendfile64 239 -#define __NR_futex 240 -#define __NR_sched_setaffinity 241 -#define __NR_sched_getaffinity 242 -#define __NR_set_thread_area 243 -#define __NR_get_thread_area 244 -#define __NR_io_setup 245 -#define __NR_io_destroy 246 -#define __NR_io_getevents 247 -#define __NR_io_submit 248 -#define __NR_io_cancel 249 -#define __NR_fadvise64 250 -/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ -#define __NR_exit_group 252 -#define __NR_lookup_dcookie 253 -#define __NR_epoll_create 254 -#define __NR_epoll_ctl 255 -#define __NR_epoll_wait 256 -#define __NR_remap_file_pages 257 -#define __NR_set_tid_address 258 -#define __NR_timer_create 259 -#define __NR_timer_settime (__NR_timer_create+1) -#define __NR_timer_gettime (__NR_timer_create+2) -#define __NR_timer_getoverrun (__NR_timer_create+3) -#define __NR_timer_delete (__NR_timer_create+4) -#define __NR_clock_settime (__NR_timer_create+5) -#define __NR_clock_gettime (__NR_timer_create+6) -#define __NR_clock_getres (__NR_timer_create+7) -#define __NR_clock_nanosleep (__NR_timer_create+8) -#define __NR_statfs64 268 -#define __NR_fstatfs64 269 -#define __NR_tgkill 270 -#define __NR_utimes 271 -#define __NR_fadvise64_64 272 -#define __NR_vserver 273 -#define __NR_mbind 274 -#define __NR_get_mempolicy 275 -#define __NR_set_mempolicy 276 -#define __NR_mq_open 277 -#define __NR_mq_unlink (__NR_mq_open+1) -#define __NR_mq_timedsend (__NR_mq_open+2) -#define __NR_mq_timedreceive (__NR_mq_open+3) -#define __NR_mq_notify (__NR_mq_open+4) -#define __NR_mq_getsetattr (__NR_mq_open+5) -#define __NR_kexec_load 283 -#define __NR_waitid 284 -/* #define __NR_sys_setaltroot 285 */ -#define __NR_add_key 286 -#define __NR_request_key 287 -#define __NR_keyctl 288 -#define __NR_ioprio_set 289 -#define __NR_ioprio_get 290 -#define __NR_inotify_init 291 -#define __NR_inotify_add_watch 292 -#define __NR_inotify_rm_watch 293 -#define __NR_migrate_pages 294 -#define __NR_openat 295 -#define __NR_mkdirat 296 -#define __NR_mknodat 297 -#define __NR_fchownat 298 -#define __NR_futimesat 299 -#define __NR_fstatat64 300 -#define __NR_unlinkat 301 -#define __NR_renameat 302 -#define __NR_linkat 303 -#define __NR_symlinkat 304 -#define __NR_readlinkat 305 -#define __NR_fchmodat 306 -#define __NR_faccessat 307 -#define __NR_pselect6 308 -#define __NR_ppoll 309 -#define __NR_unshare 310 -#define __NR_set_robust_list 311 -#define __NR_get_robust_list 312 -#define __NR_splice 313 -#define __NR_sync_file_range 314 -#define __NR_tee 315 -#define __NR_vmsplice 316 -#define __NR_move_pages 317 -#define __NR_getcpu 318 -#define __NR_epoll_pwait 319 -#define __NR_setns 320 - -#endif /* _UAPI_ASM_H8300_UNISTD_H_ */ diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile deleted file mode 100644 index 1cc57f872d3..00000000000 --- a/arch/h8300/kernel/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -# -# Makefile for the linux kernel. -# - -extra-y := vmlinux.lds - -obj-y := process.o traps.o ptrace.o irq.o \ - sys_h8300.o time.o signal.o \ - setup.o gpio.o syscalls.o \ - entry.o timer/ - -obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c deleted file mode 100644 index fd961e0bd74..00000000000 --- a/arch/h8300/kernel/asm-offsets.c +++ /dev/null @@ -1,60 +0,0 @@ -/* - * This program is used to generate definitions needed by - * assembly language modules. 
- * - * We use the technique used in the OSF Mach kernel code: - * generate asm statements containing #defines, - * compile this file to assembler, and then extract the - * #defines from the assembly-language output. - */ - -#include <linux/stddef.h> -#include <linux/sched.h> -#include <linux/kernel_stat.h> -#include <linux/ptrace.h> -#include <linux/hardirq.h> -#include <linux/kbuild.h> -#include <asm/bootinfo.h> -#include <asm/irq.h> -#include <asm/ptrace.h> - -int main(void) -{ - /* offsets into the task struct */ - DEFINE(TASK_STATE, offsetof(struct task_struct, state)); - DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); - DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); - DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); - DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); - DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); - DEFINE(TASK_MM, offsetof(struct task_struct, mm)); - DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); - - /* offsets into the irq_cpustat_t struct */ - DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); - - /* offsets into the thread struct */ - DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); - DEFINE(THREAD_USP, offsetof(struct thread_struct, usp)); - DEFINE(THREAD_CCR, offsetof(struct thread_struct, ccr)); - - /* offsets into the pt_regs struct */ - DEFINE(LER0, offsetof(struct pt_regs, er0) - sizeof(long)); - DEFINE(LER1, offsetof(struct pt_regs, er1) - sizeof(long)); - DEFINE(LER2, offsetof(struct pt_regs, er2) - sizeof(long)); - DEFINE(LER3, offsetof(struct pt_regs, er3) - sizeof(long)); - DEFINE(LER4, offsetof(struct pt_regs, er4) - sizeof(long)); - DEFINE(LER5, offsetof(struct pt_regs, er5) - sizeof(long)); - DEFINE(LER6, offsetof(struct pt_regs, er6) - sizeof(long)); - DEFINE(LORIG, offsetof(struct pt_regs, orig_er0) - sizeof(long)); - DEFINE(LCCR, offsetof(struct pt_regs, ccr) - sizeof(long)); - DEFINE(LVEC, offsetof(struct pt_regs, vector) - sizeof(long)); -#if defined(__H8300S__) - DEFINE(LEXR, offsetof(struct pt_regs, exr) - sizeof(long)); -#endif - DEFINE(LRET, offsetof(struct pt_regs, pc) - sizeof(long)); - - DEFINE(PT_PTRACED, PT_PTRACED); - - return 0; -} diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S deleted file mode 100644 index 94bd30f11df..00000000000 --- a/arch/h8300/kernel/entry.S +++ /dev/null @@ -1,402 +0,0 @@ -/* -*- mode: asm -*- - * - * linux/arch/h8300/platform/h8300h/entry.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * David McCullough <davidm@snapgear.com> - * - */ - -/* - * entry.S - * include exception/interrupt gateway - * system call entry - */ - -#include <linux/sys.h> -#include <asm/unistd.h> -#include <asm/setup.h> -#include <asm/segment.h> -#include <asm/linkage.h> -#include <asm/asm-offsets.h> -#include <asm/thread_info.h> -#include <asm/errno.h> - -#if defined(CONFIG_CPU_H8300H) -#define USERRET 8 -INTERRUPTS = 64 - .h8300h - .macro SHLL2 reg - shll.l \reg - shll.l \reg - .endm - .macro SHLR2 reg - shlr.l \reg - shlr.l \reg - .endm - .macro SAVEREGS - mov.l er0,@-sp - mov.l er1,@-sp - mov.l er2,@-sp - mov.l er3,@-sp - .endm - .macro RESTOREREGS - mov.l @sp+,er3 - mov.l @sp+,er2 - .endm - .macro SAVEEXR - .endm - .macro RESTOREEXR - .endm -#endif -#if defined(CONFIG_CPU_H8S) -#define USERRET 10 -#define USEREXR 8 -INTERRUPTS = 128 - .h8300s - .macro SHLL2 reg - shll.l #2,\reg - .endm - .macro SHLR2 reg - shlr.l #2,\reg - .endm - .macro SAVEREGS - stm.l er0-er3,@-sp - .endm - .macro RESTOREREGS 
- ldm.l @sp+,er2-er3 - .endm - .macro SAVEEXR - mov.w @(USEREXR:16,er0),r1 - mov.w r1,@(LEXR-LER3:16,sp) /* copy EXR */ - .endm - .macro RESTOREEXR - mov.w @(LEXR-LER1:16,sp),r1 /* restore EXR */ - mov.b r1l,r1h - mov.w r1,@(USEREXR:16,er0) - .endm -#endif - - -/* CPU context save/restore macros. */ - - .macro SAVE_ALL - mov.l er0,@-sp - stc ccr,r0l /* check kernel mode */ - btst #4,r0l - bne 5f - - /* user mode */ - mov.l sp,@_sw_usp - mov.l @sp,er0 /* restore saved er0 */ - orc #0x10,ccr /* switch kernel stack */ - mov.l @_sw_ksp,sp - sub.l #(LRET-LORIG),sp /* allocate LORIG - LRET */ - SAVEREGS - mov.l @_sw_usp,er0 - mov.l @(USERRET:16,er0),er1 /* copy the RET addr */ - mov.l er1,@(LRET-LER3:16,sp) - SAVEEXR - - mov.l @(LORIG-LER3:16,sp),er0 - mov.l er0,@(LER0-LER3:16,sp) /* copy ER0 */ - mov.w e1,r1 /* e1 highbyte = ccr */ - and #0xef,r1h /* mask mode? flag */ - bra 6f -5: - /* kernel mode */ - mov.l @sp,er0 /* restore saved er0 */ - subs #2,sp /* set dummy ccr */ - SAVEREGS - mov.w @(LRET-LER3:16,sp),r1 /* copy old ccr */ -6: - mov.b r1h,r1l - mov.b #0,r1h - mov.w r1,@(LCCR-LER3:16,sp) /* set ccr */ - mov.l er6,@-sp /* syscall arg #6 */ - mov.l er5,@-sp /* syscall arg #5 */ - mov.l er4,@-sp /* syscall arg #4 */ - .endm /* r1 = ccr */ - - .macro RESTORE_ALL - mov.l @sp+,er4 - mov.l @sp+,er5 - mov.l @sp+,er6 - RESTOREREGS - mov.w @(LCCR-LER1:16,sp),r0 /* check kernel mode */ - btst #4,r0l - bne 7f - - orc #0x80,ccr - mov.l @_sw_usp,er0 - mov.l @(LER0-LER1:16,sp),er1 /* restore ER0 */ - mov.l er1,@er0 - RESTOREEXR - mov.w @(LCCR-LER1:16,sp),r1 /* restore the RET addr */ - mov.b r1l,r1h - mov.b @(LRET+1-LER1:16,sp),r1l - mov.w r1,e1 - mov.w @(LRET+2-LER1:16,sp),r1 - mov.l er1,@(USERRET:16,er0) - - mov.l @sp+,er1 - add.l #(LRET-LER1),sp /* remove LORIG - LRET */ - mov.l sp,@_sw_ksp - andc #0xef,ccr /* switch to user mode */ - mov.l er0,sp - bra 8f -7: - mov.l @sp+,er1 - adds #4,sp - adds #2,sp -8: - mov.l @sp+,er0 - adds #4,sp /* remove the sw created LVEC */ - rte - .endm - -.globl _system_call -.globl _ret_from_exception -.globl _ret_from_fork -.globl _ret_from_kernel_thread -.globl _ret_from_interrupt -.globl _interrupt_redirect_table -.globl _sw_ksp,_sw_usp -.globl _resume -.globl _interrupt_entry -.globl _trace_break - -#if defined(CONFIG_ROMKERNEL) - .section .int_redirect,"ax" -_interrupt_redirect_table: -#if defined(CONFIG_CPU_H8300H) - .rept 7 - .long 0 - .endr -#endif -#if defined(CONFIG_CPU_H8S) - .rept 5 - .long 0 - .endr - jmp @_trace_break - .long 0 -#endif - - jsr @_interrupt_entry /* NMI */ - jmp @_system_call /* TRAPA #0 (System call) */ - .long 0 - .long 0 - jmp @_trace_break /* TRAPA #3 (breakpoint) */ - .rept INTERRUPTS-12 - jsr @_interrupt_entry - .endr -#endif -#if defined(CONFIG_RAMKERNEL) -.globl _interrupt_redirect_table - .section .bss -_interrupt_redirect_table: - .space 4 -#endif - - .section .text - .align 2 -_interrupt_entry: - SAVE_ALL - mov.l sp,er0 - add.l #LVEC,er0 - btst #4,r1l - bne 1f - /* user LVEC */ - mov.l @_sw_usp,er0 - adds #4,er0 -1: - mov.l @er0,er0 /* LVEC address */ -#if defined(CONFIG_ROMKERNEL) - sub.l #_interrupt_redirect_table,er0 -#endif -#if defined(CONFIG_RAMKERNEL) - mov.l @_interrupt_redirect_table,er1 - sub.l er1,er0 -#endif - SHLR2 er0 - dec.l #1,er0 - mov.l sp,er1 - subs #4,er1 /* adjust ret_pc */ - jsr @_do_IRQ - jmp @_ret_from_interrupt - -_system_call: - subs #4,sp /* dummy LVEC */ - SAVE_ALL - andc #0x7f,ccr - mov.l er0,er4 - - /* save top of frame */ - mov.l sp,er0 - jsr @_set_esp0 - mov.l sp,er2 - and.w #0xe000,r2 - mov.b 
@((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l - btst #(TIF_SYSCALL_TRACE & 7),r2l - beq 1f - jsr @_do_syscall_trace -1: - cmp.l #NR_syscalls,er4 - bcc badsys - SHLL2 er4 - mov.l #_sys_call_table,er0 - add.l er4,er0 - mov.l @er0,er4 - beq _ret_from_exception:16 - mov.l @(LER1:16,sp),er0 - mov.l @(LER2:16,sp),er1 - mov.l @(LER3:16,sp),er2 - jsr @er4 - mov.l er0,@(LER0:16,sp) /* save the return value */ - mov.l sp,er2 - and.w #0xe000,r2 - mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l - btst #(TIF_SYSCALL_TRACE & 7),r2l - beq 2f - jsr @_do_syscall_trace -2: -#if defined(CONFIG_SYSCALL_PRINT) - jsr @_syscall_print -#endif - orc #0x80,ccr - bra resume_userspace - -badsys: - mov.l #-ENOSYS,er0 - mov.l er0,@(LER0:16,sp) - bra resume_userspace - -#if !defined(CONFIG_PREEMPT) -#define resume_kernel restore_all -#endif - -_ret_from_exception: -#if defined(CONFIG_PREEMPT) - orc #0x80,ccr -#endif -_ret_from_interrupt: - mov.b @(LCCR+1:16,sp),r0l - btst #4,r0l - bne resume_kernel:8 /* return from kernel */ -resume_userspace: - andc #0x7f,ccr - mov.l sp,er4 - and.w #0xe000,r4 /* er4 <- current thread info */ - mov.l @(TI_FLAGS:16,er4),er1 - and.l #_TIF_WORK_MASK,er1 - beq restore_all:8 -work_pending: - btst #TIF_NEED_RESCHED,r1l - bne work_resched:8 - /* work notifysig */ - mov.l sp,er0 - subs #4,er0 /* er0: pt_regs */ - jsr @_do_notify_resume - bra restore_all:8 -work_resched: - mov.l sp,er0 - jsr @_set_esp0 - jsr @_schedule - bra resume_userspace:8 -restore_all: - RESTORE_ALL /* Does RTE */ - -#if defined(CONFIG_PREEMPT) -resume_kernel: - mov.l @(TI_PRE_COUNT:16,er4),er0 - bne restore_all:8 -need_resched: - mov.l @(TI_FLAGS:16,er4),er0 - btst #TIF_NEED_RESCHED,r0l - beq restore_all:8 - mov.b @(LCCR+1:16,sp),r0l /* Interrupt Enabled? */ - bmi restore_all:8 - mov.l #PREEMPT_ACTIVE,er0 - mov.l er0,@(TI_PRE_COUNT:16,er4) - andc #0x7f,ccr - mov.l sp,er0 - jsr @_set_esp0 - jsr @_schedule - orc #0x80,ccr - bra need_resched:8 -#endif - -_ret_from_fork: - mov.l er2,er0 - jsr @_schedule_tail - jmp @_ret_from_exception - -_ret_from_kernel_thread: - mov.l er2,er0 - jsr @_schedule_tail - mov.l @(LER4:16,sp),er0 - mov.l @(LER5:16,sp),er1 - jsr @er1 - jmp @_ret_from_exception - -_resume: - /* - * Beware - when entering resume, offset of tss is in d1, - * prev (the current task) is in a0, next (the new task) - * is in a1 and d2.b is non-zero if the mm structure is - * shared between the tasks, so don't change these - * registers until their contents are no longer needed. - */ - - /* save sr */ - sub.w r3,r3 - stc ccr,r3l - mov.w r3,@(THREAD_CCR+2:16,er0) - - /* disable interrupts */ - orc #0x80,ccr - mov.l @_sw_usp,er3 - mov.l er3,@(THREAD_USP:16,er0) - mov.l sp,@(THREAD_KSP:16,er0) - - /* Skip address space switching if they are the same. */ - /* FIXME: what did we hack out of here, this does nothing! 
*/ - - mov.l @(THREAD_USP:16,er1),er0 - mov.l er0,@_sw_usp - mov.l @(THREAD_KSP:16,er1),sp - - /* restore status register */ - mov.w @(THREAD_CCR+2:16,er1),r3 - - ldc r3l,ccr - rts - -_trace_break: - subs #4,sp - SAVE_ALL - sub.l er1,er1 - dec.l #1,er1 - mov.l er1,@(LORIG,sp) - mov.l sp,er0 - jsr @_set_esp0 - mov.l @_sw_usp,er0 - mov.l @er0,er1 - mov.w @(-2:16,er1),r2 - cmp.w #0x5730,r2 - beq 1f - subs #2,er1 - mov.l er1,@er0 -1: - and.w #0xff,e1 - mov.l er1,er0 - jsr @_trace_trap - jmp @_ret_from_exception - - .section .bss -_sw_ksp: - .space 4 -_sw_usp: - .space 4 - - .end diff --git a/arch/h8300/kernel/gpio.c b/arch/h8300/kernel/gpio.c deleted file mode 100644 index 084bfd0c107..00000000000 --- a/arch/h8300/kernel/gpio.c +++ /dev/null @@ -1,178 +0,0 @@ -/* - * linux/arch/h8300/kernel/gpio.c - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - */ - -/* - * Internal I/O Port Management - */ - -#include <linux/stddef.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/fs.h> -#include <linux/init.h> - -#define _(addr) (volatile unsigned char *)(addr) -#if defined(CONFIG_H83007) || defined(CONFIG_H83068) -#include <asm/regs306x.h> -static volatile unsigned char *ddrs[] = { - _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR), - NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR), -}; -#define MAX_PORT 11 -#endif - - #if defined(CONFIG_H83002) || defined(CONFIG_H8048) -/* Fix me!! */ -#include <asm/regs306x.h> -static volatile unsigned char *ddrs[] = { - _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR), - NULL, _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR), -}; -#define MAX_PORT 11 -#endif - -#if defined(CONFIG_H8S2678) -#include <asm/regs267x.h> -static volatile unsigned char *ddrs[] = { - _(P1DDR),_(P2DDR),_(P3DDR),NULL ,_(P5DDR),_(P6DDR), - _(P7DDR),_(P8DDR),NULL, _(PADDR),_(PBDDR),_(PCDDR), - _(PDDDR),_(PEDDR),_(PFDDR),_(PGDDR),_(PHDDR), - _(PADDR),_(PBDDR),_(PCDDR),_(PDDDR),_(PEDDR),_(PFDDR), - _(PGDDR),_(PHDDR) -}; -#define MAX_PORT 17 -#endif -#undef _ - -#if !defined(P1DDR) -#error Unsuppoted CPU Selection -#endif - -static struct { - unsigned char used; - unsigned char ddr; -} gpio_regs[MAX_PORT]; - -extern char *_platform_gpio_table(int length); - -int h8300_reserved_gpio(int port, unsigned int bits) -{ - unsigned char *used; - - if (port < 0 || port >= MAX_PORT) - return -1; - used = &(gpio_regs[port].used); - if ((*used & bits) != 0) - return 0; - *used |= bits; - return 1; -} - -int h8300_free_gpio(int port, unsigned int bits) -{ - unsigned char *used; - - if (port < 0 || port >= MAX_PORT) - return -1; - used = &(gpio_regs[port].used); - if ((*used & bits) != bits) - return 0; - *used &= (~bits); - return 1; -} - -int h8300_set_gpio_dir(int port_bit,int dir) -{ - int port = (port_bit >> 8) & 0xff; - int bit = port_bit & 0xff; - - if (ddrs[port] == NULL) - return 0; - if (gpio_regs[port].used & bit) { - if (dir) - gpio_regs[port].ddr |= bit; - else - gpio_regs[port].ddr &= ~bit; - *ddrs[port] = gpio_regs[port].ddr; - return 1; - } else - return 0; -} - -int h8300_get_gpio_dir(int port_bit) -{ - int port = (port_bit >> 8) & 0xff; - int bit = port_bit & 0xff; - - if (ddrs[port] == NULL) - return 0; - if (gpio_regs[port].used & bit) { - return (gpio_regs[port].ddr & bit) != 0; - } else - return -1; -} - -#if defined(CONFIG_PROC_FS) -static char *port_status(int portno) -{ - static char result[10]; - static const char io[2]={'I','O'}; - char *rp; - int c; - unsigned char used,ddr; - - used = 
gpio_regs[portno].used; - ddr = gpio_regs[portno].ddr; - result[8]='\0'; - rp = result + 7; - for (c = 8; c > 0; c--,rp--,used >>= 1, ddr >>= 1) - if (used & 0x01) - *rp = io[ ddr & 0x01]; - else - *rp = '-'; - return result; -} - -static int gpio_proc_show(struct seq_file *m, void *v) -{ - static const char port_name[]="123456789ABCDEFGH"; - int c; - - for (c = 0; c < MAX_PORT; c++) { - if (ddrs[c] == NULL) - continue; - seq_printf(m, "P%c: %s\n", port_name[c], port_status(c)); - } - return 0; -} - -static int gpio_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, gpio_proc_show, PDE_DATA(inode)); -} - -static const struct file_operations gpio_proc_fops = { - .open = gpio_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static __init int register_proc(void) -{ - return proc_create("gpio", S_IRUGO, NULL, &gpio_proc_fops) != NULL; -} - -__initcall(register_proc); -#endif - -void __init h8300_gpio_init(void) -{ - memcpy(gpio_regs,_platform_gpio_table(sizeof(gpio_regs)),sizeof(gpio_regs)); -} diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c deleted file mode 100644 index 53d7c0e4bd8..00000000000 --- a/arch/h8300/kernel/h8300_ksyms.c +++ /dev/null @@ -1,100 +0,0 @@ -#include <linux/module.h> -#include <linux/linkage.h> -#include <linux/sched.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/user.h> -#include <linux/elfcore.h> -#include <linux/in6.h> -#include <linux/interrupt.h> - -#include <asm/setup.h> -#include <asm/pgalloc.h> -#include <asm/irq.h> -#include <asm/io.h> -#include <asm/checksum.h> -#include <asm/current.h> -#include <asm/gpio.h> - -//asmlinkage long long __ashrdi3 (long long, int); -//asmlinkage long long __lshrdi3 (long long, int); -extern char h8300_debug_device[]; - -/* platform dependent support */ - -EXPORT_SYMBOL(strnlen); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strstr); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); - -EXPORT_SYMBOL(ip_fast_csum); - -EXPORT_SYMBOL(enable_irq); -EXPORT_SYMBOL(disable_irq); - -/* Networking helper routines. */ -EXPORT_SYMBOL(csum_partial_copy_nocheck); - -/* The following are special because they're not called - explicitly (the C compiler generates them). Fortunately, - their interface isn't gonna change any time soon now, so - it's OK to leave it out of version control. */ -//EXPORT_SYMBOL(__ashrdi3); -//EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(memcmp); -EXPORT_SYMBOL(memscan); -EXPORT_SYMBOL(memmove); - -/* - * libgcc functions - functions that are used internally by the - * compiler... (prototypes are not correct though, but that - * doesn't really matter since they're not versioned). 
- */ -extern void __gcc_bcmp(void); -extern void __ashldi3(void); -extern void __ashrdi3(void); -extern void __cmpdi2(void); -extern void __divdi3(void); -extern void __divsi3(void); -extern void __lshrdi3(void); -extern void __moddi3(void); -extern void __modsi3(void); -extern void __muldi3(void); -extern void __mulsi3(void); -extern void __negdi2(void); -extern void __ucmpdi2(void); -extern void __udivdi3(void); -extern void __udivmoddi4(void); -extern void __udivsi3(void); -extern void __umoddi3(void); -extern void __umodsi3(void); - - /* gcc lib functions */ -EXPORT_SYMBOL(__gcc_bcmp); -EXPORT_SYMBOL(__ashldi3); -EXPORT_SYMBOL(__ashrdi3); -EXPORT_SYMBOL(__cmpdi2); -EXPORT_SYMBOL(__divdi3); -EXPORT_SYMBOL(__divsi3); -EXPORT_SYMBOL(__lshrdi3); -EXPORT_SYMBOL(__moddi3); -EXPORT_SYMBOL(__modsi3); -EXPORT_SYMBOL(__muldi3); -EXPORT_SYMBOL(__mulsi3); -EXPORT_SYMBOL(__negdi2); -EXPORT_SYMBOL(__ucmpdi2); -EXPORT_SYMBOL(__udivdi3); -EXPORT_SYMBOL(__udivmoddi4); -EXPORT_SYMBOL(__udivsi3); -EXPORT_SYMBOL(__umoddi3); -EXPORT_SYMBOL(__umodsi3); - -EXPORT_SYMBOL(h8300_reserved_gpio); -EXPORT_SYMBOL(h8300_free_gpio); -EXPORT_SYMBOL(h8300_set_gpio_dir); diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c deleted file mode 100644 index 2fa8ac7b79b..00000000000 --- a/arch/h8300/kernel/irq.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * linux/arch/h8300/kernel/irq.c - * - * Copyright 2007 Yoshinori Sato <ysato@users.sourceforge.jp> - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/kernel_stat.h> -#include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/random.h> -#include <linux/bootmem.h> -#include <linux/irq.h> -#include <linux/interrupt.h> - -#include <asm/traps.h> -#include <asm/io.h> -#include <asm/setup.h> -#include <asm/errno.h> - -/*#define DEBUG*/ - -extern unsigned long *interrupt_redirect_table; -extern const int h8300_saved_vectors[]; -extern const h8300_vector h8300_trap_table[]; -int h8300_enable_irq_pin(unsigned int irq); -void h8300_disable_irq_pin(unsigned int irq); - -#define CPU_VECTOR ((unsigned long *)0x000000) -#define ADDR_MASK (0xffffff) - -static inline int is_ext_irq(unsigned int irq) -{ - return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS)); -} - -static void h8300_enable_irq(struct irq_data *data) -{ - if (is_ext_irq(data->irq)) - IER_REGS |= 1 << (data->irq - EXT_IRQ0); -} - -static void h8300_disable_irq(struct irq_data *data) -{ - if (is_ext_irq(data->irq)) - IER_REGS &= ~(1 << (data->irq - EXT_IRQ0)); -} - -static unsigned int h8300_startup_irq(struct irq_data *data) -{ - if (is_ext_irq(data->irq)) - return h8300_enable_irq_pin(data->irq); - else - return 0; -} - -static void h8300_shutdown_irq(struct irq_data *data) -{ - if (is_ext_irq(data->irq)) - h8300_disable_irq_pin(data->irq); -} - -/* - * h8300 interrupt controller implementation - */ -struct irq_chip h8300irq_chip = { - .name = "H8300-INTC", - .irq_startup = h8300_startup_irq, - .irq_shutdown = h8300_shutdown_irq, - .irq_enable = h8300_enable_irq, - .irq_disable = h8300_disable_irq, -}; - -#if defined(CONFIG_RAMKERNEL) -static unsigned long __init *get_vector_address(void) -{ - unsigned long *rom_vector = CPU_VECTOR; - unsigned long base,tmp; - int vec_no; - - base = rom_vector[EXT_IRQ0] & ADDR_MASK; - - /* check romvector format */ - for (vec_no = EXT_IRQ1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) { - if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK)) - return NULL; - } - - /* ramvector base 
address */ - base -= EXT_IRQ0*4; - - /* writerble check */ - tmp = ~(*(volatile unsigned long *)base); - (*(volatile unsigned long *)base) = tmp; - if ((*(volatile unsigned long *)base) != tmp) - return NULL; - return (unsigned long *)base; -} - -static void __init setup_vector(void) -{ - int i; - unsigned long *ramvec,*ramvec_p; - const h8300_vector *trap_entry; - const int *saved_vector; - - ramvec = get_vector_address(); - if (ramvec == NULL) - panic("interrupt vector serup failed."); - else - printk(KERN_INFO "virtual vector at 0x%08lx\n",(unsigned long)ramvec); - - /* create redirect table */ - ramvec_p = ramvec; - trap_entry = h8300_trap_table; - saved_vector = h8300_saved_vectors; - for ( i = 0; i < NR_IRQS; i++) { - if (i == *saved_vector) { - ramvec_p++; - saved_vector++; - } else { - if ( i < NR_TRAPS ) { - if (*trap_entry) - *ramvec_p = VECTOR(*trap_entry); - ramvec_p++; - trap_entry++; - } else - *ramvec_p++ = REDIRECT(interrupt_entry); - } - } - interrupt_redirect_table = ramvec; -#ifdef DEBUG - ramvec_p = ramvec; - for (i = 0; i < NR_IRQS; i++) { - if ((i % 8) == 0) - printk(KERN_DEBUG "\n%p: ",ramvec_p); - printk(KERN_DEBUG "%p ",*ramvec_p); - ramvec_p++; - } - printk(KERN_DEBUG "\n"); -#endif -} -#else -#define setup_vector() do { } while(0) -#endif - -void __init init_IRQ(void) -{ - int c; - - setup_vector(); - - for (c = 0; c < NR_IRQS; c++) - irq_set_chip_and_handler(c, &h8300irq_chip, handle_simple_irq); -} - -asmlinkage void do_IRQ(int irq) -{ - irq_enter(); - generic_handle_irq(irq); - irq_exit(); -} diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c deleted file mode 100644 index 1d526e05db1..00000000000 --- a/arch/h8300/kernel/module.c +++ /dev/null @@ -1,75 +0,0 @@ -#include <linux/moduleloader.h> -#include <linux/elf.h> -#include <linux/vmalloc.h> -#include <linux/fs.h> -#include <linux/string.h> -#include <linux/kernel.h> - -#if 0 -#define DEBUGP printk -#else -#define DEBUGP(fmt...) -#endif - -int apply_relocate_add(Elf32_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *me) -{ - unsigned int i; - Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; - - DEBUGP("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { - /* This is where to make the change */ - uint32_t *loc = (uint32_t *)(sechdrs[sechdrs[relsec].sh_info].sh_addr - + rela[i].r_offset); - /* This is the symbol it is referring to. Note that all - undefined symbols have been resolved. 
*/ - Elf32_Sym *sym = (Elf32_Sym *)sechdrs[symindex].sh_addr - + ELF32_R_SYM(rela[i].r_info); - uint32_t v = sym->st_value + rela[i].r_addend; - - switch (ELF32_R_TYPE(rela[i].r_info)) { - case R_H8_DIR24R8: - loc = (uint32_t *)((uint32_t)loc - 1); - *loc = (*loc & 0xff000000) | ((*loc & 0xffffff) + v); - break; - case R_H8_DIR24A8: - if (ELF32_R_SYM(rela[i].r_info)) - *loc += v; - break; - case R_H8_DIR32: - case R_H8_DIR32A16: - *loc += v; - break; - case R_H8_PCREL16: - v -= (unsigned long)loc + 2; - if ((Elf32_Sword)v > 0x7fff || - (Elf32_Sword)v < -(Elf32_Sword)0x8000) - goto overflow; - else - *(unsigned short *)loc = v; - break; - case R_H8_PCREL8: - v -= (unsigned long)loc + 1; - if ((Elf32_Sword)v > 0x7f || - (Elf32_Sword)v < -(Elf32_Sword)0x80) - goto overflow; - else - *(unsigned char *)loc = v; - break; - default: - printk(KERN_ERR "module %s: Unknown relocation: %u\n", - me->name, ELF32_R_TYPE(rela[i].r_info)); - return -ENOEXEC; - } - } - return 0; - overflow: - printk(KERN_ERR "module %s: relocation offset overflow: %08x\n", - me->name, rela[i].r_offset); - return -ENOEXEC; -} diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c deleted file mode 100644 index 1a744ab7e7e..00000000000 --- a/arch/h8300/kernel/process.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * linux/arch/h8300/kernel/process.c - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Based on: - * - * linux/arch/m68knommu/kernel/process.c - * - * Copyright (C) 1998 D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>, - * Kenneth Albanowski <kjahds@kjahds.com>, - * The Silver Hammer Group, Ltd. - * - * linux/arch/m68k/kernel/process.c - * - * Copyright (C) 1995 Hamish Macdonald - * - * 68060 fixes by Jesper Skov - */ - -/* - * This file handles the architecture-dependent parts of process handling.. - */ - -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/stddef.h> -#include <linux/unistd.h> -#include <linux/ptrace.h> -#include <linux/user.h> -#include <linux/interrupt.h> -#include <linux/reboot.h> -#include <linux/fs.h> -#include <linux/slab.h> -#include <linux/rcupdate.h> - -#include <asm/uaccess.h> -#include <asm/traps.h> -#include <asm/setup.h> -#include <asm/pgtable.h> - -void (*pm_power_off)(void) = NULL; -EXPORT_SYMBOL(pm_power_off); - -asmlinkage void ret_from_fork(void); -asmlinkage void ret_from_kernel_thread(void); - -/* - * The idle loop on an H8/300.. - */ -#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM) -void arch_cpu_idle(void) -{ - local_irq_enable(); - /* XXX: race here! What if need_resched() gets set now? 
*/ - __asm__("sleep"); -} -#endif - -void machine_restart(char * __unused) -{ - local_irq_disable(); - __asm__("jmp @@0"); -} - -void machine_halt(void) -{ - local_irq_disable(); - __asm__("sleep"); - for (;;); -} - -void machine_power_off(void) -{ - local_irq_disable(); - __asm__("sleep"); - for (;;); -} - -void show_regs(struct pt_regs * regs) -{ - show_regs_print_info(KERN_DEFAULT); - - printk("\nPC: %08lx Status: %02x", - regs->pc, regs->ccr); - printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx", - regs->orig_er0, regs->er0, regs->er1); - printk("\nER2: %08lx ER3: %08lx ER4: %08lx ER5: %08lx", - regs->er2, regs->er3, regs->er4, regs->er5); - printk("\nER6' %08lx ",regs->er6); - if (user_mode(regs)) - printk("USP: %08lx\n", rdusp()); - else - printk("\n"); -} - -void flush_thread(void) -{ -} - -int copy_thread(unsigned long clone_flags, - unsigned long usp, unsigned long topstk, - struct task_struct * p) -{ - struct pt_regs * childregs; - - childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1; - - if (unlikely(p->flags & PF_KTHREAD)) { - memset(childregs, 0, sizeof(struct pt_regs)); - childregs->retpc = (unsigned long) ret_from_kernel_thread; - childregs->er4 = topstk; /* arg */ - childregs->er5 = usp; /* fn */ - p->thread.ksp = (unsigned long)childregs; - } - *childregs = *current_pt_regs(); - childregs->retpc = (unsigned long) ret_from_fork; - childregs->er0 = 0; - p->thread.usp = usp ?: rdusp(); - p->thread.ksp = (unsigned long)childregs; - - return 0; -} - -unsigned long thread_saved_pc(struct task_struct *tsk) -{ - return ((struct pt_regs *)tsk->thread.esp0)->pc; -} - -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long fp, pc; - unsigned long stack_page; - int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - - stack_page = (unsigned long)p; - fp = ((struct pt_regs *)p->thread.ksp)->er6; - do { - if (fp < stack_page+sizeof(struct thread_info) || - fp >= 8184+stack_page) - return 0; - pc = ((unsigned long *)fp)[1]; - if (!in_sched_functions(pc)) - return pc; - fp = *(unsigned long *) fp; - } while (count++ < 16); - return 0; -} diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c deleted file mode 100644 index 748cf6585aa..00000000000 --- a/arch/h8300/kernel/ptrace.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * linux/arch/h8300/kernel/ptrace.c - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Based on: - * linux/arch/m68k/kernel/ptrace.c - * - * Copyright (C) 1994 by Hamish Macdonald - * Taken from linux/kernel/ptrace.c and modified for M680x0. - * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file COPYING in the main directory of - * this archive for more details. - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/errno.h> -#include <linux/ptrace.h> -#include <linux/user.h> -#include <linux/signal.h> - -#include <asm/uaccess.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/processor.h> -#include <asm/signal.h> - -/* cpu depend functions */ -extern long h8300_get_reg(struct task_struct *task, int regno); -extern int h8300_put_reg(struct task_struct *task, int regno, unsigned long data); - - -void user_disable_single_step(struct task_struct *child) -{ -} - -/* - * does not yet catch signals sent when the child dies. - * in exit.c or in signal.c. 
- */ - -void ptrace_disable(struct task_struct *child) -{ - user_disable_single_step(child); -} - -long arch_ptrace(struct task_struct *child, long request, - unsigned long addr, unsigned long data) -{ - int ret; - int regno = addr >> 2; - unsigned long __user *datap = (unsigned long __user *) data; - - switch (request) { - /* read the word at location addr in the USER area. */ - case PTRACE_PEEKUSR: { - unsigned long tmp = 0; - - if ((addr & 3) || addr >= sizeof(struct user)) { - ret = -EIO; - break ; - } - - ret = 0; /* Default return condition */ - - if (regno < H8300_REGS_NO) - tmp = h8300_get_reg(child, regno); - else { - switch (regno) { - case 49: - tmp = child->mm->start_code; - break ; - case 50: - tmp = child->mm->start_data; - break ; - case 51: - tmp = child->mm->end_code; - break ; - case 52: - tmp = child->mm->end_data; - break ; - default: - ret = -EIO; - } - } - if (!ret) - ret = put_user(tmp, datap); - break ; - } - - /* when I and D space are separate, this will have to be fixed. */ - case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ - if ((addr & 3) || addr >= sizeof(struct user)) { - ret = -EIO; - break ; - } - - if (regno == PT_ORIG_ER0) { - ret = -EIO; - break ; - } - if (regno < H8300_REGS_NO) { - ret = h8300_put_reg(child, regno, data); - break ; - } - ret = -EIO; - break ; - - case PTRACE_GETREGS: { /* Get all gp regs from the child. */ - int i; - unsigned long tmp; - for (i = 0; i < H8300_REGS_NO; i++) { - tmp = h8300_get_reg(child, i); - if (put_user(tmp, datap)) { - ret = -EFAULT; - break; - } - datap++; - } - ret = 0; - break; - } - - case PTRACE_SETREGS: { /* Set all gp regs in the child. */ - int i; - unsigned long tmp; - for (i = 0; i < H8300_REGS_NO; i++) { - if (get_user(tmp, datap)) { - ret = -EFAULT; - break; - } - h8300_put_reg(child, i, tmp); - datap++; - } - ret = 0; - break; - } - - default: - ret = ptrace_request(child, request, addr, data); - break; - } - return ret; -} - -asmlinkage void do_syscall_trace(void) -{ - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; - if (!(current->ptrace & PT_PTRACED)) - return; - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) - ? 0x80 : 0)); - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } -} diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c deleted file mode 100644 index d0b1607f271..00000000000 --- a/arch/h8300/kernel/setup.c +++ /dev/null @@ -1,242 +0,0 @@ -/* - * linux/arch/h8300/kernel/setup.c - * - * Copyleft ()) 2000 James D. Schettine {james@telos-systems.com} - * Copyright (C) 1999,2000 Greg Ungerer (gerg@snapgear.com) - * Copyright (C) 1998,1999 D. Jeff Dionne <jeff@lineo.ca> - * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com> - * Copyright (C) 1995 Hamish Macdonald - * Copyright (C) 2000 Lineo Inc. (www.lineo.com) - * Copyright (C) 2001 Lineo, Inc. 
<www.lineo.com> - * - * H8/300 porting Yoshinori Sato <ysato@users.sourceforge.jp> - */ - -/* - * This file handles the architecture-dependent parts of system setup - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/mm.h> -#include <linux/fs.h> -#include <linux/fb.h> -#include <linux/console.h> -#include <linux/genhd.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/major.h> -#include <linux/bootmem.h> -#include <linux/seq_file.h> -#include <linux/init.h> - -#include <asm/setup.h> -#include <asm/irq.h> -#include <asm/pgtable.h> -#include <asm/sections.h> - -#if defined(__H8300H__) -#define CPU "H8/300H" -#include <asm/regs306x.h> -#endif - -#if defined(__H8300S__) -#define CPU "H8S" -#include <asm/regs267x.h> -#endif - -#define STUBSIZE 0xc000 - -unsigned long rom_length; -unsigned long memory_start; -unsigned long memory_end; - -char __initdata command_line[COMMAND_LINE_SIZE]; - -extern int _ramstart, _ramend; -extern char _target_name[]; -extern void h8300_gpio_init(void); - -#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) \ - && defined(CONFIG_GDB_MAGICPRINT) -/* printk with gdb service */ -static void gdb_console_output(struct console *c, const char *msg, unsigned len) -{ - for (; len > 0; len--) { - asm("mov.w %0,r2\n\t" - "jsr @0xc4"::"r"(*msg++):"er2"); - } -} - -/* - * Setup initial baud/bits/parity. We do two things here: - * - construct a cflag setting for the first rs_open() - * - initialize the serial port - * Return non-zero if we didn't find a serial port. - */ -static int __init gdb_console_setup(struct console *co, char *options) -{ - return 0; -} - -static const struct console gdb_console = { - .name = "gdb_con", - .write = gdb_console_output, - .device = NULL, - .setup = gdb_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, -}; -#endif - -void __init setup_arch(char **cmdline_p) -{ - int bootmap_size; - - memory_start = (unsigned long) &_ramstart; - - /* allow for ROMFS on the end of the kernel */ - if (memcmp((void *)memory_start, "-rom1fs-", 8) == 0) { -#if defined(CONFIG_BLK_DEV_INITRD) - initrd_start = memory_start; - initrd_end = memory_start += be32_to_cpu(((unsigned long *) (memory_start))[2]); -#else - memory_start += be32_to_cpu(((unsigned long *) memory_start)[2]); -#endif - } - memory_start = PAGE_ALIGN(memory_start); -#if !defined(CONFIG_BLKDEV_RESERVE) - memory_end = (unsigned long) &_ramend; /* by now the stack is part of the init task */ -#if defined(CONFIG_GDB_DEBUG) - memory_end -= STUBSIZE; -#endif -#else - if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) && - (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS)) - /* overlap userarea */ - memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; -#endif - - init_mm.start_code = (unsigned long) _stext; - init_mm.end_code = (unsigned long) _etext; - init_mm.end_data = (unsigned long) _edata; - init_mm.brk = (unsigned long) 0; - -#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT) - register_console((struct console *)&gdb_console); -#endif - - printk(KERN_INFO "\r\n\nuClinux " CPU "\n"); - printk(KERN_INFO "Target Hardware: %s\n",_target_name); - printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. 
Jeff Dionne\n"); - printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n"); - -#ifdef DEBUG - printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p " - "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start, - __bss_stop); - printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx " - "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start, - memory_end, memory_end, &_ramend); -#endif - -#ifdef CONFIG_DEFAULT_CMDLINE - /* set from default command line */ - if (*command_line == '\0') - strcpy(command_line,CONFIG_KERNEL_COMMAND); -#endif - /* Keep a copy of command line */ - *cmdline_p = &command_line[0]; - memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); - boot_command_line[COMMAND_LINE_SIZE-1] = 0; - -#ifdef DEBUG - if (strlen(*cmdline_p)) - printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p); -#endif - - /* - * give all the memory to the bootmap allocator, tell it to put the - * boot mem_map at the start of memory - */ - bootmap_size = init_bootmem_node( - NODE_DATA(0), - memory_start >> PAGE_SHIFT, /* map goes here */ - PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ - memory_end >> PAGE_SHIFT); - /* - * free the usable memory, we have to make sure we do not free - * the bootmem bitmap so we then reserve it after freeing it :-) - */ - free_bootmem(memory_start, memory_end - memory_start); - reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT); - /* - * get kmalloc into gear - */ - paging_init(); - h8300_gpio_init(); -#if defined(CONFIG_H8300_AKI3068NET) && defined(CONFIG_IDE) - { -#define AREABIT(addr) (1 << (((addr) >> 21) & 7)) - /* setup BSC */ - volatile unsigned char *abwcr = (volatile unsigned char *)ABWCR; - volatile unsigned char *cscr = (volatile unsigned char *)CSCR; - *abwcr &= ~(AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT)); - *cscr |= (AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT)) | 0x0f; - } -#endif -#ifdef DEBUG - printk(KERN_DEBUG "Done setup_arch\n"); -#endif -} - -/* - * Get CPU information for use by the procfs. - */ - -static int show_cpuinfo(struct seq_file *m, void *v) -{ - char *cpu; - int mode; - u_long clockfreq; - - cpu = CPU; - mode = *(volatile unsigned char *)MDCR & 0x07; - - clockfreq = CONFIG_CPU_CLOCK; - - seq_printf(m, "CPU:\t\t%s (mode:%d)\n" - "Clock:\t\t%lu.%1luMHz\n" - "BogoMips:\t%lu.%02lu\n" - "Calibration:\t%lu loops\n", - cpu,mode, - clockfreq/1000,clockfreq%1000, - (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100, - (loops_per_jiffy*HZ)); - - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL; -} - -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - ++*pos; - return c_start(m, pos); -} - -static void c_stop(struct seq_file *m, void *v) -{ -} - -const struct seq_operations cpuinfo_op = { - .start = c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c deleted file mode 100644 index a65ff3b7632..00000000000 --- a/arch/h8300/kernel/signal.c +++ /dev/null @@ -1,444 +0,0 @@ -/* - * linux/arch/h8300/kernel/signal.c - * - * Copyright (C) 1991, 1992 Linus Torvalds - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. 
- */ - -/* - * uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp> - * and David McCullough <davidm@snapgear.com> - * - * Based on - * Linux/m68k by Hamish Macdonald - */ - -/* - * ++roman (07/09/96): implemented signal stacks (specially for tosemu on - * Atari :-) Current limitation: Only one sigstack can be active at one time. - * If a second signal with SA_ONSTACK set arrives while working on a sigstack, - * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested - * signal handlers! - */ - -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/kernel.h> -#include <linux/signal.h> -#include <linux/syscalls.h> -#include <linux/errno.h> -#include <linux/wait.h> -#include <linux/ptrace.h> -#include <linux/unistd.h> -#include <linux/stddef.h> -#include <linux/highuid.h> -#include <linux/personality.h> -#include <linux/tty.h> -#include <linux/binfmts.h> -#include <linux/tracehook.h> - -#include <asm/setup.h> -#include <asm/uaccess.h> -#include <asm/pgtable.h> -#include <asm/traps.h> -#include <asm/ucontext.h> - -/* - * Do a signal return; undo the signal stack. - * - * Keep the return code on the stack quadword aligned! - * That makes the cache flush below easier. - */ - -struct sigframe -{ - long dummy_er0; - long dummy_vector; -#if defined(CONFIG_CPU_H8S) - short dummy_exr; -#endif - long dummy_pc; - char *pretcode; - unsigned char retcode[8]; - unsigned long extramask[_NSIG_WORDS-1]; - struct sigcontext sc; - int sig; -} __attribute__((aligned(2),packed)); - -struct rt_sigframe -{ - long dummy_er0; - long dummy_vector; -#if defined(CONFIG_CPU_H8S) - short dummy_exr; -#endif - long dummy_pc; - char *pretcode; - struct siginfo *pinfo; - void *puc; - unsigned char retcode[8]; - struct siginfo info; - struct ucontext uc; - int sig; -} __attribute__((aligned(2),packed)); - -static inline int -restore_sigcontext(struct sigcontext *usc, int *pd0) -{ - struct pt_regs *regs = current_pt_regs(); - int err = 0; - unsigned int ccr; - unsigned int usp; - unsigned int er0; - - /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; - -#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r) /* restore passed registers */ - COPY(er1); - COPY(er2); - COPY(er3); - COPY(er5); - COPY(pc); - ccr = regs->ccr & 0x10; - COPY(ccr); -#undef COPY - regs->ccr &= 0xef; - regs->ccr |= ccr; - regs->orig_er0 = -1; /* disable syscall checks */ - err |= __get_user(usp, &usc->sc_usp); - wrusp(usp); - - err |= __get_user(er0, &usc->sc_er0); - *pd0 = er0; - return err; -} - -asmlinkage int sys_sigreturn(void) -{ - unsigned long usp = rdusp(); - struct sigframe *frame = (struct sigframe *)(usp - 4); - sigset_t set; - int er0; - - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; - if (__get_user(set.sig[0], &frame->sc.sc_mask) || - (_NSIG_WORDS > 1 && - __copy_from_user(&set.sig[1], &frame->extramask, - sizeof(frame->extramask)))) - goto badframe; - - set_current_blocked(&set); - - if (restore_sigcontext(&frame->sc, &er0)) - goto badframe; - return er0; - -badframe: - force_sig(SIGSEGV, current); - return 0; -} - -asmlinkage int sys_rt_sigreturn(void) -{ - unsigned long usp = rdusp(); - struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4); - sigset_t set; - int er0; - - if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) - goto badframe; - if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) - goto badframe; - - set_current_blocked(&set); - - if 
(restore_sigcontext(&frame->uc.uc_mcontext, &er0)) - goto badframe; - - if (restore_altstack(&frame->uc.uc_stack)) - goto badframe; - - return er0; - -badframe: - force_sig(SIGSEGV, current); - return 0; -} - -static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, - unsigned long mask) -{ - int err = 0; - - err |= __put_user(regs->er0, &sc->sc_er0); - err |= __put_user(regs->er1, &sc->sc_er1); - err |= __put_user(regs->er2, &sc->sc_er2); - err |= __put_user(regs->er3, &sc->sc_er3); - err |= __put_user(regs->er4, &sc->sc_er4); - err |= __put_user(regs->er5, &sc->sc_er5); - err |= __put_user(regs->er6, &sc->sc_er6); - err |= __put_user(rdusp(), &sc->sc_usp); - err |= __put_user(regs->pc, &sc->sc_pc); - err |= __put_user(regs->ccr, &sc->sc_ccr); - err |= __put_user(mask, &sc->sc_mask); - - return err; -} - -static inline void * -get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) -{ - unsigned long usp; - - /* Default to using normal stack. */ - usp = rdusp(); - - /* This is the X/Open sanctioned signal stack switching. */ - if (ka->sa.sa_flags & SA_ONSTACK) { - if (!sas_ss_flags(usp)) - usp = current->sas_ss_sp + current->sas_ss_size; - } - return (void *)((usp - frame_size) & -8UL); -} - -static int setup_frame (int sig, struct k_sigaction *ka, - sigset_t *set, struct pt_regs *regs) -{ - struct sigframe *frame; - int err = 0; - int usig; - unsigned char *ret; - - frame = get_sigframe(ka, regs, sizeof(*frame)); - - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) - goto give_sigsegv; - - usig = current_thread_info()->exec_domain - && current_thread_info()->exec_domain->signal_invmap - && sig < 32 - ? current_thread_info()->exec_domain->signal_invmap[sig] - : sig; - - err |= __put_user(usig, &frame->sig); - if (err) - goto give_sigsegv; - - err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); - if (err) - goto give_sigsegv; - - if (_NSIG_WORDS > 1) { - err |= copy_to_user(frame->extramask, &set->sig[1], - sizeof(frame->extramask)); - if (err) - goto give_sigsegv; - } - - ret = frame->retcode; - if (ka->sa.sa_flags & SA_RESTORER) - ret = (unsigned char *)(ka->sa.sa_restorer); - else { - /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */ - err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff), - (unsigned long *)(frame->retcode + 0)); - err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4)); - } - - /* Set up to return from userspace. */ - err |= __put_user(ret, &frame->pretcode); - - if (err) - goto give_sigsegv; - - /* Set up registers for signal handler */ - wrusp ((unsigned long) frame); - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->er0 = (current_thread_info()->exec_domain - && current_thread_info()->exec_domain->signal_invmap - && sig < 32 - ? current_thread_info()->exec_domain->signal_invmap[sig] - : sig); - regs->er1 = (unsigned long)&(frame->sc); - regs->er5 = current->mm->start_data; /* GOT base */ - - return 0; - -give_sigsegv: - force_sigsegv(sig, current); - return -EFAULT; -} - -static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs) -{ - struct rt_sigframe *frame; - int err = 0; - int usig; - unsigned char *ret; - - frame = get_sigframe(ka, regs, sizeof(*frame)); - - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) - goto give_sigsegv; - - usig = current_thread_info()->exec_domain - && current_thread_info()->exec_domain->signal_invmap - && sig < 32 - ? 
current_thread_info()->exec_domain->signal_invmap[sig] - : sig; - - err |= __put_user(usig, &frame->sig); - if (err) - goto give_sigsegv; - - err |= __put_user(&frame->info, &frame->pinfo); - err |= __put_user(&frame->uc, &frame->puc); - err |= copy_siginfo_to_user(&frame->info, info); - if (err) - goto give_sigsegv; - - /* Create the ucontext. */ - err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __save_altstack(&frame->uc.uc_stack, rdusp()); - err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); - err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set)); - if (err) - goto give_sigsegv; - - /* Set up to return from userspace. */ - ret = frame->retcode; - if (ka->sa.sa_flags & SA_RESTORER) - ret = (unsigned char *)(ka->sa.sa_restorer); - else { - /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */ - err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff), - (unsigned long *)(frame->retcode + 0)); - err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4)); - } - err |= __put_user(ret, &frame->pretcode); - - if (err) - goto give_sigsegv; - - /* Set up registers for signal handler */ - wrusp ((unsigned long) frame); - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->er0 = (current_thread_info()->exec_domain - && current_thread_info()->exec_domain->signal_invmap - && sig < 32 - ? current_thread_info()->exec_domain->signal_invmap[sig] - : sig); - regs->er1 = (unsigned long)&(frame->info); - regs->er2 = (unsigned long)&frame->uc; - regs->er5 = current->mm->start_data; /* GOT base */ - - return 0; - -give_sigsegv: - force_sigsegv(sig, current); - return -EFAULT; -} - -/* - * OK, we're invoking a handler - */ -static void -handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, - struct pt_regs * regs) -{ - sigset_t *oldset = sigmask_to_save(); - int ret; - /* are we from a system call? */ - if (regs->orig_er0 >= 0) { - switch (regs->er0) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - regs->er0 = -EINTR; - break; - - case -ERESTARTSYS: - if (!(ka->sa.sa_flags & SA_RESTART)) { - regs->er0 = -EINTR; - break; - } - /* fallthrough */ - case -ERESTARTNOINTR: - regs->er0 = regs->orig_er0; - regs->pc -= 2; - } - } - - /* set up the stack frame */ - if (ka->sa.sa_flags & SA_SIGINFO) - ret = setup_rt_frame(sig, ka, info, oldset, regs); - else - ret = setup_frame(sig, ka, oldset, regs); - - if (!ret) - signal_delivered(sig, info, ka, regs, 0); -} - -/* - * Note that 'init' is a special process: it doesn't get signals it doesn't - * want to handle. Thus you cannot kill init even with a SIGKILL even by - * mistake. - */ -static void do_signal(struct pt_regs *regs) -{ - siginfo_t info; - int signr; - struct k_sigaction ka; - - /* - * We want the common case to go fast, which - * is why we may in certain cases get here from - * kernel mode. Just return without doing anything - * if so. - */ - if ((regs->ccr & 0x10)) - return; - - current->thread.esp0 = (unsigned long) regs; - - signr = get_signal_to_deliver(&info, &ka, regs, NULL); - if (signr > 0) { - /* Whee! Actually deliver the signal. */ - handle_signal(signr, &info, &ka, regs); - return; - } - /* Did we come from a system call? 
*/ - if (regs->orig_er0 >= 0) { - /* Restart the system call - no handlers present */ - if (regs->er0 == -ERESTARTNOHAND || - regs->er0 == -ERESTARTSYS || - regs->er0 == -ERESTARTNOINTR) { - regs->er0 = regs->orig_er0; - regs->pc -= 2; - } - if (regs->er0 == -ERESTART_RESTARTBLOCK){ - regs->er0 = __NR_restart_syscall; - regs->pc -= 2; - } - } - - /* If there's no signal to deliver, we just restore the saved mask. */ - restore_saved_sigmask(); -} - -asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) -{ - if (thread_info_flags & _TIF_SIGPENDING) - do_signal(regs); - - if (thread_info_flags & _TIF_NOTIFY_RESUME) { - clear_thread_flag(TIF_NOTIFY_RESUME); - tracehook_notify_resume(regs); - } -} diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c deleted file mode 100644 index bf350cb7f59..00000000000 --- a/arch/h8300/kernel/sys_h8300.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * linux/arch/h8300/kernel/sys_h8300.c - * - * This file contains various random system calls that - * have a non-standard calling sequence on the H8/300 - * platform. - */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/smp.h> -#include <linux/sem.h> -#include <linux/msg.h> -#include <linux/shm.h> -#include <linux/stat.h> -#include <linux/syscalls.h> -#include <linux/mman.h> -#include <linux/file.h> -#include <linux/fs.h> -#include <linux/ipc.h> - -#include <asm/setup.h> -#include <asm/uaccess.h> -#include <asm/cachectl.h> -#include <asm/traps.h> -#include <asm/unistd.h> - -/* sys_cacheflush -- no support. */ -asmlinkage int -sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) -{ - return -EINVAL; -} - -asmlinkage int sys_getpagesize(void) -{ - return PAGE_SIZE; -} - -#if defined(CONFIG_SYSCALL_PRINT) -asmlinkage void syscall_print(void *dummy,...) 
-{ - struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4); - printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n", - ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0); -} -#endif diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S deleted file mode 100644 index c55e0ed270d..00000000000 --- a/arch/h8300/kernel/syscalls.S +++ /dev/null @@ -1,338 +0,0 @@ -/* Systemcall Entry Table */ -#include <linux/sys.h> -#include <asm/linkage.h> -#include <asm/unistd.h> - -#define CALL(x) .long _ ## x - -.globl _sys_call_table - -#if defined(CONFIG_CPU_H8300H) - .h8300h -#endif -#if defined(CONFIG_CPU_H8S) - .h8300s -#endif - .section .text - .align 2 -_sys_call_table: - CALL(sys_ni_syscall) /* 0 - old "setup()" system call*/ - CALL(sys_exit) - CALL(sys_fork) - CALL(sys_read) - CALL(sys_write) - CALL(sys_open) /* 5 */ - CALL(sys_close) - CALL(sys_waitpid) - CALL(sys_creat) - CALL(sys_link) - CALL(sys_unlink) /* 10 */ - CALL(sys_execve) - CALL(sys_chdir) - CALL(sys_time) - CALL(sys_mknod) - CALL(sys_chmod) /* 15 */ - CALL(sys_chown16) - CALL(sys_ni_syscall) /* old break syscall holder */ - CALL(sys_stat) - CALL(sys_lseek) - CALL(sys_getpid) /* 20 */ - CALL(sys_mount) - CALL(sys_oldumount) - CALL(sys_setuid16) - CALL(sys_getuid16) - CALL(sys_stime) /* 25 */ - CALL(sys_ptrace) - CALL(sys_alarm) - CALL(sys_fstat) - CALL(sys_pause) - CALL(sys_utime) /* 30 */ - CALL(sys_ni_syscall) /* old stty syscall holder */ - CALL(sys_ni_syscall) /* old gtty syscall holder */ - CALL(sys_access) - CALL(sys_nice) - CALL(sys_ni_syscall) /* 35 old ftime syscall holder */ - CALL(sys_sync) - CALL(sys_kill) - CALL(sys_rename) - CALL(sys_mkdir) - CALL(sys_rmdir) /* 40 */ - CALL(sys_dup) - CALL(sys_pipe) - CALL(sys_times) - CALL(sys_ni_syscall) /* old prof syscall holder */ - CALL(sys_brk) /* 45 */ - CALL(sys_setgid16) - CALL(sys_getgid16) - CALL(sys_signal) - CALL(sys_geteuid16) - CALL(sys_getegid16) /* 50 */ - CALL(sys_acct) - CALL(sys_umount) /* recycled never used phys() */ - CALL(sys_ni_syscall) /* old lock syscall holder */ - CALL(sys_ioctl) - CALL(sys_fcntl) /* 55 */ - CALL(sys_ni_syscall) /* old mpx syscall holder */ - CALL(sys_setpgid) - CALL(sys_ni_syscall) /* old ulimit syscall holder */ - CALL(sys_ni_syscall) - CALL(sys_umask) /* 60 */ - CALL(sys_chroot) - CALL(sys_ustat) - CALL(sys_dup2) - CALL(sys_getppid) - CALL(sys_getpgrp) /* 65 */ - CALL(sys_setsid) - CALL(sys_sigaction) - CALL(sys_sgetmask) - CALL(sys_ssetmask) - CALL(sys_setreuid16) /* 70 */ - CALL(sys_setregid16) - CALL(sys_sigsuspend) - CALL(sys_sigpending) - CALL(sys_sethostname) - CALL(sys_setrlimit) /* 75 */ - CALL(sys_old_getrlimit) - CALL(sys_getrusage) - CALL(sys_gettimeofday) - CALL(sys_settimeofday) - CALL(sys_getgroups16) /* 80 */ - CALL(sys_setgroups16) - CALL(sys_old_select) - CALL(sys_symlink) - CALL(sys_lstat) - CALL(sys_readlink) /* 85 */ - CALL(sys_uselib) - CALL(sys_swapon) - CALL(sys_reboot) - CALL(sys_old_readdir) - CALL(sys_old_mmap) /* 90 */ - CALL(sys_munmap) - CALL(sys_truncate) - CALL(sys_ftruncate) - CALL(sys_fchmod) - CALL(sys_fchown16) /* 95 */ - CALL(sys_getpriority) - CALL(sys_setpriority) - CALL(sys_ni_syscall) /* old profil syscall holder */ - CALL(sys_statfs) - CALL(sys_fstatfs) /* 100 */ - CALL(sys_ni_syscall) /* ioperm for i386 */ - CALL(sys_socketcall) - CALL(sys_syslog) - CALL(sys_setitimer) - CALL(sys_getitimer) /* 105 */ - CALL(sys_newstat) - CALL(sys_newlstat) - CALL(sys_newfstat) - CALL(sys_ni_syscall) - CALL(sys_ni_syscall) /* iopl for i386 */ /* 
110 */ - CALL(sys_vhangup) - CALL(sys_ni_syscall) /* obsolete idle() syscall */ - CALL(sys_ni_syscall) /* vm86old for i386 */ - CALL(sys_wait4) - CALL(sys_swapoff) /* 115 */ - CALL(sys_sysinfo) - CALL(sys_ipc) - CALL(sys_fsync) - CALL(sys_sigreturn) - CALL(sys_clone) /* 120 */ - CALL(sys_setdomainname) - CALL(sys_newuname) - CALL(sys_cacheflush) /* modify_ldt for i386 */ - CALL(sys_adjtimex) - CALL(sys_ni_syscall) /* 125 sys_mprotect */ - CALL(sys_sigprocmask) - CALL(sys_ni_syscall) /* sys_create_module */ - CALL(sys_init_module) - CALL(sys_delete_module) - CALL(sys_ni_syscall) /* 130 sys_get_kernel_syms */ - CALL(sys_quotactl) - CALL(sys_getpgid) - CALL(sys_fchdir) - CALL(sys_bdflush) - CALL(sys_sysfs) /* 135 */ - CALL(sys_personality) - CALL(sys_ni_syscall) /* for afs_syscall */ - CALL(sys_setfsuid16) - CALL(sys_setfsgid16) - CALL(sys_llseek) /* 140 */ - CALL(sys_getdents) - CALL(sys_select) - CALL(sys_flock) - CALL(sys_ni_syscall) /* sys_msync */ - CALL(sys_readv) /* 145 */ - CALL(sys_writev) - CALL(sys_getsid) - CALL(sys_fdatasync) - CALL(sys_sysctl) - CALL(sys_ni_syscall) /* 150 sys_mlock */ - CALL(sys_ni_syscall) /* sys_munlock */ - CALL(sys_ni_syscall) /* sys_mlockall */ - CALL(sys_ni_syscall) /* sys_munlockall */ - CALL(sys_sched_setparam) - CALL(sys_sched_getparam) /* 155 */ - CALL(sys_sched_setscheduler) - CALL(sys_sched_getscheduler) - CALL(sys_sched_yield) - CALL(sys_sched_get_priority_max) - CALL(sys_sched_get_priority_min) /* 160 */ - CALL(sys_sched_rr_get_interval) - CALL(sys_nanosleep) - CALL(sys_ni_syscall) /* sys_mremap */ - CALL(sys_setresuid16) - CALL(sys_getresuid16) /* 165 */ - CALL(sys_ni_syscall) /* for vm86 */ - CALL(sys_ni_syscall) /* sys_query_module */ - CALL(sys_poll) - CALL(sys_ni_syscall) /* old nfsservctl */ - CALL(sys_setresgid16) /* 170 */ - CALL(sys_getresgid16) - CALL(sys_prctl) - CALL(sys_rt_sigreturn) - CALL(sys_rt_sigaction) - CALL(sys_rt_sigprocmask) /* 175 */ - CALL(sys_rt_sigpending) - CALL(sys_rt_sigtimedwait) - CALL(sys_rt_sigqueueinfo) - CALL(sys_rt_sigsuspend) - CALL(sys_pread64) /* 180 */ - CALL(sys_pwrite64) - CALL(sys_lchown16); - CALL(sys_getcwd) - CALL(sys_capget) - CALL(sys_capset) /* 185 */ - CALL(sys_sigaltstack) - CALL(sys_sendfile) - CALL(sys_ni_syscall) /* streams1 */ - CALL(sys_ni_syscall) /* streams2 */ - CALL(sys_vfork) /* 190 */ - CALL(sys_getrlimit) - CALL(sys_mmap_pgoff) - CALL(sys_truncate64) - CALL(sys_ftruncate64) - CALL(sys_stat64) /* 195 */ - CALL(sys_lstat64) - CALL(sys_fstat64) - CALL(sys_chown) - CALL(sys_getuid) - CALL(sys_getgid) /* 200 */ - CALL(sys_geteuid) - CALL(sys_getegid) - CALL(sys_setreuid) - CALL(sys_setregid) - CALL(sys_getgroups) /* 205 */ - CALL(sys_setgroups) - CALL(sys_fchown) - CALL(sys_setresuid) - CALL(sys_getresuid) - CALL(sys_setresgid) /* 210 */ - CALL(sys_getresgid) - CALL(sys_lchown) - CALL(sys_setuid) - CALL(sys_setgid) - CALL(sys_setfsuid) /* 215 */ - CALL(sys_setfsgid) - CALL(sys_pivot_root) - CALL(sys_ni_syscall) - CALL(sys_ni_syscall) - CALL(sys_getdents64) /* 220 */ - CALL(sys_fcntl64) - CALL(sys_ni_syscall) /* reserved TUX */ - CALL(sys_ni_syscall) /* reserved Security */ - CALL(sys_gettid) - CALL(sys_readahead) /* 225 */ - CALL(sys_setxattr) - CALL(sys_lsetxattr) - CALL(sys_fsetxattr) - CALL(sys_getxattr) - CALL(sys_lgetxattr) /* 230 */ - CALL(sys_fgetxattr) - CALL(sys_listxattr) - CALL(sys_llistxattr) - CALL(sys_flistxattr) - CALL(sys_removexattr) /* 235 */ - CALL(sys_lremovexattr) - CALL(sys_fremovexattr) - CALL(sys_tkill) - CALL(sys_sendfile64) - CALL(sys_futex) /* 240 */ - 
CALL(sys_sched_setaffinity) - CALL(sys_sched_getaffinity) - CALL(sys_ni_syscall) - CALL(sys_ni_syscall) - CALL(sys_io_setup) /* 245 */ - CALL(sys_io_destroy) - CALL(sys_io_getevents) - CALL(sys_io_submit) - CALL(sys_io_cancel) - CALL(sys_fadvise64) /* 250 */ - CALL(sys_ni_syscall) - CALL(sys_exit_group) - CALL(sys_lookup_dcookie) - CALL(sys_epoll_create) - CALL(sys_epoll_ctl) /* 255 */ - CALL(sys_epoll_wait) - CALL(sys_ni_syscall) /* sys_remap_file_pages */ - CALL(sys_set_tid_address) - CALL(sys_timer_create) - CALL(sys_timer_settime) /* 260 */ - CALL(sys_timer_gettime) - CALL(sys_timer_getoverrun) - CALL(sys_timer_delete) - CALL(sys_clock_settime) - CALL(sys_clock_gettime) /* 265 */ - CALL(sys_clock_getres) - CALL(sys_clock_nanosleep) - CALL(sys_statfs64) - CALL(sys_fstatfs64) - CALL(sys_tgkill) /* 270 */ - CALL(sys_utimes) - CALL(sys_fadvise64_64) - CALL(sys_ni_syscall) /* sys_vserver */ - CALL(sys_ni_syscall) - CALL(sys_get_mempolicy) /* 275 */ - CALL(sys_set_mempolicy) - CALL(sys_mq_open) - CALL(sys_mq_unlink) - CALL(sys_mq_timedsend) - CALL(sys_mq_timedreceive) /* 280 */ - CALL(sys_mq_notify) - CALL(sys_mq_getsetattr) - CALL(sys_waitid) - CALL(sys_ni_syscall) /* sys_kexec_load */ - CALL(sys_add_key) /* 285 */ - CALL(sys_request_key) - CALL(sys_keyctl) - CALL(sys_ioprio_set) - CALL(sys_ioprio_get) /* 290 */ - CALL(sys_inotify_init) - CALL(sys_inotify_add_watch) - CALL(sys_inotify_rm_watch) - CALL(sys_migrate_pages) - CALL(sys_openat) /* 295 */ - CALL(sys_mkdirat) - CALL(sys_mknodat) - CALL(sys_fchownat) - CALL(sys_futimesat) - CALL(sys_fstatat64) /* 300 */ - CALL(sys_unlinkat) - CALL(sys_renameat) - CALL(sys_linkat) - CALL(sys_symlinkat) - CALL(sys_readlinkat) /* 305 */ - CALL(sys_fchmodat) - CALL(sys_faccessat) - CALL(sys_ni_syscall) /* sys_pselect6 */ - CALL(sys_ni_syscall) /* sys_ppoll */ - CALL(sys_unshare) /* 310 */ - CALL(sys_set_robust_list) - CALL(sys_get_robust_list) - CALL(sys_splice) - CALL(sys_sync_file_range) - CALL(sys_tee) /* 315 */ - CALL(sys_vmsplice) - CALL(sys_ni_syscall) /* sys_move_pages */ - CALL(sys_getcpu) - CALL(sys_ni_syscall) /* sys_epoll_pwait */ - CALL(sys_setns) /* 320 */ diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c deleted file mode 100644 index e0f74191d55..00000000000 --- a/arch/h8300/kernel/time.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * linux/arch/h8300/kernel/time.c - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Copied/hacked from: - * - * linux/arch/m68k/kernel/time.c - * - * Copyright (C) 1991, 1992, 1995 Linus Torvalds - * - * This file contains the m68k-specific time handling details. - * Most of the stuff is located in the machine specific files. 
- * - * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills - */ - -#include <linux/errno.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/timex.h> -#include <linux/profile.h> - -#include <asm/io.h> -#include <asm/irq_regs.h> -#include <asm/timer.h> - -#define TICK_SIZE (tick_nsec / 1000) - -void h8300_timer_tick(void) -{ - if (current->pid) - profile_tick(CPU_PROFILING); - xtime_update(1); - update_process_times(user_mode(get_irq_regs())); -} - -void read_persistent_clock(struct timespec *ts) -{ - unsigned int year, mon, day, hour, min, sec; - - /* FIX by dqg : Set to zero for platforms that don't have tod */ - /* without this time is undefined and can overflow time_t, causing */ - /* very strange errors */ - year = 1980; - mon = day = 1; - hour = min = sec = 0; -#ifdef CONFIG_H8300_GETTOD - h8300_gettod (&year, &mon, &day, &hour, &min, &sec); -#endif - if ((year += 1900) < 1970) - year += 100; - ts->tv_sec = mktime(year, mon, day, hour, min, sec); - ts->tv_nsec = 0; -} - -void __init time_init(void) -{ - - h8300_timer_setup(); -} diff --git a/arch/h8300/kernel/timer/Makefile b/arch/h8300/kernel/timer/Makefile deleted file mode 100644 index bef0510ea6a..00000000000 --- a/arch/h8300/kernel/timer/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# h8300 internal timer handler - -obj-$(CONFIG_H8300_TIMER8) := timer8.o -obj-$(CONFIG_H8300_TIMER16) := timer16.o -obj-$(CONFIG_H8300_ITU) := itu.o -obj-$(CONFIG_H8300_TPU) := tpu.o diff --git a/arch/h8300/kernel/timer/itu.c b/arch/h8300/kernel/timer/itu.c deleted file mode 100644 index 0a8b5cd5bf3..00000000000 --- a/arch/h8300/kernel/timer/itu.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * linux/arch/h8300/kernel/timer/itu.c - * - * Yoshinori Sato <ysato@users.sourcefoge.jp> - * - * ITU Timer Handler - * - */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/timex.h> - -#include <asm/segment.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/regs306x.h> - -#if CONFIG_H8300_ITU_CH == 0 -#define ITUBASE 0xffff64 -#define ITUIRQ 24 -#elif CONFIG_H8300_ITU_CH == 1 -#define ITUBASE 0xffff6e -#define ITUIRQ 28 -#elif CONFIG_H8300_ITU_CH == 2 -#define ITUBASE 0xffff78 -#define ITUIRQ 32 -#elif CONFIG_H8300_ITU_CH == 3 -#define ITUBASE 0xffff82 -#define ITUIRQ 36 -#elif CONFIG_H8300_ITU_CH == 4 -#define ITUBASE 0xffff92 -#define ITUIRQ 40 -#else -#error Unknown timer channel. 
-#endif - -#define TCR 0 -#define TIOR 1 -#define TIER 2 -#define TSR 3 -#define TCNT 4 -#define GRA 6 -#define GRB 8 - -static irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - h8300_timer_tick(); - ctrl_bclr(IMFA, ITUBASE + TSR); - return IRQ_HANDLED; -} - -static struct irqaction itu_irq = { - .name = "itu", - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, -}; - -static const int __initconst divide_rate[] = {1, 2, 4, 8}; - -void __init h8300_timer_setup(void) -{ - unsigned int div; - unsigned int cnt; - - calc_param(cnt, div, divide_rate, 0x10000); - - setup_irq(ITUIRQ, &itu_irq); - - /* initialize timer */ - ctrl_outb(0, TSTR); - ctrl_outb(CCLR0 | div, ITUBASE + TCR); - ctrl_outb(0x01, ITUBASE + TIER); - ctrl_outw(cnt, ITUBASE + GRA); - ctrl_bset(CONFIG_H8300_ITU_CH, TSTR); -} diff --git a/arch/h8300/kernel/timer/timer16.c b/arch/h8300/kernel/timer/timer16.c deleted file mode 100644 index 462d9f58171..00000000000 --- a/arch/h8300/kernel/timer/timer16.c +++ /dev/null @@ -1,77 +0,0 @@ -/* - * linux/arch/h8300/kernel/timer/timer16.c - * - * Yoshinori Sato <ysato@users.sourcefoge.jp> - * - * 16bit Timer Handler - * - */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/timex.h> - -#include <asm/segment.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/regs306x.h> - -/* 16bit timer */ -#if CONFIG_H8300_TIMER16_CH == 0 -#define _16BASE 0xffff78 -#define _16IRQ 24 -#elif CONFIG_H8300_TIMER16_CH == 1 -#define _16BASE 0xffff80 -#define _16IRQ 28 -#elif CONFIG_H8300_TIMER16_CH == 2 -#define _16BASE 0xffff88 -#define _16IRQ 32 -#else -#error Unknown timer channel. -#endif - -#define TCR 0 -#define TIOR 1 -#define TCNT 2 -#define GRA 4 -#define GRB 6 - -#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*10000 /* Timer input freq. 
*/ - -static irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - h8300_timer_tick(); - ctrl_bclr(CONFIG_H8300_TIMER16_CH, TISRA); - return IRQ_HANDLED; -} - -static struct irqaction timer16_irq = { - .name = "timer-16", - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, -}; - -static const int __initconst divide_rate[] = {1, 2, 4, 8}; - -void __init h8300_timer_setup(void) -{ - unsigned int div; - unsigned int cnt; - - calc_param(cnt, div, divide_rate, 0x10000); - - setup_irq(_16IRQ, &timer16_irq); - - /* initialize timer */ - ctrl_outb(0, TSTR); - ctrl_outb(CCLR0 | div, _16BASE + TCR); - ctrl_outw(cnt, _16BASE + GRA); - ctrl_bset(4 + CONFIG_H8300_TIMER16_CH, TISRA); - ctrl_bset(CONFIG_H8300_TIMER16_CH, TSTR); -} diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c deleted file mode 100644 index 505f3415b40..00000000000 --- a/arch/h8300/kernel/timer/timer8.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * linux/arch/h8300/kernel/cpu/timer/timer8.c - * - * Yoshinori Sato <ysato@users.sourcefoge.jp> - * - * 8bit Timer Handler - * - */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/profile.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/timer.h> -#if defined(CONFIG_CPU_H8300H) -#include <asm/regs306x.h> -#endif -#if defined(CONFIG_CPU_H8S) -#include <asm/regs267x.h> -#endif - -/* 8bit timer x2 */ -#define CMFA 6 - -#if defined(CONFIG_H8300_TIMER8_CH0) -#define _8BASE _8TCR0 -#ifdef CONFIG_CPU_H8300H -#define _8IRQ 36 -#endif -#ifdef CONFIG_CPU_H8S -#define _8IRQ 72 -#endif -#elif defined(CONFIG_H8300_TIMER8_CH2) -#ifdef CONFIG_CPU_H8300H -#define _8BASE _8TCR2 -#define _8IRQ 40 -#endif -#endif - -#ifndef _8BASE -#error Unknown timer channel. 
-#endif - -#define _8TCR 0 -#define _8TCSR 2 -#define TCORA 4 -#define TCORB 6 -#define _8TCNT 8 - -#define CMIEA 0x40 -#define CCLR_CMA 0x08 -#define CKS2 0x04 - -/* - * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "xtime_update()" routine every clocktick - */ - -static irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - h8300_timer_tick(); - ctrl_bclr(CMFA, _8BASE + _8TCSR); - return IRQ_HANDLED; -} - -static struct irqaction timer8_irq = { - .name = "timer-8", - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, -}; - -static const int __initconst divide_rate[] = {8, 64, 8192}; - -void __init h8300_timer_setup(void) -{ - unsigned int div; - unsigned int cnt; - - calc_param(cnt, div, divide_rate, 0x10000); - div++; - - setup_irq(_8IRQ, &timer8_irq); - -#if defined(CONFIG_CPU_H8S) - /* Timer module enable */ - ctrl_bclr(0, MSTPCRL) -#endif - - /* initialize timer */ - ctrl_outw(cnt, _8BASE + TCORA); - ctrl_outw(0x0000, _8BASE + _8TCSR); - ctrl_outw((CMIEA|CCLR_CMA|CKS2) << 8 | div, - _8BASE + _8TCR); -} diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c deleted file mode 100644 index 0350f6204ec..00000000000 --- a/arch/h8300/kernel/timer/tpu.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * linux/arch/h8300/kernel/timer/tpu.c - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * TPU Timer Handler - * - */ - -#include <linux/errno.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/param.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/timex.h> - -#include <asm/segment.h> -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/regs267x.h> - -/* TPU */ -#if CONFIG_H8300_TPU_CH == 0 -#define TPUBASE 0xffffd0 -#define TPUIRQ 40 -#elif CONFIG_H8300_TPU_CH == 1 -#define TPUBASE 0xffffe0 -#define TPUIRQ 48 -#elif CONFIG_H8300_TPU_CH == 2 -#define TPUBASE 0xfffff0 -#define TPUIRQ 52 -#elif CONFIG_H8300_TPU_CH == 3 -#define TPUBASE 0xfffe80 -#define TPUIRQ 56 -#elif CONFIG_H8300_TPU_CH == 4 -#define TPUBASE 0xfffe90 -#define TPUIRQ 64 -#else -#error Unknown timer channel. 
-#endif - -#define _TCR 0 -#define _TMDR 1 -#define _TIOR 2 -#define _TIER 4 -#define _TSR 5 -#define _TCNT 6 -#define _GRA 8 -#define _GRB 10 - -#define CCLR0 0x20 - -static irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - h8300_timer_tick(); - ctrl_bclr(0, TPUBASE + _TSR); - return IRQ_HANDLED; -} - -static struct irqaction tpu_irq = { - .name = "tpu", - .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, -}; - -static const int __initconst divide_rate[] = { -#if CONFIG_H8300_TPU_CH == 0 - 1,4,16,64,0,0,0,0, -#elif (CONFIG_H8300_TPU_CH == 1) || (CONFIG_H8300_TPU_CH == 5) - 1,4,16,64,0,0,256,0, -#elif (CONFIG_H8300_TPU_CH == 2) || (CONFIG_H8300_TPU_CH == 4) - 1,4,16,64,0,0,0,1024, -#elif CONFIG_H8300_TPU_CH == 3 - 1,4,16,64,0,1024,256,4096, -#endif -}; - -void __init h8300_timer_setup(void) -{ - unsigned int cnt; - unsigned int div; - - calc_param(cnt, div, divide_rate, 0x10000); - - setup_irq(TPUIRQ, &tpu_irq); - - /* TPU module enabled */ - ctrl_bclr(3, MSTPCRH); - - ctrl_outb(0, TSTR); - ctrl_outb(CCLR0 | div, TPUBASE + _TCR); - ctrl_outb(0, TPUBASE + _TMDR); - ctrl_outw(0, TPUBASE + _TIOR); - ctrl_outb(0x01, TPUBASE + _TIER); - ctrl_outw(cnt, TPUBASE + _GRA); - ctrl_bset(CONFIG_H8300_TPU_CH, TSTR); -} diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c deleted file mode 100644 index cfe494dbe3d..00000000000 --- a/arch/h8300/kernel/traps.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * linux/arch/h8300/boot/traps.c -- general exception handling code - * H8/300 support Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Cloned from Linux/m68k. - * - * No original Copyright holder listed, - * Probable original (C) Roman Zippel (assigned DJD, 1999) - * - * Copyright 1999-2000 D. Jeff Dionne, <jeff@rt-control.com> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive - * for more details. - */ - -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/bug.h> - -#include <asm/irq.h> -#include <asm/traps.h> -#include <asm/page.h> - -static DEFINE_SPINLOCK(die_lock); - -/* - * this must be called very early as the kernel might - * use some instruction that are emulated on the 060 - */ - -void __init base_trap_init(void) -{ -} - -void __init trap_init (void) -{ -} - -asmlinkage void set_esp0 (unsigned long ssp) -{ - current->thread.esp0 = ssp; -} - -/* - * Generic dumping code. Used for panic and debug. 
- */ - -static void dump(struct pt_regs *fp) -{ - unsigned long *sp; - unsigned char *tp; - int i; - - printk("\nCURRENT PROCESS:\n\n"); - printk("COMM=%s PID=%d\n", current->comm, current->pid); - if (current->mm) { - printk("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n", - (int) current->mm->start_code, - (int) current->mm->end_code, - (int) current->mm->start_data, - (int) current->mm->end_data, - (int) current->mm->end_data, - (int) current->mm->brk); - printk("USER-STACK=%08x KERNEL-STACK=%08lx\n\n", - (int) current->mm->start_stack, - (int) PAGE_SIZE+(unsigned long)current); - } - - show_regs(fp); - printk("\nCODE:"); - tp = ((unsigned char *) fp->pc) - 0x20; - for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) { - if ((i % 0x10) == 0) - printk("\n%08x: ", (int) (tp + i)); - printk("%08x ", (int) *sp++); - } - printk("\n"); - - printk("\nKERNEL STACK:"); - tp = ((unsigned char *) fp) - 0x40; - for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) { - if ((i % 0x10) == 0) - printk("\n%08x: ", (int) (tp + i)); - printk("%08x ", (int) *sp++); - } - printk("\n"); - if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE)) - printk("(Possibly corrupted stack page??)\n"); - - printk("\n\n"); -} - -void die(const char *str, struct pt_regs *fp, unsigned long err) -{ - static int diecount; - - oops_enter(); - - console_verbose(); - spin_lock_irq(&die_lock); - report_bug(fp->pc, fp); - printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++diecount); - dump(fp); - - spin_unlock_irq(&die_lock); - do_exit(SIGSEGV); -} - -extern char _start, _etext; -#define check_kernel_text(addr) \ - ((addr >= (unsigned long)(&_start)) && \ - (addr < (unsigned long)(&_etext))) - -static int kstack_depth_to_print = 24; - -void show_stack(struct task_struct *task, unsigned long *esp) -{ - unsigned long *stack, addr; - int i; - - if (esp == NULL) - esp = (unsigned long *) &esp; - - stack = esp; - - printk("Stack from %08lx:", (unsigned long)stack); - for (i = 0; i < kstack_depth_to_print; i++) { - if (((unsigned long)stack & (THREAD_SIZE - 1)) == 0) - break; - if (i % 8 == 0) - printk("\n "); - printk(" %08lx", *stack++); - } - - printk("\nCall Trace:"); - i = 0; - stack = esp; - while (((unsigned long)stack & (THREAD_SIZE - 1)) != 0) { - addr = *stack++; - /* - * If the address is either in the text segment of the - * kernel, or in the region which contains vmalloc'ed - * memory, it *may* be the address of a calling - * routine; if so, print it so that someone tracing - * down the cause of the crash will be able to figure - * out the call path that was taken. 
- */ - if (check_kernel_text(addr)) { - if (i % 4 == 0) - printk("\n "); - printk(" [<%08lx>]", addr); - i++; - } - } - printk("\n"); -} - -void show_trace_task(struct task_struct *tsk) -{ - show_stack(tsk,(unsigned long *)tsk->thread.esp0); -} diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S deleted file mode 100644 index 3253fed42ac..00000000000 --- a/arch/h8300/kernel/vmlinux.lds.S +++ /dev/null @@ -1,157 +0,0 @@ -#include <asm-generic/vmlinux.lds.h> -#include <asm/page.h> - -/* target memory map */ -#ifdef CONFIG_H8300H_GENERIC -#define ROMTOP 0x000000 -#define ROMSIZE 0x400000 -#define RAMTOP 0x400000 -#define RAMSIZE 0x400000 -#endif - -#ifdef CONFIG_H8300H_AKI3068NET -#define ROMTOP 0x000000 -#define ROMSIZE 0x080000 -#define RAMTOP 0x400000 -#define RAMSIZE 0x200000 -#endif - -#ifdef CONFIG_H8300H_H8MAX -#define ROMTOP 0x000000 -#define ROMSIZE 0x080000 -#define RAMTOP 0x400000 -#define RAMSIZE 0x200000 -#endif - -#ifdef CONFIG_H8300H_SIM -#define ROMTOP 0x000000 -#define ROMSIZE 0x400000 -#define RAMTOP 0x400000 -#define RAMSIZE 0x400000 -#endif - -#ifdef CONFIG_H8S_SIM -#define ROMTOP 0x000000 -#define ROMSIZE 0x400000 -#define RAMTOP 0x400000 -#define RAMSIZE 0x800000 -#endif - -#ifdef CONFIG_H8S_EDOSK2674 -#define ROMTOP 0x000000 -#define ROMSIZE 0x400000 -#define RAMTOP 0x400000 -#define RAMSIZE 0x800000 -#endif - -#if defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM) -INPUT(romfs.o) -#endif - -_jiffies = _jiffies_64 + 4; - -ENTRY(__start) - -SECTIONS -{ -#if defined(CONFIG_ROMKERNEL) - . = ROMTOP; - .vectors : - { - __vector = . ; - *(.vectors*) - } -#else - . = RAMTOP; - .bootvec : - { - *(.bootvec) - } -#endif - .text : - { - _text = .; -#if defined(CONFIG_ROMKERNEL) - *(.int_redirect) -#endif - __stext = . ; - TEXT_TEXT - SCHED_TEXT - LOCK_TEXT - __etext = . ; - } - EXCEPTION_TABLE(16) - - RODATA -#if defined(CONFIG_ROMKERNEL) - SECURITY_INIT -#endif - ROEND = .; -#if defined(CONFIG_ROMKERNEL) - . = RAMTOP; - .data : AT(ROEND) -#else - .data : -#endif - { - __sdata = . ; - ___data_start = . ; - - INIT_TASK_DATA(0x2000) - . = ALIGN(0x4) ; - DATA_DATA - . = ALIGN(0x4) ; - *(.data.*) - - . = ALIGN(0x4) ; - ___init_begin = .; - __sinittext = .; - INIT_TEXT - __einittext = .; - INIT_DATA - . = ALIGN(0x4) ; - INIT_SETUP(0x4) - ___setup_start = .; - *(.init.setup) - . = ALIGN(0x4) ; - ___setup_end = .; - INIT_CALLS - CON_INITCALL - EXIT_TEXT - EXIT_DATA - INIT_RAM_FS - . = ALIGN(0x4) ; - ___init_end = .; - __edata = . ; - } -#if defined(CONFIG_RAMKERNEL) - SECURITY_INIT -#endif - __begin_data = LOADADDR(.data); - .bss : - { - . = ALIGN(0x4) ; - __sbss = . ; - ___bss_start = . ; - *(.bss*) - . = ALIGN(0x4) ; - *(COMMON) - . = ALIGN(0x4) ; - ___bss_stop = . ; - __ebss = . ; - __end = . ; - __ramstart = .; - } - .romfs : - { - *(.romfs*) - } - . = RAMTOP+RAMSIZE; - .dummy : - { - COMMAND_START = . - 0x200 ; - __ramend = . ; - } - - DISCARDS -} diff --git a/arch/h8300/lib/Makefile b/arch/h8300/lib/Makefile deleted file mode 100644 index 1577f5075b1..00000000000 --- a/arch/h8300/lib/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for H8/300-specific library files.. 
-# - -lib-y = ashrdi3.o checksum.o memcpy.o memset.o abs.o romfs.o diff --git a/arch/h8300/lib/abs.S b/arch/h8300/lib/abs.S deleted file mode 100644 index ddd1fb3d01a..00000000000 --- a/arch/h8300/lib/abs.S +++ /dev/null @@ -1,21 +0,0 @@ -;;; abs.S - -#include <asm/linkage.h> - -#if defined(__H8300H__) - .h8300h -#endif -#if defined(__H8300S__) - .h8300s -#endif - .text -.global _abs - -;;; int abs(int n) -_abs: - mov.l er0,er0 - bpl 1f - neg.l er0 -1: - rts - diff --git a/arch/h8300/lib/ashrdi3.c b/arch/h8300/lib/ashrdi3.c deleted file mode 100644 index 78efb65e315..00000000000 --- a/arch/h8300/lib/ashrdi3.c +++ /dev/null @@ -1,63 +0,0 @@ -/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */ -/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. - -This file is part of GNU CC. - -GNU CC is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2, or (at your option) -any later version. - -GNU CC is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with GNU CC; see the file COPYING. If not, write to -the Free Software Foundation, 59 Temple Place - Suite 330, -Boston, MA 02111-1307, USA. */ - -#define BITS_PER_UNIT 8 - -typedef int SItype __attribute__ ((mode (SI))); -typedef unsigned int USItype __attribute__ ((mode (SI))); -typedef int DItype __attribute__ ((mode (DI))); -typedef int word_type __attribute__ ((mode (__word__))); - -struct DIstruct {SItype high, low;}; - -typedef union -{ - struct DIstruct s; - DItype ll; -} DIunion; - -DItype -__ashrdi3 (DItype u, word_type b) -{ - DIunion w; - word_type bm; - DIunion uu; - - if (b == 0) - return u; - - uu.ll = u; - - bm = (sizeof (SItype) * BITS_PER_UNIT) - b; - if (bm <= 0) - { - /* w.s.high = 1..1 or 0..0 */ - w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1); - w.s.low = uu.s.high >> -bm; - } - else - { - USItype carries = (USItype)uu.s.high << bm; - w.s.high = uu.s.high >> b; - w.s.low = ((USItype)uu.s.low >> b) | carries; - } - - return w.ll; -} diff --git a/arch/h8300/lib/checksum.c b/arch/h8300/lib/checksum.c deleted file mode 100644 index bdc5b032acd..00000000000 --- a/arch/h8300/lib/checksum.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * INET An implementation of the TCP/IP protocol suite for the LINUX - * operating system. INET is implemented using the BSD Socket - * interface as the means of communication with the user level. - * - * IP/TCP/UDP checksumming routines - * - * Authors: Jorge Cwik, <jorge@laser.satlink.net> - * Arnt Gulbrandsen, <agulbra@nvg.unit.no> - * Tom May, <ftom@netcom.com> - * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> - * Lots of code moved from tcp.c and ip.c; see those files - * for more names. - * - * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: - * Fixed some nasty bugs, causing some horrible crashes. - * A: At some points, the sum (%0) was used as - * length-counter instead of the length counter - * (%1). Thanks to Roman Hodek for pointing this out. - * B: GCC seems to mess up if one uses too many - * data-registers to hold input values and one tries to - * specify d0 and d1 as scratch registers. Letting gcc choose these - * registers itself solves the problem. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most - of the assembly has to go. */ - -#include <net/checksum.h> -#include <linux/module.h> - -static inline unsigned short from32to16(unsigned long x) -{ - /* add up 16-bit and 16-bit for 16+c bit */ - x = (x & 0xffff) + (x >> 16); - /* add up carry.. */ - x = (x & 0xffff) + (x >> 16); - return x; -} - -static unsigned long do_csum(const unsigned char * buff, int len) -{ - int odd, count; - unsigned long result = 0; - - if (len <= 0) - goto out; - odd = 1 & (unsigned long) buff; - if (odd) { - result = *buff; - len--; - buff++; - } - count = len >> 1; /* nr of 16-bit words.. */ - if (count) { - if (2 & (unsigned long) buff) { - result += *(unsigned short *) buff; - count--; - len -= 2; - buff += 2; - } - count >>= 1; /* nr of 32-bit words.. */ - if (count) { - unsigned long carry = 0; - do { - unsigned long w = *(unsigned long *) buff; - count--; - buff += 4; - result += carry; - result += w; - carry = (w > result); - } while (count); - result += carry; - result = (result & 0xffff) + (result >> 16); - } - if (len & 2) { - result += *(unsigned short *) buff; - buff += 2; - } - } - if (len & 1) - result += (*buff << 8); - result = from32to16(result); - if (odd) - result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); -out: - return result; -} - -/* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. - */ -__sum16 ip_fast_csum(const void *iph, unsigned int ihl) -{ - return (__force __sum16)~do_csum(iph,ihl*4); -} - -/* - * computes the checksum of a memory block at buff, length len, - * and adds in "sum" (32-bit) - * - * returns a 32-bit number suitable for feeding into itself - * or csum_tcpudp_magic - * - * this function must be called with even lengths, except - * for the last fragment, which may be odd - * - * it's best to have buff aligned on a 32-bit boundary - */ -/* - * Egads... That thing apparently assumes that *all* checksums it ever sees will - * be folded. Very likely a bug. - */ -__wsum csum_partial(const void *buff, int len, __wsum sum) -{ - unsigned int result = do_csum(buff, len); - - /* add in old sum, and carry.. 
*/ - result += (__force u32)sum; - /* 16+c bits -> 16 bits */ - result = (result & 0xffff) + (result >> 16); - return (__force __wsum)result; -} - -EXPORT_SYMBOL(csum_partial); - -/* - * this routine is used for miscellaneous IP-like checksums, mainly - * in icmp.c - */ -__sum16 ip_compute_csum(const void *buff, int len) -{ - return (__force __sum16)~do_csum(buff,len); -} - -/* - * copy from fs while checksumming, otherwise like csum_partial - */ - -__wsum -csum_partial_copy_from_user(const void __user *src, void *dst, int len, - __wsum sum, int *csum_err) -{ - if (csum_err) *csum_err = 0; - memcpy(dst, (__force const void *)src, len); - return csum_partial(dst, len, sum); -} - -/* - * copy from ds while checksumming, otherwise like csum_partial - */ - -__wsum -csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) -{ - memcpy(dst, src, len); - return csum_partial(dst, len, sum); -} diff --git a/arch/h8300/lib/memcpy.S b/arch/h8300/lib/memcpy.S deleted file mode 100644 index cad325e2c0e..00000000000 --- a/arch/h8300/lib/memcpy.S +++ /dev/null @@ -1,84 +0,0 @@ -;;; memcpy.S - -#include <asm/linkage.h> - -#if defined(__H8300H__) - .h8300h -#endif -#if defined(__H8300S__) - .h8300s -#endif - - .text -.global _memcpy - -;;; void *memcpy(void *to, void *from, size_t n) -_memcpy: - mov.l er2,er2 - bne 1f - rts -1: - ;; address check - bld #0,r0l - bxor #0,r1l - bcs 4f - mov.l er4,@-sp - mov.l er0,@-sp - btst #0,r0l - beq 1f - ;; (aligned even) odd address - mov.b @er1,r3l - mov.b r3l,@er0 - adds #1,er1 - adds #1,er0 - dec.l #1,er2 - beq 3f -1: - ;; n < sizeof(unsigned long) check - sub.l er4,er4 - adds #4,er4 ; loop count check value - cmp.l er4,er2 - blo 2f - ;; unsigned long copy -1: - mov.l @er1,er3 - mov.l er3,@er0 - adds #4,er0 - adds #4,er1 - subs #4,er2 - cmp.l er4,er2 - bcc 1b - ;; rest -2: - mov.l er2,er2 - beq 3f -1: - mov.b @er1,r3l - mov.b r3l,@er0 - adds #1,er1 - adds #1,er0 - dec.l #1,er2 - bne 1b -3: - mov.l @sp+,er0 - mov.l @sp+,er4 - rts - - ;; odd <- even / even <- odd -4: - mov.l er4,er3 - mov.l er2,er4 - mov.l er5,er2 - mov.l er1,er5 - mov.l er6,er1 - mov.l er0,er6 -1: - eepmov.w - mov.w r4,r4 - bne 1b - dec.w #1,e4 - bpl 1b - mov.l er1,er6 - mov.l er2,er5 - mov.l er3,er4 - rts diff --git a/arch/h8300/lib/memset.S b/arch/h8300/lib/memset.S deleted file mode 100644 index 4549a64c5b7..00000000000 --- a/arch/h8300/lib/memset.S +++ /dev/null @@ -1,61 +0,0 @@ -/* memset.S */ - -#include <asm/linkage.h> - -#if defined(__H8300H__) - .h8300h -#endif -#if defined(__H8300S__) - .h8300s -#endif - .text - -.global _memset - -;;void *memset(*ptr, int c, size_t count) -;; ptr = er0 -;; c = er1(r1l) -;; count = er2 -_memset: - btst #0,r0l - beq 2f - - ;; odd address -1: - mov.b r1l,@er0 - adds #1,er0 - dec.l #1,er2 - beq 6f - - ;; even address -2: - mov.l er2,er3 - cmp.l #4,er2 - blo 4f - ;; count>=4 -> count/4 -#if defined(__H8300H__) - shlr.l er2 - shlr.l er2 -#endif -#if defined(__H8300S__) - shlr.l #2,er2 -#endif - ;; byte -> long - mov.b r1l,r1h - mov.w r1,e1 -3: - mov.l er1,@er0 - adds #4,er0 - dec.l #1,er2 - bne 3b -4: - ;; count % 4 - and.b #3,r3l - beq 6f -5: - mov.b r1l,@er0 - adds #1,er0 - dec.b r3l - bne 5b -6: - rts diff --git a/arch/h8300/lib/romfs.S b/arch/h8300/lib/romfs.S deleted file mode 100644 index 68910d8e1ff..00000000000 --- a/arch/h8300/lib/romfs.S +++ /dev/null @@ -1,57 +0,0 @@ -/* romfs move to __ebss */ - -#include <asm/linkage.h> - -#if defined(__H8300H__) - .h8300h -#endif -#if defined(__H8300S__) - .h8300s -#endif - -#define BLKOFFSET 512 - 
- .text -.globl __move_romfs -_romfs_sig_len = 8 - -__move_romfs: - mov.l #__sbss,er0 - mov.l #_romfs_sig,er1 - mov.b #_romfs_sig_len,r3l -1: /* check romfs image */ - mov.b @er0+,r2l - mov.b @er1+,r2h - cmp.b r2l,r2h - bne 2f - dec.b r3l - bne 1b - - /* find romfs image */ - mov.l @__sbss+8,er0 /* romfs length(be) */ - mov.l #__sbss,er1 - add.l er0,er1 /* romfs image end */ - mov.l #__ebss,er2 - add.l er0,er2 /* distination address */ -#if defined(CONFIG_INTELFLASH) - add.l #BLKOFFSET,er2 -#endif - adds #2,er0 - adds #1,er0 - shlr er0 - shlr er0 /* transfer length */ -1: - mov.l @er1,er3 /* copy image */ - mov.l er3,@er2 - subs #4,er1 - subs #4,er2 - dec.l #1,er0 - bpl 1b -2: - rts - - .section .rodata -_romfs_sig: - .ascii "-rom1fs-" - - .end diff --git a/arch/h8300/mm/Makefile b/arch/h8300/mm/Makefile deleted file mode 100644 index 5f4bc42b645..00000000000 --- a/arch/h8300/mm/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the linux m68k-specific parts of the memory manager. -# - -obj-y := init.o fault.o memory.o kmap.o diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c deleted file mode 100644 index 47253597700..00000000000 --- a/arch/h8300/mm/fault.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * linux/arch/h8300/mm/fault.c - * - * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, - * Copyright (C) 2000 Lineo, Inc. (www.lineo.com) - * - * Based on: - * - * linux/arch/m68knommu/mm/fault.c - * linux/arch/m68k/mm/fault.c - * - * Copyright (C) 1995 Hamish Macdonald - */ - -#include <linux/mman.h> -#include <linux/mm.h> -#include <linux/kernel.h> -#include <linux/ptrace.h> - -#include <asm/pgtable.h> - -/* - * This routine handles page faults. It determines the problem, and - * then passes it off to one of the appropriate routines. - * - * error_code: - * bit 0 == 0 means no page found, 1 means protection fault - * bit 1 == 0 means read, 1 means write - * - * If this routine detects a bad access, it returns 1, otherwise it - * returns 0. - */ -asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address, - unsigned long error_code) -{ -#ifdef DEBUG - printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n", - regs->sr, regs->pc, address, error_code); -#endif - -/* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. - */ - if ((unsigned long) address < PAGE_SIZE) { - printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); - } else - printk(KERN_ALERT "Unable to handle kernel access"); - printk(" at virtual address %08lx\n",address); - if (!user_mode(regs)) - die("Oops", regs, error_code); - do_exit(SIGKILL); - - return 1; -} - diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c deleted file mode 100644 index 6c1251e491a..00000000000 --- a/arch/h8300/mm/init.c +++ /dev/null @@ -1,155 +0,0 @@ -/* - * linux/arch/h8300/mm/init.c - * - * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>, - * Kenneth Albanowski <kjahds@kjahds.com>, - * Copyright (C) 2000 Lineo, Inc. 
(www.lineo.com) - * - * Based on: - * - * linux/arch/m68knommu/mm/init.c - * linux/arch/m68k/mm/init.c - * - * Copyright (C) 1995 Hamish Macdonald - * - * JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com) - * DEC/2000 -- linux 2.4 support <davidm@snapgear.com> - */ - -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/ptrace.h> -#include <linux/mman.h> -#include <linux/mm.h> -#include <linux/swap.h> -#include <linux/init.h> -#include <linux/highmem.h> -#include <linux/pagemap.h> -#include <linux/bootmem.h> -#include <linux/gfp.h> - -#include <asm/setup.h> -#include <asm/segment.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/sections.h> - -#undef DEBUG - -/* - * BAD_PAGE is the page that is used for page faults when linux - * is out-of-memory. Older versions of linux just did a - * do_exit(), but using this instead means there is less risk - * for a process dying in kernel mode, possibly leaving a inode - * unused etc.. - * - * BAD_PAGETABLE is the accompanying page-table: it is initialized - * to point to BAD_PAGE entries. - * - * ZERO_PAGE is a special page that is used for zero-initialized - * data and COW. - */ -static unsigned long empty_bad_page_table; - -static unsigned long empty_bad_page; - -unsigned long empty_zero_page; - -extern unsigned long rom_length; - -extern unsigned long memory_start; -extern unsigned long memory_end; - -/* - * paging_init() continues the virtual memory environment setup which - * was begun by the code in arch/head.S. - * The parameters are pointers to where to stick the starting and ending - * addresses of available kernel virtual memory. - */ -void __init paging_init(void) -{ - /* - * Make sure start_mem is page aligned, otherwise bootmem and - * page_alloc get different views og the world. - */ -#ifdef DEBUG - unsigned long start_mem = PAGE_ALIGN(memory_start); -#endif - unsigned long end_mem = memory_end & PAGE_MASK; - -#ifdef DEBUG - printk ("start_mem is %#lx\nvirtual_end is %#lx\n", - start_mem, end_mem); -#endif - - /* - * Initialize the bad page table and bad page to point - * to a couple of allocated pages. - */ - empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); - empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); - empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); - memset((void *)empty_zero_page, 0, PAGE_SIZE); - - /* - * Set up SFC/DFC registers (user data space). 
- */ - set_fs (USER_DS); - -#ifdef DEBUG - printk ("before free_area_init\n"); - - printk ("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n", - start_mem, end_mem); -#endif - - { - unsigned long zones_size[MAX_NR_ZONES] = {0, }; - - zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; - zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT; -#ifdef CONFIG_HIGHMEM - zones_size[ZONE_HIGHMEM] = 0; -#endif - free_area_init(zones_size); - } -} - -void __init mem_init(void) -{ - unsigned long codesize = _etext - _stext; - - pr_devel("Mem_init: start=%lx, end=%lx\n", memory_start, memory_end); - - high_memory = (void *) (memory_end & PAGE_MASK); - max_mapnr = MAP_NR(high_memory); - - /* this will put all low memory onto the freelists */ - free_all_bootmem(); - - mem_init_print_info(NULL); - if (rom_length > 0 && rom_length > codesize) - pr_info("Memory available: %luK/%luK ROM\n", - (rom_length - codesize) >> 10, rom_length >> 10); -} - - -#ifdef CONFIG_BLK_DEV_INITRD -void free_initrd_mem(unsigned long start, unsigned long end) -{ - free_reserved_area((void *)start, (void *)end, -1, "initrd"); -} -#endif - -void -free_initmem(void) -{ -#ifdef CONFIG_RAMKERNEL - free_initmem_default(-1); -#endif -} - diff --git a/arch/h8300/mm/kmap.c b/arch/h8300/mm/kmap.c deleted file mode 100644 index f79edcdadf3..00000000000 --- a/arch/h8300/mm/kmap.c +++ /dev/null @@ -1,58 +0,0 @@ -/* - * linux/arch/h8300/mm/kmap.c - * - * Based on - * linux/arch/m68knommu/mm/kmap.c - * - * Copyright (C) 2000 Lineo, <davidm@snapgear.com> - * Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com> - */ - -#include <linux/mm.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/types.h> -#include <linux/vmalloc.h> - -#include <asm/setup.h> -#include <asm/segment.h> -#include <asm/page.h> -#include <asm/pgalloc.h> -#include <asm/io.h> - -#undef DEBUG - -#define VIRT_OFFSET (0x01000000) - -/* - * Map some physical address range into the kernel address space. - */ -void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag) -{ - return (void *)(physaddr + VIRT_OFFSET); -} - -/* - * Unmap a ioremap()ed region again. - */ -void iounmap(void *addr) -{ -} - -/* - * __iounmap unmaps nearly everything, so be careful - * it doesn't free currently pointer/page tables anymore but it - * wans't used anyway and might be added later. - */ -void __iounmap(void *addr, unsigned long size) -{ -} - -/* - * Set new cache mode for some kernel address space. - * The caller must push data for that range itself, if such data may already - * be in the cache. 
- */ -void kernel_set_cachemode(void *addr, unsigned long size, int cmode) -{ -} diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c deleted file mode 100644 index 06e36464139..00000000000 --- a/arch/h8300/mm/memory.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * linux/arch/h8300/mm/memory.c - * - * Copyright (C) 2002 Yoshinori Sato <ysato@users.sourceforge.jp>, - * - * Based on: - * - * linux/arch/m68knommu/mm/memory.c - * - * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>, - * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) - * - * Based on: - * - * linux/arch/m68k/mm/memory.c - * - * Copyright (C) 1995 Hamish Macdonald - */ - -#include <linux/mm.h> -#include <linux/kernel.h> -#include <linux/string.h> -#include <linux/types.h> - -#include <asm/setup.h> -#include <asm/segment.h> -#include <asm/page.h> -#include <asm/pgtable.h> -#include <asm/traps.h> -#include <asm/io.h> - -void cache_clear (unsigned long paddr, int len) -{ -} - - -void cache_push (unsigned long paddr, int len) -{ -} - -void cache_push_v (unsigned long vaddr, int len) -{ -} - -/* - * Map some physical address range into the kernel address space. - */ - -unsigned long kernel_map(unsigned long paddr, unsigned long size, - int nocacheflag, unsigned long *memavailp ) -{ - return paddr; -} - diff --git a/arch/h8300/platform/h8300h/Makefile b/arch/h8300/platform/h8300h/Makefile deleted file mode 100644 index 420f73b0d96..00000000000 --- a/arch/h8300/platform/h8300h/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the linux kernel. -# -# Reuse any files we can from the H8/300H -# - -obj-y := irq.o ptrace_h8300h.o diff --git a/arch/h8300/platform/h8300h/aki3068net/Makefile b/arch/h8300/platform/h8300h/aki3068net/Makefile deleted file mode 100644 index b7ff78050b7..00000000000 --- a/arch/h8300/platform/h8300h/aki3068net/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the linux kernel. -# - -extra-y := crt0_ram.o diff --git a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S b/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S deleted file mode 100644 index b2ad0f2d041..00000000000 --- a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S +++ /dev/null @@ -1,110 +0,0 @@ -/* - * linux/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: AE-3068 (aka. 
aki3068net) - * Memory Layout : RAM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> - -#if !defined(CONFIG_BLKDEV_RESERVE) -#if defined(CONFIG_GDB_DEBUG) -#define RAMEND (__ramend - 0xc000) -#else -#define RAMEND __ramend -#endif -#else -#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS -#endif - - .global __start - .global _command_line - .global __platform_gpio_table - .global __target_name - - .h8300h - - .section .text - .file "crt0_ram.S" - - /* CPU Reset entry */ -__start: - mov.l #RAMEND,sp - ldc #0x80,ccr - - /* Peripheral Setup */ - -#if defined(CONFIG_MTD_UCLINUX) - /* move romfs image */ - jsr @__move_romfs -#endif - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr er4 - shlr er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* copy kernel commandline */ - mov.l #COMMAND_START,er5 - mov.l #_command_line,er6 - mov.w #512,r4 - eepmov.w - - /* uClinux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -gpio_table: - ;; P1DDR - .byte 0xff,0xff - ;; P2DDR - .byte 0xff,0xff - ;; P3DDR - .byte 0xff,0x00 - ;; P4DDR - .byte 0x00,0x00 - ;; P5DDR - .byte 0x01,0x01 - ;; P6DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; P8DDR - .byte 0x0c,0x0c - ;; P9DDR - .byte 0x00,0x00 - ;; PADDR - .byte 0x00,0x00 - ;; PBDDR - .byte 0x30,0x30 - -__target_name: - .asciz "AE-3068" - - .section .bootvec,"ax" - jmp @__start diff --git a/arch/h8300/platform/h8300h/generic/Makefile b/arch/h8300/platform/h8300h/generic/Makefile deleted file mode 100644 index 2b12a170209..00000000000 --- a/arch/h8300/platform/h8300h/generic/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the linux kernel. -# - -extra-y := crt0_$(MODEL).o diff --git a/arch/h8300/platform/h8300h/generic/crt0_ram.S b/arch/h8300/platform/h8300h/generic/crt0_ram.S deleted file mode 100644 index 5ab7d9c1291..00000000000 --- a/arch/h8300/platform/h8300h/generic/crt0_ram.S +++ /dev/null @@ -1,107 +0,0 @@ -/* - * linux/arch/h8300/platform/h8300h/generic/crt0_ram.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: AE-3068 (aka. 
aki3068net) - * Memory Layout : RAM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> - -#if !defined(CONFIG_BLKDEV_RESERVE) -#if defined(CONFIG_GDB_DEBUG) -#define RAMEND (__ramend - 0xc000) -#else -#define RAMEND __ramend -#endif -#else -#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS -#endif - - .global __start - .global _command_line - .global __platform_gpio_table - .global __target_name - - .h8300h - - .section .text - .file "crt0_ram.S" - - /* CPU Reset entry */ -__start: - mov.l #RAMEND,sp - ldc #0x80,ccr - - /* Peripheral Setup */ - -#if defined(CONFIG_BLK_DEV_BLKMEM) - /* move romfs image */ - jsr @__move_romfs -#endif - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr er4 - shlr er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* copy kernel commandline */ - mov.l #COMMAND_START,er5 - mov.l #_command_line,er6 - mov.w #512,r4 - eepmov.w - - /* uClinux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -gpio_table: - ;; P1DDR - .byte 0x00,0x00 - ;; P2DDR - .byte 0x00,0x00 - ;; P3DDR - .byte 0x00,0x00 - ;; P4DDR - .byte 0x00,0x00 - ;; P5DDR - .byte 0x00,0x00 - ;; P6DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; P8DDR - .byte 0x00,0x00 - ;; P9DDR - .byte 0x00,0x00 - ;; PADDR - .byte 0x00,0x00 - ;; PBDDR - .byte 0x00,0x00 - -__target_name: - .asciz "generic" diff --git a/arch/h8300/platform/h8300h/generic/crt0_rom.S b/arch/h8300/platform/h8300h/generic/crt0_rom.S deleted file mode 100644 index dda1dfa15a5..00000000000 --- a/arch/h8300/platform/h8300h/generic/crt0_rom.S +++ /dev/null @@ -1,122 +0,0 @@ -/* - * linux/arch/h8300/platform/h8300h/generic/crt0_rom.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: generic - * Memory Layout : ROM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> - - .global __start - .global __command_line - .global __platform_gpio_table - .global __target_name - - .h8300h - .section .text - .file "crt0_rom.S" - - /* CPU Reset entry */ -__start: - mov.l #__ramend,sp - ldc #0x80,ccr - - /* Peripheral Setup */ - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr er4 - shlr er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* copy .data */ -#if !defined(CONFIG_H8300H_SIM) - /* copy .data */ - mov.l #__begin_data,er5 - mov.l #__sdata,er6 - mov.l #__edata,er4 - sub.l er6,er4 - shlr.l er4 - shlr.l er4 -1: - mov.l @er5+,er0 - mov.l er0,@er6 - adds #4,er6 - dec.l #1,er4 - bne 1b -#endif - - /* copy kernel commandline */ - mov.l #COMMAND_START,er5 - mov.l #__command_line,er6 - mov.w #512,r4 - eepmov.w - - /* linux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -gpio_table: - ;; P1DDR - .byte 0x00,0x00 - ;; P2DDR - .byte 0x00,0x00 - ;; P3DDR - .byte 0x00,0x00 - ;; P4DDR - .byte 0x00,0x00 - ;; P5DDR - .byte 0x00,0x00 - ;; P6DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; P8DDR - .byte 0x00,0x00 - ;; P9DDR - .byte 0x00,0x00 - ;; PADDR - .byte 0x00,0x00 - ;; PBDDR - .byte 0x00,0x00 - - .section .rodata -__target_name: - .asciz "generic" - - .section .bss -__command_line: - 
.space 512 - - /* interrupt vector */ - .section .vectors,"ax" - .long __start -vector = 1 - .rept 64-1 - .long _interrupt_redirect_table+vector*4 -vector = vector + 1 - .endr diff --git a/arch/h8300/platform/h8300h/h8max/Makefile b/arch/h8300/platform/h8300h/h8max/Makefile deleted file mode 100644 index b7ff78050b7..00000000000 --- a/arch/h8300/platform/h8300h/h8max/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the linux kernel. -# - -extra-y := crt0_ram.o diff --git a/arch/h8300/platform/h8300h/h8max/crt0_ram.S b/arch/h8300/platform/h8300h/h8max/crt0_ram.S deleted file mode 100644 index 6a0d4e2d9ec..00000000000 --- a/arch/h8300/platform/h8300h/h8max/crt0_ram.S +++ /dev/null @@ -1,110 +0,0 @@ -/* - * linux/arch/h8300/platform/h8300h/h8max/crt0_ram.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: H8MAX - * Memory Layout : RAM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> - -#if !defined(CONFIG_BLKDEV_RESERVE) -#if defined(CONFIG_GDB_DEBUG) -#define RAMEND (__ramend - 0xc000) -#else -#define RAMEND __ramend -#endif -#else -#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS -#endif - - .global __start - .global _command_line - .global __platform_gpio_table - .global __target_name - - .h8300h - - .section .text - .file "crt0_ram.S" - - /* CPU Reset entry */ -__start: - mov.l #RAMEND,sp - ldc #0x80,ccr - - /* Peripheral Setup */ - -#if defined(CONFIG_MTD_UCLINUX) - /* move romfs image */ - jsr @__move_romfs -#endif - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr er4 - shlr er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* copy kernel commandline */ - mov.l #COMMAND_START,er5 - mov.l #_command_line,er6 - mov.w #512,r4 - eepmov.w - - /* uClinux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -gpio_table: - ;; P1DDR - .byte 0xff,0xff - ;; P2DDR - .byte 0xff,0xff - ;; P3DDR - .byte 0x00,0x00 - ;; P4DDR - .byte 0x00,0x00 - ;; P5DDR - .byte 0x01,0x01 - ;; P6DDR - .byte 0xf6,0xf6 - ;; dummy - .byte 0x00,0x00 - ;; P8DDR - .byte 0xee,0xee - ;; P9DDR - .byte 0x00,0x00 - ;; PADDR - .byte 0x00,0x00 - ;; PBDDR - .byte 0x30,0x30 - -__target_name: - .asciz "H8MAX" - - .section .bootvec,"ax" - jmp @__start diff --git a/arch/h8300/platform/h8300h/irq.c b/arch/h8300/platform/h8300h/irq.c deleted file mode 100644 index 0a50353e09d..00000000000 --- a/arch/h8300/platform/h8300h/irq.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Interrupt handling H8/300H depend. 
- * Yoshinori Sato <ysato@users.sourceforge.jp> - * - */ - -#include <linux/init.h> -#include <linux/errno.h> - -#include <asm/ptrace.h> -#include <asm/traps.h> -#include <asm/irq.h> -#include <asm/io.h> -#include <asm/gpio-internal.h> -#include <asm/regs306x.h> - -const int __initconst h8300_saved_vectors[] = { -#if defined(CONFIG_GDB_DEBUG) - TRAP3_VEC, /* TRAPA #3 is GDB breakpoint */ -#endif - -1, -}; - -const h8300_vector __initconst h8300_trap_table[] = { - 0, 0, 0, 0, 0, 0, 0, 0, - system_call, - 0, - 0, - trace_break, -}; - -int h8300_enable_irq_pin(unsigned int irq) -{ - int bitmask; - if (irq < EXT_IRQ0 || irq > EXT_IRQ5) - return 0; - - /* initialize IRQ pin */ - bitmask = 1 << (irq - EXT_IRQ0); - switch(irq) { - case EXT_IRQ0: - case EXT_IRQ1: - case EXT_IRQ2: - case EXT_IRQ3: - if (H8300_GPIO_RESERVE(H8300_GPIO_P8, bitmask) == 0) - return -EBUSY; - H8300_GPIO_DDR(H8300_GPIO_P8, bitmask, H8300_GPIO_INPUT); - break; - case EXT_IRQ4: - case EXT_IRQ5: - if (H8300_GPIO_RESERVE(H8300_GPIO_P9, bitmask) == 0) - return -EBUSY; - H8300_GPIO_DDR(H8300_GPIO_P9, bitmask, H8300_GPIO_INPUT); - break; - } - - return 0; -} - -void h8300_disable_irq_pin(unsigned int irq) -{ - int bitmask; - if (irq < EXT_IRQ0 || irq > EXT_IRQ5) - return; - - /* disable interrupt & release IRQ pin */ - bitmask = 1 << (irq - EXT_IRQ0); - switch(irq) { - case EXT_IRQ0: - case EXT_IRQ1: - case EXT_IRQ2: - case EXT_IRQ3: - *(volatile unsigned char *)IER &= ~bitmask; - H8300_GPIO_FREE(H8300_GPIO_P8, bitmask); - break ; - case EXT_IRQ4: - case EXT_IRQ5: - *(volatile unsigned char *)IER &= ~bitmask; - H8300_GPIO_FREE(H8300_GPIO_P9, bitmask); - break; - } -} diff --git a/arch/h8300/platform/h8300h/ptrace_h8300h.c b/arch/h8300/platform/h8300h/ptrace_h8300h.c deleted file mode 100644 index 4f1ed027963..00000000000 --- a/arch/h8300/platform/h8300h/ptrace_h8300h.c +++ /dev/null @@ -1,284 +0,0 @@ -/* - * linux/arch/h8300/platform/h8300h/ptrace_h8300h.c - * ptrace cpu depend helper functions - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file COPYING in the main directory of - * this archive for more details. - */ - -#include <linux/linkage.h> -#include <linux/sched.h> -#include <asm/ptrace.h> - -#define CCR_MASK 0x6f /* mode/imask not set */ -#define BREAKINST 0x5730 /* trapa #3 */ - -/* Mapping from PT_xxx to the stack offset at which the register is - saved. Notice that usp has no stack-slot and needs to be treated - specially (see get_reg/put_reg below). 
*/ -static const int h8300_register_offset[] = { - PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4), - PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0), - PT_REG(ccr), PT_REG(pc) -}; - -/* read register */ -long h8300_get_reg(struct task_struct *task, int regno) -{ - switch (regno) { - case PT_USP: - return task->thread.usp + sizeof(long)*2; - case PT_CCR: - return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]); - default: - return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]); - } -} - -/* write register */ -int h8300_put_reg(struct task_struct *task, int regno, unsigned long data) -{ - unsigned short oldccr; - switch (regno) { - case PT_USP: - task->thread.usp = data - sizeof(long)*2; - case PT_CCR: - oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]); - oldccr &= ~CCR_MASK; - data &= CCR_MASK; - data |= oldccr; - *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data; - break; - default: - *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data; - break; - } - return 0; -} - -/* disable singlestep */ -void user_disable_single_step(struct task_struct *child) -{ - if((long)child->thread.breakinfo.addr != -1L) { - *child->thread.breakinfo.addr = child->thread.breakinfo.inst; - child->thread.breakinfo.addr = (unsigned short *)-1L; - } -} - -/* calculate next pc */ -enum jump_type {none, /* normal instruction */ - jabs, /* absolute address jump */ - ind, /* indirect address jump */ - ret, /* return to subrutine */ - reg, /* register indexed jump */ - relb, /* pc relative jump (byte offset) */ - relw, /* pc relative jump (word offset) */ - }; - -/* opcode decode table define - ptn: opcode pattern - msk: opcode bitmask - len: instruction length (<0 next table index) - jmp: jump operation mode */ -struct optable { - unsigned char bitpattern; - unsigned char bitmask; - signed char length; - signed char type; -} __attribute__((aligned(1),packed)); - -#define OPTABLE(ptn,msk,len,jmp) \ - { \ - .bitpattern = ptn, \ - .bitmask = msk, \ - .length = len, \ - .type = jmp, \ - } - -static const struct optable optable_0[] = { - OPTABLE(0x00,0xff, 1,none), /* 0x00 */ - OPTABLE(0x01,0xff,-1,none), /* 0x01 */ - OPTABLE(0x02,0xfe, 1,none), /* 0x02-0x03 */ - OPTABLE(0x04,0xee, 1,none), /* 0x04-0x05/0x14-0x15 */ - OPTABLE(0x06,0xfe, 1,none), /* 0x06-0x07 */ - OPTABLE(0x08,0xea, 1,none), /* 0x08-0x09/0x0c-0x0d/0x18-0x19/0x1c-0x1d */ - OPTABLE(0x0a,0xee, 1,none), /* 0x0a-0x0b/0x1a-0x1b */ - OPTABLE(0x0e,0xee, 1,none), /* 0x0e-0x0f/0x1e-0x1f */ - OPTABLE(0x10,0xfc, 1,none), /* 0x10-0x13 */ - OPTABLE(0x16,0xfe, 1,none), /* 0x16-0x17 */ - OPTABLE(0x20,0xe0, 1,none), /* 0x20-0x3f */ - OPTABLE(0x40,0xf0, 1,relb), /* 0x40-0x4f */ - OPTABLE(0x50,0xfc, 1,none), /* 0x50-0x53 */ - OPTABLE(0x54,0xfd, 1,ret ), /* 0x54/0x56 */ - OPTABLE(0x55,0xff, 1,relb), /* 0x55 */ - OPTABLE(0x57,0xff, 1,none), /* 0x57 */ - OPTABLE(0x58,0xfb, 2,relw), /* 0x58/0x5c */ - OPTABLE(0x59,0xfb, 1,reg ), /* 0x59/0x5b */ - OPTABLE(0x5a,0xfb, 2,jabs), /* 0x5a/0x5e */ - OPTABLE(0x5b,0xfb, 2,ind ), /* 0x5b/0x5f */ - OPTABLE(0x60,0xe8, 1,none), /* 0x60-0x67/0x70-0x77 */ - OPTABLE(0x68,0xfa, 1,none), /* 0x68-0x69/0x6c-0x6d */ - OPTABLE(0x6a,0xfe,-2,none), /* 0x6a-0x6b */ - OPTABLE(0x6e,0xfe, 2,none), /* 0x6e-0x6f */ - OPTABLE(0x78,0xff, 4,none), /* 0x78 */ - OPTABLE(0x79,0xff, 2,none), /* 0x79 */ - OPTABLE(0x7a,0xff, 3,none), /* 0x7a */ - OPTABLE(0x7b,0xff, 2,none), /* 0x7b */ - OPTABLE(0x7c,0xfc, 2,none), /* 0x7c-0x7f */ - 
OPTABLE(0x80,0x80, 1,none), /* 0x80-0xff */ -}; - -static const struct optable optable_1[] = { - OPTABLE(0x00,0xff,-3,none), /* 0x0100 */ - OPTABLE(0x40,0xf0,-3,none), /* 0x0140-0x14f */ - OPTABLE(0x80,0xf0, 1,none), /* 0x0180-0x018f */ - OPTABLE(0xc0,0xc0, 2,none), /* 0x01c0-0x01ff */ -}; - -static const struct optable optable_2[] = { - OPTABLE(0x00,0x20, 2,none), /* 0x6a0?/0x6a8?/0x6b0?/0x6b8? */ - OPTABLE(0x20,0x20, 3,none), /* 0x6a2?/0x6aa?/0x6b2?/0x6ba? */ -}; - -static const struct optable optable_3[] = { - OPTABLE(0x69,0xfb, 2,none), /* 0x010069/0x01006d/014069/0x01406d */ - OPTABLE(0x6b,0xff,-4,none), /* 0x01006b/0x01406b */ - OPTABLE(0x6f,0xff, 3,none), /* 0x01006f/0x01406f */ - OPTABLE(0x78,0xff, 5,none), /* 0x010078/0x014078 */ -}; - -static const struct optable optable_4[] = { - OPTABLE(0x00,0x78, 3,none), /* 0x0100690?/0x01006d0?/0140690/0x01406d0?/0x0100698?/0x01006d8?/0140698?/0x01406d8? */ - OPTABLE(0x20,0x78, 4,none), /* 0x0100692?/0x01006d2?/0140692/0x01406d2?/0x010069a?/0x01006da?/014069a?/0x01406da? */ -}; - -static const struct optables_list { - const struct optable *ptr; - int size; -} optables[] = { -#define OPTABLES(no) \ - { \ - .ptr = optable_##no, \ - .size = sizeof(optable_##no) / sizeof(struct optable), \ - } - OPTABLES(0), - OPTABLES(1), - OPTABLES(2), - OPTABLES(3), - OPTABLES(4), - -}; - -const unsigned char condmask[] = { - 0x00,0x40,0x01,0x04,0x02,0x08,0x10,0x20 -}; - -static int isbranch(struct task_struct *task,int reson) -{ - unsigned char cond = h8300_get_reg(task, PT_CCR); - /* encode complex conditions */ - /* B4: N^V - B5: Z|(N^V) - B6: C|Z */ - __asm__("bld #3,%w0\n\t" - "bxor #1,%w0\n\t" - "bst #4,%w0\n\t" - "bor #2,%w0\n\t" - "bst #5,%w0\n\t" - "bld #2,%w0\n\t" - "bor #0,%w0\n\t" - "bst #6,%w0\n\t" - :"=&r"(cond)::"cc"); - cond &= condmask[reson >> 1]; - if (!(reson & 1)) - return cond == 0; - else - return cond != 0; -} - -static unsigned short *getnextpc(struct task_struct *child, unsigned short *pc) -{ - const struct optable *op; - unsigned char *fetch_p; - unsigned char inst; - unsigned long addr; - unsigned long *sp; - int op_len,regno; - op = optables[0].ptr; - op_len = optables[0].size; - fetch_p = (unsigned char *)pc; - inst = *fetch_p++; - do { - if ((inst & op->bitmask) == op->bitpattern) { - if (op->length < 0) { - op = optables[-op->length].ptr; - op_len = optables[-op->length].size + 1; - inst = *fetch_p++; - } else { - switch (op->type) { - case none: - return pc + op->length; - case jabs: - addr = *(unsigned long *)pc; - return (unsigned short *)(addr & 0x00ffffff); - case ind: - addr = *pc & 0xff; - return (unsigned short *)(*(unsigned long *)addr); - case ret: - sp = (unsigned long *)h8300_get_reg(child, PT_USP); - /* user stack frames - | er0 | temporary saved - +--------+ - | exp | exception stack frames - +--------+ - | ret pc | userspace return address - */ - return (unsigned short *)(*(sp+2) & 0x00ffffff); - case reg: - regno = (*pc >> 4) & 0x07; - if (regno == 0) - addr = h8300_get_reg(child, PT_ER0); - else - addr = h8300_get_reg(child, regno-1+PT_ER1); - return (unsigned short *)addr; - case relb: - if (inst == 0x55 || isbranch(child,inst & 0x0f)) - pc = (unsigned short *)((unsigned long)pc + - ((signed char)(*fetch_p))); - return pc+1; /* skip myself */ - case relw: - if (inst == 0x5c || isbranch(child,(*fetch_p & 0xf0) >> 4)) - pc = (unsigned short *)((unsigned long)pc + - ((signed short)(*(pc+1)))); - return pc+2; /* skip myself */ - } - } - } else - op++; - } while(--op_len > 0); - return NULL; -} - -/* Set 
breakpoint(s) to simulate a single step from the current PC. */ - -void user_enable_single_step(struct task_struct *child) -{ - unsigned short *nextpc; - nextpc = getnextpc(child,(unsigned short *)h8300_get_reg(child, PT_PC)); - child->thread.breakinfo.addr = nextpc; - child->thread.breakinfo.inst = *nextpc; - *nextpc = BREAKINST; -} - -asmlinkage void trace_trap(unsigned long bp) -{ - if ((unsigned long)current->thread.breakinfo.addr == bp) { - user_disable_single_step(current); - force_sig(SIGTRAP,current); - } else - force_sig(SIGILL,current); -} - diff --git a/arch/h8300/platform/h8s/Makefile b/arch/h8300/platform/h8s/Makefile deleted file mode 100644 index bf124188376..00000000000 --- a/arch/h8300/platform/h8s/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for the linux kernel. -# -# Reuse any files we can from the H8S -# - -obj-y := ints_h8s.o ptrace_h8s.o diff --git a/arch/h8300/platform/h8s/edosk2674/Makefile b/arch/h8300/platform/h8s/edosk2674/Makefile deleted file mode 100644 index 8e349723bb4..00000000000 --- a/arch/h8300/platform/h8s/edosk2674/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the linux kernel. -# - -extra-y := crt0_$(MODEL).o diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S deleted file mode 100644 index 5ed191b37cd..00000000000 --- a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S +++ /dev/null @@ -1,130 +0,0 @@ -/* - * linux/arch/h8300/platform/h8s/edosk2674/crt0_ram.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: EDOSK-2674 - * Memory Layout : RAM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> -#include <asm/regs267x.h> - -#if !defined(CONFIG_BLKDEV_RESERVE) -#if defined(CONFIG_GDB_DEBUG) -#define RAMEND (__ramend - 0xc000) -#else -#define RAMEND __ramend -#endif -#else -#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS -#endif - - .global __start - .global __command_line - .global __platform_gpio_table - .global __target_name - - .h8300s - - .section .text - .file "crt0_ram.S" - - /* CPU Reset entry */ -__start: - mov.l #RAMEND,sp - ldc #0x80,ccr - ldc #0x00,exr - - /* Peripheral Setup */ - bclr #4,@INTCR:8 /* interrupt mode 2 */ - bset #5,@INTCR:8 - bclr #0,@IER+1:16 - bset #1,@ISCRL+1:16 /* IRQ0 Positive Edge */ - bclr #0,@ISCRL+1:16 - -#if defined(CONFIG_MTD_UCLINUX) - /* move romfs image */ - jsr @__move_romfs -#endif - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l er5,er6 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr #2,er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* copy kernel commandline */ - mov.l #COMMAND_START,er5 - mov.l #_command_line,er6 - mov.w #512,r4 - eepmov.w - - /* uClinux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -gpio_table: - ;; P1DDR - ;; used,ddr - .byte 0x00,0x00 - ;; P2DDR - .byte 0x00,0x00 - ;; P3DDR - .byte 0x3f,0x3a - ;; dummy - .byte 0x00,0x00 - ;; P5DDR - .byte 0x00,0x00 - ;; P6DDR - .byte 0x00,0x00 - ;; P7DDR - .byte 0x00,0x00 - ;; P8DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; PADDR - .byte 0xff,0xff - ;; PBDDR - .byte 0xff,0x00 - ;; PCDDR - .byte 0xff,0x00 - ;; PDDDR - .byte 0xff,0x00 - ;; PEDDR - .byte 0xff,0x00 - ;; PFDDR - .byte 0xff,0xff - ;; PGDDR - .byte 0x0f,0x0f - ;; PHDDR - .byte 0x0f,0x0f - -__target_name: - .asciz "EDOSK-2674" - - 
.section .bootvec,"ax" - jmp @__start diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S b/arch/h8300/platform/h8s/edosk2674/crt0_rom.S deleted file mode 100644 index 06d1d7f324c..00000000000 --- a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S +++ /dev/null @@ -1,186 +0,0 @@ -/* - * linux/arch/h8300/platform/h8s/edosk2674/crt0_rom.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: EDOSK-2674 - * Memory Layout : ROM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> -#include <asm/regs267x.h> - - .global __start - .global __command_line - .global __platform_gpio_table - .global __target_name - - .h8300s - .section .text - .file "crt0_rom.S" - - /* CPU Reset entry */ -__start: - mov.l #__ramend,sp - ldc #0x80,ccr - ldc #0,exr - - /* Peripheral Setup */ -;BSC/GPIO setup - mov.l #init_regs,er0 - mov.w #0xffff,e2 -1: - mov.w @er0+,r2 - beq 2f - mov.w @er0+,r1 - mov.b r1l,@er2 - bra 1b - -2: -;SDRAM setup -#define SDRAM_SMR 0x400040 - - mov.b #0,r0l - mov.b r0l,@DRACCR:16 - mov.w #0x188,r0 - mov.w r0,@REFCR:16 - mov.w #0x85b4,r0 - mov.w r0,@DRAMCR:16 - mov.b #0,r1l - mov.b r1l,@SDRAM_SMR - mov.w #0x84b4,r0 - mov.w r0,@DRAMCR:16 -;special thanks to Arizona Cooperative Power - - /* copy .data */ - mov.l #__begin_data,er5 - mov.l #__sdata,er6 - mov.l #__edata,er4 - sub.l er6,er4 - shlr.l #2,er4 -1: - mov.l @er5+,er0 - mov.l er0,@er6 - adds #4,er6 - dec.l #1,er4 - bne 1b - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr.l #2,er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* copy kernel commandline */ - mov.l #COMMAND_START,er5 - mov.l #__command_line,er6 - mov.w #512,r4 - eepmov.w - - /* linux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -#define INIT_REGS_DATA(REGS,DATA) \ - .word ((REGS) & 0xffff),DATA - -init_regs: -INIT_REGS_DATA(ASTCR,0xff) -INIT_REGS_DATA(RDNCR,0x00) -INIT_REGS_DATA(ABWCR,0x80) -INIT_REGS_DATA(WTCRAH,0x27) -INIT_REGS_DATA(WTCRAL,0x77) -INIT_REGS_DATA(WTCRBH,0x71) -INIT_REGS_DATA(WTCRBL,0x22) -INIT_REGS_DATA(CSACRH,0x80) -INIT_REGS_DATA(CSACRL,0x80) -INIT_REGS_DATA(BROMCRH,0xa0) -INIT_REGS_DATA(BROMCRL,0xa0) -INIT_REGS_DATA(P3DDR,0x3a) -INIT_REGS_DATA(P3ODR,0x06) -INIT_REGS_DATA(PADDR,0xff) -INIT_REGS_DATA(PFDDR,0xfe) -INIT_REGS_DATA(PGDDR,0x0f) -INIT_REGS_DATA(PHDDR,0x0f) -INIT_REGS_DATA(PFCR0,0xff) -INIT_REGS_DATA(PFCR2,0x0d) -INIT_REGS_DATA(ITSR, 0x00) -INIT_REGS_DATA(ITSR+1,0x3f) -INIT_REGS_DATA(INTCR,0x20) - - .word 0 - -gpio_table: - ;; P1DDR - .byte 0x00,0x00 - ;; P2DDR - .byte 0x00,0x00 - ;; P3DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; P5DDR - .byte 0x00,0x00 - ;; P6DDR - .byte 0x00,0x00 - ;; P7DDR - .byte 0x00,0x00 - ;; P8DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; PADDR - .byte 0x00,0x00 - ;; PBDDR - .byte 0x00,0x00 - ;; PCDDR - .byte 0x00,0x00 - ;; PDDDR - .byte 0x00,0x00 - ;; PEDDR - .byte 0x00,0x00 - ;; PFDDR - .byte 0x00,0x00 - ;; PGDDR - .byte 0x00,0x00 - ;; PHDDR - .byte 0x00,0x00 - - .section .rodata -__target_name: - .asciz "EDOSK-2674" - - .section .bss -__command_line: - .space 512 - - /* interrupt vector */ - .section .vectors,"ax" - .long __start - .long __start -vector = 2 - .rept 126 - .long _interrupt_redirect_table+vector*4 -vector = vector + 1 - .endr diff --git 
a/arch/h8300/platform/h8s/generic/Makefile b/arch/h8300/platform/h8s/generic/Makefile deleted file mode 100644 index 44b4685c664..00000000000 --- a/arch/h8300/platform/h8s/generic/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the linux kernel. -# - -extra-y = crt0_$(MODEL).o diff --git a/arch/h8300/platform/h8s/generic/crt0_ram.S b/arch/h8300/platform/h8s/generic/crt0_ram.S deleted file mode 100644 index 7018915de74..00000000000 --- a/arch/h8300/platform/h8s/generic/crt0_ram.S +++ /dev/null @@ -1,127 +0,0 @@ -/* - * linux/arch/h8300/platform/h8s/edosk2674/crt0_ram.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: generic - * Memory Layout : RAM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> -#include <asm/regs267x.h> - -#if !defined(CONFIG_BLKDEV_RESERVE) -#if defined(CONFIG_GDB_DEBUG) -#define RAMEND (__ramend - 0xc000) -#else -#define RAMEND __ramend -#endif -#else -#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS -#endif - - .global __start - .global __command_line - .global __platform_gpio_table - .global __target_name - - .h8300s - - .section .text - .file "crt0_ram.S" - - /* CPU Reset entry */ -__start: - mov.l #RAMEND,sp - ldc #0x80,ccr - ldc #0x00,exr - - /* Peripheral Setup */ - bclr #4,@INTCR:8 /* interrupt mode 2 */ - bset #5,@INTCR:8 - -#if defined(CONFIG_MTD_UCLINUX) - /* move romfs image */ - jsr @__move_romfs -#endif - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l er5,er6 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr #2,er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* copy kernel commandline */ - mov.l #COMMAND_START,er5 - mov.l #_command_line,er6 - mov.w #512,r4 - eepmov.w - - /* uClinux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -gpio_table: - ;; P1DDR - ;; used,ddr - .byte 0x00,0x00 - ;; P2DDR - .byte 0x00,0x00 - ;; P3DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; P5DDR - .byte 0x00,0x00 - ;; P6DDR - .byte 0x00,0x00 - ;; P7DDR - .byte 0x00,0x00 - ;; P8DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; PADDR - .byte 0x00,0x00 - ;; PBDDR - .byte 0x00,0x00 - ;; PCDDR - .byte 0x00,0x00 - ;; PDDDR - .byte 0x00,0x00 - ;; PEDDR - .byte 0x00,0x00 - ;; PFDDR - .byte 0x00,0x00 - ;; PGDDR - .byte 0x00,0x00 - ;; PHDDR - .byte 0x00,0x00 - -__target_name: - .asciz "generic" - - .section .bootvec,"ax" - jmp @__start diff --git a/arch/h8300/platform/h8s/generic/crt0_rom.S b/arch/h8300/platform/h8s/generic/crt0_rom.S deleted file mode 100644 index 623ba782819..00000000000 --- a/arch/h8300/platform/h8s/generic/crt0_rom.S +++ /dev/null @@ -1,128 +0,0 @@ -/* - * linux/arch/h8300/platform/h8s/generic/crt0_rom.S - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * Platform depend startup - * Target Archtecture: generic - * Memory Layout : ROM - */ - -#define ASSEMBLY - -#include <asm/linkage.h> -#include <asm/regs267x.h> - - .global __start - .global __command_line - .global __platform_gpio_table - .global __target_name - - .h8300s - .section .text - .file "crt0_rom.S" - - /* CPU Reset entry */ -__start: - mov.l #__ramend,sp - ldc #0x80,ccr - ldc #0,exr - bclr #4,@INTCR:8 - bset #5,@INTCR:8 /* Interrupt mode 2 */ - - /* Peripheral Setup */ - - /* copy .data */ -#if !defined(CONFIG_H8S_SIM) - mov.l #__begin_data,er5 - mov.l #__sdata,er6 - mov.l #__edata,er4 - sub.l 
er6,er4 - shlr.l #2,er4 -1: - mov.l @er5+,er0 - mov.l er0,@er6 - adds #4,er6 - dec.l #1,er4 - bne 1b -#endif - - /* .bss clear */ - mov.l #__sbss,er5 - mov.l #__ebss,er4 - sub.l er5,er4 - shlr.l #2,er4 - sub.l er0,er0 -1: - mov.l er0,@er5 - adds #4,er5 - dec.l #1,er4 - bne 1b - - /* linux kernel start */ - ldc #0x90,ccr /* running kernel */ - mov.l #_init_thread_union,sp - add.l #0x2000,sp - jsr @_start_kernel -_exit: - - jmp _exit - - rts - - /* I/O port assign information */ -__platform_gpio_table: - mov.l #gpio_table,er0 - rts - -gpio_table: - ;; P1DDR - .byte 0x00,0x00 - ;; P2DDR - .byte 0x00,0x00 - ;; P3DDR - .byte 0x00,0x00 - ;; P4DDR - .byte 0x00,0x00 - ;; P5DDR - .byte 0x00,0x00 - ;; P6DDR - .byte 0x00,0x00 - ;; dummy - .byte 0x00,0x00 - ;; P8DDR - .byte 0x00,0x00 - ;; PADDR - .byte 0x00,0x00 - ;; PBDDR - .byte 0x00,0x00 - ;; PCDDR - .byte 0x00,0x00 - ;; PDDDR - .byte 0x00,0x00 - ;; PEDDR - .byte 0x00,0x00 - ;; PFDDR - .byte 0x00,0x00 - ;; PGDDR - .byte 0x00,0x00 - ;; PHDDR - .byte 0x00,0x00 - - .section .rodata -__target_name: - .asciz "generic" - - .section .bss -__command_line: - .space 512 - - /* interrupt vector */ - .section .vectors,"ax" - .long __start - .long __start -vector = 2 - .rept 126-1 - .long _interrupt_redirect_table+vector*4 -vector = vector + 1 - .endr diff --git a/arch/h8300/platform/h8s/irq.c b/arch/h8300/platform/h8s/irq.c deleted file mode 100644 index f3a5511c16b..00000000000 --- a/arch/h8300/platform/h8s/irq.c +++ /dev/null @@ -1,104 +0,0 @@ -/* - * linux/arch/h8300/platform/h8s/ints_h8s.c - * Interrupt handling CPU variants - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - */ - -#include <linux/init.h> -#include <linux/errno.h> -#include <linux/kernel.h> - -#include <asm/ptrace.h> -#include <asm/traps.h> -#include <asm/irq.h> -#include <asm/io.h> -#include <asm/gpio-internal.h> -#include <asm/regs267x.h> - -/* saved vector list */ -const int __initconst h8300_saved_vectors[] = { -#if defined(CONFIG_GDB_DEBUG) - TRACE_VEC, - TRAP3_VEC, -#endif - -1 -}; - -/* trap entry table */ -const H8300_VECTOR __initconst h8300_trap_table[] = { - 0,0,0,0,0, - trace_break, /* TRACE */ - 0,0, - system_call, /* TRAPA #0 */ - 0,0,0,0,0,0,0 -}; - -/* IRQ pin assignment */ -struct irq_pins { - unsigned char port_no; - unsigned char bit_no; -} __attribute__((aligned(1),packed)); -/* ISTR = 0 */ -static const struct irq_pins irq_assign_table0[16]={ - {H8300_GPIO_P5,H8300_GPIO_B0},{H8300_GPIO_P5,H8300_GPIO_B1}, - {H8300_GPIO_P5,H8300_GPIO_B2},{H8300_GPIO_P5,H8300_GPIO_B3}, - {H8300_GPIO_P5,H8300_GPIO_B4},{H8300_GPIO_P5,H8300_GPIO_B5}, - {H8300_GPIO_P5,H8300_GPIO_B6},{H8300_GPIO_P5,H8300_GPIO_B7}, - {H8300_GPIO_P6,H8300_GPIO_B0},{H8300_GPIO_P6,H8300_GPIO_B1}, - {H8300_GPIO_P6,H8300_GPIO_B2},{H8300_GPIO_P6,H8300_GPIO_B3}, - {H8300_GPIO_P6,H8300_GPIO_B4},{H8300_GPIO_P6,H8300_GPIO_B5}, - {H8300_GPIO_PF,H8300_GPIO_B1},{H8300_GPIO_PF,H8300_GPIO_B2}, -}; -/* ISTR = 1 */ -static const struct irq_pins irq_assign_table1[16]={ - {H8300_GPIO_P8,H8300_GPIO_B0},{H8300_GPIO_P8,H8300_GPIO_B1}, - {H8300_GPIO_P8,H8300_GPIO_B2},{H8300_GPIO_P8,H8300_GPIO_B3}, - {H8300_GPIO_P8,H8300_GPIO_B4},{H8300_GPIO_P8,H8300_GPIO_B5}, - {H8300_GPIO_PH,H8300_GPIO_B2},{H8300_GPIO_PH,H8300_GPIO_B3}, - {H8300_GPIO_P2,H8300_GPIO_B0},{H8300_GPIO_P2,H8300_GPIO_B1}, - {H8300_GPIO_P2,H8300_GPIO_B2},{H8300_GPIO_P2,H8300_GPIO_B3}, - {H8300_GPIO_P2,H8300_GPIO_B4},{H8300_GPIO_P2,H8300_GPIO_B5}, - {H8300_GPIO_P2,H8300_GPIO_B6},{H8300_GPIO_P2,H8300_GPIO_B7}, -}; - -/* IRQ to GPIO pin translation */ -#define 
IRQ_GPIO_MAP(irqbit,irq,port,bit) \ -do { \ - if (*(volatile unsigned short *)ITSR & irqbit) { \ - port = irq_assign_table1[irq - EXT_IRQ0].port_no; \ - bit = irq_assign_table1[irq - EXT_IRQ0].bit_no; \ - } else { \ - port = irq_assign_table0[irq - EXT_IRQ0].port_no; \ - bit = irq_assign_table0[irq - EXT_IRQ0].bit_no; \ - } \ -} while(0) - -int h8300_enable_irq_pin(unsigned int irq) -{ - if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) { - unsigned short ptn = 1 << (irq - EXT_IRQ0); - unsigned int port_no,bit_no; - IRQ_GPIO_MAP(ptn, irq, port_no, bit_no); - if (H8300_GPIO_RESERVE(port_no, bit_no) == 0) - return -EBUSY; /* pin already use */ - H8300_GPIO_DDR(port_no, bit_no, H8300_GPIO_INPUT); - *(volatile unsigned short *)ISR &= ~ptn; /* ISR clear */ - } - - return 0; -} - -void h8300_disable_irq_pin(unsigned int irq) -{ - if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) { - /* disable interrupt & release IRQ pin */ - unsigned short ptn = 1 << (irq - EXT_IRQ0); - unsigned short port_no,bit_no; - *(volatile unsigned short *)ISR &= ~ptn; - *(volatile unsigned short *)IER &= ~ptn; - IRQ_GPIO_MAP(ptn, irq, port_no, bit_no); - H8300_GPIO_FREE(port_no, bit_no); - } -} diff --git a/arch/h8300/platform/h8s/ptrace_h8s.c b/arch/h8300/platform/h8s/ptrace_h8s.c deleted file mode 100644 index c058ab1a849..00000000000 --- a/arch/h8300/platform/h8s/ptrace_h8s.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * linux/arch/h8300/platform/h8s/ptrace_h8s.c - * ptrace cpu depend helper functions - * - * Yoshinori Sato <ysato@users.sourceforge.jp> - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file COPYING in the main directory of - * this archive for more details. - */ - -#include <linux/linkage.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <asm/ptrace.h> - -#define CCR_MASK 0x6f -#define EXR_TRACE 0x80 - -/* Mapping from PT_xxx to the stack offset at which the register is - saved. Notice that usp has no stack-slot and needs to be treated - specially (see get_reg/put_reg below). 
*/ -static const int h8300_register_offset[] = { - PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4), - PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0), - PT_REG(ccr), PT_REG(pc), 0, PT_REG(exr) -}; - -/* read register */ -long h8300_get_reg(struct task_struct *task, int regno) -{ - switch (regno) { - case PT_USP: - return task->thread.usp + sizeof(long)*2 + 2; - case PT_CCR: - case PT_EXR: - return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]); - default: - return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]); - } -} - -/* write register */ -int h8300_put_reg(struct task_struct *task, int regno, unsigned long data) -{ - unsigned short oldccr; - switch (regno) { - case PT_USP: - task->thread.usp = data - sizeof(long)*2 - 2; - case PT_CCR: - oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]); - oldccr &= ~CCR_MASK; - data &= CCR_MASK; - data |= oldccr; - *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data; - break; - case PT_EXR: - /* exr modify not support */ - return -EIO; - default: - *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data; - break; - } - return 0; -} - -/* disable singlestep */ -void user_disable_single_step(struct task_struct *child) -{ - *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) &= ~EXR_TRACE; -} - -/* enable singlestep */ -void user_enable_single_step(struct task_struct *child) -{ - *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) |= EXR_TRACE; -} - -asmlinkage void trace_trap(unsigned long bp) -{ - (void)bp; - force_sig(SIGTRAP,current); -} - diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 99041b07e61..09df2608f40 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -4,7 +4,6 @@ comment "Linux Kernel Configuration for Hexagon" config HEXAGON def_bool y select HAVE_OPROFILE - select USE_GENERIC_SMP_HELPERS if SMP # Other pending projects/to-do items. 
# select HAVE_REGS_AND_STACK_ACCESS_API # select HAVE_HW_BREAKPOINT if PERF_EVENTS diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h index 679bf6d6648..4c9d382d779 100644 --- a/arch/hexagon/include/asm/pgalloc.h +++ b/arch/hexagon/include/asm/pgalloc.h @@ -65,10 +65,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, struct page *pte; pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); - - if (pte) - pgtable_page_ctor(pte); - + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c index 29d1f1b0001..0e7c1dbb37b 100644 --- a/arch/hexagon/kernel/setup.c +++ b/arch/hexagon/kernel/setup.c @@ -32,9 +32,6 @@ #include <asm/hexagon_vm.h> #include <asm/vm_mmu.h> #include <asm/time.h> -#ifdef CONFIG_OF -#include <asm/prom.h> -#endif char cmd_line[COMMAND_LINE_SIZE]; static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 7740ab10a17..4e4119b0e69 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -6,6 +6,7 @@ menu "Processor type and features" config IA64 bool + select ARCH_MIGHT_HAVE_PC_PARPORT select PCI if (!IA64_HP_SIM) select ACPI if (!IA64_HP_SIM) select PM if (!IA64_HP_SIM) @@ -343,7 +344,6 @@ config FORCE_MAX_ZONEORDER config SMP bool "Symmetric multi-processing support" - select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, say N. If you have a system with more diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index 989dd3fe8de..db95f570705 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -234,10 +234,6 @@ struct kvm_vm_data { #define KVM_REQ_PTC_G 32 #define KVM_REQ_RESUME 33 -#define KVM_HPAGE_GFN_SHIFT(x) 0 -#define KVM_NR_PAGE_SIZES 1 -#define KVM_PAGES_PER_HPAGE(x) 1 - struct kvm; struct kvm_vcpu; @@ -480,7 +476,7 @@ struct kvm_arch { struct list_head assigned_dev_head; struct iommu_domain *iommu_domain; - int iommu_flags; + bool iommu_noncoherent; unsigned long irq_sources_bitmap; unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h index 96a8d927db2..5767cdfc08d 100644 --- a/arch/ia64/include/asm/pgalloc.h +++ b/arch/ia64/include/asm/pgalloc.h @@ -91,7 +91,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) if (!pg) return NULL; page = virt_to_page(pg); - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + quicklist_free(0, NULL, pg); + return NULL; + } return page; } diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index e0a899a1a8a..5a84b3a5074 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h @@ -319,7 +319,7 @@ struct thread_struct { regs->loadrs = 0; \ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ - if (unlikely(!get_dumpable(current->mm))) { \ + if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \ /* \ * Zap scratch regs to avoid leaking bits between processes with different \ * uid/privileges. 
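For reference, the pte_alloc_one() conversions in this series (hexagon and ia64 just above, m32r, m68k, metag and microblaze further down) all converge on the same shape now that pgtable_page_ctor() can fail; in this kernel generation it may need to allocate a split page-table lock. A minimal sketch of the common pattern, not copied from any single hunk:

	#include <linux/mm.h>
	#include <linux/gfp.h>

	static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
					      unsigned long address)
	{
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (!page)
			return NULL;
		/* the constructor can now fail; unwind instead of handing
		 * back a page whose page-table bookkeeping is not set up */
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		return page;
	}

The only per-architecture differences are the allocation call and how the page is released on failure (for example quicklist_free() in the ia64 hunk above).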
\ diff --git a/arch/ia64/include/asm/xen/page-coherent.h b/arch/ia64/include/asm/xen/page-coherent.h new file mode 100644 index 00000000000..96e42f97fa1 --- /dev/null +++ b/arch/ia64/include/asm/xen/page-coherent.h @@ -0,0 +1,38 @@ +#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H +#define _ASM_IA64_XEN_PAGE_COHERENT_H + +#include <asm/page.h> +#include <linux/dma-attrs.h> +#include <linux/dma-mapping.h> + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) +{ + void *vstart = (void*)__get_free_pages(flags, get_order(size)); + *dma_handle = virt_to_phys(vstart); + return vstart; +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + free_pages((unsigned long) cpu_addr, get_order(size)); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { } + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { } + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) { } + +static inline void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) { } + +#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */ diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 556d0701a15..c25302fb48d 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -85,4 +85,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 5eb71d22c3d..59d52e3aef1 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -882,40 +882,10 @@ __init void prefill_possible_map(void) set_cpu_possible(i, true); } -static int _acpi_map_lsapic(acpi_handle handle, int *pcpu) +static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) { - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - struct acpi_madt_local_sapic *lsapic; cpumask_t tmp_map; - int cpu, physid; - - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) - return -EINVAL; - - if (!buffer.length || !buffer.pointer) - return -EINVAL; - - obj = buffer.pointer; - if (obj->type != ACPI_TYPE_BUFFER) - { - kfree(buffer.pointer); - return -EINVAL; - } - - lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer; - - if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) || - (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) { - kfree(buffer.pointer); - return -EINVAL; - } - - physid = ((lsapic->id << 8) | (lsapic->eid)); - - kfree(buffer.pointer); - buffer.length = ACPI_ALLOCATE_BUFFER; - buffer.pointer = NULL; + int cpu; cpumask_complement(&tmp_map, cpu_present_mask); cpu = cpumask_first(&tmp_map); @@ -934,9 +904,9 @@ static int _acpi_map_lsapic(acpi_handle handle, int *pcpu) } /* wrapper to silence section mismatch warning */ -int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu) +int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) { - return _acpi_map_lsapic(handle, pcpu); + return _acpi_map_lsapic(handle, physid, pcpu); } EXPORT_SYMBOL(acpi_map_lsapic); diff --git 
a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c index bac1639bc32..04bc8fd5f89 100644 --- a/arch/ia64/kernel/elfcore.c +++ b/arch/ia64/kernel/elfcore.c @@ -11,8 +11,7 @@ Elf64_Half elf_core_extra_phdrs(void) return GATE_EHDR->e_phnum; } -int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, - unsigned long limit) +int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); @@ -35,15 +34,13 @@ int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, phdr.p_offset += ofs; } phdr.p_paddr = 0; /* match other core phdrs */ - *size += sizeof(phdr); - if (*size > limit || !dump_write(file, &phdr, sizeof(phdr))) + if (!dump_emit(cprm, &phdr, sizeof(phdr))) return 0; } return 1; } -int elf_core_write_extra_data(struct file *file, size_t *size, - unsigned long limit) +int elf_core_write_extra_data(struct coredump_params *cprm) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); @@ -54,8 +51,7 @@ int elf_core_write_extra_data(struct file *file, size_t *size, void *addr = (void *)gate_phdrs[i].p_vaddr; size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz); - *size += memsz; - if (*size > limit || !dump_write(file, addr, memsz)) + if (!dump_emit(cprm, addr, memsz)) return 0; break; } diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index f8280a766a7..074fde49c9e 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -947,7 +947,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accouting + * we can also use npre/npostfault count for accounting * these specific fault cases. 
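The elfcore.c hunk above is part of a wider coredump rework: arch helpers stop carrying a (file, *size, limit) triple and calling dump_write(), and instead take the coredump_params so that dump_emit() can do the limit check and size accounting in one place. Roughly, assuming the 3.13-era fs/coredump.c interface, a caller now reduces to the following (write_extra_chunk() is a made-up name for illustration):

	#include <linux/binfmts.h>
	#include <linux/coredump.h>

	static int write_extra_chunk(struct coredump_params *cprm,
				     const void *buf, size_t len)
	{
		/* dump_emit() returns 0 when the write fails or the core
		 * size limit is reached, and tracks bytes written itself,
		 * so callers no longer thread a (*size, limit) pair around */
		return dump_emit(cprm, buf, len);
	}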
*/ kprobes_inc_nmissed_count(cur); diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 3637e03d228..33cab9a8adf 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -105,7 +105,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) } int -copy_siginfo_to_user (siginfo_t __user *to, siginfo_t *from) +copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from) { if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index bdfd8789b37..985bf80c622 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1550,12 +1550,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) { return 0; } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index b6f7f43424e..88504abf570 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -357,9 +357,7 @@ int vmemmap_find_next_valid_pfn(int node, int i) end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; end_address = PAGE_ALIGN(end_address); - - stop_address = (unsigned long) &vmem_map[ - pgdat->node_start_pfn + pgdat->node_spanned_pages]; + stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)]; do { pgd_t *pgd; diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 75661fbf452..09ef94a8a7c 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -275,7 +275,6 @@ source "kernel/Kconfig.preempt" config SMP bool "Symmetric multi-processing support" - select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h index a979a419816..9fc78fc4444 100644 --- a/arch/m32r/include/asm/mmu_context.h +++ b/arch/m32r/include/asm/mmu_context.h @@ -45,7 +45,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm) Flush all TLB and start new cycle. */ local_flush_tlb_all(); /* Fix version if needed. - Note that we avoid version #0 to distingush NO_CONTEXT. */ + Note that we avoid version #0 to distinguish NO_CONTEXT. 
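Here (ia64 mm/init.c) and in the metag mm/init.c hunk further down, the open-coded "node_start_pfn + node_spanned_pages" end-of-node calculation is replaced by the pgdat_end_pfn() helper. Judging by the two hunks, the helper is just the same arithmetic behind a name; a sketch of what it is expected to expand to:

	/* end pfn of a node: one past the last pfn it spans */
	static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
	{
		return pgdat->node_start_pfn + pgdat->node_spanned_pages;
	}

so these conversions are cosmetic rather than functional.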
*/ if (!mc) mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION; } diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h index 0fc73619897..2d55a064cca 100644 --- a/arch/m32r/include/asm/pgalloc.h +++ b/arch/m32r/include/asm/pgalloc.h @@ -43,7 +43,12 @@ static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm, { struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO); - pgtable_page_ctor(pte); + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index 24be7c8da86..52966650114 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -76,4 +76,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 311a300d48c..75f25a8e300 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -1,6 +1,7 @@ config M68K bool default y + select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select HAVE_IDE select HAVE_AOUT if MMU select HAVE_DEBUG_BUGVERBOSE diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index 313f3dd23cd..f9924fbcfe4 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h @@ -56,6 +56,10 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, if (!page) return NULL; + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } pte = kmap(page); if (pte) { diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h index 2f02f264e69..24bcba496c7 100644 --- a/arch/m68k/include/asm/motorola_pgalloc.h +++ b/arch/m68k/include/asm/motorola_pgalloc.h @@ -29,18 +29,22 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); + struct page *page; pte_t *pte; + page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); if(!page) return NULL; + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } pte = kmap(page); __flush_page_to_ram(pte); flush_tlb_kernel_page(pte); nocache_page(pte); kunmap(page); - pgtable_page_ctor(page); return page; } diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index 48d80d5a666..f868506e335 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h @@ -59,7 +59,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, return NULL; clear_highpage(page); - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } return page; } diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index 36368eb07e1..e56abd2c1b4 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -111,7 +111,6 @@ config METAG_META21 config SMP bool "Symmetric multi-processing support" depends on METAG_META21 && METAG_META21_MMU - select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one thread running Linux. 
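The SO_MAX_PACING_RATE definitions added to the ia64 and m32r uapi socket.h headers above (value 47 on these architectures) expose the new transmit-pacing cap to userspace. A minimal illustrative use from an application, assuming a libc whose headers already carry the constant and that the option takes an unsigned 32-bit rate in bytes per second:

	#include <stdio.h>
	#include <sys/socket.h>

	static int cap_pacing(int fd)
	{
		unsigned int rate = 1000000;	/* ~1 MB/s, arbitrary example */

		if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			       &rate, sizeof(rate)) < 0) {
			perror("setsockopt(SO_MAX_PACING_RATE)");
			return -1;
		}
		return 0;
	}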
If you have a system with only one thread running Linux, diff --git a/arch/metag/include/asm/mach/arch.h b/arch/metag/include/asm/mach/arch.h index 12c5664fea6..433f94624fa 100644 --- a/arch/metag/include/asm/mach/arch.h +++ b/arch/metag/include/asm/mach/arch.h @@ -53,7 +53,7 @@ struct machine_desc { /* * Current machine - only accessible during boot. */ -extern struct machine_desc *machine_desc; +extern const struct machine_desc *machine_desc; /* * Machine type table - also only accessible during boot diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h index 275d9285141..3104df0a482 100644 --- a/arch/metag/include/asm/pgalloc.h +++ b/arch/metag/include/asm/pgalloc.h @@ -52,8 +52,12 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, { struct page *pte; pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); - if (pte) - pgtable_page_ctor(pte); + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } diff --git a/arch/metag/include/asm/prom.h b/arch/metag/include/asm/prom.h deleted file mode 100644 index d2aa35d2228..00000000000 --- a/arch/metag/include/asm/prom.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * arch/metag/include/asm/prom.h - * - * Copyright (C) 2012 Imagination Technologies Ltd. - * - * Based on ARM version: - * Copyright (C) 2009 Canonical Ltd. <jeremy.kerr@canonical.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ -#ifndef __ASM_METAG_PROM_H -#define __ASM_METAG_PROM_H - -#include <asm/setup.h> -#define HAVE_ARCH_DEVTREE_FIXUPS - -extern struct machine_desc *setup_machine_fdt(void *dt); -extern void copy_fdt(void); - -#endif /* __ASM_METAG_PROM_H */ diff --git a/arch/metag/include/asm/setup.h b/arch/metag/include/asm/setup.h index e13083b15dd..e9fdee9452b 100644 --- a/arch/metag/include/asm/setup.h +++ b/arch/metag/include/asm/setup.h @@ -3,6 +3,7 @@ #include <uapi/asm/setup.h> +extern const struct machine_desc *setup_machine_fdt(void *dt); void per_cpu_trap_init(unsigned long); extern void __init dump_machine_table(void); #endif /* _ASM_METAG_SETUP_H */ diff --git a/arch/metag/include/asm/tbx.h b/arch/metag/include/asm/tbx.h index 287b36ff8ad..703b9cb0ac5 100644 --- a/arch/metag/include/asm/tbx.h +++ b/arch/metag/include/asm/tbx.h @@ -150,11 +150,9 @@ #else /* Reserved 0x04-0x09 */ #endif -#define TBID_SIGNUM_SWS 0x0A /* KICK received with SigMask != 0 */ -#define TBID_SIGNUM_SWK 0x0B /* KICK received with SigMask == 0 */ -/* Reserved 0x0C-0x0F */ +/* Reserved 0x0A-0x0F */ #define TBID_SIGNUM_TRT 0x10 /* Timer trigger */ -#define TBID_SIGNUM_LWK 0x11 /* Low level kick (handler provided by TBI) */ +#define TBID_SIGNUM_LWK 0x11 /* Low level kick */ #define TBID_SIGNUM_XXF 0x12 /* Fault handler - receives ALL _xxF sigs */ #ifdef TBI_1_4 #define TBID_SIGNUM_DFR 0x13 /* Deferred Exception handler */ @@ -183,8 +181,7 @@ each hardware signal, sometimes this is a many-to-one relationship. */ #define TBI_TRIG_BIT(SigNum) (\ ((SigNum) >= TBID_SIGNUM_TRT) ? 1<<((SigNum)-TBID_SIGNUM_TRT) :\ - ( ((SigNum) == TBID_SIGNUM_SWS) || \ - ((SigNum) == TBID_SIGNUM_SWK) ) ? \ + ((SigNum) == TBID_SIGNUM_LWK) ? 
\ TXSTAT_KICK_BIT : TXSTATI_BGNDHALT_BIT ) /* Return the hardware trigger vector number for entries in the @@ -687,10 +684,8 @@ typedef union _tbires_tag_ { Triggers will indicate the status of TXSTAT or TXSTATI sampled by the code that called the handler. - InstOrSWSId is defined firstly as 'Inst' if the SigNum is TBID_SIGNUM_SWx - and hold the actual SWITCH instruction detected, secondly if SigNum - is TBID_SIGNUM_SWS the 'SWSId' is defined to hold the Id of the - software signal detected, in other cases the value of this + Inst is defined as 'Inst' if the SigNum is TBID_SIGNUM_SWx and holds the + actual SWITCH instruction detected, in other cases the value of this parameter is undefined. pTBI points at the PTBI structure related to the thread and processing @@ -709,7 +704,7 @@ typedef union _tbires_tag_ { */ typedef TBIRES (*PTBIAPIFN)( TBIRES State, int SigNum, - int Triggers, int InstOrSWSId, + int Triggers, int Inst, volatile struct _tbi_tag_ *pTBI ); #endif /* ifndef __ASSEMBLY__ */ @@ -757,7 +752,7 @@ typedef volatile struct _tbi_tag_ { #ifndef __ASSEMBLY__ /* This handler should be used for TBID_SIGNUM_DFR */ extern TBIRES __TBIHandleDFR ( TBIRES State, int SigNum, - int Triggers, int InstOrSWSId, + int Triggers, int Inst, volatile struct _tbi_tag_ *pTBI ); #endif #endif diff --git a/arch/metag/kernel/devtree.c b/arch/metag/kernel/devtree.c index 7cd02529636..18dd7aea9fd 100644 --- a/arch/metag/kernel/devtree.c +++ b/arch/metag/kernel/devtree.c @@ -34,6 +34,19 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return alloc_bootmem_align(size, align); } +static const void * __init arch_get_next_mach(const char *const **match) +{ + static const struct machine_desc *mdesc = __arch_info_begin; + const struct machine_desc *m = mdesc; + + if (m >= __arch_info_end) + return NULL; + + mdesc++; + *match = m->dt_compat; + return m; +} + /** * setup_machine_fdt - Machine setup when an dtb was passed to the kernel * @dt: virtual address pointer to dt blob @@ -41,74 +54,18 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) * If a dtb was passed to the kernel, then use it to choose the correct * machine_desc and to setup the system. 
*/ -struct machine_desc * __init setup_machine_fdt(void *dt) +const struct machine_desc * __init setup_machine_fdt(void *dt) { - struct boot_param_header *devtree = dt; - struct machine_desc *mdesc, *mdesc_best = NULL; - unsigned int score, mdesc_score = ~1; - unsigned long dt_root; - const char *model; + const struct machine_desc *mdesc; /* check device tree validity */ - if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) + if (!early_init_dt_scan(dt)) return NULL; - /* Search the mdescs for the 'best' compatible value match */ - initial_boot_params = devtree; - dt_root = of_get_flat_dt_root(); - - for_each_machine_desc(mdesc) { - score = of_flat_dt_match(dt_root, mdesc->dt_compat); - if (score > 0 && score < mdesc_score) { - mdesc_best = mdesc; - mdesc_score = score; - } - } - if (!mdesc_best) { - const char *prop; - long size; - - pr_err("\nError: unrecognized/unsupported device tree compatible list:\n[ "); - - prop = of_get_flat_dt_prop(dt_root, "compatible", &size); - if (prop) { - while (size > 0) { - printk("'%s' ", prop); - size -= strlen(prop) + 1; - prop += strlen(prop) + 1; - } - } - printk("]\n\n"); - + mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach); + if (!mdesc) dump_machine_table(); /* does not return */ - } - - model = of_get_flat_dt_prop(dt_root, "model", NULL); - if (!model) - model = of_get_flat_dt_prop(dt_root, "compatible", NULL); - if (!model) - model = "<unknown>"; - pr_info("Machine: %s, model: %s\n", mdesc_best->name, model); - - /* Retrieve various information from the /chosen node */ - of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); - - return mdesc_best; -} + pr_info("Machine name: %s\n", mdesc->name); -/** - * copy_fdt - Copy device tree into non-init memory. - * - * We must copy the flattened device tree blob into non-init memory because the - * unflattened device tree will reference the strings in it directly. - */ -void __init copy_fdt(void) -{ - void *alloc = early_init_dt_alloc_memory_arch( - be32_to_cpu(initial_boot_params->totalsize), 0x40); - if (alloc) { - memcpy(alloc, initial_boot_params, - be32_to_cpu(initial_boot_params->totalsize)); - initial_boot_params = alloc; - } + return mdesc; } diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index 8c00dedadc5..db589ad5dbc 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -305,9 +305,7 @@ void dma_free_coherent(struct device *dev, size_t size, if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); - ClearPageReserved(page); - - __free_page(page); + __free_reserved_page(page); continue; } } diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c index c396cd0b425..129c7cdda1c 100644 --- a/arch/metag/kernel/setup.c +++ b/arch/metag/kernel/setup.c @@ -42,7 +42,6 @@ #include <asm/mmu.h> #include <asm/mmzone.h> #include <asm/processor.h> -#include <asm/prom.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/traps.h> @@ -115,7 +114,7 @@ extern u32 __dtb_start[]; extern struct console dash_console; #endif -struct machine_desc *machine_desc __initdata; +const struct machine_desc *machine_desc __initdata; /* * Map a Linux CPU number to a hardware thread ID @@ -302,13 +301,9 @@ void __init setup_arch(char **cmdline_p) * rather than the version from the bootloader. This makes call * stacks easier to understand and may allow us to unmap the * bootloader at some point. - * - * We need to keep the LWK handler that TBI installed in order to - * be able to do inter-thread comms. 
*/ for (i = 0; i <= TBID_SIGNUM_MAX; i++) - if (i != TBID_SIGNUM_LWK) - _pTBI->fnSigs[i] = __TBIUnExpXXX; + _pTBI->fnSigs[i] = __TBIUnExpXXX; /* A Meta requirement is that the kernel is loaded (virtually) * at the PAGE_OFFSET. @@ -408,9 +403,7 @@ void __init setup_arch(char **cmdline_p) cpu_2_hwthread_id[smp_processor_id()] = hard_processor_id(); hwthread_id_2_cpu[hard_processor_id()] = smp_processor_id(); - /* Copy device tree blob into non-init memory before unflattening */ - copy_fdt(); - unflatten_device_tree(); + unflatten_and_copy_device_tree(); #ifdef CONFIG_SMP smp_init_cpus(); diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c index 25f9d1c2ffe..17b2e2e38d5 100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@ -819,8 +819,7 @@ void per_cpu_trap_init(unsigned long cpu) set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */ TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */ - TBI_TRIG_BIT(TBID_SIGNUM_SW1) | - TBI_TRIG_BIT(TBID_SIGNUM_SWS)); + TBI_TRIG_BIT(TBID_SIGNUM_SW1)); /* non-priv - use current stack */ int_context.Sig.pCtx = NULL; @@ -842,7 +841,7 @@ void __init trap_init(void) _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler; _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler; _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler; - _pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler; + _pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler; #ifdef CONFIG_METAG_META21 _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR; diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c index 123919534b8..3cd6288f65c 100644 --- a/arch/metag/mm/init.c +++ b/arch/metag/mm/init.c @@ -12,7 +12,6 @@ #include <linux/percpu.h> #include <linux/memblock.h> #include <linux/initrd.h> -#include <linux/of_fdt.h> #include <asm/setup.h> #include <asm/page.h> @@ -149,7 +148,7 @@ static void __init bootmem_init_one_node(unsigned int nid) if (!p->node_spanned_pages) return; - end_pfn = p->node_start_pfn + p->node_spanned_pages; + end_pfn = pgdat_end_pfn(p); #ifdef CONFIG_HIGHMEM if (end_pfn > max_low_pfn) end_pfn = max_low_pfn; @@ -405,11 +404,3 @@ void free_initrd_mem(unsigned long start, unsigned long end) "initrd"); } #endif - -#ifdef CONFIG_OF_FLATTREE -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - pr_err("%s(%llx, %llx)\n", - __func__, start, end); -} -#endif /* CONFIG_OF_FLATTREE */ diff --git a/arch/metag/mm/numa.c b/arch/metag/mm/numa.c index 9ae578c9b62..b172aa45fcf 100644 --- a/arch/metag/mm/numa.c +++ b/arch/metag/mm/numa.c @@ -34,7 +34,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) unsigned long pgdat_paddr; /* Don't allow bogus node assignment */ - BUG_ON(nid > MAX_NUMNODES || nid <= 0); + BUG_ON(nid >= MAX_NUMNODES || nid <= 0); start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; diff --git a/arch/metag/tbx/tbidefr.S b/arch/metag/tbx/tbidefr.S index 3eb165ebf54..8f0902b22f7 100644 --- a/arch/metag/tbx/tbidefr.S +++ b/arch/metag/tbx/tbidefr.S @@ -20,7 +20,7 @@ /* D1Ar1:D0Ar2 -- State * D0Ar3 -- SigNum * D0Ar4 -- Triggers - * D1Ar5 -- InstOrSWSId + * D1Ar5 -- Inst * D0Ar6 -- pTBI (volatile) */ ___TBIHandleDFR: diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index b82f82b7431..e23cccde9c2 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -1,5 +1,6 @@ config MICROBLAZE def_bool y + select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_FUNCTION_TRACER @@ -82,11 +83,6 @@ config MMU bool "MMU support" default n -config NO_MMU - bool - 
depends on !MMU - default y - comment "Boot options" config CMDLINE_BOOL @@ -250,10 +246,6 @@ config MICROBLAZE_64K_PAGES endchoice -config KERNEL_PAD - hex "Kernel PAD for unpacking" if ADVANCED_OPTIONS - default "0x80000" if MMU - endmenu source "mm/Kconfig" diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile index c3b3a5d67b8..c4982d16e55 100644 --- a/arch/microblaze/boot/dts/Makefile +++ b/arch/microblaze/boot/dts/Makefile @@ -1,6 +1,4 @@ # -# arch/microblaze/boot/Makefile -# obj-y += linked_dtb.o diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index d52abb6812f..935f9bec414 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -127,8 +127,6 @@ extern void of_scan_pci_bridge(struct device_node *node, extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); -extern int pci_read_irq_line(struct pci_dev *dev); - extern int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index ebd35792482..7fdf7fabc7d 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h @@ -122,8 +122,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, #endif ptepage = alloc_pages(flags, 0); - if (ptepage) - clear_highpage(ptepage); + if (!ptepage) + return NULL; + clear_highpage(ptepage); + if (!pgtable_page_ctor(ptepage)) { + __free_page(ptepage); + return NULL; + } return ptepage; } @@ -158,8 +163,9 @@ extern inline void pte_free_slow(struct page *ptepage) __free_page(ptepage); } -extern inline void pte_free(struct mm_struct *mm, struct page *ptepage) +static inline void pte_free(struct mm_struct *mm, struct page *ptepage) { + pgtable_page_dtor(ptepage); __free_page(ptepage); } diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index 9977816c5ad..2f03ac81585 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h @@ -11,19 +11,10 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ - -#include <linux/of.h> /* linux/of.h gets to determine #include ordering */ - #ifndef _ASM_MICROBLAZE_PROM_H #define _ASM_MICROBLAZE_PROM_H -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -#include <linux/types.h> -#include <asm/irq.h> -#include <linux/atomic.h> -#define HAVE_ARCH_DEVTREE_FIXUPS +#include <linux/of.h> /* Other Prototypes */ enum early_consoles { @@ -33,32 +24,4 @@ enum early_consoles { extern int of_early_console(void *version); -/* - * OF address retreival & translation - */ - -#ifdef CONFIG_PCI -extern unsigned long pci_address_to_pio(phys_addr_t address); -#define pci_address_to_pio pci_address_to_pio -#endif /* CONFIG_PCI */ - -/* Parse the ibm,dma-window property of an OF node into the busno, phys and - * size parameters. - */ -void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, - unsigned long *busno, unsigned long *phys, unsigned long *size); - -extern void kdump_move_device_tree(void); - -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ - -/* These includes are put at the bottom because they may contain things - * that are overridden by this file. Ideally they shouldn't be included - * by this file, but there are a bunch of .c files that currently depend - * on it. 
Eventually they will be cleaned up. */ -#include <linux/of_fdt.h> -#include <linux/of_irq.h> -#include <linux/platform_device.h> - #endif /* _ASM_MICROBLAZE_PROM_H */ diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index fcc797feb9d..817b7eec95b 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -176,7 +176,7 @@ _invalidate: /* start to do TLB calculation */ addik r12, r0, _end rsub r12, r3, r12 - addik r12, r12, CONFIG_KERNEL_PAD /* that's the pad */ + addik r12, r12, CONFIG_LOWMEM_SIZE >> PTE_SHIFT /* that's the pad */ or r9, r0, r0 /* TLB0 = 0 */ or r10, r0, r0 /* TLB1 = 0 */ diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S index 61b3a1fed46..fc6b89f4dd3 100644 --- a/arch/microblaze/kernel/hw_exception_handler.S +++ b/arch/microblaze/kernel/hw_exception_handler.S @@ -193,8 +193,8 @@ * - W S REG EXC * * - * STACK FRAME STRUCTURE (for NO_MMU) - * --------------------------------- + * STACK FRAME STRUCTURE (for CONFIG_MMU=n) + * ---------------------------------------- * * +-------------+ + 0 * | MSR | diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c index 0c4453f134c..abdfb10e7ec 100644 --- a/arch/microblaze/kernel/prom.c +++ b/arch/microblaze/kernel/prom.c @@ -30,6 +30,7 @@ #include <linux/debugfs.h> #include <linux/irq.h> #include <linux/memblock.h> +#include <linux/of_fdt.h> #include <asm/prom.h> #include <asm/page.h> @@ -41,11 +42,6 @@ #include <asm/sections.h> #include <asm/pci-bridge.h> -void __init early_init_dt_add_memory_arch(u64 base, u64 size) -{ - memblock_add(base, size); -} - #ifdef CONFIG_EARLY_PRINTK static char *stdout; @@ -106,21 +102,10 @@ void __init early_init_devtree(void *params) { pr_debug(" -> early_init_devtree(%p)\n", params); - /* Setup flat device-tree pointer */ - initial_boot_params = params; - - /* Retrieve various informations from the /chosen node of the - * device-tree, including the platform type, initrd location and - * size, TCE reserve, and more ... 
- */ - of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); - - /* Scan memory nodes and rebuild MEMBLOCKs */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - of_scan_flat_dt(early_init_dt_scan_memory, NULL); + early_init_dt_scan(params); + if (!strlen(boot_command_line)) + strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); - /* Save command line for /proc/cmdline and then parse parameters */ - strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); parse_early_param(); memblock_allow_resize(); @@ -130,15 +115,6 @@ void __init early_init_devtree(void *params) pr_debug(" <- early_init_devtree()\n"); } -#ifdef CONFIG_BLK_DEV_INITRD -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; -} -#endif - /******* * * New implementation of the OF "find" APIs, return a refcounted diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index 0775e036c52..8de8ebc309f 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -16,6 +16,7 @@ #include <linux/initrd.h> #include <linux/console.h> #include <linux/debugfs.h> +#include <linux/of_fdt.h> #include <asm/setup.h> #include <asm/sections.h> @@ -50,7 +51,7 @@ char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data"))); void __init setup_arch(char **cmdline_p) { - *cmdline_p = cmd_line; + *cmdline_p = boot_command_line; console_verbose(); diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c index f905b3ae68c..f1e1f666ddd 100644 --- a/arch/microblaze/kernel/sys_microblaze.c +++ b/arch/microblaze/kernel/sys_microblaze.c @@ -33,12 +33,23 @@ #include <linux/slab.h> #include <asm/syscalls.h> -asmlinkage long sys_mmap(unsigned long addr, unsigned long len, - unsigned long prot, unsigned long flags, - unsigned long fd, off_t pgoff) +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + off_t, pgoff) { if (pgoff & ~PAGE_MASK) return -EINVAL; return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); } + +SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, pgoff) +{ + if (pgoff & (~PAGE_MASK >> 12)) + return -EINVAL; + + return sys_mmap_pgoff(addr, len, prot, flags, fd, + pgoff >> (PAGE_SHIFT - 12)); +} diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S index 4fca56cf02f..b882ad50535 100644 --- a/arch/microblaze/kernel/syscall_table.S +++ b/arch/microblaze/kernel/syscall_table.S @@ -192,7 +192,7 @@ ENTRY(sys_call_table) .long sys_ni_syscall /* reserved for streams2 */ .long sys_vfork /* 190 */ .long sys_getrlimit - .long sys_mmap_pgoff /* mmap2 */ + .long sys_mmap2 .long sys_truncate64 .long sys_ftruncate64 .long sys_stat64 /* 195 */ diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c index e4b3f33ef34..3e39b1082fd 100644 --- a/arch/microblaze/kernel/timer.c +++ b/arch/microblaze/kernel/timer.c @@ -15,6 +15,7 @@ #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/cpuinfo.h> #include <linux/cnt32_to_63.h> @@ -148,7 +149,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) static struct irqaction timer_irqaction = { .handler = timer_interrupt, - .flags = IRQF_DISABLED | IRQF_TIMER, + .flags = IRQF_TIMER, 
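	/*
	 * Editorial note, not part of the patch: IRQF_DISABLED has long been a
	 * no-op (genirq always runs handlers with interrupts disabled), so
	 * dropping it from .flags in the hunk above does not change the
	 * timer's behaviour.
	 */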
.name = "timer", .dev_id = &clockevent_xilinx_timer, }; diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c index 5226b09cbbb..dbbf2246a26 100644 --- a/arch/microblaze/mm/consistent.c +++ b/arch/microblaze/mm/consistent.c @@ -176,8 +176,7 @@ void consistent_free(size_t size, void *vaddr) page = virt_to_page(vaddr); do { - ClearPageReserved(page); - __free_page(page); + __free_reserved_page(page); page++; } while (size -= PAGE_SIZE); #else @@ -194,9 +193,7 @@ void consistent_free(size_t size, void *vaddr) pte_clear(&init_mm, (unsigned int)vaddr, ptep); if (pfn_valid(pfn)) { page = pfn_to_page(pfn); - - ClearPageReserved(page); - __free_page(page); + __free_reserved_page(page); } } vaddr += PAGE_SIZE; diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 1b93bf0892a..66804adcacf 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_pci.h> #include <linux/export.h> @@ -193,76 +194,6 @@ void pcibios_set_master(struct pci_dev *dev) } /* - * Reads the interrupt pin to determine if interrupt is use by card. - * If the interrupt is used, then gets the interrupt line from the - * openfirmware and sets it in the pci_dev and pci_config line. - */ -int pci_read_irq_line(struct pci_dev *pci_dev) -{ - struct of_irq oirq; - unsigned int virq; - - /* The current device-tree that iSeries generates from the HV - * PCI informations doesn't contain proper interrupt routing, - * and all the fallback would do is print out crap, so we - * don't attempt to resolve the interrupts here at all, some - * iSeries specific fixup does it. - * - * In the long run, we will hopefully fix the generated device-tree - * instead. - */ - pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); - -#ifdef DEBUG - memset(&oirq, 0xff, sizeof(oirq)); -#endif - /* Try to get a mapping from the device-tree */ - if (of_irq_map_pci(pci_dev, &oirq)) { - u8 line, pin; - - /* If that fails, lets fallback to what is in the config - * space and map that through the default controller. We - * also set the type to level low since that's what PCI - * interrupts are. If your platform does differently, then - * either provide a proper interrupt tree or don't use this - * function. - */ - if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) - return -1; - if (pin == 0) - return -1; - if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || - line == 0xff || line == 0) { - return -1; - } - pr_debug(" No map ! Using line %d (pin %d) from PCI config\n", - line, pin); - - virq = irq_create_mapping(NULL, line); - if (virq) - irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); - } else { - pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", - oirq.size, oirq.specifier[0], oirq.specifier[1], - of_node_full_name(oirq.controller)); - - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); - } - if (!virq) { - pr_debug(" Failed to map !\n"); - return -1; - } - - pr_debug(" Mapped to linux irq %d\n", virq); - - pci_dev->irq = virq; - - return 0; -} -EXPORT_SYMBOL(pci_read_irq_line); - -/* * Platform support for /proc/bus/pci/X/Y mmap()s, * modelled on the sparc64 implementation by Dave Miller. * -- paulus. 
@@ -960,7 +891,7 @@ void pcibios_setup_bus_devices(struct pci_bus *bus) dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET; /* Read default IRQs and fixup if necessary */ - pci_read_irq_line(dev); + dev->irq = of_irq_parse_and_map_pci(dev, 0, 0); } } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 17cc7ff8458..650de3976e7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1,6 +1,7 @@ config MIPS bool default y + select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_CONTEXT_TRACKING select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE @@ -2125,7 +2126,6 @@ source "mm/Kconfig" config SMP bool "Multi-Processing support" depends on SYS_SUPPORTS_SMP - select USE_GENERIC_SMP_HELPERS help This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig index e2b4ad55462..28e49f226dc 100644 --- a/arch/mips/configs/db1235_defconfig +++ b/arch/mips/configs/db1235_defconfig @@ -351,7 +351,6 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_MMC=y -CONFIG_MMC_CLKGATE=y CONFIG_MMC_AU1X=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 4d6fa0bf130..32966969f2f 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -27,13 +27,6 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -/* Don't support huge pages */ -#define KVM_HPAGE_GFN_SHIFT(x) 0 - -/* We don't currently support large pages. */ -#define KVM_NR_PAGE_SIZES 1 -#define KVM_PAGES_PER_HPAGE(x) 1 - /* Special address that contains the comm page, used for reducing # of traps */ diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h index 17daffb280a..470f2095b34 100644 --- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h +++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h @@ -69,6 +69,7 @@ void nlm_hal_init(void); int xlp_get_dram_map(int n, uint64_t *dram_map); /* Device tree related */ +void xlp_early_init_devtree(void); void *xlp_dt_init(void *fdtp); static inline int cpu_is_xlpii(void) diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h index a76fe5a57a9..df69bfd2b00 100644 --- a/arch/mips/include/asm/octeon/cvmx-pip.h +++ b/arch/mips/include/asm/octeon/cvmx-pip.h @@ -192,13 +192,13 @@ typedef struct { /* Number of packets processed by PIP */ uint32_t packets; /* - * Number of indentified L2 multicast packets. Does not + * Number of identified L2 multicast packets. Does not * include broadcast packets. Only includes packets whose * parse mode is SKIP_TO_L2 */ uint32_t multicast_packets; /* - * Number of indentified L2 broadcast packets. Does not + * Number of identified L2 broadcast packets. Does not * include multicast packets. 
Only includes packets whose * parse mode is SKIP_TO_L2 */ diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 881d18b4e29..b336037e876 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -80,9 +80,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, struct page *pte; pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); - if (pte) { - clear_highpage(pte); - pgtable_page_ctor(pte); + if (!pte) + return NULL; + clear_highpage(pte); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; } return pte; } diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h index 1e7e0961064..ccd2b75f152 100644 --- a/arch/mips/include/asm/prom.h +++ b/arch/mips/include/asm/prom.h @@ -17,22 +17,8 @@ #include <linux/types.h> #include <asm/bootinfo.h> -extern int early_init_dt_scan_memory_arch(unsigned long node, - const char *uname, int depth, void *data); - extern void device_tree_init(void); -static inline unsigned long pci_address_to_pio(phys_addr_t address) -{ - /* - * The ioport address can be directly used by inX() / outX() - */ - BUG_ON(address > IO_SPACE_LIMIT); - - return (unsigned long) address; -} -#define pci_address_to_pio pci_address_to_pio - struct boot_param_header; extern void __dt_setup_arch(struct boot_param_header *bph); diff --git a/arch/mips/include/uapi/asm/errno.h b/arch/mips/include/uapi/asm/errno.h index 31575e2fd1b..02d645d7aa9 100644 --- a/arch/mips/include/uapi/asm/errno.h +++ b/arch/mips/include/uapi/asm/errno.h @@ -102,7 +102,7 @@ #define EWOULDBLOCK EAGAIN /* Operation would block */ #define EALREADY 149 /* Operation already in progress */ #define EINPROGRESS 150 /* Operation now in progress */ -#define ESTALE 151 /* Stale NFS file handle */ +#define ESTALE 151 /* Stale file handle */ #define ECANCELED 158 /* AIO operation canceled */ /* diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 61c01f054d1..0df9787cd84 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -94,4 +94,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 0fa0b69cdd5..3c3b0df8f48 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -13,12 +13,9 @@ #include <linux/errno.h> #include <linux/types.h> #include <linux/bootmem.h> -#include <linux/initrd.h> #include <linux/debugfs.h> #include <linux/of.h> #include <linux/of_fdt.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> #include <asm/page.h> #include <asm/prom.h> @@ -40,13 +37,6 @@ char *mips_get_machine_name(void) } #ifdef CONFIG_OF -int __init early_init_dt_scan_memory_arch(unsigned long node, - const char *uname, int depth, - void *data) -{ - return early_init_dt_scan_memory(node, uname, depth, data); -} - void __init early_init_dt_add_memory_arch(u64 base, u64 size) { return add_memory_region(base, size, BOOT_MEM_RAM); @@ -57,57 +47,11 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); } -#ifdef CONFIG_BLK_DEV_INITRD -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; -} -#endif - -int __init early_init_dt_scan_model(unsigned long node, const char *uname, - int depth, void *data) -{ - if (!depth) { - char *model = 
of_get_flat_dt_prop(node, "model", NULL); - - if (model) - mips_set_machine_name(model); - } - return 0; -} - -void __init early_init_devtree(void *params) -{ - /* Setup flat device-tree pointer */ - initial_boot_params = params; - - /* Retrieve various informations from the /chosen node of the - * device-tree, including the platform type, initrd location and - * size, and more ... - */ - of_scan_flat_dt(early_init_dt_scan_chosen, arcs_cmdline); - - - /* Scan memory nodes */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); - - /* try to load the mips machine name */ - of_scan_flat_dt(early_init_dt_scan_model, NULL); -} - void __init __dt_setup_arch(struct boot_param_header *bph) { - if (be32_to_cpu(bph->magic) != OF_DT_HEADER) { - pr_err("DTB has bad magic, ignoring builtin OF DTB\n"); - + if (!early_init_dt_scan(bph)) return; - } - - initial_boot_params = bph; - early_init_devtree(initial_boot_params); + mips_set_machine_name(of_flat_dt_get_machine_name()); } #endif diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 57de8b75162..1905a419aa4 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -314,7 +314,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, return ret; } -int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err; diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index a7b044536de..73b34827826 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c @@ -198,12 +198,13 @@ kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) return -ENOIOCTLCMD; } -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) { return 0; } diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c index 49c46037028..19686c5bc5e 100644 --- a/arch/mips/lantiq/prom.c +++ b/arch/mips/lantiq/prom.c @@ -14,6 +14,7 @@ #include <asm/bootinfo.h> #include <asm/time.h> +#include <asm/prom.h> #include <lantiq.h> diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c index b5059dc899f..928ba84c8a7 100644 --- a/arch/mips/mti-sead3/sead3-setup.c +++ b/arch/mips/mti-sead3/sead3-setup.c @@ -10,6 +10,8 @@ #include <linux/of_fdt.h> #include <linux/bootmem.h> +#include <asm/prom.h> + #include <asm/mips-boards/generic.h> const char *get_system_type(void) diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c index 88df445dda7..8316d5454b1 100644 --- a/arch/mips/netlogic/xlp/dt.c +++ b/arch/mips/netlogic/xlp/dt.c @@ -39,8 +39,11 @@ #include <linux/of_platform.h> #include <linux/of_device.h> +#include <asm/prom.h> + extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_xlp_fvp_begin[], __dtb_start[]; +static void *xlp_fdt_blob; void __init *xlp_dt_init(void *fdtp) { @@ -67,19 +70,26 @@ void __init *xlp_dt_init(void *fdtp) break; } } - initial_boot_params = fdtp; + xlp_fdt_blob = fdtp; return fdtp; } +void __init xlp_early_init_devtree(void) +{ + __dt_setup_arch(xlp_fdt_blob); + strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); +} + void __init device_tree_init(void) { unsigned 
long base, size; + struct boot_param_header *fdtp = xlp_fdt_blob; - if (!initial_boot_params) + if (!fdtp) return; - base = virt_to_phys((void *)initial_boot_params); - size = be32_to_cpu(initial_boot_params->totalsize); + base = virt_to_phys(fdtp); + size = be32_to_cpu(fdtp->totalsize); /* Before we do anything, lets reserve the dt blob */ reserve_bootmem(base, size, BOOTMEM_DEFAULT); diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c index 76a7131e486..6d981bb337e 100644 --- a/arch/mips/netlogic/xlp/setup.c +++ b/arch/mips/netlogic/xlp/setup.c @@ -98,7 +98,7 @@ void __init plat_mem_setup(void) pm_power_off = nlm_linux_exit; /* memory and bootargs from DT */ - early_init_devtree(initial_boot_params); + xlp_early_init_devtree(); if (boot_mem_map.nr_map == 0) { pr_info("Using DRAM BARs for memory map.\n"); diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c index 6c829df28dc..c2ce41ea61d 100644 --- a/arch/mips/pci/fixup-lantiq.c +++ b/arch/mips/pci/fixup-lantiq.c @@ -25,16 +25,5 @@ int pcibios_plat_dev_init(struct pci_dev *dev) int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { - struct of_irq dev_irq; - int irq; - - if (of_irq_map_pci(dev, &dev_irq)) { - dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n", - slot, pin); - return 0; - } - irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier, - dev_irq.size); - dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq); - return irq; + return of_irq_parse_and_map_pci(dev, slot, pin); } diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 95c9d41382e..adeff2bfe4c 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -583,29 +583,7 @@ err_put_intc_node: int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { - struct of_irq dev_irq; - int err; - int irq; - - err = of_irq_map_pci(dev, &dev_irq); - if (err) { - pr_err("pci %s: unable to get irq map, err=%d\n", - pci_name((struct pci_dev *) dev), err); - return 0; - } - - irq = irq_create_of_mapping(dev_irq.controller, - dev_irq.specifier, - dev_irq.size); - - if (irq == 0) - pr_crit("pci %s: no irq found for pin %u\n", - pci_name((struct pci_dev *) dev), pin); - else - pr_info("pci %s: using irq %d for pin %u\n", - pci_name((struct pci_dev *) dev), irq, pin); - - return irq; + return of_irq_parse_and_map_pci(dev, slot, pin); } int pcibios_plat_dev_init(struct pci_dev *dev) diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 15f21ea9612..eccc5526155 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -21,6 +21,7 @@ #include <asm/reboot.h> #include <asm/bootinfo.h> #include <asm/addrspace.h> +#include <asm/prom.h> #include "common.h" diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 6aaa1607001..8bde9237d13 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -181,7 +181,6 @@ endmenu config SMP bool "Symmetric multi-processing support" default y - select USE_GENERIC_SMP_HELPERS depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050 ---help--- This enables support for systems with more than one CPU. 
If you have diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h index c67c2b5365a..75dbe696f83 100644 --- a/arch/mn10300/include/asm/mmu_context.h +++ b/arch/mn10300/include/asm/mmu_context.h @@ -71,7 +71,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm) local_flush_tlb_all(); /* fix the TLB version if needed (we avoid version #0 so as to - * distingush MMU_NO_CONTEXT) */ + * distinguish MMU_NO_CONTEXT) */ if (!mc) *pmc = mc = MMU_CONTEXT_FIRST_VERSION; } diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 6f31cc0f1a8..16632382468 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h @@ -44,7 +44,6 @@ extern void unit_pci_init(void); #define pcibios_assign_all_busses() 0 #endif -extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0xBE000004 #define PCIBIOS_MIN_MEM 0xB8000000 diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h index 146bacf193e..0f25d5fa86f 100644 --- a/arch/mn10300/include/asm/pgalloc.h +++ b/arch/mn10300/include/asm/pgalloc.h @@ -46,6 +46,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) static inline void pte_free(struct mm_struct *mm, struct page *pte) { + pgtable_page_dtor(pte); __free_page(pte); } diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index e2a2b203eb0..71dedcae55a 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -76,4 +76,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c index ebac9c11f79..2ad7f32fa12 100644 --- a/arch/mn10300/kernel/setup.c +++ b/arch/mn10300/kernel/setup.c @@ -35,9 +35,6 @@ struct mn10300_cpuinfo boot_cpu_data; -/* For PCI or other memory-mapped resources */ -unsigned long pci_mem_start = 0x18000000; - static char __initdata cmd_line[COMMAND_LINE_SIZE]; char redboot_command_line[COMMAND_LINE_SIZE] = "console=ttyS0,115200 root=/dev/mtdblock3 rw"; diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c index bd9ada693f9..e77a7c72808 100644 --- a/arch/mn10300/mm/pgtable.c +++ b/arch/mn10300/mm/pgtable.c @@ -78,8 +78,13 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) #else pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); #endif - if (pte) - clear_highpage(pte); + if (!pte) + return NULL; + clear_highpage(pte); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h index 7fa66a0e462..9e17aca5a2a 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.h +++ b/arch/mn10300/unit-asb2305/pci-asb2305.h @@ -35,7 +35,6 @@ extern void pcibios_resource_survey(void); /* pci.c */ -extern int pcibios_last_bus; extern struct pci_ops *pci_root_ops; extern struct irq_routing_table *pcibios_get_irq_routing_table(void); diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c index e37fac0461f..6b4339f8c9c 100644 --- a/arch/mn10300/unit-asb2305/pci.c +++ b/arch/mn10300/unit-asb2305/pci.c @@ -24,7 +24,6 @@ unsigned int pci_probe = 1; -int pcibios_last_bus = -1; struct pci_ops *pci_root_ops; /* @@ -392,10 +391,6 @@ char *__init pcibios_setup(char *str) if (!strcmp(str, "off")) { pci_probe = 0; return NULL; - - } else if (!strncmp(str, "lastbus=", 8)) { - pcibios_last_bus = simple_strtol(str+8, NULL, 
0); - return NULL; } return str; diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile index 4739b8302a5..89076a66eee 100644 --- a/arch/openrisc/Makefile +++ b/arch/openrisc/Makefile @@ -24,7 +24,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -S LDFLAGS_vmlinux := LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) -KBUILD_CFLAGS += -pipe -ffixed-r10 +KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y) KBUILD_CFLAGS += $(call cc-option,-mhard-mul) diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index ea172bdfa36..42fe5303a37 100644 --- a/arch/openrisc/configs/or1ksim_defconfig +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -1,9 +1,9 @@ CONFIG_CROSS_COMPILE="or32-linux-" +CONFIG_NO_HZ=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_GZIP is not set CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_KALLSYMS is not set # CONFIG_EPOLL is not set # CONFIG_TIMERFD is not set @@ -15,7 +15,6 @@ CONFIG_SLOB=y CONFIG_MODULES=y # CONFIG_BLOCK is not set CONFIG_OPENRISC_BUILTIN_DTB="or1ksim" -CONFIG_NO_HZ=y CONFIG_HZ_100=y CONFIG_NET=y CONFIG_PACKET=y @@ -39,11 +38,8 @@ CONFIG_DEVTMPFS_MOUNT=y # CONFIG_FW_LOADER is not set CONFIG_PROC_DEVICETREE=y CONFIG_NETDEVICES=y -CONFIG_MICREL_PHY=y -CONFIG_NET_ETHERNET=y CONFIG_ETHOC=y -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set +CONFIG_MICREL_PHY=y # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set @@ -55,11 +51,9 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set -# CONFIG_MFD_SUPPORT is not set # CONFIG_USB_SUPPORT is not set # CONFIG_DNOTIFY is not set CONFIG_TMPFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y # CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index 78405625e79..da1951a2290 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -65,6 +65,7 @@ generic-y += trace_clock.h generic-y += types.h generic-y += ucontext.h generic-y += user.h +generic-y += vga.h generic-y += word-at-a-time.h generic-y += xor.h generic-y += preempt.h diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index 05c39ecd2ef..21484e5b9e9 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -78,8 +78,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, { struct page *pte; pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); - if (pte) - clear_page(page_address(pte)); + if (!pte) + return NULL; + clear_page(page_address(pte)); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } @@ -90,6 +95,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) static inline void pte_free(struct mm_struct *mm, struct page *pte) { + pgtable_page_dtor(pte); __free_page(pte); } diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h deleted file mode 100644 index 93c9980e1b6..00000000000 --- a/arch/openrisc/include/asm/prom.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * OpenRISC Linux - * - * Linux architectural port borrowing liberally from similar works of - * others. All original copyrights apply as per the original source - * declaration. - * - * OpenRISC implementation: - * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> - * et al. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ -#ifndef _ASM_OPENRISC_PROM_H -#define _ASM_OPENRISC_PROM_H - -#define HAVE_ARCH_DEVTREE_FIXUPS - -#endif /* _ASM_OPENRISC_PROM_H */ diff --git a/arch/openrisc/kernel/module.c b/arch/openrisc/kernel/module.c index 10ff50f0202..ef872ae4c87 100644 --- a/arch/openrisc/kernel/module.c +++ b/arch/openrisc/kernel/module.c @@ -47,12 +47,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, *location = value; break; case R_OR32_CONST: - location = (uint16_t *)location + 1; - *((uint16_t *)location) = (uint16_t) (value); + *((uint16_t *)location + 1) = value; break; case R_OR32_CONSTH: - location = (uint16_t *)location + 1; - *((uint16_t *)location) = (uint16_t) (value >> 16); + *((uint16_t *)location + 1) = value >> 16; break; case R_OR32_JUMPTARG: value -= (uint32_t)location; diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c index a63e76872f8..6a44340d1b1 100644 --- a/arch/openrisc/kernel/prom.c +++ b/arch/openrisc/kernel/prom.c @@ -18,83 +18,15 @@ * */ -#include <stdarg.h> -#include <linux/kernel.h> -#include <linux/string.h> #include <linux/init.h> -#include <linux/threads.h> -#include <linux/spinlock.h> #include <linux/types.h> -#include <linux/pci.h> -#include <linux/stringify.h> -#include <linux/delay.h> -#include <linux/initrd.h> -#include <linux/bitops.h> -#include <linux/module.h> -#include <linux/kexec.h> -#include <linux/debugfs.h> -#include <linux/irq.h> #include <linux/memblock.h> #include <linux/of_fdt.h> -#include <asm/prom.h> #include <asm/page.h> -#include <asm/processor.h> -#include <asm/irq.h> -#include <linux/io.h> -#include <asm/mmu.h> -#include <asm/pgtable.h> -#include <asm/sections.h> -#include <asm/setup.h> - -extern char cmd_line[COMMAND_LINE_SIZE]; - -void __init early_init_dt_add_memory_arch(u64 base, u64 size) -{ - size &= PAGE_MASK; - memblock_add(base, size); -} void __init early_init_devtree(void *params) { - void *alloc; - - /* Setup flat device-tree pointer */ - initial_boot_params = params; - - - /* Retrieve various informations from the /chosen node of the - * device-tree, including the platform type, initrd location and - * size, TCE reserve, and more ... - */ - of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line); - - /* Scan memory nodes and rebuild MEMBLOCKs */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - of_scan_flat_dt(early_init_dt_scan_memory, NULL); - - /* Save command line for /proc/cmdline and then parse parameters */ - strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); - + early_init_dt_scan(params); memblock_allow_resize(); - - /* We must copy the flattend device tree from init memory to regular - * memory because the device tree references the strings in it - * directly. 
- */ - - alloc = __va(memblock_alloc(initial_boot_params->totalsize, PAGE_SIZE)); - - memcpy(alloc, initial_boot_params, initial_boot_params->totalsize); - - initial_boot_params = alloc; -} - -#ifdef CONFIG_BLK_DEV_INITRD -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; } -#endif diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c index d7359ffbcbd..4fc7ccc0a2c 100644 --- a/arch/openrisc/kernel/setup.c +++ b/arch/openrisc/kernel/setup.c @@ -40,6 +40,7 @@ #include <linux/device.h> #include <linux/of_platform.h> +#include <asm/sections.h> #include <asm/segment.h> #include <asm/pgtable.h> #include <asm/types.h> @@ -50,8 +51,6 @@ #include "vmlinux.h" -char __initdata cmd_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; - static unsigned long __init setup_memory(void) { unsigned long bootmap_size; @@ -77,7 +76,7 @@ static unsigned long __init setup_memory(void) ram_start_pfn = PFN_UP(memory_start); /* free_ram_start_pfn is first page after kernel */ - free_ram_start_pfn = PFN_UP(__pa(&_end)); + free_ram_start_pfn = PFN_UP(__pa(_end)); ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_pfn = ram_end_pfn; @@ -209,15 +208,15 @@ void __init setup_cpuinfo(void) * Falls back on built-in device tree in case null pointer is passed. */ -void __init or32_early_setup(unsigned int fdt) +void __init or32_early_setup(void *fdt) { - if (fdt) { - early_init_devtree((void*) fdt); - printk(KERN_INFO "FDT at 0x%08x\n", fdt); - } else { - early_init_devtree(__dtb_start); - printk(KERN_INFO "Compiled-in FDT at %p\n", __dtb_start); + if (fdt) + pr_info("FDT at %p\n", fdt); + else { + fdt = __dtb_start; + pr_info("Compiled-in FDT at %p\n", fdt); } + early_init_devtree(fdt); } static int __init openrisc_device_probe(void) @@ -285,15 +284,15 @@ void __init setup_arch(char **cmdline_p) { unsigned long max_low_pfn; - unflatten_device_tree(); + unflatten_and_copy_device_tree(); setup_cpuinfo(); /* process 1's initial memory region is the kernel code/data */ - init_mm.start_code = (unsigned long)&_stext; - init_mm.end_code = (unsigned long)&_etext; - init_mm.end_data = (unsigned long)&_edata; - init_mm.brk = (unsigned long)&_end; + init_mm.start_code = (unsigned long)_stext; + init_mm.end_code = (unsigned long)_etext; + init_mm.end_data = (unsigned long)_edata; + init_mm.brk = (unsigned long)_end; #ifdef CONFIG_BLK_DEV_INITRD initrd_start = (unsigned long)&__initrd_start; @@ -316,7 +315,7 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif - *cmdline_p = cmd_line; + *cmdline_p = boot_command_line; printk(KERN_INFO "OpenRISC Linux -- http://openrisc.net\n"); } diff --git a/arch/openrisc/kernel/vmlinux.h b/arch/openrisc/kernel/vmlinux.h index ee842a2d3f3..70b9ce41835 100644 --- a/arch/openrisc/kernel/vmlinux.h +++ b/arch/openrisc/kernel/vmlinux.h @@ -1,10 +1,8 @@ #ifndef __OPENRISC_VMLINUX_H_ #define __OPENRISC_VMLINUX_H_ -extern char _stext, _etext, _edata, _end; #ifdef CONFIG_BLK_DEV_INITRD extern char __initrd_start, __initrd_end; -extern char __initramfs_start; #endif extern u32 __dtb_start[]; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 7dcde539d61..b5f1858baf3 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -1,6 +1,7 @@ config PARISC def_bool y select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_IDE select HAVE_OPROFILE select HAVE_FUNCTION_TRACER if 64BIT @@ -226,7 +227,6 @@ 
endchoice config SMP bool "Symmetric multi-processing support" - select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. If diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index fc987a1c12a..f213f5b4c42 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -121,8 +121,12 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); - if (page) - pgtable_page_ctor(page); + if (!page) + return NULL; + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } return page; } diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h index 135ad6047e5..f3a8aa55484 100644 --- a/arch/parisc/include/uapi/asm/errno.h +++ b/arch/parisc/include/uapi/asm/errno.h @@ -37,7 +37,7 @@ #define EBADMSG 67 /* Not a data message */ #define EUSERS 68 /* Too many users */ #define EDQUOT 69 /* Quota exceeded */ -#define ESTALE 70 /* Stale NFS file handle */ +#define ESTALE 70 /* Stale file handle */ #define EREMOTE 71 /* Object is remote */ #define EOVERFLOW 72 /* Value too large for defined data type */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 71700e636a8..7c614d01f1f 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -75,6 +75,8 @@ #define SO_BUSY_POLL 0x4027 +#define SO_MAX_PACING_RATE 0x4048 + /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. */ diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 2a625fb063e..50dfafc3f2c 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -219,7 +219,7 @@ void *module_alloc(unsigned long size) * init_data correctly */ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, GFP_KERNEL | __GFP_HIGHMEM, - PAGE_KERNEL_RWX, -1, + PAGE_KERNEL_RWX, NUMA_NO_NODE, __builtin_return_address(0)); } diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c index 6c6a271a614..984abbee71c 100644 --- a/arch/parisc/kernel/signal32.c +++ b/arch/parisc/kernel/signal32.c @@ -319,7 +319,7 @@ copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from) } int -copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from) +copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from) { compat_uptr_t addr; compat_int_t val; diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h index 72ab41a51f3..af51d4ccee4 100644 --- a/arch/parisc/kernel/signal32.h +++ b/arch/parisc/kernel/signal32.h @@ -34,7 +34,7 @@ struct compat_ucontext { /* ELF32 signal handling */ -int copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from); +int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from); int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from); /* In a deft move of uber-hackery, we decide to carry the top half of all diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b365d5cbb72..b44b52c0a8f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -85,6 +85,7 @@ config GENERIC_HWEIGHT config PPC bool default y + select ARCH_MIGHT_HAVE_PC_PARPORT select BINFMT_ELF select OF select OF_EARLY_FLATTREE @@ -97,7 
+98,7 @@ config PPC select VIRT_TO_BUS if !PPC64 select HAVE_IDE select HAVE_IOREMAP_PROT - select HAVE_EFFICIENT_UNALIGNED_ACCESS + select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_LITTLE_ENDIAN select HAVE_KPROBES select HAVE_ARCH_KGDB select HAVE_KRETPROBES @@ -106,7 +107,6 @@ config PPC select HAVE_MEMBLOCK_NODE_MAP select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG - select USE_GENERIC_SMP_HELPERS if SMP select HAVE_OPROFILE select HAVE_DEBUG_KMEMLEAK select GENERIC_ATOMIC64 if PPC32 @@ -140,6 +140,9 @@ config PPC select HAVE_DEBUG_STACKOVERFLOW select HAVE_IRQ_EXIT_ON_IRQ_STACK +config GENERIC_CSUM + def_bool CPU_LITTLE_ENDIAN + config EARLY_PRINTK bool default y @@ -405,7 +408,7 @@ config CRASH_DUMP config FA_DUMP bool "Firmware-assisted dump" - depends on PPC64 && PPC_RTAS && CRASH_DUMP + depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC help A robust mechanism to get reliable kernel crash dump with assistance from firmware. This approach does not use kexec, @@ -418,7 +421,7 @@ config FA_DUMP config IRQ_ALL_CPUS bool "Distribute interrupts on all CPUs by default" - depends on SMP && !MV64360 + depends on SMP help This option gives the kernel permission to distribute IRQs across multiple CPUs. Saying N here will route all IRQs to the first @@ -1010,6 +1013,9 @@ config PHYSICAL_START default "0x00000000" endif +config ARCH_RANDOM + def_bool n + source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 51cfb78d406..607acf54a42 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -36,17 +36,26 @@ KBUILD_DEFCONFIG := ppc64_defconfig endif ifeq ($(CONFIG_PPC64),y) -OLDARCH := ppc64 - new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) ifeq ($(new_nm),y) NM := $(NM) --synthetic endif +endif +ifeq ($(CONFIG_PPC64),y) +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +OLDARCH := ppc64le +else +OLDARCH := ppc64 +endif +else +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +OLDARCH := ppcle else OLDARCH := ppc endif +endif # It seems there are times we use this Makefile without # including the config file, but this replicates the old behaviour @@ -56,11 +65,29 @@ endif UTS_MACHINE := $(OLDARCH) +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +override CC += -mlittle-endian -mno-strict-align +override AS += -mlittle-endian +override LD += -EL +override CROSS32CC += -mlittle-endian +override CROSS32AS += -mlittle-endian +LDEMULATION := lppc +GNUTARGET := powerpcle +MULTIPLEWORD := -mno-multiple +else +override CC += -mbig-endian +override AS += -mbig-endian +override LD += -EB +LDEMULATION := ppc +GNUTARGET := powerpc +MULTIPLEWORD := -mmultiple +endif + ifeq ($(HAS_BIARCH),y) override AS += -a$(CONFIG_WORD_SIZE) -override LD += -m elf$(CONFIG_WORD_SIZE)ppc +override LD += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION) override CC += -m$(CONFIG_WORD_SIZE) -override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR) +override AR := GNUTARGET=elf$(CONFIG_WORD_SIZE)-$(GNUTARGET) $(AR) endif LDFLAGS_vmlinux-y := -Bstatic @@ -86,7 +113,7 @@ endif CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) -CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple +CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) ifeq ($(CONFIG_PPC_BOOK3S_64),y) CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4) diff --git a/arch/powerpc/boot/Makefile 
b/arch/powerpc/boot/Makefile index 15ca2255f43..ca7f08cc4af 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -22,7 +22,8 @@ all: $(obj)/zImage BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -Os -msoft-float -pipe \ -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ - -isystem $(shell $(CROSS32CC) -print-file-name=include) + -isystem $(shell $(CROSS32CC) -print-file-name=include) \ + -mbig-endian BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc ifdef CONFIG_DEBUG_INFO diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts new file mode 100644 index 00000000000..7290021f2df --- /dev/null +++ b/arch/powerpc/boot/dts/b4860emu.dts @@ -0,0 +1,218 @@ +/* + * B4860 emulator Device Tree Source + * + * Copyright 2013 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * This software is provided by Freescale Semiconductor "as is" and any + * express or implied warranties, including, but not limited to, the implied + * warranties of merchantability and fitness for a particular purpose are + * disclaimed. In no event shall Freescale Semiconductor be liable for any + * direct, indirect, incidental, special, exemplary, or consequential damages + * (including, but not limited to, procurement of substitute goods or services; + * loss of use, data, or profits; or business interruption) however caused and + * on any theory of liability, whether in contract, strict liability, or tort + * (including negligence or otherwise) arising in any way out of the use of + * this software, even if advised of the possibility of such damage. 
+ */ + +/dts-v1/; + +/include/ "fsl/e6500_power_isa.dtsi" + +/ { + compatible = "fsl,B4860"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ccsr = &soc; + + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + dma0 = &dma0; + dma1 = &dma1; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,e6500@0 { + device_type = "cpu"; + reg = <0 1>; + next-level-cache = <&L2>; + }; + cpu1: PowerPC,e6500@2 { + device_type = "cpu"; + reg = <2 3>; + next-level-cache = <&L2>; + }; + cpu2: PowerPC,e6500@4 { + device_type = "cpu"; + reg = <4 5>; + next-level-cache = <&L2>; + }; + cpu3: PowerPC,e6500@6 { + device_type = "cpu"; + reg = <6 7>; + next-level-cache = <&L2>; + }; + }; +}; + +/ { + model = "fsl,B4860QDS"; + compatible = "fsl,B4860EMU", "fsl,B4860QDS"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + ifc: localbus@ffe124000 { + reg = <0xf 0xfe124000 0 0x2000>; + ranges = <0 0 0xf 0xe8000000 0x08000000 + 2 0 0xf 0xff800000 0x00010000 + 3 0 0xf 0xffdf0000 0x00008000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x8000000>; + bank-width = <2>; + device-width = <1>; + }; + }; + + memory { + device_type = "memory"; + }; + + soc: soc@ffe000000 { + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + }; +}; + +&ifc { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,ifc", "simple-bus"; + interrupts = <25 2 0 0>; +}; + +&soc { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + + soc-sram-error { + compatible = "fsl,soc-sram-error"; + interrupts = <16 2 1 2>; + }; + + corenet-law@0 { + compatible = "fsl,corenet-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <32>; + }; + + ddr1: memory-controller@8000 { + compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller"; + reg = <0x8000 0x1000>; + interrupts = <16 2 1 8>; + }; + + ddr2: memory-controller@9000 { + compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller"; + reg = <0x9000 0x1000>; + interrupts = <16 2 1 9>; + }; + + cpc: l3-cache-controller@10000 { + compatible = "fsl,b4-l3-cache-controller", "cache"; + reg = <0x10000 0x1000 + 0x11000 0x1000>; + interrupts = <16 2 1 4>; + }; + + corenet-cf@18000 { + compatible = "fsl,b4-corenet-cf"; + reg = <0x18000 0x1000>; + interrupts = <16 2 1 0>; + fsl,ccf-num-csdids = <32>; + fsl,ccf-num-snoopids = <32>; + }; + + iommu@20000 { + compatible = "fsl,pamu-v1.0", "fsl,pamu"; + reg = <0x20000 0x4000>; + #address-cells = <1>; + #size-cells = <1>; + interrupts = < + 24 2 0 0 + 16 2 1 1>; + pamu0: pamu@0 { + reg = <0 0x1000>; + fsl,primary-cache-geometry = <8 1>; + fsl,secondary-cache-geometry = <32 2>; + }; + }; + +/include/ "fsl/qoriq-mpic.dtsi" + + guts: global-utilities@e0000 { + compatible = "fsl,b4-device-config"; + reg = <0xe0000 0xe00>; + fsl,has-rstcr; + fsl,liodn-bits = <12>; + }; + + clockgen: global-utilities@e1000 { + compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0"; + reg = <0xe1000 0x1000>; + }; + +/include/ "fsl/qoriq-dma-0.dtsi" + dma@100300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */ + }; + +/include/ "fsl/qoriq-dma-1.dtsi" + dma@101300 { + fsl,iommu-parent = <&pamu0>; + fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */ + }; + +/include/ "fsl/qoriq-i2c-0.dtsi" +/include/ "fsl/qoriq-i2c-1.dtsi" +/include/ "fsl/qoriq-duart-0.dtsi" 
+/include/ "fsl/qoriq-duart-1.dtsi" + + L2: l2-cache-controller@c20000 { + compatible = "fsl,b4-l2-cache-controller"; + reg = <0xc20000 0x1000>; + next-level-cache = <&cpc>; + }; +}; diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi index e6d2f8f9054..8b47edcfabf 100644 --- a/arch/powerpc/boot/dts/b4qds.dtsi +++ b/arch/powerpc/boot/dts/b4qds.dtsi @@ -120,25 +120,38 @@ }; i2c@118000 { - eeprom@50 { - compatible = "at24,24c64"; - reg = <0x50>; - }; - eeprom@51 { - compatible = "at24,24c256"; - reg = <0x51>; - }; - eeprom@53 { - compatible = "at24,24c256"; - reg = <0x53>; - }; - eeprom@57 { - compatible = "at24,24c256"; - reg = <0x57>; - }; - rtc@68 { - compatible = "dallas,ds3232"; - reg = <0x68>; + mux@77 { + compatible = "nxp,pca9547"; + reg = <0x77>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + eeprom@50 { + compatible = "at24,24c64"; + reg = <0x50>; + }; + eeprom@51 { + compatible = "at24,24c256"; + reg = <0x51>; + }; + eeprom@53 { + compatible = "at24,24c256"; + reg = <0x53>; + }; + eeprom@57 { + compatible = "at24,24c256"; + reg = <0x57>; + }; + rtc@68 { + compatible = "dallas,ds3232"; + reg = <0x68>; + }; + }; }; }; diff --git a/arch/powerpc/boot/dts/c293pcie.dts b/arch/powerpc/boot/dts/c293pcie.dts index 1238bda8901..6681cc21030 100644 --- a/arch/powerpc/boot/dts/c293pcie.dts +++ b/arch/powerpc/boot/dts/c293pcie.dts @@ -45,6 +45,7 @@ ifc: ifc@fffe1e000 { reg = <0xf 0xffe1e000 0 0x2000>; ranges = <0x0 0x0 0xf 0xec000000 0x04000000 + 0x1 0x0 0xf 0xff800000 0x00010000 0x2 0x0 0xf 0xffdf0000 0x00010000>; }; diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi index 7b4426e0a5a..c6e451affb0 100644 --- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi @@ -34,6 +34,8 @@ /dts-v1/; +/include/ "e6500_power_isa.dtsi" + / { compatible = "fsl,B4420"; #address-cells = <2>; diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi index e5cf6c81dd6..981397518fc 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi @@ -41,7 +41,7 @@ &rio { compatible = "fsl,srio"; - interrupts = <16 2 1 11>; + interrupts = <16 2 1 20>; #address-cells = <2>; #size-cells = <2>; fsl,iommu-parent = <&pamu0>; diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi index 5263fa46a3f..9bc26b14790 100644 --- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi @@ -34,6 +34,8 @@ /dts-v1/; +/include/ "e6500_power_isa.dtsi" + / { compatible = "fsl,B4860"; #address-cells = <2>; diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi index 5180d9d3798..0c0efa94cfb 100644 --- a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi @@ -130,7 +130,7 @@ usb@22000 { /include/ "pq3-esdhc-0.dtsi" sdhc@2e000 { - fsl,sdhci-auto-cmd12; + sdhci,auto-cmd12; interrupts = <41 0x2 0 0>; }; diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi index 743e4aeda34..f6ec4a67560 100644 --- a/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi +++ b/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi @@ -33,6 +33,9 @@ */ /dts-v1/; + +/include/ "e500v2_power_isa.dtsi" + / { compatible = "fsl,BSC9131"; #address-cells = <2>; diff --git 
a/arch/powerpc/boot/dts/t4240emu.dts b/arch/powerpc/boot/dts/t4240emu.dts new file mode 100644 index 00000000000..ee24ab33559 --- /dev/null +++ b/arch/powerpc/boot/dts/t4240emu.dts @@ -0,0 +1,268 @@ +/* + * T4240 emulator Device Tree Source + * + * Copyright 2013 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/dts-v1/; + +/include/ "fsl/e6500_power_isa.dtsi" +/ { + compatible = "fsl,T4240"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + aliases { + ccsr = &soc; + + serial0 = &serial0; + serial1 = &serial1; + serial2 = &serial2; + serial3 = &serial3; + dma0 = &dma0; + dma1 = &dma1; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: PowerPC,e6500@0 { + device_type = "cpu"; + reg = <0 1>; + next-level-cache = <&L2_1>; + }; + cpu1: PowerPC,e6500@2 { + device_type = "cpu"; + reg = <2 3>; + next-level-cache = <&L2_1>; + }; + cpu2: PowerPC,e6500@4 { + device_type = "cpu"; + reg = <4 5>; + next-level-cache = <&L2_1>; + }; + cpu3: PowerPC,e6500@6 { + device_type = "cpu"; + reg = <6 7>; + next-level-cache = <&L2_1>; + }; + + cpu4: PowerPC,e6500@8 { + device_type = "cpu"; + reg = <8 9>; + next-level-cache = <&L2_2>; + }; + cpu5: PowerPC,e6500@10 { + device_type = "cpu"; + reg = <10 11>; + next-level-cache = <&L2_2>; + }; + cpu6: PowerPC,e6500@12 { + device_type = "cpu"; + reg = <12 13>; + next-level-cache = <&L2_2>; + }; + cpu7: PowerPC,e6500@14 { + device_type = "cpu"; + reg = <14 15>; + next-level-cache = <&L2_2>; + }; + + cpu8: PowerPC,e6500@16 { + device_type = "cpu"; + reg = <16 17>; + next-level-cache = <&L2_3>; + }; + cpu9: PowerPC,e6500@18 { + device_type = "cpu"; + reg = <18 19>; + next-level-cache = <&L2_3>; + }; + cpu10: PowerPC,e6500@20 { + device_type = "cpu"; + reg = <20 21>; + next-level-cache = <&L2_3>; + }; + cpu11: PowerPC,e6500@22 { + device_type = "cpu"; + reg = <22 23>; + next-level-cache = <&L2_3>; + }; + }; +}; + +/ { + model = "fsl,T4240QDS"; + compatible = "fsl,T4240EMU", "fsl,T4240QDS"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&mpic>; + + ifc: localbus@ffe124000 { + reg = <0xf 0xfe124000 0 0x2000>; + ranges = <0 0 0xf 0xe8000000 0x08000000 + 2 0 0xf 0xff800000 0x00010000 + 3 0 0xf 0xffdf0000 0x00008000>; + + nor@0,0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "cfi-flash"; + reg = <0x0 0x0 0x8000000>; + + bank-width = <2>; + device-width = <1>; + }; + + }; + + memory { + device_type = "memory"; + }; + + soc: soc@ffe000000 { + ranges = <0x00000000 0xf 0xfe000000 0x1000000>; + reg = <0xf 0xfe000000 0 0x00001000>; + + }; +}; + +&ifc { + #address-cells = <2>; + #size-cells = <1>; + compatible = "fsl,ifc", "simple-bus"; + interrupts = <25 2 0 0>; +}; + +&soc { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + compatible = "simple-bus"; + + soc-sram-error { + compatible = "fsl,soc-sram-error"; + interrupts = <16 2 1 29>; + }; + + corenet-law@0 { + compatible = "fsl,corenet-law"; + reg = <0x0 0x1000>; + fsl,num-laws = <32>; + }; + + ddr1: memory-controller@8000 { + compatible = "fsl,qoriq-memory-controller-v4.7", + "fsl,qoriq-memory-controller"; + reg = <0x8000 0x1000>; + interrupts = <16 2 1 23>; + }; + + ddr2: memory-controller@9000 { + compatible = "fsl,qoriq-memory-controller-v4.7", + "fsl,qoriq-memory-controller"; + reg = <0x9000 0x1000>; + interrupts = <16 2 1 22>; + }; + + ddr3: memory-controller@a000 { + compatible = "fsl,qoriq-memory-controller-v4.7", + "fsl,qoriq-memory-controller"; + reg = <0xa000 0x1000>; + interrupts = <16 2 1 21>; + }; + + cpc: l3-cache-controller@10000 { + compatible = "fsl,t4240-l3-cache-controller", "cache"; + reg = <0x10000 0x1000 + 0x11000 0x1000 + 0x12000 0x1000>; + interrupts = <16 2 1 27 + 16 2 1 26 + 16 2 1 25>; + }; + + corenet-cf@18000 { + compatible = "fsl,corenet-cf"; + reg = <0x18000 0x1000>; + interrupts = <16 2 1 31>; + 
fsl,ccf-num-csdids = <32>; + fsl,ccf-num-snoopids = <32>; + }; + + iommu@20000 { + compatible = "fsl,pamu-v1.0", "fsl,pamu"; + reg = <0x20000 0x6000>; + interrupts = < + 24 2 0 0 + 16 2 1 30>; + }; + +/include/ "fsl/qoriq-mpic.dtsi" + + guts: global-utilities@e0000 { + compatible = "fsl,t4240-device-config", "fsl,qoriq-device-config-2.0"; + reg = <0xe0000 0xe00>; + fsl,has-rstcr; + fsl,liodn-bits = <12>; + }; + + clockgen: global-utilities@e1000 { + compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0"; + reg = <0xe1000 0x1000>; + }; + +/include/ "fsl/qoriq-dma-0.dtsi" +/include/ "fsl/qoriq-dma-1.dtsi" + +/include/ "fsl/qoriq-i2c-0.dtsi" +/include/ "fsl/qoriq-i2c-1.dtsi" +/include/ "fsl/qoriq-duart-0.dtsi" +/include/ "fsl/qoriq-duart-1.dtsi" + + L2_1: l2-cache-controller@c20000 { + compatible = "fsl,t4240-l2-cache-controller"; + reg = <0xc20000 0x40000>; + next-level-cache = <&cpc>; + }; + L2_2: l2-cache-controller@c60000 { + compatible = "fsl,t4240-l2-cache-controller"; + reg = <0xc60000 0x40000>; + next-level-cache = <&cpc>; + }; + L2_3: l2-cache-controller@ca0000 { + compatible = "fsl,t4240-l2-cache-controller"; + reg = <0xca0000 0x40000>; + next-level-cache = <&cpc>; + }; +}; diff --git a/arch/powerpc/boot/dts/t4240qds.dts b/arch/powerpc/boot/dts/t4240qds.dts index 0555976dd0f..63e81b01080 100644 --- a/arch/powerpc/boot/dts/t4240qds.dts +++ b/arch/powerpc/boot/dts/t4240qds.dts @@ -118,36 +118,53 @@ }; i2c@118000 { - eeprom@51 { - compatible = "at24,24c256"; - reg = <0x51>; - }; - eeprom@52 { - compatible = "at24,24c256"; - reg = <0x52>; - }; - eeprom@53 { - compatible = "at24,24c256"; - reg = <0x53>; - }; - eeprom@54 { - compatible = "at24,24c256"; - reg = <0x54>; - }; - eeprom@55 { - compatible = "at24,24c256"; - reg = <0x55>; - }; - eeprom@56 { - compatible = "at24,24c256"; - reg = <0x56>; - }; - rtc@68 { - compatible = "dallas,ds3232"; - reg = <0x68>; - interrupts = <0x1 0x1 0 0>; + mux@77 { + compatible = "nxp,pca9547"; + reg = <0x77>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + eeprom@51 { + compatible = "at24,24c256"; + reg = <0x51>; + }; + eeprom@52 { + compatible = "at24,24c256"; + reg = <0x52>; + }; + eeprom@53 { + compatible = "at24,24c256"; + reg = <0x53>; + }; + eeprom@54 { + compatible = "at24,24c256"; + reg = <0x54>; + }; + eeprom@55 { + compatible = "at24,24c256"; + reg = <0x55>; + }; + eeprom@56 { + compatible = "at24,24c256"; + reg = <0x56>; + }; + rtc@68 { + compatible = "dallas,ds3232"; + reg = <0x68>; + interrupts = <0x1 0x1 0 0>; + }; + }; }; }; + + sdhc@114000 { + voltage-ranges = <1800 1800 3300 3300>; + }; }; pci0: pcie@ffe240000 { diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index cd7af841ba0..2e1af74a64b 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -147,21 +147,29 @@ link_address='0x400000' make_space=y case "$platform" in +of) + platformo="$object/of.o $object/epapr.o" + make_space=n + ;; pseries) platformo="$object/of.o $object/epapr.o" link_address='0x4000000' + make_space=n ;; maple) platformo="$object/of.o $object/epapr.o" link_address='0x400000' + make_space=n ;; pmac|chrp) platformo="$object/of.o $object/epapr.o" + make_space=n ;; coff) platformo="$object/crt0.o $object/of.o $object/epapr.o" lds=$object/zImage.coff.lds link_address='0x500000' + make_space=n pie= ;; miboot|uboot*) diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index 3dfab4c40c7..bbd794deb6e 100644 --- 
a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig @@ -23,11 +23,7 @@ CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y CONFIG_MAC_PARTITION=y -CONFIG_P2041_RDB=y -CONFIG_P3041_DS=y -CONFIG_P4080_DS=y -CONFIG_P5020_DS=y -CONFIG_P5040_DS=y +CONFIG_CORENET_GENERIC=y CONFIG_HIGHMEM=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m @@ -104,6 +100,7 @@ CONFIG_FSL_PQ_MDIO=y CONFIG_E1000=y CONFIG_E1000E=y CONFIG_VITESSE_PHY=y +CONFIG_AT803X_PHY=y CONFIG_FIXED_PHY=y # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index fa94fb3bb44..63508ddee11 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -21,10 +21,7 @@ CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y CONFIG_MAC_PARTITION=y -CONFIG_B4_QDS=y -CONFIG_P5020_DS=y -CONFIG_P5040_DS=y -CONFIG_T4240_QDS=y +CONFIG_CORENET_GENERIC=y # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index dc098d98821..d2e0fab5ee5 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig @@ -138,6 +138,7 @@ CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_CICADA_PHY=y CONFIG_VITESSE_PHY=y +CONFIG_AT803X_PHY=y CONFIG_FIXED_PHY=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index 5bca60161bb..4cb7b59e98b 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -138,6 +138,7 @@ CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_CICADA_PHY=y CONFIG_VITESSE_PHY=y +CONFIG_AT803X_PHY=y CONFIG_FIXED_PHY=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 0e8cfd09da2..581a3bcae72 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -2,7 +2,6 @@ CONFIG_PPC64=y CONFIG_ALTIVEC=y CONFIG_VSX=y CONFIG_SMP=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_IRQ_DOMAIN_DEBUG=y @@ -25,7 +24,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y -CONFIG_EFI_PARTITION=y CONFIG_PPC_SPLPAR=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y @@ -50,12 +48,10 @@ CONFIG_CPU_FREQ_PMAC64=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y -CONFIG_HOTPLUG_CPU=y CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_SCHED_SMT=y -CONFIG_PPC_DENORMALISATION=y CONFIG_PCCARD=y CONFIG_ELECTRA_CF=y CONFIG_HOTPLUG_PCI=y @@ -89,7 +85,6 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m @@ -131,7 +126,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -157,6 +151,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 
+CONFIG_VIRTIO_BLK=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_GENERIC=y @@ -185,6 +180,10 @@ CONFIG_SCSI_IPR=y CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_LPFC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=m +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_ALUA=m CONFIG_ATA=y CONFIG_SATA_SIL24=y CONFIG_SATA_SVW=y @@ -203,6 +202,9 @@ CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_UEVENT=y CONFIG_ADB_PMU=y CONFIG_PMAC_SMU=y CONFIG_THERM_PM72=y @@ -216,6 +218,8 @@ CONFIG_DUMMY=m CONFIG_NETCONSOLE=y CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m +CONFIG_VIRTIO_NET=m +CONFIG_VHOST_NET=m CONFIG_VORTEX=y CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y @@ -262,6 +266,7 @@ CONFIG_HVC_CONSOLE=y CONFIG_HVC_RTAS=y CONFIG_HVC_BEAT=y CONFIG_HVCS=m +CONFIG_VIRTIO_CONSOLE=m CONFIG_IBM_BSR=m CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y @@ -301,7 +306,6 @@ CONFIG_HID_GYRATION=y CONFIG_HID_PANTHERLORD=y CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB_HIDDEV=y CONFIG_USB=y @@ -328,6 +332,8 @@ CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_PASEMI=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_BALLOON=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -386,21 +392,19 @@ CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_LOCKUP_DETECTOR=y CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_STACK_USAGE=y CONFIG_LATENCYTOP=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y CONFIG_BOOTX_TEXT=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_PPC_EARLY_DEBUG_BOOTX=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y @@ -422,4 +426,3 @@ CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=y -CONFIG_VHOST_NET=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 0085dc4642c..f627fda0895 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -1,7 +1,6 @@ CONFIG_PPC64=y CONFIG_PPC_BOOK3E_64=y CONFIG_SMP=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y @@ -23,7 +22,7 @@ CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y CONFIG_MAC_PARTITION=y CONFIG_EFI_PARTITION=y -CONFIG_P5020_DS=y +CONFIG_CORENET_GENERIC=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y @@ -61,7 +60,6 @@ CONFIG_NF_CONNTRACK_PPTP=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_TPROXY=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m @@ -103,7 +101,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -193,7 +190,6 @@ CONFIG_PPP_SYNC_TTY=m CONFIG_INPUT_EVDEV=m CONFIG_INPUT_MISC=y # CONFIG_SERIO_SERPORT is not set -CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set @@ -230,7 +226,6 @@ CONFIG_HID_NTRIG=y CONFIG_HID_PANTHERLORD=y CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y 
CONFIG_HID_GREENASIA=y CONFIG_HID_SMARTJOYPLUS=y @@ -302,19 +297,18 @@ CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_STACK_USAGE=y CONFIG_LATENCYTOP=y CONFIG_IRQSOFF_TRACER=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 20ebfaf7234..c2353bf059f 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -71,7 +71,7 @@ CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_PPC_BESTCOMM=y CONFIG_GPIO_MPC8XXX=y -CONFIG_MCU_MPC8349EMITX=m +CONFIG_MCU_MPC8349EMITX=y CONFIG_HIGHMEM=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 1d4b9763895..e9a8b4e0a0f 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -3,7 +3,6 @@ CONFIG_ALTIVEC=y CONFIG_VSX=y CONFIG_SMP=y CONFIG_NR_CPUS=2048 -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y @@ -33,7 +32,6 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_PARTITION_ADVANCED=y -CONFIG_EFI_PARTITION=y CONFIG_PPC_SPLPAR=y CONFIG_SCANLOG=m CONFIG_PPC_SMLPAR=y @@ -44,7 +42,6 @@ CONFIG_IBMEBUS=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y -CONFIG_HOTPLUG_CPU=y CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y CONFIG_MEMORY_HOTPLUG=y @@ -52,7 +49,6 @@ CONFIG_MEMORY_HOTREMOVE=y CONFIG_PPC_64K_PAGES=y CONFIG_PPC_SUBPAGE_PROT=y CONFIG_SCHED_SMT=y -CONFIG_PPC_DENORMALISATION=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_RPA=m CONFIG_HOTPLUG_PCI_RPA_DLPAR=m @@ -113,7 +109,6 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -132,6 +127,7 @@ CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_VIRTIO_BLK=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_GENERIC=y @@ -157,6 +153,10 @@ CONFIG_SCSI_IPR=y CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_SCSI_LPFC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=m +CONFIG_SCSI_DH_RDAC=m +CONFIG_SCSI_DH_ALUA=m CONFIG_ATA=y # CONFIG_ATA_SFF is not set CONFIG_MD=y @@ -174,11 +174,16 @@ CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_UEVENT=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_NETCONSOLE=y CONFIG_NETPOLL_TRAP=y CONFIG_TUN=m +CONFIG_VIRTIO_NET=m +CONFIG_VHOST_NET=m CONFIG_VORTEX=y CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y @@ -216,6 +221,7 @@ CONFIG_SERIAL_JSM=m CONFIG_HVC_CONSOLE=y CONFIG_HVC_RTAS=y CONFIG_HVCS=m +CONFIG_VIRTIO_CONSOLE=m CONFIG_IBM_BSR=m CONFIG_GEN_RTC=y CONFIG_RAW_DRIVER=y @@ -237,7 +243,6 @@ CONFIG_HID_GYRATION=y CONFIG_HID_PANTHERLORD=y CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB_HIDDEV=y CONFIG_USB=y @@ -258,6 +263,8 @@ CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y CONFIG_INFINIBAND_SRP=m CONFIG_INFINIBAND_ISER=m +CONFIG_VIRTIO_PCI=m 
+CONFIG_VIRTIO_BALLOON=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -314,18 +321,17 @@ CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y -CONFIG_LOCKUP_DETECTOR=y CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_LOCKUP_DETECTOR=y CONFIG_LATENCYTOP=y CONFIG_SCHED_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_DEBUG_STACKOVERFLOW=y CONFIG_CODE_PATCHING_SELFTEST=y CONFIG_FTR_FIXUP_SELFTEST=y CONFIG_MSI_BITMAP_SELFTEST=y CONFIG_XMON=y CONFIG_XMON_DEFAULT=y -CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y @@ -347,4 +353,3 @@ CONFIG_CRYPTO_DEV_NX_ENCRYPT=m CONFIG_VIRTUALIZATION=y CONFIG_KVM_BOOK3S_64=m CONFIG_KVM_BOOK3S_64_HV=y -CONFIG_VHOST_NET=m diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h new file mode 100644 index 00000000000..d853d163ba4 --- /dev/null +++ b/arch/powerpc/include/asm/archrandom.h @@ -0,0 +1,32 @@ +#ifndef _ASM_POWERPC_ARCHRANDOM_H +#define _ASM_POWERPC_ARCHRANDOM_H + +#ifdef CONFIG_ARCH_RANDOM + +#include <asm/machdep.h> + +static inline int arch_get_random_long(unsigned long *v) +{ + if (ppc_md.get_random_long) + return ppc_md.get_random_long(v); + + return 0; +} + +static inline int arch_get_random_int(unsigned int *v) +{ + unsigned long val; + int rc; + + rc = arch_get_random_long(&val); + if (rc) + *v = val; + + return rc; +} + +int powernv_get_random_long(unsigned long *v); + +#endif /* CONFIG_ARCH_RANDOM */ + +#endif /* _ASM_POWERPC_ARCHRANDOM_H */ diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index ce0c28495f9..8251a3ba870 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -14,6 +14,9 @@ * which always checksum on 4 octet boundaries. ihl is the number * of 32-bit words and is always >= 5. 
*/ +#ifdef CONFIG_GENERIC_CSUM +#include <asm-generic/checksum.h> +#else extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); /* @@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, return sum; #endif } + +#endif #endif /* __KERNEL__ */ #endif diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h index 9b198d1b3b2..856f8deb557 100644 --- a/arch/powerpc/include/asm/disassemble.h +++ b/arch/powerpc/include/asm/disassemble.h @@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst) return inst & 0xffff; } +static inline unsigned int get_oc(u32 inst) +{ + return (inst >> 11) & 0x7fff; +} #endif /* __ASM_PPC_DISASSEMBLE_H__ */ diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index 5a8b82aa724..4358e3002f3 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h @@ -43,6 +43,7 @@ extern struct ppc_emulated { struct ppc_emulated_entry popcntb; struct ppc_emulated_entry spe; struct ppc_emulated_entry string; + struct ppc_emulated_entry sync; struct ppc_emulated_entry unaligned; #ifdef CONFIG_MATH_EMULATION struct ppc_emulated_entry math; diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index cca12f08484..894662a5d4d 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) cmpwi r10,0; \ bne do_kvm_##n +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +/* + * If hv is possible, interrupts come into to the hv version + * of the kvmppc_interrupt code, which then jumps to the PR handler, + * kvmppc_interrupt_pr, if the guest is a PR guest. + */ +#define kvmppc_interrupt kvmppc_interrupt_hv +#else +#define kvmppc_interrupt kvmppc_interrupt_pr +#endif + #define __KVM_HANDLER(area, h, n) \ do_kvm_##n: \ BEGIN_FTR_SECTION_NESTED(947) \ ld r10,area+EX_CFAR(r13); \ std r10,HSTATE_CFAR(r13); \ END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947); \ + BEGIN_FTR_SECTION_NESTED(948) \ + ld r10,area+EX_PPR(r13); \ + std r10,HSTATE_PPR(r13); \ + END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \ ld r10,area+EX_R10(r13); \ stw r9,HSTATE_SCRATCH1(r13); \ ld r9,area+EX_R9(r13); \ @@ -217,6 +232,10 @@ do_kvm_##n: \ ld r10,area+EX_R10(r13); \ beq 89f; \ stw r9,HSTATE_SCRATCH1(r13); \ + BEGIN_FTR_SECTION_NESTED(948) \ + ld r9,area+EX_PPR(r13); \ + std r9,HSTATE_PPR(r13); \ + END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948); \ ld r9,area+EX_R9(r13); \ std r12,HSTATE_SCRATCH0(r13); \ li r12,n; \ @@ -236,7 +255,7 @@ do_kvm_##n: \ #define KVM_HANDLER_SKIP(area, h, n) #endif -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE #define KVMTEST_PR(n) __KVMTEST(n) #define KVM_HANDLER_PR(area, h, n) __KVM_HANDLER(area, h, n) #define KVM_HANDLER_PR_SKIP(area, h, n) __KVM_HANDLER_SKIP(area, h, n) diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h index b8a4b9bc50b..f49ddb1b227 100644 --- a/arch/powerpc/include/asm/fsl_ifc.h +++ b/arch/powerpc/include/asm/fsl_ifc.h @@ -93,6 +93,7 @@ #define CSOR_NAND_PGS_512 0x00000000 #define CSOR_NAND_PGS_2K 0x00080000 #define CSOR_NAND_PGS_4K 0x00100000 +#define CSOR_NAND_PGS_8K 0x00180000 /* Spare region Size */ #define CSOR_NAND_SPRZ_MASK 0x0000E000 #define CSOR_NAND_SPRZ_SHIFT 13 @@ -102,6 +103,7 @@ #define CSOR_NAND_SPRZ_210 0x00006000 #define CSOR_NAND_SPRZ_218 0x00008000 #define CSOR_NAND_SPRZ_224 0x0000A000 
+#define CSOR_NAND_SPRZ_CSOR_EXT 0x0000C000 /* Pages Per Block */ #define CSOR_NAND_PB_MASK 0x00000700 #define CSOR_NAND_PB_SHIFT 8 diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h index d3f64f36181..d4a5315718c 100644 --- a/arch/powerpc/include/asm/hvsi.h +++ b/arch/powerpc/include/asm/hvsi.h @@ -25,7 +25,7 @@ struct hvsi_header { uint8_t type; uint8_t len; - uint16_t seqno; + __be16 seqno; } __attribute__((packed)); struct hvsi_data { @@ -35,24 +35,24 @@ struct hvsi_data { struct hvsi_control { struct hvsi_header hdr; - uint16_t verb; + __be16 verb; /* optional depending on verb: */ - uint32_t word; - uint32_t mask; + __be32 word; + __be32 mask; } __attribute__((packed)); struct hvsi_query { struct hvsi_header hdr; - uint16_t verb; + __be16 verb; } __attribute__((packed)); struct hvsi_query_response { struct hvsi_header hdr; - uint16_t verb; - uint16_t query_seqno; + __be16 verb; + __be16 query_seqno; union { uint8_t version; - uint32_t mctrl_word; + __be32 mctrl_word; } u; } __attribute__((packed)); diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 5a64757dc0d..575fbf81fad 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -21,7 +21,7 @@ extern struct pci_dev *isa_bridge_pcidev; /* * has legacy ISA devices ? */ -#define arch_has_dev_port() (isa_bridge_pcidev != NULL) +#define arch_has_dev_port() (isa_bridge_pcidev != NULL || isa_io_special) #endif #include <linux/device.h> @@ -113,7 +113,7 @@ extern bool isa_io_special; /* gcc 4.0 and older doesn't have 'Z' constraint */ #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0) -#define DEF_MMIO_IN_LE(name, size, insn) \ +#define DEF_MMIO_IN_X(name, size, insn) \ static inline u##size name(const volatile u##size __iomem *addr) \ { \ u##size ret; \ @@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \ return ret; \ } -#define DEF_MMIO_OUT_LE(name, size, insn) \ +#define DEF_MMIO_OUT_X(name, size, insn) \ static inline void name(volatile u##size __iomem *addr, u##size val) \ { \ __asm__ __volatile__("sync;"#insn" %1,0,%2" \ @@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \ IO_SET_SYNC_FLAG(); \ } #else /* newer gcc */ -#define DEF_MMIO_IN_LE(name, size, insn) \ +#define DEF_MMIO_IN_X(name, size, insn) \ static inline u##size name(const volatile u##size __iomem *addr) \ { \ u##size ret; \ @@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \ return ret; \ } -#define DEF_MMIO_OUT_LE(name, size, insn) \ +#define DEF_MMIO_OUT_X(name, size, insn) \ static inline void name(volatile u##size __iomem *addr, u##size val) \ { \ __asm__ __volatile__("sync;"#insn" %1,%y0" \ @@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \ } #endif -#define DEF_MMIO_IN_BE(name, size, insn) \ +#define DEF_MMIO_IN_D(name, size, insn) \ static inline u##size name(const volatile u##size __iomem *addr) \ { \ u##size ret; \ @@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \ return ret; \ } -#define DEF_MMIO_OUT_BE(name, size, insn) \ +#define DEF_MMIO_OUT_D(name, size, insn) \ static inline void name(volatile u##size __iomem *addr, u##size val) \ { \ __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \ @@ -165,22 +165,37 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \ IO_SET_SYNC_FLAG(); \ } +DEF_MMIO_IN_D(in_8, 8, lbz); +DEF_MMIO_OUT_D(out_8, 8, stb); -DEF_MMIO_IN_BE(in_8, 8, 
lbz); -DEF_MMIO_IN_BE(in_be16, 16, lhz); -DEF_MMIO_IN_BE(in_be32, 32, lwz); -DEF_MMIO_IN_LE(in_le16, 16, lhbrx); -DEF_MMIO_IN_LE(in_le32, 32, lwbrx); +#ifdef __BIG_ENDIAN__ +DEF_MMIO_IN_D(in_be16, 16, lhz); +DEF_MMIO_IN_D(in_be32, 32, lwz); +DEF_MMIO_IN_X(in_le16, 16, lhbrx); +DEF_MMIO_IN_X(in_le32, 32, lwbrx); -DEF_MMIO_OUT_BE(out_8, 8, stb); -DEF_MMIO_OUT_BE(out_be16, 16, sth); -DEF_MMIO_OUT_BE(out_be32, 32, stw); -DEF_MMIO_OUT_LE(out_le16, 16, sthbrx); -DEF_MMIO_OUT_LE(out_le32, 32, stwbrx); +DEF_MMIO_OUT_D(out_be16, 16, sth); +DEF_MMIO_OUT_D(out_be32, 32, stw); +DEF_MMIO_OUT_X(out_le16, 16, sthbrx); +DEF_MMIO_OUT_X(out_le32, 32, stwbrx); +#else +DEF_MMIO_IN_X(in_be16, 16, lhbrx); +DEF_MMIO_IN_X(in_be32, 32, lwbrx); +DEF_MMIO_IN_D(in_le16, 16, lhz); +DEF_MMIO_IN_D(in_le32, 32, lwz); + +DEF_MMIO_OUT_X(out_be16, 16, sthbrx); +DEF_MMIO_OUT_X(out_be32, 32, stwbrx); +DEF_MMIO_OUT_D(out_le16, 16, sth); +DEF_MMIO_OUT_D(out_le32, 32, stw); + +#endif /* __BIG_ENDIAN */ #ifdef __powerpc64__ -DEF_MMIO_OUT_BE(out_be64, 64, std); -DEF_MMIO_IN_BE(in_be64, 64, ld); + +#ifdef __BIG_ENDIAN__ +DEF_MMIO_OUT_D(out_be64, 64, std); +DEF_MMIO_IN_D(in_be64, 64, ld); /* There is no asm instructions for 64 bits reverse loads and stores */ static inline u64 in_le64(const volatile u64 __iomem *addr) @@ -192,6 +207,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val) { out_be64(addr, swab64(val)); } +#else +DEF_MMIO_OUT_D(out_le64, 64, std); +DEF_MMIO_IN_D(in_le64, 64, ld); + +/* There is no asm instructions for 64 bits reverse loads and stores */ +static inline u64 in_be64(const volatile u64 __iomem *addr) +{ + return swab64(in_le64(addr)); +} + +static inline void out_be64(volatile u64 __iomem *addr, u64 val) +{ + out_le64(addr, swab64(val)); +} + +#endif #endif /* __powerpc64__ */ /* diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 851bac7afa4..1bd92fd43cf 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -123,6 +123,8 @@ #define BOOK3S_HFLAG_SLB 0x2 #define BOOK3S_HFLAG_PAIRED_SINGLE 0x4 #define BOOK3S_HFLAG_NATIVE_PS 0x8 +#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10 +#define BOOK3S_HFLAG_NEW_TLBIE 0x20 #define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */ #define RESUME_FLAG_HOST (1<<1) /* Resume host? 
*/ @@ -136,6 +138,8 @@ #define KVM_GUEST_MODE_NONE 0 #define KVM_GUEST_MODE_GUEST 1 #define KVM_GUEST_MODE_SKIP 2 +#define KVM_GUEST_MODE_GUEST_HV 3 +#define KVM_GUEST_MODE_HOST_HV 4 #define KVM_INST_FETCH_FAILED -1 diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index fa19e2f1a87..4a594b76674 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -58,16 +58,18 @@ struct hpte_cache { struct hlist_node list_pte_long; struct hlist_node list_vpte; struct hlist_node list_vpte_long; +#ifdef CONFIG_PPC_BOOK3S_64 + struct hlist_node list_vpte_64k; +#endif struct rcu_head rcu_head; u64 host_vpn; u64 pfn; ulong slot; struct kvmppc_pte pte; + int pagesize; }; struct kvmppc_vcpu_book3s { - struct kvm_vcpu vcpu; - struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; struct kvmppc_sid_map sid_map[SID_MAP_NUM]; struct { u64 esid; @@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s { struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; +#ifdef CONFIG_PPC_BOOK3S_64 + struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K]; +#endif int hpte_cache_count; spinlock_t mmu_lock; }; @@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s { #define CONTEXT_GUEST 1 #define CONTEXT_GUEST_END 2 -#define VSID_REAL 0x0fffffffffc00000ULL -#define VSID_BAT 0x0fffffffffb00000ULL +#define VSID_REAL 0x07ffffffffc00000ULL +#define VSID_BAT 0x07ffffffffb00000ULL +#define VSID_64K 0x0800000000000000ULL #define VSID_1T 0x1000000000000000ULL #define VSID_REAL_DR 0x2000000000000000ULL #define VSID_REAL_IR 0x4000000000000000ULL @@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask) extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask); extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end); extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr); -extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr); extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu); -extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); +extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, + bool iswrite); +extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size); extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); @@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte); extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu); +extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte); extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu); extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte); @@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, u32 val); extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu 
*vcpu); -extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); +extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, + bool *writable); extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, unsigned long *rmap, long pte_index, int realmode); extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep, @@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, unsigned long *hpret); extern long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map); +extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, + unsigned long mask); extern void kvmppc_entry_trampoline(void); extern void kvmppc_hv_entry_trampoline(void); @@ -184,11 +195,9 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) { - return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu); + return vcpu->arch.book3s; } -extern void kvm_return_point(void); - /* Also add subarch specific defines */ #ifdef CONFIG_KVM_BOOK3S_32_HANDLER @@ -198,203 +207,6 @@ extern void kvm_return_point(void); #include <asm/kvm_book3s_64.h> #endif -#ifdef CONFIG_KVM_BOOK3S_PR - -static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) -{ - return to_book3s(vcpu)->hior; -} - -static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, - unsigned long pending_now, unsigned long old_pending) -{ - if (pending_now) - vcpu->arch.shared->int_pending = 1; - else if (old_pending) - vcpu->arch.shared->int_pending = 0; -} - -static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) -{ - if ( num < 14 ) { - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->gpr[num] = val; - svcpu_put(svcpu); - to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; - } else - vcpu->arch.gpr[num] = val; -} - -static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) -{ - if ( num < 14 ) { - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - ulong r = svcpu->gpr[num]; - svcpu_put(svcpu); - return r; - } else - return vcpu->arch.gpr[num]; -} - -static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->cr = val; - svcpu_put(svcpu); - to_book3s(vcpu)->shadow_vcpu->cr = val; -} - -static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - u32 r; - r = svcpu->cr; - svcpu_put(svcpu); - return r; -} - -static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->xer = val; - to_book3s(vcpu)->shadow_vcpu->xer = val; - svcpu_put(svcpu); -} - -static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - u32 r; - r = svcpu->xer; - svcpu_put(svcpu); - return r; -} - -static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->ctr = val; - svcpu_put(svcpu); -} - -static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - ulong r; - r = svcpu->ctr; - svcpu_put(svcpu); - return r; -} - -static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->lr = val; - svcpu_put(svcpu); -} - -static 
inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - ulong r; - r = svcpu->lr; - svcpu_put(svcpu); - return r; -} - -static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->pc = val; - svcpu_put(svcpu); -} - -static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - ulong r; - r = svcpu->pc; - svcpu_put(svcpu); - return r; -} - -static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) -{ - ulong pc = kvmppc_get_pc(vcpu); - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - u32 r; - - /* Load the instruction manually if it failed to do so in the - * exit path */ - if (svcpu->last_inst == KVM_INST_FETCH_FAILED) - kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); - - r = svcpu->last_inst; - svcpu_put(svcpu); - return r; -} - -/* - * Like kvmppc_get_last_inst(), but for fetching a sc instruction. - * Because the sc instruction sets SRR0 to point to the following - * instruction, we have to fetch from pc - 4. - */ -static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu) -{ - ulong pc = kvmppc_get_pc(vcpu) - 4; - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - u32 r; - - /* Load the instruction manually if it failed to do so in the - * exit path */ - if (svcpu->last_inst == KVM_INST_FETCH_FAILED) - kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); - - r = svcpu->last_inst; - svcpu_put(svcpu); - return r; -} - -static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) -{ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - ulong r; - r = svcpu->fault_dar; - svcpu_put(svcpu); - return r; -} - -static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) -{ - ulong crit_raw = vcpu->arch.shared->critical; - ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); - bool crit; - - /* Truncate crit indicators in 32 bit mode */ - if (!(vcpu->arch.shared->msr & MSR_SF)) { - crit_raw &= 0xffffffff; - crit_r1 &= 0xffffffff; - } - - /* Critical section when crit == r1 */ - crit = (crit_raw == crit_r1); - /* ... 
and we're in supervisor mode */ - crit = crit && !(vcpu->arch.shared->msr & MSR_PR); - - return crit; -} -#else /* CONFIG_KVM_BOOK3S_PR */ - -static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) -{ - return 0; -} - -static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, - unsigned long pending_now, unsigned long old_pending) -{ -} - static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { vcpu->arch.gpr[num] = val; @@ -489,12 +301,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) return vcpu->arch.fault_dar; } -static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) -{ - return false; -} -#endif - /* Magic register values loaded into r3 and r4 before the 'sc' assembly * instruction for the OSI hypercalls */ #define OSI_SC_MAGIC_R3 0x113724FA diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h index ce0ef6ce8f8..c720e0b3238 100644 --- a/arch/powerpc/include/asm/kvm_book3s_32.h +++ b/arch/powerpc/include/asm/kvm_book3s_32.h @@ -22,7 +22,7 @@ static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) { - return to_book3s(vcpu)->shadow_vcpu; + return vcpu->arch.shadow_vcpu; } static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index 86d638a3b35..bf0fa8b0a88 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -20,7 +20,7 @@ #ifndef __ASM_KVM_BOOK3S_64_H__ #define __ASM_KVM_BOOK3S_64_H__ -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu) { preempt_disable(); @@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu) #define SPAPR_TCE_SHIFT 12 -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE #define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */ extern unsigned long kvm_rma_pages; #endif @@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v) (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16))); } -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* * Note modification of an HPTE; set the HPTE modified bit * if anyone is interested. 
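/*
 * Illustrative sketch, not taken from this patch: the PR-KVM register
 * accessors removed above all wrapped the same svcpu_get()/svcpu_put()
 * pattern. After this change, book3s_32 keeps the shadow vcpu pointer in
 * vcpu->arch.shadow_vcpu while the CONFIG_KVM_BOOK3S_PR_POSSIBLE variant
 * in kvm_book3s_64.h hands out the paca copy with preemption disabled, so
 * callers still bracket accesses the same way. Function name is
 * hypothetical; the gpr field is taken from the hunks above.
 */
static inline ulong example_read_gpr0(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); /* may disable preemption */
        ulong r = svcpu->gpr[0];
        svcpu_put(svcpu);       /* pairs with svcpu_get() */
        return r;
}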
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm, if (atomic_read(&kvm->arch.hpte_mod_interest)) rev->guest_rpte |= HPTE_GR_MODIFIED; } -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 9039d3c97ee..0bd9348a4db 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -83,7 +83,7 @@ struct kvmppc_host_state { u8 restore_hid5; u8 napping; -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE u8 hwthread_req; u8 hwthread_state; u8 host_ipi; @@ -101,6 +101,7 @@ struct kvmppc_host_state { #endif #ifdef CONFIG_PPC_BOOK3S_64 u64 cfar; + u64 ppr; #endif }; @@ -108,14 +109,14 @@ struct kvmppc_book3s_shadow_vcpu { ulong gpr[14]; u32 cr; u32 xer; - - u32 fault_dsisr; - u32 last_inst; ulong ctr; ulong lr; ulong pc; + ulong shadow_srr1; ulong fault_dar; + u32 fault_dsisr; + u32 last_inst; #ifdef CONFIG_PPC_BOOK3S_32 u32 sr[16]; /* Guest SRs */ diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index d3c1eb34c98..dd8f61510df 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -26,7 +26,12 @@ /* LPIDs we support with this build -- runtime limit may be lower */ #define KVMPPC_NR_LPIDS 64 -#define KVMPPC_INST_EHPRIV 0x7c00021c +#define KVMPPC_INST_EHPRIV 0x7c00021c +#define EHPRIV_OC_SHIFT 11 +/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */ +#define EHPRIV_OC_DEBUG 1 +#define KVMPPC_INST_EHPRIV_DEBUG (KVMPPC_INST_EHPRIV | \ + (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT)) static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) { diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 33283532e9d..237d1d25b44 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -63,20 +63,17 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); #endif -/* We don't currently support large pages. 
*/ -#define KVM_HPAGE_GFN_SHIFT(x) 0 -#define KVM_NR_PAGE_SIZES 1 -#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) - #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 #define HPTEG_HASH_BITS_PTE_LONG 12 #define HPTEG_HASH_BITS_VPTE 13 #define HPTEG_HASH_BITS_VPTE_LONG 5 +#define HPTEG_HASH_BITS_VPTE_64K 11 #define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE) #define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG) #define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE) #define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG) +#define HPTEG_HASH_NUM_VPTE_64K (1 << HPTEG_HASH_BITS_VPTE_64K) /* Physical Address Mask - allowed range of real mode RAM access */ #define KVM_PAM 0x0fffffffffffffffULL @@ -89,6 +86,9 @@ struct lppaca; struct slb_shadow; struct dtl_entry; +struct kvmppc_vcpu_book3s; +struct kvmppc_book3s_shadow_vcpu; + struct kvm_vm_stat { u32 remote_tlb_flush; }; @@ -224,15 +224,15 @@ struct revmap_entry { #define KVMPPC_GOT_PAGE 0x80 struct kvm_arch_memory_slot { -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE unsigned long *rmap; unsigned long *slot_phys; -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ }; struct kvm_arch { unsigned int lpid; -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE unsigned long hpt_virt; struct revmap_entry *revmap; unsigned int host_lpid; @@ -256,7 +256,10 @@ struct kvm_arch { cpumask_t need_tlb_flush; struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; int hpt_cma_alloc; -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE + struct mutex hpt_mutex; +#endif #ifdef CONFIG_PPC_BOOK3S_64 struct list_head spapr_tce_tables; struct list_head rtas_tokens; @@ -267,6 +270,7 @@ struct kvm_arch { #ifdef CONFIG_KVM_XICS struct kvmppc_xics *xics; #endif + struct kvmppc_ops *kvm_ops; }; /* @@ -294,6 +298,10 @@ struct kvmppc_vcore { u64 stolen_tb; u64 preempt_tb; struct kvm_vcpu *runner; + u64 tb_offset; /* guest timebase - host timebase */ + ulong lpcr; + u32 arch_compat; + ulong pcr; }; #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) @@ -328,6 +336,7 @@ struct kvmppc_pte { bool may_read : 1; bool may_write : 1; bool may_execute : 1; + u8 page_size; /* MMU_PAGE_xxx */ }; struct kvmppc_mmu { @@ -340,7 +349,8 @@ struct kvmppc_mmu { /* book3s */ void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value); u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum); - int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); + int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, + struct kvmppc_pte *pte, bool data, bool iswrite); void (*reset_msr)(struct kvm_vcpu *vcpu); void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large); int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); @@ -360,6 +370,7 @@ struct kvmppc_slb { bool large : 1; /* PTEs are 16MB */ bool tb : 1; /* 1TB segment */ bool class : 1; + u8 base_page_size; /* MMU_PAGE_xxx */ }; # ifdef CONFIG_PPC_FSL_BOOK3E @@ -377,17 +388,6 @@ struct kvmppc_slb { #define KVMPPC_EPR_USER 1 /* exit to userspace to fill EPR */ #define KVMPPC_EPR_KERNEL 2 /* in-kernel irqchip */ -struct kvmppc_booke_debug_reg { - u32 dbcr0; - u32 dbcr1; - u32 dbcr2; -#ifdef CONFIG_KVM_E500MC - u32 dbcr4; -#endif - u64 iac[KVMPPC_BOOKE_MAX_IAC]; - u64 dac[KVMPPC_BOOKE_MAX_DAC]; -}; - #define KVMPPC_IRQ_DEFAULT 0 #define KVMPPC_IRQ_MPIC 1 #define KVMPPC_IRQ_XICS 2 @@ -402,6 +402,10 @@ struct kvm_vcpu_arch { int slb_max; /* 1 + index of last 
valid entry in slb[] */ int slb_nr; /* total number of entries in SLB */ struct kvmppc_mmu mmu; + struct kvmppc_vcpu_book3s *book3s; +#endif +#ifdef CONFIG_PPC_BOOK3S_32 + struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; #endif ulong gpr[32]; @@ -463,6 +467,8 @@ struct kvm_vcpu_arch { u32 ctrl; ulong dabr; ulong cfar; + ulong ppr; + ulong shadow_srr1; #endif u32 vrsave; /* also USPRG0 */ u32 mmucr; @@ -498,6 +504,8 @@ struct kvm_vcpu_arch { u64 mmcr[3]; u32 pmc[8]; + u64 siar; + u64 sdar; #ifdef CONFIG_KVM_EXIT_TIMING struct mutex exit_timing_lock; @@ -531,7 +539,10 @@ struct kvm_vcpu_arch { u32 eptcfg; u32 epr; u32 crit_save; - struct kvmppc_booke_debug_reg dbg_reg; + /* guest debug registers*/ + struct debug_reg dbg_reg; + /* hardware visible debug registers when in guest state */ + struct debug_reg shadow_dbg_reg; #endif gpa_t paddr_accessed; gva_t vaddr_accessed; @@ -582,7 +593,7 @@ struct kvm_vcpu_arch { struct kvmppc_icp *icp; /* XICS presentation controller */ #endif -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE struct kvm_vcpu_arch_shared shregs; unsigned long pgfault_addr; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index b15554a26c2..c8317fbf92c 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu); extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu); - -extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int op, int *advance); -extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, - ulong val); -extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, - ulong *val); extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu); extern int kvmppc_booke_init(void); @@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce *args); extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce); -extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, - struct kvm_allocate_rma *rma); extern struct kvm_rma_info *kvm_alloc_rma(void); extern void kvm_release_rma(struct kvm_rma_info *ri); extern struct page *kvm_alloc_hpt(unsigned long nr_pages); extern void kvm_release_hpt(struct page *page, unsigned long nr_pages); extern int kvmppc_core_init_vm(struct kvm *kvm); extern void kvmppc_core_destroy_vm(struct kvm *kvm); -extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free, +extern void kvmppc_core_free_memslot(struct kvm *kvm, + struct kvm_memory_slot *free, struct kvm_memory_slot *dont); -extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, +extern int kvmppc_core_create_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot, unsigned long npages); extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, @@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq); extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq); +union kvmppc_one_reg { + u32 wval; + u64 dval; + vector128 vval; + u64 vsxval[2]; + struct { + u64 addr; + u64 length; + } vpaval; +}; + +struct kvmppc_ops { + struct module *owner; + int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); + int (*set_sregs)(struct kvm_vcpu 
*vcpu, struct kvm_sregs *sregs); + int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val); + int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val); + void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu); + void (*vcpu_put)(struct kvm_vcpu *vcpu); + void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr); + int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); + struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id); + void (*vcpu_free)(struct kvm_vcpu *vcpu); + int (*check_requests)(struct kvm_vcpu *vcpu); + int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log); + void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot); + int (*prepare_memory_region)(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem); + void (*commit_memory_region)(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old); + int (*unmap_hva)(struct kvm *kvm, unsigned long hva); + int (*unmap_hva_range)(struct kvm *kvm, unsigned long start, + unsigned long end); + int (*age_hva)(struct kvm *kvm, unsigned long hva); + int (*test_age_hva)(struct kvm *kvm, unsigned long hva); + void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte); + void (*mmu_destroy)(struct kvm_vcpu *vcpu); + void (*free_memslot)(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont); + int (*create_memslot)(struct kvm_memory_slot *slot, + unsigned long npages); + int (*init_vm)(struct kvm *kvm); + void (*destroy_vm)(struct kvm *kvm); + int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info); + int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance); + int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); + int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); + void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu); + long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl, + unsigned long arg); + +}; + +extern struct kvmppc_ops *kvmppc_hv_ops; +extern struct kvmppc_ops *kvmppc_pr_ops; + +static inline bool is_kvmppc_hv_enabled(struct kvm *kvm) +{ + return kvm->arch.kvm_ops == kvmppc_hv_ops; +} + /* * Cuts out inst bits with ordering according to spec. * That means the leftmost bit is zero. All given bits are included. 
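/*
 * Illustrative sketch, not taken from this patch: two earlier hunks fit
 * together here. asm/disassemble.h gains get_oc(), which reads the field at
 * bit 11 as (inst >> 11) & 0x7fff, and asm/kvm_booke.h encodes "ehpriv 1"
 * as KVMPPC_INST_EHPRIV_DEBUG with OC = EHPRIV_OC_DEBUG. Assuming the
 * caller has already matched the ehpriv primary opcode, a debug trap can
 * be recognised like this (function name is hypothetical):
 */
#include <linux/types.h>
#include <asm/disassemble.h>
#include <asm/kvm_booke.h>

static inline bool is_ehpriv_debug(u32 inst)
{
        /* (0x7c000a1c >> 11) & 0x7fff == 1 == EHPRIV_OC_DEBUG */
        return get_oc(inst) == EHPRIV_OC_DEBUG;
}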
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value) return r; } -union kvmppc_one_reg { - u32 wval; - u64 dval; - vector128 vval; - u64 vsxval[2]; - struct { - u64 addr; - u64 length; - } vpaval; -}; - #define one_reg_size(id) \ (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) @@ -245,10 +293,10 @@ union kvmppc_one_reg { __v; \ }) -void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); -void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); +int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg); @@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); struct openpic; -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE extern void kvm_cma_reserve(void) __init; static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) { @@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) static inline u32 kvmppc_get_xics_latch(void) { - u32 xirr = get_paca()->kvm_hstate.saved_xirr; + u32 xirr; + xirr = get_paca()->kvm_hstate.saved_xirr; get_paca()->kvm_hstate.saved_xirr = 0; - return xirr; } @@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi) paca[cpu].kvm_hstate.host_ipi = host_ipi; } -extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu); +static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) +{ + vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu); +} #else static inline void __init kvm_cma_reserve(void) diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 4470d1e34d2..844c28de7ec 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -84,8 +84,8 @@ struct lppaca { * the processor is yielded (either because of an OS yield or a * hypervisor preempt). An even value implies that the processor is * currently executing. - * NOTE: This value will ALWAYS be zero for dedicated processors and - * will NEVER be zero for shared processors (ie, initialized to a 1). + * NOTE: Even dedicated processor partitions can yield so this + * field cannot be used to determine if we are shared or dedicated. */ volatile __be32 yield_count; volatile __be32 dispersion_count; /* dispatch changed physical cpu */ @@ -106,15 +106,15 @@ extern struct lppaca lppaca[]; #define lppaca_of(cpu) (*paca[cpu].lppaca_ptr) /* - * Old kernels used a reserved bit in the VPA to determine if it was running - * in shared processor mode. New kernels look for a non zero yield count - * but KVM still needs to set the bit to keep the old stuff happy. + * We are using a non architected field to determine if a partition is + * shared or dedicated. This currently works on both KVM and PHYP, but + * we will have to transition to something better. 
*/ #define LPPACA_OLD_SHARED_PROC 2 static inline bool lppaca_shared_proc(struct lppaca *l) { - return l->yield_count != 0; + return !!(l->__old_status & LPPACA_OLD_SHARED_PROC); } /* diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 8b480901165..ad3025d0880 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -78,6 +78,18 @@ struct machdep_calls { long index); void (*tce_flush)(struct iommu_table *tbl); + /* _rm versions are for real mode use only */ + int (*tce_build_rm)(struct iommu_table *tbl, + long index, + long npages, + unsigned long uaddr, + enum dma_data_direction direction, + struct dma_attrs *attrs); + void (*tce_free_rm)(struct iommu_table *tbl, + long index, + long npages); + void (*tce_flush_rm)(struct iommu_table *tbl); + void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size, unsigned long flags, void *caller); void (*iounmap)(volatile void __iomem *token); @@ -263,6 +275,10 @@ struct machdep_calls { ssize_t (*cpu_probe)(const char *, size_t); ssize_t (*cpu_release)(const char *, size_t); #endif + +#ifdef CONFIG_ARCH_RANDOM + int (*get_random_long)(unsigned long *v); +#endif }; extern void e500_idle(void); diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index c4cf0119727..807014dde82 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h @@ -135,8 +135,8 @@ extern char initial_stab[]; #ifndef __ASSEMBLY__ struct hash_pte { - unsigned long v; - unsigned long r; + __be64 v; + __be64 r; }; extern struct hash_pte *htab_address; diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index c5cd72833d6..033c06be1d8 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -129,6 +129,9 @@ extern int opal_enter_rtas(struct rtas_args *args, #define OPAL_LPC_READ 67 #define OPAL_LPC_WRITE 68 #define OPAL_RETURN_CPU 69 +#define OPAL_FLASH_VALIDATE 76 +#define OPAL_FLASH_MANAGE 77 +#define OPAL_FLASH_UPDATE 78 #ifndef __ASSEMBLY__ @@ -460,10 +463,12 @@ enum { enum { OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1, + OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2 }; enum { OPAL_P7IOC_NUM_PEST_REGS = 128, + OPAL_PHB3_NUM_PEST_REGS = 256 }; struct OpalIoPhbErrorCommon { @@ -531,28 +536,94 @@ struct OpalIoP7IOCPhbErrorData { uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS]; }; +struct OpalIoPhb3ErrorData { + struct OpalIoPhbErrorCommon common; + + uint32_t brdgCtl; + + /* PHB3 UTL regs */ + uint32_t portStatusReg; + uint32_t rootCmplxStatus; + uint32_t busAgentStatus; + + /* PHB3 cfg regs */ + uint32_t deviceStatus; + uint32_t slotStatus; + uint32_t linkStatus; + uint32_t devCmdStatus; + uint32_t devSecStatus; + + /* cfg AER regs */ + uint32_t rootErrorStatus; + uint32_t uncorrErrorStatus; + uint32_t corrErrorStatus; + uint32_t tlpHdr1; + uint32_t tlpHdr2; + uint32_t tlpHdr3; + uint32_t tlpHdr4; + uint32_t sourceId; + + uint32_t rsv3; + + /* Record data about the call to allocate a buffer */ + uint64_t errorClass; + uint64_t correlator; + + uint64_t nFir; /* 000 */ + uint64_t nFirMask; /* 003 */ + uint64_t nFirWOF; /* 008 */ + + /* PHB3 MMIO Error Regs */ + uint64_t phbPlssr; /* 120 */ + uint64_t phbCsr; /* 110 */ + uint64_t lemFir; /* C00 */ + uint64_t lemErrorMask; /* C18 */ + uint64_t lemWOF; /* C40 */ + uint64_t phbErrorStatus; /* C80 */ + uint64_t phbFirstErrorStatus; /* C88 */ + uint64_t phbErrorLog0; /* CC0 */ + uint64_t phbErrorLog1; /* CC8 */ + uint64_t mmioErrorStatus; /* D00 */ + uint64_t 
mmioFirstErrorStatus; /* D08 */ + uint64_t mmioErrorLog0; /* D40 */ + uint64_t mmioErrorLog1; /* D48 */ + uint64_t dma0ErrorStatus; /* D80 */ + uint64_t dma0FirstErrorStatus; /* D88 */ + uint64_t dma0ErrorLog0; /* DC0 */ + uint64_t dma0ErrorLog1; /* DC8 */ + uint64_t dma1ErrorStatus; /* E00 */ + uint64_t dma1FirstErrorStatus; /* E08 */ + uint64_t dma1ErrorLog0; /* E40 */ + uint64_t dma1ErrorLog1; /* E48 */ + uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS]; + uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS]; +}; + typedef struct oppanel_line { const char * line; uint64_t line_len; } oppanel_line_t; +/* /sys/firmware/opal */ +extern struct kobject *opal_kobj; + /* API functions */ -int64_t opal_console_write(int64_t term_number, int64_t *length, +int64_t opal_console_write(int64_t term_number, __be64 *length, const uint8_t *buffer); -int64_t opal_console_read(int64_t term_number, int64_t *length, +int64_t opal_console_read(int64_t term_number, __be64 *length, uint8_t *buffer); int64_t opal_console_write_buffer_space(int64_t term_number, - int64_t *length); -int64_t opal_rtc_read(uint32_t *year_month_day, - uint64_t *hour_minute_second_millisecond); + __be64 *length); +int64_t opal_rtc_read(__be32 *year_month_day, + __be64 *hour_minute_second_millisecond); int64_t opal_rtc_write(uint32_t year_month_day, uint64_t hour_minute_second_millisecond); int64_t opal_cec_power_down(uint64_t request); int64_t opal_cec_reboot(void); int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset); int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset); -int64_t opal_handle_interrupt(uint64_t isn, uint64_t *outstanding_event_mask); -int64_t opal_poll_events(uint64_t *outstanding_event_mask); +int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask); +int64_t opal_poll_events(__be64 *outstanding_event_mask); int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr, uint64_t tce_mem_size); int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr, @@ -560,9 +631,9 @@ int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr, int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func, uint64_t offset, uint8_t *data); int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func, - uint64_t offset, uint16_t *data); + uint64_t offset, __be16 *data); int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func, - uint64_t offset, uint32_t *data); + uint64_t offset, __be32 *data); int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func, uint64_t offset, uint8_t data); int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func, @@ -570,14 +641,14 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func, int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func, uint64_t offset, uint32_t data); int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority); -int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority); +int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority); int64_t opal_register_exception_handler(uint64_t opal_exception, uint64_t handler_address, uint64_t glue_cache_line); int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number, uint8_t *freeze_state, - uint16_t *pci_error_type, - uint64_t *phb_status); + __be16 *pci_error_type, + __be64 *phb_status); int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number, uint64_t eeh_action_token); 
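/*
 * Illustrative sketch, not taken from this patch: with these prototypes now
 * taking __be16/__be32/__be64 pointers, OPAL is documented as filling the
 * buffers in big-endian byte order and the caller converts before use.
 * Assumes the usual OPAL_SUCCESS (0) return convention; the function name
 * is hypothetical and error handling is elided.
 */
#include <linux/kernel.h>
#include <asm/opal.h>

static u64 example_poll_opal_events(void)
{
        __be64 mask = 0;

        if (opal_poll_events(&mask) != OPAL_SUCCESS)
                return 0;

        return be64_to_cpu(mask);       /* byte-swap before using as a CPU-endian bitmask */
}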
int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state); @@ -614,13 +685,13 @@ int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq); int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number, uint32_t xive_num); int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num, - int32_t *interrupt_source_number); + __be32 *interrupt_source_number); int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num, - uint8_t msi_range, uint32_t *msi_address, - uint32_t *message_data); + uint8_t msi_range, __be32 *msi_address, + __be32 *message_data); int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num, uint8_t msi_range, - uint64_t *msi_address, uint32_t *message_data); + __be64 *msi_address, __be32 *message_data); int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address); int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status); int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines); @@ -642,7 +713,7 @@ int64_t opal_pci_fence_phb(uint64_t phb_id); int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope); int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action); int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); -int64_t opal_get_epow_status(uint64_t *status); +int64_t opal_get_epow_status(__be64 *status); int64_t opal_set_system_attention_led(uint8_t led_action); int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, uint16_t *pci_error_type, uint16_t *severity); @@ -656,6 +727,9 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t data, uint32_t sz); int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, uint32_t addr, uint32_t *data, uint32_t sz); +int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); +int64_t opal_manage_flash(uint8_t op); +int64_t opal_update_flash(uint64_t blk_list); /* Internal functions */ extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data); @@ -684,6 +758,7 @@ extern int opal_set_rtc_time(struct rtc_time *tm); extern void opal_get_rtc_time(struct rtc_time *tm); extern unsigned long opal_get_boot_time(void); extern void opal_nvram_init(void); +extern void opal_flash_init(void); extern int opal_machine_check(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index a5954cebbc5..b6ea9e068c1 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -166,7 +166,7 @@ struct paca_struct { struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ #ifdef CONFIG_KVM_BOOK3S_HANDLER -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE /* We use this to store guest state in */ struct kvmppc_book3s_shadow_vcpu shadow_vcpu; #endif diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index b9f426212d3..32e4e212b9c 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -78,7 +78,7 @@ extern unsigned int HPAGE_SHIFT; * * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START * - * There are two was to determine a physical address from a virtual one: + * There are two ways to determine a physical address from a virtual one: * va = pa + PAGE_OFFSET - MEMORY_START * va = pa + KERNELBASE - PHYSICAL_START * @@ -403,7 +403,7 @@ void 
arch_free_page(struct page *page, int order); struct vm_area_struct; -#ifdef CONFIG_PPC_64K_PAGES +#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64) typedef pte_t *pgtable_t; #else typedef struct page *pgtable_t; diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index f65e27b09bd..16cb92d215d 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h @@ -91,7 +91,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, if (!pte) return NULL; page = virt_to_page(pte); - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } return page; } diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 46db09414a1..4a191c47286 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h @@ -394,6 +394,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array, hpte_slot_array[index] = hidx << 4 | 0x1 << 3; } +struct page *realmode_pfn_to_page(unsigned long pfn); + static inline char *get_hpte_slot_array(pmd_t *pmdp) { /* diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index d7fe9f5b46d..3132bb9365f 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -143,6 +143,8 @@ #define PPC_INST_LSWX 0x7c00042a #define PPC_INST_LWARX 0x7c000028 #define PPC_INST_LWSYNC 0x7c2004ac +#define PPC_INST_SYNC 0x7c0004ac +#define PPC_INST_SYNC_MASK 0xfc0007fe #define PPC_INST_LXVD2X 0x7c000698 #define PPC_INST_MCRXR 0x7c000400 #define PPC_INST_MCRXR_MASK 0xfc0007fe @@ -181,6 +183,7 @@ #define PPC_INST_TLBIVAX 0x7c000624 #define PPC_INST_TLBSRX_DOT 0x7c0006a5 #define PPC_INST_XXLOR 0xf0000510 +#define PPC_INST_XXSWAPD 0xf0000250 #define PPC_INST_XVCPSGNDP 0xf0000780 #define PPC_INST_TRECHKPT 0x7c0007dd #define PPC_INST_TRECLAIM 0x7c00075d @@ -200,6 +203,7 @@ /* Misc instructions for BPF compiler */ #define PPC_INST_LD 0xe8000000 #define PPC_INST_LHZ 0xa0000000 +#define PPC_INST_LHBRX 0x7c00062c #define PPC_INST_LWZ 0x80000000 #define PPC_INST_STD 0xf8000000 #define PPC_INST_STDU 0xf8000001 @@ -218,7 +222,7 @@ #define PPC_INST_MULLW 0x7c0001d6 #define PPC_INST_MULHWU 0x7c000016 #define PPC_INST_MULLI 0x1c000000 -#define PPC_INST_DIVWU 0x7c0003d6 +#define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_RLWINM 0x54000000 #define PPC_INST_RLDICR 0x78000004 #define PPC_INST_SLW 0x7c000030 @@ -344,6 +348,8 @@ VSX_XX1((s), a, b)) #define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ VSX_XX3((t), a, b)) +#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \ + VSX_XX3((t), a, a)) #define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \ VSX_XX3((t), (a), (b)))) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 599545738af..3c1acc31a09 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -98,123 +98,51 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base) #define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base) -#define SAVE_FPR(n, base) stfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) +#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base) #define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base) #define SAVE_4FPRS(n, base) SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base) #define SAVE_8FPRS(n, base) SAVE_4FPRS(n, base); 
SAVE_4FPRS(n+4, base) #define SAVE_16FPRS(n, base) SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base) #define SAVE_32FPRS(n, base) SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base) -#define REST_FPR(n, base) lfd n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base) +#define REST_FPR(n, base) lfd n,8*TS_FPRWIDTH*(n)(base) #define REST_2FPRS(n, base) REST_FPR(n, base); REST_FPR(n+1, base) #define REST_4FPRS(n, base) REST_2FPRS(n, base); REST_2FPRS(n+2, base) #define REST_8FPRS(n, base) REST_4FPRS(n, base); REST_4FPRS(n+4, base) #define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base) #define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base) -#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); stvx n,base,b +#define SAVE_VR(n,b,base) li b,16*(n); stvx n,base,b #define SAVE_2VRS(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base) #define SAVE_4VRS(n,b,base) SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base) #define SAVE_8VRS(n,b,base) SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base) #define SAVE_16VRS(n,b,base) SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base) #define SAVE_32VRS(n,b,base) SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base) -#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); lvx n,base,b +#define REST_VR(n,b,base) li b,16*(n); lvx n,base,b #define REST_2VRS(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base) #define REST_4VRS(n,b,base) REST_2VRS(n,b,base); REST_2VRS(n+2,b,base) #define REST_8VRS(n,b,base) REST_4VRS(n,b,base); REST_4VRS(n+4,b,base) #define REST_16VRS(n,b,base) REST_8VRS(n,b,base); REST_8VRS(n+8,b,base) #define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base) -/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in - * thread_struct: - */ -#define SAVE_FPR_TRANSACT(n, base) stfd n,THREAD_TRANSACT_FPR0+ \ - 8*TS_FPRWIDTH*(n)(base) -#define SAVE_2FPRS_TRANSACT(n, base) SAVE_FPR_TRANSACT(n, base); \ - SAVE_FPR_TRANSACT(n+1, base) -#define SAVE_4FPRS_TRANSACT(n, base) SAVE_2FPRS_TRANSACT(n, base); \ - SAVE_2FPRS_TRANSACT(n+2, base) -#define SAVE_8FPRS_TRANSACT(n, base) SAVE_4FPRS_TRANSACT(n, base); \ - SAVE_4FPRS_TRANSACT(n+4, base) -#define SAVE_16FPRS_TRANSACT(n, base) SAVE_8FPRS_TRANSACT(n, base); \ - SAVE_8FPRS_TRANSACT(n+8, base) -#define SAVE_32FPRS_TRANSACT(n, base) SAVE_16FPRS_TRANSACT(n, base); \ - SAVE_16FPRS_TRANSACT(n+16, base) - -#define REST_FPR_TRANSACT(n, base) lfd n,THREAD_TRANSACT_FPR0+ \ - 8*TS_FPRWIDTH*(n)(base) -#define REST_2FPRS_TRANSACT(n, base) REST_FPR_TRANSACT(n, base); \ - REST_FPR_TRANSACT(n+1, base) -#define REST_4FPRS_TRANSACT(n, base) REST_2FPRS_TRANSACT(n, base); \ - REST_2FPRS_TRANSACT(n+2, base) -#define REST_8FPRS_TRANSACT(n, base) REST_4FPRS_TRANSACT(n, base); \ - REST_4FPRS_TRANSACT(n+4, base) -#define REST_16FPRS_TRANSACT(n, base) REST_8FPRS_TRANSACT(n, base); \ - REST_8FPRS_TRANSACT(n+8, base) -#define REST_32FPRS_TRANSACT(n, base) REST_16FPRS_TRANSACT(n, base); \ - REST_16FPRS_TRANSACT(n+16, base) - - -#define SAVE_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \ - stvx n,b,base -#define SAVE_2VRS_TRANSACT(n,b,base) SAVE_VR_TRANSACT(n,b,base); \ - SAVE_VR_TRANSACT(n+1,b,base) -#define SAVE_4VRS_TRANSACT(n,b,base) SAVE_2VRS_TRANSACT(n,b,base); \ - SAVE_2VRS_TRANSACT(n+2,b,base) -#define SAVE_8VRS_TRANSACT(n,b,base) SAVE_4VRS_TRANSACT(n,b,base); \ - SAVE_4VRS_TRANSACT(n+4,b,base) -#define SAVE_16VRS_TRANSACT(n,b,base) SAVE_8VRS_TRANSACT(n,b,base); \ - SAVE_8VRS_TRANSACT(n+8,b,base) -#define SAVE_32VRS_TRANSACT(n,b,base) SAVE_16VRS_TRANSACT(n,b,base); \ - SAVE_16VRS_TRANSACT(n+16,b,base) - -#define 
REST_VR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VR0+(16*(n)); \ - lvx n,b,base -#define REST_2VRS_TRANSACT(n,b,base) REST_VR_TRANSACT(n,b,base); \ - REST_VR_TRANSACT(n+1,b,base) -#define REST_4VRS_TRANSACT(n,b,base) REST_2VRS_TRANSACT(n,b,base); \ - REST_2VRS_TRANSACT(n+2,b,base) -#define REST_8VRS_TRANSACT(n,b,base) REST_4VRS_TRANSACT(n,b,base); \ - REST_4VRS_TRANSACT(n+4,b,base) -#define REST_16VRS_TRANSACT(n,b,base) REST_8VRS_TRANSACT(n,b,base); \ - REST_8VRS_TRANSACT(n+8,b,base) -#define REST_32VRS_TRANSACT(n,b,base) REST_16VRS_TRANSACT(n,b,base); \ - REST_16VRS_TRANSACT(n+16,b,base) - - -#define SAVE_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \ - STXVD2X(n,R##base,R##b) -#define SAVE_2VSRS_TRANSACT(n,b,base) SAVE_VSR_TRANSACT(n,b,base); \ - SAVE_VSR_TRANSACT(n+1,b,base) -#define SAVE_4VSRS_TRANSACT(n,b,base) SAVE_2VSRS_TRANSACT(n,b,base); \ - SAVE_2VSRS_TRANSACT(n+2,b,base) -#define SAVE_8VSRS_TRANSACT(n,b,base) SAVE_4VSRS_TRANSACT(n,b,base); \ - SAVE_4VSRS_TRANSACT(n+4,b,base) -#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base); \ - SAVE_8VSRS_TRANSACT(n+8,b,base) -#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \ - SAVE_16VSRS_TRANSACT(n+16,b,base) - -#define REST_VSR_TRANSACT(n,b,base) li b,THREAD_TRANSACT_VSR0+(16*(n)); \ - LXVD2X(n,R##base,R##b) -#define REST_2VSRS_TRANSACT(n,b,base) REST_VSR_TRANSACT(n,b,base); \ - REST_VSR_TRANSACT(n+1,b,base) -#define REST_4VSRS_TRANSACT(n,b,base) REST_2VSRS_TRANSACT(n,b,base); \ - REST_2VSRS_TRANSACT(n+2,b,base) -#define REST_8VSRS_TRANSACT(n,b,base) REST_4VSRS_TRANSACT(n,b,base); \ - REST_4VSRS_TRANSACT(n+4,b,base) -#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base); \ - REST_8VSRS_TRANSACT(n+8,b,base) -#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \ - REST_16VSRS_TRANSACT(n+16,b,base) +#ifdef __BIG_ENDIAN__ +#define STXVD2X_ROT(n,b,base) STXVD2X(n,b,base) +#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base) +#else +#define STXVD2X_ROT(n,b,base) XXSWAPD(n,n); \ + STXVD2X(n,b,base); \ + XXSWAPD(n,n) +#define LXVD2X_ROT(n,b,base) LXVD2X(n,b,base); \ + XXSWAPD(n,n) +#endif /* Save the lower 32 VSRs in the thread VSR region */ -#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b) +#define SAVE_VSR(n,b,base) li b,16*(n); STXVD2X_ROT(n,R##base,R##b) #define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base) #define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base) #define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base) #define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base) #define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base) -#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b) +#define REST_VSR(n,b,base) li b,16*(n); LXVD2X_ROT(n,R##base,R##b) #define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base) #define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base) #define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base) @@ -478,13 +406,6 @@ BEGIN_FTR_SECTION_NESTED(945) \ std ra,TASKTHREADPPR(rb); \ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945) -#define RESTORE_PPR(ra, rb) \ -BEGIN_FTR_SECTION_NESTED(946) \ - ld ra,PACACURRENT(r13); \ - ld rb,TASKTHREADPPR(ra); \ - mtspr SPRN_PPR,rb; /* Restore PPR */ \ -END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946) - #endif /* @@ -832,6 +753,35 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946) #define 
N_SLINE 68 #define N_SO 100 -#endif /* __ASSEMBLY__ */ +/* + * Create an endian fixup trampoline + * + * This starts with a "tdi 0,0,0x48" instruction which is + * essentially a "trap never", and thus akin to a nop. + * + * The opcode for this instruction read with the wrong endian + * however results in a b . + 8 + * + * So essentially we use that trick to execute the following + * trampoline in "reverse endian" if we are running with the + * MSR_LE bit set the "wrong" way for whatever endianness the + * kernel is built for. + */ +#ifdef CONFIG_PPC_BOOK3E +#define FIXUP_ENDIAN +#else +#define FIXUP_ENDIAN \ + tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \ + b $+36; /* Skip trampoline if endian is good */ \ + .long 0x05009f42; /* bcl 20,31,$+4 */ \ + .long 0xa602487d; /* mflr r10 */ \ + .long 0x1c004a39; /* addi r10,r10,28 */ \ + .long 0xa600607d; /* mfmsr r11 */ \ + .long 0x01006b69; /* xori r11,r11,1 */ \ + .long 0xa6035a7d; /* mtsrr0 r10 */ \ + .long 0xa6037b7d; /* mtsrr1 r11 */ \ + .long 0x2400004c /* rfid */ +#endif /* !CONFIG_PPC_BOOK3E */ +#endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index ce4de5aed7b..fc14a38c7cc 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -14,8 +14,18 @@ #ifdef CONFIG_VSX #define TS_FPRWIDTH 2 + +#ifdef __BIG_ENDIAN__ +#define TS_FPROFFSET 0 +#define TS_VSRLOWOFFSET 1 +#else +#define TS_FPROFFSET 1 +#define TS_VSRLOWOFFSET 0 +#endif + #else #define TS_FPRWIDTH 1 +#define TS_FPROFFSET 0 #endif #ifdef CONFIG_PPC64 @@ -142,26 +152,22 @@ typedef struct { unsigned long seg; } mm_segment_t; -#define TS_FPROFFSET 0 -#define TS_VSRLOWOFFSET 1 -#define TS_FPR(i) fpr[i][TS_FPROFFSET] -#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET] +#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET] +#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET] -struct thread_struct { - unsigned long ksp; /* Kernel stack pointer */ -#ifdef CONFIG_PPC64 - unsigned long ksp_vsid; -#endif - struct pt_regs *regs; /* Pointer to saved register state */ - mm_segment_t fs; /* for get_fs() validation */ -#ifdef CONFIG_BOOKE - /* BookE base exception scratch space; align on cacheline */ - unsigned long normsave[8] ____cacheline_aligned; -#endif -#ifdef CONFIG_PPC32 - void *pgdir; /* root of page-table tree */ - unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ -#endif +/* FP and VSX 0-31 register set */ +struct thread_fp_state { + u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); + u64 fpscr; /* Floating point status */ +}; + +/* Complete AltiVec register set including VSCR */ +struct thread_vr_state { + vector128 vr[32] __attribute__((aligned(16))); + vector128 vscr __attribute__((aligned(16))); +}; + +struct debug_reg { #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* * The following help to manage the use of Debug Control Registers @@ -198,13 +204,28 @@ struct thread_struct { unsigned long dvc2; #endif #endif - /* FP and VSX 0-31 register set */ - double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); - struct { +}; - unsigned int pad; - unsigned int val; /* Floating point status */ - } fpscr; +struct thread_struct { + unsigned long ksp; /* Kernel stack pointer */ + +#ifdef CONFIG_PPC64 + unsigned long ksp_vsid; +#endif + struct pt_regs *regs; /* Pointer to saved register state */ + mm_segment_t fs; /* for get_fs() validation */ +#ifdef CONFIG_BOOKE + /* BookE base exception scratch space; align on cacheline */ + unsigned long 
normsave[8] ____cacheline_aligned; +#endif +#ifdef CONFIG_PPC32 + void *pgdir; /* root of page-table tree */ + unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ +#endif + /* Debug Registers */ + struct debug_reg debug; + struct thread_fp_state fp_state; + struct thread_fp_state *fp_save_area; int fpexc_mode; /* floating-point exception mode */ unsigned int align_ctl; /* alignment handling control */ #ifdef CONFIG_PPC64 @@ -222,10 +243,8 @@ struct thread_struct { struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ unsigned long trap_nr; /* last trap # on this thread */ #ifdef CONFIG_ALTIVEC - /* Complete AltiVec register set */ - vector128 vr[32] __attribute__((aligned(16))); - /* AltiVec status */ - vector128 vscr __attribute__((aligned(16))); + struct thread_vr_state vr_state; + struct thread_vr_state *vr_save_area; unsigned long vrsave; int used_vr; /* set if process has used altivec */ #endif /* CONFIG_ALTIVEC */ @@ -262,13 +281,8 @@ struct thread_struct { * transact_fpr[] is the new set of transactional values. * VRs work the same way. */ - double transact_fpr[32][TS_FPRWIDTH]; - struct { - unsigned int pad; - unsigned int val; /* Floating point status */ - } transact_fpscr; - vector128 transact_vr[32] __attribute__((aligned(16))); - vector128 transact_vscr __attribute__((aligned(16))); + struct thread_fp_state transact_fp; + struct thread_vr_state transact_vr; unsigned long transact_vrsave; #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ #ifdef CONFIG_KVM_BOOK3S_32_HANDLER @@ -322,8 +336,6 @@ struct thread_struct { .ksp = INIT_SP, \ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .fs = KERNEL_DS, \ - .fpr = {{0}}, \ - .fpscr = { .val = 0, }, \ .fpexc_mode = 0, \ .ppr = INIT_PPR, \ } @@ -361,6 +373,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val); extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); +extern void load_fp_state(struct thread_fp_state *fp); +extern void store_fp_state(struct thread_fp_state *fp); +extern void load_vr_state(struct thread_vr_state *vr); +extern void store_vr_state(struct thread_vr_state *vr); + static inline unsigned int __unpack_fe01(unsigned long msr_bits) { return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 7d0c7f3a717..d977b9b7869 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -1,4 +1,3 @@ -#include <linux/of.h> /* linux/of.h gets to determine #include ordering */ #ifndef _POWERPC_PROM_H #define _POWERPC_PROM_H #ifdef __KERNEL__ @@ -20,21 +19,17 @@ #include <asm/irq.h> #include <linux/atomic.h> -#define HAVE_ARCH_DEVTREE_FIXUPS +/* These includes should be removed once implicit includes are cleaned up. */ +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> /* * OF address retreival & translation */ -/* Translate a DMA address from device space to CPU space */ -extern u64 of_translate_dma_address(struct device_node *dev, - const __be32 *in_addr); - -#ifdef CONFIG_PCI -extern unsigned long pci_address_to_pio(phys_addr_t address); -#define pci_address_to_pio pci_address_to_pio -#endif /* CONFIG_PCI */ - /* Parse the ibm,dma-window property of an OF node into the busno, phys and * size parameters. 
*/ @@ -44,16 +39,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window, extern void kdump_move_device_tree(void); -/* cache lookup */ -struct device_node *of_find_next_cache_node(struct device_node *np); - -#ifdef CONFIG_NUMA -extern int of_node_to_nid(struct device_node *device); -#else -static inline int of_node_to_nid(struct device_node *device) { return 0; } -#endif -#define of_node_to_nid of_node_to_nid - extern void of_instantiate_rtc(void); extern int of_get_ibm_chip_id(struct device_node *np); @@ -143,14 +128,5 @@ struct of_drconf_cell { */ extern unsigned char ibm_architecture_vec[]; -/* These includes are put at the bottom because they may contain things - * that are overridden by this file. Ideally they shouldn't be included - * by this file, but there are a bunch of .c files that currently depend - * on it. Eventually they will be cleaned up. */ -#include <linux/of_fdt.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/platform_device.h> - #endif /* __KERNEL__ */ #endif /* _POWERPC_PROM_H */ diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h index 0156702ba24..576ad88104c 100644 --- a/arch/powerpc/include/asm/pte-book3e.h +++ b/arch/powerpc/include/asm/pte-book3e.h @@ -40,7 +40,7 @@ #define _PAGE_U1 0x010000 #define _PAGE_U0 0x020000 #define _PAGE_ACCESSED 0x040000 -#define _PAGE_LENDIAN 0x080000 +#define _PAGE_ENDIAN 0x080000 #define _PAGE_GUARDED 0x100000 #define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ #define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 10d1ef016bf..5c45787d551 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -115,7 +115,12 @@ #define MSR_64BIT MSR_SF /* Server variant */ -#define MSR_ (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#ifdef __BIG_ENDIAN__ +#define MSR_ __MSR +#else +#define MSR_ (__MSR | MSR_LE) +#endif #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) #define MSR_USER64 (MSR_USER32 | MSR_64BIT) @@ -243,6 +248,7 @@ #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ +#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */ #define SPRN_SPURR 0x134 /* Scaled PURR */ #define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */ #define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */ @@ -283,6 +289,7 @@ #define LPCR_ISL (1ul << (63-2)) #define LPCR_VC_SH (63-2) #define LPCR_DPFD_SH (63-11) +#define LPCR_DPFD (7ul << LPCR_DPFD_SH) #define LPCR_VRMASD (0x1ful << (63-16)) #define LPCR_VRMA_L (1ul << (63-12)) #define LPCR_VRMA_LP0 (1ul << (63-15)) @@ -299,6 +306,7 @@ #define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ #define LPCR_MER 0x00000800 /* Mediated External Exception */ #define LPCR_MER_SH 11 +#define LPCR_TC 0x00000200 /* Translation control */ #define LPCR_LPES 0x0000000c #define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ #define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ @@ -311,6 +319,10 @@ #define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ #define SPRN_HMER 0x150 /* Hardware m? error recovery */ #define SPRN_HMEER 0x151 /* Hardware m? 
enable error recovery */ +#define SPRN_PCR 0x152 /* Processor compatibility register */ +#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */ +#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */ +#define PCR_ARCH_205 0x2 /* Architecture 2.05 */ #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ #define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ #define SPRN_TLBVPNR 0x155 /* P7 TLB control register */ @@ -420,6 +432,7 @@ #define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ #define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */ #define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */ +#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH) #define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */ #define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */ #define HID4_LPID1_SH 0 /* partition ID top 2 bits */ @@ -1102,6 +1115,13 @@ #define PVR_BE 0x0070 #define PVR_PA6T 0x0090 +/* "Logical" PVR values defined in PAPR, representing architecture levels */ +#define PVR_ARCH_204 0x0f000001 +#define PVR_ARCH_205 0x0f000002 +#define PVR_ARCH_206 0x0f000003 +#define PVR_ARCH_206p 0x0f100003 +#define PVR_ARCH_207 0x0f000004 + /* Macros for setting and retrieving special purpose registers */ #ifndef __ASSEMBLY__ #define mfmsr() ({unsigned long rval; \ diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index ed8f836da09..2e31aacd8ac 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -381,7 +381,7 @@ #define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ -#define dbcr_iac_range(task) ((task)->thread.dbcr0) +#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0) #define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */ #define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */ #define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */ @@ -395,7 +395,7 @@ #define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */ #define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */ -#define dbcr_dac(task) ((task)->thread.dbcr1) +#define dbcr_dac(task) ((task)->thread.debug.dbcr1) #define DBCR_DAC1R DBCR1_DAC1R #define DBCR_DAC1W DBCR1_DAC1W #define DBCR_DAC2R DBCR1_DAC2R @@ -441,7 +441,7 @@ #define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */ #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ -#define dbcr_dac(task) ((task)->thread.dbcr0) +#define dbcr_dac(task) ((task)->thread.debug.dbcr0) #define DBCR_DAC1R DBCR0_DAC1R #define DBCR_DAC1W DBCR0_DAC1W #define DBCR_DAC2R DBCR0_DAC2R @@ -475,7 +475,7 @@ #define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */ #define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */ -#define dbcr_iac_range(task) ((task)->thread.dbcr1) +#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1) #define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */ #define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */ #define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */ diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h index 0cabfd7bc2d..f5cde45b116 100644 --- a/arch/powerpc/include/asm/scom.h +++ b/arch/powerpc/include/asm/scom.h @@ -54,8 +54,8 @@ struct scom_controller { scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count); void (*unmap)(scom_map_t map); - u64 (*read)(scom_map_t map, u32 reg); - void (*write)(scom_map_t map, u32 
reg, u64 value); + int (*read)(scom_map_t map, u64 reg, u64 *value); + int (*write)(scom_map_t map, u64 reg, u64 value); }; extern const struct scom_controller *scom_controller; @@ -133,10 +133,18 @@ static inline void scom_unmap(scom_map_t map) * scom_read - Read a SCOM register * @map: Result of scom_map * @reg: Register index within that map + * @value: Updated with the value read + * + * Returns 0 (success) or a negative error code */ -static inline u64 scom_read(scom_map_t map, u32 reg) +static inline int scom_read(scom_map_t map, u64 reg, u64 *value) { - return scom_controller->read(map, reg); + int rc; + + rc = scom_controller->read(map, reg, value); + if (rc) + *value = 0xfffffffffffffffful; + return rc; } /** @@ -144,12 +152,15 @@ static inline u64 scom_read(scom_map_t map, u32 reg) * @map: Result of scom_map * @reg: Register index within that map * @value: Value to write + * + * Returns 0 (success) or a negative error code */ -static inline void scom_write(scom_map_t map, u32 reg, u64 value) +static inline int scom_write(scom_map_t map, u64 reg, u64 value) { - scom_controller->write(map, reg, value); + return scom_controller->write(map, reg, value); } + #endif /* CONFIG_PPC_SCOM */ #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index d3ca85529b8..703a8412dac 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -23,6 +23,10 @@ extern void reloc_got2(unsigned long); #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) +void check_for_initrd(void); +void do_init_bootmem(void); +void setup_panic(void); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h index 3a7a67a0d00..d89beaba26f 100644 --- a/arch/powerpc/include/asm/sfp-machine.h +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -125,7 +125,7 @@ #define FP_EX_DIVZERO (1 << (31 - 5)) #define FP_EX_INEXACT (1 << (31 - 6)) -#define __FPU_FPSCR (current->thread.fpscr.val) +#define __FPU_FPSCR (current->thread.fp_state.fpscr) /* We only actually write to the destination register * if exceptions signalled (if any) will not trap. 
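/*
 * Illustrative sketch, not part of the patch above: the reworked scom_read()
 * now returns an error code and passes the data back through a pointer, so a
 * caller checks the return value instead of testing for an all-ones result.
 * The helper name is hypothetical; scom_read() and pr_err() are real.
 */
static int check_scom_reg(scom_map_t map, u64 reg, u64 *val)
{
	int rc = scom_read(map, reg, val);	/* *val is forced to all ones on failure */

	if (rc)
		pr_err("SCOM read of register %#llx failed (%d)\n",
		       (unsigned long long)reg, rc);
	return rc;
}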
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h index 93f280e2327..37b7ca39ec9 100644 --- a/arch/powerpc/include/asm/spu.h +++ b/arch/powerpc/include/asm/spu.h @@ -235,6 +235,7 @@ extern long spu_sys_callback(struct spu_syscall_block *s); /* syscalls implemented in spufs */ struct file; +struct coredump_params; struct spufs_calls { long (*create_thread)(const char __user *name, unsigned int flags, umode_t mode, @@ -242,7 +243,7 @@ struct spufs_calls { long (*spu_run)(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus); int (*coredump_extra_notes_size)(void); - int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset); + int (*coredump_extra_notes_write)(struct coredump_params *cprm); void (*notify_spus_active)(void); struct module *owner; }; diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index e40010abcaf..0dffad6bcc8 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -10,7 +10,9 @@ #define __HAVE_ARCH_STRNCMP #define __HAVE_ARCH_STRCAT #define __HAVE_ARCH_MEMSET +#ifdef __BIG_ENDIAN__ #define __HAVE_ARCH_MEMCPY +#endif #define __HAVE_ARCH_MEMMOVE #define __HAVE_ARCH_MEMCMP #define __HAVE_ARCH_MEMCHR @@ -22,7 +24,9 @@ extern int strcmp(const char *,const char *); extern int strncmp(const char *, const char *, __kernel_size_t); extern char * strcat(char *, const char *); extern void * memset(void *,int,__kernel_size_t); +#ifdef __BIG_ENDIAN__ extern void * memcpy(void *,const void *,__kernel_size_t); +#endif extern void * memmove(void *,const void *,__kernel_size_t); extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 2be5618cdec..9ee12610af0 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,6 +35,7 @@ extern void giveup_vsx(struct task_struct *); extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); +extern void switch_booke_debug_regs(struct thread_struct *new_thread); #ifndef CONFIG_SMP extern void discard_lazy_cpu_state(void); diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index d0b6d4ac6dd..9a5c928bb3c 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -8,6 +8,8 @@ #include <linux/kernel.h> #include <asm/asm-compat.h> +#ifdef __BIG_ENDIAN__ + struct word_at_a_time { const unsigned long high_bits, low_bits; }; @@ -38,4 +40,80 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +#else + +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* Alan Modra's little-endian strlen tail for 64-bit */ +#define create_zero_mask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + unsigned long leading_zero_bits; + long trailing_zero_bit_mask; + + asm ("addi %1,%2,-1\n\t" + "andc %1,%1,%2\n\t" + "popcntd %0,%1" + : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) + : "r" (mask)); + return leading_zero_bits >> 3; +} + +#else /* 32-bit case */ + +/* + * This is largely generic for little-endian machines, but the + * optimal byte mask counting is probably going to be 
something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +#endif + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h index c82eb12a5b1..0abb97f3be1 100644 --- a/arch/powerpc/include/asm/xor.h +++ b/arch/powerpc/include/asm/xor.h @@ -1 +1,68 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2012 + * + * Author: Anton Blanchard <anton@au.ibm.com> + */ +#ifndef _ASM_POWERPC_XOR_H +#define _ASM_POWERPC_XOR_H + +#ifdef CONFIG_ALTIVEC + +#include <asm/cputable.h> + +void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in); +void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in); +void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in); +void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in, unsigned long *v5_in); + +static struct xor_block_template xor_block_altivec = { + .name = "altivec", + .do_2 = xor_altivec_2, + .do_3 = xor_altivec_3, + .do_4 = xor_altivec_4, + .do_5 = xor_altivec_5, +}; + +#define XOR_SPEED_ALTIVEC() \ + do { \ + if (cpu_has_feature(CPU_FTR_ALTIVEC)) \ + xor_speed(&xor_block_altivec); \ + } while (0) +#else +#define XOR_SPEED_ALTIVEC() +#endif + +/* Also try the generic routines. 
*/ #include <asm-generic/xor.h> + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ +do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + XOR_SPEED_ALTIVEC(); \ +} while (0) + +#endif /* _ASM_POWERPC_XOR_H */ diff --git a/arch/powerpc/include/uapi/asm/byteorder.h b/arch/powerpc/include/uapi/asm/byteorder.h index aa6cc4fac96..ca931d07400 100644 --- a/arch/powerpc/include/uapi/asm/byteorder.h +++ b/arch/powerpc/include/uapi/asm/byteorder.h @@ -7,6 +7,10 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#ifdef __LITTLE_ENDIAN__ +#include <linux/byteorder/little_endian.h> +#else #include <linux/byteorder/big_endian.h> +#endif #endif /* _ASM_POWERPC_BYTEORDER_H */ diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 0fb1a6e9ff9..6836ec79a83 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -27,6 +27,7 @@ #define __KVM_HAVE_PPC_SMT #define __KVM_HAVE_IRQCHIP #define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_GUEST_DEBUG struct kvm_regs { __u64 pc; @@ -269,7 +270,24 @@ struct kvm_fpu { __u64 fpr[32]; }; +/* + * Defines for h/w breakpoint, watchpoint (read, write or both) and + * software breakpoint. + * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status" + * for KVM_DEBUG_EXIT. + */ +#define KVMPPC_DEBUG_NONE 0x0 +#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1) +#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2) +#define KVMPPC_DEBUG_WATCH_READ (1UL << 3) struct kvm_debug_exit_arch { + __u64 address; + /* + * exiting to userspace because of h/w breakpoint, watchpoint + * (read, write or both) and software breakpoint. + */ + __u32 status; + __u32 reserved; }; /* for KVM_SET_GUEST_DEBUG */ @@ -281,10 +299,6 @@ struct kvm_guest_debug_arch { * Type denotes h/w breakpoint, read watchpoint, write * watchpoint or watchpoint (both read and write). 
*/ -#define KVMPPC_DEBUG_NONE 0x0 -#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1) -#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2) -#define KVMPPC_DEBUG_WATCH_READ (1UL << 3) __u32 type; __u32 reserved; } bp[16]; @@ -429,6 +443,11 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10) #define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11) #define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12) +#define KVM_REG_PPC_MMCR2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13) +#define KVM_REG_PPC_MMCRS (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14) +#define KVM_REG_PPC_SIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15) +#define KVM_REG_PPC_SDAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16) +#define KVM_REG_PPC_SIER (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17) #define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18) #define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19) @@ -499,6 +518,65 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a) #define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b) +/* Timebase offset */ +#define KVM_REG_PPC_TB_OFFSET (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c) + +/* POWER8 registers */ +#define KVM_REG_PPC_SPMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d) +#define KVM_REG_PPC_SPMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e) +#define KVM_REG_PPC_IAMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f) +#define KVM_REG_PPC_TFHAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0) +#define KVM_REG_PPC_TFIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1) +#define KVM_REG_PPC_TEXASR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2) +#define KVM_REG_PPC_FSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3) +#define KVM_REG_PPC_PSPB (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4) +#define KVM_REG_PPC_EBBHR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5) +#define KVM_REG_PPC_EBBRR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6) +#define KVM_REG_PPC_BESCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7) +#define KVM_REG_PPC_TAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8) +#define KVM_REG_PPC_DPDES (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9) +#define KVM_REG_PPC_DAWR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa) +#define KVM_REG_PPC_DAWRX (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab) +#define KVM_REG_PPC_CIABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac) +#define KVM_REG_PPC_IC (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad) +#define KVM_REG_PPC_VTB (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae) +#define KVM_REG_PPC_CSIGR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf) +#define KVM_REG_PPC_TACR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0) +#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1) +#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2) +#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3) + +#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) +#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) +#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6) + +/* Architecture compatibility level */ +#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7) + +/* Transactional Memory checkpointed state: + * This is all GPRs, all VSX regs and a subset of SPRs + */ +#define KVM_REG_PPC_TM (KVM_REG_PPC | 0x80000000) +/* TM GPRs */ +#define KVM_REG_PPC_TM_GPR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0) +#define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n)) +#define KVM_REG_PPC_TM_GPR31 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f) +/* TM VSX */ +#define KVM_REG_PPC_TM_VSR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20) +#define KVM_REG_PPC_TM_VSR(n) 
(KVM_REG_PPC_TM_VSR0 + (n)) +#define KVM_REG_PPC_TM_VSR63 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f) +/* TM SPRS */ +#define KVM_REG_PPC_TM_CR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60) +#define KVM_REG_PPC_TM_LR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61) +#define KVM_REG_PPC_TM_CTR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62) +#define KVM_REG_PPC_TM_FPSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63) +#define KVM_REG_PPC_TM_AMR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64) +#define KVM_REG_PPC_TM_PPR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65) +#define KVM_REG_PPC_TM_VRSAVE (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66) +#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67) +#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68) +#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69) + /* PPC64 eXternal Interrupt Controller Specification */ #define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */ diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index a6d74467c9e..fa698324a1f 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -83,4 +83,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index a27ccd5dc6b..de91f3ae631 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -54,8 +54,6 @@ struct aligninfo { /* DSISR bits reported for a DCBZ instruction: */ #define DCBZ 0x5f /* 8xx/82xx dcbz faults when cache not enabled */ -#define SWAP(a, b) (t = (a), (a) = (b), (b) = t) - /* * The PowerPC stores certain bits of the instruction that caused the * alignment exception in the DSISR register. This array maps those @@ -256,11 +254,17 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr) * bottom 4 bytes of each register, and the loads clear the * top 4 bytes of the affected register. */ +#ifdef __BIG_ENDIAN__ #ifdef CONFIG_PPC64 #define REG_BYTE(rp, i) *((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4) #else #define REG_BYTE(rp, i) *((u8 *)(rp) + (i)) #endif +#endif + +#ifdef __LITTLE_ENDIAN__ +#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3)))) +#endif #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz)) @@ -305,6 +309,15 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr, nb0 = nb + reg * 4 - 128; nb = 128 - reg * 4; } +#ifdef __LITTLE_ENDIAN__ + /* + * String instructions are endian neutral but the code + * below is not. Force byte swapping on so that the + * effects of swizzling are undone in the load/store + * loops below. 
+ */ + flags ^= SW; +#endif } else { /* lwm, stmw */ nb = (32 - reg) * 4; @@ -458,7 +471,7 @@ static struct aligninfo spe_aligninfo[32] = { static int emulate_spe(struct pt_regs *regs, unsigned int reg, unsigned int instr) { - int t, ret; + int ret; union { u64 ll; u32 w[2]; @@ -581,24 +594,18 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, if (flags & SW) { switch (flags & 0xf0) { case E8: - SWAP(data.v[0], data.v[7]); - SWAP(data.v[1], data.v[6]); - SWAP(data.v[2], data.v[5]); - SWAP(data.v[3], data.v[4]); + data.ll = swab64(data.ll); break; case E4: - - SWAP(data.v[0], data.v[3]); - SWAP(data.v[1], data.v[2]); - SWAP(data.v[4], data.v[7]); - SWAP(data.v[5], data.v[6]); + data.w[0] = swab32(data.w[0]); + data.w[1] = swab32(data.w[1]); break; /* Its half word endian */ default: - SWAP(data.v[0], data.v[1]); - SWAP(data.v[2], data.v[3]); - SWAP(data.v[4], data.v[5]); - SWAP(data.v[6], data.v[7]); + data.h[0] = swab16(data.h[0]); + data.h[1] = swab16(data.h[1]); + data.h[2] = swab16(data.h[2]); + data.h[3] = swab16(data.h[3]); break; } } @@ -658,14 +665,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, flush_vsx_to_thread(current); if (reg < 32) - ptr = (char *) &current->thread.TS_FPR(reg); + ptr = (char *) &current->thread.fp_state.fpr[reg][0]; else - ptr = (char *) &current->thread.vr[reg - 32]; + ptr = (char *) &current->thread.vr_state.vr[reg - 32]; lptr = (unsigned long *) ptr; +#ifdef __LITTLE_ENDIAN__ + if (flags & SW) { + elsize = length; + sw = length-1; + } else { + /* + * The elements are BE ordered, even in LE mode, so process + * them in reverse order. + */ + addr += length - elsize; + + /* 8 byte memory accesses go in the top 8 bytes of the VR */ + if (length == 8) + ptr += 8; + } +#else if (flags & SW) sw = elsize-1; +#endif for (j = 0; j < length; j += elsize) { for (i = 0; i < elsize; ++i) { @@ -675,19 +699,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, ret |= __get_user(ptr[i^sw], addr + i); } ptr += elsize; +#ifdef __LITTLE_ENDIAN__ + addr -= elsize; +#else addr += elsize; +#endif } +#ifdef __BIG_ENDIAN__ +#define VSX_HI 0 +#define VSX_LO 1 +#else +#define VSX_HI 1 +#define VSX_LO 0 +#endif + if (!ret) { if (flags & U) regs->gpr[areg] = regs->dar; /* Splat load copies the same data to top and bottom 8 bytes */ if (flags & SPLT) - lptr[1] = lptr[0]; - /* For 8 byte loads, zero the top 8 bytes */ + lptr[VSX_LO] = lptr[VSX_HI]; + /* For 8 byte loads, zero the low 8 bytes */ else if (!(flags & ST) && (8 == length)) - lptr[1] = 0; + lptr[VSX_LO] = 0; } else return -EFAULT; @@ -710,18 +746,28 @@ int fix_alignment(struct pt_regs *regs) unsigned int dsisr; unsigned char __user *addr; unsigned long p, swiz; - int ret, t; - union { + int ret, i; + union data { u64 ll; double dd; unsigned char v[8]; struct { +#ifdef __LITTLE_ENDIAN__ + int low32; + unsigned hi32; +#else unsigned hi32; int low32; +#endif } x32; struct { +#ifdef __LITTLE_ENDIAN__ + short low16; + unsigned char hi48[6]; +#else unsigned char hi48[6]; short low16; +#endif } x16; } data; @@ -780,8 +826,9 @@ int fix_alignment(struct pt_regs *regs) /* Byteswap little endian loads and stores */ swiz = 0; - if (regs->msr & MSR_LE) { + if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) { flags ^= SW; +#ifdef __BIG_ENDIAN__ /* * So-called "PowerPC little endian" mode works by * swizzling addresses rather than by actually doing @@ -794,6 +841,7 @@ int fix_alignment(struct pt_regs *regs) */ if (cpu_has_feature(CPU_FTR_PPC_LE)) swiz = 7; +#endif } /* DAR has the operand effective 
address */ @@ -818,7 +866,7 @@ int fix_alignment(struct pt_regs *regs) elsize = 8; flags = 0; - if (regs->msr & MSR_LE) + if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) flags |= SW; if (instruction & 0x100) flags |= ST; @@ -878,32 +926,36 @@ int fix_alignment(struct pt_regs *regs) * get it from register values */ if (!(flags & ST)) { - data.ll = 0; - ret = 0; - p = (unsigned long) addr; + unsigned int start = 0; + switch (nb) { - case 8: - ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++)); case 4: - ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++)); + start = offsetof(union data, x32.low32); + break; case 2: - ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++)); - ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++)); - if (unlikely(ret)) - return -EFAULT; + start = offsetof(union data, x16.low16); + break; } + + data.ll = 0; + ret = 0; + p = (unsigned long)addr; + + for (i = 0; i < nb; i++) + ret |= __get_user_inatomic(data.v[start + i], + SWIZ_PTR(p++)); + + if (unlikely(ret)) + return -EFAULT; + } else if (flags & F) { - data.dd = current->thread.TS_FPR(reg); + data.ll = current->thread.TS_FPR(reg); if (flags & S) { /* Single-precision FP store requires conversion... */ #ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); - cvt_df(&data.dd, (float *)&data.v[4]); + cvt_df(&data.dd, (float *)&data.x32.low32); preempt_enable(); #else return 0; @@ -915,17 +967,13 @@ int fix_alignment(struct pt_regs *regs) if (flags & SW) { switch (nb) { case 8: - SWAP(data.v[0], data.v[7]); - SWAP(data.v[1], data.v[6]); - SWAP(data.v[2], data.v[5]); - SWAP(data.v[3], data.v[4]); + data.ll = swab64(data.ll); break; case 4: - SWAP(data.v[4], data.v[7]); - SWAP(data.v[5], data.v[6]); + data.x32.low32 = swab32(data.x32.low32); break; case 2: - SWAP(data.v[6], data.v[7]); + data.x16.low16 = swab16(data.x16.low16); break; } } @@ -947,7 +995,7 @@ int fix_alignment(struct pt_regs *regs) #ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); - cvt_fd((float *)&data.v[4], &data.dd); + cvt_fd((float *)&data.x32.low32, &data.dd); preempt_enable(); #else return 0; @@ -957,25 +1005,28 @@ int fix_alignment(struct pt_regs *regs) /* Store result to memory or update registers */ if (flags & ST) { - ret = 0; - p = (unsigned long) addr; + unsigned int start = 0; + switch (nb) { - case 8: - ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++)); case 4: - ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++)); + start = offsetof(union data, x32.low32); + break; case 2: - ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++)); - ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++)); + start = offsetof(union data, x16.low16); + break; } + + ret = 0; + p = (unsigned long)addr; + + for (i = 0; i < nb; i++) + ret |= __put_user_inatomic(data.v[start + i], + SWIZ_PTR(p++)); + if (unlikely(ret)) return -EFAULT; } else if (flags & F) - current->thread.TS_FPR(reg) = data.dd; + current->thread.TS_FPR(reg) = data.ll; else regs->gpr[reg] = data.ll; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 502c7a4e73f..2ea5cc033ec 100644 --- 
a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -90,16 +90,17 @@ int main(void) DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); #endif DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); - DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); - DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); + DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state)); + DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area)); + DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr)); #ifdef CONFIG_ALTIVEC - DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0])); + DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state)); + DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area)); DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave)); - DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr)); DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr)); + DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr)); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX - DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr)); DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr)); #endif /* CONFIG_VSX */ #ifdef CONFIG_PPC64 @@ -114,7 +115,7 @@ int main(void) #endif /* CONFIG_SPE */ #endif /* CONFIG_PPC64 */ #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0)); + DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0)); #endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); @@ -143,20 +144,12 @@ int main(void) DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); - DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, - transact_vr[0])); - DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct, - transact_vscr)); + DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct, + transact_vr)); DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct, transact_vrsave)); - DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct, - transact_fpr[0])); - DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct, - transact_fpscr)); -#ifdef CONFIG_VSX - DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct, - transact_fpr[0])); -#endif + DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct, + transact_fp)); /* Local pt_regs on stack for Transactional Memory funcs. 
*/ DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); @@ -446,7 +439,7 @@ int main(void) DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); @@ -477,7 +470,7 @@ int main(void) DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); /* book3s */ -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); @@ -509,6 +502,8 @@ int main(void) DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); + DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar)); + DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar)); DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); @@ -518,18 +513,22 @@ int main(void) DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); + DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); + DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); - DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) - - offsetof(struct kvmppc_vcpu_book3s, vcpu)); + DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); + DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); + DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); #ifdef CONFIG_PPC_BOOK3S_64 -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE + DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu)); # define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) #else # define SVCPU_FIELD(x, f) @@ -581,7 +580,7 @@ int main(void) HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); HSTATE_FIELD(HSTATE_NAPPING, napping); -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req); HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state); HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); @@ -597,10 +596,11 @@ int main(void) HSTATE_FIELD(HSTATE_DABR, dabr); HSTATE_FIELD(HSTATE_DECEXP, dec_expires); DEFINE(IPI_PRIORITY, IPI_PRIORITY); -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_PPC_BOOK3S_64 HSTATE_FIELD(HSTATE_CFAR, cfar); + HSTATE_FIELD(HSTATE_PPR, ppr); #endif /* CONFIG_PPC_BOOK3S_64 */ #else /* CONFIG_PPC_BOOK3S */ diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 
55593ee2d5a..67130206534 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -189,14 +189,13 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len) } /* If PCI-E capable, dump PCI-E cap 10, and the AER */ - cap = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (cap) { + if (pci_is_pcie(dev)) { n += scnprintf(buf+n, len-n, "pci-e cap10:\n"); printk(KERN_WARNING "EEH: PCI-E capabilities and status follow:\n"); for (i=0; i<=8; i++) { - eeh_ops->read_config(dn, cap+4*i, 4, &cfg); + eeh_ops->read_config(dn, dev->pcie_cap+4*i, 4, &cfg); n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg); } @@ -327,11 +326,11 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) /* Isolate the PHB and send event */ eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); eeh_serialize_unlock(flags); - eeh_send_failure_event(phb_pe); pr_err("EEH: PHB#%x failure detected\n", phb_pe->phb->global_number); dump_stack(); + eeh_send_failure_event(phb_pe); return 1; out: @@ -454,8 +453,6 @@ int eeh_dev_check_failure(struct eeh_dev *edev) eeh_pe_state_mark(pe, EEH_PE_ISOLATED); eeh_serialize_unlock(flags); - eeh_send_failure_event(pe); - /* Most EEH events are due to device driver bugs. Having * a stack trace will help the device-driver authors figure * out what happened. So print that out. @@ -464,6 +461,8 @@ int eeh_dev_check_failure(struct eeh_dev *edev) pe->addr, pe->phb->global_number); dump_stack(); + eeh_send_failure_event(pe); + return 1; dn_unlock: diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index c04cdf70d48..bbfb0294b35 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -673,9 +673,7 @@ _GLOBAL(ret_from_except_lite) resume_kernel: /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ - CURRENT_THREAD_INFO(r9, r1) - ld r8,TI_FLAGS(r9) - andis. r8,r8,_TIF_EMULATE_STACK_STORE@h + andis. r8,r4,_TIF_EMULATE_STACK_STORE@h beq+ 1f addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ @@ -820,6 +818,12 @@ fast_exception_return: andi. r0,r3,MSR_RI beq- unrecov_restore + /* Load PPR from thread struct before we clear MSR:RI */ +BEGIN_FTR_SECTION + ld r2,PACACURRENT(r13) + ld r2,TASKTHREADPPR(r2) +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + /* * Clear RI before restoring r13. If we are returning to * userspace and we take an exception after restoring r13, @@ -840,8 +844,10 @@ fast_exception_return: */ andi. r0,r3,MSR_PR beq 1f +BEGIN_FTR_SECTION + mtspr SPRN_PPR,r2 /* Restore PPR */ +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ACCOUNT_CPU_USER_EXIT(r2, r4) - RESTORE_PPR(r2, r4) REST_GPR(13, r1) 1: mtspr SPRN_SRR1,r3 @@ -1017,7 +1023,7 @@ _GLOBAL(enter_rtas) li r9,1 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG) - ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI + ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE andc r6,r0,r9 sync /* disable interrupts so SRR0/1 */ mtmsrd r0 /* don't get trashed */ @@ -1032,6 +1038,8 @@ _GLOBAL(enter_rtas) b . 
/* prevent speculative execution */ _STATIC(rtas_return_loc) + FIXUP_ENDIAN + /* relocation is off at this point */ GET_PACA(r4) clrldi r4,r4,2 /* convert to realmode address */ @@ -1103,28 +1111,30 @@ _GLOBAL(enter_prom) std r10,_CCR(r1) std r11,_MSR(r1) - /* Get the PROM entrypoint */ - mtlr r4 + /* Put PROM address in SRR0 */ + mtsrr0 r4 - /* Switch MSR to 32 bits mode + /* Setup our trampoline return addr in LR */ + bcl 20,31,$+4 +0: mflr r4 + addi r4,r4,(1f - 0b) + mtlr r4 + + /* Prepare a 32-bit mode big endian MSR */ #ifdef CONFIG_PPC_BOOK3E rlwinm r11,r11,0,1,31 - mtmsr r11 + mtsrr1 r11 + rfi #else /* CONFIG_PPC_BOOK3E */ - mfmsr r11 - li r12,1 - rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) - andc r11,r11,r12 - li r12,1 - rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) - andc r11,r11,r12 - mtmsrd r11 + LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) + andc r11,r11,r12 + mtsrr1 r11 + rfid #endif /* CONFIG_PPC_BOOK3E */ - isync - /* Enter PROM here... */ - blrl +1: /* Return from OF */ + FIXUP_ENDIAN /* Just make sure that r1 top 32 bits didn't get * corrupt by OF diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index 6300c13bbde..7898be90f2d 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c @@ -18,6 +18,7 @@ */ #include <linux/of.h> +#include <linux/of_fdt.h> #include <asm/epapr_hcalls.h> #include <asm/cacheflush.h> #include <asm/code-patching.h> diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 2d067049db2..e7751561fd1 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -399,7 +399,7 @@ interrupt_end_book3e: /* Altivec Unavailable Interrupt */ START_EXCEPTION(altivec_unavailable); - NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, + NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL, PROLOG_ADDITION_NONE) /* we can probably do a shorter exception entry for that one... */ EXCEPTION_COMMON(0x200, PACA_EXGEN, INTS_KEEP) @@ -421,7 +421,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* AltiVec Assist */ START_EXCEPTION(altivec_assist); - NORMAL_EXCEPTION_PROLOG(0x220, BOOKE_INTERRUPT_ALTIVEC_ASSIST, + NORMAL_EXCEPTION_PROLOG(0x220, + BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220, PACA_EXGEN, INTS_DISABLE) bl .save_nvgprs @@ -607,6 +608,7 @@ kernel_dbg_exc: NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) + CHECK_NAPPING() addi r3,r1,STACK_FRAME_OVERHEAD bl .performance_monitor_exception b .ret_from_except_lite diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3a9ed6ac224..9f905e40922 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -126,7 +126,7 @@ BEGIN_FTR_SECTION bgt cr1,. GET_PACA(r13) -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE li r0,KVM_HWTHREAD_IN_KERNEL stb r0,HSTATE_HWTHREAD_STATE(r13) /* Order setting hwthread_state vs. testing hwthread_req */ @@ -425,7 +425,7 @@ data_access_check_stab: mfspr r9,SPRN_DSISR srdi r10,r10,60 rlwimi r10,r9,16,0x20 -#ifdef CONFIG_KVM_BOOK3S_PR +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE lbz r9,HSTATE_IN_GUEST(r13) rlwimi r10,r9,8,0x300 #endif @@ -650,6 +650,32 @@ slb_miss_user_pseries: b . /* prevent spec. 
execution */ #endif /* __DISABLED__ */ +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER +kvmppc_skip_interrupt: + /* + * Here all GPRs are unchanged from when the interrupt happened + * except for r13, which is saved in SPRG_SCRATCH0. + */ + mfspr r13, SPRN_SRR0 + addi r13, r13, 4 + mtspr SPRN_SRR0, r13 + GET_SCRATCH0(r13) + rfid + b . + +kvmppc_skip_Hinterrupt: + /* + * Here all GPRs are unchanged from when the interrupt happened + * except for r13, which is saved in SPRG_SCRATCH0. + */ + mfspr r13, SPRN_HSRR0 + addi r13, r13, 4 + mtspr SPRN_HSRR0, r13 + GET_SCRATCH0(r13) + hrfid + b . +#endif + /* * Code from here down to __end_handlers is invoked from the * exception prologs above. Because the prologs assemble the diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index caeaabf11a2..f7f5b8bed68 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -35,15 +35,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 2: REST_32VSRS(n,c,base); \ 3: -#define __REST_32FPVSRS_TRANSACT(n,c,base) \ -BEGIN_FTR_SECTION \ - b 2f; \ -END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ - REST_32FPRS_TRANSACT(n,base); \ - b 3f; \ -2: REST_32VSRS_TRANSACT(n,c,base); \ -3: - #define __SAVE_32FPVSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ @@ -54,40 +45,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 3: #else #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base) -#define __REST_32FPVSRS_TRANSACT(n,b,base) REST_32FPRS(n, base) #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base) #endif #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base) -#define REST_32FPVSRS_TRANSACT(n,c,base) \ - __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base) #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -/* - * Wrapper to call load_up_fpu from C. - * void do_load_up_fpu(struct pt_regs *regs); - */ -_GLOBAL(do_load_up_fpu) - mflr r0 - std r0, 16(r1) - stdu r1, -112(r1) - - subi r6, r3, STACK_FRAME_OVERHEAD - /* load_up_fpu expects r12=MSR, r13=PACA, and returns - * with r12 = new MSR. - */ - ld r12,_MSR(r6) - GET_PACA(r13) - - bl load_up_fpu - std r12,_MSR(r6) - - ld r0, 112+16(r1) - addi r1, r1, 112 - mtlr r0 - blr - - /* void do_load_up_transact_fpu(struct thread_struct *thread) * * This is similar to load_up_fpu but for the transactional version of the FP @@ -105,9 +68,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) SYNC MTMSRD(r5) - lfd fr0,THREAD_TRANSACT_FPSCR(r3) + addi r7,r3,THREAD_TRANSACT_FPSTATE + lfd fr0,FPSTATE_FPSCR(r7) MTFSF_L(fr0) - REST_32FPVSRS_TRANSACT(0, R4, R3) + REST_32FPVSRS(0, R4, R7) /* FP/VSX off again */ MTMSRD(r6) @@ -117,11 +81,33 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* + * Load state from memory into FP registers including FPSCR. + * Assumes the caller has enabled FP in the MSR. + */ +_GLOBAL(load_fp_state) + lfd fr0,FPSTATE_FPSCR(r3) + MTFSF_L(fr0) + REST_32FPVSRS(0, R4, R3) + blr + +/* + * Store FP state into memory, including FPSCR + * Assumes the caller has enabled FP in the MSR. + */ +_GLOBAL(store_fp_state) + SAVE_32FPVSRS(0, R4, R3) + mffs fr0 + stfd fr0,FPSTATE_FPSCR(r3) + blr + +/* * This task wants to use the FPU now. * On UP, disable FP for the task which had the FPU previously, * and save its floating-point registers in its thread_struct. * Load up this task's FP registers from its thread_struct, * enable the FPU for the current task and return to the task. + * Note that on 32-bit this can only use registers that will be + * restored by fast_exception_return, i.e. 
r3 - r6, r10 and r11. */ _GLOBAL(load_up_fpu) mfmsr r5 @@ -147,9 +133,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) beq 1f toreal(r4) addi r4,r4,THREAD /* want last_task_used_math->thread */ - SAVE_32FPVSRS(0, R5, R4) + addi r10,r4,THREAD_FPSTATE + SAVE_32FPVSRS(0, R5, R10) mffs fr0 - stfd fr0,THREAD_FPSCR(r4) + stfd fr0,FPSTATE_FPSCR(r10) PPC_LL r5,PT_REGS(r4) toreal(r5) PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) @@ -160,7 +147,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif /* CONFIG_SMP */ /* enable use of FP after return */ #ifdef CONFIG_PPC32 - mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ + mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ lwz r4,THREAD_FPEXC_MODE(r5) ori r9,r9,MSR_FP /* enable FP for current */ or r9,r9,r4 @@ -172,9 +159,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) or r12,r12,r4 std r12,_MSR(r1) #endif - lfd fr0,THREAD_FPSCR(r5) + addi r10,r5,THREAD_FPSTATE + lfd fr0,FPSTATE_FPSCR(r10) MTFSF_L(fr0) - REST_32FPVSRS(0, R4, R5) + REST_32FPVSRS(0, R4, R10) #ifndef CONFIG_SMP subi r4,r5,THREAD fromreal(r4) @@ -206,11 +194,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) PPC_LCMPI 0,r3,0 beqlr- /* if no previous owner, done */ addi r3,r3,THREAD /* want THREAD of task */ + PPC_LL r6,THREAD_FPSAVEAREA(r3) PPC_LL r5,PT_REGS(r3) - PPC_LCMPI 0,r5,0 - SAVE_32FPVSRS(0, R4 ,R3) + PPC_LCMPI 0,r6,0 + bne 2f + addi r6,r3,THREAD_FPSTATE +2: PPC_LCMPI 0,r5,0 + SAVE_32FPVSRS(0, R4, R6) mffs fr0 - stfd fr0,THREAD_FPSCR(r3) + stfd fr0,FPSTATE_FPSCR(r6) beq 1f PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) li r3,MSR_FP|MSR_FE0|MSR_FE1 diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 1fb78561096..9b27b293a92 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -174,7 +174,11 @@ __ftrace_make_nop(struct module *mod, pr_devel(" %08x %08x\n", jmp[0], jmp[1]); +#ifdef __LITTLE_ENDIAN__ + ptr = ((unsigned long)jmp[1] << 32) + jmp[0]; +#else ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; +#endif /* This should match what was called */ if (ptr != ppc_function_entry((void *)addr)) { diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 3d11d8038de..2ae41aba405 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -68,6 +68,7 @@ _stext: _GLOBAL(__start) /* NOP this out unconditionally */ BEGIN_FTR_SECTION + FIXUP_ENDIAN b .__start_initialization_multiplatform END_FTR_SECTION(0, 1) @@ -115,6 +116,7 @@ __run_at_load: */ .globl __secondary_hold __secondary_hold: + FIXUP_ENDIAN #ifndef CONFIG_PPC_BOOK3E mfmsr r24 ori r24,r24,MSR_RI @@ -205,6 +207,7 @@ _GLOBAL(generic_secondary_thread_init) * as SCOM before entry). 
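The ftrace.c hunk above chooses which of the two 32-bit words read from the module stub holds the upper half of the 64-bit target address, since that depends on the byte order the kernel runs in. A standalone sketch of the same reconstruction; the helper and parameter names are invented for the illustration:

    #include <stdint.h>

    /* Reassemble a 64-bit address stored as two 32-bit words.  On a
     * big-endian kernel the first word is the high half; on a
     * little-endian kernel it is the second word. */
    static uint64_t combine_toc_words(uint32_t w0, uint32_t w1, int big_endian)
    {
            if (big_endian)
                    return ((uint64_t)w0 << 32) | w1;
            return ((uint64_t)w1 << 32) | w0;
    }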
*/ _GLOBAL(generic_secondary_smp_init) + FIXUP_ENDIAN mr r24,r3 mr r25,r4 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1b92a97b1b0..7ee876d2adb 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -858,6 +858,9 @@ initial_mmu: addis r11, r11, 0x0080 /* Add 8M */ mtspr SPRN_MD_RPN, r11 + addi r10, r10, 0x0100 + mtspr SPRN_MD_CTR, r10 + addis r8, r8, 0x0080 /* Add 8M */ mtspr SPRN_MD_EPN, r8 mtspr SPRN_MD_TWC, r9 diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 289afaffbbb..f45726a1d96 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -555,27 +555,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #ifdef CONFIG_SPE /* SPE Unavailable */ START_EXCEPTION(SPEUnavailable) - NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL) + NORMAL_EXCEPTION_PROLOG(SPE_ALTIVEC_UNAVAIL) beq 1f bl load_up_spe b fast_exception_return 1: addi r3,r1,STACK_FRAME_OVERHEAD EXC_XFER_EE_LITE(0x2010, KernelSPE) #else - EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \ + EXCEPTION(0x2020, SPE_ALTIVEC_UNAVAIL, SPEUnavailable, \ unknown_exception, EXC_XFER_EE) #endif /* CONFIG_SPE */ /* SPE Floating Point Data */ #ifdef CONFIG_SPE - EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \ - SPEFloatingPointException, EXC_XFER_EE); + EXCEPTION(0x2030, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData, + SPEFloatingPointException, EXC_XFER_EE) /* SPE Floating Point Round */ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ SPEFloatingPointRoundException, EXC_XFER_EE) #else - EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \ + EXCEPTION(0x2040, SPE_FP_DATA_ALTIVEC_ASSIST, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ unknown_exception, EXC_XFER_EE) diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index e11863f4e59..847e40e62fc 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -84,7 +84,7 @@ _GLOBAL(power7_nap) std r9,_MSR(r1) std r1,PACAR1(r13) -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* Tell KVM we're napping */ li r4,KVM_HWTHREAD_IN_NAP stb r4,HSTATE_HWTHREAD_STATE(r13) diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index c1eef241017..83e89d31073 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -151,15 +151,16 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) return 1; } +static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info); static int kgdb_singlestep(struct pt_regs *regs) { struct thread_info *thread_info, *exception_thread_info; - struct thread_info *backup_current_thread_info; + struct thread_info *backup_current_thread_info = + &__get_cpu_var(kgdb_thread_info); if (user_mode(regs)) return 0; - backup_current_thread_info = kmalloc(sizeof(struct thread_info), GFP_KERNEL); /* * On Book E and perhaps other processors, singlestep is handled on * the critical exception stack. This causes current_thread_info() @@ -185,7 +186,6 @@ static int kgdb_singlestep(struct pt_regs *regs) /* Restore current_thread_info lastly. 
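The kgdb_singlestep() change above replaces a kmalloc(GFP_KERNEL) made while handling a single-step exception with a statically allocated per-CPU backup area, so saving the thread_info copy can no longer sleep or fail at exception time. A rough userspace analogue of the pattern, with invented names and sizes:

    #include <string.h>

    #define NR_CPUS 8                               /* invented for the sketch */

    struct ctx { unsigned long flags; int preempt_count; };  /* stand-in for thread_info */

    /* One statically reserved backup slot per CPU: nothing is allocated
     * on the exception path. */
    static struct ctx backup_slot[NR_CPUS];

    static void save_ctx(const struct ctx *live, int cpu)
    {
            memcpy(&backup_slot[cpu], live, sizeof(*live));
    }

    static void restore_ctx(struct ctx *live, int cpu)
    {
            memcpy(live, &backup_slot[cpu], sizeof(*live));
    }

    int main(void)
    {
            struct ctx live = { .flags = 1, .preempt_count = 0 };
            save_ctx(&live, 0);
            live.flags = 0;                 /* clobbered while single-stepping */
            restore_ctx(&live, 0);
            return (int)live.flags - 1;     /* 0 if the restore worked */
    }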
*/ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); - kfree(backup_current_thread_info); return 1; } diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 2156ea90eb5..90fab64d911 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -429,7 +429,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accouting + * we can also use npre/npostfault count for accounting * these specific fault cases. */ kprobes_inc_nmissed_count(cur); diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 22e88dd2f34..40bd7bd4e19 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -35,7 +35,7 @@ static struct legacy_serial_info { phys_addr_t taddr; } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; -static struct __initdata of_device_id legacy_serial_parents[] = { +static struct of_device_id legacy_serial_parents[] __initdata = { {.type = "soc",}, {.type = "tsi-bridge",}, {.type = "opb", }, diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 611acdf3009..be4e6d648f6 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -312,7 +312,7 @@ static union thread_union kexec_stack __init_task_data = */ struct paca_struct kexec_paca; -/* Our assembly helper, in kexec_stub.S */ +/* Our assembly helper, in misc_64.S */ extern void kexec_sequence(void *newstack, unsigned long start, void *image, void *control, void (*clear_all)(void)) __noreturn; diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 2b0ad984536..e47d268727a 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -659,6 +659,20 @@ _GLOBAL(__lshrdi3) blr /* + * 64-bit comparison: __cmpdi2(s64 a, s64 b) + * Returns 0 if a < b, 1 if a == b, 2 if a > b. + */ +_GLOBAL(__cmpdi2) + cmpw r3,r5 + li r3,1 + bne 1f + cmplw r4,r6 + beqlr +1: li r3,0 + bltlr + li r3,2 + blr +/* * 64-bit comparison: __ucmpdi2(u64 a, u64 b) * Returns 0 if a < b, 1 if a == b, 2 if a > b. */ diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 2d275707f41..9547381b631 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -25,8 +25,7 @@ #include <asm/uaccess.h> #include <asm/firmware.h> #include <linux/sort.h> - -#include "setup.h" +#include <asm/setup.h> LIST_HEAD(module_bug_list); diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index 2e3200ca485..6cff040bf45 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -26,8 +26,7 @@ #include <linux/cache.h> #include <linux/bug.h> #include <linux/sort.h> - -#include "setup.h" +#include <asm/setup.h> #if 0 #define DEBUGP printk diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 6ee59a0eb26..12664c130d7 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -26,8 +26,7 @@ #include <asm/firmware.h> #include <asm/code-patching.h> #include <linux/sort.h> - -#include "setup.h" +#include <asm/setup.h> /* FIXME: We don't do .init separately. To do this, we'd need to have a separate r2 value in the init and core section, and stub between @@ -62,6 +61,16 @@ struct ppc64_stub_entry r2) into the stub. 
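The new __cmpdi2 in misc_32.S above follows the libgcc return convention, so compiler-generated 64-bit comparisons on 32-bit builds can be satisfied without linking libgcc (it is exported alongside __ucmpdi2 later in this series of hunks). A C reference for that convention, sketch only; the kernel's real routine is the assembly shown above:

    /* Returns 0 if a < b, 1 if a == b, 2 if a > b (signed compare). */
    int cmpdi2_reference(long long a, long long b)
    {
            if (a < b)
                    return 0;
            if (a == b)
                    return 1;
            return 2;
    }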
*/ static struct ppc64_stub_entry ppc64_stub = { .jump = { +#ifdef __LITTLE_ENDIAN__ + 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, <high> */ + 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, <low> */ + /* Save current r2 value in magic place on the stack. */ + 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */ + 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */ + 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */ + 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */ + 0x20, 0x04, 0x80, 0x4e /* bctr */ +#else 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */ 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */ /* Save current r2 value in magic place on the stack. */ @@ -70,6 +79,7 @@ static struct ppc64_stub_entry ppc64_stub = 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */ 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */ 0x4e, 0x80, 0x04, 0x20 /* bctr */ +#endif } }; /* Count how many different 24-bit relocations (different symbol, @@ -269,8 +279,13 @@ static inline int create_stub(Elf64_Shdr *sechdrs, *entry = ppc64_stub; +#ifdef __LITTLE_ENDIAN__ + loc1 = (Elf64_Half *)&entry->jump[0]; + loc2 = (Elf64_Half *)&entry->jump[4]; +#else loc1 = (Elf64_Half *)&entry->jump[2]; loc2 = (Elf64_Half *)&entry->jump[6]; +#endif /* Stub uses address relative to r2. */ reladdr = (unsigned long)entry - my_r2(sechdrs, me); diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 8213ee1eb05..fd82c289ab1 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -223,9 +223,13 @@ static int __init nvram_write_header(struct nvram_partition * part) { loff_t tmp_index; int rc; - + struct nvram_header phead; + + memcpy(&phead, &part->header, NVRAM_HEADER_LEN); + phead.length = cpu_to_be16(phead.length); + tmp_index = part->index; - rc = ppc_md.nvram_write((char *)&part->header, NVRAM_HEADER_LEN, &tmp_index); + rc = ppc_md.nvram_write((char *)&phead, NVRAM_HEADER_LEN, &tmp_index); return rc; } @@ -505,6 +509,8 @@ int __init nvram_scan_partitions(void) memcpy(&phead, header, NVRAM_HEADER_LEN); + phead.length = be16_to_cpu(phead.length); + err = 0; c_sum = nvram_checksum(&phead); if (c_sum != phead.checksum) { diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 3fc16e3beb9..0620eaaaad4 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -46,7 +46,7 @@ struct lppaca lppaca[] = { static struct lppaca *extra_lppacas; static long __initdata lppaca_size; -static void allocate_lppacas(int nr_cpus, unsigned long limit) +static void __init allocate_lppacas(int nr_cpus, unsigned long limit) { if (nr_cpus <= NR_LPPACAS) return; @@ -57,7 +57,7 @@ static void allocate_lppacas(int nr_cpus, unsigned long limit) PAGE_SIZE, limit)); } -static struct lppaca *new_lppaca(int cpu) +static struct lppaca * __init new_lppaca(int cpu) { struct lppaca *lp; @@ -70,7 +70,7 @@ static struct lppaca *new_lppaca(int cpu) return lp; } -static void free_lppacas(void) +static void __init free_lppacas(void) { long new_size = 0, nr; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 905a24bb7ac..a1e3e40ca3f 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -228,7 +228,7 @@ int pcibios_add_platform_entries(struct pci_dev *pdev) */ static int pci_read_irq_line(struct pci_dev *pci_dev) { - struct of_irq oirq; + struct of_phandle_args oirq; unsigned int virq; pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); @@ -237,7 +237,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) memset(&oirq, 0xff, sizeof(oirq)); #endif /* 
Try to get a mapping from the device-tree */ - if (of_irq_map_pci(pci_dev, &oirq)) { + if (of_irq_parse_pci(pci_dev, &oirq)) { u8 line, pin; /* If that fails, lets fallback to what is in the config @@ -263,11 +263,10 @@ static int pci_read_irq_line(struct pci_dev *pci_dev) irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); } else { pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", - oirq.size, oirq.specifier[0], oirq.specifier[1], - of_node_full_name(oirq.controller)); + oirq.args_count, oirq.args[0], oirq.args[1], + of_node_full_name(oirq.np)); - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); + virq = irq_create_of_mapping(&oirq); } if(virq == NO_IRQ) { pr_debug(" Failed to map !\n"); diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 4368ec6fdc8..ac0b034f9ae 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -302,7 +302,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, struct device_node *dn) { struct pci_dev *dev = NULL; - const u32 *reg; + const __be32 *reg; int reglen, devfn; pr_debug(" * %s\n", dn->full_name); @@ -312,7 +312,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus, reg = of_get_property(dn, "reg", ®len); if (reg == NULL || reglen < 20) return NULL; - devfn = (reg[0] >> 8) & 0xff; + devfn = (of_read_number(reg, 1) >> 8) & 0xff; /* Check if the PCI device is already there */ dev = pci_get_slot(bus, devfn); diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 21646dbe1bb..3bd77edd761 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -79,10 +79,12 @@ EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strncmp); +#ifndef CONFIG_GENERIC_CSUM EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(ip_fast_csum); EXPORT_SYMBOL(csum_tcpudp_magic); +#endif EXPORT_SYMBOL(__copy_tofrom_user); EXPORT_SYMBOL(__clear_user); @@ -98,9 +100,13 @@ EXPORT_SYMBOL(start_thread); #ifdef CONFIG_PPC_FPU EXPORT_SYMBOL(giveup_fpu); +EXPORT_SYMBOL(load_fp_state); +EXPORT_SYMBOL(store_fp_state); #endif #ifdef CONFIG_ALTIVEC EXPORT_SYMBOL(giveup_altivec); +EXPORT_SYMBOL(load_vr_state); +EXPORT_SYMBOL(store_vr_state); #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_VSX EXPORT_SYMBOL(giveup_vsx); @@ -143,10 +149,14 @@ EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__lshrdi3); int __ucmpdi2(unsigned long long, unsigned long long); EXPORT_SYMBOL(__ucmpdi2); +int __cmpdi2(long long, long long); +EXPORT_SYMBOL(__cmpdi2); #endif long long __bswapdi2(long long); EXPORT_SYMBOL(__bswapdi2); +#ifdef __BIG_ENDIAN__ EXPORT_SYMBOL(memcpy); +#endif EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memcmp); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 96d2fdf3aa9..75c2d100998 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -314,28 +314,28 @@ static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk); */ static void set_debug_reg_defaults(struct thread_struct *thread) { - thread->iac1 = thread->iac2 = 0; + thread->debug.iac1 = thread->debug.iac2 = 0; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 - thread->iac3 = thread->iac4 = 0; + thread->debug.iac3 = thread->debug.iac4 = 0; #endif - thread->dac1 = thread->dac2 = 0; + thread->debug.dac1 = thread->debug.dac2 = 0; #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - thread->dvc1 = thread->dvc2 = 0; + thread->debug.dvc1 = thread->debug.dvc2 = 0; #endif - thread->dbcr0 = 0; + 
thread->debug.dbcr0 = 0; #ifdef CONFIG_BOOKE /* * Force User/Supervisor bits to b11 (user-only MSR[PR]=1) */ - thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | \ + thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US | DBCR1_IAC4US; /* * Force Data Address Compare User/Supervisor bits to be User-only * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0. */ - thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; + thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; #else - thread->dbcr1 = 0; + thread->debug.dbcr1 = 0; #endif } @@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) */ mtmsr(mfmsr() & ~MSR_DE); - mtspr(SPRN_IAC1, thread->iac1); - mtspr(SPRN_IAC2, thread->iac2); + mtspr(SPRN_IAC1, thread->debug.iac1); + mtspr(SPRN_IAC2, thread->debug.iac2); #if CONFIG_PPC_ADV_DEBUG_IACS > 2 - mtspr(SPRN_IAC3, thread->iac3); - mtspr(SPRN_IAC4, thread->iac4); + mtspr(SPRN_IAC3, thread->debug.iac3); + mtspr(SPRN_IAC4, thread->debug.iac4); #endif - mtspr(SPRN_DAC1, thread->dac1); - mtspr(SPRN_DAC2, thread->dac2); + mtspr(SPRN_DAC1, thread->debug.dac1); + mtspr(SPRN_DAC2, thread->debug.dac2); #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - mtspr(SPRN_DVC1, thread->dvc1); - mtspr(SPRN_DVC2, thread->dvc2); + mtspr(SPRN_DVC1, thread->debug.dvc1); + mtspr(SPRN_DVC2, thread->debug.dvc2); #endif - mtspr(SPRN_DBCR0, thread->dbcr0); - mtspr(SPRN_DBCR1, thread->dbcr1); + mtspr(SPRN_DBCR0, thread->debug.dbcr0); + mtspr(SPRN_DBCR1, thread->debug.dbcr1); #ifdef CONFIG_BOOKE - mtspr(SPRN_DBCR2, thread->dbcr2); + mtspr(SPRN_DBCR2, thread->debug.dbcr2); #endif } /* @@ -371,12 +371,13 @@ static void prime_debug_regs(struct thread_struct *thread) * debug registers, set the debug registers from the values * stored in the new thread. */ -static void switch_booke_debug_regs(struct thread_struct *new_thread) +void switch_booke_debug_regs(struct thread_struct *new_thread) { - if ((current->thread.dbcr0 & DBCR0_IDM) - || (new_thread->dbcr0 & DBCR0_IDM)) + if ((current->thread.debug.dbcr0 & DBCR0_IDM) + || (new_thread->debug.dbcr0 & DBCR0_IDM)) prime_debug_regs(new_thread); } +EXPORT_SYMBOL_GPL(switch_booke_debug_regs); #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ #ifndef CONFIG_HAVE_HW_BREAKPOINT static void set_debug_reg_defaults(struct thread_struct *thread) @@ -596,12 +597,13 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *new) { struct thread_struct *new_thread, *old_thread; - unsigned long flags; struct task_struct *last; #ifdef CONFIG_PPC_BOOK3S_64 struct ppc64_tlb_batch *batch; #endif + WARN_ON(!irqs_disabled()); + /* Back up the TAR across context switches. * Note that the TAR is not available for use in the kernel. 
(To * provide this, the TAR should be backed up/restored on exception @@ -721,8 +723,6 @@ struct task_struct *__switch_to(struct task_struct *prev, } #endif /* CONFIG_PPC_BOOK3S_64 */ - local_irq_save(flags); - /* * We can't take a PMU exception inside _switch() since there is a * window where the kernel stack SLB and the kernel stack are out @@ -742,8 +742,6 @@ struct task_struct *__switch_to(struct task_struct *prev, } #endif /* CONFIG_PPC_BOOK3S_64 */ - local_irq_restore(flags); - return last; } @@ -1008,6 +1006,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.ptrace_bps[0] = NULL; #endif + p->thread.fp_save_area = NULL; +#ifdef CONFIG_ALTIVEC + p->thread.vr_save_area = NULL; +#endif + #ifdef CONFIG_PPC_STD_MMU_64 if (mmu_has_feature(MMU_FTR_SLB)) { unsigned long sp_vsid; @@ -1113,12 +1116,12 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) #ifdef CONFIG_VSX current->thread.used_vsr = 0; #endif - memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); - current->thread.fpscr.val = 0; + memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); + current->thread.fp_save_area = NULL; #ifdef CONFIG_ALTIVEC - memset(current->thread.vr, 0, sizeof(current->thread.vr)); - memset(¤t->thread.vscr, 0, sizeof(current->thread.vscr)); - current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ + memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); + current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ + current->thread.vr_save_area = NULL; current->thread.vrsave = 0; current->thread.used_vr = 0; #endif /* CONFIG_ALTIVEC */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index b7634ce41db..f3a47098fb8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -546,15 +546,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) memblock_add(base, size); } -#ifdef CONFIG_BLK_DEV_INITRD -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; -} -#endif - static void __init early_reserve_mem_dt(void) { unsigned long i, len, dt_root; @@ -761,37 +752,6 @@ void __init early_init_devtree(void *params) *******/ /** - * of_find_next_cache_node - Find a node's subsidiary cache - * @np: node of type "cpu" or "cache" - * - * Returns a node pointer with refcount incremented, use - * of_node_put() on it when done. Caller should hold a reference - * to np. - */ -struct device_node *of_find_next_cache_node(struct device_node *np) -{ - struct device_node *child; - const phandle *handle; - - handle = of_get_property(np, "l2-cache", NULL); - if (!handle) - handle = of_get_property(np, "next-level-cache", NULL); - - if (handle) - return of_find_node_by_phandle(*handle); - - /* OF on pmac has nodes instead of properties named "l2-cache" - * beneath CPU nodes. 
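Several of the hunks above (asm-offsets.c, fpu.S, process.c) follow from moving the FP/VSX and AltiVec register images out of thread_struct into separate containers that load_fp_state()/store_fp_state() and the new fp_save_area/vr_save_area pointers can address as a unit. A simplified picture of those containers, close to but not exactly the kernel's definitions (the second dimension of fpr depends on CONFIG_VSX, and alignment attributes are omitted):

    #include <stdint.h>

    typedef uint8_t vector128[16];          /* stand-in for the real vector type */

    struct thread_fp_state {
            uint64_t fpr[32][2];            /* FP/VSX doubleword images */
            uint64_t fpscr;                 /* immediately after fpr[32], as the BUILD_BUG_ONs check */
    };

    struct thread_vr_state {
            vector128 vr[32];
            vector128 vscr;                 /* at the offset of a 33rd vector register */
    };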
- */ - if (!strcmp(np->type, "cpu")) - for_each_child_of_node(np, child) - if (!strcmp(child->type, "cache")) - return child; - - return NULL; -} - -/** * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device * @np: device node of the device * diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 5fe2842e8ba..cb64a6e1dc5 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -858,7 +858,8 @@ static void __init prom_send_capabilities(void) { ihandle root; prom_arg_t ret; - __be32 *cores; + u32 cores; + unsigned char *ptcores; root = call_prom("open", 1, 1, ADDR("/")); if (root != 0) { @@ -868,15 +869,30 @@ static void __init prom_send_capabilities(void) * (we assume this is the same for all cores) and use it to * divide NR_CPUS. */ - cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]; - if (be32_to_cpup(cores) != NR_CPUS) { + + /* The core value may start at an odd address. If such a word + * access is made at a cache line boundary, this leads to an + * exception which may not be handled at this time. + * Forcing a per byte access to avoid exception. + */ + ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET]; + cores = 0; + cores |= ptcores[0] << 24; + cores |= ptcores[1] << 16; + cores |= ptcores[2] << 8; + cores |= ptcores[3]; + if (cores != NR_CPUS) { prom_printf("WARNING ! " "ibm_architecture_vec structure inconsistent: %lu!\n", - be32_to_cpup(cores)); + cores); } else { - *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads())); + cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", - be32_to_cpup(cores), NR_CPUS); + cores, NR_CPUS); + ptcores[0] = (cores >> 24) & 0xff; + ptcores[1] = (cores >> 16) & 0xff; + ptcores[2] = (cores >> 8) & 0xff; + ptcores[3] = cores & 0xff; } /* try calling the ibm,client-architecture-support method */ diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 9a0d24c390a..75fb40498b4 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -362,7 +362,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, void *kbuf, void __user *ubuf) { #ifdef CONFIG_VSX - double buf[33]; + u64 buf[33]; int i; #endif flush_fp_to_thread(target); @@ -371,15 +371,15 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, /* copy to local buffer then write that out */ for (i = 0; i < 32 ; i++) buf[i] = target->thread.TS_FPR(i); - memcpy(&buf[32], &target->thread.fpscr, sizeof(double)); + buf[32] = target->thread.fp_state.fpscr; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1); #else - BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != - offsetof(struct thread_struct, TS_FPR(32))); + BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != + offsetof(struct thread_fp_state, fpr[32][0])); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.fpr, 0, -1); + &target->thread.fp_state, 0, -1); #endif } @@ -388,7 +388,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, const void *kbuf, const void __user *ubuf) { #ifdef CONFIG_VSX - double buf[33]; + u64 buf[33]; int i; #endif flush_fp_to_thread(target); @@ -400,14 +400,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, return i; for (i = 0; i < 32 ; i++) target->thread.TS_FPR(i) = buf[i]; - memcpy(&target->thread.fpscr, &buf[32], 
sizeof(double)); + target->thread.fp_state.fpscr = buf[32]; return 0; #else - BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) != - offsetof(struct thread_struct, TS_FPR(32))); + BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != + offsetof(struct thread_fp_state, fpr[32][0])); return user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.fpr, 0, -1); + &target->thread.fp_state, 0, -1); #endif } @@ -440,11 +440,11 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, flush_altivec_to_thread(target); - BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != - offsetof(struct thread_struct, vr[32])); + BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != + offsetof(struct thread_vr_state, vr[32])); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, - &target->thread.vr, 0, + &target->thread.vr_state, 0, 33 * sizeof(vector128)); if (!ret) { /* @@ -471,11 +471,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, flush_altivec_to_thread(target); - BUILD_BUG_ON(offsetof(struct thread_struct, vscr) != - offsetof(struct thread_struct, vr[32])); + BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) != + offsetof(struct thread_vr_state, vr[32])); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, - &target->thread.vr, 0, 33 * sizeof(vector128)); + &target->thread.vr_state, 0, + 33 * sizeof(vector128)); if (!ret && count > 0) { /* * We use only the first word of vrsave. @@ -514,13 +515,13 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - double buf[32]; + u64 buf[32]; int ret, i; flush_vsx_to_thread(target); for (i = 0; i < 32 ; i++) - buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET]; + buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); @@ -531,7 +532,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { - double buf[32]; + u64 buf[32]; int ret,i; flush_vsx_to_thread(target); @@ -539,7 +540,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset, ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, 32 * sizeof(double)); for (i = 0; i < 32 ; i++) - target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; + target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return ret; @@ -657,7 +658,7 @@ static const struct user_regset native_regsets[] = { #endif #ifdef CONFIG_SPE [REGSET_SPE] = { - .n = 35, + .core_note_type = NT_PPC_SPE, .n = 35, .size = sizeof(u32), .align = sizeof(u32), .active = evr_active, .get = evr_get, .set = evr_set }, @@ -854,8 +855,8 @@ void user_enable_single_step(struct task_struct *task) if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS - task->thread.dbcr0 &= ~DBCR0_BT; - task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; + task->thread.debug.dbcr0 &= ~DBCR0_BT; + task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; regs->msr |= MSR_DE; #else regs->msr &= ~MSR_BE; @@ -871,8 +872,8 @@ void user_enable_block_step(struct task_struct *task) if (regs != NULL) { #ifdef CONFIG_PPC_ADV_DEBUG_REGS - task->thread.dbcr0 &= ~DBCR0_IC; - task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; + task->thread.debug.dbcr0 &= ~DBCR0_IC; + task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT; regs->msr |= MSR_DE; #else regs->msr &= ~MSR_SE; @@ -894,16 +895,16 @@ void user_disable_single_step(struct 
task_struct *task) * And, after doing so, if all debug flags are off, turn * off DBCR0(IDM) and MSR(DE) .... Torez */ - task->thread.dbcr0 &= ~DBCR0_IC; + task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT); /* * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set. */ - if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, - task->thread.dbcr1)) { + if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0, + task->thread.debug.dbcr1)) { /* * All debug events were off..... */ - task->thread.dbcr0 &= ~DBCR0_IDM; + task->thread.debug.dbcr0 &= ~DBCR0_IDM; regs->msr &= ~MSR_DE; } #else @@ -1022,14 +1023,14 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, */ /* DAC's hold the whole address without any mode flags */ - task->thread.dac1 = data & ~0x3UL; + task->thread.debug.dac1 = data & ~0x3UL; - if (task->thread.dac1 == 0) { + if (task->thread.debug.dac1 == 0) { dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W); - if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0, - task->thread.dbcr1)) { + if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0, + task->thread.debug.dbcr1)) { task->thread.regs->msr &= ~MSR_DE; - task->thread.dbcr0 &= ~DBCR0_IDM; + task->thread.debug.dbcr0 &= ~DBCR0_IDM; } return 0; } @@ -1041,7 +1042,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */ - task->thread.dbcr0 |= DBCR0_IDM; + task->thread.debug.dbcr0 |= DBCR0_IDM; /* Check for write and read flags and set DBCR0 accordingly */ @@ -1071,10 +1072,10 @@ static long set_instruction_bp(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) { int slot; - int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0); - int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0); - int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0); - int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0); + int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0); + int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0); + int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0); + int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0); if (dbcr_iac_range(child) & DBCR_IAC12MODE) slot2_in_use = 1; @@ -1093,9 +1094,9 @@ static long set_instruction_bp(struct task_struct *child, /* We need a pair of IAC regsisters */ if ((!slot1_in_use) && (!slot2_in_use)) { slot = 1; - child->thread.iac1 = bp_info->addr; - child->thread.iac2 = bp_info->addr2; - child->thread.dbcr0 |= DBCR0_IAC1; + child->thread.debug.iac1 = bp_info->addr; + child->thread.debug.iac2 = bp_info->addr2; + child->thread.debug.dbcr0 |= DBCR0_IAC1; if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) dbcr_iac_range(child) |= DBCR_IAC12X; @@ -1104,9 +1105,9 @@ static long set_instruction_bp(struct task_struct *child, #if CONFIG_PPC_ADV_DEBUG_IACS > 2 } else if ((!slot3_in_use) && (!slot4_in_use)) { slot = 3; - child->thread.iac3 = bp_info->addr; - child->thread.iac4 = bp_info->addr2; - child->thread.dbcr0 |= DBCR0_IAC3; + child->thread.debug.iac3 = bp_info->addr; + child->thread.debug.iac4 = bp_info->addr2; + child->thread.debug.dbcr0 |= DBCR0_IAC3; if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) dbcr_iac_range(child) |= DBCR_IAC34X; @@ -1126,30 +1127,30 @@ static long set_instruction_bp(struct task_struct *child, */ if (slot2_in_use || (slot3_in_use == slot4_in_use)) { slot = 1; - child->thread.iac1 = bp_info->addr; - child->thread.dbcr0 |= DBCR0_IAC1; + child->thread.debug.iac1 = bp_info->addr; + 
child->thread.debug.dbcr0 |= DBCR0_IAC1; goto out; } } if (!slot2_in_use) { slot = 2; - child->thread.iac2 = bp_info->addr; - child->thread.dbcr0 |= DBCR0_IAC2; + child->thread.debug.iac2 = bp_info->addr; + child->thread.debug.dbcr0 |= DBCR0_IAC2; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 } else if (!slot3_in_use) { slot = 3; - child->thread.iac3 = bp_info->addr; - child->thread.dbcr0 |= DBCR0_IAC3; + child->thread.debug.iac3 = bp_info->addr; + child->thread.debug.dbcr0 |= DBCR0_IAC3; } else if (!slot4_in_use) { slot = 4; - child->thread.iac4 = bp_info->addr; - child->thread.dbcr0 |= DBCR0_IAC4; + child->thread.debug.iac4 = bp_info->addr; + child->thread.debug.dbcr0 |= DBCR0_IAC4; #endif } else return -ENOSPC; } out: - child->thread.dbcr0 |= DBCR0_IDM; + child->thread.debug.dbcr0 |= DBCR0_IDM; child->thread.regs->msr |= MSR_DE; return slot; @@ -1159,49 +1160,49 @@ static int del_instruction_bp(struct task_struct *child, int slot) { switch (slot) { case 1: - if ((child->thread.dbcr0 & DBCR0_IAC1) == 0) + if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) { /* address range - clear slots 1 & 2 */ - child->thread.iac2 = 0; + child->thread.debug.iac2 = 0; dbcr_iac_range(child) &= ~DBCR_IAC12MODE; } - child->thread.iac1 = 0; - child->thread.dbcr0 &= ~DBCR0_IAC1; + child->thread.debug.iac1 = 0; + child->thread.debug.dbcr0 &= ~DBCR0_IAC1; break; case 2: - if ((child->thread.dbcr0 & DBCR0_IAC2) == 0) + if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC12MODE) /* used in a range */ return -EINVAL; - child->thread.iac2 = 0; - child->thread.dbcr0 &= ~DBCR0_IAC2; + child->thread.debug.iac2 = 0; + child->thread.debug.dbcr0 &= ~DBCR0_IAC2; break; #if CONFIG_PPC_ADV_DEBUG_IACS > 2 case 3: - if ((child->thread.dbcr0 & DBCR0_IAC3) == 0) + if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) { /* address range - clear slots 3 & 4 */ - child->thread.iac4 = 0; + child->thread.debug.iac4 = 0; dbcr_iac_range(child) &= ~DBCR_IAC34MODE; } - child->thread.iac3 = 0; - child->thread.dbcr0 &= ~DBCR0_IAC3; + child->thread.debug.iac3 = 0; + child->thread.debug.dbcr0 &= ~DBCR0_IAC3; break; case 4: - if ((child->thread.dbcr0 & DBCR0_IAC4) == 0) + if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0) return -ENOENT; if (dbcr_iac_range(child) & DBCR_IAC34MODE) /* Used in a range */ return -EINVAL; - child->thread.iac4 = 0; - child->thread.dbcr0 &= ~DBCR0_IAC4; + child->thread.debug.iac4 = 0; + child->thread.debug.dbcr0 &= ~DBCR0_IAC4; break; #endif default: @@ -1231,18 +1232,18 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info) dbcr_dac(child) |= DBCR_DAC1R; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dbcr_dac(child) |= DBCR_DAC1W; - child->thread.dac1 = (unsigned long)bp_info->addr; + child->thread.debug.dac1 = (unsigned long)bp_info->addr; #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 if (byte_enable) { - child->thread.dvc1 = + child->thread.debug.dvc1 = (unsigned long)bp_info->condition_value; - child->thread.dbcr2 |= + child->thread.debug.dbcr2 |= ((byte_enable << DBCR2_DVC1BE_SHIFT) | (condition_mode << DBCR2_DVC1M_SHIFT)); } #endif #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE - } else if (child->thread.dbcr2 & DBCR2_DAC12MODE) { + } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) { /* Both dac1 and dac2 are part of a range */ return -ENOSPC; #endif @@ -1252,19 +1253,19 @@ static int set_dac(struct task_struct *child, struct 
ppc_hw_breakpoint *bp_info) dbcr_dac(child) |= DBCR_DAC2R; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) dbcr_dac(child) |= DBCR_DAC2W; - child->thread.dac2 = (unsigned long)bp_info->addr; + child->thread.debug.dac2 = (unsigned long)bp_info->addr; #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 if (byte_enable) { - child->thread.dvc2 = + child->thread.debug.dvc2 = (unsigned long)bp_info->condition_value; - child->thread.dbcr2 |= + child->thread.debug.dbcr2 |= ((byte_enable << DBCR2_DVC2BE_SHIFT) | (condition_mode << DBCR2_DVC2M_SHIFT)); } #endif } else return -ENOSPC; - child->thread.dbcr0 |= DBCR0_IDM; + child->thread.debug.dbcr0 |= DBCR0_IDM; child->thread.regs->msr |= MSR_DE; return slot + 4; @@ -1276,32 +1277,32 @@ static int del_dac(struct task_struct *child, int slot) if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) return -ENOENT; - child->thread.dac1 = 0; + child->thread.debug.dac1 = 0; dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W); #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE - if (child->thread.dbcr2 & DBCR2_DAC12MODE) { - child->thread.dac2 = 0; - child->thread.dbcr2 &= ~DBCR2_DAC12MODE; + if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) { + child->thread.debug.dac2 = 0; + child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; } - child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); + child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); #endif #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - child->thread.dvc1 = 0; + child->thread.debug.dvc1 = 0; #endif } else if (slot == 2) { if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) return -ENOENT; #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE - if (child->thread.dbcr2 & DBCR2_DAC12MODE) + if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) /* Part of a range */ return -EINVAL; - child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); + child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); #endif #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 - child->thread.dvc2 = 0; + child->thread.debug.dvc2 = 0; #endif - child->thread.dac2 = 0; + child->thread.debug.dac2 = 0; dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W); } else return -EINVAL; @@ -1343,22 +1344,22 @@ static int set_dac_range(struct task_struct *child, return -EIO; } - if (child->thread.dbcr0 & + if (child->thread.debug.dbcr0 & (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W)) return -ENOSPC; if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) - child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); + child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) - child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); - child->thread.dac1 = bp_info->addr; - child->thread.dac2 = bp_info->addr2; + child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); + child->thread.debug.dac1 = bp_info->addr; + child->thread.debug.dac2 = bp_info->addr2; if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) - child->thread.dbcr2 |= DBCR2_DAC12M; + child->thread.debug.dbcr2 |= DBCR2_DAC12M; else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE) - child->thread.dbcr2 |= DBCR2_DAC12MX; + child->thread.debug.dbcr2 |= DBCR2_DAC12MX; else /* PPC_BREAKPOINT_MODE_MASK */ - child->thread.dbcr2 |= DBCR2_DAC12MM; + child->thread.debug.dbcr2 |= DBCR2_DAC12MM; child->thread.regs->msr |= MSR_DE; return 5; @@ -1489,9 +1490,9 @@ static long ppc_del_hwdebug(struct task_struct *child, long data) rc = del_dac(child, (int)data - 4); if (!rc) { - if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0, - child->thread.dbcr1)) { - child->thread.dbcr0 &= ~DBCR0_IDM; + if 
(!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0, + child->thread.debug.dbcr1)) { + child->thread.debug.dbcr0 &= ~DBCR0_IDM; child->thread.regs->msr &= ~MSR_DE; } } @@ -1554,10 +1555,10 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - tmp = ((unsigned long *)child->thread.fpr) - [fpidx * TS_FPRWIDTH]; + memcpy(&tmp, &child->thread.fp_state.fpr, + sizeof(long)); else - tmp = child->thread.fpscr.val; + tmp = child->thread.fp_state.fpscr; } ret = put_user(tmp, datalp); break; @@ -1587,10 +1588,10 @@ long arch_ptrace(struct task_struct *child, long request, flush_fp_to_thread(child); if (fpidx < (PT_FPSCR - PT_FPR0)) - ((unsigned long *)child->thread.fpr) - [fpidx * TS_FPRWIDTH] = data; + memcpy(&child->thread.fp_state.fpr, &data, + sizeof(long)); else - child->thread.fpscr.val = data; + child->thread.fp_state.fpscr = data; ret = 0; } break; @@ -1669,7 +1670,7 @@ long arch_ptrace(struct task_struct *child, long request, if (addr > 0) break; #ifdef CONFIG_PPC_ADV_DEBUG_REGS - ret = put_user(child->thread.dac1, datalp); + ret = put_user(child->thread.debug.dac1, datalp); #else dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | (child->thread.hw_brk.type & HW_BRK_TYPE_DABR)); diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index f51599e941c..f52b7db327c 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c @@ -43,7 +43,6 @@ #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) #define FPRHALF(i) (((i) - PT_FPR0) & 1) #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i) -#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0)) long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) @@ -105,7 +104,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. */ - tmp = ((unsigned int *)child->thread.fpr) + tmp = ((unsigned int *)child->thread.fp_state.fpr) [FPRINDEX(index)]; } ret = put_user((unsigned int)tmp, (u32 __user *)data); @@ -147,8 +146,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if (numReg >= PT_FPR0) { flush_fp_to_thread(child); /* get 64 bit FPR */ - tmp = ((u64 *)child->thread.fpr) - [FPRINDEX_3264(numReg)]; + tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0]; } else { /* register within PT_REGS struct */ unsigned long tmp2; ret = ptrace_get_reg(child, numReg, &tmp2); @@ -207,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, * to be an array of unsigned int (32 bits) - the * index passed in is based on this assumption. */ - ((unsigned int *)child->thread.fpr) + ((unsigned int *)child->thread.fp_state.fpr) [FPRINDEX(index)] = data; ret = 0; } @@ -251,8 +249,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, u64 *tmp; flush_fp_to_thread(child); /* get 64 bit FPR ... */ - tmp = &(((u64 *)child->thread.fpr) - [FPRINDEX_3264(numReg)]); + tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0]; /* ... 
write the 32 bit part we want */ ((u32 *)tmp)[index % 2] = data; ret = 0; @@ -269,7 +266,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, if (addr > 0) break; #ifdef CONFIG_PPC_ADV_DEBUG_REGS - ret = put_user(child->thread.dac1, (u32 __user *)data); + ret = put_user(child->thread.debug.dac1, (u32 __user *)data); #else dabr_fake = ( (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 6e7b7cdeec6..7d4c7172f38 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -223,7 +223,7 @@ unsigned long get_phb_buid(struct device_node *phb) static int phb_set_bus_ranges(struct device_node *dev, struct pci_controller *phb) { - const int *bus_range; + const __be32 *bus_range; unsigned int len; bus_range = of_get_property(dev, "bus-range", &len); @@ -231,8 +231,8 @@ static int phb_set_bus_ranges(struct device_node *dev, return 1; } - phb->first_busno = bus_range[0]; - phb->last_busno = bus_range[1]; + phb->first_busno = be32_to_cpu(bus_range[0]); + phb->last_busno = be32_to_cpu(bus_range[1]); return 0; } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 3d261c071fc..febc80445d2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -62,8 +62,6 @@ #include <mm/mmu_decl.h> #include <asm/fadump.h> -#include "setup.h" - #ifdef DEBUG #include <asm/udbg.h> #define DBG(fmt...) udbg_printf(fmt) diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h deleted file mode 100644 index 4c67ad7fae0..00000000000 --- a/arch/powerpc/kernel/setup.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef _POWERPC_KERNEL_SETUP_H -#define _POWERPC_KERNEL_SETUP_H - -void check_for_initrd(void); -void do_init_bootmem(void); -void setup_panic(void); -extern int do_early_xmon; - -#endif /* _POWERPC_KERNEL_SETUP_H */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index a4bbcae7257..b903dc5cf94 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -40,8 +40,6 @@ #include <asm/mmu_context.h> #include <asm/epapr_hcalls.h> -#include "setup.h" - #define DBG(fmt...) extern void bootx_init(unsigned long r4, unsigned long phys); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 278ca93e1f2..4085aaa9478 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -68,8 +68,6 @@ #include <asm/hugetlb.h> #include <asm/epapr_hcalls.h> -#include "setup.h" - #ifdef DEBUG #define DBG(fmt...) 
udbg_printf(fmt) #else diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index bebdf1a1a54..749778e0a69 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -265,27 +265,27 @@ struct rt_sigframe { unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < (ELF_NFPREG - 1) ; i++) buf[i] = task->thread.TS_FPR(i); - memcpy(&buf[i], &task->thread.fpscr, sizeof(double)); + buf[i] = task->thread.fp_state.fpscr; return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); } unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) return 1; for (i = 0; i < (ELF_NFPREG - 1) ; i++) task->thread.TS_FPR(i) = buf[i]; - memcpy(&task->thread.fpscr, &buf[i], sizeof(double)); + task->thread.fp_state.fpscr = buf[i]; return 0; } @@ -293,25 +293,25 @@ unsigned long copy_fpr_from_user(struct task_struct *task, unsigned long copy_vsx_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < ELF_NVSRHALFREG; i++) - buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET]; + buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); } unsigned long copy_vsx_from_user(struct task_struct *task, void __user *from) { - double buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) return 1; for (i = 0; i < ELF_NVSRHALFREG ; i++) - task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i]; + task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return 0; } @@ -319,27 +319,27 @@ unsigned long copy_vsx_from_user(struct task_struct *task, unsigned long copy_transact_fpr_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < (ELF_NFPREG - 1) ; i++) buf[i] = task->thread.TS_TRANS_FPR(i); - memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double)); + buf[i] = task->thread.transact_fp.fpscr; return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); } unsigned long copy_transact_fpr_from_user(struct task_struct *task, void __user *from) { - double buf[ELF_NFPREG]; + u64 buf[ELF_NFPREG]; int i; if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) return 1; for (i = 0; i < (ELF_NFPREG - 1) ; i++) task->thread.TS_TRANS_FPR(i) = buf[i]; - memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double)); + task->thread.transact_fp.fpscr = buf[i]; return 0; } @@ -347,25 +347,25 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task, unsigned long copy_transact_vsx_to_user(void __user *to, struct task_struct *task) { - double buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; /* save FPR copy to local buffer then write to the thread_struct */ for (i = 0; i < ELF_NVSRHALFREG; i++) - buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET]; + buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET]; return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); } unsigned long copy_transact_vsx_from_user(struct task_struct *task, void __user *from) { - double 
buf[ELF_NVSRHALFREG]; + u64 buf[ELF_NVSRHALFREG]; int i; if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) return 1; for (i = 0; i < ELF_NVSRHALFREG ; i++) - task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i]; + task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i]; return 0; } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ @@ -373,14 +373,14 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task, inline unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task) { - return __copy_to_user(to, task->thread.fpr, + return __copy_to_user(to, task->thread.fp_state.fpr, ELF_NFPREG * sizeof(double)); } inline unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from) { - return __copy_from_user(task->thread.fpr, from, + return __copy_from_user(task->thread.fp_state.fpr, from, ELF_NFPREG * sizeof(double)); } @@ -388,14 +388,14 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task, inline unsigned long copy_transact_fpr_to_user(void __user *to, struct task_struct *task) { - return __copy_to_user(to, task->thread.transact_fpr, + return __copy_to_user(to, task->thread.transact_fp.fpr, ELF_NFPREG * sizeof(double)); } inline unsigned long copy_transact_fpr_from_user(struct task_struct *task, void __user *from) { - return __copy_from_user(task->thread.transact_fpr, from, + return __copy_from_user(task->thread.transact_fp.fpr, from, ELF_NFPREG * sizeof(double)); } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ @@ -423,7 +423,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); - if (__copy_to_user(&frame->mc_vregs, current->thread.vr, + if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; /* set MSR_VEC in the saved MSR value to indicate that @@ -534,17 +534,17 @@ static int save_tm_user_regs(struct pt_regs *regs, /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); - if (__copy_to_user(&frame->mc_vregs, current->thread.vr, + if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; if (msr & MSR_VEC) { if (__copy_to_user(&tm_frame->mc_vregs, - current->thread.transact_vr, + ¤t->thread.transact_vr, ELF_NVRREG * sizeof(vector128))) return 1; } else { if (__copy_to_user(&tm_frame->mc_vregs, - current->thread.vr, + ¤t->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; } @@ -692,11 +692,12 @@ static long restore_user_regs(struct pt_regs *regs, regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ - if (__copy_from_user(current->thread.vr, &sr->mc_vregs, + if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) - memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128)); + memset(¤t->thread.vr_state, 0, + ELF_NVRREG * sizeof(vector128)); /* Always get VRSAVE back */ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) @@ -722,7 +723,7 @@ static long restore_user_regs(struct pt_regs *regs, return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; #endif /* CONFIG_VSX */ /* * force the process to reload the FP registers from @@ -798,15 +799,16 @@ static long restore_tm_user_regs(struct pt_regs *regs, regs->msr &= ~MSR_VEC; if 
(msr & MSR_VEC) { /* restore altivec registers from the stack */ - if (__copy_from_user(current->thread.vr, &sr->mc_vregs, + if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, sizeof(sr->mc_vregs)) || - __copy_from_user(current->thread.transact_vr, + __copy_from_user(&current->thread.transact_vr, &tm_sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) { - memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128)); - memset(current->thread.transact_vr, 0, + memset(&current->thread.vr_state, 0, + ELF_NVRREG * sizeof(vector128)); + memset(&current->thread.transact_vr, 0, ELF_NVRREG * sizeof(vector128)); } @@ -838,8 +840,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) { - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; - current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; } #endif /* CONFIG_VSX */ @@ -891,7 +893,7 @@ static long restore_tm_user_regs(struct pt_regs *regs, #endif #ifdef CONFIG_PPC64 -int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s) +int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) { int err; @@ -1030,7 +1032,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, if (__put_user(0, &rt_sf->uc.uc_link)) goto badframe; - current->thread.fpscr.val = 0; /* turn off all fp exceptions */ + current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16); @@ -1045,8 +1047,9 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, regs->gpr[5] = (unsigned long) &rt_sf->uc; regs->gpr[6] = (unsigned long) rt_sf; regs->nip = (unsigned long) ka->sa.sa_handler; - /* enter the signal handler in big-endian mode */ + /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; + regs->msr |= (MSR_KERNEL & MSR_LE); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* Remove TM bits from thread's MSR. The MSR in the sigcontext * just indicates to userland that we were doing a transaction, but we @@ -1309,7 +1312,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, unsigned char tmp; unsigned long new_msr = regs->msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS - unsigned long new_dbcr0 = current->thread.dbcr0; + unsigned long new_dbcr0 = current->thread.debug.dbcr0; #endif for (i=0; i<ndbg; i++) { @@ -1324,7 +1327,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, } else { new_dbcr0 &= ~DBCR0_IC; if (!DBCR_ACTIVE_EVENTS(new_dbcr0, - current->thread.dbcr1)) { + current->thread.debug.dbcr1)) { new_msr &= ~MSR_DE; new_dbcr0 &= ~DBCR0_IDM; } @@ -1359,7 +1362,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, the user is really doing something wrong.
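The signal-frame hunks above move every access from flat thread_struct fields (thread.fpr, thread.vr, thread.dbcr0, ...) to grouped sub-structures (thread.fp_state, thread.vr_state, thread.debug). A minimal standalone sketch of the container shapes those accesses imply follows; the struct names and the two-element FPR width are inferred from this diff only, not taken from the real processor.h, and are assumptions.

#include <stdint.h>

typedef struct { uint32_t u[4]; } vector128;

struct thread_fp_state {                 /* accessed as thread.fp_state.* */
	uint64_t fpr[32][2];             /* fpr[i][TS_VSRLOWOFFSET] holds the VSX low half */
	uint64_t fpscr;
};

struct thread_vr_state {                 /* accessed as thread.vr_state.* */
	vector128 vr[32];
	vector128 vscr;
};

struct debug_reg {                       /* accessed as thread.debug.* */
	unsigned long dbcr0, dbcr1, dbcr2;
	unsigned long dbsr;
	unsigned long dac1;
};

int main(void)
{
	struct thread_fp_state fp = { .fpscr = 0 };   /* "turn off all fp exceptions" */
	return (int)fp.fpscr;
}

Grouping the state this way is what lets the save/restore paths in these hunks take a single pointer to the block instead of a whole thread_struct.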
*/ regs->msr = new_msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS - current->thread.dbcr0 = new_dbcr0; + current->thread.debug.dbcr0 = new_dbcr0; #endif if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) @@ -1462,7 +1465,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, regs->link = tramp; - current->thread.fpscr.val = 0; /* turn off all fp exceptions */ + current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index f93ec2835a1..b3c615764c9 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -103,7 +103,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, if (current->thread.used_vr) { flush_altivec_to_thread(current); /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ - err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128)); + err |= __copy_to_user(v_regs, ¤t->thread.vr_state, + 33 * sizeof(vector128)); /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg) * contains valid data. */ @@ -195,18 +196,18 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, if (current->thread.used_vr) { flush_altivec_to_thread(current); /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ - err |= __copy_to_user(v_regs, current->thread.vr, + err |= __copy_to_user(v_regs, ¤t->thread.vr_state, 33 * sizeof(vector128)); /* If VEC was enabled there are transactional VRs valid too, * else they're a copy of the checkpointed VRs. */ if (msr & MSR_VEC) err |= __copy_to_user(tm_v_regs, - current->thread.transact_vr, + ¤t->thread.transact_vr, 33 * sizeof(vector128)); else err |= __copy_to_user(tm_v_regs, - current->thread.vr, + ¤t->thread.vr_state, 33 * sizeof(vector128)); /* set MSR_VEC in the MSR value in the frame to indicate @@ -349,10 +350,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && (msr & MSR_VEC) != 0) - err |= __copy_from_user(current->thread.vr, v_regs, + err |= __copy_from_user(¤t->thread.vr_state, v_regs, 33 * sizeof(vector128)); else if (current->thread.used_vr) - memset(current->thread.vr, 0, 33 * sizeof(vector128)); + memset(¤t->thread.vr_state, 0, 33 * sizeof(vector128)); /* Always get VRSAVE back */ if (v_regs != NULL) err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); @@ -374,7 +375,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig, err |= copy_vsx_from_user(current, v_regs); else for (i = 0; i < 32 ; i++) - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; #endif return err; } @@ -468,14 +469,14 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, return -EFAULT; /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) { - err |= __copy_from_user(current->thread.vr, v_regs, + err |= __copy_from_user(¤t->thread.vr_state, v_regs, 33 * sizeof(vector128)); - err |= __copy_from_user(current->thread.transact_vr, tm_v_regs, + err |= __copy_from_user(¤t->thread.transact_vr, tm_v_regs, 33 * sizeof(vector128)); } else if (current->thread.used_vr) { - memset(current->thread.vr, 0, 33 * sizeof(vector128)); - memset(current->thread.transact_vr, 0, 33 * 
sizeof(vector128)); + memset(¤t->thread.vr_state, 0, 33 * sizeof(vector128)); + memset(¤t->thread.transact_vr, 0, 33 * sizeof(vector128)); } /* Always get VRSAVE back */ if (v_regs != NULL && tm_v_regs != NULL) { @@ -507,8 +508,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, err |= copy_transact_vsx_from_user(current, tm_v_regs); } else { for (i = 0; i < 32 ; i++) { - current->thread.fpr[i][TS_VSRLOWOFFSET] = 0; - current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; + current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; } } #endif @@ -747,7 +748,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, goto badframe; /* Make sure signal handler doesn't get spurious FP exceptions */ - current->thread.fpscr.val = 0; + current->thread.fp_state.fpscr = 0; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* Remove TM bits from thread's MSR. The MSR in the sigcontext * just indicates to userland that we were doing a transaction, but we @@ -773,8 +774,9 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, /* Set up "regs" so we "return" to the signal handler. */ err |= get_user(regs->nip, &funct_desc_ptr->entry); - /* enter the signal handler in big-endian mode */ + /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; + regs->msr |= (MSR_KERNEL & MSR_LE); regs->gpr[1] = newsp; err |= get_user(regs->gpr[2], &funct_desc_ptr->toc); regs->gpr[3] = signr; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 8e59abc237d..930cd8af350 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -844,18 +844,6 @@ void __cpu_die(unsigned int cpu) smp_ops->cpu_die(cpu); } -static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex); - -void cpu_hotplug_driver_lock() -{ - mutex_lock(&powerpc_cpu_hotplug_driver_mutex); -} - -void cpu_hotplug_driver_unlock() -{ - mutex_unlock(&powerpc_cpu_hotplug_driver_mutex); -} - void cpu_die(void) { if (ppc_md.cpu_die) diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S index 22045984835..988f38dced0 100644 --- a/arch/powerpc/kernel/swsusp_asm64.S +++ b/arch/powerpc/kernel/swsusp_asm64.S @@ -114,7 +114,9 @@ _GLOBAL(swsusp_arch_suspend) SAVE_SPECIAL(MSR) SAVE_SPECIAL(XER) #ifdef CONFIG_PPC_BOOK3S_64 +BEGIN_FW_FTR_SECTION SAVE_SPECIAL(SDR1) +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) #else SAVE_SPR(TCR) @@ -231,7 +233,9 @@ nothing_to_copy: /* can't use RESTORE_SPECIAL(MSR) */ ld r0, SL_MSR(r11) mtmsrd r0, 0 +BEGIN_FW_FTR_SECTION RESTORE_SPECIAL(SDR1) +END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) #else /* Restore SPRG1, be used to save paca */ ld r0, SL_SPRG1(r11) diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index cd809eaa8b5..ef47bcbd435 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -12,16 +12,15 @@ #include <asm/reg.h> #ifdef CONFIG_VSX -/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */ -#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ +/* See fpu.S, this is borrowed from there */ +#define __SAVE_32FPRS_VSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ - SAVE_32FPRS_TRANSACT(n,base); \ + SAVE_32FPRS(n,base); \ b 3f; \ -2: SAVE_32VSRS_TRANSACT(n,c,base); \ +2: SAVE_32VSRS(n,c,base); \ 3: -/* ...and this is just plain borrowed from there. 
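The "enter the signal handler in native-endian mode" change appears in both the 32-bit and 64-bit signal paths above: instead of always clearing MSR_LE, the endian bit is now copied from MSR_KERNEL, so a little-endian kernel delivers signals little-endian. A user-space sketch of the bit manipulation; the MSR_KERNEL value used here is a stand-in for illustration, not the kernel's real constant.

#include <stdio.h>

#define MSR_LE      0x1UL
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define MSR_KERNEL  (0x8000000000001000UL | MSR_LE)   /* stand-in value */
#else
#define MSR_KERNEL  0x8000000000001000UL              /* stand-in value */
#endif

int main(void)
{
	unsigned long msr = MSR_LE;          /* whatever the interrupted task had */

	msr &= ~MSR_LE;                      /* the old code stopped here: always big-endian */
	msr |= (MSR_KERNEL & MSR_LE);        /* new: adopt the kernel's own endianness */
	printf("handler runs %s-endian\n", (msr & MSR_LE) ? "little" : "big");
	return 0;
}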
*/ #define __REST_32FPRS_VSRS(n,c,base) \ BEGIN_FTR_SECTION \ b 2f; \ @@ -31,11 +30,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ 2: REST_32VSRS(n,c,base); \ 3: #else -#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base) -#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base) +#define __SAVE_32FPRS_VSRS(n,c,base) SAVE_32FPRS(n, base) +#define __REST_32FPRS_VSRS(n,c,base) REST_32FPRS(n, base) #endif -#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \ - __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base) +#define SAVE_32FPRS_VSRS(n,c,base) \ + __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base) #define REST_32FPRS_VSRS(n,c,base) \ __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base) @@ -107,7 +106,7 @@ DSCR_DEFAULT: _GLOBAL(tm_reclaim) mfcr r6 mflr r0 - std r6, 8(r1) + stw r6, 8(r1) std r0, 16(r1) std r2, 40(r1) stdu r1, -TM_FRAME_SIZE(r1) @@ -157,10 +156,11 @@ _GLOBAL(tm_reclaim) andis. r0, r4, MSR_VEC@h beq dont_backup_vec - SAVE_32VRS_TRANSACT(0, r6, r3) /* r6 scratch, r3 thread */ + addi r7, r3, THREAD_TRANSACT_VRSTATE + SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */ mfvscr vr0 - li r6, THREAD_TRANSACT_VSCR - stvx vr0, r3, r6 + li r6, VRSTATE_VSCR + stvx vr0, r7, r6 dont_backup_vec: mfspr r0, SPRN_VRSAVE std r0, THREAD_TRANSACT_VRSAVE(r3) @@ -168,10 +168,11 @@ dont_backup_vec: andi. r0, r4, MSR_FP beq dont_backup_fp - SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3) /* r6 scratch, r3 thread */ + addi r7, r3, THREAD_TRANSACT_FPSTATE + SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */ mffs fr0 - stfd fr0,THREAD_TRANSACT_FPSCR(r3) + stfd fr0,FPSTATE_FPSCR(r7) dont_backup_fp: /* The moment we treclaim, ALL of our GPRs will switch @@ -284,7 +285,7 @@ dont_backup_fp: REST_NVGPRS(r1) addi r1, r1, TM_FRAME_SIZE - ld r4, 8(r1) + lwz r4, 8(r1) ld r0, 16(r1) mtcr r4 mtlr r0 @@ -309,7 +310,7 @@ dont_backup_fp: _GLOBAL(tm_recheckpoint) mfcr r5 mflr r0 - std r5, 8(r1) + stw r5, 8(r1) std r0, 16(r1) std r2, 40(r1) stdu r1, -TM_FRAME_SIZE(r1) @@ -358,10 +359,11 @@ _GLOBAL(tm_recheckpoint) andis. r0, r4, MSR_VEC@h beq dont_restore_vec - li r5, THREAD_VSCR - lvx vr0, r3, r5 + addi r8, r3, THREAD_VRSTATE + li r5, VRSTATE_VSCR + lvx vr0, r8, r5 mtvscr vr0 - REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ + REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */ dont_restore_vec: ld r5, THREAD_VRSAVE(r3) mtspr SPRN_VRSAVE, r5 @@ -370,9 +372,10 @@ dont_restore_vec: andi. r0, r4, MSR_FP beq dont_restore_fp - lfd fr0, THREAD_FPSCR(r3) + addi r8, r3, THREAD_FPSTATE + lfd fr0, FPSTATE_FPSCR(r8) MTFSF_L(fr0) - REST_32FPRS_VSRS(0, R4, R3) + REST_32FPRS_VSRS(0, R4, R8) dont_restore_fp: mtmsr r6 /* FP/Vec off again! 
*/ @@ -441,7 +444,7 @@ restore_gprs: REST_NVGPRS(r1) addi r1, r1, TM_FRAME_SIZE - ld r4, 8(r1) + lwz r4, 8(r1) ld r0, 16(r1) mtcr r4 mtlr r0 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f783c932fae..907a472f9a9 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -351,8 +351,8 @@ static inline int check_io_access(struct pt_regs *regs) #define REASON_TRAP ESR_PTR /* single-step stuff */ -#define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC) -#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC) +#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) +#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) #else /* On non-4xx, the reason for the machine check or program @@ -816,7 +816,7 @@ static void parse_fpe(struct pt_regs *regs) flush_fp_to_thread(current); - code = __parse_fpscr(current->thread.fpscr.val); + code = __parse_fpscr(current->thread.fp_state.fpscr); _exception(SIGFPE, regs, code, regs->nip); } @@ -1018,6 +1018,13 @@ static int emulate_instruction(struct pt_regs *regs) return emulate_isel(regs, instword); } + /* Emulate sync instruction variants */ + if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) { + PPC_WARN_EMULATED(sync, regs); + asm volatile("sync"); + return 0; + } + #ifdef CONFIG_PPC64 /* Emulate the mfspr rD, DSCR. */ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) == @@ -1069,7 +1076,7 @@ static int emulate_math(struct pt_regs *regs) return 0; case 1: { int code = 0; - code = __parse_fpscr(current->thread.fpscr.val); + code = __parse_fpscr(current->thread.fp_state.fpscr); _exception(SIGFPE, regs, code, regs->nip); return 0; } @@ -1371,8 +1378,6 @@ void facility_unavailable_exception(struct pt_regs *regs) #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -extern void do_load_up_fpu(struct pt_regs *regs); - void fp_unavailable_tm(struct pt_regs *regs) { /* Note: This does not handle any kind of FP laziness. */ @@ -1403,8 +1408,6 @@ void fp_unavailable_tm(struct pt_regs *regs) } #ifdef CONFIG_ALTIVEC -extern void do_load_up_altivec(struct pt_regs *regs); - void altivec_unavailable_tm(struct pt_regs *regs) { /* See the comments in fp_unavailable_tm(). 
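The new hunk in emulate_instruction() above emulates the sync instruction family by masking off the variant bits, comparing against the base opcode, and issuing a real sync on the guest's behalf. The standalone sketch below shows the same match-and-execute pattern; the opcode and mask constants are assumed values for illustration, not quoted from the kernel headers.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define INST_SYNC       0x7c0004acu   /* base "sync" encoding (assumed) */
#define INST_SYNC_MASK  0xfc0007feu   /* ignores the variant field (assumed) */

static int emulate_sync(uint32_t instword)
{
	if ((instword & INST_SYNC_MASK) != INST_SYNC)
		return -1;                              /* not a sync variant */
	atomic_thread_fence(memory_order_seq_cst);      /* stand-in for asm("sync") */
	return 0;
}

int main(void)
{
	printf("%d %d\n", emulate_sync(0x7c2004acu),    /* lwsync-style variant */
	                  emulate_sync(0x60000000u));   /* unrelated instruction */
	return 0;
}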
This function operates @@ -1465,7 +1468,8 @@ void SoftwareEmulation(struct pt_regs *regs) if (!user_mode(regs)) { debugger(regs); - die("Kernel Mode Software FPU Emulation", regs, SIGFPE); + die("Kernel Mode Unimplemented Instruction or SW FPU Emulation", + regs, SIGFPE); } if (!emulate_math(regs)) @@ -1486,7 +1490,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status) if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE - current->thread.dbcr2 &= ~DBCR2_DAC12MODE; + current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; #endif do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT, 5); @@ -1497,24 +1501,24 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status) 6); changed |= 0x01; } else if (debug_status & DBSR_IAC1) { - current->thread.dbcr0 &= ~DBCR0_IAC1; + current->thread.debug.dbcr0 &= ~DBCR0_IAC1; dbcr_iac_range(current) &= ~DBCR_IAC12MODE; do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT, 1); changed |= 0x01; } else if (debug_status & DBSR_IAC2) { - current->thread.dbcr0 &= ~DBCR0_IAC2; + current->thread.debug.dbcr0 &= ~DBCR0_IAC2; do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT, 2); changed |= 0x01; } else if (debug_status & DBSR_IAC3) { - current->thread.dbcr0 &= ~DBCR0_IAC3; + current->thread.debug.dbcr0 &= ~DBCR0_IAC3; dbcr_iac_range(current) &= ~DBCR_IAC34MODE; do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT, 3); changed |= 0x01; } else if (debug_status & DBSR_IAC4) { - current->thread.dbcr0 &= ~DBCR0_IAC4; + current->thread.debug.dbcr0 &= ~DBCR0_IAC4; do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT, 4); changed |= 0x01; @@ -1524,19 +1528,20 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status) * Check all other debug flags and see if that bit needs to be turned * back on or not. */ - if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1)) + if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, + current->thread.debug.dbcr1)) regs->msr |= MSR_DE; else /* Make sure the IDM flag is off */ - current->thread.dbcr0 &= ~DBCR0_IDM; + current->thread.debug.dbcr0 &= ~DBCR0_IDM; if (changed & 0x01) - mtspr(SPRN_DBCR0, current->thread.dbcr0); + mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); } void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) { - current->thread.dbsr = debug_status; + current->thread.debug.dbsr = debug_status; /* Hack alert: On BookE, Branch Taken stops on the branch itself, while * on server, it stops on the target of the branch. 
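handle_debug() above now keeps its bookkeeping in thread.debug: after one debug event is serviced, MSR_DE stays set only if other events remain armed in dbcr0/dbcr1, otherwise DBCR0_IDM is dropped as well. A simplified standalone rendering of that decision; the bit values and the active-events test are illustrative, not the kernel's DBCR_ACTIVE_EVENTS macro.

#include <stdbool.h>
#include <stdio.h>

#define DBCR0_IC   0x08000000u   /* instruction completion (single step) */
#define DBCR0_BT   0x04000000u   /* branch taken */
#define DBCR0_IDM  0x40000000u   /* internal debug mode */

static bool active_events(unsigned int dbcr0, unsigned int dbcr1)
{
	(void)dbcr1;
	return dbcr0 & (DBCR0_IC | DBCR0_BT);    /* simplified test */
}

int main(void)
{
	unsigned int dbcr0 = DBCR0_IDM | DBCR0_IC;
	bool keep_msr_de;

	dbcr0 &= ~DBCR0_IC;                      /* the serviced event */
	keep_msr_de = active_events(dbcr0, 0);
	if (!keep_msr_de)
		dbcr0 &= ~DBCR0_IDM;             /* nothing left armed: leave debug mode */
	printf("dbcr0=%#x keep MSR_DE=%d\n", dbcr0, keep_msr_de);
	return 0;
}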
In order to simulate @@ -1553,8 +1558,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) /* Do the single step trick only when coming from userspace */ if (user_mode(regs)) { - current->thread.dbcr0 &= ~DBCR0_BT; - current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; + current->thread.debug.dbcr0 &= ~DBCR0_BT; + current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; regs->msr |= MSR_DE; return; } @@ -1582,13 +1587,13 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) return; if (user_mode(regs)) { - current->thread.dbcr0 &= ~DBCR0_IC; - if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, - current->thread.dbcr1)) + current->thread.debug.dbcr0 &= ~DBCR0_IC; + if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, + current->thread.debug.dbcr1)) regs->msr |= MSR_DE; else /* Make sure the IDM bit is off */ - current->thread.dbcr0 &= ~DBCR0_IDM; + current->thread.debug.dbcr0 &= ~DBCR0_IDM; } _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); @@ -1634,7 +1639,7 @@ void altivec_assist_exception(struct pt_regs *regs) /* XXX quick hack for now: set the non-Java bit in the VSCR */ printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " "in %s at %lx\n", current->comm, regs->nip); - current->thread.vscr.u[3] |= 0x10000; + current->thread.vr_state.vscr.u[3] |= 0x10000; } } #endif /* CONFIG_ALTIVEC */ @@ -1815,6 +1820,7 @@ struct ppc_emulated ppc_emulated = { WARN_EMULATED_SETUP(popcntb), WARN_EMULATED_SETUP(spe), WARN_EMULATED_SETUP(string), + WARN_EMULATED_SETUP(sync), WARN_EMULATED_SETUP(unaligned), #ifdef CONFIG_MATH_EMULATION WARN_EMULATED_SETUP(math), diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 1d9c92621b3..094e45c16a1 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -34,8 +34,7 @@ #include <asm/firmware.h> #include <asm/vdso.h> #include <asm/vdso_datapage.h> - -#include "setup.h" +#include <asm/setup.h> #undef DEBUG diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index f223409629b..e58ee10fa5c 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -4,7 +4,11 @@ */ #include <asm/vdso.h> +#ifdef __LITTLE_ENDIAN__ +OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle") +#else OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc") +#endif OUTPUT_ARCH(powerpc:common) ENTRY(_start) diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index e4863819663..64fb183a47c 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -4,7 +4,11 @@ */ #include <asm/vdso.h> +#ifdef __LITTLE_ENDIAN__ +OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle") +#else OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc") +#endif OUTPUT_ARCH(powerpc:common64) ENTRY(_start) diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c index 604d0947cb2..c4bfadb2606 100644 --- a/arch/powerpc/kernel/vecemu.c +++ b/arch/powerpc/kernel/vecemu.c @@ -271,7 +271,7 @@ int emulate_altivec(struct pt_regs *regs) vb = (instr >> 11) & 0x1f; vc = (instr >> 6) & 0x1f; - vrs = current->thread.vr; + vrs = current->thread.vr_state.vr; switch (instr & 0x3f) { case 10: switch (vc) { @@ -320,12 +320,12 @@ int emulate_altivec(struct pt_regs *regs) case 14: /* vctuxs */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va, - ¤t->thread.vscr.u[3]); + ¤t->thread.vr_state.vscr.u[3]); 
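The vecemu.c hunks above now pass &current->thread.vr_state.vscr.u[3] as the saturation word for ctuxs()/ctsxs(). Those helpers perform a saturating float-to-integer conversion that records overflow in VSCR; below is a compact user-space version of the signed case, simplified and not the kernel's rounding-mode-aware implementation.

#include <stdint.h>
#include <stdio.h>

/* convert to signed 32-bit, clamping and flagging saturation like vctsxs */
static int32_t ctsxs(float f, uint32_t *vscr_sat)
{
	if (f >= 2147483648.0f)  { *vscr_sat |= 1; return INT32_MAX; }
	if (f < -2147483648.0f)  { *vscr_sat |= 1; return INT32_MIN; }
	return (int32_t)f;
}

int main(void)
{
	uint32_t sat = 0;
	int32_t v = ctsxs(3e9f, &sat);

	printf("%d sat=%u\n", v, sat);
	return 0;
}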
break; case 15: /* vctsxs */ for (i = 0; i < 4; ++i) vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va, - ¤t->thread.vscr.u[3]); + ¤t->thread.vr_state.vscr.u[3]); break; default: return -EINVAL; diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 9e20999aaef..0458a9aaba9 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -8,29 +8,6 @@ #include <asm/ptrace.h> #ifdef CONFIG_PPC_TRANSACTIONAL_MEM -/* - * Wrapper to call load_up_altivec from C. - * void do_load_up_altivec(struct pt_regs *regs); - */ -_GLOBAL(do_load_up_altivec) - mflr r0 - std r0, 16(r1) - stdu r1, -112(r1) - - subi r6, r3, STACK_FRAME_OVERHEAD - /* load_up_altivec expects r12=MSR, r13=PACA, and returns - * with r12 = new MSR. - */ - ld r12,_MSR(r6) - GET_PACA(r13) - bl load_up_altivec - std r12,_MSR(r6) - - ld r0, 112+16(r1) - addi r1, r1, 112 - mtlr r0 - blr - /* void do_load_up_transact_altivec(struct thread_struct *thread) * * This is similar to load_up_altivec but for the transactional version of the @@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec) li r4,1 stw r4,THREAD_USED_VR(r3) - li r10,THREAD_TRANSACT_VSCR + li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR lvx vr0,r10,r3 mtvscr vr0 - REST_32VRS_TRANSACT(0,r4,r3) + addi r10,r3,THREAD_TRANSACT_VRSTATE + REST_32VRS(0,r4,r10) /* Disable VEC again. */ MTMSRD(r6) @@ -59,12 +37,36 @@ _GLOBAL(do_load_up_transact_altivec) #endif /* - * load_up_altivec(unused, unused, tsk) + * Load state from memory into VMX registers including VSCR. + * Assumes the caller has enabled VMX in the MSR. + */ +_GLOBAL(load_vr_state) + li r4,VRSTATE_VSCR + lvx vr0,r4,r3 + mtvscr vr0 + REST_32VRS(0,r4,r3) + blr + +/* + * Store VMX state into memory, including VSCR. + * Assumes the caller has enabled VMX in the MSR. + */ +_GLOBAL(store_vr_state) + SAVE_32VRS(0, r4, r3) + mfvscr vr0 + li r4, VRSTATE_VSCR + stvx vr0, r4, r3 + blr + +/* * Disable VMX for the task which had it previously, * and save its vector registers in its thread_struct. * Enables the VMX for use in the kernel on return. * On SMP we know the VMX is free, since we give it up every * switch (ie, no lazy save of the vector registers). + * + * Note that on 32-bit this can only use registers that will be + * restored by fast_exception_return, i.e. r3 - r6, r10 and r11. 
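The new load_vr_state()/store_vr_state() entry points above take a pointer to a vr_state block rather than a whole thread_struct, matching how the surrounding assembly now forms a base pointer (addi rX, r3, THREAD_..._VRSTATE) before the SAVE_32VRS/REST_32VRS macros. Below is a plain-C stand-in for the data flow only; the real routines are the assembler above.

#include <string.h>

typedef struct { unsigned int u[4]; } vector128;

struct thread_vr_state {
	vector128 vr[32];
	vector128 vscr;
};

/* CPU-side VMX registers, modelled as plain memory for illustration */
static vector128 cpu_vr[32], cpu_vscr;

static void store_vr_state(struct thread_vr_state *v)
{
	memcpy(v->vr, cpu_vr, sizeof(v->vr));   /* SAVE_32VRS(...) */
	v->vscr = cpu_vscr;                     /* mfvscr + stvx at VRSTATE_VSCR */
}

static void load_vr_state(const struct thread_vr_state *v)
{
	cpu_vscr = v->vscr;                     /* lvx at VRSTATE_VSCR + mtvscr */
	memcpy(cpu_vr, v->vr, sizeof(cpu_vr));  /* REST_32VRS(...) */
}

int main(void)
{
	struct thread_vr_state st = { 0 };

	store_vr_state(&st);
	load_vr_state(&st);
	return 0;
}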
*/ _GLOBAL(load_up_altivec) mfmsr r5 /* grab the current MSR */ @@ -90,10 +92,11 @@ _GLOBAL(load_up_altivec) /* Save VMX state to last_task_used_altivec's THREAD struct */ toreal(r4) addi r4,r4,THREAD - SAVE_32VRS(0,r5,r4) + addi r6,r4,THREAD_VRSTATE + SAVE_32VRS(0,r5,r6) mfvscr vr0 - li r10,THREAD_VSCR - stvx vr0,r10,r4 + li r10,VRSTATE_VSCR + stvx vr0,r10,r6 /* Disable VMX for last_task_used_altivec */ PPC_LL r5,PT_REGS(r4) toreal(r5) @@ -125,12 +128,13 @@ _GLOBAL(load_up_altivec) oris r12,r12,MSR_VEC@h std r12,_MSR(r1) #endif + addi r6,r5,THREAD_VRSTATE li r4,1 - li r10,THREAD_VSCR + li r10,VRSTATE_VSCR stw r4,THREAD_USED_VR(r5) - lvx vr0,r10,r5 + lvx vr0,r10,r6 mtvscr vr0 - REST_32VRS(0,r4,r5) + REST_32VRS(0,r4,r6) #ifndef CONFIG_SMP /* Update last_task_used_altivec to 'current' */ subi r4,r5,THREAD /* Back to 'current' */ @@ -165,12 +169,16 @@ _GLOBAL(giveup_altivec) PPC_LCMPI 0,r3,0 beqlr /* if no previous owner, done */ addi r3,r3,THREAD /* want THREAD of task */ + PPC_LL r7,THREAD_VRSAVEAREA(r3) PPC_LL r5,PT_REGS(r3) - PPC_LCMPI 0,r5,0 - SAVE_32VRS(0,r4,r3) + PPC_LCMPI 0,r7,0 + bne 2f + addi r7,r3,THREAD_VRSTATE +2: PPC_LCMPI 0,r5,0 + SAVE_32VRS(0,r4,r7) mfvscr vr0 - li r4,THREAD_VSCR - stvx vr0,r4,r3 + li r4,VRSTATE_VSCR + stvx vr0,r4,r7 beq 1f PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) #ifdef CONFIG_VSX diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 408956fbf4f..e7d0c88f621 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1419,8 +1419,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) /* needed to ensure proper operation of coherent allocations * later, in case driver doesn't set it explicitly */ - dma_set_mask(&viodev->dev, DMA_BIT_MASK(64)); - dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64)); + dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64)); } /* register with generic device framework */ @@ -1537,12 +1536,12 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, dn = dev->of_node; if (!dn) { - strcat(buf, "\n"); + strcpy(buf, "\n"); return strlen(buf); } cp = of_get_property(dn, "compatible", NULL); if (!cp) { - strcat(buf, "\n"); + strcpy(buf, "\n"); return strlen(buf); } diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 2f5c6b6d687..93221e87b91 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c @@ -31,13 +31,13 @@ #include "44x_tlb.h" #include "booke.h" -void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu) { kvmppc_booke_vcpu_load(vcpu, cpu); kvmppc_44x_tlb_load(vcpu); } -void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu) { kvmppc_44x_tlb_put(vcpu); kvmppc_booke_vcpu_put(vcpu); @@ -114,29 +114,32 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, return 0; } -void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { - kvmppc_get_sregs_ivor(vcpu, sregs); + return kvmppc_get_sregs_ivor(vcpu, sregs); } -int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { return kvmppc_set_sregs_ivor(vcpu, sregs); } -int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, - union kvmppc_one_reg *val) +static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { 
return -EINVAL; } -int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, - union kvmppc_one_reg *val) +static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { return -EINVAL; } -struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm, + unsigned int id) { struct kvmppc_vcpu_44x *vcpu_44x; struct kvm_vcpu *vcpu; @@ -167,7 +170,7 @@ out: return ERR_PTR(err); } -void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); @@ -176,28 +179,53 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) kmem_cache_free(kvm_vcpu_cache, vcpu_44x); } -int kvmppc_core_init_vm(struct kvm *kvm) +static int kvmppc_core_init_vm_44x(struct kvm *kvm) { return 0; } -void kvmppc_core_destroy_vm(struct kvm *kvm) +static void kvmppc_core_destroy_vm_44x(struct kvm *kvm) { } +static struct kvmppc_ops kvm_ops_44x = { + .get_sregs = kvmppc_core_get_sregs_44x, + .set_sregs = kvmppc_core_set_sregs_44x, + .get_one_reg = kvmppc_get_one_reg_44x, + .set_one_reg = kvmppc_set_one_reg_44x, + .vcpu_load = kvmppc_core_vcpu_load_44x, + .vcpu_put = kvmppc_core_vcpu_put_44x, + .vcpu_create = kvmppc_core_vcpu_create_44x, + .vcpu_free = kvmppc_core_vcpu_free_44x, + .mmu_destroy = kvmppc_mmu_destroy_44x, + .init_vm = kvmppc_core_init_vm_44x, + .destroy_vm = kvmppc_core_destroy_vm_44x, + .emulate_op = kvmppc_core_emulate_op_44x, + .emulate_mtspr = kvmppc_core_emulate_mtspr_44x, + .emulate_mfspr = kvmppc_core_emulate_mfspr_44x, +}; + static int __init kvmppc_44x_init(void) { int r; r = kvmppc_booke_init(); if (r) - return r; + goto err_out; + + r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE); + if (r) + goto err_out; + kvm_ops_44x.owner = THIS_MODULE; + kvmppc_pr_ops = &kvm_ops_44x; - return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE); +err_out: + return r; } static void __exit kvmppc_44x_exit(void) { + kvmppc_pr_ops = NULL; kvmppc_booke_exit(); } diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 35ec0a8547d..92c9ab4bcfe 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c @@ -91,8 +91,8 @@ static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn) return EMULATE_DONE; } -int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int inst, int *advance) +int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int dcrn = get_dcrn(inst); @@ -152,7 +152,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) +int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) { int emulated = EMULATE_DONE; @@ -172,7 +172,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) return emulated; } -int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) +int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) { int emulated = EMULATE_DONE; diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index ed038544814..0deef1082e0 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c @@ -268,7 +268,7 @@ static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, 
trace_kvm_stlb_inval(stlb_index); } -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); int i; diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index ffaef2cb101..141b2027189 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -6,6 +6,7 @@ source "virt/kvm/Kconfig" menuconfig VIRTUALIZATION bool "Virtualization" + depends on !CPU_LITTLE_ENDIAN ---help--- Say Y here to get to see options for using your Linux host to run other operating systems inside virtual machines (guests). @@ -34,17 +35,20 @@ config KVM_BOOK3S_64_HANDLER bool select KVM_BOOK3S_HANDLER -config KVM_BOOK3S_PR +config KVM_BOOK3S_PR_POSSIBLE bool select KVM_MMIO select MMU_NOTIFIER +config KVM_BOOK3S_HV_POSSIBLE + bool + config KVM_BOOK3S_32 tristate "KVM support for PowerPC book3s_32 processors" depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT select KVM select KVM_BOOK3S_32_HANDLER - select KVM_BOOK3S_PR + select KVM_BOOK3S_PR_POSSIBLE ---help--- Support running unmodified book3s_32 guest kernels in virtual machines on book3s_32 host processors. @@ -59,6 +63,7 @@ config KVM_BOOK3S_64 depends on PPC_BOOK3S_64 select KVM_BOOK3S_64_HANDLER select KVM + select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE ---help--- Support running unmodified book3s_64 and book3s_32 guest kernels in virtual machines on book3s_64 host processors. @@ -69,8 +74,9 @@ config KVM_BOOK3S_64 If unsure, say N. config KVM_BOOK3S_64_HV - bool "KVM support for POWER7 and PPC970 using hypervisor mode in host" + tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" depends on KVM_BOOK3S_64 + select KVM_BOOK3S_HV_POSSIBLE select MMU_NOTIFIER select CMA ---help--- @@ -89,9 +95,20 @@ config KVM_BOOK3S_64_HV If unsure, say N. config KVM_BOOK3S_64_PR - def_bool y - depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV - select KVM_BOOK3S_PR + tristate "KVM support without using hypervisor mode in host" + depends on KVM_BOOK3S_64 + select KVM_BOOK3S_PR_POSSIBLE + ---help--- + Support running guest kernels in virtual machines on processors + without using hypervisor mode in the host, by running the + guest in user mode (problem state) and emulating all + privileged instructions and registers. + + This is not as fast as using hypervisor mode, but works on + machines where hypervisor mode is not available or not usable, + and can emulate processors that are different from the host + processor, including emulating 32-bit processors on a 64-bit + host. 
config KVM_BOOKE_HV bool diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 6646c952c5e..ce569b6bf4d 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -53,41 +53,51 @@ kvm-e500mc-objs := \ e500_emulate.o kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) -kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ - $(KVM)/coalesced_mmio.o \ +kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \ + book3s_64_vio_hv.o + +kvm-pr-y := \ fpu.o \ book3s_paired_singles.o \ book3s_pr.o \ book3s_pr_papr.o \ - book3s_64_vio_hv.o \ book3s_emulate.o \ book3s_interrupts.o \ book3s_mmu_hpte.o \ book3s_64_mmu_host.o \ book3s_64_mmu.o \ book3s_32_mmu.o -kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ + +ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE +kvm-book3s_64-module-objs := \ + $(KVM)/coalesced_mmio.o + +kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ book3s_rmhandlers.o +endif -kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \ +kvm-hv-y += \ book3s_hv.o \ book3s_hv_interrupts.o \ book3s_64_mmu_hv.o + kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \ book3s_hv_rm_xics.o -kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \ + +ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ book3s_hv_rmhandlers.o \ book3s_hv_rm_mmu.o \ - book3s_64_vio_hv.o \ book3s_hv_ras.o \ book3s_hv_builtin.o \ book3s_hv_cma.o \ $(kvm-book3s_64-builtin-xics-objs-y) +endif kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ book3s_xics.o -kvm-book3s_64-module-objs := \ +kvm-book3s_64-module-objs += \ $(KVM)/kvm_main.o \ $(KVM)/eventfd.o \ powerpc.o \ @@ -123,4 +133,7 @@ obj-$(CONFIG_KVM_E500MC) += kvm.o obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o +obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o +obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o + obj-y += $(kvm-book3s_64-builtin-objs-y) diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 700df6f1d32..8912608b7e1 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -34,6 +34,7 @@ #include <linux/vmalloc.h> #include <linux/highmem.h> +#include "book3s.h" #include "trace.h" #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU @@ -69,6 +70,50 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) { } +static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu) +{ + if (!is_kvmppc_hv_enabled(vcpu->kvm)) + return to_book3s(vcpu)->hior; + return 0; +} + +static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, + unsigned long pending_now, unsigned long old_pending) +{ + if (is_kvmppc_hv_enabled(vcpu->kvm)) + return; + if (pending_now) + vcpu->arch.shared->int_pending = 1; + else if (old_pending) + vcpu->arch.shared->int_pending = 0; +} + +static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) +{ + ulong crit_raw; + ulong crit_r1; + bool crit; + + if (is_kvmppc_hv_enabled(vcpu->kvm)) + return false; + + crit_raw = vcpu->arch.shared->critical; + crit_r1 = kvmppc_get_gpr(vcpu, 1); + + /* Truncate crit indicators in 32 bit mode */ + if (!(vcpu->arch.shared->msr & MSR_SF)) { + crit_raw &= 0xffffffff; + crit_r1 &= 0xffffffff; + } + + /* Critical section when crit == r1 */ + crit = (crit_raw == crit_r1); + /* ... 
and we're in supervisor mode */ + crit = crit && !(vcpu->arch.shared->msr & MSR_PR); + + return crit; +} + void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); @@ -126,28 +171,32 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) printk(KERN_INFO "Queueing interrupt %x\n", vec); #endif } - +EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio); void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) { /* might as well deliver this straight away */ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags); } +EXPORT_SYMBOL_GPL(kvmppc_core_queue_program); void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); } +EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec); int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) { return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); } +EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec); void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) { kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); } +EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec); void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) @@ -285,8 +334,10 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) return 0; } +EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter); -pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) +pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, + bool *writable) { ulong mp_pa = vcpu->arch.magic_page_pa; @@ -302,20 +353,23 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; get_page(pfn_to_page(pfn)); + if (writable) + *writable = true; return pfn; } - return gfn_to_pfn(vcpu->kvm, gfn); + return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); } +EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn); static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, - struct kvmppc_pte *pte) + bool iswrite, struct kvmppc_pte *pte) { int relocated = (vcpu->arch.shared->msr & (data ? 
MSR_DR : MSR_IR)); int r; if (relocated) { - r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); + r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); } else { pte->eaddr = eaddr; pte->raddr = eaddr & KVM_PAM; @@ -361,7 +415,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, vcpu->stat.st++; - if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) + if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte)) return -ENOENT; *eaddr = pte.raddr; @@ -374,6 +428,7 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, return EMULATE_DONE; } +EXPORT_SYMBOL_GPL(kvmppc_st); int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data) @@ -383,7 +438,7 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, vcpu->stat.ld++; - if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) + if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte)) goto nopte; *eaddr = pte.raddr; @@ -404,6 +459,7 @@ nopte: mmio: return EMULATE_DO_MMIO; } +EXPORT_SYMBOL_GPL(kvmppc_ld); int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { @@ -419,6 +475,18 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) { } +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); +} + int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; @@ -495,8 +563,7 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) if (size > sizeof(val)) return -EINVAL; - r = kvmppc_get_one_reg(vcpu, reg->id, &val); - + r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val); if (r == -EINVAL) { r = 0; switch (reg->id) { @@ -528,6 +595,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) } val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]); break; + case KVM_REG_PPC_VRSAVE: + val = get_reg_val(reg->id, vcpu->arch.vrsave); + break; #endif /* CONFIG_ALTIVEC */ case KVM_REG_PPC_DEBUG_INST: { u32 opcode = INS_TW; @@ -572,8 +642,7 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) return -EFAULT; - r = kvmppc_set_one_reg(vcpu, reg->id, &val); - + r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val); if (r == -EINVAL) { r = 0; switch (reg->id) { @@ -605,6 +674,13 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) } vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val); break; + case KVM_REG_PPC_VRSAVE: + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { + r = -ENXIO; + break; + } + vcpu->arch.vrsave = set_reg_val(reg->id, val); + break; #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: @@ -625,6 +701,27 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) return r; } +void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); +} + +void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +{ + vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); +} + +void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) +{ + vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr); +} +EXPORT_SYMBOL_GPL(kvmppc_set_msr); + +int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu); +} + int kvm_arch_vcpu_ioctl_translate(struct 
kvm_vcpu *vcpu, struct kvm_translation *tr) { @@ -644,3 +741,141 @@ void kvmppc_decrementer_func(unsigned long data) kvmppc_core_queue_dec(vcpu); kvm_vcpu_kick(vcpu); } + +struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +{ + return kvm->arch.kvm_ops->vcpu_create(kvm, id); +} + +void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +{ + vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); +} + +int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->arch.kvm_ops->check_requests(vcpu); +} + +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) +{ + return kvm->arch.kvm_ops->get_dirty_log(kvm, log); +} + +void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ + kvm->arch.kvm_ops->free_memslot(free, dont); +} + +int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) +{ + return kvm->arch.kvm_ops->create_memslot(slot, npages); +} + +void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ + kvm->arch.kvm_ops->flush_memslot(kvm, memslot); +} + +int kvmppc_core_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem) +{ + return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem); +} + +void kvmppc_core_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old) +{ + kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old); +} + +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm->arch.kvm_ops->unmap_hva(kvm, hva); +} +EXPORT_SYMBOL_GPL(kvm_unmap_hva); + +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +{ + return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm->arch.kvm_ops->age_hva(kvm, hva); +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + return kvm->arch.kvm_ops->test_age_hva(kvm, hva); +} + +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ + kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); +} + +void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +{ + vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); +} + +int kvmppc_core_init_vm(struct kvm *kvm) +{ + +#ifdef CONFIG_PPC64 + INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); + INIT_LIST_HEAD(&kvm->arch.rtas_tokens); +#endif + + return kvm->arch.kvm_ops->init_vm(kvm); +} + +void kvmppc_core_destroy_vm(struct kvm *kvm) +{ + kvm->arch.kvm_ops->destroy_vm(kvm); + +#ifdef CONFIG_PPC64 + kvmppc_rtas_tokens_free(kvm); + WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); +#endif +} + +int kvmppc_core_check_processor_compat(void) +{ + /* + * We always return 0 for book3s. 
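The book3s.c additions above route nearly every core operation through kvm->arch.kvm_ops (or vcpu->kvm->arch.kvm_ops), which is the indirection that lets the PR and HV flavours be built as the separate kvm-pr.o/kvm-hv.o modules in the Makefile hunk. A reduced, compilable model of that dispatch pattern; the types are cut down to the minimum and the names other than the ops fields are invented for the example.

#include <stdio.h>

struct vcpu;                                   /* stand-in for struct kvm_vcpu */

struct kvmppc_ops {
	void (*vcpu_load)(struct vcpu *v, int cpu);
	void (*vcpu_put)(struct vcpu *v);
};

struct vcpu {
	const struct kvmppc_ops *kvm_ops;      /* set when the PR or HV module registers */
};

static void pr_vcpu_load(struct vcpu *v, int cpu) { (void)v; printf("PR load, cpu %d\n", cpu); }
static void pr_vcpu_put(struct vcpu *v)           { (void)v; printf("PR put\n"); }

static const struct kvmppc_ops kvm_ops_pr = {
	.vcpu_load = pr_vcpu_load,
	.vcpu_put  = pr_vcpu_put,
};

/* generic wrapper, analogous to kvmppc_core_vcpu_load() above */
static void core_vcpu_load(struct vcpu *v, int cpu)
{
	v->kvm_ops->vcpu_load(v, cpu);
}

int main(void)
{
	struct vcpu v = { .kvm_ops = &kvm_ops_pr };

	core_vcpu_load(&v, 0);
	v.kvm_ops->vcpu_put(&v);
	return 0;
}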
We check + * for compatability while loading the HV + * or PR module + */ + return 0; +} + +static int kvmppc_book3s_init(void) +{ + int r; + + r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); + if (r) + return r; +#ifdef CONFIG_KVM_BOOK3S_32 + r = kvmppc_book3s_init_pr(); +#endif + return r; + +} + +static void kvmppc_book3s_exit(void) +{ +#ifdef CONFIG_KVM_BOOK3S_32 + kvmppc_book3s_exit_pr(); +#endif + kvm_exit(); +} + +module_init(kvmppc_book3s_init); +module_exit(kvmppc_book3s_exit); diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h new file mode 100644 index 00000000000..4bf956cf94d --- /dev/null +++ b/arch/powerpc/kvm/book3s.h @@ -0,0 +1,34 @@ +/* + * Copyright IBM Corporation, 2013 + * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License or (at your optional) any later version of the license. + * + */ + +#ifndef __POWERPC_KVM_BOOK3S_H__ +#define __POWERPC_KVM_BOOK3S_H__ + +extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm, + struct kvm_memory_slot *memslot); +extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva); +extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, + unsigned long end); +extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva); +extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva); +extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte); + +extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); +extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance); +extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, + int sprn, ulong spr_val); +extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, + int sprn, ulong *spr_val); +extern int kvmppc_book3s_init_pr(void); +extern void kvmppc_book3s_exit_pr(void); + +#endif diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index c8cefdd15fd..76a64ce6a5b 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -84,7 +84,8 @@ static inline bool sr_nx(u32 sr_raw) } static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *pte, bool data); + struct kvmppc_pte *pte, bool data, + bool iswrite); static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); @@ -99,7 +100,7 @@ static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, u64 vsid; struct kvmppc_pte pte; - if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) + if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false)) return pte.vpage; kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); @@ -111,10 +112,11 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) kvmppc_set_msr(vcpu, 0); } -static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, +static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu, u32 sre, gva_t eaddr, bool primary) { + struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u32 page, hash, pteg, htabmask; hva_t r; @@ -132,7 +134,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3 kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg, sr_vsid(sre)); - r = 
gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); + r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); if (kvm_is_error_hva(r)) return r; return r | (pteg & ~PAGE_MASK); @@ -145,7 +147,8 @@ static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary) } static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *pte, bool data) + struct kvmppc_pte *pte, bool data, + bool iswrite) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_bat *bat; @@ -186,8 +189,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, printk(KERN_INFO "BAT is not readable!\n"); continue; } - if (!pte->may_write) { - /* let's treat r/o BATs as not-readable for now */ + if (iswrite && !pte->may_write) { dprintk_pte("BAT is read-only!\n"); continue; } @@ -201,9 +203,8 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data, - bool primary) + bool iswrite, bool primary) { - struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u32 sre; hva_t ptegp; u32 pteg[16]; @@ -218,7 +219,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); - ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary); + ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu, sre, eaddr, primary); if (kvm_is_error_hva(ptegp)) { printk(KERN_INFO "KVM: Invalid PTEG!\n"); goto no_page_found; @@ -258,9 +259,6 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, break; } - if ( !pte->may_read ) - continue; - dprintk_pte("MMU: Found PTE -> %x %x - %x\n", pteg[i], pteg[i+1], pp); found = 1; @@ -271,19 +269,23 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, /* Update PTE C and A bits, so the guest's swapper knows we used the page */ if (found) { - u32 oldpte = pteg[i+1]; - - if (pte->may_read) - pteg[i+1] |= PTEG_FLAG_ACCESSED; - if (pte->may_write) - pteg[i+1] |= PTEG_FLAG_DIRTY; - else - dprintk_pte("KVM: Mapping read-only page!\n"); - - /* Write back into the PTEG */ - if (pteg[i+1] != oldpte) - copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); - + u32 pte_r = pteg[i+1]; + char __user *addr = (char __user *) &pteg[i+1]; + + /* + * Use single-byte writes to update the HPTE, to + * conform to what real hardware does. 
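The comment added here explains why the R/C update switched from a whole-PTEG copy_to_user() to single-byte put_user() calls: only the byte holding the ACCESSED or DIRTY flag is written back, so a concurrent update of the rest of the HPTE is not clobbered. A small host-side illustration of the byte arithmetic follows; the 0x100 flag value and the byte offsets assume the usual big-endian layout of the guest word and are not quoted from the header.

#include <stdint.h>
#include <stdio.h>

#define PTEG_FLAG_ACCESSED 0x00000100u

/* read a big-endian u32, the way the guest HPTE word is laid out in memory */
static uint32_t be32_get(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint8_t guest_pte_r[4] = { 0x12, 0x34, 0x00, 0x80 };  /* second HPTE word */
	uint32_t pte_r = be32_get(guest_pte_r);

	if (!(pte_r & PTEG_FLAG_ACCESSED)) {
		pte_r |= PTEG_FLAG_ACCESSED;
		guest_pte_r[2] = (uint8_t)(pte_r >> 8);   /* put_user(pte_r >> 8, addr + 2) */
	}
	printf("%02x %02x %02x %02x\n", guest_pte_r[0], guest_pte_r[1],
	       guest_pte_r[2], guest_pte_r[3]);
	return 0;
}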
+ */ + if (pte->may_read && !(pte_r & PTEG_FLAG_ACCESSED)) { + pte_r |= PTEG_FLAG_ACCESSED; + put_user(pte_r >> 8, addr + 2); + } + if (iswrite && pte->may_write && !(pte_r & PTEG_FLAG_DIRTY)) { + pte_r |= PTEG_FLAG_DIRTY; + put_user(pte_r, addr + 3); + } + if (!pte->may_read || (iswrite && !pte->may_write)) + return -EPERM; return 0; } @@ -302,12 +304,14 @@ no_page_found: } static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *pte, bool data) + struct kvmppc_pte *pte, bool data, + bool iswrite) { int r; ulong mp_ea = vcpu->arch.magic_page_ea; pte->eaddr = eaddr; + pte->page_size = MMU_PAGE_4K; /* Magic page override */ if (unlikely(mp_ea) && @@ -323,11 +327,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, return 0; } - r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); + r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data, iswrite); if (r < 0) - r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); + r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, + data, iswrite, true); if (r < 0) - r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false); + r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, + data, iswrite, false); return r; } @@ -347,7 +353,12 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) { - kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000); + int i; + struct kvm_vcpu *v; + + /* flush this VA on all cpus */ + kvm_for_each_vcpu(i, v, vcpu->kvm) + kvmppc_mmu_pte_flush(v, ea, 0x0FFFF000); } static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 00e619bf608..3a0abd2e5a1 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -138,7 +138,8 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, extern char etext[]; -int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) +int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, + bool iswrite) { pfn_t hpaddr; u64 vpn; @@ -152,9 +153,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) bool evict = false; struct hpte_cache *pte; int r = 0; + bool writable; /* Get host physical address for gpa */ - hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); + hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT, + iswrite, &writable); if (is_error_noslot_pfn(hpaddr)) { printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); @@ -204,7 +207,7 @@ next_pteg: (primary ? 
0 : PTE_SEC); pteg1 = hpaddr | PTE_M | PTE_R | PTE_C; - if (orig_pte->may_write) { + if (orig_pte->may_write && writable) { pteg1 |= PP_RWRW; mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); } else { @@ -259,6 +262,11 @@ out: return r; } +void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) +{ + kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL); +} + static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) { struct kvmppc_sid_map *map; @@ -341,7 +349,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) svcpu_put(svcpu); } -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) { int i; diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 7e345e00661..83da1f868fd 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -107,9 +107,20 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, return kvmppc_slb_calc_vpn(slb, eaddr); } +static int mmu_pagesize(int mmu_pg) +{ + switch (mmu_pg) { + case MMU_PAGE_64K: + return 16; + case MMU_PAGE_16M: + return 24; + } + return 12; +} + static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) { - return slbe->large ? 24 : 12; + return mmu_pagesize(slbe->base_page_size); } static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) @@ -119,11 +130,11 @@ static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p); } -static hva_t kvmppc_mmu_book3s_64_get_pteg( - struct kvmppc_vcpu_book3s *vcpu_book3s, +static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu, struct kvmppc_slb *slbe, gva_t eaddr, bool second) { + struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); u64 hash, pteg, htabsize; u32 ssize; hva_t r; @@ -148,10 +159,10 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg( /* When running a PAPR guest, SDR1 contains a HVA address instead of a GPA */ - if (vcpu_book3s->vcpu.arch.papr_enabled) + if (vcpu->arch.papr_enabled) r = pteg; else - r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); + r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); if (kvm_is_error_hva(r)) return r; @@ -166,18 +177,38 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr) avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p); - if (p < 24) - avpn >>= ((80 - p) - 56) - 8; + if (p < 16) + avpn >>= ((80 - p) - 56) - 8; /* 16 - p */ else - avpn <<= 8; + avpn <<= p - 16; return avpn; } +/* + * Return page size encoded in the second word of a HPTE, or + * -1 for an invalid encoding for the base page size indicated by + * the SLB entry. This doesn't handle mixed pagesize segments yet. 
+ */ +static int decode_pagesize(struct kvmppc_slb *slbe, u64 r) +{ + switch (slbe->base_page_size) { + case MMU_PAGE_64K: + if ((r & 0xf000) == 0x1000) + return MMU_PAGE_64K; + break; + case MMU_PAGE_16M: + if ((r & 0xff000) == 0) + return MMU_PAGE_16M; + break; + } + return -1; +} + static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *gpte, bool data) + struct kvmppc_pte *gpte, bool data, + bool iswrite) { - struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_slb *slbe; hva_t ptegp; u64 pteg[16]; @@ -189,6 +220,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, u8 pp, key = 0; bool found = false; bool second = false; + int pgsize; ulong mp_ea = vcpu->arch.magic_page_ea; /* Magic page override */ @@ -202,6 +234,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, gpte->may_execute = true; gpte->may_read = true; gpte->may_write = true; + gpte->page_size = MMU_PAGE_4K; return 0; } @@ -222,8 +255,12 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID | HPTE_V_SECONDARY; + pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K; + + mutex_lock(&vcpu->kvm->arch.hpt_mutex); + do_second: - ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); + ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second); if (kvm_is_error_hva(ptegp)) goto no_page_found; @@ -240,6 +277,13 @@ do_second: for (i=0; i<16; i+=2) { /* Check all relevant fields of 1st dword */ if ((pteg[i] & v_mask) == v_val) { + /* If large page bit is set, check pgsize encoding */ + if (slbe->large && + (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { + pgsize = decode_pagesize(slbe, pteg[i+1]); + if (pgsize < 0) + continue; + } found = true; break; } @@ -256,13 +300,15 @@ do_second: v = pteg[i]; r = pteg[i+1]; pp = (r & HPTE_R_PP) | key; - eaddr_mask = 0xFFF; + if (r & HPTE_R_PP0) + pp |= 8; gpte->eaddr = eaddr; gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); - if (slbe->large) - eaddr_mask = 0xFFFFFF; + + eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1; gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask); + gpte->page_size = pgsize; gpte->may_execute = ((r & HPTE_R_N) ? false : true); gpte->may_read = false; gpte->may_write = false; @@ -277,6 +323,7 @@ do_second: case 3: case 5: case 7: + case 10: gpte->may_read = true; break; } @@ -287,30 +334,37 @@ do_second: /* Update PTE R and C bits, so the guest's swapper knows we used the * page */ - if (gpte->may_read) { - /* Set the accessed flag */ + if (gpte->may_read && !(r & HPTE_R_R)) { + /* + * Set the accessed flag. + * We have to write this back with a single byte write + * because another vcpu may be accessing this on + * non-PAPR platforms such as mac99, and this is + * what real hardware does. 
+ */ + char __user *addr = (char __user *) &pteg[i+1]; r |= HPTE_R_R; + put_user(r >> 8, addr + 6); } - if (data && gpte->may_write) { - /* Set the dirty flag -- XXX even if not writing */ + if (iswrite && gpte->may_write && !(r & HPTE_R_C)) { + /* Set the dirty flag */ + /* Use a single byte write */ + char __user *addr = (char __user *) &pteg[i+1]; r |= HPTE_R_C; + put_user(r, addr + 7); } - /* Write back into the PTEG */ - if (pteg[i+1] != r) { - pteg[i+1] = r; - copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); - } + mutex_unlock(&vcpu->kvm->arch.hpt_mutex); - if (!gpte->may_read) + if (!gpte->may_read || (iswrite && !gpte->may_write)) return -EPERM; return 0; no_page_found: + mutex_unlock(&vcpu->kvm->arch.hpt_mutex); return -ENOENT; no_seg_found: - dprintk("KVM MMU: Trigger segment fault\n"); return -EINVAL; } @@ -345,6 +399,21 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; slbe->class = (rs & SLB_VSID_C) ? 1 : 0; + slbe->base_page_size = MMU_PAGE_4K; + if (slbe->large) { + if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) { + switch (rs & SLB_VSID_LP) { + case SLB_VSID_LP_00: + slbe->base_page_size = MMU_PAGE_16M; + break; + case SLB_VSID_LP_01: + slbe->base_page_size = MMU_PAGE_64K; + break; + } + } else + slbe->base_page_size = MMU_PAGE_16M; + } + slbe->orige = rb & (ESID_MASK | SLB_ESID_V); slbe->origv = rs; @@ -460,14 +529,45 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, bool large) { u64 mask = 0xFFFFFFFFFULL; + long i; + struct kvm_vcpu *v; dprintk("KVM MMU: tlbie(0x%lx)\n", va); - if (large) - mask = 0xFFFFFF000ULL; - kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); + /* + * The tlbie instruction changed behaviour starting with + * POWER6. POWER6 and later don't have the large page flag + * in the instruction but in the RB value, along with bits + * indicating page and segment sizes. + */ + if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) { + /* POWER6 or later */ + if (va & 1) { /* L bit */ + if ((va & 0xf000) == 0x1000) + mask = 0xFFFFFFFF0ULL; /* 64k page */ + else + mask = 0xFFFFFF000ULL; /* 16M page */ + } + } else { + /* older processors, e.g. 
PPC970 */ + if (large) + mask = 0xFFFFFF000ULL; + } + /* flush this VA on all vcpus */ + kvm_for_each_vcpu(i, v, vcpu->kvm) + kvmppc_mmu_pte_vflush(v, va >> 12, mask); } +#ifdef CONFIG_PPC_64K_PAGES +static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid) +{ + ulong mp_ea = vcpu->arch.magic_page_ea; + + return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) && + (mp_ea >> SID_SHIFT) == esid; +} +#endif + static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid) { @@ -475,11 +575,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, struct kvmppc_slb *slb; u64 gvsid = esid; ulong mp_ea = vcpu->arch.magic_page_ea; + int pagesize = MMU_PAGE_64K; if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); if (slb) { gvsid = slb->vsid; + pagesize = slb->base_page_size; if (slb->tb) { gvsid <<= SID_SHIFT_1T - SID_SHIFT; gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1); @@ -490,28 +592,41 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { case 0: - *vsid = VSID_REAL | esid; + gvsid = VSID_REAL | esid; break; case MSR_IR: - *vsid = VSID_REAL_IR | gvsid; + gvsid |= VSID_REAL_IR; break; case MSR_DR: - *vsid = VSID_REAL_DR | gvsid; + gvsid |= VSID_REAL_DR; break; case MSR_DR|MSR_IR: if (!slb) goto no_slb; - *vsid = gvsid; break; default: BUG(); break; } +#ifdef CONFIG_PPC_64K_PAGES + /* + * Mark this as a 64k segment if the host is using + * 64k pages, the host MMU supports 64k pages and + * the guest segment page size is >= 64k, + * but not if this segment contains the magic page. + */ + if (pagesize >= MMU_PAGE_64K && + mmu_psize_defs[MMU_PAGE_64K].shift && + !segment_contains_magic_page(vcpu, esid)) + gvsid |= VSID_64K; +#endif + if (vcpu->arch.shared->msr & MSR_PR) - *vsid |= VSID_PR; + gvsid |= VSID_PR; + *vsid = gvsid; return 0; no_slb: diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index e5240524bf6..0d513af62bb 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -27,14 +27,14 @@ #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/hw_irq.h> -#include "trace.h" +#include "trace_pr.h" #define PTE_SIZE 12 void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { ppc_md.hpte_invalidate(pte->slot, pte->host_vpn, - MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M, + pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M, false); } @@ -78,7 +78,8 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) return NULL; } -int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) +int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, + bool iswrite) { unsigned long vpn; pfn_t hpaddr; @@ -90,16 +91,26 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) int attempt = 0; struct kvmppc_sid_map *map; int r = 0; + int hpsize = MMU_PAGE_4K; + bool writable; + unsigned long mmu_seq; + struct kvm *kvm = vcpu->kvm; + struct hpte_cache *cpte; + unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT; + unsigned long pfn; + + /* used to check for invalidations in progress */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); /* Get host physical address for gpa */ - hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); - if (is_error_noslot_pfn(hpaddr)) { - printk(KERN_INFO "Couldn't get guest page for gfn 
%lx!\n", orig_pte->eaddr); + pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable); + if (is_error_noslot_pfn(pfn)) { + printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn); r = -EINVAL; goto out; } - hpaddr <<= PAGE_SHIFT; - hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); + hpaddr = pfn << PAGE_SHIFT; /* and write the mapping ea -> hpa into the pt */ vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); @@ -117,20 +128,39 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) goto out; } - vsid = map->host_vsid; - vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); + vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M); - if (!orig_pte->may_write) - rflags |= HPTE_R_PP; - else - mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); + kvm_set_pfn_accessed(pfn); + if (!orig_pte->may_write || !writable) + rflags |= PP_RXRX; + else { + mark_page_dirty(vcpu->kvm, gfn); + kvm_set_pfn_dirty(pfn); + } if (!orig_pte->may_execute) rflags |= HPTE_R_N; else - kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); + kvmppc_mmu_flush_icache(pfn); + + /* + * Use 64K pages if possible; otherwise, on 64K page kernels, + * we need to transfer 4 more bits from guest real to host real addr. + */ + if (vsid & VSID_64K) + hpsize = MMU_PAGE_64K; + else + hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); + + hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M); - hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M); + cpte = kvmppc_mmu_hpte_cache_next(vcpu); + + spin_lock(&kvm->mmu_lock); + if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) { + r = -EAGAIN; + goto out_unlock; + } map_again: hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); @@ -139,11 +169,11 @@ map_again: if (attempt > 1) if (ppc_md.hpte_remove(hpteg) < 0) { r = -1; - goto out; + goto out_unlock; } ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, - MMU_PAGE_4K, MMU_PAGE_4K, MMU_SEGSIZE_256M); + hpsize, hpsize, MMU_SEGSIZE_256M); if (ret < 0) { /* If we couldn't map a primary PTE, try a secondary */ @@ -152,8 +182,6 @@ map_again: attempt++; goto map_again; } else { - struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); - trace_kvm_book3s_64_mmu_map(rflags, hpteg, vpn, hpaddr, orig_pte); @@ -164,19 +192,37 @@ map_again: hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); } - pte->slot = hpteg + (ret & 7); - pte->host_vpn = vpn; - pte->pte = *orig_pte; - pte->pfn = hpaddr >> PAGE_SHIFT; + cpte->slot = hpteg + (ret & 7); + cpte->host_vpn = vpn; + cpte->pte = *orig_pte; + cpte->pfn = pfn; + cpte->pagesize = hpsize; - kvmppc_mmu_hpte_cache_map(vcpu, pte); + kvmppc_mmu_hpte_cache_map(vcpu, cpte); + cpte = NULL; } - kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); + +out_unlock: + spin_unlock(&kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + if (cpte) + kvmppc_mmu_hpte_cache_free(cpte); out: return r; } +void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) +{ + u64 mask = 0xfffffffffULL; + u64 vsid; + + vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); + if (vsid & VSID_64K) + mask = 0xffffffff0ULL; + kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask); +} + static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) { struct kvmppc_sid_map *map; @@ -291,6 +337,12 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) slb_vsid &= ~SLB_VSID_KP; slb_esid |= slb_index; +#ifdef CONFIG_PPC_64K_PAGES + /* Set host segment base page size to 64K if possible */ + if (gvsid & VSID_64K) + slb_vsid |= 
mmu_psize_defs[MMU_PAGE_64K].sllp; +#endif + svcpu->slb[slb_index].esid = slb_esid; svcpu->slb[slb_index].vsid = slb_vsid; @@ -326,7 +378,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) svcpu_put(svcpu); } -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) { kvmppc_mmu_hpte_destroy(vcpu); __destroy_context(to_book3s(vcpu)->context_id[0]); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 043eec8461e..f3ff587a8b7 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -260,10 +260,6 @@ int kvmppc_mmu_hv_init(void) return 0; } -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) -{ -} - static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) { kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); @@ -451,7 +447,7 @@ static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r, } static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, - struct kvmppc_pte *gpte, bool data) + struct kvmppc_pte *gpte, bool data, bool iswrite) { struct kvm *kvm = vcpu->kvm; struct kvmppc_slb *slbe; @@ -906,21 +902,22 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, return 0; } -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva) { if (kvm->arch.using_mmu_notifiers) kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end) { if (kvm->arch.using_mmu_notifiers) kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp); return 0; } -void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) +void kvmppc_core_flush_memslot_hv(struct kvm *kvm, + struct kvm_memory_slot *memslot) { unsigned long *rmapp; unsigned long gfn; @@ -994,7 +991,7 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, return ret; } -int kvm_age_hva(struct kvm *kvm, unsigned long hva) +int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva) { if (!kvm->arch.using_mmu_notifiers) return 0; @@ -1032,14 +1029,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, return ret; } -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva) { if (!kvm->arch.using_mmu_notifiers) return 0; return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp); } -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) { if (!kvm->arch.using_mmu_notifiers) return; @@ -1512,9 +1509,8 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); - lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; - lpcr |= senc << (LPCR_VRMASD_SH - 4); - kvm->arch.lpcr = lpcr; + lpcr = senc << (LPCR_VRMASD_SH - 4); + kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); rma_setup = 1; } ++i; diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 30c2f3b134c..2c25f5412bd 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, /* Didn't find the liobn, punt it to userspace */ return H_TOO_HARD; } +EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); diff --git 
a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 360ce68c980..99d40f8977e 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -86,8 +86,8 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) return true; } -int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int inst, int *advance) +int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int rt = get_rt(inst); @@ -172,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->arch.mmu.tlbie(vcpu, addr, large); break; } -#ifdef CONFIG_KVM_BOOK3S_64_PR +#ifdef CONFIG_PPC_BOOK3S_64 case OP_31_XOP_FAKE_SC1: { /* SC 1 papr hypercalls */ @@ -267,12 +267,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, r = kvmppc_st(vcpu, &addr, 32, zeros, true); if ((r == -ENOENT) || (r == -EPERM)) { - struct kvmppc_book3s_shadow_vcpu *svcpu; - - svcpu = svcpu_get(vcpu); *advance = 0; vcpu->arch.shared->dar = vaddr; - svcpu->fault_dar = vaddr; + vcpu->arch.fault_dar = vaddr; dsisr = DSISR_ISSTORE; if (r == -ENOENT) @@ -281,8 +278,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, dsisr |= DSISR_PROTFAULT; vcpu->arch.shared->dsisr = dsisr; - svcpu->fault_dsisr = dsisr; - svcpu_put(svcpu); + vcpu->arch.fault_dsisr = dsisr; kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); @@ -349,7 +345,7 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn) return bat; } -int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) +int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) { int emulated = EMULATE_DONE; @@ -472,7 +468,7 @@ unprivileged: return emulated; } -int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) +int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) { int emulated = EMULATE_DONE; diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c index 7057a02f090..852989a9bad 100644 --- a/arch/powerpc/kvm/book3s_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c @@ -20,9 +20,10 @@ #include <linux/export.h> #include <asm/kvm_book3s.h> -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); -#else +#endif +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); #ifdef CONFIG_ALTIVEC diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 62a2b5ab08e..072287f1c3b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -52,6 +52,9 @@ #include <linux/vmalloc.h> #include <linux/highmem.h> #include <linux/hugetlb.h> +#include <linux/module.h> + +#include "book3s.h" /* #define EXIT_DEBUG */ /* #define EXIT_DEBUG_SIMPLE */ @@ -66,7 +69,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu); static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu); -void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) +static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) { int me; int cpu = vcpu->cpu; @@ -125,7 +128,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu) * purely defensive; they should never fail.) 
*/ -void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; @@ -143,7 +146,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) spin_unlock(&vcpu->arch.tbacct_lock); } -void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) { struct kvmppc_vcore *vc = vcpu->arch.vcore; @@ -155,17 +158,46 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) spin_unlock(&vcpu->arch.tbacct_lock); } -void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) +static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) { vcpu->arch.shregs.msr = msr; kvmppc_end_cede(vcpu); } -void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) +void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr) { vcpu->arch.pvr = pvr; } +int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) +{ + unsigned long pcr = 0; + struct kvmppc_vcore *vc = vcpu->arch.vcore; + + if (arch_compat) { + if (!cpu_has_feature(CPU_FTR_ARCH_206)) + return -EINVAL; /* 970 has no compat mode support */ + + switch (arch_compat) { + case PVR_ARCH_205: + pcr = PCR_ARCH_205; + break; + case PVR_ARCH_206: + case PVR_ARCH_206p: + break; + default: + return -EINVAL; + } + } + + spin_lock(&vc->lock); + vc->arch_compat = arch_compat; + vc->pcr = pcr; + spin_unlock(&vc->lock); + + return 0; +} + void kvmppc_dump_regs(struct kvm_vcpu *vcpu) { int r; @@ -195,7 +227,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu) pr_err(" ESID = %.16llx VSID = %.16llx\n", vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n", - vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1, + vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, vcpu->arch.last_inst); } @@ -489,7 +521,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, memset(dt, 0, sizeof(struct dtl_entry)); dt->dispatch_reason = 7; dt->processor_id = vc->pcpu + vcpu->arch.ptid; - dt->timebase = now; + dt->timebase = now + vc->tb_offset; dt->enqueue_to_dispatch_time = stolen; dt->srr0 = kvmppc_get_pc(vcpu); dt->srr1 = vcpu->arch.shregs.msr; @@ -538,6 +570,15 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) } break; case H_CONFER: + target = kvmppc_get_gpr(vcpu, 4); + if (target == -1) + break; + tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); + if (!tvcpu) { + ret = H_PARAMETER; + break; + } + kvm_vcpu_yield_to(tvcpu); break; case H_REGISTER_VPA: ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), @@ -576,8 +617,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, - struct task_struct *tsk) +static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, + struct task_struct *tsk) { int r = RESUME_HOST; @@ -671,16 +712,16 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", vcpu->arch.trap, kvmppc_get_pc(vcpu), vcpu->arch.shregs.msr); + run->hw.hardware_exit_reason = vcpu->arch.trap; r = RESUME_HOST; - BUG(); break; } return r; } -int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) +static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { int i; @@ -694,12 +735,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) +static int 
kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { int i, j; - kvmppc_set_pvr(vcpu, sregs->pvr); + kvmppc_set_pvr_hv(vcpu, sregs->pvr); j = 0; for (i = 0; i < vcpu->arch.slb_nr; i++) { @@ -714,7 +755,23 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) +static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) +{ + struct kvmppc_vcore *vc = vcpu->arch.vcore; + u64 mask; + + spin_lock(&vc->lock); + /* + * Userspace can only modify DPFD (default prefetch depth), + * ILE (interrupt little-endian) and TC (translation control). + */ + mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; + vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); + spin_unlock(&vc->lock); +} + +static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = 0; long int i; @@ -749,6 +806,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) i = id - KVM_REG_PPC_PMC1; *val = get_reg_val(id, vcpu->arch.pmc[i]); break; + case KVM_REG_PPC_SIAR: + *val = get_reg_val(id, vcpu->arch.siar); + break; + case KVM_REG_PPC_SDAR: + *val = get_reg_val(id, vcpu->arch.sdar); + break; #ifdef CONFIG_VSX case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: if (cpu_has_feature(CPU_FTR_VSX)) { @@ -787,6 +850,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) val->vpaval.length = vcpu->arch.dtl.len; spin_unlock(&vcpu->arch.vpa_update_lock); break; + case KVM_REG_PPC_TB_OFFSET: + *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); + break; + case KVM_REG_PPC_LPCR: + *val = get_reg_val(id, vcpu->arch.vcore->lpcr); + break; + case KVM_REG_PPC_PPR: + *val = get_reg_val(id, vcpu->arch.ppr); + break; + case KVM_REG_PPC_ARCH_COMPAT: + *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); + break; default: r = -EINVAL; break; @@ -795,7 +870,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) return r; } -int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) +static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = 0; long int i; @@ -833,6 +909,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) i = id - KVM_REG_PPC_PMC1; vcpu->arch.pmc[i] = set_reg_val(id, *val); break; + case KVM_REG_PPC_SIAR: + vcpu->arch.siar = set_reg_val(id, *val); + break; + case KVM_REG_PPC_SDAR: + vcpu->arch.sdar = set_reg_val(id, *val); + break; #ifdef CONFIG_VSX case KVM_REG_PPC_FPR0 ... 
KVM_REG_PPC_FPR31: if (cpu_has_feature(CPU_FTR_VSX)) { @@ -880,6 +962,20 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) len -= len % sizeof(struct dtl_entry); r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); break; + case KVM_REG_PPC_TB_OFFSET: + /* round up to multiple of 2^24 */ + vcpu->arch.vcore->tb_offset = + ALIGN(set_reg_val(id, *val), 1UL << 24); + break; + case KVM_REG_PPC_LPCR: + kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); + break; + case KVM_REG_PPC_PPR: + vcpu->arch.ppr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_ARCH_COMPAT: + r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); + break; default: r = -EINVAL; break; @@ -888,14 +984,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) return r; } -int kvmppc_core_check_processor_compat(void) -{ - if (cpu_has_feature(CPU_FTR_HVMODE)) - return 0; - return -EIO; -} - -struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, + unsigned int id) { struct kvm_vcpu *vcpu; int err = -EINVAL; @@ -919,8 +1009,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) vcpu->arch.mmcr[0] = MMCR0_FC; vcpu->arch.ctrl = CTRL_RUNLATCH; /* default to host PVR, since we can't spoof it */ - vcpu->arch.pvr = mfspr(SPRN_PVR); - kvmppc_set_pvr(vcpu, vcpu->arch.pvr); + kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); spin_lock_init(&vcpu->arch.vpa_update_lock); spin_lock_init(&vcpu->arch.tbacct_lock); vcpu->arch.busy_preempt = TB_NIL; @@ -940,6 +1029,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) spin_lock_init(&vcore->lock); init_waitqueue_head(&vcore->wq); vcore->preempt_tb = TB_NIL; + vcore->lpcr = kvm->arch.lpcr; } kvm->arch.vcores[core] = vcore; kvm->arch.online_vcores++; @@ -972,7 +1062,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) vpa->dirty); } -void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) { spin_lock(&vcpu->arch.vpa_update_lock); unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); @@ -983,6 +1073,12 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) kmem_cache_free(kvm_vcpu_cache, vcpu); } +static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) +{ + /* Indicate we want to get back into the guest */ + return 1; +} + static void kvmppc_set_timer(struct kvm_vcpu *vcpu) { unsigned long dec_nsec, now; @@ -1264,8 +1360,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc) ret = RESUME_GUEST; if (vcpu->arch.trap) - ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu, - vcpu->arch.run_task); + ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, + vcpu->arch.run_task); vcpu->arch.ret = ret; vcpu->arch.trap = 0; @@ -1424,7 +1520,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) return vcpu->arch.ret; } -int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) +static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; int srcu_idx; @@ -1546,7 +1642,8 @@ static const struct file_operations kvm_rma_fops = { .release = kvm_rma_release, }; -long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) +static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, + struct kvm_allocate_rma *ret) { long fd; struct kvm_rma_info *ri; @@ -1592,7 +1689,8 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, (*sps)++; } -int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, 
struct kvm_ppc_smmu_info *info) +static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, + struct kvm_ppc_smmu_info *info) { struct kvm_ppc_one_seg_page_size *sps; @@ -1613,7 +1711,8 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) /* * Get (and clear) the dirty memory log for a memory slot. */ -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) +static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, + struct kvm_dirty_log *log) { struct kvm_memory_slot *memslot; int r; @@ -1667,8 +1766,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot) } } -void kvmppc_core_free_memslot(struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) +static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) { if (!dont || free->arch.rmap != dont->arch.rmap) { vfree(free->arch.rmap); @@ -1681,8 +1780,8 @@ void kvmppc_core_free_memslot(struct kvm_memory_slot *free, } } -int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, - unsigned long npages) +static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, + unsigned long npages) { slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); if (!slot->arch.rmap) @@ -1692,9 +1791,9 @@ int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, return 0; } -int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_userspace_memory_region *mem) +static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem) { unsigned long *phys; @@ -1710,9 +1809,9 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm, return 0; } -void kvmppc_core_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old) +static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old) { unsigned long npages = mem->memory_size >> PAGE_SHIFT; struct kvm_memory_slot *memslot; @@ -1729,6 +1828,37 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, } } +/* + * Update LPCR values in kvm->arch and in vcores. + * Caller must hold kvm->lock. 
+ */ +void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) +{ + long int i; + u32 cores_done = 0; + + if ((kvm->arch.lpcr & mask) == lpcr) + return; + + kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; + + for (i = 0; i < KVM_MAX_VCORES; ++i) { + struct kvmppc_vcore *vc = kvm->arch.vcores[i]; + if (!vc) + continue; + spin_lock(&vc->lock); + vc->lpcr = (vc->lpcr & ~mask) | lpcr; + spin_unlock(&vc->lock); + if (++cores_done >= kvm->arch.online_vcores) + break; + } +} + +static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) +{ + return; +} + static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) { int err = 0; @@ -1737,7 +1867,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) unsigned long hva; struct kvm_memory_slot *memslot; struct vm_area_struct *vma; - unsigned long lpcr, senc; + unsigned long lpcr = 0, senc; + unsigned long lpcr_mask = 0; unsigned long psize, porder; unsigned long rma_size; unsigned long rmls; @@ -1802,9 +1933,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) senc = slb_pgsize_encoding(psize); kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); - lpcr = kvm->arch.lpcr & ~LPCR_VRMASD; - lpcr |= senc << (LPCR_VRMASD_SH - 4); - kvm->arch.lpcr = lpcr; + lpcr_mask = LPCR_VRMASD; + /* the -4 is to account for senc values starting at 0x10 */ + lpcr = senc << (LPCR_VRMASD_SH - 4); /* Create HPTEs in the hash page table for the VRMA */ kvmppc_map_vrma(vcpu, memslot, porder); @@ -1825,23 +1956,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) kvm->arch.rma = ri; /* Update LPCR and RMOR */ - lpcr = kvm->arch.lpcr; if (cpu_has_feature(CPU_FTR_ARCH_201)) { /* PPC970; insert RMLS value (split field) in HID4 */ - lpcr &= ~((1ul << HID4_RMLS0_SH) | - (3ul << HID4_RMLS2_SH)); - lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) | + lpcr_mask = (1ul << HID4_RMLS0_SH) | + (3ul << HID4_RMLS2_SH) | HID4_RMOR; + lpcr = ((rmls >> 2) << HID4_RMLS0_SH) | ((rmls & 3) << HID4_RMLS2_SH); /* RMOR is also in HID4 */ lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) << HID4_RMOR_SH; } else { /* POWER7 */ - lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L); - lpcr |= rmls << LPCR_RMLS_SH; + lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS; + lpcr = rmls << LPCR_RMLS_SH; kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT; } - kvm->arch.lpcr = lpcr; pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); @@ -1860,6 +1989,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) } } + kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); + /* Order updates to kvm->arch.lpcr etc. vs. 
rma_setup_done */ smp_wmb(); kvm->arch.rma_setup_done = 1; @@ -1875,7 +2006,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) goto out_srcu; } -int kvmppc_core_init_vm(struct kvm *kvm) +static int kvmppc_core_init_vm_hv(struct kvm *kvm) { unsigned long lpcr, lpid; @@ -1893,9 +2024,6 @@ int kvmppc_core_init_vm(struct kvm *kvm) */ cpumask_setall(&kvm->arch.need_tlb_flush); - INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); - INIT_LIST_HEAD(&kvm->arch.rtas_tokens); - kvm->arch.rma = NULL; kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); @@ -1931,61 +2059,162 @@ int kvmppc_core_init_vm(struct kvm *kvm) return 0; } -void kvmppc_core_destroy_vm(struct kvm *kvm) +static void kvmppc_free_vcores(struct kvm *kvm) +{ + long int i; + + for (i = 0; i < KVM_MAX_VCORES; ++i) + kfree(kvm->arch.vcores[i]); + kvm->arch.online_vcores = 0; +} + +static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) { uninhibit_secondary_onlining(); + kvmppc_free_vcores(kvm); if (kvm->arch.rma) { kvm_release_rma(kvm->arch.rma); kvm->arch.rma = NULL; } - kvmppc_rtas_tokens_free(kvm); - kvmppc_free_hpt(kvm); - WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); } -/* These are stubs for now */ -void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) +/* We don't need to emulate any privileged instructions or dcbz */ +static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance) { + return EMULATE_FAIL; } -/* We don't need to emulate any privileged instructions or dcbz */ -int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int inst, int *advance) +static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, + ulong spr_val) { return EMULATE_FAIL; } -int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) +static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, + ulong *spr_val) { return EMULATE_FAIL; } -int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) +static int kvmppc_core_check_processor_compat_hv(void) { - return EMULATE_FAIL; + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return -EIO; + return 0; } -static int kvmppc_book3s_hv_init(void) +static long kvm_arch_vm_ioctl_hv(struct file *filp, + unsigned int ioctl, unsigned long arg) { - int r; + struct kvm *kvm __maybe_unused = filp->private_data; + void __user *argp = (void __user *)arg; + long r; - r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); + switch (ioctl) { - if (r) + case KVM_ALLOCATE_RMA: { + struct kvm_allocate_rma rma; + struct kvm *kvm = filp->private_data; + + r = kvm_vm_ioctl_allocate_rma(kvm, &rma); + if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) + r = -EFAULT; + break; + } + + case KVM_PPC_ALLOCATE_HTAB: { + u32 htab_order; + + r = -EFAULT; + if (get_user(htab_order, (u32 __user *)argp)) + break; + r = kvmppc_alloc_reset_hpt(kvm, &htab_order); + if (r) + break; + r = -EFAULT; + if (put_user(htab_order, (u32 __user *)argp)) + break; + r = 0; + break; + } + + case KVM_PPC_GET_HTAB_FD: { + struct kvm_get_htab_fd ghf; + + r = -EFAULT; + if (copy_from_user(&ghf, argp, sizeof(ghf))) + break; + r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); + break; + } + + default: + r = -ENOTTY; + } + + return r; +} + +static struct kvmppc_ops kvm_ops_hv = { + .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, + .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, + .get_one_reg = kvmppc_get_one_reg_hv, + .set_one_reg = kvmppc_set_one_reg_hv, + .vcpu_load = kvmppc_core_vcpu_load_hv, + .vcpu_put 
= kvmppc_core_vcpu_put_hv, + .set_msr = kvmppc_set_msr_hv, + .vcpu_run = kvmppc_vcpu_run_hv, + .vcpu_create = kvmppc_core_vcpu_create_hv, + .vcpu_free = kvmppc_core_vcpu_free_hv, + .check_requests = kvmppc_core_check_requests_hv, + .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv, + .flush_memslot = kvmppc_core_flush_memslot_hv, + .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, + .commit_memory_region = kvmppc_core_commit_memory_region_hv, + .unmap_hva = kvm_unmap_hva_hv, + .unmap_hva_range = kvm_unmap_hva_range_hv, + .age_hva = kvm_age_hva_hv, + .test_age_hva = kvm_test_age_hva_hv, + .set_spte_hva = kvm_set_spte_hva_hv, + .mmu_destroy = kvmppc_mmu_destroy_hv, + .free_memslot = kvmppc_core_free_memslot_hv, + .create_memslot = kvmppc_core_create_memslot_hv, + .init_vm = kvmppc_core_init_vm_hv, + .destroy_vm = kvmppc_core_destroy_vm_hv, + .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, + .emulate_op = kvmppc_core_emulate_op_hv, + .emulate_mtspr = kvmppc_core_emulate_mtspr_hv, + .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, + .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, + .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, +}; + +static int kvmppc_book3s_init_hv(void) +{ + int r; + /* + * FIXME!! Do we need to check on all cpus ? + */ + r = kvmppc_core_check_processor_compat_hv(); + if (r < 0) return r; - r = kvmppc_mmu_hv_init(); + kvm_ops_hv.owner = THIS_MODULE; + kvmppc_hv_ops = &kvm_ops_hv; + r = kvmppc_mmu_hv_init(); return r; } -static void kvmppc_book3s_hv_exit(void) +static void kvmppc_book3s_exit_hv(void) { - kvm_exit(); + kvmppc_hv_ops = NULL; } -module_init(kvmppc_book3s_hv_init); -module_exit(kvmppc_book3s_hv_exit); +module_init(kvmppc_book3s_init_hv); +module_exit(kvmppc_book3s_exit_hv); +MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 37f1cc417ca..928142c64cb 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -158,9 +158,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) * Interrupts are enabled again at this point. */ -.global kvmppc_handler_highmem -kvmppc_handler_highmem: - /* * Register usage at this point: * diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index c71103b8a74..bc8de75b192 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -33,30 +33,6 @@ #error Need to fix lppaca and SLB shadow accesses in little endian mode #endif -/***************************************************************************** - * * - * Real Mode handlers that need to be in the linear mapping * - * * - ****************************************************************************/ - - .globl kvmppc_skip_interrupt -kvmppc_skip_interrupt: - mfspr r13,SPRN_SRR0 - addi r13,r13,4 - mtspr SPRN_SRR0,r13 - GET_SCRATCH0(r13) - rfid - b . - - .globl kvmppc_skip_Hinterrupt -kvmppc_skip_Hinterrupt: - mfspr r13,SPRN_HSRR0 - addi r13,r13,4 - mtspr SPRN_HSRR0,r13 - GET_SCRATCH0(r13) - hrfid - b . - /* * Call kvmppc_hv_entry in real mode. * Must be called with interrupts hard-disabled. 
@@ -66,8 +42,11 @@ kvmppc_skip_Hinterrupt: * LR = return address to continue at after eventually re-enabling MMU */ _GLOBAL(kvmppc_hv_entry_trampoline) + mflr r0 + std r0, PPC_LR_STKOFF(r1) + stdu r1, -112(r1) mfmsr r10 - LOAD_REG_ADDR(r5, kvmppc_hv_entry) + LOAD_REG_ADDR(r5, kvmppc_call_hv_entry) li r0,MSR_RI andc r0,r10,r0 li r6,MSR_IR | MSR_DR @@ -77,11 +56,103 @@ _GLOBAL(kvmppc_hv_entry_trampoline) mtsrr1 r6 RFI -/****************************************************************************** - * * - * Entry code * - * * - *****************************************************************************/ +kvmppc_call_hv_entry: + bl kvmppc_hv_entry + + /* Back from guest - restore host state and return to caller */ + + /* Restore host DABR and DABRX */ + ld r5,HSTATE_DABR(r13) + li r6,7 + mtspr SPRN_DABR,r5 + mtspr SPRN_DABRX,r6 + + /* Restore SPRG3 */ + ld r3,PACA_SPRG3(r13) + mtspr SPRN_SPRG3,r3 + + /* + * Reload DEC. HDEC interrupts were disabled when + * we reloaded the host's LPCR value. + */ + ld r3, HSTATE_DECEXP(r13) + mftb r4 + subf r4, r4, r3 + mtspr SPRN_DEC, r4 + + /* Reload the host's PMU registers */ + ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ + lbz r4, LPPACA_PMCINUSE(r3) + cmpwi r4, 0 + beq 23f /* skip if not */ + lwz r3, HSTATE_PMC(r13) + lwz r4, HSTATE_PMC + 4(r13) + lwz r5, HSTATE_PMC + 8(r13) + lwz r6, HSTATE_PMC + 12(r13) + lwz r8, HSTATE_PMC + 16(r13) + lwz r9, HSTATE_PMC + 20(r13) +BEGIN_FTR_SECTION + lwz r10, HSTATE_PMC + 24(r13) + lwz r11, HSTATE_PMC + 28(r13) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) + mtspr SPRN_PMC1, r3 + mtspr SPRN_PMC2, r4 + mtspr SPRN_PMC3, r5 + mtspr SPRN_PMC4, r6 + mtspr SPRN_PMC5, r8 + mtspr SPRN_PMC6, r9 +BEGIN_FTR_SECTION + mtspr SPRN_PMC7, r10 + mtspr SPRN_PMC8, r11 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) + ld r3, HSTATE_MMCR(r13) + ld r4, HSTATE_MMCR + 8(r13) + ld r5, HSTATE_MMCR + 16(r13) + mtspr SPRN_MMCR1, r4 + mtspr SPRN_MMCRA, r5 + mtspr SPRN_MMCR0, r3 + isync +23: + + /* + * For external and machine check interrupts, we need + * to call the Linux handler to process the interrupt. + * We do that by jumping to absolute address 0x500 for + * external interrupts, or the machine_check_fwnmi label + * for machine checks (since firmware might have patched + * the vector area at 0x200). The [h]rfid at the end of the + * handler will return to the book3s_hv_interrupts.S code. + * For other interrupts we do the rfid to get back + * to the book3s_hv_interrupts.S code here. + */ + ld r8, 112+PPC_LR_STKOFF(r1) + addi r1, r1, 112 + ld r7, HSTATE_HOST_MSR(r13) + + cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL +BEGIN_FTR_SECTION + beq 11f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) + + /* RFI into the highmem handler, or branch to interrupt handler */ + mfmsr r6 + li r0, MSR_RI + andc r6, r6, r0 + mtmsrd r6, 1 /* Clear RI in MSR */ + mtsrr0 r8 + mtsrr1 r7 + beqa 0x500 /* external interrupt (PPC970) */ + beq cr1, 13f /* machine check */ + RFI + + /* On POWER7, we have external interrupts set to use HSRR0/1 */ +11: mtspr SPRN_HSRR0, r8 + mtspr SPRN_HSRR1, r7 + ba 0x500 + +13: b machine_check_fwnmi + /* * We come in here when wakened from nap mode on a secondary hw thread. @@ -137,7 +208,7 @@ kvm_start_guest: cmpdi r4,0 /* if we have no vcpu to run, go back to sleep */ beq kvm_no_guest - b kvmppc_hv_entry + b 30f 27: /* XXX should handle hypervisor maintenance interrupts etc. 
here */ b kvm_no_guest @@ -147,6 +218,57 @@ kvm_start_guest: stw r8,HSTATE_SAVED_XIRR(r13) b kvm_no_guest +30: bl kvmppc_hv_entry + + /* Back from the guest, go back to nap */ + /* Clear our vcpu pointer so we don't come back in early */ + li r0, 0 + std r0, HSTATE_KVM_VCPU(r13) + lwsync + /* Clear any pending IPI - we're an offline thread */ + ld r5, HSTATE_XICS_PHYS(r13) + li r7, XICS_XIRR + lwzcix r3, r5, r7 /* ack any pending interrupt */ + rlwinm. r0, r3, 0, 0xffffff /* any pending? */ + beq 37f + sync + li r0, 0xff + li r6, XICS_MFRR + stbcix r0, r5, r6 /* clear the IPI */ + stwcix r3, r5, r7 /* EOI it */ +37: sync + + /* increment the nap count and then go to nap mode */ + ld r4, HSTATE_KVM_VCORE(r13) + addi r4, r4, VCORE_NAP_COUNT + lwsync /* make previous updates visible */ +51: lwarx r3, 0, r4 + addi r3, r3, 1 + stwcx. r3, 0, r4 + bne 51b + +kvm_no_guest: + li r0, KVM_HWTHREAD_IN_NAP + stb r0, HSTATE_HWTHREAD_STATE(r13) + li r3, LPCR_PECE0 + mfspr r4, SPRN_LPCR + rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 + mtspr SPRN_LPCR, r4 + isync + std r0, HSTATE_SCRATCH0(r13) + ptesync + ld r0, HSTATE_SCRATCH0(r13) +1: cmpd r0, r0 + bne 1b + nap + b . + +/****************************************************************************** + * * + * Entry code * + * * + *****************************************************************************/ + .global kvmppc_hv_entry kvmppc_hv_entry: @@ -159,7 +281,8 @@ kvmppc_hv_entry: * all other volatile GPRS = free */ mflr r0 - std r0, HSTATE_VMHANDLER(r13) + std r0, PPC_LR_STKOFF(r1) + stdu r1, -112(r1) /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ @@ -200,8 +323,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) ld r3, VCPU_MMCR(r4) ld r5, VCPU_MMCR + 8(r4) ld r6, VCPU_MMCR + 16(r4) + ld r7, VCPU_SIAR(r4) + ld r8, VCPU_SDAR(r4) mtspr SPRN_MMCR1, r5 mtspr SPRN_MMCRA, r6 + mtspr SPRN_SIAR, r7 + mtspr SPRN_SDAR, r8 mtspr SPRN_MMCR0, r3 isync @@ -254,22 +381,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) /* Save R1 in the PACA */ std r1, HSTATE_HOST_R1(r13) - /* Increment yield count if they have a VPA */ - ld r3, VCPU_VPA(r4) - cmpdi r3, 0 - beq 25f - lwz r5, LPPACA_YIELDCOUNT(r3) - addi r5, r5, 1 - stw r5, LPPACA_YIELDCOUNT(r3) - li r6, 1 - stb r6, VCPU_VPA_DIRTY(r4) -25: /* Load up DAR and DSISR */ ld r5, VCPU_DAR(r4) lwz r6, VCPU_DSISR(r4) mtspr SPRN_DAR, r5 mtspr SPRN_DSISR, r6 + li r6, KVM_GUEST_MODE_HOST_HV + stb r6, HSTATE_IN_GUEST(r13) + BEGIN_FTR_SECTION /* Restore AMR and UAMOR, set AMOR to all 1s */ ld r5,VCPU_AMR(r4) @@ -343,7 +463,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) bdnz 28b ptesync -22: li r0,1 + /* Add timebase offset onto timebase */ +22: ld r8,VCORE_TB_OFFSET(r5) + cmpdi r8,0 + beq 37f + mftb r6 /* current host timebase */ + add r8,r8,r6 + mtspr SPRN_TBU40,r8 /* update upper 40 bits */ + mftb r7 /* check if lower 24 bits overflowed */ + clrldi r6,r6,40 + clrldi r7,r7,40 + cmpld r7,r6 + bge 37f + addis r8,r8,0x100 /* if so, increment upper 40 bits */ + mtspr SPRN_TBU40,r8 + + /* Load guest PCR value to select appropriate compat mode */ +37: ld r7, VCORE_PCR(r5) + cmpdi r7, 0 + beq 38f + mtspr SPRN_PCR, r7 +38: + li r0,1 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ b 10f @@ -353,12 +494,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) beq 20b /* Set LPCR and RMOR. 
*/ -10: ld r8,KVM_LPCR(r9) +10: ld r8,VCORE_LPCR(r5) mtspr SPRN_LPCR,r8 ld r8,KVM_RMOR(r9) mtspr SPRN_RMOR,r8 isync + /* Increment yield count if they have a VPA */ + ld r3, VCPU_VPA(r4) + cmpdi r3, 0 + beq 25f + lwz r5, LPPACA_YIELDCOUNT(r3) + addi r5, r5, 1 + stw r5, LPPACA_YIELDCOUNT(r3) + li r6, 1 + stb r6, VCPU_VPA_DIRTY(r4) +25: /* Check if HDEC expires soon */ mfspr r3,SPRN_HDEC cmpwi r3,10 @@ -405,7 +556,8 @@ toc_tlbie_lock: bne 24b isync - ld r7,KVM_LPCR(r9) /* use kvm->arch.lpcr to store HID4 */ + ld r5,HSTATE_KVM_VCORE(r13) + ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */ li r0,0x18f rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ or r0,r7,r0 @@ -541,7 +693,7 @@ fast_guest_return: mtspr SPRN_HSRR1,r11 /* Activate guest mode, so faults get handled by KVM */ - li r9, KVM_GUEST_MODE_GUEST + li r9, KVM_GUEST_MODE_GUEST_HV stb r9, HSTATE_IN_GUEST(r13) /* Enter guest */ @@ -550,13 +702,15 @@ BEGIN_FTR_SECTION ld r5, VCPU_CFAR(r4) mtspr SPRN_CFAR, r5 END_FTR_SECTION_IFSET(CPU_FTR_CFAR) +BEGIN_FTR_SECTION + ld r0, VCPU_PPR(r4) +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5, VCPU_LR(r4) lwz r6, VCPU_CR(r4) mtlr r5 mtcr r6 - ld r0, VCPU_GPR(R0)(r4) ld r1, VCPU_GPR(R1)(r4) ld r2, VCPU_GPR(R2)(r4) ld r3, VCPU_GPR(R3)(r4) @@ -570,6 +724,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r12, VCPU_GPR(R12)(r4) ld r13, VCPU_GPR(R13)(r4) +BEGIN_FTR_SECTION + mtspr SPRN_PPR, r0 +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) + ld r0, VCPU_GPR(R0)(r4) ld r4, VCPU_GPR(R4)(r4) hrfid @@ -584,8 +742,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) /* * We come here from the first-level interrupt handlers. */ - .globl kvmppc_interrupt -kvmppc_interrupt: + .globl kvmppc_interrupt_hv +kvmppc_interrupt_hv: /* * Register contents: * R12 = interrupt vector @@ -595,6 +753,19 @@ kvmppc_interrupt: */ /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ std r9, HSTATE_HOST_R2(r13) + + lbz r9, HSTATE_IN_GUEST(r13) + cmpwi r9, KVM_GUEST_MODE_HOST_HV + beq kvmppc_bad_host_intr +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE + cmpwi r9, KVM_GUEST_MODE_GUEST + ld r9, HSTATE_HOST_R2(r13) + beq kvmppc_interrupt_pr +#endif + /* We're now back in the host but in guest MMU context */ + li r9, KVM_GUEST_MODE_HOST_HV + stb r9, HSTATE_IN_GUEST(r13) + ld r9, HSTATE_KVM_VCPU(r13) /* Save registers */ @@ -620,6 +791,10 @@ BEGIN_FTR_SECTION ld r3, HSTATE_CFAR(r13) std r3, VCPU_CFAR(r9) END_FTR_SECTION_IFSET(CPU_FTR_CFAR) +BEGIN_FTR_SECTION + ld r4, HSTATE_PPR(r13) + std r4, VCPU_PPR(r9) +END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Restore R1/R2 so we can handle faults */ ld r1, HSTATE_HOST_R1(r13) @@ -642,10 +817,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r3, VCPU_GPR(R13)(r9) std r4, VCPU_LR(r9) - /* Unset guest mode */ - li r0, KVM_GUEST_MODE_NONE - stb r0, HSTATE_IN_GUEST(r13) - stw r12,VCPU_TRAP(r9) /* Save HEIR (HV emulation assist reg) in last_inst @@ -696,46 +867,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) * set, we know the host wants us out so let's do it now */ do_ext_interrupt: - lbz r0, HSTATE_HOST_IPI(r13) - cmpwi r0, 0 - bne ext_interrupt_to_host - - /* Now read the interrupt from the ICP */ - ld r5, HSTATE_XICS_PHYS(r13) - li r7, XICS_XIRR - cmpdi r5, 0 - beq- ext_interrupt_to_host - lwzcix r3, r5, r7 - rlwinm. r0, r3, 0, 0xffffff - sync - beq 3f /* if nothing pending in the ICP */ - - /* We found something in the ICP... 
- * - * If it's not an IPI, stash it in the PACA and return to - * the host, we don't (yet) handle directing real external - * interrupts directly to the guest - */ - cmpwi r0, XICS_IPI - bne ext_stash_for_host - - /* It's an IPI, clear the MFRR and EOI it */ - li r0, 0xff - li r6, XICS_MFRR - stbcix r0, r5, r6 /* clear the IPI */ - stwcix r3, r5, r7 /* EOI it */ - sync - - /* We need to re-check host IPI now in case it got set in the - * meantime. If it's clear, we bounce the interrupt to the - * guest - */ - lbz r0, HSTATE_HOST_IPI(r13) - cmpwi r0, 0 - bne- 1f + bl kvmppc_read_intr + cmpdi r3, 0 + bgt ext_interrupt_to_host /* Allright, looks like an IPI for the guest, we need to set MER */ -3: /* Check if any CPU is heading out to the host, if so head out too */ ld r5, HSTATE_KVM_VCORE(r13) lwz r0, VCORE_ENTRY_EXIT(r5) @@ -764,27 +900,9 @@ do_ext_interrupt: mtspr SPRN_LPCR, r8 b fast_guest_return - /* We raced with the host, we need to resend that IPI, bummer */ -1: li r0, IPI_PRIORITY - stbcix r0, r5, r6 /* set the IPI */ - sync - b ext_interrupt_to_host - -ext_stash_for_host: - /* It's not an IPI and it's for the host, stash it in the PACA - * before exit, it will be picked up by the host ICP driver - */ - stw r3, HSTATE_SAVED_XIRR(r13) ext_interrupt_to_host: guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ - /* Save DEC */ - mfspr r5,SPRN_DEC - mftb r6 - extsw r5,r5 - add r5,r5,r6 - std r5,VCPU_DEC_EXPIRES(r9) - /* Save more register state */ mfdar r6 mfdsisr r7 @@ -954,7 +1072,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) mtspr SPRN_SDR1,r6 /* switch to partition page table */ mtspr SPRN_LPID,r7 isync - li r0,0 + + /* Subtract timebase offset from timebase */ + ld r8,VCORE_TB_OFFSET(r5) + cmpdi r8,0 + beq 17f + mftb r6 /* current host timebase */ + subf r8,r8,r6 + mtspr SPRN_TBU40,r8 /* update upper 40 bits */ + mftb r7 /* check if lower 24 bits overflowed */ + clrldi r6,r6,40 + clrldi r7,r7,40 + cmpld r7,r6 + bge 17f + addis r8,r8,0x100 /* if so, increment upper 40 bits */ + mtspr SPRN_TBU40,r8 + + /* Reset PCR */ +17: ld r0, VCORE_PCR(r5) + cmpdi r0, 0 + beq 18f + li r0, 0 + mtspr SPRN_PCR, r0 +18: + /* Signal secondary CPUs to continue */ stb r0,VCORE_IN_GUEST(r5) lis r8,0x7fff /* MAX_INT@h */ mtspr SPRN_HDEC,r8 @@ -1052,6 +1193,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1: addi r8,r8,16 .endr + /* Save DEC */ + mfspr r5,SPRN_DEC + mftb r6 + extsw r5,r5 + add r5,r5,r6 + std r5,VCPU_DEC_EXPIRES(r9) + /* Save and reset AMR and UAMOR before turning on the MMU */ BEGIN_FTR_SECTION mfspr r5,SPRN_AMR @@ -1062,6 +1210,10 @@ BEGIN_FTR_SECTION mtspr SPRN_AMR,r6 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) + /* Unset guest mode */ + li r0, KVM_GUEST_MODE_NONE + stb r0, HSTATE_IN_GUEST(r13) + /* Switch DSCR back to host value */ BEGIN_FTR_SECTION mfspr r8, SPRN_DSCR @@ -1134,9 +1286,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ b 22f 21: mfspr r5, SPRN_MMCR1 + mfspr r7, SPRN_SIAR + mfspr r8, SPRN_SDAR std r4, VCPU_MMCR(r9) std r5, VCPU_MMCR + 8(r9) std r6, VCPU_MMCR + 16(r9) + std r7, VCPU_SIAR(r9) + std r8, VCPU_SDAR(r9) mfspr r3, SPRN_PMC1 mfspr r4, SPRN_PMC2 mfspr r5, SPRN_PMC3 @@ -1158,103 +1314,30 @@ BEGIN_FTR_SECTION stw r11, VCPU_PMC + 28(r9) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 22: + ld r0, 112+PPC_LR_STKOFF(r1) + addi r1, r1, 112 + mtlr r0 + blr +secondary_too_late: + ld r5,HSTATE_KVM_VCORE(r13) + HMT_LOW +13: lbz r3,VCORE_IN_GUEST(r5) + cmpwi r3,0 + bne 13b + HMT_MEDIUM + li r0, KVM_GUEST_MODE_NONE + stb r0, 
HSTATE_IN_GUEST(r13) + ld r11,PACA_SLBSHADOWPTR(r13) - /* Secondary threads go off to take a nap on POWER7 */ -BEGIN_FTR_SECTION - lwz r0,VCPU_PTID(r9) - cmpwi r0,0 - bne secondary_nap -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) - - /* Restore host DABR and DABRX */ - ld r5,HSTATE_DABR(r13) - li r6,7 - mtspr SPRN_DABR,r5 - mtspr SPRN_DABRX,r6 - - /* Restore SPRG3 */ - ld r3,PACA_SPRG3(r13) - mtspr SPRN_SPRG3,r3 - - /* - * Reload DEC. HDEC interrupts were disabled when - * we reloaded the host's LPCR value. - */ - ld r3, HSTATE_DECEXP(r13) - mftb r4 - subf r4, r4, r3 - mtspr SPRN_DEC, r4 - - /* Reload the host's PMU registers */ - ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ - lbz r4, LPPACA_PMCINUSE(r3) - cmpwi r4, 0 - beq 23f /* skip if not */ - lwz r3, HSTATE_PMC(r13) - lwz r4, HSTATE_PMC + 4(r13) - lwz r5, HSTATE_PMC + 8(r13) - lwz r6, HSTATE_PMC + 12(r13) - lwz r8, HSTATE_PMC + 16(r13) - lwz r9, HSTATE_PMC + 20(r13) -BEGIN_FTR_SECTION - lwz r10, HSTATE_PMC + 24(r13) - lwz r11, HSTATE_PMC + 28(r13) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) - mtspr SPRN_PMC1, r3 - mtspr SPRN_PMC2, r4 - mtspr SPRN_PMC3, r5 - mtspr SPRN_PMC4, r6 - mtspr SPRN_PMC5, r8 - mtspr SPRN_PMC6, r9 -BEGIN_FTR_SECTION - mtspr SPRN_PMC7, r10 - mtspr SPRN_PMC8, r11 -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) - ld r3, HSTATE_MMCR(r13) - ld r4, HSTATE_MMCR + 8(r13) - ld r5, HSTATE_MMCR + 16(r13) - mtspr SPRN_MMCR1, r4 - mtspr SPRN_MMCRA, r5 - mtspr SPRN_MMCR0, r3 - isync -23: - /* - * For external and machine check interrupts, we need - * to call the Linux handler to process the interrupt. - * We do that by jumping to absolute address 0x500 for - * external interrupts, or the machine_check_fwnmi label - * for machine checks (since firmware might have patched - * the vector area at 0x200). The [h]rfid at the end of the - * handler will return to the book3s_hv_interrupts.S code. - * For other interrupts we do the rfid to get back - * to the book3s_hv_interrupts.S code here. - */ - ld r8, HSTATE_VMHANDLER(r13) - ld r7, HSTATE_HOST_MSR(r13) - - cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK - cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL -BEGIN_FTR_SECTION - beq 11f -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) - - /* RFI into the highmem handler, or branch to interrupt handler */ - mfmsr r6 - li r0, MSR_RI - andc r6, r6, r0 - mtmsrd r6, 1 /* Clear RI in MSR */ - mtsrr0 r8 - mtsrr1 r7 - beqa 0x500 /* external interrupt (PPC970) */ - beq cr1, 13f /* machine check */ - RFI - - /* On POWER7, we have external interrupts set to use HSRR0/1 */ -11: mtspr SPRN_HSRR0, r8 - mtspr SPRN_HSRR1, r7 - ba 0x500 - -13: b machine_check_fwnmi + .rept SLB_NUM_BOLTED + ld r5,SLBSHADOW_SAVEAREA(r11) + ld r6,SLBSHADOW_SAVEAREA+8(r11) + andis. r7,r5,SLB_ESID_V@h + beq 1f + slbmte r6,r5 +1: addi r11,r11,16 + .endr + b 22b /* * Check whether an HDSI is an HPTE not found fault or something else. @@ -1333,7 +1416,7 @@ fast_interrupt_c_return: stw r8, VCPU_LAST_INST(r9) /* Unset guest mode. */ - li r0, KVM_GUEST_MODE_NONE + li r0, KVM_GUEST_MODE_HOST_HV stb r0, HSTATE_IN_GUEST(r13) b guest_exit_cont @@ -1701,67 +1784,70 @@ machine_check_realmode: rotldi r11, r11, 63 b fast_interrupt_c_return -secondary_too_late: - ld r5,HSTATE_KVM_VCORE(r13) - HMT_LOW -13: lbz r3,VCORE_IN_GUEST(r5) - cmpwi r3,0 - bne 13b - HMT_MEDIUM - ld r11,PACA_SLBSHADOWPTR(r13) - - .rept SLB_NUM_BOLTED - ld r5,SLBSHADOW_SAVEAREA(r11) - ld r6,SLBSHADOW_SAVEAREA+8(r11) - andis. 
r7,r5,SLB_ESID_V@h - beq 1f - slbmte r6,r5 -1: addi r11,r11,16 - .endr +/* + * Determine what sort of external interrupt is pending (if any). + * Returns: + * 0 if no interrupt is pending + * 1 if an interrupt is pending that needs to be handled by the host + * -1 if there was a guest wakeup IPI (which has now been cleared) + */ +kvmppc_read_intr: + /* see if a host IPI is pending */ + li r3, 1 + lbz r0, HSTATE_HOST_IPI(r13) + cmpwi r0, 0 + bne 1f -secondary_nap: - /* Clear our vcpu pointer so we don't come back in early */ - li r0, 0 - std r0, HSTATE_KVM_VCPU(r13) - lwsync - /* Clear any pending IPI - assume we're a secondary thread */ - ld r5, HSTATE_XICS_PHYS(r13) + /* Now read the interrupt from the ICP */ + ld r6, HSTATE_XICS_PHYS(r13) li r7, XICS_XIRR - lwzcix r3, r5, r7 /* ack any pending interrupt */ - rlwinm. r0, r3, 0, 0xffffff /* any pending? */ - beq 37f + cmpdi r6, 0 + beq- 1f + lwzcix r0, r6, r7 + rlwinm. r3, r0, 0, 0xffffff sync - li r0, 0xff - li r6, XICS_MFRR - stbcix r0, r5, r6 /* clear the IPI */ - stwcix r3, r5, r7 /* EOI it */ -37: sync + beq 1f /* if nothing pending in the ICP */ - /* increment the nap count and then go to nap mode */ - ld r4, HSTATE_KVM_VCORE(r13) - addi r4, r4, VCORE_NAP_COUNT - lwsync /* make previous updates visible */ -51: lwarx r3, 0, r4 - addi r3, r3, 1 - stwcx. r3, 0, r4 - bne 51b + /* We found something in the ICP... + * + * If it's not an IPI, stash it in the PACA and return to + * the host, we don't (yet) handle directing real external + * interrupts directly to the guest + */ + cmpwi r3, XICS_IPI /* if there is, is it an IPI? */ + li r3, 1 + bne 42f -kvm_no_guest: - li r0, KVM_HWTHREAD_IN_NAP - stb r0, HSTATE_HWTHREAD_STATE(r13) + /* It's an IPI, clear the MFRR and EOI it */ + li r3, 0xff + li r8, XICS_MFRR + stbcix r3, r6, r8 /* clear the IPI */ + stwcix r0, r6, r7 /* EOI it */ + sync - li r3, LPCR_PECE0 - mfspr r4, SPRN_LPCR - rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 - mtspr SPRN_LPCR, r4 - isync - std r0, HSTATE_SCRATCH0(r13) - ptesync - ld r0, HSTATE_SCRATCH0(r13) -1: cmpd r0, r0 - bne 1b - nap - b . + /* We need to re-check host IPI now in case it got set in the + * meantime. If it's clear, we bounce the interrupt to the + * guest + */ + lbz r0, HSTATE_HOST_IPI(r13) + cmpwi r0, 0 + bne- 43f + + /* OK, it's an IPI for us */ + li r3, -1 +1: blr + +42: /* It's not an IPI and it's for the host, stash it in the PACA + * before exit, it will be picked up by the host ICP driver + */ + stw r0, HSTATE_SAVED_XIRR(r13) + b 1b + +43: /* We raced with the host, we need to resend that IPI, bummer */ + li r0, IPI_PRIORITY + stbcix r0, r6, r8 /* set the IPI */ + sync + b 1b /* * Save away FP, VMX and VSX registers. @@ -1879,3 +1965,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) lwz r7,VCPU_VRSAVE(r4) mtspr SPRN_VRSAVE,r7 blr + +/* + * We come here if we get any exception or interrupt while we are + * executing host real mode code while in guest MMU context. + * For now just spin, but we should do something better. + */ +kvmppc_bad_host_intr: + b . 
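
Note: the hunk above adds kvmppc_read_intr, whose comment documents a three-way return contract (0 = nothing pending, 1 = the host must handle the interrupt, -1 = a guest wakeup IPI that has now been cleared). Below is a minimal standalone C sketch of that decision flow, for illustration only. The mock_icp struct, the read_intr helper, and the XICS_IPI/IPI_PRIORITY values here are simplified stand-ins of my own, not the kernel's real XICS state or register access sequence (lwzcix/stbcix on the real ICP); the sketch only mirrors the ordering of checks and the return values.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define XICS_IPI      2      /* assumed IPI source number, illustration only */
#define IPI_PRIORITY  4      /* assumed priority used to re-raise a lost IPI */

struct mock_icp {
	uint32_t xirr;       /* pending interrupt; source in the low 24 bits */
	uint8_t  mfrr;       /* mediated FRR used to signal IPIs */
	bool     host_ipi;   /* analogue of HSTATE_HOST_IPI */
	uint32_t saved_xirr; /* analogue of HSTATE_SAVED_XIRR */
};

/* Returns 0: nothing pending, 1: host must handle it, -1: guest wakeup IPI. */
static int read_intr(struct mock_icp *icp)
{
	/* A host IPI flag means the host wants us out; let it handle things. */
	if (icp->host_ipi)
		return 1;

	uint32_t xirr = icp->xirr;
	if ((xirr & 0xffffff) == 0)
		return 0;                   /* nothing pending in the ICP */

	/* Not an IPI: stash it so the host ICP driver can pick it up later. */
	if ((xirr & 0xffffff) != XICS_IPI) {
		icp->saved_xirr = xirr;
		return 1;
	}

	/* It is an IPI: clear the MFRR and EOI it. */
	icp->mfrr = 0xff;
	icp->xirr = 0;

	/* Re-check the host IPI flag; if we raced, re-raise the IPI for the host. */
	if (icp->host_ipi) {
		icp->mfrr = IPI_PRIORITY;
		return 1;
	}

	return -1;                          /* guest wakeup IPI, now consumed */
}

int main(void)
{
	struct mock_icp icp = { .xirr = XICS_IPI };
	printf("read_intr -> %d\n", read_intr(&icp));   /* prints -1 */
	return 0;
}

The point of the refactor in this file is visible in the sketch: both do_ext_interrupt and the secondary-thread wakeup path can now call one routine and branch on a small integer, instead of duplicating the ICP poll/EOI/race-recheck sequence inline as the removed lines did.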
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 17cfae5497a..f4dd041c14e 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -26,8 +26,12 @@ #if defined(CONFIG_PPC_BOOK3S_64) #define FUNC(name) GLUE(.,name) +#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU + #elif defined(CONFIG_PPC_BOOK3S_32) #define FUNC(name) name +#define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2) + #endif /* CONFIG_PPC_BOOK3S_XX */ #define VCPU_LOAD_NVGPRS(vcpu) \ @@ -87,8 +91,14 @@ kvm_start_entry: VCPU_LOAD_NVGPRS(r4) kvm_start_lightweight: + /* Copy registers into shadow vcpu so we can access them in real mode */ + GET_SHADOW_VCPU(r3) + bl FUNC(kvmppc_copy_to_svcpu) + nop + REST_GPR(4, r1) #ifdef CONFIG_PPC_BOOK3S_64 + /* Get the dcbz32 flag */ PPC_LL r3, VCPU_HFLAGS(r4) rldicl r3, r3, 0, 63 /* r3 &= 1 */ stb r3, HSTATE_RESTORE_HID5(r13) @@ -111,9 +121,6 @@ kvm_start_lightweight: * */ -.global kvmppc_handler_highmem -kvmppc_handler_highmem: - /* * Register usage at this point: * @@ -125,18 +132,31 @@ kvmppc_handler_highmem: * */ - /* R7 = vcpu */ - PPC_LL r7, GPR4(r1) + /* Transfer reg values from shadow vcpu back to vcpu struct */ + /* On 64-bit, interrupts are still off at this point */ + PPC_LL r3, GPR4(r1) /* vcpu pointer */ + GET_SHADOW_VCPU(r4) + bl FUNC(kvmppc_copy_from_svcpu) + nop #ifdef CONFIG_PPC_BOOK3S_64 + /* Re-enable interrupts */ + ld r3, HSTATE_HOST_MSR(r13) + ori r3, r3, MSR_EE + MTMSR_EERI(r3) + /* * Reload kernel SPRG3 value. * No need to save guest value as usermode can't modify SPRG3. */ ld r3, PACA_SPRG3(r13) mtspr SPRN_SPRG3, r3 + #endif /* CONFIG_PPC_BOOK3S_64 */ + /* R7 = vcpu */ + PPC_LL r7, GPR4(r1) + PPC_STL r14, VCPU_GPR(R14)(r7) PPC_STL r15, VCPU_GPR(R15)(r7) PPC_STL r16, VCPU_GPR(R16)(r7) @@ -161,7 +181,7 @@ kvmppc_handler_highmem: /* Restore r3 (kvm_run) and r4 (vcpu) */ REST_2GPRS(3, r1) - bl FUNC(kvmppc_handle_exit) + bl FUNC(kvmppc_handle_exit_pr) /* If RESUME_GUEST, get back in the loop */ cmpwi r3, RESUME_GUEST diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index da8b13c4b77..5a1ab1250a0 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c @@ -28,7 +28,7 @@ #include <asm/mmu_context.h> #include <asm/hw_irq.h> -#include "trace.h" +#include "trace_pr.h" #define PTE_SIZE 12 @@ -56,6 +56,14 @@ static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage) HPTEG_HASH_BITS_VPTE_LONG); } +#ifdef CONFIG_PPC_BOOK3S_64 +static inline u64 kvmppc_mmu_hash_vpte_64k(u64 vpage) +{ + return hash_64((vpage & 0xffffffff0ULL) >> 4, + HPTEG_HASH_BITS_VPTE_64K); +} +#endif + void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) { u64 index; @@ -83,6 +91,15 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) hlist_add_head_rcu(&pte->list_vpte_long, &vcpu3s->hpte_hash_vpte_long[index]); +#ifdef CONFIG_PPC_BOOK3S_64 + /* Add to vPTE_64k list */ + index = kvmppc_mmu_hash_vpte_64k(pte->pte.vpage); + hlist_add_head_rcu(&pte->list_vpte_64k, + &vcpu3s->hpte_hash_vpte_64k[index]); +#endif + + vcpu3s->hpte_cache_count++; + spin_unlock(&vcpu3s->mmu_lock); } @@ -113,10 +130,13 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) hlist_del_init_rcu(&pte->list_pte_long); hlist_del_init_rcu(&pte->list_vpte); hlist_del_init_rcu(&pte->list_vpte_long); +#ifdef CONFIG_PPC_BOOK3S_64 + hlist_del_init_rcu(&pte->list_vpte_64k); +#endif + vcpu3s->hpte_cache_count--; 
spin_unlock(&vcpu3s->mmu_lock); - vcpu3s->hpte_cache_count--; call_rcu(&pte->rcu_head, free_pte_rcu); } @@ -219,6 +239,29 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) rcu_read_unlock(); } +#ifdef CONFIG_PPC_BOOK3S_64 +/* Flush with mask 0xffffffff0 */ +static void kvmppc_mmu_pte_vflush_64k(struct kvm_vcpu *vcpu, u64 guest_vp) +{ + struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); + struct hlist_head *list; + struct hpte_cache *pte; + u64 vp_mask = 0xffffffff0ULL; + + list = &vcpu3s->hpte_hash_vpte_64k[ + kvmppc_mmu_hash_vpte_64k(guest_vp)]; + + rcu_read_lock(); + + /* Check the list for matching entries and invalidate */ + hlist_for_each_entry_rcu(pte, list, list_vpte_64k) + if ((pte->pte.vpage & vp_mask) == guest_vp) + invalidate_pte(vcpu, pte); + + rcu_read_unlock(); +} +#endif + /* Flush with mask 0xffffff000 */ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) { @@ -249,6 +292,11 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) case 0xfffffffffULL: kvmppc_mmu_pte_vflush_short(vcpu, guest_vp); break; +#ifdef CONFIG_PPC_BOOK3S_64 + case 0xffffffff0ULL: + kvmppc_mmu_pte_vflush_64k(vcpu, guest_vp); + break; +#endif case 0xffffff000ULL: kvmppc_mmu_pte_vflush_long(vcpu, guest_vp); break; @@ -285,15 +333,19 @@ struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); struct hpte_cache *pte; - pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL); - vcpu3s->hpte_cache_count++; - if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM) kvmppc_mmu_pte_flush_all(vcpu); + pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL); + return pte; } +void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte) +{ + kmem_cache_free(hpte_cache, pte); +} + void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu) { kvmppc_mmu_pte_flush(vcpu, 0, 0); @@ -320,6 +372,10 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu) ARRAY_SIZE(vcpu3s->hpte_hash_vpte)); kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long, ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long)); +#ifdef CONFIG_PPC_BOOK3S_64 + kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_64k, + ARRAY_SIZE(vcpu3s->hpte_hash_vpte_64k)); +#endif spin_lock_init(&vcpu3s->mmu_lock); diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 27db1e66595..fe14ca3dd17 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -40,8 +40,12 @@ #include <linux/sched.h> #include <linux/vmalloc.h> #include <linux/highmem.h> +#include <linux/module.h> -#include "trace.h" +#include "book3s.h" + +#define CREATE_TRACE_POINTS +#include "trace_pr.h" /* #define EXIT_DEBUG */ /* #define DEBUG_EXT */ @@ -56,29 +60,25 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #define HW_PAGE_SIZE PAGE_SIZE #endif -void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); - memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu, - sizeof(get_paca()->shadow_vcpu)); svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; svcpu_put(svcpu); #endif vcpu->cpu = smp_processor_id(); #ifdef CONFIG_PPC_BOOK3S_32 - current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu; + current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; #endif } -void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 
+static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_BOOK3S_64 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); - memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, - sizeof(get_paca()->shadow_vcpu)); to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; svcpu_put(svcpu); #endif @@ -87,7 +87,61 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) vcpu->cpu = -1; } -int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) +/* Copy data needed by real-mode code from vcpu to shadow vcpu */ +void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, + struct kvm_vcpu *vcpu) +{ + svcpu->gpr[0] = vcpu->arch.gpr[0]; + svcpu->gpr[1] = vcpu->arch.gpr[1]; + svcpu->gpr[2] = vcpu->arch.gpr[2]; + svcpu->gpr[3] = vcpu->arch.gpr[3]; + svcpu->gpr[4] = vcpu->arch.gpr[4]; + svcpu->gpr[5] = vcpu->arch.gpr[5]; + svcpu->gpr[6] = vcpu->arch.gpr[6]; + svcpu->gpr[7] = vcpu->arch.gpr[7]; + svcpu->gpr[8] = vcpu->arch.gpr[8]; + svcpu->gpr[9] = vcpu->arch.gpr[9]; + svcpu->gpr[10] = vcpu->arch.gpr[10]; + svcpu->gpr[11] = vcpu->arch.gpr[11]; + svcpu->gpr[12] = vcpu->arch.gpr[12]; + svcpu->gpr[13] = vcpu->arch.gpr[13]; + svcpu->cr = vcpu->arch.cr; + svcpu->xer = vcpu->arch.xer; + svcpu->ctr = vcpu->arch.ctr; + svcpu->lr = vcpu->arch.lr; + svcpu->pc = vcpu->arch.pc; +} + +/* Copy data touched by real-mode code from shadow vcpu back to vcpu */ +void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, + struct kvmppc_book3s_shadow_vcpu *svcpu) +{ + vcpu->arch.gpr[0] = svcpu->gpr[0]; + vcpu->arch.gpr[1] = svcpu->gpr[1]; + vcpu->arch.gpr[2] = svcpu->gpr[2]; + vcpu->arch.gpr[3] = svcpu->gpr[3]; + vcpu->arch.gpr[4] = svcpu->gpr[4]; + vcpu->arch.gpr[5] = svcpu->gpr[5]; + vcpu->arch.gpr[6] = svcpu->gpr[6]; + vcpu->arch.gpr[7] = svcpu->gpr[7]; + vcpu->arch.gpr[8] = svcpu->gpr[8]; + vcpu->arch.gpr[9] = svcpu->gpr[9]; + vcpu->arch.gpr[10] = svcpu->gpr[10]; + vcpu->arch.gpr[11] = svcpu->gpr[11]; + vcpu->arch.gpr[12] = svcpu->gpr[12]; + vcpu->arch.gpr[13] = svcpu->gpr[13]; + vcpu->arch.cr = svcpu->cr; + vcpu->arch.xer = svcpu->xer; + vcpu->arch.ctr = svcpu->ctr; + vcpu->arch.lr = svcpu->lr; + vcpu->arch.pc = svcpu->pc; + vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; + vcpu->arch.fault_dar = svcpu->fault_dar; + vcpu->arch.fault_dsisr = svcpu->fault_dsisr; + vcpu->arch.last_inst = svcpu->last_inst; +} + +static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) { int r = 1; /* Indicate we want to get back into the guest */ @@ -100,44 +154,69 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) } /************* MMU Notifiers *************/ +static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start, + unsigned long end) +{ + long i; + struct kvm_vcpu *vcpu; + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) { + unsigned long hva_start, hva_end; + gfn_t gfn, gfn_end; + + hva_start = max(start, memslot->userspace_addr); + hva_end = min(end, memslot->userspace_addr + + (memslot->npages << PAGE_SHIFT)); + if (hva_start >= hva_end) + continue; + /* + * {gfn(page) | page intersects with [hva_start, hva_end)} = + * {gfn, gfn+1, ..., gfn_end-1}. 
+ */ + gfn = hva_to_gfn_memslot(hva_start, memslot); + gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); + kvm_for_each_vcpu(i, vcpu, kvm) + kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT, + gfn_end << PAGE_SHIFT); + } +} -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva) { trace_kvm_unmap_hva(hva); - /* - * Flush all shadow tlb entries everywhere. This is slow, but - * we are 100% sure that we catch the to be unmapped page - */ - kvm_flush_remote_tlbs(kvm); + do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start, + unsigned long end) { - /* kvm_unmap_hva flushes everything anyways */ - kvm_unmap_hva(kvm, start); + do_kvm_unmap_hva(kvm, start, end); return 0; } -int kvm_age_hva(struct kvm *kvm, unsigned long hva) +static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva) { /* XXX could be more clever ;) */ return 0; } -int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva) { /* XXX could be more clever ;) */ return 0; } -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) { /* The page will get remapped properly on its next fault */ - kvm_unmap_hva(kvm, hva); + do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); } /*****************************************/ @@ -159,7 +238,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) vcpu->arch.shadow_msr = smsr; } -void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) +static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) { ulong old_msr = vcpu->arch.shared->msr; @@ -219,7 +298,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); } -void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) +void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) { u32 host_pvr; @@ -256,6 +335,23 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); + /* + * If they're asking for POWER6 or later, set the flag + * indicating that we can do multiple large page sizes + * and 1TB segments. + * Also set the flag that indicates that tlbie has the large + * page bit in the RB operand instead of the instruction. + */ + switch (PVR_VER(pvr)) { + case PVR_POWER6: + case PVR_POWER7: + case PVR_POWER7p: + case PVR_POWER8: + vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | + BOOK3S_HFLAG_NEW_TLBIE; + break; + } + #ifdef CONFIG_PPC_BOOK3S_32 /* 32 bit Book3S always has 32 byte dcbz */ vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; @@ -334,6 +430,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, ulong eaddr, int vec) { bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); + bool iswrite = false; int r = RESUME_GUEST; int relocated; int page_found = 0; @@ -344,10 +441,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, u64 vsid; relocated = data ? 
dr : ir; + if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) + iswrite = true; /* Resolve real address if translation turned on */ if (relocated) { - page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data); + page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); } else { pte.may_execute = true; pte.may_read = true; @@ -355,6 +454,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte.raddr = eaddr & KVM_PAM; pte.eaddr = eaddr; pte.vpage = eaddr >> 12; + pte.page_size = MMU_PAGE_64K; } switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { @@ -388,22 +488,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (page_found == -ENOENT) { /* Page not found in guest PTE entries */ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); - vcpu->arch.shared->dsisr = svcpu->fault_dsisr; + vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr; vcpu->arch.shared->msr |= - (svcpu->shadow_srr1 & 0x00000000f8000000ULL); - svcpu_put(svcpu); + vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EPERM) { /* Storage protection */ - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); - vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE; + vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; vcpu->arch.shared->msr |= - svcpu->shadow_srr1 & 0x00000000f8000000ULL; - svcpu_put(svcpu); + vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ @@ -411,12 +507,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); } else if (!is_mmio && kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { + if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { + /* + * There is already a host HPTE there, presumably + * a read-only one for a page the guest thinks + * is writable, so get rid of it first. + */ + kvmppc_mmu_unmap_page(vcpu, &pte); + } /* The guest's PTE is not mapped yet. Map on the host */ - kvmppc_mmu_map_page(vcpu, &pte); + kvmppc_mmu_map_page(vcpu, &pte, iswrite); if (data) vcpu->stat.sp_storage++; else if (vcpu->arch.mmu.is_dcbz32(vcpu) && - (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) + (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) kvmppc_patch_dcbz(vcpu, &pte); } else { /* MMIO */ @@ -444,7 +548,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) #ifdef CONFIG_VSX u64 *vcpu_vsx = vcpu->arch.vsr; #endif - u64 *thread_fpr = (u64*)t->fpr; + u64 *thread_fpr = &t->fp_state.fpr[0][0]; int i; /* @@ -466,14 +570,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) /* * Note that on CPUs with VSX, giveup_fpu stores * both the traditional FP registers and the added VSX - * registers into thread.fpr[]. + * registers into thread.fp_state.fpr[]. 
*/ if (current->thread.regs->msr & MSR_FP) giveup_fpu(current); for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; - vcpu->arch.fpscr = t->fpscr.val; + vcpu->arch.fpscr = t->fp_state.fpscr; #ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) @@ -486,8 +590,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) if (msr & MSR_VEC) { if (current->thread.regs->msr & MSR_VEC) giveup_altivec(current); - memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); - vcpu->arch.vscr = t->vscr; + memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr)); + vcpu->arch.vscr = t->vr_state.vscr; } #endif @@ -539,7 +643,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #ifdef CONFIG_VSX u64 *vcpu_vsx = vcpu->arch.vsr; #endif - u64 *thread_fpr = (u64*)t->fpr; + u64 *thread_fpr = &t->fp_state.fpr[0][0]; int i; /* When we have paired singles, we emulate in software */ @@ -584,15 +688,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; #endif - t->fpscr.val = vcpu->arch.fpscr; + t->fp_state.fpscr = vcpu->arch.fpscr; t->fpexc_mode = 0; kvmppc_load_up_fpu(); } if (msr & MSR_VEC) { #ifdef CONFIG_ALTIVEC - memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); - t->vscr = vcpu->arch.vscr; + memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); + t->vr_state.vscr = vcpu->arch.vscr; t->vrsave = -1; kvmppc_load_up_altivec(); #endif @@ -619,13 +723,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) if (lost_ext & MSR_FP) kvmppc_load_up_fpu(); +#ifdef CONFIG_ALTIVEC if (lost_ext & MSR_VEC) kvmppc_load_up_altivec(); +#endif current->thread.regs->msr |= lost_ext; } -int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int exit_nr) +int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int exit_nr) { int r = RESUME_HOST; int s; @@ -643,25 +749,32 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, switch (exit_nr) { case BOOK3S_INTERRUPT_INST_STORAGE: { - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - ulong shadow_srr1 = svcpu->shadow_srr1; + ulong shadow_srr1 = vcpu->arch.shadow_srr1; vcpu->stat.pf_instruc++; #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. 
*/ - if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) { - kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); - r = RESUME_GUEST; + { + struct kvmppc_book3s_shadow_vcpu *svcpu; + u32 sr; + + svcpu = svcpu_get(vcpu); + sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; svcpu_put(svcpu); - break; + if (sr == SR_INVALID) { + kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); + r = RESUME_GUEST; + break; + } } #endif - svcpu_put(svcpu); /* only care about PTEG not found errors, but leave NX alone */ if (shadow_srr1 & 0x40000000) { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); + srcu_read_unlock(&vcpu->kvm->srcu, idx); vcpu->stat.sp_instruc++; } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { @@ -682,25 +795,36 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_DATA_STORAGE: { ulong dar = kvmppc_get_fault_dar(vcpu); - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - u32 fault_dsisr = svcpu->fault_dsisr; + u32 fault_dsisr = vcpu->arch.fault_dsisr; vcpu->stat.pf_storage++; #ifdef CONFIG_PPC_BOOK3S_32 /* We set segments as unused segments when invalidating them. So * treat the respective fault as segment fault. */ - if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) { - kvmppc_mmu_map_segment(vcpu, dar); - r = RESUME_GUEST; + { + struct kvmppc_book3s_shadow_vcpu *svcpu; + u32 sr; + + svcpu = svcpu_get(vcpu); + sr = svcpu->sr[dar >> SID_SHIFT]; svcpu_put(svcpu); - break; + if (sr == SR_INVALID) { + kvmppc_mmu_map_segment(vcpu, dar); + r = RESUME_GUEST; + break; + } } #endif - svcpu_put(svcpu); - /* The only case we need to handle is missing shadow PTEs */ - if (fault_dsisr & DSISR_NOHPTE) { + /* + * We need to handle missing shadow PTEs, and + * protection faults due to us mapping a page read-only + * when the guest thinks it is writable. + */ + if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); + srcu_read_unlock(&vcpu->kvm->srcu, idx); } else { vcpu->arch.shared->dar = dar; vcpu->arch.shared->dsisr = fault_dsisr; @@ -743,13 +867,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, case BOOK3S_INTERRUPT_H_EMUL_ASSIST: { enum emulation_result er; - struct kvmppc_book3s_shadow_vcpu *svcpu; ulong flags; program_interrupt: - svcpu = svcpu_get(vcpu); - flags = svcpu->shadow_srr1 & 0x1f0000ull; - svcpu_put(svcpu); + flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; if (vcpu->arch.shared->msr & MSR_PR) { #ifdef EXIT_DEBUG @@ -798,7 +919,7 @@ program_interrupt: ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; -#ifdef CONFIG_KVM_BOOK3S_64_PR +#ifdef CONFIG_PPC_BOOK3S_64 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { r = RESUME_GUEST; break; @@ -881,9 +1002,7 @@ program_interrupt: break; default: { - struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - ulong shadow_srr1 = svcpu->shadow_srr1; - svcpu_put(svcpu); + ulong shadow_srr1 = vcpu->arch.shadow_srr1; /* Ugh - bork here! What did we get? 
*/ printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); @@ -920,8 +1039,8 @@ program_interrupt: return r; } -int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) +static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; @@ -947,13 +1066,13 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) +static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int i; - kvmppc_set_pvr(vcpu, sregs->pvr); + kvmppc_set_pvr_pr(vcpu, sregs->pvr); vcpu3s->sdr1 = sregs->u.s.sdr1; if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { @@ -983,7 +1102,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, return 0; } -int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) +static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = 0; @@ -1012,7 +1132,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) return r; } -int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) +static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = 0; @@ -1042,28 +1163,30 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) return r; } -int kvmppc_core_check_processor_compat(void) -{ - return 0; -} - -struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, + unsigned int id) { struct kvmppc_vcpu_book3s *vcpu_book3s; struct kvm_vcpu *vcpu; int err = -ENOMEM; unsigned long p; - vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); - if (!vcpu_book3s) + vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); + if (!vcpu) goto out; - vcpu_book3s->shadow_vcpu = - kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); - if (!vcpu_book3s->shadow_vcpu) + vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); + if (!vcpu_book3s) goto free_vcpu; + vcpu->arch.book3s = vcpu_book3s; + +#ifdef CONFIG_KVM_BOOK3S_32 + vcpu->arch.shadow_vcpu = + kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); + if (!vcpu->arch.shadow_vcpu) + goto free_vcpu3s; +#endif - vcpu = &vcpu_book3s->vcpu; err = kvm_vcpu_init(vcpu, kvm, id); if (err) goto free_shadow_vcpu; @@ -1076,13 +1199,19 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); #ifdef CONFIG_PPC_BOOK3S_64 - /* default to book3s_64 (970fx) */ + /* + * Default to the same as the host if we're on sufficiently + * recent machine that we have 1TB segments; + * otherwise default to PPC970FX. 
+ */ vcpu->arch.pvr = 0x3C0301; + if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) + vcpu->arch.pvr = mfspr(SPRN_PVR); #else /* default to book3s_32 (750) */ vcpu->arch.pvr = 0x84202; #endif - kvmppc_set_pvr(vcpu, vcpu->arch.pvr); + kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); vcpu->arch.slb_nr = 64; vcpu->arch.shadow_msr = MSR_USER64; @@ -1096,32 +1225,37 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) uninit_vcpu: kvm_vcpu_uninit(vcpu); free_shadow_vcpu: - kfree(vcpu_book3s->shadow_vcpu); -free_vcpu: +#ifdef CONFIG_KVM_BOOK3S_32 + kfree(vcpu->arch.shadow_vcpu); +free_vcpu3s: +#endif vfree(vcpu_book3s); +free_vcpu: + kmem_cache_free(kvm_vcpu_cache, vcpu); out: return ERR_PTR(err); } -void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); kvm_vcpu_uninit(vcpu); - kfree(vcpu_book3s->shadow_vcpu); +#ifdef CONFIG_KVM_BOOK3S_32 + kfree(vcpu->arch.shadow_vcpu); +#endif vfree(vcpu_book3s); + kmem_cache_free(kvm_vcpu_cache, vcpu); } -int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) +static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret; - double fpr[32][TS_FPRWIDTH]; - unsigned int fpscr; + struct thread_fp_state fp; int fpexc_mode; #ifdef CONFIG_ALTIVEC - vector128 vr[32]; - vector128 vscr; + struct thread_vr_state vr; unsigned long uninitialized_var(vrsave); int used_vr; #endif @@ -1153,8 +1287,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* Save FPU state in stack */ if (current->thread.regs->msr & MSR_FP) giveup_fpu(current); - memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); - fpscr = current->thread.fpscr.val; + fp = current->thread.fp_state; fpexc_mode = current->thread.fpexc_mode; #ifdef CONFIG_ALTIVEC @@ -1163,8 +1296,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (used_vr) { if (current->thread.regs->msr & MSR_VEC) giveup_altivec(current); - memcpy(vr, current->thread.vr, sizeof(current->thread.vr)); - vscr = current->thread.vscr; + vr = current->thread.vr_state; vrsave = current->thread.vrsave; } #endif @@ -1196,15 +1328,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) current->thread.regs->msr = ext_msr; /* Restore FPU/VSX state from stack */ - memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); - current->thread.fpscr.val = fpscr; + current->thread.fp_state = fp; current->thread.fpexc_mode = fpexc_mode; #ifdef CONFIG_ALTIVEC /* Restore Altivec state from stack */ if (used_vr && current->thread.used_vr) { - memcpy(current->thread.vr, vr, sizeof(current->thread.vr)); - current->thread.vscr = vscr; + current->thread.vr_state = vr; current->thread.vrsave = vrsave; } current->thread.used_vr = used_vr; @@ -1222,8 +1352,8 @@ out: /* * Get (and clear) the dirty memory log for a memory slot. 
*/ -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, - struct kvm_dirty_log *log) +static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, + struct kvm_dirty_log *log) { struct kvm_memory_slot *memslot; struct kvm_vcpu *vcpu; @@ -1258,67 +1388,100 @@ out: return r; } -#ifdef CONFIG_PPC64 -int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) +static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, + struct kvm_memory_slot *memslot) { - info->flags = KVM_PPC_1T_SEGMENTS; - - /* SLB is always 64 entries */ - info->slb_size = 64; - - /* Standard 4k base page size segment */ - info->sps[0].page_shift = 12; - info->sps[0].slb_enc = 0; - info->sps[0].enc[0].page_shift = 12; - info->sps[0].enc[0].pte_enc = 0; - - /* Standard 16M large page size segment */ - info->sps[1].page_shift = 24; - info->sps[1].slb_enc = SLB_VSID_L; - info->sps[1].enc[0].page_shift = 24; - info->sps[1].enc[0].pte_enc = 0; + return; +} +static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem) +{ return 0; } -#endif /* CONFIG_PPC64 */ -void kvmppc_core_free_memslot(struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) +static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old) { + return; } -int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, - unsigned long npages) +static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) { - return 0; + return; } -int kvmppc_core_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_userspace_memory_region *mem) +static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot, + unsigned long npages) { return 0; } -void kvmppc_core_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - const struct kvm_memory_slot *old) + +#ifdef CONFIG_PPC64 +static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, + struct kvm_ppc_smmu_info *info) { -} + long int i; + struct kvm_vcpu *vcpu; + + info->flags = 0; + + /* SLB is always 64 entries */ + info->slb_size = 64; + + /* Standard 4k base page size segment */ + info->sps[0].page_shift = 12; + info->sps[0].slb_enc = 0; + info->sps[0].enc[0].page_shift = 12; + info->sps[0].enc[0].pte_enc = 0; + + /* + * 64k large page size. + * We only want to put this in if the CPUs we're emulating + * support it, but unfortunately we don't have a vcpu easily + * to hand here to test. Just pick the first vcpu, and if + * that doesn't exist yet, report the minimum capability, + * i.e., no 64k pages. + * 1T segment support goes along with 64k pages. 
+ */ + i = 1; + vcpu = kvm_get_vcpu(kvm, 0); + if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { + info->flags = KVM_PPC_1T_SEGMENTS; + info->sps[i].page_shift = 16; + info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; + info->sps[i].enc[0].page_shift = 16; + info->sps[i].enc[0].pte_enc = 1; + ++i; + } + + /* Standard 16M large page size segment */ + info->sps[i].page_shift = 24; + info->sps[i].slb_enc = SLB_VSID_L; + info->sps[i].enc[0].page_shift = 24; + info->sps[i].enc[0].pte_enc = 0; -void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) + return 0; +} +#else +static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, + struct kvm_ppc_smmu_info *info) { + /* We should not get called */ + BUG(); } +#endif /* CONFIG_PPC64 */ static unsigned int kvm_global_user_count = 0; static DEFINE_SPINLOCK(kvm_global_user_count_lock); -int kvmppc_core_init_vm(struct kvm *kvm) +static int kvmppc_core_init_vm_pr(struct kvm *kvm) { -#ifdef CONFIG_PPC64 - INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); - INIT_LIST_HEAD(&kvm->arch.rtas_tokens); -#endif + mutex_init(&kvm->arch.hpt_mutex); if (firmware_has_feature(FW_FEATURE_SET_MODE)) { spin_lock(&kvm_global_user_count_lock); @@ -1329,7 +1492,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) return 0; } -void kvmppc_core_destroy_vm(struct kvm *kvm) +static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) { #ifdef CONFIG_PPC64 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); @@ -1344,26 +1507,81 @@ void kvmppc_core_destroy_vm(struct kvm *kvm) } } -static int kvmppc_book3s_init(void) +static int kvmppc_core_check_processor_compat_pr(void) { - int r; + /* we are always compatible */ + return 0; +} - r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0, - THIS_MODULE); +static long kvm_arch_vm_ioctl_pr(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + return -ENOTTY; +} - if (r) +static struct kvmppc_ops kvm_ops_pr = { + .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, + .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, + .get_one_reg = kvmppc_get_one_reg_pr, + .set_one_reg = kvmppc_set_one_reg_pr, + .vcpu_load = kvmppc_core_vcpu_load_pr, + .vcpu_put = kvmppc_core_vcpu_put_pr, + .set_msr = kvmppc_set_msr_pr, + .vcpu_run = kvmppc_vcpu_run_pr, + .vcpu_create = kvmppc_core_vcpu_create_pr, + .vcpu_free = kvmppc_core_vcpu_free_pr, + .check_requests = kvmppc_core_check_requests_pr, + .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, + .flush_memslot = kvmppc_core_flush_memslot_pr, + .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, + .commit_memory_region = kvmppc_core_commit_memory_region_pr, + .unmap_hva = kvm_unmap_hva_pr, + .unmap_hva_range = kvm_unmap_hva_range_pr, + .age_hva = kvm_age_hva_pr, + .test_age_hva = kvm_test_age_hva_pr, + .set_spte_hva = kvm_set_spte_hva_pr, + .mmu_destroy = kvmppc_mmu_destroy_pr, + .free_memslot = kvmppc_core_free_memslot_pr, + .create_memslot = kvmppc_core_create_memslot_pr, + .init_vm = kvmppc_core_init_vm_pr, + .destroy_vm = kvmppc_core_destroy_vm_pr, + .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, + .emulate_op = kvmppc_core_emulate_op_pr, + .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, + .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, + .fast_vcpu_kick = kvm_vcpu_kick, + .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, +}; + + +int kvmppc_book3s_init_pr(void) +{ + int r; + + r = kvmppc_core_check_processor_compat_pr(); + if (r < 0) return r; - r = kvmppc_mmu_hpte_sysinit(); + kvm_ops_pr.owner = THIS_MODULE; + kvmppc_pr_ops = &kvm_ops_pr; + r = kvmppc_mmu_hpte_sysinit(); return 
r; } -static void kvmppc_book3s_exit(void) +void kvmppc_book3s_exit_pr(void) { + kvmppc_pr_ops = NULL; kvmppc_mmu_hpte_sysexit(); - kvm_exit(); } -module_init(kvmppc_book3s_init); -module_exit(kvmppc_book3s_exit); +/* + * We only support separate modules for book3s 64 + */ +#ifdef CONFIG_PPC_BOOK3S_64 + +module_init(kvmppc_book3s_init_pr); +module_exit(kvmppc_book3s_exit_pr); + +MODULE_LICENSE("GPL"); +#endif diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index da0e0bc268b..5efa97b993d 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -21,6 +21,8 @@ #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> +#define HPTE_SIZE 16 /* bytes per HPT entry */ + static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); @@ -40,32 +42,41 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long pteg[2 * 8]; unsigned long pteg_addr, i, *hpte; + long int ret; + i = pte_index & 7; pte_index &= ~7UL; pteg_addr = get_pteg_addr(vcpu, pte_index); + mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); hpte = pteg; + ret = H_PTEG_FULL; if (likely((flags & H_EXACT) == 0)) { - pte_index &= ~7UL; for (i = 0; ; ++i) { if (i == 8) - return H_PTEG_FULL; + goto done; if ((*hpte & HPTE_V_VALID) == 0) break; hpte += 2; } } else { - i = kvmppc_get_gpr(vcpu, 5) & 7UL; hpte += i * 2; + if (*hpte & HPTE_V_VALID) + goto done; } hpte[0] = kvmppc_get_gpr(vcpu, 6); hpte[1] = kvmppc_get_gpr(vcpu, 7); - copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg)); - kvmppc_set_gpr(vcpu, 3, H_SUCCESS); + pteg_addr += i * HPTE_SIZE; + copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); kvmppc_set_gpr(vcpu, 4, pte_index | i); + ret = H_SUCCESS; + + done: + mutex_unlock(&vcpu->kvm->arch.hpt_mutex); + kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; } @@ -77,26 +88,31 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long v = 0, pteg, rb; unsigned long pte[2]; + long int ret; pteg = get_pteg_addr(vcpu, pte_index); + mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || - ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) { - kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); - return EMULATE_DONE; - } + ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) + goto done; copy_to_user((void __user *)pteg, &v, sizeof(v)); rb = compute_tlbie_rb(pte[0], pte[1], pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? 
true : false); - kvmppc_set_gpr(vcpu, 3, H_SUCCESS); + ret = H_SUCCESS; kvmppc_set_gpr(vcpu, 4, pte[0]); kvmppc_set_gpr(vcpu, 5, pte[1]); + done: + mutex_unlock(&vcpu->kvm->arch.hpt_mutex); + kvmppc_set_gpr(vcpu, 3, ret); + return EMULATE_DONE; } @@ -124,6 +140,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) int paramnr = 4; int ret = H_SUCCESS; + mutex_lock(&vcpu->kvm->arch.hpt_mutex); for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); @@ -172,6 +189,7 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) } kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); } + mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; @@ -184,15 +202,16 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long rb, pteg, r, v; unsigned long pte[2]; + long int ret; pteg = get_pteg_addr(vcpu, pte_index); + mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || - ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) { - kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND); - return EMULATE_DONE; - } + ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) + goto done; v = pte[0]; r = pte[1]; @@ -207,8 +226,11 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) rb = compute_tlbie_rb(v, r, pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); copy_to_user((void __user *)pteg, pte, sizeof(pte)); + ret = H_SUCCESS; - kvmppc_set_gpr(vcpu, 3, H_SUCCESS); + done: + mutex_unlock(&vcpu->kvm->arch.hpt_mutex); + kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; } diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 8f7633e3afb..a38c4c9edab 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -38,32 +38,6 @@ #define FUNC(name) GLUE(.,name) - .globl kvmppc_skip_interrupt -kvmppc_skip_interrupt: - /* - * Here all GPRs are unchanged from when the interrupt happened - * except for r13, which is saved in SPRG_SCRATCH0. - */ - mfspr r13, SPRN_SRR0 - addi r13, r13, 4 - mtspr SPRN_SRR0, r13 - GET_SCRATCH0(r13) - rfid - b . - - .globl kvmppc_skip_Hinterrupt -kvmppc_skip_Hinterrupt: - /* - * Here all GPRs are unchanged from when the interrupt happened - * except for r13, which is saved in SPRG_SCRATCH0. - */ - mfspr r13, SPRN_HSRR0 - addi r13, r13, 4 - mtspr SPRN_HSRR0, r13 - GET_SCRATCH0(r13) - hrfid - b . - #elif defined(CONFIG_PPC_BOOK3S_32) #define FUNC(name) name @@ -179,11 +153,15 @@ _GLOBAL(kvmppc_entry_trampoline) li r6, MSR_IR | MSR_DR andc r6, r5, r6 /* Clear DR and IR in MSR value */ +#ifdef CONFIG_PPC_BOOK3S_32 /* * Set EE in HOST_MSR so that it's enabled when we get into our - * C exit handler function + * C exit handler function. On 64-bit we delay enabling + * interrupts until we have finished transferring stuff + * to or from the PACA. 
*/ ori r5, r5, MSR_EE +#endif mtsrr0 r7 mtsrr1 r6 RFI diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 3219ba89524..cf95cdef73c 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -260,6 +260,7 @@ fail: */ return rc; } +EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall); void kvmppc_rtas_tokens_free(struct kvm *kvm) { diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 1abe4788191..bc50c97751d 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -161,8 +161,8 @@ kvmppc_handler_trampoline_enter_end: .global kvmppc_handler_trampoline_exit kvmppc_handler_trampoline_exit: -.global kvmppc_interrupt -kvmppc_interrupt: +.global kvmppc_interrupt_pr +kvmppc_interrupt_pr: /* Register usage at this point: * diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index a3a5cb8ee7e..02a17dcf161 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req) } /* Check for real mode returning too hard */ - if (xics->real_mode) + if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm)) return kvmppc_xics_rm_complete(vcpu, req); switch (req) { @@ -840,6 +840,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req) return rc; } +EXPORT_SYMBOL_GPL(kvmppc_xics_hcall); /* -- Initialisation code etc. -- */ @@ -1250,13 +1251,13 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type) xics_debugfs_init(xics); -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE if (cpu_has_feature(CPU_FTR_ARCH_206)) { /* Enable real mode support */ xics->real_mode = ENABLE_REALMODE; xics->real_mode_dbg = DEBUG_REALMODE; } -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ return 0; } diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 17722d82f1d..53e65a210b9 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -40,7 +40,9 @@ #include "timing.h" #include "booke.h" -#include "trace.h" + +#define CREATE_TRACE_POINTS +#include "trace_booke.h" unsigned long kvmppc_booke_handlers; @@ -133,6 +135,29 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu) #endif } +static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu) +{ + /* Synchronize guest's desire to get debug interrupts into shadow MSR */ +#ifndef CONFIG_KVM_BOOKE_HV + vcpu->arch.shadow_msr &= ~MSR_DE; + vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE; +#endif + + /* Force enable debug interrupts when user space wants to debug */ + if (vcpu->guest_debug) { +#ifdef CONFIG_KVM_BOOKE_HV + /* + * Since there is no shadow MSR, sync MSR_DE into the guest + * visible MSR. + */ + vcpu->arch.shared->msr |= MSR_DE; +#else + vcpu->arch.shadow_msr |= MSR_DE; + vcpu->arch.shared->msr &= ~MSR_DE; +#endif + } +} + /* * Helper function for "full" MSR writes. No need to call this if only * EE/CE/ME/DE/RI are changing. 
@@ -150,6 +175,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) kvmppc_mmu_msr_notify(vcpu, old_msr); kvmppc_vcpu_sync_spe(vcpu); kvmppc_vcpu_sync_fpu(vcpu); + kvmppc_vcpu_sync_debug(vcpu); } static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, @@ -655,10 +681,10 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret, s; + struct thread_struct thread; #ifdef CONFIG_PPC_FPU - unsigned int fpscr; + struct thread_fp_state fp; int fpexc_mode; - u64 fpr[32]; #endif if (!vcpu->arch.sane) { @@ -677,13 +703,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #ifdef CONFIG_PPC_FPU /* Save userspace FPU state in stack */ enable_kernel_fp(); - memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); - fpscr = current->thread.fpscr.val; + fp = current->thread.fp_state; fpexc_mode = current->thread.fpexc_mode; /* Restore guest FPU state to thread */ - memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr)); - current->thread.fpscr.val = vcpu->arch.fpscr; + memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr, + sizeof(vcpu->arch.fpr)); + current->thread.fp_state.fpscr = vcpu->arch.fpscr; /* * Since we can't trap on MSR_FP in GS-mode, we consider the guest @@ -696,6 +722,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvmppc_load_guest_fp(vcpu); #endif + /* Switch to guest debug context */ + thread.debug = vcpu->arch.shadow_dbg_reg; + switch_booke_debug_regs(&thread); + thread.debug = current->thread.debug; + current->thread.debug = vcpu->arch.shadow_dbg_reg; + kvmppc_fix_ee_before_entry(); ret = __kvmppc_vcpu_run(kvm_run, vcpu); @@ -703,18 +735,22 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* No need for kvm_guest_exit. It's done in handle_exit. We also get here with interrupts enabled. 
*/ + /* Switch back to user space debug context */ + switch_booke_debug_regs(&thread); + current->thread.debug = thread.debug; + #ifdef CONFIG_PPC_FPU kvmppc_save_guest_fp(vcpu); vcpu->fpu_active = 0; /* Save guest FPU state from thread */ - memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr)); - vcpu->arch.fpscr = current->thread.fpscr.val; + memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr, + sizeof(vcpu->arch.fpr)); + vcpu->arch.fpscr = current->thread.fp_state.fpscr; /* Restore userspace FPU state from stack */ - memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); - current->thread.fpscr.val = fpscr; + current->thread.fp_state = fp; current->thread.fpexc_mode = fpexc_mode; #endif @@ -758,6 +794,30 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) } } +static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg); + u32 dbsr = vcpu->arch.dbsr; + + run->debug.arch.status = 0; + run->debug.arch.address = vcpu->arch.pc; + + if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) { + run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT; + } else { + if (dbsr & (DBSR_DAC1W | DBSR_DAC2W)) + run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE; + else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R)) + run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ; + if (dbsr & (DBSR_DAC1R | DBSR_DAC1W)) + run->debug.arch.address = dbg_reg->dac1; + else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W)) + run->debug.arch.address = dbg_reg->dac2; + } + + return RESUME_HOST; +} + static void kvmppc_fill_pt_regs(struct pt_regs *regs) { ulong r1, ip, msr, lr; @@ -818,6 +878,11 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, case BOOKE_INTERRUPT_CRITICAL: unknown_exception(®s); break; + case BOOKE_INTERRUPT_DEBUG: + /* Save DBSR before preemption is enabled */ + vcpu->arch.dbsr = mfspr(SPRN_DBSR); + kvmppc_clear_dbsr(); + break; } } @@ -1135,18 +1200,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } case BOOKE_INTERRUPT_DEBUG: { - u32 dbsr; - - vcpu->arch.pc = mfspr(SPRN_CSRR0); - - /* clear IAC events in DBSR register */ - dbsr = mfspr(SPRN_DBSR); - dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4; - mtspr(SPRN_DBSR, dbsr); - - run->exit_reason = KVM_EXIT_DEBUG; + r = kvmppc_handle_debug(run, vcpu); + if (r == RESUME_HOST) + run->exit_reason = KVM_EXIT_DEBUG; kvmppc_account_exit(vcpu, DEBUG_EXITS); - r = RESUME_HOST; break; } @@ -1197,7 +1254,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) kvmppc_set_msr(vcpu, 0); #ifndef CONFIG_KVM_BOOKE_HV - vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; + vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS; vcpu->arch.shadow_pid = 1; vcpu->arch.shared->msr = 0; #endif @@ -1359,7 +1416,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu, return 0; } -void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { sregs->u.e.features |= KVM_SREGS_E_IVOR; @@ -1379,6 +1436,7 @@ void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; + return 0; } int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) @@ -1413,8 +1471,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 
get_sregs_base(vcpu, sregs); get_sregs_arch206(vcpu, sregs); - kvmppc_core_get_sregs(vcpu, sregs); - return 0; + return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, @@ -1433,7 +1490,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, if (ret < 0) return ret; - return kvmppc_core_set_sregs(vcpu, sregs); + return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); } int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) @@ -1441,7 +1498,6 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) int r = 0; union kvmppc_one_reg val; int size; - long int i; size = one_reg_size(reg->id); if (size > sizeof(val)) @@ -1449,16 +1505,24 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) switch (reg->id) { case KVM_REG_PPC_IAC1: + val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1); + break; case KVM_REG_PPC_IAC2: + val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2); + break; +#if CONFIG_PPC_ADV_DEBUG_IACS > 2 case KVM_REG_PPC_IAC3: + val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3); + break; case KVM_REG_PPC_IAC4: - i = reg->id - KVM_REG_PPC_IAC1; - val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac[i]); + val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4); break; +#endif case KVM_REG_PPC_DAC1: + val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1); + break; case KVM_REG_PPC_DAC2: - i = reg->id - KVM_REG_PPC_DAC1; - val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac[i]); + val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2); break; case KVM_REG_PPC_EPR: { u32 epr = get_guest_epr(vcpu); @@ -1477,10 +1541,13 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) val = get_reg_val(reg->id, vcpu->arch.tsr); break; case KVM_REG_PPC_DEBUG_INST: - val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV); + val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG); + break; + case KVM_REG_PPC_VRSAVE: + val = get_reg_val(reg->id, vcpu->arch.vrsave); break; default: - r = kvmppc_get_one_reg(vcpu, reg->id, &val); + r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val); break; } @@ -1498,7 +1565,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) int r = 0; union kvmppc_one_reg val; int size; - long int i; size = one_reg_size(reg->id); if (size > sizeof(val)) @@ -1509,16 +1575,24 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) switch (reg->id) { case KVM_REG_PPC_IAC1: + vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val); + break; case KVM_REG_PPC_IAC2: + vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val); + break; +#if CONFIG_PPC_ADV_DEBUG_IACS > 2 case KVM_REG_PPC_IAC3: + vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val); + break; case KVM_REG_PPC_IAC4: - i = reg->id - KVM_REG_PPC_IAC1; - vcpu->arch.dbg_reg.iac[i] = set_reg_val(reg->id, val); + vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val); break; +#endif case KVM_REG_PPC_DAC1: + vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val); + break; case KVM_REG_PPC_DAC2: - i = reg->id - KVM_REG_PPC_DAC1; - vcpu->arch.dbg_reg.dac[i] = set_reg_val(reg->id, val); + vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val); break; case KVM_REG_PPC_EPR: { u32 new_epr = set_reg_val(reg->id, val); @@ -1552,20 +1626,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) kvmppc_set_tcr(vcpu, tcr); break; } + case KVM_REG_PPC_VRSAVE: + vcpu->arch.vrsave = set_reg_val(reg->id, val); + break; default: - r = 
kvmppc_set_one_reg(vcpu, reg->id, &val); + r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val); break; } return r; } -int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) -{ - return -EINVAL; -} - int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -ENOTSUPP; @@ -1590,12 +1661,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) return -ENOTSUPP; } -void kvmppc_core_free_memslot(struct kvm_memory_slot *free, +void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } -int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, +int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { return 0; @@ -1671,6 +1742,157 @@ void kvmppc_decrementer_func(unsigned long data) kvmppc_set_tsr_bits(vcpu, TSR_DIS); } +static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg, + uint64_t addr, int index) +{ + switch (index) { + case 0: + dbg_reg->dbcr0 |= DBCR0_IAC1; + dbg_reg->iac1 = addr; + break; + case 1: + dbg_reg->dbcr0 |= DBCR0_IAC2; + dbg_reg->iac2 = addr; + break; +#if CONFIG_PPC_ADV_DEBUG_IACS > 2 + case 2: + dbg_reg->dbcr0 |= DBCR0_IAC3; + dbg_reg->iac3 = addr; + break; + case 3: + dbg_reg->dbcr0 |= DBCR0_IAC4; + dbg_reg->iac4 = addr; + break; +#endif + default: + return -EINVAL; + } + + dbg_reg->dbcr0 |= DBCR0_IDM; + return 0; +} + +static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr, + int type, int index) +{ + switch (index) { + case 0: + if (type & KVMPPC_DEBUG_WATCH_READ) + dbg_reg->dbcr0 |= DBCR0_DAC1R; + if (type & KVMPPC_DEBUG_WATCH_WRITE) + dbg_reg->dbcr0 |= DBCR0_DAC1W; + dbg_reg->dac1 = addr; + break; + case 1: + if (type & KVMPPC_DEBUG_WATCH_READ) + dbg_reg->dbcr0 |= DBCR0_DAC2R; + if (type & KVMPPC_DEBUG_WATCH_WRITE) + dbg_reg->dbcr0 |= DBCR0_DAC2W; + dbg_reg->dac2 = addr; + break; + default: + return -EINVAL; + } + + dbg_reg->dbcr0 |= DBCR0_IDM; + return 0; +} +void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set) +{ + /* XXX: Add similar MSR protection for BookE-PR */ +#ifdef CONFIG_KVM_BOOKE_HV + BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP)); + if (set) { + if (prot_bitmap & MSR_UCLE) + vcpu->arch.shadow_msrp |= MSRP_UCLEP; + if (prot_bitmap & MSR_DE) + vcpu->arch.shadow_msrp |= MSRP_DEP; + if (prot_bitmap & MSR_PMM) + vcpu->arch.shadow_msrp |= MSRP_PMMP; + } else { + if (prot_bitmap & MSR_UCLE) + vcpu->arch.shadow_msrp &= ~MSRP_UCLEP; + if (prot_bitmap & MSR_DE) + vcpu->arch.shadow_msrp &= ~MSRP_DEP; + if (prot_bitmap & MSR_PMM) + vcpu->arch.shadow_msrp &= ~MSRP_PMMP; + } +#endif +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + struct debug_reg *dbg_reg; + int n, b = 0, w = 0; + + if (!(dbg->control & KVM_GUESTDBG_ENABLE)) { + vcpu->arch.shadow_dbg_reg.dbcr0 = 0; + vcpu->guest_debug = 0; + kvm_guest_protect_msr(vcpu, MSR_DE, false); + return 0; + } + + kvm_guest_protect_msr(vcpu, MSR_DE, true); + vcpu->guest_debug = dbg->control; + vcpu->arch.shadow_dbg_reg.dbcr0 = 0; + /* Set DBCR0_EDM in guest visible DBCR0 register. 
*/ + vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM; + + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) + vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC; + + /* Code below handles only HW breakpoints */ + dbg_reg = &(vcpu->arch.shadow_dbg_reg); + +#ifdef CONFIG_KVM_BOOKE_HV + /* + * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1 + * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0 + */ + dbg_reg->dbcr1 = 0; + dbg_reg->dbcr2 = 0; +#else + /* + * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1 + * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR + * is set. + */ + dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US | + DBCR1_IAC4US; + dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; +#endif + + if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) + return 0; + + for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) { + uint64_t addr = dbg->arch.bp[n].addr; + uint32_t type = dbg->arch.bp[n].type; + + if (type == KVMPPC_DEBUG_NONE) + continue; + + if (type & !(KVMPPC_DEBUG_WATCH_READ | + KVMPPC_DEBUG_WATCH_WRITE | + KVMPPC_DEBUG_BREAKPOINT)) + return -EINVAL; + + if (type & KVMPPC_DEBUG_BREAKPOINT) { + /* Setting H/W breakpoint */ + if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++)) + return -EINVAL; + } else { + /* Setting H/W watchpoint */ + if (kvmppc_booke_add_watchpoint(dbg_reg, addr, + type, w++)) + return -EINVAL; + } + } + + return 0; +} + void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { vcpu->cpu = smp_processor_id(); @@ -1681,6 +1903,44 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) { current->thread.kvm_vcpu = NULL; vcpu->cpu = -1; + + /* Clear pending debug event in DBSR */ + kvmppc_clear_dbsr(); +} + +void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +{ + vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); +} + +int kvmppc_core_init_vm(struct kvm *kvm) +{ + return kvm->arch.kvm_ops->init_vm(kvm); +} + +struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +{ + return kvm->arch.kvm_ops->vcpu_create(kvm, id); +} + +void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +{ + vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); +} + +void kvmppc_core_destroy_vm(struct kvm *kvm) +{ + kvm->arch.kvm_ops->destroy_vm(kvm); +} + +void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); +} + +void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +{ + vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); } int __init kvmppc_booke_init(void) diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 5fd1ba69357..09bfd9bc7cf 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -99,6 +99,30 @@ enum int_class { void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); +extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu); +extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance); +extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, + ulong spr_val); +extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, + ulong *spr_val); +extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); +extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, + struct kvm_vcpu *vcpu, + unsigned int inst, int *advance); +extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, + ulong spr_val); +extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, + ulong *spr_val); +extern void 
kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); +extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, + struct kvm_vcpu *vcpu, + unsigned int inst, int *advance); +extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, + ulong spr_val); +extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, + ulong *spr_val); + /* * Load up guest vcpu FP state if it's needed. * It also set the MSR_FP in thread so that host know @@ -129,4 +153,9 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu) giveup_fpu(current); #endif } + +static inline void kvmppc_clear_dbsr(void) +{ + mtspr(SPRN_DBSR, mfspr(SPRN_DBSR)); +} #endif /* __KVM_BOOKE_H__ */ diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index ce6b73c2961..497b142f651 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -305,7 +305,7 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) { } -void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu) { kvmppc_booke_vcpu_load(vcpu, cpu); @@ -313,7 +313,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); } -void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu) { #ifdef CONFIG_SPE if (vcpu->arch.shadow_msr & MSR_SPE) @@ -367,7 +367,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } -void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -388,9 +389,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) kvmppc_get_sregs_ivor(vcpu, sregs); kvmppc_get_sregs_e500_tlb(vcpu, sregs); + return 0; } -int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int ret; @@ -425,21 +428,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) return kvmppc_set_sregs_ivor(vcpu, sregs); } -int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, - union kvmppc_one_reg *val) +static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); return r; } -int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, - union kvmppc_one_reg *val) +static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); return r; } -struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm, + unsigned int id) { struct kvmppc_vcpu_e500 *vcpu_e500; struct kvm_vcpu *vcpu; @@ -481,7 +485,7 @@ out: return ERR_PTR(err); } -void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -492,15 +496,32 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) kmem_cache_free(kvm_vcpu_cache, vcpu_e500); } -int kvmppc_core_init_vm(struct kvm *kvm) +static int kvmppc_core_init_vm_e500(struct kvm *kvm) { return 0; } -void kvmppc_core_destroy_vm(struct kvm *kvm) +static void kvmppc_core_destroy_vm_e500(struct kvm *kvm) { } +static struct 
kvmppc_ops kvm_ops_e500 = { + .get_sregs = kvmppc_core_get_sregs_e500, + .set_sregs = kvmppc_core_set_sregs_e500, + .get_one_reg = kvmppc_get_one_reg_e500, + .set_one_reg = kvmppc_set_one_reg_e500, + .vcpu_load = kvmppc_core_vcpu_load_e500, + .vcpu_put = kvmppc_core_vcpu_put_e500, + .vcpu_create = kvmppc_core_vcpu_create_e500, + .vcpu_free = kvmppc_core_vcpu_free_e500, + .mmu_destroy = kvmppc_mmu_destroy_e500, + .init_vm = kvmppc_core_init_vm_e500, + .destroy_vm = kvmppc_core_destroy_vm_e500, + .emulate_op = kvmppc_core_emulate_op_e500, + .emulate_mtspr = kvmppc_core_emulate_mtspr_e500, + .emulate_mfspr = kvmppc_core_emulate_mfspr_e500, +}; + static int __init kvmppc_e500_init(void) { int r, i; @@ -512,11 +533,11 @@ static int __init kvmppc_e500_init(void) r = kvmppc_core_check_processor_compat(); if (r) - return r; + goto err_out; r = kvmppc_booke_init(); if (r) - return r; + goto err_out; /* copy extra E500 exception handlers */ ivor[0] = mfspr(SPRN_IVOR32); @@ -534,11 +555,19 @@ static int __init kvmppc_e500_init(void) flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + ivor[max_ivor] + handler_len); - return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); + r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); + if (r) + goto err_out; + kvm_ops_e500.owner = THIS_MODULE; + kvmppc_pr_ops = &kvm_ops_e500; + +err_out: + return r; } static void __exit kvmppc_e500_exit(void) { + kvmppc_pr_ops = NULL; kvmppc_booke_exit(); } diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index c2e5e98453a..4fd9650eb01 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -117,7 +117,7 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) #define MAS2_ATTRIB_MASK \ - (MAS2_X0 | MAS2_X1) + (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G) #define MAS3_ATTRIB_MASK \ (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index b10a01243ab..89b7f821f6c 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -26,6 +26,7 @@ #define XOP_TLBRE 946 #define XOP_TLBWE 978 #define XOP_TLBILX 18 +#define XOP_EHPRIV 270 #ifdef CONFIG_KVM_E500MC static int dbell2prio(ulong param) @@ -82,8 +83,28 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) } #endif -int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int inst, int *advance) +static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance) +{ + int emulated = EMULATE_DONE; + + switch (get_oc(inst)) { + case EHPRIV_OC_DEBUG: + run->exit_reason = KVM_EXIT_DEBUG; + run->debug.arch.address = vcpu->arch.pc; + run->debug.arch.status = 0; + kvmppc_account_exit(vcpu, DEBUG_EXITS); + emulated = EMULATE_EXIT_USER; + *advance = 0; + break; + default: + emulated = EMULATE_FAIL; + } + return emulated; +} + +int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int ra = get_ra(inst); @@ -130,6 +151,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, emulated = kvmppc_e500_emul_tlbivax(vcpu, ea); break; + case XOP_EHPRIV: + emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst, + advance); + break; + default: emulated = EMULATE_FAIL; } @@ 
-146,7 +172,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } -int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) +int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int emulated = EMULATE_DONE; @@ -237,7 +263,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) return emulated; } -int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) +int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int emulated = EMULATE_DONE; diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index 6d6f153b6c1..ebca6b88ea5 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -32,7 +32,7 @@ #include <asm/kvm_ppc.h> #include "e500.h" -#include "trace.h" +#include "trace_booke.h" #include "timing.h" #include "e500_mmu_host.h" @@ -536,7 +536,7 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, return get_tlb_raddr(gtlbe) | (eaddr & pgmask); } -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) +void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu) { } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index c65593abae8..ecf2247b13b 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -32,10 +32,11 @@ #include <asm/kvm_ppc.h> #include "e500.h" -#include "trace.h" #include "timing.h" #include "e500_mmu_host.h" +#include "trace_booke.h" + #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; @@ -253,6 +254,9 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, ref->pfn = pfn; ref->flags |= E500_TLB_VALID; + /* Mark the page accessed */ + kvm_set_pfn_accessed(pfn); + if (tlbe_is_writable(gtlbe)) kvm_set_pfn_dirty(pfn); } diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 19c8379575f..4132cd2fc17 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); -void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -147,7 +147,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvmppc_load_guest_fp(vcpu); } -void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu) { vcpu->arch.eplc = mfspr(SPRN_EPLC); vcpu->arch.epsc = mfspr(SPRN_EPSC); @@ -204,7 +204,8 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } -void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -224,10 +225,11 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; - kvmppc_get_sregs_ivor(vcpu, sregs); + return kvmppc_get_sregs_ivor(vcpu, sregs); } -int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +static int 
kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int ret; @@ -260,21 +262,22 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) return kvmppc_set_sregs_ivor(vcpu, sregs); } -int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, - union kvmppc_one_reg *val) +static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val); return r; } -int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, - union kvmppc_one_reg *val) +static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id, + union kvmppc_one_reg *val) { int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val); return r; } -struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) +static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm, + unsigned int id) { struct kvmppc_vcpu_e500 *vcpu_e500; struct kvm_vcpu *vcpu; @@ -315,7 +318,7 @@ out: return ERR_PTR(err); } -void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) +static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); @@ -325,7 +328,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) kmem_cache_free(kvm_vcpu_cache, vcpu_e500); } -int kvmppc_core_init_vm(struct kvm *kvm) +static int kvmppc_core_init_vm_e500mc(struct kvm *kvm) { int lpid; @@ -337,27 +340,52 @@ int kvmppc_core_init_vm(struct kvm *kvm) return 0; } -void kvmppc_core_destroy_vm(struct kvm *kvm) +static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm) { kvmppc_free_lpid(kvm->arch.lpid); } +static struct kvmppc_ops kvm_ops_e500mc = { + .get_sregs = kvmppc_core_get_sregs_e500mc, + .set_sregs = kvmppc_core_set_sregs_e500mc, + .get_one_reg = kvmppc_get_one_reg_e500mc, + .set_one_reg = kvmppc_set_one_reg_e500mc, + .vcpu_load = kvmppc_core_vcpu_load_e500mc, + .vcpu_put = kvmppc_core_vcpu_put_e500mc, + .vcpu_create = kvmppc_core_vcpu_create_e500mc, + .vcpu_free = kvmppc_core_vcpu_free_e500mc, + .mmu_destroy = kvmppc_mmu_destroy_e500, + .init_vm = kvmppc_core_init_vm_e500mc, + .destroy_vm = kvmppc_core_destroy_vm_e500mc, + .emulate_op = kvmppc_core_emulate_op_e500, + .emulate_mtspr = kvmppc_core_emulate_mtspr_e500, + .emulate_mfspr = kvmppc_core_emulate_mfspr_e500, +}; + static int __init kvmppc_e500mc_init(void) { int r; r = kvmppc_booke_init(); if (r) - return r; + goto err_out; kvmppc_init_lpid(64); kvmppc_claim_lpid(0); /* host */ - return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); + r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); + if (r) + goto err_out; + kvm_ops_e500mc.owner = THIS_MODULE; + kvmppc_pr_ops = &kvm_ops_e500mc; + +err_out: + return r; } static void __exit kvmppc_e500mc_exit(void) { + kvmppc_pr_ops = NULL; kvmppc_booke_exit(); } diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 751cd45f65a..2f9a0873b44 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -130,8 +130,8 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) case SPRN_PIR: break; default: - emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, - spr_val); + emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn, + spr_val); if (emulated == EMULATE_FAIL) printk(KERN_INFO "mtspr: unknown spr " "0x%x\n", sprn); @@ -191,8 +191,8 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) spr_val = kvmppc_get_dec(vcpu, get_tb()); break; 
default: - emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, - &spr_val); + emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn, + &spr_val); if (unlikely(emulated == EMULATE_FAIL)) { printk(KERN_INFO "mfspr: unknown spr " "0x%x\n", sprn); @@ -464,7 +464,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) } if (emulated == EMULATE_FAIL) { - emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); + emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst, + &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { @@ -483,3 +484,4 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) return emulated; } +EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 07c0106fab7..9ae97686e9f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -26,6 +26,7 @@ #include <linux/fs.h> #include <linux/slab.h> #include <linux/file.h> +#include <linux/module.h> #include <asm/cputable.h> #include <asm/uaccess.h> #include <asm/kvm_ppc.h> @@ -39,6 +40,12 @@ #define CREATE_TRACE_POINTS #include "trace.h" +struct kvmppc_ops *kvmppc_hv_ops; +EXPORT_SYMBOL_GPL(kvmppc_hv_ops); +struct kvmppc_ops *kvmppc_pr_ops; +EXPORT_SYMBOL_GPL(kvmppc_pr_ops); + + int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { return !!(v->arch.pending_exceptions) || @@ -50,7 +57,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) return 1; } -#ifndef CONFIG_KVM_BOOK3S_64_HV /* * Common checks before entering the guest world. Call with interrupts * disabled. @@ -125,7 +131,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) return r; } -#endif /* CONFIG_KVM_BOOK3S_64_HV */ +EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { @@ -179,6 +185,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) return r; } +EXPORT_SYMBOL_GPL(kvmppc_kvm_pv); int kvmppc_sanity_check(struct kvm_vcpu *vcpu) { @@ -192,11 +199,9 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu) if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) goto out; -#ifdef CONFIG_KVM_BOOK3S_64_HV /* HV KVM can only do PAPR mode for now */ - if (!vcpu->arch.papr_enabled) + if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) goto out; -#endif #ifdef CONFIG_KVM_BOOKE_HV if (!cpu_has_feature(CPU_FTR_EMB_HV)) @@ -209,6 +214,7 @@ out: vcpu->arch.sane = r; return r ? 
0 : -EINVAL; } +EXPORT_SYMBOL_GPL(kvmppc_sanity_check); int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) { @@ -243,6 +249,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) return r; } +EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio); int kvm_arch_hardware_enable(void *garbage) { @@ -269,10 +276,35 @@ void kvm_arch_check_processor_compat(void *rtn) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { - if (type) - return -EINVAL; - + struct kvmppc_ops *kvm_ops = NULL; + /* + * if we have both HV and PR enabled, default is HV + */ + if (type == 0) { + if (kvmppc_hv_ops) + kvm_ops = kvmppc_hv_ops; + else + kvm_ops = kvmppc_pr_ops; + if (!kvm_ops) + goto err_out; + } else if (type == KVM_VM_PPC_HV) { + if (!kvmppc_hv_ops) + goto err_out; + kvm_ops = kvmppc_hv_ops; + } else if (type == KVM_VM_PPC_PR) { + if (!kvmppc_pr_ops) + goto err_out; + kvm_ops = kvmppc_pr_ops; + } else + goto err_out; + + if (kvm_ops->owner && !try_module_get(kvm_ops->owner)) + return -ENOENT; + + kvm->arch.kvm_ops = kvm_ops; return kvmppc_core_init_vm(kvm); +err_out: + return -EINVAL; } void kvm_arch_destroy_vm(struct kvm *kvm) @@ -292,6 +324,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvmppc_core_destroy_vm(kvm); mutex_unlock(&kvm->lock); + + /* drop the module reference */ + module_put(kvm->arch.kvm_ops->owner); } void kvm_arch_sync_events(struct kvm *kvm) @@ -301,6 +336,10 @@ void kvm_arch_sync_events(struct kvm *kvm) int kvm_dev_ioctl_check_extension(long ext) { int r; + /* FIXME!! + * Should some of this be vm ioctl ? is it possible now ? + */ + int hv_enabled = kvmppc_hv_ops ? 1 : 0; switch (ext) { #ifdef CONFIG_BOOKE @@ -320,22 +359,26 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_DEVICE_CTRL: r = 1; break; -#ifndef CONFIG_KVM_BOOK3S_64_HV case KVM_CAP_PPC_PAIRED_SINGLES: case KVM_CAP_PPC_OSI: case KVM_CAP_PPC_GET_PVINFO: #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) case KVM_CAP_SW_TLB: #endif -#ifdef CONFIG_KVM_MPIC - case KVM_CAP_IRQ_MPIC: -#endif - r = 1; + /* We support this only for PR */ + r = !hv_enabled; break; +#ifdef CONFIG_KVM_MMIO case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; #endif +#ifdef CONFIG_KVM_MPIC + case KVM_CAP_IRQ_MPIC: + r = 1; + break; +#endif + #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: case KVM_CAP_PPC_ALLOC_HTAB: @@ -346,32 +389,37 @@ int kvm_dev_ioctl_check_extension(long ext) r = 1; break; #endif /* CONFIG_PPC_BOOK3S_64 */ -#ifdef CONFIG_KVM_BOOK3S_64_HV +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_SMT: - r = threads_per_core; + if (hv_enabled) + r = threads_per_core; + else + r = 0; break; case KVM_CAP_PPC_RMA: - r = 1; + r = hv_enabled; /* PPC970 requires an RMA */ - if (cpu_has_feature(CPU_FTR_ARCH_201)) + if (r && cpu_has_feature(CPU_FTR_ARCH_201)) r = 2; break; #endif case KVM_CAP_SYNC_MMU: -#ifdef CONFIG_KVM_BOOK3S_64_HV - r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE + if (hv_enabled) + r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0; + else + r = 0; #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) r = 1; #else r = 0; - break; #endif -#ifdef CONFIG_KVM_BOOK3S_64_HV + break; +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_HTAB_FD: - r = 1; + r = hv_enabled; break; #endif - break; case KVM_CAP_NR_VCPUS: /* * Recommending a number of CPUs is somewhat arbitrary; we @@ -379,11 +427,10 @@ int kvm_dev_ioctl_check_extension(long ext) * will have secondary threads "offline"), and for other KVM * implementations just count online CPUs. 
*/ -#ifdef CONFIG_KVM_BOOK3S_64_HV - r = num_present_cpus(); -#else - r = num_online_cpus(); -#endif + if (hv_enabled) + r = num_present_cpus(); + else + r = num_online_cpus(); break; case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; @@ -407,15 +454,16 @@ long kvm_arch_dev_ioctl(struct file *filp, return -EINVAL; } -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { - kvmppc_core_free_memslot(free, dont); + kvmppc_core_free_memslot(kvm, free, dont); } -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) { - return kvmppc_core_create_memslot(slot, npages); + return kvmppc_core_create_memslot(kvm, slot, npages); } void kvm_arch_memslots_updated(struct kvm *kvm) @@ -659,6 +707,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, return EMULATE_DO_MMIO; } +EXPORT_SYMBOL_GPL(kvmppc_handle_load); /* Same as above, but sign extends */ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, @@ -720,6 +769,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, return EMULATE_DO_MMIO; } +EXPORT_SYMBOL_GPL(kvmppc_handle_store); int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { @@ -1024,52 +1074,12 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); goto out; } -#endif /* CONFIG_PPC_BOOK3S_64 */ - -#ifdef CONFIG_KVM_BOOK3S_64_HV - case KVM_ALLOCATE_RMA: { - struct kvm_allocate_rma rma; - struct kvm *kvm = filp->private_data; - - r = kvm_vm_ioctl_allocate_rma(kvm, &rma); - if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) - r = -EFAULT; - break; - } - - case KVM_PPC_ALLOCATE_HTAB: { - u32 htab_order; - - r = -EFAULT; - if (get_user(htab_order, (u32 __user *)argp)) - break; - r = kvmppc_alloc_reset_hpt(kvm, &htab_order); - if (r) - break; - r = -EFAULT; - if (put_user(htab_order, (u32 __user *)argp)) - break; - r = 0; - break; - } - - case KVM_PPC_GET_HTAB_FD: { - struct kvm_get_htab_fd ghf; - - r = -EFAULT; - if (copy_from_user(&ghf, argp, sizeof(ghf))) - break; - r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); - break; - } -#endif /* CONFIG_KVM_BOOK3S_64_HV */ - -#ifdef CONFIG_PPC_BOOK3S_64 case KVM_PPC_GET_SMMU_INFO: { struct kvm_ppc_smmu_info info; + struct kvm *kvm = filp->private_data; memset(&info, 0, sizeof(info)); - r = kvm_vm_ioctl_get_smmu_info(kvm, &info); + r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) r = -EFAULT; break; @@ -1080,11 +1090,15 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_vm_ioctl_rtas_define_token(kvm, argp); break; } -#endif /* CONFIG_PPC_BOOK3S_64 */ + default: { + struct kvm *kvm = filp->private_data; + r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); + } +#else /* CONFIG_PPC_BOOK3S_64 */ default: r = -ENOTTY; +#endif } - out: return r; } @@ -1106,22 +1120,26 @@ long kvmppc_alloc_lpid(void) return lpid; } +EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); void kvmppc_claim_lpid(long lpid) { set_bit(lpid, lpid_inuse); } +EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); void kvmppc_free_lpid(long lpid) { clear_bit(lpid, lpid_inuse); } +EXPORT_SYMBOL_GPL(kvmppc_free_lpid); void kvmppc_init_lpid(unsigned long nr_lpids_param) { nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); memset(lpid_inuse, 0, sizeof(lpid_inuse)); } +EXPORT_SYMBOL_GPL(kvmppc_init_lpid); int 
kvm_arch_init(void *opaque) { @@ -1130,4 +1148,5 @@ int kvm_arch_init(void *opaque) void kvm_arch_exit(void) { + } diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index e326489a542..2e0e67ef354 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h @@ -31,126 +31,6 @@ TRACE_EVENT(kvm_ppc_instr, __entry->inst, __entry->pc, __entry->emulate) ); -#ifdef CONFIG_PPC_BOOK3S -#define kvm_trace_symbol_exit \ - {0x100, "SYSTEM_RESET"}, \ - {0x200, "MACHINE_CHECK"}, \ - {0x300, "DATA_STORAGE"}, \ - {0x380, "DATA_SEGMENT"}, \ - {0x400, "INST_STORAGE"}, \ - {0x480, "INST_SEGMENT"}, \ - {0x500, "EXTERNAL"}, \ - {0x501, "EXTERNAL_LEVEL"}, \ - {0x502, "EXTERNAL_HV"}, \ - {0x600, "ALIGNMENT"}, \ - {0x700, "PROGRAM"}, \ - {0x800, "FP_UNAVAIL"}, \ - {0x900, "DECREMENTER"}, \ - {0x980, "HV_DECREMENTER"}, \ - {0xc00, "SYSCALL"}, \ - {0xd00, "TRACE"}, \ - {0xe00, "H_DATA_STORAGE"}, \ - {0xe20, "H_INST_STORAGE"}, \ - {0xe40, "H_EMUL_ASSIST"}, \ - {0xf00, "PERFMON"}, \ - {0xf20, "ALTIVEC"}, \ - {0xf40, "VSX"} -#else -#define kvm_trace_symbol_exit \ - {0, "CRITICAL"}, \ - {1, "MACHINE_CHECK"}, \ - {2, "DATA_STORAGE"}, \ - {3, "INST_STORAGE"}, \ - {4, "EXTERNAL"}, \ - {5, "ALIGNMENT"}, \ - {6, "PROGRAM"}, \ - {7, "FP_UNAVAIL"}, \ - {8, "SYSCALL"}, \ - {9, "AP_UNAVAIL"}, \ - {10, "DECREMENTER"}, \ - {11, "FIT"}, \ - {12, "WATCHDOG"}, \ - {13, "DTLB_MISS"}, \ - {14, "ITLB_MISS"}, \ - {15, "DEBUG"}, \ - {32, "SPE_UNAVAIL"}, \ - {33, "SPE_FP_DATA"}, \ - {34, "SPE_FP_ROUND"}, \ - {35, "PERFORMANCE_MONITOR"}, \ - {36, "DOORBELL"}, \ - {37, "DOORBELL_CRITICAL"}, \ - {38, "GUEST_DBELL"}, \ - {39, "GUEST_DBELL_CRIT"}, \ - {40, "HV_SYSCALL"}, \ - {41, "HV_PRIV"} -#endif - -TRACE_EVENT(kvm_exit, - TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), - TP_ARGS(exit_nr, vcpu), - - TP_STRUCT__entry( - __field( unsigned int, exit_nr ) - __field( unsigned long, pc ) - __field( unsigned long, msr ) - __field( unsigned long, dar ) -#ifdef CONFIG_KVM_BOOK3S_PR - __field( unsigned long, srr1 ) -#endif - __field( unsigned long, last_inst ) - ), - - TP_fast_assign( -#ifdef CONFIG_KVM_BOOK3S_PR - struct kvmppc_book3s_shadow_vcpu *svcpu; -#endif - __entry->exit_nr = exit_nr; - __entry->pc = kvmppc_get_pc(vcpu); - __entry->dar = kvmppc_get_fault_dar(vcpu); - __entry->msr = vcpu->arch.shared->msr; -#ifdef CONFIG_KVM_BOOK3S_PR - svcpu = svcpu_get(vcpu); - __entry->srr1 = svcpu->shadow_srr1; - svcpu_put(svcpu); -#endif - __entry->last_inst = vcpu->arch.last_inst; - ), - - TP_printk("exit=%s" - " | pc=0x%lx" - " | msr=0x%lx" - " | dar=0x%lx" -#ifdef CONFIG_KVM_BOOK3S_PR - " | srr1=0x%lx" -#endif - " | last_inst=0x%lx" - , - __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit), - __entry->pc, - __entry->msr, - __entry->dar, -#ifdef CONFIG_KVM_BOOK3S_PR - __entry->srr1, -#endif - __entry->last_inst - ) -); - -TRACE_EVENT(kvm_unmap_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("unmap hva 0x%lx\n", __entry->hva) -); - TRACE_EVENT(kvm_stlb_inval, TP_PROTO(unsigned int stlb_index), TP_ARGS(stlb_index), @@ -236,315 +116,6 @@ TRACE_EVENT(kvm_check_requests, __entry->cpu_nr, __entry->requests) ); - -/************************************************************************* - * Book3S trace points * - *************************************************************************/ - -#ifdef CONFIG_KVM_BOOK3S_PR - -TRACE_EVENT(kvm_book3s_reenter, - TP_PROTO(int r, struct kvm_vcpu *vcpu), - 
TP_ARGS(r, vcpu), - - TP_STRUCT__entry( - __field( unsigned int, r ) - __field( unsigned long, pc ) - ), - - TP_fast_assign( - __entry->r = r; - __entry->pc = kvmppc_get_pc(vcpu); - ), - - TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc) -); - -#ifdef CONFIG_PPC_BOOK3S_64 - -TRACE_EVENT(kvm_book3s_64_mmu_map, - TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr, - struct kvmppc_pte *orig_pte), - TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte), - - TP_STRUCT__entry( - __field( unsigned char, flag_w ) - __field( unsigned char, flag_x ) - __field( unsigned long, eaddr ) - __field( unsigned long, hpteg ) - __field( unsigned long, va ) - __field( unsigned long long, vpage ) - __field( unsigned long, hpaddr ) - ), - - TP_fast_assign( - __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w'; - __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x'; - __entry->eaddr = orig_pte->eaddr; - __entry->hpteg = hpteg; - __entry->va = va; - __entry->vpage = orig_pte->vpage; - __entry->hpaddr = hpaddr; - ), - - TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx", - __entry->flag_w, __entry->flag_x, __entry->eaddr, - __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr) -); - -#endif /* CONFIG_PPC_BOOK3S_64 */ - -TRACE_EVENT(kvm_book3s_mmu_map, - TP_PROTO(struct hpte_cache *pte), - TP_ARGS(pte), - - TP_STRUCT__entry( - __field( u64, host_vpn ) - __field( u64, pfn ) - __field( ulong, eaddr ) - __field( u64, vpage ) - __field( ulong, raddr ) - __field( int, flags ) - ), - - TP_fast_assign( - __entry->host_vpn = pte->host_vpn; - __entry->pfn = pte->pfn; - __entry->eaddr = pte->pte.eaddr; - __entry->vpage = pte->pte.vpage; - __entry->raddr = pte->pte.raddr; - __entry->flags = (pte->pte.may_read ? 0x4 : 0) | - (pte->pte.may_write ? 0x2 : 0) | - (pte->pte.may_execute ? 0x1 : 0); - ), - - TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", - __entry->host_vpn, __entry->pfn, __entry->eaddr, - __entry->vpage, __entry->raddr, __entry->flags) -); - -TRACE_EVENT(kvm_book3s_mmu_invalidate, - TP_PROTO(struct hpte_cache *pte), - TP_ARGS(pte), - - TP_STRUCT__entry( - __field( u64, host_vpn ) - __field( u64, pfn ) - __field( ulong, eaddr ) - __field( u64, vpage ) - __field( ulong, raddr ) - __field( int, flags ) - ), - - TP_fast_assign( - __entry->host_vpn = pte->host_vpn; - __entry->pfn = pte->pfn; - __entry->eaddr = pte->pte.eaddr; - __entry->vpage = pte->pte.vpage; - __entry->raddr = pte->pte.raddr; - __entry->flags = (pte->pte.may_read ? 0x4 : 0) | - (pte->pte.may_write ? 0x2 : 0) | - (pte->pte.may_execute ? 
0x1 : 0); - ), - - TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", - __entry->host_vpn, __entry->pfn, __entry->eaddr, - __entry->vpage, __entry->raddr, __entry->flags) -); - -TRACE_EVENT(kvm_book3s_mmu_flush, - TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1, - unsigned long long p2), - TP_ARGS(type, vcpu, p1, p2), - - TP_STRUCT__entry( - __field( int, count ) - __field( unsigned long long, p1 ) - __field( unsigned long long, p2 ) - __field( const char *, type ) - ), - - TP_fast_assign( - __entry->count = to_book3s(vcpu)->hpte_cache_count; - __entry->p1 = p1; - __entry->p2 = p2; - __entry->type = type; - ), - - TP_printk("Flush %d %sPTEs: %llx - %llx", - __entry->count, __entry->type, __entry->p1, __entry->p2) -); - -TRACE_EVENT(kvm_book3s_slb_found, - TP_PROTO(unsigned long long gvsid, unsigned long long hvsid), - TP_ARGS(gvsid, hvsid), - - TP_STRUCT__entry( - __field( unsigned long long, gvsid ) - __field( unsigned long long, hvsid ) - ), - - TP_fast_assign( - __entry->gvsid = gvsid; - __entry->hvsid = hvsid; - ), - - TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid) -); - -TRACE_EVENT(kvm_book3s_slb_fail, - TP_PROTO(u16 sid_map_mask, unsigned long long gvsid), - TP_ARGS(sid_map_mask, gvsid), - - TP_STRUCT__entry( - __field( unsigned short, sid_map_mask ) - __field( unsigned long long, gvsid ) - ), - - TP_fast_assign( - __entry->sid_map_mask = sid_map_mask; - __entry->gvsid = gvsid; - ), - - TP_printk("%x/%x: %llx", __entry->sid_map_mask, - SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid) -); - -TRACE_EVENT(kvm_book3s_slb_map, - TP_PROTO(u16 sid_map_mask, unsigned long long gvsid, - unsigned long long hvsid), - TP_ARGS(sid_map_mask, gvsid, hvsid), - - TP_STRUCT__entry( - __field( unsigned short, sid_map_mask ) - __field( unsigned long long, guest_vsid ) - __field( unsigned long long, host_vsid ) - ), - - TP_fast_assign( - __entry->sid_map_mask = sid_map_mask; - __entry->guest_vsid = gvsid; - __entry->host_vsid = hvsid; - ), - - TP_printk("%x: %llx -> %llx", __entry->sid_map_mask, - __entry->guest_vsid, __entry->host_vsid) -); - -TRACE_EVENT(kvm_book3s_slbmte, - TP_PROTO(u64 slb_vsid, u64 slb_esid), - TP_ARGS(slb_vsid, slb_esid), - - TP_STRUCT__entry( - __field( u64, slb_vsid ) - __field( u64, slb_esid ) - ), - - TP_fast_assign( - __entry->slb_vsid = slb_vsid; - __entry->slb_esid = slb_esid; - ), - - TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid) -); - -#endif /* CONFIG_PPC_BOOK3S */ - - -/************************************************************************* - * Book3E trace points * - *************************************************************************/ - -#ifdef CONFIG_BOOKE - -TRACE_EVENT(kvm_booke206_stlb_write, - TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3), - TP_ARGS(mas0, mas8, mas1, mas2, mas7_3), - - TP_STRUCT__entry( - __field( __u32, mas0 ) - __field( __u32, mas8 ) - __field( __u32, mas1 ) - __field( __u64, mas2 ) - __field( __u64, mas7_3 ) - ), - - TP_fast_assign( - __entry->mas0 = mas0; - __entry->mas8 = mas8; - __entry->mas1 = mas1; - __entry->mas2 = mas2; - __entry->mas7_3 = mas7_3; - ), - - TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx", - __entry->mas0, __entry->mas8, __entry->mas1, - __entry->mas2, __entry->mas7_3) -); - -TRACE_EVENT(kvm_booke206_gtlb_write, - TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3), - TP_ARGS(mas0, mas1, mas2, mas7_3), - - TP_STRUCT__entry( - __field( __u32, mas0 ) - __field( __u32, mas1 ) - __field( __u64, mas2 ) - 
__field( __u64, mas7_3 ) - ), - - TP_fast_assign( - __entry->mas0 = mas0; - __entry->mas1 = mas1; - __entry->mas2 = mas2; - __entry->mas7_3 = mas7_3; - ), - - TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx", - __entry->mas0, __entry->mas1, - __entry->mas2, __entry->mas7_3) -); - -TRACE_EVENT(kvm_booke206_ref_release, - TP_PROTO(__u64 pfn, __u32 flags), - TP_ARGS(pfn, flags), - - TP_STRUCT__entry( - __field( __u64, pfn ) - __field( __u32, flags ) - ), - - TP_fast_assign( - __entry->pfn = pfn; - __entry->flags = flags; - ), - - TP_printk("pfn=%llx flags=%x", - __entry->pfn, __entry->flags) -); - -TRACE_EVENT(kvm_booke_queue_irqprio, - TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority), - TP_ARGS(vcpu, priority), - - TP_STRUCT__entry( - __field( __u32, cpu_nr ) - __field( __u32, priority ) - __field( unsigned long, pending ) - ), - - TP_fast_assign( - __entry->cpu_nr = vcpu->vcpu_id; - __entry->priority = priority; - __entry->pending = vcpu->arch.pending_exceptions; - ), - - TP_printk("vcpu=%x prio=%x pending=%lx", - __entry->cpu_nr, __entry->priority, __entry->pending) -); - -#endif - #endif /* _TRACE_KVM_H */ /* This part must be outside protection */ diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h new file mode 100644 index 00000000000..f7537cf26ce --- /dev/null +++ b/arch/powerpc/kvm/trace_booke.h @@ -0,0 +1,177 @@ +#if !defined(_TRACE_KVM_BOOKE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_BOOKE_H + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm_booke +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace_booke + +#define kvm_trace_symbol_exit \ + {0, "CRITICAL"}, \ + {1, "MACHINE_CHECK"}, \ + {2, "DATA_STORAGE"}, \ + {3, "INST_STORAGE"}, \ + {4, "EXTERNAL"}, \ + {5, "ALIGNMENT"}, \ + {6, "PROGRAM"}, \ + {7, "FP_UNAVAIL"}, \ + {8, "SYSCALL"}, \ + {9, "AP_UNAVAIL"}, \ + {10, "DECREMENTER"}, \ + {11, "FIT"}, \ + {12, "WATCHDOG"}, \ + {13, "DTLB_MISS"}, \ + {14, "ITLB_MISS"}, \ + {15, "DEBUG"}, \ + {32, "SPE_UNAVAIL"}, \ + {33, "SPE_FP_DATA"}, \ + {34, "SPE_FP_ROUND"}, \ + {35, "PERFORMANCE_MONITOR"}, \ + {36, "DOORBELL"}, \ + {37, "DOORBELL_CRITICAL"}, \ + {38, "GUEST_DBELL"}, \ + {39, "GUEST_DBELL_CRIT"}, \ + {40, "HV_SYSCALL"}, \ + {41, "HV_PRIV"} + +TRACE_EVENT(kvm_exit, + TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), + TP_ARGS(exit_nr, vcpu), + + TP_STRUCT__entry( + __field( unsigned int, exit_nr ) + __field( unsigned long, pc ) + __field( unsigned long, msr ) + __field( unsigned long, dar ) + __field( unsigned long, last_inst ) + ), + + TP_fast_assign( + __entry->exit_nr = exit_nr; + __entry->pc = kvmppc_get_pc(vcpu); + __entry->dar = kvmppc_get_fault_dar(vcpu); + __entry->msr = vcpu->arch.shared->msr; + __entry->last_inst = vcpu->arch.last_inst; + ), + + TP_printk("exit=%s" + " | pc=0x%lx" + " | msr=0x%lx" + " | dar=0x%lx" + " | last_inst=0x%lx" + , + __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit), + __entry->pc, + __entry->msr, + __entry->dar, + __entry->last_inst + ) +); + +TRACE_EVENT(kvm_unmap_hva, + TP_PROTO(unsigned long hva), + TP_ARGS(hva), + + TP_STRUCT__entry( + __field( unsigned long, hva ) + ), + + TP_fast_assign( + __entry->hva = hva; + ), + + TP_printk("unmap hva 0x%lx\n", __entry->hva) +); + +TRACE_EVENT(kvm_booke206_stlb_write, + TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3), + TP_ARGS(mas0, mas8, mas1, mas2, mas7_3), + + TP_STRUCT__entry( + __field( __u32, mas0 ) + __field( __u32, mas8 ) + __field( __u32, mas1 ) + __field( __u64, mas2 
) + __field( __u64, mas7_3 ) + ), + + TP_fast_assign( + __entry->mas0 = mas0; + __entry->mas8 = mas8; + __entry->mas1 = mas1; + __entry->mas2 = mas2; + __entry->mas7_3 = mas7_3; + ), + + TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx", + __entry->mas0, __entry->mas8, __entry->mas1, + __entry->mas2, __entry->mas7_3) +); + +TRACE_EVENT(kvm_booke206_gtlb_write, + TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3), + TP_ARGS(mas0, mas1, mas2, mas7_3), + + TP_STRUCT__entry( + __field( __u32, mas0 ) + __field( __u32, mas1 ) + __field( __u64, mas2 ) + __field( __u64, mas7_3 ) + ), + + TP_fast_assign( + __entry->mas0 = mas0; + __entry->mas1 = mas1; + __entry->mas2 = mas2; + __entry->mas7_3 = mas7_3; + ), + + TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx", + __entry->mas0, __entry->mas1, + __entry->mas2, __entry->mas7_3) +); + +TRACE_EVENT(kvm_booke206_ref_release, + TP_PROTO(__u64 pfn, __u32 flags), + TP_ARGS(pfn, flags), + + TP_STRUCT__entry( + __field( __u64, pfn ) + __field( __u32, flags ) + ), + + TP_fast_assign( + __entry->pfn = pfn; + __entry->flags = flags; + ), + + TP_printk("pfn=%llx flags=%x", + __entry->pfn, __entry->flags) +); + +TRACE_EVENT(kvm_booke_queue_irqprio, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int priority), + TP_ARGS(vcpu, priority), + + TP_STRUCT__entry( + __field( __u32, cpu_nr ) + __field( __u32, priority ) + __field( unsigned long, pending ) + ), + + TP_fast_assign( + __entry->cpu_nr = vcpu->vcpu_id; + __entry->priority = priority; + __entry->pending = vcpu->arch.pending_exceptions; + ), + + TP_printk("vcpu=%x prio=%x pending=%lx", + __entry->cpu_nr, __entry->priority, __entry->pending) +); + +#endif + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h new file mode 100644 index 00000000000..8b22e474834 --- /dev/null +++ b/arch/powerpc/kvm/trace_pr.h @@ -0,0 +1,297 @@ + +#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_PR_H + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm_pr +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE trace_pr + +#define kvm_trace_symbol_exit \ + {0x100, "SYSTEM_RESET"}, \ + {0x200, "MACHINE_CHECK"}, \ + {0x300, "DATA_STORAGE"}, \ + {0x380, "DATA_SEGMENT"}, \ + {0x400, "INST_STORAGE"}, \ + {0x480, "INST_SEGMENT"}, \ + {0x500, "EXTERNAL"}, \ + {0x501, "EXTERNAL_LEVEL"}, \ + {0x502, "EXTERNAL_HV"}, \ + {0x600, "ALIGNMENT"}, \ + {0x700, "PROGRAM"}, \ + {0x800, "FP_UNAVAIL"}, \ + {0x900, "DECREMENTER"}, \ + {0x980, "HV_DECREMENTER"}, \ + {0xc00, "SYSCALL"}, \ + {0xd00, "TRACE"}, \ + {0xe00, "H_DATA_STORAGE"}, \ + {0xe20, "H_INST_STORAGE"}, \ + {0xe40, "H_EMUL_ASSIST"}, \ + {0xf00, "PERFMON"}, \ + {0xf20, "ALTIVEC"}, \ + {0xf40, "VSX"} + +TRACE_EVENT(kvm_book3s_reenter, + TP_PROTO(int r, struct kvm_vcpu *vcpu), + TP_ARGS(r, vcpu), + + TP_STRUCT__entry( + __field( unsigned int, r ) + __field( unsigned long, pc ) + ), + + TP_fast_assign( + __entry->r = r; + __entry->pc = kvmppc_get_pc(vcpu); + ), + + TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc) +); + +#ifdef CONFIG_PPC_BOOK3S_64 + +TRACE_EVENT(kvm_book3s_64_mmu_map, + TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr, + struct kvmppc_pte *orig_pte), + TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte), + + TP_STRUCT__entry( + __field( unsigned char, flag_w ) + __field( unsigned char, flag_x ) + __field( unsigned long, eaddr ) + __field( unsigned long, hpteg ) + __field( unsigned long, va ) + __field( unsigned long long, vpage ) + __field( unsigned long, hpaddr ) + ), + + TP_fast_assign( + __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w'; + __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x'; + __entry->eaddr = orig_pte->eaddr; + __entry->hpteg = hpteg; + __entry->va = va; + __entry->vpage = orig_pte->vpage; + __entry->hpaddr = hpaddr; + ), + + TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx", + __entry->flag_w, __entry->flag_x, __entry->eaddr, + __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr) +); + +#endif /* CONFIG_PPC_BOOK3S_64 */ + +TRACE_EVENT(kvm_book3s_mmu_map, + TP_PROTO(struct hpte_cache *pte), + TP_ARGS(pte), + + TP_STRUCT__entry( + __field( u64, host_vpn ) + __field( u64, pfn ) + __field( ulong, eaddr ) + __field( u64, vpage ) + __field( ulong, raddr ) + __field( int, flags ) + ), + + TP_fast_assign( + __entry->host_vpn = pte->host_vpn; + __entry->pfn = pte->pfn; + __entry->eaddr = pte->pte.eaddr; + __entry->vpage = pte->pte.vpage; + __entry->raddr = pte->pte.raddr; + __entry->flags = (pte->pte.may_read ? 0x4 : 0) | + (pte->pte.may_write ? 0x2 : 0) | + (pte->pte.may_execute ? 0x1 : 0); + ), + + TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", + __entry->host_vpn, __entry->pfn, __entry->eaddr, + __entry->vpage, __entry->raddr, __entry->flags) +); + +TRACE_EVENT(kvm_book3s_mmu_invalidate, + TP_PROTO(struct hpte_cache *pte), + TP_ARGS(pte), + + TP_STRUCT__entry( + __field( u64, host_vpn ) + __field( u64, pfn ) + __field( ulong, eaddr ) + __field( u64, vpage ) + __field( ulong, raddr ) + __field( int, flags ) + ), + + TP_fast_assign( + __entry->host_vpn = pte->host_vpn; + __entry->pfn = pte->pfn; + __entry->eaddr = pte->pte.eaddr; + __entry->vpage = pte->pte.vpage; + __entry->raddr = pte->pte.raddr; + __entry->flags = (pte->pte.may_read ? 0x4 : 0) | + (pte->pte.may_write ? 0x2 : 0) | + (pte->pte.may_execute ? 
0x1 : 0); + ), + + TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", + __entry->host_vpn, __entry->pfn, __entry->eaddr, + __entry->vpage, __entry->raddr, __entry->flags) +); + +TRACE_EVENT(kvm_book3s_mmu_flush, + TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1, + unsigned long long p2), + TP_ARGS(type, vcpu, p1, p2), + + TP_STRUCT__entry( + __field( int, count ) + __field( unsigned long long, p1 ) + __field( unsigned long long, p2 ) + __field( const char *, type ) + ), + + TP_fast_assign( + __entry->count = to_book3s(vcpu)->hpte_cache_count; + __entry->p1 = p1; + __entry->p2 = p2; + __entry->type = type; + ), + + TP_printk("Flush %d %sPTEs: %llx - %llx", + __entry->count, __entry->type, __entry->p1, __entry->p2) +); + +TRACE_EVENT(kvm_book3s_slb_found, + TP_PROTO(unsigned long long gvsid, unsigned long long hvsid), + TP_ARGS(gvsid, hvsid), + + TP_STRUCT__entry( + __field( unsigned long long, gvsid ) + __field( unsigned long long, hvsid ) + ), + + TP_fast_assign( + __entry->gvsid = gvsid; + __entry->hvsid = hvsid; + ), + + TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid) +); + +TRACE_EVENT(kvm_book3s_slb_fail, + TP_PROTO(u16 sid_map_mask, unsigned long long gvsid), + TP_ARGS(sid_map_mask, gvsid), + + TP_STRUCT__entry( + __field( unsigned short, sid_map_mask ) + __field( unsigned long long, gvsid ) + ), + + TP_fast_assign( + __entry->sid_map_mask = sid_map_mask; + __entry->gvsid = gvsid; + ), + + TP_printk("%x/%x: %llx", __entry->sid_map_mask, + SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid) +); + +TRACE_EVENT(kvm_book3s_slb_map, + TP_PROTO(u16 sid_map_mask, unsigned long long gvsid, + unsigned long long hvsid), + TP_ARGS(sid_map_mask, gvsid, hvsid), + + TP_STRUCT__entry( + __field( unsigned short, sid_map_mask ) + __field( unsigned long long, guest_vsid ) + __field( unsigned long long, host_vsid ) + ), + + TP_fast_assign( + __entry->sid_map_mask = sid_map_mask; + __entry->guest_vsid = gvsid; + __entry->host_vsid = hvsid; + ), + + TP_printk("%x: %llx -> %llx", __entry->sid_map_mask, + __entry->guest_vsid, __entry->host_vsid) +); + +TRACE_EVENT(kvm_book3s_slbmte, + TP_PROTO(u64 slb_vsid, u64 slb_esid), + TP_ARGS(slb_vsid, slb_esid), + + TP_STRUCT__entry( + __field( u64, slb_vsid ) + __field( u64, slb_esid ) + ), + + TP_fast_assign( + __entry->slb_vsid = slb_vsid; + __entry->slb_esid = slb_esid; + ), + + TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid) +); + +TRACE_EVENT(kvm_exit, + TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), + TP_ARGS(exit_nr, vcpu), + + TP_STRUCT__entry( + __field( unsigned int, exit_nr ) + __field( unsigned long, pc ) + __field( unsigned long, msr ) + __field( unsigned long, dar ) + __field( unsigned long, srr1 ) + __field( unsigned long, last_inst ) + ), + + TP_fast_assign( + __entry->exit_nr = exit_nr; + __entry->pc = kvmppc_get_pc(vcpu); + __entry->dar = kvmppc_get_fault_dar(vcpu); + __entry->msr = vcpu->arch.shared->msr; + __entry->srr1 = vcpu->arch.shadow_srr1; + __entry->last_inst = vcpu->arch.last_inst; + ), + + TP_printk("exit=%s" + " | pc=0x%lx" + " | msr=0x%lx" + " | dar=0x%lx" + " | srr1=0x%lx" + " | last_inst=0x%lx" + , + __print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit), + __entry->pc, + __entry->msr, + __entry->dar, + __entry->srr1, + __entry->last_inst + ) +); + +TRACE_EVENT(kvm_unmap_hva, + TP_PROTO(unsigned long hva), + TP_ARGS(hva), + + TP_STRUCT__entry( + __field( unsigned long, hva ) + ), + + TP_fast_assign( + __entry->hva = hva; + ), + + TP_printk("unmap hva 0x%lx\n", 
__entry->hva) +); + +#endif /* _TRACE_KVM_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 45043327669..95a20e17dbf 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -10,15 +10,23 @@ CFLAGS_REMOVE_code-patching.o = -pg CFLAGS_REMOVE_feature-fixups.o = -pg obj-y := string.o alloc.o \ - checksum_$(CONFIG_WORD_SIZE).o crtsavres.o + crtsavres.o obj-$(CONFIG_PPC32) += div64.o copy_32.o obj-$(CONFIG_HAS_IOMEM) += devres.o obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ - memcpy_64.o usercopy_64.o mem_64.o string.o \ - checksum_wrappers_64.o hweight_64.o \ - copyuser_power7.o string_64.o copypage_power7.o \ - memcpy_power7.o + usercopy_64.o mem_64.o string.o \ + hweight_64.o \ + copyuser_power7.o string_64.o copypage_power7.o +ifeq ($(CONFIG_GENERIC_CSUM),) +obj-y += checksum_$(CONFIG_WORD_SIZE).o +obj-$(CONFIG_PPC64) += checksum_wrappers_64.o +endif + +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),) +obj-$(CONFIG_PPC64) += memcpy_power7.o memcpy_64.o +endif + obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o ifeq ($(CONFIG_PPC64),y) @@ -31,3 +39,6 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o obj-y += code-patching.o obj-y += feature-fixups.o obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o + +obj-$(CONFIG_ALTIVEC) += xor_vmx.o +CFLAGS_xor_vmx.o += -maltivec -mabi=altivec diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index d1f11795a7a..e8e9c36dc78 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -19,6 +19,14 @@ */ #include <asm/ppc_asm.h> +#ifdef __BIG_ENDIAN__ +#define LVS(VRT,RA,RB) lvsl VRT,RA,RB +#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC +#else +#define LVS(VRT,RA,RB) lvsr VRT,RA,RB +#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC +#endif + .macro err1 100: .section __ex_table,"a" @@ -552,13 +560,13 @@ err3; stw r7,4(r3) li r10,32 li r11,48 - lvsl vr16,0,r4 /* Setup permute control vector */ + LVS(vr16,0,r4) /* Setup permute control vector */ err3; lvx vr0,0,r4 addi r4,r4,16 bf cr7*4+3,5f err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) addi r4,r4,16 err3; stvx vr8,r0,r3 addi r3,r3,16 @@ -566,9 +574,9 @@ err3; stvx vr8,r0,r3 5: bf cr7*4+2,6f err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) err3; lvx vr0,r4,r9 - vperm vr9,vr1,vr0,vr16 + VPERM(vr9,vr1,vr0,vr16) addi r4,r4,32 err3; stvx vr8,r0,r3 err3; stvx vr9,r3,r9 @@ -576,13 +584,13 @@ err3; stvx vr9,r3,r9 6: bf cr7*4+1,7f err3; lvx vr3,r0,r4 - vperm vr8,vr0,vr3,vr16 + VPERM(vr8,vr0,vr3,vr16) err3; lvx vr2,r4,r9 - vperm vr9,vr3,vr2,vr16 + VPERM(vr9,vr3,vr2,vr16) err3; lvx vr1,r4,r10 - vperm vr10,vr2,vr1,vr16 + VPERM(vr10,vr2,vr1,vr16) err3; lvx vr0,r4,r11 - vperm vr11,vr1,vr0,vr16 + VPERM(vr11,vr1,vr0,vr16) addi r4,r4,64 err3; stvx vr8,r0,r3 err3; stvx vr9,r3,r9 @@ -611,21 +619,21 @@ err3; stvx vr11,r3,r11 .align 5 8: err4; lvx vr7,r0,r4 - vperm vr8,vr0,vr7,vr16 + VPERM(vr8,vr0,vr7,vr16) err4; lvx vr6,r4,r9 - vperm vr9,vr7,vr6,vr16 + VPERM(vr9,vr7,vr6,vr16) err4; lvx vr5,r4,r10 - vperm vr10,vr6,vr5,vr16 + VPERM(vr10,vr6,vr5,vr16) err4; lvx vr4,r4,r11 - vperm vr11,vr5,vr4,vr16 + VPERM(vr11,vr5,vr4,vr16) err4; lvx vr3,r4,r12 - vperm vr12,vr4,vr3,vr16 + VPERM(vr12,vr4,vr3,vr16) err4; lvx vr2,r4,r14 - vperm vr13,vr3,vr2,vr16 + VPERM(vr13,vr3,vr2,vr16) err4; lvx vr1,r4,r15 - vperm vr14,vr2,vr1,vr16 + VPERM(vr14,vr2,vr1,vr16) err4; lvx vr0,r4,r16 - vperm vr15,vr1,vr0,vr16 + 
VPERM(vr15,vr1,vr0,vr16) addi r4,r4,128 err4; stvx vr8,r0,r3 err4; stvx vr9,r3,r9 @@ -649,13 +657,13 @@ err4; stvx vr15,r3,r16 bf cr7*4+1,9f err3; lvx vr3,r0,r4 - vperm vr8,vr0,vr3,vr16 + VPERM(vr8,vr0,vr3,vr16) err3; lvx vr2,r4,r9 - vperm vr9,vr3,vr2,vr16 + VPERM(vr9,vr3,vr2,vr16) err3; lvx vr1,r4,r10 - vperm vr10,vr2,vr1,vr16 + VPERM(vr10,vr2,vr1,vr16) err3; lvx vr0,r4,r11 - vperm vr11,vr1,vr0,vr16 + VPERM(vr11,vr1,vr0,vr16) addi r4,r4,64 err3; stvx vr8,r0,r3 err3; stvx vr9,r3,r9 @@ -665,9 +673,9 @@ err3; stvx vr11,r3,r11 9: bf cr7*4+2,10f err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) err3; lvx vr0,r4,r9 - vperm vr9,vr1,vr0,vr16 + VPERM(vr9,vr1,vr0,vr16) addi r4,r4,32 err3; stvx vr8,r0,r3 err3; stvx vr9,r3,r9 @@ -675,7 +683,7 @@ err3; stvx vr9,r3,r9 10: bf cr7*4+3,11f err3; lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) addi r4,r4,16 err3; stvx vr8,r0,r3 addi r3,r3,16 diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 0663630baf3..e4177dbea6b 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -20,6 +20,15 @@ #include <asm/ppc_asm.h> _GLOBAL(memcpy_power7) + +#ifdef __BIG_ENDIAN__ +#define LVS(VRT,RA,RB) lvsl VRT,RA,RB +#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC +#else +#define LVS(VRT,RA,RB) lvsr VRT,RA,RB +#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRB,VRA,VRC +#endif + #ifdef CONFIG_ALTIVEC cmpldi r5,16 cmpldi cr1,r5,4096 @@ -485,13 +494,13 @@ _GLOBAL(memcpy_power7) li r10,32 li r11,48 - lvsl vr16,0,r4 /* Setup permute control vector */ + LVS(vr16,0,r4) /* Setup permute control vector */ lvx vr0,0,r4 addi r4,r4,16 bf cr7*4+3,5f lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) addi r4,r4,16 stvx vr8,r0,r3 addi r3,r3,16 @@ -499,9 +508,9 @@ _GLOBAL(memcpy_power7) 5: bf cr7*4+2,6f lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) lvx vr0,r4,r9 - vperm vr9,vr1,vr0,vr16 + VPERM(vr9,vr1,vr0,vr16) addi r4,r4,32 stvx vr8,r0,r3 stvx vr9,r3,r9 @@ -509,13 +518,13 @@ _GLOBAL(memcpy_power7) 6: bf cr7*4+1,7f lvx vr3,r0,r4 - vperm vr8,vr0,vr3,vr16 + VPERM(vr8,vr0,vr3,vr16) lvx vr2,r4,r9 - vperm vr9,vr3,vr2,vr16 + VPERM(vr9,vr3,vr2,vr16) lvx vr1,r4,r10 - vperm vr10,vr2,vr1,vr16 + VPERM(vr10,vr2,vr1,vr16) lvx vr0,r4,r11 - vperm vr11,vr1,vr0,vr16 + VPERM(vr11,vr1,vr0,vr16) addi r4,r4,64 stvx vr8,r0,r3 stvx vr9,r3,r9 @@ -544,21 +553,21 @@ _GLOBAL(memcpy_power7) .align 5 8: lvx vr7,r0,r4 - vperm vr8,vr0,vr7,vr16 + VPERM(vr8,vr0,vr7,vr16) lvx vr6,r4,r9 - vperm vr9,vr7,vr6,vr16 + VPERM(vr9,vr7,vr6,vr16) lvx vr5,r4,r10 - vperm vr10,vr6,vr5,vr16 + VPERM(vr10,vr6,vr5,vr16) lvx vr4,r4,r11 - vperm vr11,vr5,vr4,vr16 + VPERM(vr11,vr5,vr4,vr16) lvx vr3,r4,r12 - vperm vr12,vr4,vr3,vr16 + VPERM(vr12,vr4,vr3,vr16) lvx vr2,r4,r14 - vperm vr13,vr3,vr2,vr16 + VPERM(vr13,vr3,vr2,vr16) lvx vr1,r4,r15 - vperm vr14,vr2,vr1,vr16 + VPERM(vr14,vr2,vr1,vr16) lvx vr0,r4,r16 - vperm vr15,vr1,vr0,vr16 + VPERM(vr15,vr1,vr0,vr16) addi r4,r4,128 stvx vr8,r0,r3 stvx vr9,r3,r9 @@ -582,13 +591,13 @@ _GLOBAL(memcpy_power7) bf cr7*4+1,9f lvx vr3,r0,r4 - vperm vr8,vr0,vr3,vr16 + VPERM(vr8,vr0,vr3,vr16) lvx vr2,r4,r9 - vperm vr9,vr3,vr2,vr16 + VPERM(vr9,vr3,vr2,vr16) lvx vr1,r4,r10 - vperm vr10,vr2,vr1,vr16 + VPERM(vr10,vr2,vr1,vr16) lvx vr0,r4,r11 - vperm vr11,vr1,vr0,vr16 + VPERM(vr11,vr1,vr0,vr16) addi r4,r4,64 stvx vr8,r0,r3 stvx vr9,r3,r9 @@ -598,9 +607,9 @@ _GLOBAL(memcpy_power7) 9: bf cr7*4+2,10f lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) lvx vr0,r4,r9 - vperm 
vr9,vr1,vr0,vr16 + VPERM(vr9,vr1,vr0,vr16) addi r4,r4,32 stvx vr8,r0,r3 stvx vr9,r3,r9 @@ -608,7 +617,7 @@ _GLOBAL(memcpy_power7) 10: bf cr7*4+3,11f lvx vr1,r0,r4 - vperm vr8,vr0,vr1,vr16 + VPERM(vr8,vr0,vr1,vr16) addi r4,r4,16 stvx vr8,r0,r3 addi r3,r3,16 diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index b1faa1593c9..c0511c27a73 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -212,11 +212,19 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea, { int err; unsigned long x, b, c; +#ifdef __LITTLE_ENDIAN__ + int len = nb; /* save a copy of the length for byte reversal */ +#endif /* unaligned, do this in pieces */ x = 0; for (; nb > 0; nb -= c) { +#ifdef __LITTLE_ENDIAN__ + c = 1; +#endif +#ifdef __BIG_ENDIAN__ c = max_align(ea); +#endif if (c > nb) c = max_align(nb); err = read_mem_aligned(&b, ea, c); @@ -225,7 +233,24 @@ static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea, x = (x << (8 * c)) + b; ea += c; } +#ifdef __LITTLE_ENDIAN__ + switch (len) { + case 2: + *dest = byterev_2(x); + break; + case 4: + *dest = byterev_4(x); + break; +#ifdef __powerpc64__ + case 8: + *dest = byterev_8(x); + break; +#endif + } +#endif +#ifdef __BIG_ENDIAN__ *dest = x; +#endif return 0; } @@ -273,9 +298,29 @@ static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea, int err; unsigned long c; +#ifdef __LITTLE_ENDIAN__ + switch (nb) { + case 2: + val = byterev_2(val); + break; + case 4: + val = byterev_4(val); + break; +#ifdef __powerpc64__ + case 8: + val = byterev_8(val); + break; +#endif + } +#endif /* unaligned or little-endian, do this in pieces */ for (; nb > 0; nb -= c) { +#ifdef __LITTLE_ENDIAN__ + c = 1; +#endif +#ifdef __BIG_ENDIAN__ c = max_align(ea); +#endif if (c > nb) c = max_align(nb); err = write_mem_aligned(val >> (nb - c) * 8, ea, c); @@ -310,22 +355,36 @@ static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long), struct pt_regs *regs) { int err; - unsigned long val[sizeof(double) / sizeof(long)]; + union { + double dbl; + unsigned long ul[2]; + struct { +#ifdef __BIG_ENDIAN__ + unsigned _pad_; + unsigned word; +#endif +#ifdef __LITTLE_ENDIAN__ + unsigned word; + unsigned _pad_; +#endif + } single; + } data; unsigned long ptr; if (!address_ok(regs, ea, nb)) return -EFAULT; if ((ea & 3) == 0) return (*func)(rn, ea); - ptr = (unsigned long) &val[0]; + ptr = (unsigned long) &data.ul; if (sizeof(unsigned long) == 8 || nb == 4) { - err = read_mem_unaligned(&val[0], ea, nb, regs); - ptr += sizeof(unsigned long) - nb; + err = read_mem_unaligned(&data.ul[0], ea, nb, regs); + if (nb == 4) + ptr = (unsigned long)&(data.single.word); } else { /* reading a double on 32-bit */ - err = read_mem_unaligned(&val[0], ea, 4, regs); + err = read_mem_unaligned(&data.ul[0], ea, 4, regs); if (!err) - err = read_mem_unaligned(&val[1], ea + 4, 4, regs); + err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs); } if (err) return err; @@ -337,28 +396,42 @@ static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long), struct pt_regs *regs) { int err; - unsigned long val[sizeof(double) / sizeof(long)]; + union { + double dbl; + unsigned long ul[2]; + struct { +#ifdef __BIG_ENDIAN__ + unsigned _pad_; + unsigned word; +#endif +#ifdef __LITTLE_ENDIAN__ + unsigned word; + unsigned _pad_; +#endif + } single; + } data; unsigned long ptr; if (!address_ok(regs, ea, nb)) return -EFAULT; if ((ea & 3) == 0) return (*func)(rn, ea); - ptr = (unsigned long) &val[0]; + ptr = (unsigned long) 
&data.ul[0]; if (sizeof(unsigned long) == 8 || nb == 4) { - ptr += sizeof(unsigned long) - nb; + if (nb == 4) + ptr = (unsigned long)&(data.single.word); err = (*func)(rn, ptr); if (err) return err; - err = write_mem_unaligned(val[0], ea, nb, regs); + err = write_mem_unaligned(data.ul[0], ea, nb, regs); } else { /* writing a double on 32-bit */ err = (*func)(rn, ptr); if (err) return err; - err = write_mem_unaligned(val[0], ea, 4, regs); + err = write_mem_unaligned(data.ul[0], ea, 4, regs); if (!err) - err = write_mem_unaligned(val[1], ea + 4, 4, regs); + err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs); } return err; } diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c new file mode 100644 index 00000000000..e905f7c2ea7 --- /dev/null +++ b/arch/powerpc/lib/xor_vmx.c @@ -0,0 +1,177 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2012 + * + * Author: Anton Blanchard <anton@au.ibm.com> + */ +#include <altivec.h> + +#include <linux/preempt.h> +#include <linux/export.h> +#include <linux/sched.h> +#include <asm/switch_to.h> + +typedef vector signed char unative_t; + +#define DEFINE(V) \ + unative_t *V = (unative_t *)V##_in; \ + unative_t V##_0, V##_1, V##_2, V##_3 + +#define LOAD(V) \ + do { \ + V##_0 = V[0]; \ + V##_1 = V[1]; \ + V##_2 = V[2]; \ + V##_3 = V[3]; \ + } while (0) + +#define STORE(V) \ + do { \ + V[0] = V##_0; \ + V[1] = V##_1; \ + V[2] = V##_2; \ + V[3] = V##_3; \ + } while (0) + +#define XOR(V1, V2) \ + do { \ + V1##_0 = vec_xor(V1##_0, V2##_0); \ + V1##_1 = vec_xor(V1##_1, V2##_1); \ + V1##_2 = vec_xor(V1##_2, V2##_2); \ + V1##_3 = vec_xor(V1##_3, V2##_3); \ + } while (0) + +void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in) +{ + DEFINE(v1); + DEFINE(v2); + unsigned long lines = bytes / (sizeof(unative_t)) / 4; + + preempt_disable(); + enable_kernel_altivec(); + + do { + LOAD(v1); + LOAD(v2); + XOR(v1, v2); + STORE(v1); + + v1 += 4; + v2 += 4; + } while (--lines > 0); + + preempt_enable(); +} +EXPORT_SYMBOL(xor_altivec_2); + +void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in) +{ + DEFINE(v1); + DEFINE(v2); + DEFINE(v3); + unsigned long lines = bytes / (sizeof(unative_t)) / 4; + + preempt_disable(); + enable_kernel_altivec(); + + do { + LOAD(v1); + LOAD(v2); + LOAD(v3); + XOR(v1, v2); + XOR(v1, v3); + STORE(v1); + + v1 += 4; + v2 += 4; + v3 += 4; + } while (--lines > 0); + + preempt_enable(); +} +EXPORT_SYMBOL(xor_altivec_3); + +void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in) +{ + DEFINE(v1); + DEFINE(v2); + DEFINE(v3); + DEFINE(v4); + unsigned long lines = bytes / (sizeof(unative_t)) / 4; + + preempt_disable(); + enable_kernel_altivec(); + + do { + LOAD(v1); + LOAD(v2); + 
LOAD(v3); + LOAD(v4); + XOR(v1, v2); + XOR(v3, v4); + XOR(v1, v3); + STORE(v1); + + v1 += 4; + v2 += 4; + v3 += 4; + v4 += 4; + } while (--lines > 0); + + preempt_enable(); +} +EXPORT_SYMBOL(xor_altivec_4); + +void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in, unsigned long *v5_in) +{ + DEFINE(v1); + DEFINE(v2); + DEFINE(v3); + DEFINE(v4); + DEFINE(v5); + unsigned long lines = bytes / (sizeof(unative_t)) / 4; + + preempt_disable(); + enable_kernel_altivec(); + + do { + LOAD(v1); + LOAD(v2); + LOAD(v3); + LOAD(v4); + LOAD(v5); + XOR(v1, v2); + XOR(v3, v4); + XOR(v1, v5); + XOR(v1, v3); + STORE(v1); + + v1 += 4; + v2 += 4; + v3 += 4; + v4 += 4; + v5 += 4; + } while (--lines > 0); + + preempt_enable(); +} +EXPORT_SYMBOL(xor_altivec_5); diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index 6747eece84a..7b6c1075017 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -287,9 +287,7 @@ void __dma_free_coherent(size_t size, void *vaddr) pte_clear(&init_mm, addr, ptep); if (pfn_valid(pfn)) { struct page *page = pfn_to_page(pfn); - - ClearPageReserved(page); - __free_page(page); + __free_reserved_page(page); } } addr += PAGE_SIZE; diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index c33d939120c..3ea26c25590 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -35,7 +35,11 @@ #define DBG_LOW(fmt...) #endif +#ifdef __BIG_ENDIAN__ #define HPTE_LOCK_BIT 3 +#else +#define HPTE_LOCK_BIT (56+3) +#endif DEFINE_RAW_SPINLOCK(native_tlbie_lock); @@ -172,7 +176,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize, static inline void native_lock_hpte(struct hash_pte *hptep) { - unsigned long *word = &hptep->v; + unsigned long *word = (unsigned long *)&hptep->v; while (1) { if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word)) @@ -184,7 +188,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep) static inline void native_unlock_hpte(struct hash_pte *hptep) { - unsigned long *word = &hptep->v; + unsigned long *word = (unsigned long *)&hptep->v; clear_bit_unlock(HPTE_LOCK_BIT, word); } @@ -204,10 +208,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, } for (i = 0; i < HPTES_PER_GROUP; i++) { - if (! (hptep->v & HPTE_V_VALID)) { + if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { /* retry with lock held */ native_lock_hpte(hptep); - if (! (hptep->v & HPTE_V_VALID)) + if (! 
(be64_to_cpu(hptep->v) & HPTE_V_VALID)) break; native_unlock_hpte(hptep); } @@ -226,14 +230,14 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, i, hpte_v, hpte_r); } - hptep->r = hpte_r; + hptep->r = cpu_to_be64(hpte_r); /* Guarantee the second dword is visible before the valid bit */ eieio(); /* * Now set the first dword including the valid bit * NOTE: this also unlocks the hpte */ - hptep->v = hpte_v; + hptep->v = cpu_to_be64(hpte_v); __asm__ __volatile__ ("ptesync" : : : "memory"); @@ -254,12 +258,12 @@ static long native_hpte_remove(unsigned long hpte_group) for (i = 0; i < HPTES_PER_GROUP; i++) { hptep = htab_address + hpte_group + slot_offset; - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) { /* retry with lock held */ native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) break; @@ -294,7 +298,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* * We need to invalidate the TLB always because hpte_remove doesn't do * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less @@ -308,8 +312,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, } else { DBG_LOW(" -> hit\n"); /* Update the HPTE */ - hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | - (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)); + hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) | + (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C))); } native_unlock_hpte(hptep); @@ -334,7 +338,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize) slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; for (i = 0; i < HPTES_PER_GROUP; i++) { hptep = htab_address + slot; - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) /* HPTE matches */ @@ -369,8 +373,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, hptep = htab_address + slot; /* Update the HPTE */ - hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | - (newpp & (HPTE_R_PP | HPTE_R_N)); + hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & + ~(HPTE_R_PP | HPTE_R_N)) | + (newpp & (HPTE_R_PP | HPTE_R_N))); /* * Ensure it is out of the tlb too. Bolted entries base and * actual page size will be same. 
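[Editor's sketch] The hash_native_64.c hunks above and below convert every raw HPTE access to explicit big-endian accessors, so that a little-endian kernel still lays out the hardware hash page table in the byte order the MMU expects. A minimal sketch of the resulting access pattern follows; the struct and helper names are illustrative only (only __be64, be64_to_cpu() and cpu_to_be64() are the kernel's real byteorder helpers, and HPTE_V_VALID_SKETCH is a stand-in for the HPTE_V_VALID flag from asm/mmu-hash64.h):

#include <linux/types.h>
#include <asm/byteorder.h>

#define HPTE_V_VALID_SKETCH 0x1UL	/* stand-in for HPTE_V_VALID */

/* HPTE dwords live in memory in big-endian order regardless of CPU endianness. */
struct hpte_sketch {
	__be64 v;	/* valid bit, AVPN, lock bit */
	__be64 r;	/* real page number, protection bits */
};

static inline int hpte_valid_sketch(const struct hpte_sketch *hptep)
{
	/* convert to CPU order before testing flag bits */
	return (be64_to_cpu(hptep->v) & HPTE_V_VALID_SKETCH) != 0;
}

static inline void hpte_write_sketch(struct hpte_sketch *hptep,
				     unsigned long v, unsigned long r)
{
	/* second dword first, as in the patch (the real code puts an
	 * eieio() between the two stores before publishing the entry) */
	hptep->r = cpu_to_be64(r);
	hptep->v = cpu_to_be64(v);
}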
@@ -392,7 +397,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, want_v = hpte_encode_avpn(vpn, bpsize, ssize); native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* * We need to invalidate the TLB always because hpte_remove doesn't do @@ -458,7 +463,7 @@ static void native_hugepage_invalidate(struct mm_struct *mm, hptep = htab_address + slot; want_v = hpte_encode_avpn(vpn, psize, ssize); native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* Even if we miss, we need to invalidate the TLB */ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) @@ -519,11 +524,12 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, int *psize, int *apsize, int *ssize, unsigned long *vpn) { unsigned long avpn, pteg, vpi; - unsigned long hpte_v = hpte->v; + unsigned long hpte_v = be64_to_cpu(hpte->v); + unsigned long hpte_r = be64_to_cpu(hpte->r); unsigned long vsid, seg_off; int size, a_size, shift; /* Look at the 8 bit LP value */ - unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1); + unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1); if (!(hpte_v & HPTE_V_LARGE)) { size = MMU_PAGE_4K; @@ -612,7 +618,7 @@ static void native_hpte_clear(void) * running, right? and for crash dump, we probably * don't want to wait for a maybe bad cpu. */ - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); /* * Call __tlbie() here rather than tlbie() since we @@ -664,7 +670,7 @@ static void native_flush_hash_range(unsigned long number, int local) hptep = htab_address + slot; want_v = hpte_encode_avpn(vpn, psize, ssize); native_lock_hpte(hptep); - hpte_v = hptep->v; + hpte_v = be64_to_cpu(hptep->v); if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) native_unlock_hpte(hptep); diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index bde8b558975..6176b3cdf57 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -251,19 +251,18 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - u32 *prop; + __be32 *prop; unsigned long size = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; - prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", - &size); + prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size); if (prop == NULL) return 0; for (; size >= 4; size -= 4, ++prop) { - if (prop[0] == 40) { + if (be32_to_cpu(prop[0]) == 40) { DBG("1T segment support detected\n"); cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT; return 1; @@ -307,23 +306,22 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - u32 *prop; + __be32 *prop; unsigned long size = 0; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; - prop = (u32 *)of_get_flat_dt_prop(node, - "ibm,segment-page-sizes", &size); + prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size); if (prop != NULL) { pr_info("Page sizes from device-tree:\n"); size /= 4; cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); while(size > 0) { - unsigned int base_shift = prop[0]; - unsigned int slbenc = prop[1]; - unsigned int lpnum = prop[2]; + unsigned int base_shift = be32_to_cpu(prop[0]); + unsigned int slbenc = be32_to_cpu(prop[1]); + unsigned int lpnum = be32_to_cpu(prop[2]); struct 
mmu_psize_def *def; int idx, base_idx; @@ -356,8 +354,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, def->tlbiel = 0; while (size > 0 && lpnum) { - unsigned int shift = prop[0]; - int penc = prop[1]; + unsigned int shift = be32_to_cpu(prop[0]); + int penc = be32_to_cpu(prop[1]); prop += 2; size -= 2; lpnum--; @@ -390,8 +388,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, const char *uname, int depth, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - unsigned long *addr_prop; - u32 *page_count_prop; + __be64 *addr_prop; + __be32 *page_count_prop; unsigned int expected_pages; long unsigned int phys_addr; long unsigned int block_size; @@ -405,12 +403,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node, page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL); if (page_count_prop == NULL) return 0; - expected_pages = (1 << page_count_prop[0]); + expected_pages = (1 << be32_to_cpu(page_count_prop[0])); addr_prop = of_get_flat_dt_prop(node, "reg", NULL); if (addr_prop == NULL) return 0; - phys_addr = addr_prop[0]; - block_size = addr_prop[1]; + phys_addr = be64_to_cpu(addr_prop[0]); + block_size = be64_to_cpu(addr_prop[1]); if (block_size != (16 * GB)) return 0; printk(KERN_INFO "Huge page(16GB) memory: " @@ -534,16 +532,16 @@ static int __init htab_dt_scan_pftsize(unsigned long node, void *data) { char *type = of_get_flat_dt_prop(node, "device_type", NULL); - u32 *prop; + __be32 *prop; /* We are scanning "cpu" nodes only */ if (type == NULL || strcmp(type, "cpu") != 0) return 0; - prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL); + prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL); if (prop != NULL) { /* pft_size[0] is the NUMA CEC cookie */ - ppc64_pft_size = prop[1]; + ppc64_pft_size = be32_to_cpu(prop[1]); return 1; } return 0; diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d67db4bd672..90bb6d9409b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -633,8 +633,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, /* * This function frees user-level page tables of a process. - * - * Must be called with pagetable lock held. */ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index d47d3dab487..cff59f1bec2 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -213,7 +213,12 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, */ BUG_ON(first_memblock_base != 0); +#ifdef CONFIG_PIN_TLB + /* 8xx can only access 24MB at the moment */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000)); +#else /* 8xx can only access 8MB at the moment */ memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); +#endif } #endif /* CONFIG_8xx */ diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 8ed035d2edb..e3734edffa6 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -304,5 +304,54 @@ void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long size) { } -#endif /* CONFIG_SPARSEMEM_VMEMMAP */ +/* + * We do not have access to the sparsemem vmemmap, so we fallback to + * walking the list of sparsemem blocks which we already maintain for + * the sake of crashdump. 
In the long run, we might want to maintain + * a tree if performance of that linear walk becomes a problem. + * + * realmode_pfn_to_page functions can fail due to: + * 1) As real sparsemem blocks do not lay in RAM continously (they + * are in virtual address space which is not available in the real mode), + * the requested page struct can be split between blocks so get_page/put_page + * may fail. + * 2) When huge pages are used, the get_page/put_page API will fail + * in real mode as the linked addresses in the page struct are virtual + * too. + */ +struct page *realmode_pfn_to_page(unsigned long pfn) +{ + struct vmemmap_backing *vmem_back; + struct page *page; + unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; + unsigned long pg_va = (unsigned long) pfn_to_page(pfn); + + for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) { + if (pg_va < vmem_back->virt_addr) + continue; + + /* Check that page struct is not split between real pages */ + if ((pg_va + sizeof(struct page)) > + (vmem_back->virt_addr + page_size)) + return NULL; + + page = (struct page *) (vmem_back->phys + pg_va - + vmem_back->virt_addr); + return page; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(realmode_pfn_to_page); + +#elif defined(CONFIG_FLATMEM) + +struct page *realmode_pfn_to_page(unsigned long pfn) +{ + struct page *page = pfn_to_page(pfn); + return page; +} +EXPORT_SYMBOL_GPL(realmode_pfn_to_page); + +#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index c916127f10c..078d3e00a61 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -195,7 +195,7 @@ static const __be32 *of_get_usable_memory(struct device_node *memory) u32 len; prop = of_get_property(memory, "linux,drconf-usable-memory", &len); if (!prop || len < sizeof(unsigned int)) - return 0; + return NULL; return prop; } @@ -938,8 +938,7 @@ static void __init mark_reserved_regions_for_nid(int nid) unsigned long start_pfn = physbase >> PAGE_SHIFT; unsigned long end_pfn = PFN_UP(physbase + size); struct node_active_region node_ar; - unsigned long node_end_pfn = node->node_start_pfn + - node->node_spanned_pages; + unsigned long node_end_pfn = pgdat_end_pfn(node); /* * Check to make sure that this memblock.reserved area is @@ -1154,7 +1153,7 @@ static int hot_add_drconf_scn_to_nid(struct device_node *memory, * represented in the device tree as a node (i.e. memory@XXXX) for * each memblock. 
*/ -int hot_add_node_scn_to_nid(unsigned long scn_addr) +static int hot_add_node_scn_to_nid(unsigned long scn_addr) { struct device_node *memory; int nid = -1; @@ -1235,7 +1234,7 @@ static u64 hot_add_drconf_memory_max(void) struct device_node *memory = NULL; unsigned int drconf_cell_cnt = 0; u64 lmb_size = 0; - const __be32 *dm = 0; + const __be32 *dm = NULL; memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); if (memory) { @@ -1535,7 +1534,7 @@ static void topology_work_fn(struct work_struct *work) } static DECLARE_WORK(topology_work, topology_work_fn); -void topology_schedule_update(void) +static void topology_schedule_update(void) { schedule_work(&topology_work); } diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index edda589795c..841e0d00863 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -32,8 +32,6 @@ #include <asm/tlbflush.h> #include <asm/tlb.h> -#include "mmu_decl.h" - static inline int is_exec_fault(void) { return current->thread.regs && TRAP(current->thread.regs) == 0x400; @@ -72,7 +70,7 @@ struct page * maybe_pte_to_page(pte_t pte) * support falls into the same category. */ -static pte_t set_pte_filter(pte_t pte, unsigned long addr) +static pte_t set_pte_filter(pte_t pte) { pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) || @@ -81,17 +79,6 @@ static pte_t set_pte_filter(pte_t pte, unsigned long addr) if (!pg) return pte; if (!test_bit(PG_arch_1, &pg->flags)) { -#ifdef CONFIG_8xx - /* On 8xx, cache control instructions (particularly - * "dcbst" from flush_dcache_icache) fault as write - * operation if there is an unpopulated TLB entry - * for the address in question. To workaround that, - * we invalidate the TLB here, thus avoiding dcbst - * misbehaviour. - */ - /* 8xx doesn't care about PID, size or ind args */ - _tlbil_va(addr, 0, 0, 0); -#endif /* CONFIG_8xx */ flush_dcache_icache_page(pg); set_bit(PG_arch_1, &pg->flags); } @@ -111,7 +98,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma, * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so * instead we "filter out" the exec permission for non clean pages. */ -static pte_t set_pte_filter(pte_t pte, unsigned long addr) +static pte_t set_pte_filter(pte_t pte) { struct page *pg; @@ -193,7 +180,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, * this context might not have been activated yet when this * is called. 
*/ - pte = set_pte_filter(pte, addr); + pte = set_pte_filter(pte); /* Perform the setting of the PTE */ __set_pte_at(mm, addr, ptep, pte, 0); diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 6c856fb8c15..5b960171528 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -121,7 +121,10 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) ptepage = alloc_pages(flags, 0); if (!ptepage) return NULL; - pgtable_page_ctor(ptepage); + if (!pgtable_page_ctor(ptepage)) { + __free_page(ptepage); + return NULL; + } return ptepage; } diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 536eec72c0f..9d95786aa80 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -378,6 +378,10 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) __GFP_REPEAT | __GFP_ZERO); if (!page) return NULL; + if (!kernel && !pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } ret = page_address(page); spin_lock(&mm->page_table_lock); @@ -392,9 +396,6 @@ static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) } spin_unlock(&mm->page_table_lock); - if (!kernel) - pgtable_page_ctor(page); - return (pte_t *)ret; } diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 8a5dfaf5c6b..9aee27c582d 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -39,6 +39,7 @@ #define r_X 5 #define r_addr 6 #define r_scratch1 7 +#define r_scratch2 8 #define r_D 14 #define r_HL 15 #define r_M 16 @@ -92,6 +93,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); ___PPC_RA(base) | IMM_L(i)) #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ ___PPC_RA(base) | IMM_L(i)) +#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ + ___PPC_RA(base) | ___PPC_RB(b)) /* Convenience helpers for the above with 'far' offsets: */ #define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \ else { PPC_ADDIS(r, base, IMM_HA(i)); \ @@ -186,6 +189,14 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ } } while (0); +#define PPC_LHBRX_OFFS(r, base, i) \ + do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0) +#ifdef __LITTLE_ENDIAN__ +#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i) +#else +#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) +#endif + static inline bool is_nearbranch(int offset) { return (offset < 32768) && (offset >= -32768); diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S index 7d3a3b5619a..e76eba74d9d 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_64.S @@ -43,8 +43,11 @@ sk_load_word_positive_offset: cmpd r_scratch1, r_addr blt bpf_slow_path_word /* Nope, just hitting the header. cr0 here is eq or gt! */ +#ifdef __LITTLE_ENDIAN__ + lwbrx r_A, r_D, r_addr +#else lwzx r_A, r_D, r_addr - /* When big endian we don't need to byteswap. 
*/ +#endif blr /* Return success, cr0 != LT */ .globl sk_load_half @@ -56,7 +59,11 @@ sk_load_half_positive_offset: subi r_scratch1, r_HL, 2 cmpd r_scratch1, r_addr blt bpf_slow_path_half +#ifdef __LITTLE_ENDIAN__ + lhbrx r_A, r_D, r_addr +#else lhzx r_A, r_D, r_addr +#endif blr .globl sk_load_byte diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 2345bdb4d91..ac3c2a10daf 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -17,14 +17,8 @@ #include "bpf_jit.h" -#ifndef __BIG_ENDIAN -/* There are endianness assumptions herein. */ -#error "Little-endian PPC not supported in BPF compiler" -#endif - int bpf_jit_enable __read_mostly; - static inline void bpf_flush_icache(void *start, void *end) { smp_wmb(); @@ -193,6 +187,26 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_MUL(r_A, r_A, r_scratch1); } break; + case BPF_S_ALU_MOD_X: /* A %= X; */ + ctx->seen |= SEEN_XREG; + PPC_CMPWI(r_X, 0); + if (ctx->pc_ret0 != -1) { + PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]); + } else { + PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12); + PPC_LI(r_ret, 0); + PPC_JMP(exit_addr); + } + PPC_DIVWU(r_scratch1, r_A, r_X); + PPC_MUL(r_scratch1, r_X, r_scratch1); + PPC_SUB(r_A, r_A, r_scratch1); + break; + case BPF_S_ALU_MOD_K: /* A %= K; */ + PPC_LI32(r_scratch2, K); + PPC_DIVWU(r_scratch1, r_A, r_scratch2); + PPC_MUL(r_scratch1, r_scratch2, r_scratch1); + PPC_SUB(r_A, r_A, r_scratch1); + break; case BPF_S_ALU_DIV_X: /* A /= X; */ ctx->seen |= SEEN_XREG; PPC_CMPWI(r_X, 0); @@ -346,18 +360,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; /*** Ancillary info loads ***/ - - /* None of the BPF_S_ANC* codes appear to be passed by - * sk_chk_filter(). The interpreter and the x86 BPF - * compiler implement them so we do too -- they may be - * planted in future. - */ case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, - protocol)); - /* ntohs is a NOP with BE loads. */ + PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, + protocol)); break; case BPF_S_ANC_IFINDEX: PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c index e504166e089..fd8a3765341 100644 --- a/arch/powerpc/platforms/512x/clock.c +++ b/arch/powerpc/platforms/512x/clock.c @@ -24,6 +24,7 @@ #include <linux/mutex.h> #include <linux/io.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <asm/mpc5xxx.h> #include <asm/mpc5121.h> diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c index a82a41b4fd9..36b5652aada 100644 --- a/arch/powerpc/platforms/512x/mpc512x_shared.c +++ b/arch/powerpc/platforms/512x/mpc512x_shared.c @@ -60,8 +60,6 @@ void mpc512x_restart(char *cmd) ; } -#if IS_ENABLED(CONFIG_FB_FSL_DIU) - struct fsl_diu_shared_fb { u8 gamma[0x300]; /* 32-bit aligned! */ struct diu_ad ad0; /* 32-bit aligned! 
*/ @@ -71,7 +69,7 @@ struct fsl_diu_shared_fb { }; #define DIU_DIV_MASK 0x000000ff -void mpc512x_set_pixel_clock(unsigned int pixclock) +static void mpc512x_set_pixel_clock(unsigned int pixclock) { unsigned long bestval, bestfreq, speed, busfreq; unsigned long minpixclock, maxpixclock, pixval; @@ -164,7 +162,7 @@ void mpc512x_set_pixel_clock(unsigned int pixclock) iounmap(ccm); } -enum fsl_diu_monitor_port +static enum fsl_diu_monitor_port mpc512x_valid_monitor_port(enum fsl_diu_monitor_port port) { return FSL_DIU_PORT_DVI; @@ -179,7 +177,7 @@ static inline void mpc512x_free_bootmem(struct page *page) free_reserved_page(page); } -void mpc512x_release_bootmem(void) +static void mpc512x_release_bootmem(void) { unsigned long addr = diu_shared_fb.fb_phys & PAGE_MASK; unsigned long size = diu_shared_fb.fb_len; @@ -205,7 +203,7 @@ void mpc512x_release_bootmem(void) * address range will be reserved in setup_arch() after bootmem * allocator is up. */ -void __init mpc512x_init_diu(void) +static void __init mpc512x_init_diu(void) { struct device_node *np; struct diu __iomem *diu_reg; @@ -274,7 +272,7 @@ out: iounmap(diu_reg); } -void __init mpc512x_setup_diu(void) +static void __init mpc512x_setup_diu(void) { int ret; @@ -303,8 +301,6 @@ void __init mpc512x_setup_diu(void) diu_ops.release_bootmem = mpc512x_release_bootmem; } -#endif - void __init mpc512x_init_IRQ(void) { struct device_node *np; @@ -337,7 +333,7 @@ static struct of_device_id __initdata of_bus_ids[] = { {}, }; -void __init mpc512x_declare_of_platform_devices(void) +static void __init mpc512x_declare_of_platform_devices(void) { if (of_platform_bus_probe(NULL, of_bus_ids, NULL)) printk(KERN_ERR __FILE__ ": " @@ -387,7 +383,7 @@ static unsigned int __init get_fifo_size(struct device_node *np, ((u32)(_base) + sizeof(struct mpc52xx_psc))) /* Init PSC FIFO space for TX and RX slices */ -void __init mpc512x_psc_fifo_init(void) +static void __init mpc512x_psc_fifo_init(void) { struct device_node *np; void __iomem *psc; diff --git a/arch/powerpc/platforms/512x/pdm360ng.c b/arch/powerpc/platforms/512x/pdm360ng.c index 24b314d7bd5..116f2325b20 100644 --- a/arch/powerpc/platforms/512x/pdm360ng.c +++ b/arch/powerpc/platforms/512x/pdm360ng.c @@ -14,6 +14,8 @@ #include <linux/kernel.h> #include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig index 90f4496017e..af54174801f 100644 --- a/arch/powerpc/platforms/52xx/Kconfig +++ b/arch/powerpc/platforms/52xx/Kconfig @@ -57,5 +57,5 @@ config PPC_MPC5200_BUGFIX config PPC_MPC5200_LPBFIFO tristate "MPC5200 LocalPlus bus FIFO driver" - depends on PPC_MPC52xx + depends on PPC_MPC52xx && PPC_BESTCOMM select PPC_BESTCOMM_GEN_BD diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index b69221ba07f..2898b737deb 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, { int l1irq; int l2irq; - struct irq_chip *irqchip; + struct irq_chip *uninitialized_var(irqchip); void *hndlr; int type; u32 reg; @@ -373,9 +373,8 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq, case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break; case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break; case MPC52xx_IRQ_L1_CRIT: - default: pr_warn("%s: 
Critical IRQ #%d is unsupported! Nopping it.\n", - __func__, l1irq); + __func__, l2irq); irq_set_chip(virq, &no_irq_chip); return 0; } diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c index 30394b409b3..6a14cf50f4a 100644 --- a/arch/powerpc/platforms/82xx/mpc8272_ads.c +++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c @@ -16,6 +16,8 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/io.h> diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c index e1dceeec499..e5f82ec8df1 100644 --- a/arch/powerpc/platforms/82xx/pq2fads.c +++ b/arch/powerpc/platforms/82xx/pq2fads.c @@ -15,6 +15,8 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c index 7bc31582293..fd71cfdf238 100644 --- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c +++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c @@ -204,7 +204,6 @@ static int mcu_remove(struct i2c_client *client) ret = mcu_gpiochip_remove(mcu); if (ret) return ret; - i2c_set_clientdata(client, NULL); kfree(mcu); return 0; } diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 1d769a29249..3d9716ccd32 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -20,6 +20,8 @@ #include <linux/freezer.h> #include <linux/suspend.h> #include <linux/fsl_devices.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/export.h> diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index de2eb932099..4d4634958cf 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -218,83 +218,16 @@ config GE_IMP3A This board is a 3U CompactPCI Single Board Computer with a Freescale P2020 processor. -config P2041_RDB - bool "Freescale P2041 RDB" - select DEFAULT_UIMAGE - select PPC_E500MC - select PHYS_64BIT - select SWIOTLB - select ARCH_REQUIRE_GPIOLIB - select GPIO_MPC8XXX - select HAS_RAPIDIO - select PPC_EPAPR_HV_PIC - help - This option enables support for the P2041 RDB board - -config P3041_DS - bool "Freescale P3041 DS" - select DEFAULT_UIMAGE - select PPC_E500MC - select PHYS_64BIT - select SWIOTLB - select ARCH_REQUIRE_GPIOLIB - select GPIO_MPC8XXX - select HAS_RAPIDIO - select PPC_EPAPR_HV_PIC - help - This option enables support for the P3041 DS board - -config P4080_DS - bool "Freescale P4080 DS" - select DEFAULT_UIMAGE - select PPC_E500MC - select PHYS_64BIT - select SWIOTLB - select ARCH_REQUIRE_GPIOLIB - select GPIO_MPC8XXX - select HAS_RAPIDIO - select PPC_EPAPR_HV_PIC - help - This option enables support for the P4080 DS board - config SGY_CTS1000 tristate "Servergy CTS-1000 support" select GPIOLIB select OF_GPIO - depends on P4080_DS + depends on CORENET_GENERIC help Enable this to support functionality in Servergy's CTS-1000 systems. 
endif # PPC32 -config P5020_DS - bool "Freescale P5020 DS" - select DEFAULT_UIMAGE - select E500 - select PPC_E500MC - select PHYS_64BIT - select SWIOTLB - select ARCH_REQUIRE_GPIOLIB - select GPIO_MPC8XXX - select HAS_RAPIDIO - select PPC_EPAPR_HV_PIC - help - This option enables support for the P5020 DS board - -config P5040_DS - bool "Freescale P5040 DS" - select DEFAULT_UIMAGE - select E500 - select PPC_E500MC - select PHYS_64BIT - select SWIOTLB - select ARCH_REQUIRE_GPIOLIB - select GPIO_MPC8XXX - select HAS_RAPIDIO - select PPC_EPAPR_HV_PIC - help - This option enables support for the P5040 DS board - config PPC_QEMU_E500 bool "QEMU generic e500 platform" select DEFAULT_UIMAGE @@ -310,10 +243,8 @@ config PPC_QEMU_E500 unset based on the emulated CPU (or actual host CPU in the case of KVM). -if PPC64 - -config T4240_QDS - bool "Freescale T4240 QDS" +config CORENET_GENERIC + bool "Freescale CoreNet Generic" select DEFAULT_UIMAGE select E500 select PPC_E500MC @@ -324,26 +255,14 @@ config T4240_QDS select HAS_RAPIDIO select PPC_EPAPR_HV_PIC help - This option enables support for the T4240 QDS board - -config B4_QDS - bool "Freescale B4 QDS" - select DEFAULT_UIMAGE - select E500 - select PPC_E500MC - select PHYS_64BIT - select SWIOTLB - select GPIOLIB - select ARCH_REQUIRE_GPIOLIB - select HAS_RAPIDIO - select PPC_EPAPR_HV_PIC - help - This option enables support for the B4 QDS board - The B4 application development system B4 QDS is a complete - debugging environment intended for engineers developing - applications for the B4. + This option enables support for the FSL CoreNet based boards. + For 32bit kernel, the following boards are supported: + P2041 RDB, P3041 DS and P4080 DS + For 64bit kernel, the following boards are supported: + T4240 QDS and B4 QDS + The following boards are supported for both 32bit and 64bit kernel: + P5020 DS and P5040 DS -endif endif # FSL_SOC_BOOKE config TQM85xx diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile index 53c9f75a690..dd4c0b59577 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile @@ -18,13 +18,7 @@ obj-$(CONFIG_P1010_RDB) += p1010rdb.o obj-$(CONFIG_P1022_DS) += p1022_ds.o obj-$(CONFIG_P1022_RDK) += p1022_rdk.o obj-$(CONFIG_P1023_RDS) += p1023_rds.o -obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o -obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o -obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o -obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o -obj-$(CONFIG_P5040_DS) += p5040_ds.o corenet_ds.o -obj-$(CONFIG_T4240_QDS) += t4240_qds.o corenet_ds.o -obj-$(CONFIG_B4_QDS) += b4_qds.o corenet_ds.o +obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o obj-$(CONFIG_STX_GP3) += stx_gp3.o obj-$(CONFIG_TQM85xx) += tqm85xx.o obj-$(CONFIG_SBC8548) += sbc8548.o diff --git a/arch/powerpc/platforms/85xx/b4_qds.c b/arch/powerpc/platforms/85xx/b4_qds.c deleted file mode 100644 index 0c6702f8b88..00000000000 --- a/arch/powerpc/platforms/85xx/b4_qds.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * B4 QDS Setup - * Should apply for QDS platform of B4860 and it's personalities. - * viz B4860/B4420/B4220QDS - * - * Copyright 2012 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/phy.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <asm/ehv_pic.h> - -#include "corenet_ds.h" - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init b4_qds_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); -#ifdef CONFIG_SMP - extern struct smp_ops_t smp_85xx_ops; -#endif - - if ((of_flat_dt_is_compatible(root, "fsl,B4860QDS")) || - (of_flat_dt_is_compatible(root, "fsl,B4420QDS")) || - (of_flat_dt_is_compatible(root, "fsl,B4220QDS"))) - return 1; - - /* Check if we're running under the Freescale hypervisor */ - if ((of_flat_dt_is_compatible(root, "fsl,B4860QDS-hv")) || - (of_flat_dt_is_compatible(root, "fsl,B4420QDS-hv")) || - (of_flat_dt_is_compatible(root, "fsl,B4220QDS-hv"))) { - ppc_md.init_IRQ = ehv_pic_init; - ppc_md.get_irq = ehv_pic_get_irq; - ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; - ppc_md.halt = fsl_hv_halt; -#ifdef CONFIG_SMP - /* - * Disable the timebase sync operations because we can't write - * to the timebase registers under the hypervisor. - */ - smp_85xx_ops.give_timebase = NULL; - smp_85xx_ops.take_timebase = NULL; -#endif - return 1; - } - - return 0; -} - -define_machine(b4_qds) { - .name = "B4 QDS", - .probe = b4_qds_probe, - .setup_arch = corenet_ds_setup_arch, - .init_IRQ = corenet_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif -/* coreint doesn't play nice with lazy EE, use legacy mpic for now */ -#ifdef CONFIG_PPC64 - .get_irq = mpic_get_irq, -#else - .get_irq = mpic_get_coreint_irq, -#endif - .restart = fsl_rstcr_restart, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else - .power_save = e500_idle, -#endif -}; - -machine_arch_initcall(b4_qds, corenet_ds_publish_devices); - -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(b4_qds, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c index 6208e49142b..213d5b81582 100644 --- a/arch/powerpc/platforms/85xx/c293pcie.c +++ b/arch/powerpc/platforms/85xx/c293pcie.c @@ -11,6 +11,7 @@ #include <linux/stddef.h> #include <linux/kernel.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index d0861a0d836..eba78c85303 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c @@ -5,6 +5,8 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ + +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <sysdev/cpm2_pic.h> diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c deleted file mode 100644 index aa3690bae41..00000000000 --- a/arch/powerpc/platforms/85xx/corenet_ds.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Corenet based SoC DS Setup - * - * Maintained by Kumar Gala (see MAINTAINERS for contact information) - * - * Copyright 2009-2011 Freescale Semiconductor Inc. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/interrupt.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <asm/ppc-pci.h> -#include <mm/mmu_decl.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include "smp.h" - -void __init corenet_ds_pic_init(void) -{ - struct mpic *mpic; - unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | - MPIC_NO_RESET; - - if (ppc_md.get_irq == mpic_get_coreint_irq) - flags |= MPIC_ENABLE_COREINT; - - mpic = mpic_alloc(NULL, 0, flags, 0, 512, " OpenPIC "); - BUG_ON(mpic == NULL); - - mpic_init(mpic); -} - -/* - * Setup the architecture - */ -void __init corenet_ds_setup_arch(void) -{ - mpc85xx_smp_init(); - - swiotlb_detect_4g(); - - pr_info("%s board from Freescale Semiconductor\n", ppc_md.name); -} - -static const struct of_device_id of_device_ids[] = { - { - .compatible = "simple-bus" - }, - { - .compatible = "fsl,srio", - }, - { - .compatible = "fsl,p4080-pcie", - }, - { - .compatible = "fsl,qoriq-pcie-v2.2", - }, - { - .compatible = "fsl,qoriq-pcie-v2.3", - }, - { - .compatible = "fsl,qoriq-pcie-v2.4", - }, - { - .compatible = "fsl,qoriq-pcie-v3.0", - }, - /* The following two are for the Freescale hypervisor */ - { - .name = "hypervisor", - }, - { - .name = "handles", - }, - {} -}; - -int __init corenet_ds_publish_devices(void) -{ - return of_platform_bus_probe(NULL, of_device_ids, NULL); -} diff --git a/arch/powerpc/platforms/85xx/corenet_ds.h b/arch/powerpc/platforms/85xx/corenet_ds.h deleted file mode 100644 index ddd700b2303..00000000000 --- a/arch/powerpc/platforms/85xx/corenet_ds.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Corenet based SoC DS Setup - * - * Copyright 2009 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#ifndef CORENET_DS_H -#define CORENET_DS_H - -extern void __init corenet_ds_pic_init(void); -extern void __init corenet_ds_setup_arch(void); -extern int __init corenet_ds_publish_devices(void); - -#endif diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c new file mode 100644 index 00000000000..fbd871e6975 --- /dev/null +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -0,0 +1,182 @@ +/* + * Corenet based SoC DS Setup + * + * Maintained by Kumar Gala (see MAINTAINERS for contact information) + * + * Copyright 2009-2011 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/kdev_t.h> +#include <linux/delay.h> +#include <linux/interrupt.h> + +#include <asm/time.h> +#include <asm/machdep.h> +#include <asm/pci-bridge.h> +#include <asm/ppc-pci.h> +#include <mm/mmu_decl.h> +#include <asm/prom.h> +#include <asm/udbg.h> +#include <asm/mpic.h> +#include <asm/ehv_pic.h> + +#include <linux/of_platform.h> +#include <sysdev/fsl_soc.h> +#include <sysdev/fsl_pci.h> +#include "smp.h" + +void __init corenet_gen_pic_init(void) +{ + struct mpic *mpic; + unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | + MPIC_NO_RESET; + + if (ppc_md.get_irq == mpic_get_coreint_irq) + flags |= MPIC_ENABLE_COREINT; + + mpic = mpic_alloc(NULL, 0, flags, 0, 512, " OpenPIC "); + BUG_ON(mpic == NULL); + + mpic_init(mpic); +} + +/* + * Setup the architecture + */ +void __init corenet_gen_setup_arch(void) +{ + mpc85xx_smp_init(); + + swiotlb_detect_4g(); + + pr_info("%s board from Freescale Semiconductor\n", ppc_md.name); +} + +static const struct of_device_id of_device_ids[] = { + { + .compatible = "simple-bus" + }, + { + .compatible = "fsl,srio", + }, + { + .compatible = "fsl,p4080-pcie", + }, + { + .compatible = "fsl,qoriq-pcie-v2.2", + }, + { + .compatible = "fsl,qoriq-pcie-v2.3", + }, + { + .compatible = "fsl,qoriq-pcie-v2.4", + }, + { + .compatible = "fsl,qoriq-pcie-v3.0", + }, + /* The following two are for the Freescale hypervisor */ + { + .name = "hypervisor", + }, + { + .name = "handles", + }, + {} +}; + +int __init corenet_gen_publish_devices(void) +{ + return of_platform_bus_probe(NULL, of_device_ids, NULL); +} + +static const char * const boards[] __initconst = { + "fsl,P2041RDB", + "fsl,P3041DS", + "fsl,P4080DS", + "fsl,P5020DS", + "fsl,P5040DS", + "fsl,T4240QDS", + "fsl,B4860QDS", + "fsl,B4420QDS", + "fsl,B4220QDS", + NULL +}; + +static const char * const hv_boards[] __initconst = { + "fsl,P2041RDB-hv", + "fsl,P3041DS-hv", + "fsl,P4080DS-hv", + "fsl,P5020DS-hv", + "fsl,P5040DS-hv", + "fsl,T4240QDS-hv", + "fsl,B4860QDS-hv", + "fsl,B4420QDS-hv", + "fsl,B4220QDS-hv", + NULL +}; + +/* + * Called very early, device-tree isn't unflattened + */ +static int __init corenet_generic_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); +#ifdef CONFIG_SMP + extern struct smp_ops_t smp_85xx_ops; +#endif + + if (of_flat_dt_match(root, boards)) + return 1; + + /* Check if we're running under the Freescale hypervisor */ + if (of_flat_dt_match(root, hv_boards)) { + ppc_md.init_IRQ = ehv_pic_init; + ppc_md.get_irq = ehv_pic_get_irq; + ppc_md.restart = fsl_hv_restart; + ppc_md.power_off = fsl_hv_halt; + ppc_md.halt = fsl_hv_halt; +#ifdef CONFIG_SMP + /* + * Disable the timebase sync operations because we can't write + * to the timebase registers under the hypervisor. 
+ */ + smp_85xx_ops.give_timebase = NULL; + smp_85xx_ops.take_timebase = NULL; +#endif + return 1; + } + + return 0; +} + +define_machine(corenet_generic) { + .name = "CoreNet Generic", + .probe = corenet_generic_probe, + .setup_arch = corenet_gen_setup_arch, + .init_IRQ = corenet_gen_pic_init, +#ifdef CONFIG_PCI + .pcibios_fixup_bus = fsl_pcibios_fixup_bus, +#endif + .get_irq = mpic_get_coreint_irq, + .restart = fsl_rstcr_restart, + .calibrate_decr = generic_calibrate_decr, + .progress = udbg_progress, +#ifdef CONFIG_PPC64 + .power_save = book3e_idle, +#else + .power_save = e500_idle, +#endif +}; + +machine_arch_initcall(corenet_generic, corenet_gen_publish_devices); + +#ifdef CONFIG_SWIOTLB +machine_arch_initcall(corenet_generic, swiotlb_setup_bus_notifier); +#endif diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c index 0252961392d..d6a3dd31149 100644 --- a/arch/powerpc/platforms/85xx/p1010rdb.c +++ b/arch/powerpc/platforms/85xx/p1010rdb.c @@ -66,6 +66,8 @@ static int __init p1010_rdb_probe(void) if (of_flat_dt_is_compatible(root, "fsl,P1010RDB")) return 1; + if (of_flat_dt_is_compatible(root, "fsl,P1010RDB-PB")) + return 1; return 0; } diff --git a/arch/powerpc/platforms/85xx/p2041_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c deleted file mode 100644 index 000c0892fc4..00000000000 --- a/arch/powerpc/platforms/85xx/p2041_rdb.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * P2041 RDB Setup - * - * Copyright 2011 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/phy.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <asm/ehv_pic.h> - -#include "corenet_ds.h" - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p2041_rdb_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); -#ifdef CONFIG_SMP - extern struct smp_ops_t smp_85xx_ops; -#endif - - if (of_flat_dt_is_compatible(root, "fsl,P2041RDB")) - return 1; - - /* Check if we're running under the Freescale hypervisor */ - if (of_flat_dt_is_compatible(root, "fsl,P2041RDB-hv")) { - ppc_md.init_IRQ = ehv_pic_init; - ppc_md.get_irq = ehv_pic_get_irq; - ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; - ppc_md.halt = fsl_hv_halt; -#ifdef CONFIG_SMP - /* - * Disable the timebase sync operations because we can't write - * to the timebase registers under the hypervisor. 
- */ - smp_85xx_ops.give_timebase = NULL; - smp_85xx_ops.take_timebase = NULL; -#endif - return 1; - } - - return 0; -} - -define_machine(p2041_rdb) { - .name = "P2041 RDB", - .probe = p2041_rdb_probe, - .setup_arch = corenet_ds_setup_arch, - .init_IRQ = corenet_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif - .get_irq = mpic_get_coreint_irq, - .restart = fsl_rstcr_restart, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, - .power_save = e500_idle, -}; - -machine_arch_initcall(p2041_rdb, corenet_ds_publish_devices); - -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c deleted file mode 100644 index b3edc205daa..00000000000 --- a/arch/powerpc/platforms/85xx/p3041_ds.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * P3041 DS Setup - * - * Maintained by Kumar Gala (see MAINTAINERS for contact information) - * - * Copyright 2009-2010 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/phy.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <asm/ehv_pic.h> - -#include "corenet_ds.h" - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p3041_ds_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); -#ifdef CONFIG_SMP - extern struct smp_ops_t smp_85xx_ops; -#endif - - if (of_flat_dt_is_compatible(root, "fsl,P3041DS")) - return 1; - - /* Check if we're running under the Freescale hypervisor */ - if (of_flat_dt_is_compatible(root, "fsl,P3041DS-hv")) { - ppc_md.init_IRQ = ehv_pic_init; - ppc_md.get_irq = ehv_pic_get_irq; - ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; - ppc_md.halt = fsl_hv_halt; -#ifdef CONFIG_SMP - /* - * Disable the timebase sync operations because we can't write - * to the timebase registers under the hypervisor. 
- */ - smp_85xx_ops.give_timebase = NULL; - smp_85xx_ops.take_timebase = NULL; -#endif - return 1; - } - - return 0; -} - -define_machine(p3041_ds) { - .name = "P3041 DS", - .probe = p3041_ds_probe, - .setup_arch = corenet_ds_setup_arch, - .init_IRQ = corenet_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif - .get_irq = mpic_get_coreint_irq, - .restart = fsl_rstcr_restart, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, - .power_save = e500_idle, -}; - -machine_arch_initcall(p3041_ds, corenet_ds_publish_devices); - -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/85xx/p4080_ds.c b/arch/powerpc/platforms/85xx/p4080_ds.c deleted file mode 100644 index 54df10632ae..00000000000 --- a/arch/powerpc/platforms/85xx/p4080_ds.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * P4080 DS Setup - * - * Maintained by Kumar Gala (see MAINTAINERS for contact information) - * - * Copyright 2009 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/interrupt.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <asm/ehv_pic.h> - -#include "corenet_ds.h" - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p4080_ds_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); -#ifdef CONFIG_SMP - extern struct smp_ops_t smp_85xx_ops; -#endif - - if (of_flat_dt_is_compatible(root, "fsl,P4080DS")) - return 1; - - /* Check if we're running under the Freescale hypervisor */ - if (of_flat_dt_is_compatible(root, "fsl,P4080DS-hv")) { - ppc_md.init_IRQ = ehv_pic_init; - ppc_md.get_irq = ehv_pic_get_irq; - ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; - ppc_md.halt = fsl_hv_halt; -#ifdef CONFIG_SMP - /* - * Disable the timebase sync operations because we can't write - * to the timebase registers under the hypervisor. - */ - smp_85xx_ops.give_timebase = NULL; - smp_85xx_ops.take_timebase = NULL; -#endif - return 1; - } - - return 0; -} - -define_machine(p4080_ds) { - .name = "P4080 DS", - .probe = p4080_ds_probe, - .setup_arch = corenet_ds_setup_arch, - .init_IRQ = corenet_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif - .get_irq = mpic_get_coreint_irq, - .restart = fsl_rstcr_restart, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, - .power_save = e500_idle, -}; - -machine_arch_initcall(p4080_ds, corenet_ds_publish_devices); -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(p4080_ds, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c deleted file mode 100644 index 39cfa4044e6..00000000000 --- a/arch/powerpc/platforms/85xx/p5020_ds.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * P5020 DS Setup - * - * Maintained by Kumar Gala (see MAINTAINERS for contact information) - * - * Copyright 2009-2010 Freescale Semiconductor Inc. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/phy.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <asm/ehv_pic.h> - -#include "corenet_ds.h" - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p5020_ds_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); -#ifdef CONFIG_SMP - extern struct smp_ops_t smp_85xx_ops; -#endif - - if (of_flat_dt_is_compatible(root, "fsl,P5020DS")) - return 1; - - /* Check if we're running under the Freescale hypervisor */ - if (of_flat_dt_is_compatible(root, "fsl,P5020DS-hv")) { - ppc_md.init_IRQ = ehv_pic_init; - ppc_md.get_irq = ehv_pic_get_irq; - ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; - ppc_md.halt = fsl_hv_halt; -#ifdef CONFIG_SMP - /* - * Disable the timebase sync operations because we can't write - * to the timebase registers under the hypervisor. - */ - smp_85xx_ops.give_timebase = NULL; - smp_85xx_ops.take_timebase = NULL; -#endif - return 1; - } - - return 0; -} - -define_machine(p5020_ds) { - .name = "P5020 DS", - .probe = p5020_ds_probe, - .setup_arch = corenet_ds_setup_arch, - .init_IRQ = corenet_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif - .get_irq = mpic_get_coreint_irq, - .restart = fsl_rstcr_restart, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else - .power_save = e500_idle, -#endif -}; - -machine_arch_initcall(p5020_ds, corenet_ds_publish_devices); - -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/85xx/p5040_ds.c b/arch/powerpc/platforms/85xx/p5040_ds.c deleted file mode 100644 index f70e74cddf9..00000000000 --- a/arch/powerpc/platforms/85xx/p5040_ds.c +++ /dev/null @@ -1,84 +0,0 @@ -/* - * P5040 DS Setup - * - * Copyright 2009-2010 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#include <linux/kernel.h> -#include <linux/pci.h> - -#include <asm/machdep.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_fdt.h> - -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <asm/ehv_pic.h> - -#include "corenet_ds.h" - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init p5040_ds_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); -#ifdef CONFIG_SMP - extern struct smp_ops_t smp_85xx_ops; -#endif - - if (of_flat_dt_is_compatible(root, "fsl,P5040DS")) - return 1; - - /* Check if we're running under the Freescale hypervisor */ - if (of_flat_dt_is_compatible(root, "fsl,P5040DS-hv")) { - ppc_md.init_IRQ = ehv_pic_init; - ppc_md.get_irq = ehv_pic_get_irq; - ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; - ppc_md.halt = fsl_hv_halt; -#ifdef CONFIG_SMP - /* - * Disable the timebase sync operations because we can't write - * to the timebase registers under the hypervisor. - */ - smp_85xx_ops.give_timebase = NULL; - smp_85xx_ops.take_timebase = NULL; -#endif - return 1; - } - - return 0; -} - -define_machine(p5040_ds) { - .name = "P5040 DS", - .probe = p5040_ds_probe, - .setup_arch = corenet_ds_setup_arch, - .init_IRQ = corenet_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif - .get_irq = mpic_get_coreint_irq, - .restart = fsl_rstcr_restart, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else - .power_save = e500_idle, -#endif -}; - -machine_arch_initcall(p5040_ds, corenet_ds_publish_devices); - -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(p5040_ds, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c index 6a7704b92c3..3daff7c6356 100644 --- a/arch/powerpc/platforms/85xx/ppa8548.c +++ b/arch/powerpc/platforms/85xx/ppa8548.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/reboot.h> #include <linux/seq_file.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 7179726ba5c..b9197cea185 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/of_gpio.h> +#include <linux/of_irq.h> #include <linux/workqueue.h> #include <linux/reboot.h> #include <linux/interrupt.h> diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 281b7f01df6..393f975ab39 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/delay.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/kexec.h> #include <linux/highmem.h> #include <linux/cpu.h> diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index 3bbbf748948..55a9682b952 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c @@ -9,6 +9,8 @@ */ #include <linux/irq.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/io.h> diff --git a/arch/powerpc/platforms/85xx/t4240_qds.c b/arch/powerpc/platforms/85xx/t4240_qds.c deleted file mode 100644 index 91ead6b1b8a..00000000000 --- 
a/arch/powerpc/platforms/85xx/t4240_qds.c +++ /dev/null @@ -1,93 +0,0 @@ -/* - * T4240 QDS Setup - * - * Maintained by Kumar Gala (see MAINTAINERS for contact information) - * - * Copyright 2012 Freescale Semiconductor Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/kdev_t.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/phy.h> - -#include <asm/time.h> -#include <asm/machdep.h> -#include <asm/pci-bridge.h> -#include <mm/mmu_decl.h> -#include <asm/prom.h> -#include <asm/udbg.h> -#include <asm/mpic.h> - -#include <linux/of_platform.h> -#include <sysdev/fsl_soc.h> -#include <sysdev/fsl_pci.h> -#include <asm/ehv_pic.h> - -#include "corenet_ds.h" - -/* - * Called very early, device-tree isn't unflattened - */ -static int __init t4240_qds_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); -#ifdef CONFIG_SMP - extern struct smp_ops_t smp_85xx_ops; -#endif - - if (of_flat_dt_is_compatible(root, "fsl,T4240QDS")) - return 1; - - /* Check if we're running under the Freescale hypervisor */ - if (of_flat_dt_is_compatible(root, "fsl,T4240QDS-hv")) { - ppc_md.init_IRQ = ehv_pic_init; - ppc_md.get_irq = ehv_pic_get_irq; - ppc_md.restart = fsl_hv_restart; - ppc_md.power_off = fsl_hv_halt; - ppc_md.halt = fsl_hv_halt; -#ifdef CONFIG_SMP - /* - * Disable the timebase sync operations because we can't write - * to the timebase registers under the hypervisor. - */ - smp_85xx_ops.give_timebase = NULL; - smp_85xx_ops.take_timebase = NULL; -#endif - return 1; - } - - return 0; -} - -define_machine(t4240_qds) { - .name = "T4240 QDS", - .probe = t4240_qds_probe, - .setup_arch = corenet_ds_setup_arch, - .init_IRQ = corenet_ds_pic_init, -#ifdef CONFIG_PCI - .pcibios_fixup_bus = fsl_pcibios_fixup_bus, -#endif - .get_irq = mpic_get_coreint_irq, - .restart = fsl_rstcr_restart, - .calibrate_decr = generic_calibrate_decr, - .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else - .power_save = e500_idle, -#endif -}; - -machine_arch_initcall(t4240_qds, corenet_ds_publish_devices); - -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(t4240_qds, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index 9982f57c98b..d5b98c0f958 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c @@ -10,6 +10,7 @@ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/interrupt.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/mpic.h> diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c index 7d9ac6040d6..e62166681d0 100644 --- a/arch/powerpc/platforms/8xx/ep88xc.c +++ b/arch/powerpc/platforms/8xx/ep88xc.c @@ -10,6 +10,8 @@ */ #include <linux/init.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/machdep.h> diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c index 866feff83c9..63084640c5c 100644 --- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c @@ -15,6 +15,8 @@ */ #include <linux/init.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include 
<linux/of_platform.h> #include <asm/io.h> diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c index 5d98398c2f5..c1262581b63 100644 --- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c +++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c @@ -25,6 +25,8 @@ #include <linux/fs_uart_pd.h> #include <linux/fsl_devices.h> #include <linux/mii.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/delay.h> diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c index 8d21ab70e06..251aba8759e 100644 --- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c +++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c @@ -28,6 +28,7 @@ #include <linux/fs_uart_pd.h> #include <linux/fsl_devices.h> #include <linux/mii.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/delay.h> @@ -48,7 +49,7 @@ struct cpm_pin { int port, pin, flags; }; -static struct __initdata cpm_pin tqm8xx_pins[] = { +static struct cpm_pin tqm8xx_pins[] __initdata = { /* SMC1 */ {CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */ {CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */ @@ -63,7 +64,7 @@ static struct __initdata cpm_pin tqm8xx_pins[] = { {CPM_PORTC, 11, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, }; -static struct __initdata cpm_pin tqm8xx_fec_pins[] = { +static struct cpm_pin tqm8xx_fec_pins[] __initdata = { /* MII */ {CPM_PORTD, 3, CPM_PIN_OUTPUT}, {CPM_PORTD, 4, CPM_PIN_OUTPUT}, diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 6704e2e20e6..c2a566fb8bb 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -93,22 +93,23 @@ choice config GENERIC_CPU bool "Generic" + depends on !CPU_LITTLE_ENDIAN config CELL_CPU bool "Cell Broadband Engine" - depends on PPC_BOOK3S_64 + depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN config POWER4_CPU bool "POWER4" - depends on PPC_BOOK3S_64 + depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN config POWER5_CPU bool "POWER5" - depends on PPC_BOOK3S_64 + depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN config POWER6_CPU bool "POWER6" - depends on PPC_BOOK3S_64 + depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN config POWER7_CPU bool "POWER7" diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c index 14be2bd358b..4278acfa2ed 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c +++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c @@ -486,7 +486,6 @@ static __init int celleb_setup_pciex(struct device_node *node, struct pci_controller *phb) { struct resource r; - struct of_irq oirq; int virq; /* SMMIO registers; used inside this file */ @@ -507,12 +506,11 @@ static __init int celleb_setup_pciex(struct device_node *node, phb->ops = &scc_pciex_pci_ops; /* internal interrupt handler */ - if (of_irq_map_one(node, 1, &oirq)) { + virq = irq_of_parse_and_map(node, 1); + if (!virq) { pr_err("PCIEXC:Failed to map irq\n"); goto error; } - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); if (request_irq(virq, pciex_handle_internal_irq, 0, "pciex", (void *)phb)) { pr_err("PCIEXC:Failed to request irq\n"); diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c index 9c339ec646f..c8eb5719382 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_sio.c +++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c @@ -45,7 +45,7 @@ static int __init 
txx9_serial_init(void) struct device_node *node; int i; struct uart_port req; - struct of_irq irq; + struct of_phandle_args irq; struct resource res; for_each_compatible_node(node, "serial", "toshiba,sio-scc") { @@ -53,7 +53,7 @@ static int __init txx9_serial_init(void) if (!(txx9_serial_bitmap & (1<<i))) continue; - if (of_irq_map_one(node, i, &irq)) + if (of_irq_parse_one(node, i, &irq)) continue; if (of_address_to_resource(node, txx9_scc_tab[i].index, &res)) @@ -66,8 +66,7 @@ static int __init txx9_serial_init(void) #ifdef CONFIG_SERIAL_TXX9_CONSOLE req.membase = ioremap(req.mapbase, 0x24); #endif - req.irq = irq_create_of_mapping(irq.controller, - irq.specifier, irq.size); + req.irq = irq_create_of_mapping(&irq); req.flags |= UPF_IOREMAP | UPF_BUGGY_UART /*HAVE_CTS_LINE*/; req.uartclk = 83300000; diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 8e299447127..1f72f4ab635 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -235,12 +235,9 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) /* First, we check whether we have a real "interrupts" in the device * tree in case the device-tree is ever fixed */ - struct of_irq oirq; - if (of_irq_map_one(pic->host->of_node, 0, &oirq) == 0) { - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); + virq = irq_of_parse_and_map(pic->host->of_node, 0); + if (virq) return virq; - } /* Now do the horrible hacks */ tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL); diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index 2bb6977c0a5..c3327f3d8cf 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c @@ -177,21 +177,20 @@ out: static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) { - struct of_irq oirq; + struct of_phandle_args oirq; int ret; int i; for (i=0; i < 3; i++) { - ret = of_irq_map_one(np, i, &oirq); + ret = of_irq_parse_one(np, i, &oirq); if (ret) { pr_debug("spu_new: failed to get irq %d\n", i); goto err; } ret = -EINVAL; - pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0], - oirq.controller->full_name); - spu->irqs[i] = irq_create_of_mapping(oirq.controller, - oirq.specifier, oirq.size); + pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0], + oirq.np->full_name); + spu->irqs[i] = irq_create_of_mapping(&oirq); if (spu->irqs[i] == NO_IRQ) { pr_debug("spu_new: failed to map it !\n"); goto err; @@ -200,7 +199,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) return 0; err: - pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, + pr_debug("failed to map irq %x for spu %s\n", *oirq.args, spu->name); for (; i >= 0; i--) { if (spu->irqs[i] != NO_IRQ) diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index db4e638cf40..3844f1397fc 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/syscalls.h> #include <linux/rcupdate.h> +#include <linux/binfmts.h> #include <asm/spu.h> @@ -126,7 +127,7 @@ int elf_coredump_extra_notes_size(void) return ret; } -int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset) +int elf_coredump_extra_notes_write(struct coredump_params *cprm) { struct spufs_calls *calls; int ret; @@ -135,7 +136,7 @@ int 
elf_coredump_extra_notes_write(struct file *file, loff_t *foffset) if (!calls) return 0; - ret = calls->coredump_extra_notes_write(file, foffset); + ret = calls->coredump_extra_notes_write(cprm); spufs_calls_put(calls); diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index c9500ea7be2..be6212ddbf0 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -27,6 +27,8 @@ #include <linux/gfp.h> #include <linux/list.h> #include <linux/syscalls.h> +#include <linux/coredump.h> +#include <linux/binfmts.h> #include <asm/uaccess.h> @@ -48,44 +50,6 @@ static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer, return ++ret; /* count trailing NULL */ } -/* - * These are the only things you should do on a core-file: use only these - * functions to write out all the necessary info. - */ -static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset) -{ - unsigned long limit = rlimit(RLIMIT_CORE); - ssize_t written; - - if (*foffset + nr > limit) - return -EIO; - - written = file->f_op->write(file, addr, nr, &file->f_pos); - *foffset += written; - - if (written != nr) - return -EIO; - - return 0; -} - -static int spufs_dump_align(struct file *file, char *buf, loff_t new_off, - loff_t *foffset) -{ - int rc, size; - - size = min((loff_t)PAGE_SIZE, new_off - *foffset); - memset(buf, 0, size); - - rc = 0; - while (rc == 0 && new_off > *foffset) { - size = min((loff_t)PAGE_SIZE, new_off - *foffset); - rc = spufs_dump_write(file, buf, size, foffset); - } - - return rc; -} - static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) { int i, sz, total = 0; @@ -165,10 +129,10 @@ int spufs_coredump_extra_notes_size(void) } static int spufs_arch_write_note(struct spu_context *ctx, int i, - struct file *file, int dfd, loff_t *foffset) + struct coredump_params *cprm, int dfd) { loff_t pos = 0; - int sz, rc, nread, total = 0; + int sz, rc, total = 0; const int bufsz = PAGE_SIZE; char *name; char fullname[80], *buf; @@ -186,42 +150,39 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i, en.n_descsz = sz; en.n_type = NT_SPU; - rc = spufs_dump_write(file, &en, sizeof(en), foffset); - if (rc) - goto out; + if (!dump_emit(cprm, &en, sizeof(en))) + goto Eio; - rc = spufs_dump_write(file, fullname, en.n_namesz, foffset); - if (rc) - goto out; + if (!dump_emit(cprm, fullname, en.n_namesz)) + goto Eio; - rc = spufs_dump_align(file, buf, roundup(*foffset, 4), foffset); - if (rc) - goto out; + if (!dump_align(cprm, 4)) + goto Eio; do { - nread = do_coredump_read(i, ctx, buf, bufsz, &pos); - if (nread > 0) { - rc = spufs_dump_write(file, buf, nread, foffset); - if (rc) - goto out; - total += nread; + rc = do_coredump_read(i, ctx, buf, bufsz, &pos); + if (rc > 0) { + if (!dump_emit(cprm, buf, rc)) + goto Eio; + total += rc; } - } while (nread == bufsz && total < sz); + } while (rc == bufsz && total < sz); - if (nread < 0) { - rc = nread; + if (rc < 0) goto out; - } - - rc = spufs_dump_align(file, buf, roundup(*foffset - total + sz, 4), - foffset); + if (!dump_skip(cprm, + roundup(cprm->written - total + sz, 4) - cprm->written)) + goto Eio; out: free_page((unsigned long)buf); return rc; +Eio: + free_page((unsigned long)buf); + return -EIO; } -int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset) +int spufs_coredump_extra_notes_write(struct coredump_params *cprm) { struct spu_context *ctx; int fd, j, rc; @@ -233,7 +194,7 @@ int 
spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset) return rc; for (j = 0; spufs_coredump_read[j].name != NULL; j++) { - rc = spufs_arch_write_note(ctx, j, file, fd, foffset); + rc = spufs_arch_write_note(ctx, j, cprm, fd); if (rc) { spu_release_saved(ctx); return rc; diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 67852ade4c0..0ba3c959835 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -247,12 +247,13 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[]; /* system call implementation */ extern struct spufs_calls spufs_calls; +struct coredump_params; long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp); /* ELF coredump callbacks for writing SPU ELF notes */ extern int spufs_coredump_extra_notes_size(void); -extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset); +extern int spufs_coredump_extra_notes_write(struct coredump_params *cprm); extern const struct file_operations spufs_context_fops; diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index d3ceff04ffc..9ef8cc3378d 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -66,7 +66,7 @@ static void chrp_nvram_write(int addr, unsigned char val) void __init chrp_nvram_init(void) { struct device_node *nvram; - const unsigned int *nbytes_p; + const __be32 *nbytes_p; unsigned int proplen; nvram = of_find_node_by_type(NULL, "nvram"); @@ -79,7 +79,7 @@ void __init chrp_nvram_init(void) return; } - nvram_size = *nbytes_p; + nvram_size = be32_to_cpup(nbytes_p); printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size); of_node_put(nvram); diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index 53d6eee0196..4cde8e7da4b 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/of.h> +#include <linux/of_address.h> #include <asm/io.h> #include "flipper-pic.h" diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index 3006b5117ec..6c03034dbbd 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -18,6 +18,8 @@ #include <linux/init.h> #include <linux/irq.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/io.h> #include "hlwd-pic.h" @@ -181,6 +183,7 @@ struct irq_domain *hlwd_pic_init(struct device_node *np) &hlwd_irq_domain_ops, io_base); if (!irq_domain) { pr_err("failed to allocate irq_domain\n"); + iounmap(io_base); return NULL; } diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c index 92ac9b52b32..b97f6f3d3c5 100644 --- a/arch/powerpc/platforms/fsl_uli1575.c +++ b/arch/powerpc/platforms/fsl_uli1575.c @@ -321,8 +321,7 @@ static void hpcd_final_uli5288(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct device_node *hosenode = hose ? 
hose->dn : NULL; - struct of_irq oirq; - int virq, pin = 2; + struct of_phandle_args oirq; u32 laddr[3]; if (!machine_is(mpc86xx_hpcd)) @@ -331,12 +330,13 @@ static void hpcd_final_uli5288(struct pci_dev *dev) if (!hosenode) return; + oirq.np = hosenode; + oirq.args[0] = 2; + oirq.args_count = 1; laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(31, 0) << 8); laddr[1] = laddr[2] = 0; - of_irq_map_raw(hosenode, &pin, 1, laddr, &oirq); - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); - dev->irq = virq; + of_irq_parse_raw(laddr, &oirq); + dev->irq = irq_create_of_mapping(&oirq); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, hpcd_quirk_uli1575); diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index 0237ab782fb..15adee54463 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c @@ -30,6 +30,7 @@ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/phy.h> +#include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index fc536f2971c..7553b6a77c6 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -452,7 +452,7 @@ static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, */ if (use_irq) { /* Clear completion */ - INIT_COMPLETION(host->complete); + reinit_completion(&host->complete); /* Ack stale interrupts */ kw_write_reg(reg_isr, kw_read_reg(reg_isr)); /* Arm timeout */ @@ -717,7 +717,7 @@ static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, return -EINVAL; } - INIT_COMPLETION(comp); + reinit_completion(&comp); req->data[0] = PMU_I2C_CMD; req->reply[0] = 0xff; req->nbytes = sizeof(struct pmu_i2c_hdr) + 1; @@ -748,7 +748,7 @@ static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, hdr->bus = PMU_I2C_BUS_STATUS; - INIT_COMPLETION(comp); + reinit_completion(&comp); req->data[0] = PMU_I2C_CMD; req->reply[0] = 0xff; req->nbytes = 2; diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index f5e3cda6660..e49d07f3d54 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/of_irq.h> #include <asm/pmac_feature.h> #include <asm/pmac_pfunc.h> diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 31036b56670..4c24bf60d39 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -393,8 +393,8 @@ static void __init pmac_pic_probe_oldstyle(void) #endif } -int of_irq_map_oldworld(struct device_node *device, int index, - struct of_irq *out_irq) +int of_irq_parse_oldworld(struct device_node *device, int index, + struct of_phandle_args *out_irq) { const u32 *ints = NULL; int intlen; @@ -422,9 +422,9 @@ int of_irq_map_oldworld(struct device_node *device, int index, if (index >= intlen) return -EINVAL; - out_irq->controller = NULL; - out_irq->specifier[0] = ints[index]; - out_irq->size = 1; + out_irq->np = NULL; + out_irq->args[0] = ints[index]; + out_irq->args_count = 1; return 0; } diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index 6fae5eb99ea..9fced3f6d2d 100644 --- 
a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -9,6 +9,8 @@ config PPC_POWERNV select EPAPR_BOOT select PPC_INDIRECT_PIO select PPC_UDBG_16550 + select PPC_SCOM + select ARCH_RANDOM default y config POWERNV_MSI diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 300c437d713..873fa1370dc 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -1,6 +1,8 @@ obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o -obj-y += opal-rtc.o opal-nvram.o opal-lpc.o +obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o +obj-y += rng.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o +obj-$(CONFIG_PPC_SCOM) += opal-xscom.o diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index cf42e74514f..02245cee781 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -59,26 +59,60 @@ static struct notifier_block ioda_eeh_nb = { }; #ifdef CONFIG_DEBUG_FS -static int ioda_eeh_dbgfs_set(void *data, u64 val) +static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val) { struct pci_controller *hose = data; struct pnv_phb *phb = hose->private_data; - out_be64(phb->regs + 0xD10, val); + out_be64(phb->regs + offset, val); return 0; } -static int ioda_eeh_dbgfs_get(void *data, u64 *val) +static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val) { struct pci_controller *hose = data; struct pnv_phb *phb = hose->private_data; - *val = in_be64(phb->regs + 0xD10); + *val = in_be64(phb->regs + offset); return 0; } -DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_dbgfs_ops, ioda_eeh_dbgfs_get, - ioda_eeh_dbgfs_set, "0x%llx\n"); +static int ioda_eeh_outb_dbgfs_set(void *data, u64 val) +{ + return ioda_eeh_dbgfs_set(data, 0xD10, val); +} + +static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val) +{ + return ioda_eeh_dbgfs_get(data, 0xD10, val); +} + +static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val) +{ + return ioda_eeh_dbgfs_set(data, 0xD90, val); +} + +static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val) +{ + return ioda_eeh_dbgfs_get(data, 0xD90, val); +} + +static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val) +{ + return ioda_eeh_dbgfs_set(data, 0xE10, val); +} + +static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val) +{ + return ioda_eeh_dbgfs_get(data, 0xE10, val); +} + +DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get, + ioda_eeh_outb_dbgfs_set, "0x%llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get, + ioda_eeh_inbA_dbgfs_set, "0x%llx\n"); +DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get, + ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); #endif /* CONFIG_DEBUG_FS */ /** @@ -106,27 +140,30 @@ static int ioda_eeh_post_init(struct pci_controller *hose) ioda_eeh_nb_init = 1; } - /* FIXME: Enable it for PHB3 later */ - if (phb->type == PNV_PHB_IODA1) { + /* We needn't HUB diag-data on PHB3 */ + if (phb->type == PNV_PHB_IODA1 && !hub_diag) { + hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO); if (!hub_diag) { - hub_diag = (char *)__get_free_page(GFP_KERNEL | - __GFP_ZERO); - if (!hub_diag) { - pr_err("%s: Out of memory !\n", - __func__); - return -ENOMEM; - } + pr_err("%s: Out of memory !\n", __func__); + return -ENOMEM; } + } #ifdef CONFIG_DEBUG_FS - if (phb->dbgfs) - debugfs_create_file("err_injct", 0600, - phb->dbgfs, hose, - 
&ioda_eeh_dbgfs_ops); + if (phb->dbgfs) { + debugfs_create_file("err_injct_outbound", 0600, + phb->dbgfs, hose, + &ioda_eeh_outb_dbgfs_ops); + debugfs_create_file("err_injct_inboundA", 0600, + phb->dbgfs, hose, + &ioda_eeh_inbA_dbgfs_ops); + debugfs_create_file("err_injct_inboundB", 0600, + phb->dbgfs, hose, + &ioda_eeh_inbB_dbgfs_ops); + } #endif - phb->eeh_state |= PNV_EEH_STATE_ENABLED; - } + phb->eeh_state |= PNV_EEH_STATE_ENABLED; return 0; } @@ -546,8 +583,8 @@ static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); if (ret) { spin_unlock_irqrestore(&phb->lock, flags); - pr_warning("%s: Failed to get log for PHB#%x-PE#%x\n", - __func__, hose->global_number, pe->addr); + pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n", + __func__, hose->global_number, pe->addr, ret); return -EIO; } @@ -710,6 +747,73 @@ static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose, } } +static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose, + struct OpalIoPhbErrorCommon *common) +{ + struct OpalIoPhb3ErrorData *data; + int i; + + data = (struct OpalIoPhb3ErrorData*)common; + pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n", + hose->global_number, common->version); + + pr_info(" brdgCtl: %08x\n", data->brdgCtl); + + pr_info(" portStatusReg: %08x\n", data->portStatusReg); + pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); + pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); + + pr_info(" deviceStatus: %08x\n", data->deviceStatus); + pr_info(" slotStatus: %08x\n", data->slotStatus); + pr_info(" linkStatus: %08x\n", data->linkStatus); + pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); + pr_info(" devSecStatus: %08x\n", data->devSecStatus); + + pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); + pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); + pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); + pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); + pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); + pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); + pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); + pr_info(" sourceId: %08x\n", data->sourceId); + pr_info(" errorClass: %016llx\n", data->errorClass); + pr_info(" correlator: %016llx\n", data->correlator); + pr_info(" nFir: %016llx\n", data->nFir); + pr_info(" nFirMask: %016llx\n", data->nFirMask); + pr_info(" nFirWOF: %016llx\n", data->nFirWOF); + pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); + pr_info(" PhbCsr: %016llx\n", data->phbCsr); + pr_info(" lemFir: %016llx\n", data->lemFir); + pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); + pr_info(" lemWOF: %016llx\n", data->lemWOF); + pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); + pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); + pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); + pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); + pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); + pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); + pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); + pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); + pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); + pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); + pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); + pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); + pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); + pr_info(" dma1FirstErrorStatus: 
%016llx\n", data->dma1FirstErrorStatus); + pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); + pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); + + for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { + if ((data->pestA[i] >> 63) == 0 && + (data->pestB[i] >> 63) == 0) + continue; + + pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); + pr_info(" PESTB: %016llx\n", data->pestB[i]); + } +} + static void ioda_eeh_phb_diag(struct pci_controller *hose) { struct pnv_phb *phb = hose->private_data; @@ -728,6 +832,9 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose) case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: ioda_eeh_p7ioc_phb_diag(hose, common); break; + case OPAL_PHB_ERROR_DATA_TYPE_PHB3: + ioda_eeh_phb3_phb_diag(hose, common); + break; default: pr_warning("%s: Unrecognized I/O chip %d\n", __func__, common->ioType); diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c index 79663d26e6e..73b981438cc 100644 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c @@ -144,11 +144,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag) /* * Enable EEH explicitly so that we will do EEH check * while accessing I/O stuff - * - * FIXME: Enable that for PHB3 later */ - if (phb->type == PNV_PHB_IODA1) - eeh_subsystem_enabled = 1; + eeh_subsystem_enabled = 1; /* Save memory bars */ eeh_save_bars(edev); diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c new file mode 100644 index 00000000000..6ffa6b1ec5b --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-flash.c @@ -0,0 +1,667 @@ +/* + * PowerNV OPAL Firmware Update Interface + * + * Copyright 2013 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#define DEBUG + +#include <linux/kernel.h> +#include <linux/reboot.h> +#include <linux/init.h> +#include <linux/kobject.h> +#include <linux/sysfs.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> + +#include <asm/opal.h> + +/* FLASH status codes */ +#define FLASH_NO_OP -1099 /* No operation initiated by user */ +#define FLASH_NO_AUTH -9002 /* Not a service authority partition */ + +/* Validate image status values */ +#define VALIDATE_IMG_READY -1001 /* Image ready for validation */ +#define VALIDATE_IMG_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */ + +/* Manage image status values */ +#define MANAGE_ACTIVE_ERR -9001 /* Cannot overwrite active img */ + +/* Flash image status values */ +#define FLASH_IMG_READY 0 /* Img ready for flash on reboot */ +#define FLASH_INVALID_IMG -1003 /* Flash image shorter than expected */ +#define FLASH_IMG_NULL_DATA -1004 /* Bad data in sg list entry */ +#define FLASH_IMG_BAD_LEN -1005 /* Bad length in sg list entry */ + +/* Manage operation tokens */ +#define FLASH_REJECT_TMP_SIDE 0 /* Reject temporary fw image */ +#define FLASH_COMMIT_TMP_SIDE 1 /* Commit temporary fw image */ + +/* Update tokens */ +#define FLASH_UPDATE_CANCEL 0 /* Cancel update request */ +#define FLASH_UPDATE_INIT 1 /* Initiate update */ + +/* Validate image update result tokens */ +#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */ +#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */ +#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */ +#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */ +/* + * Current T side will be committed to P side before being replace with new + * image, and the new image is downlevel from current image + */ +#define VALIDATE_TMP_COMMIT_DL 4 +/* + * Current T side will be committed to P side before being replaced with new + * image + */ +#define VALIDATE_TMP_COMMIT 5 +/* + * T side will be updated with a downlevel image + */ +#define VALIDATE_TMP_UPDATE_DL 6 +/* + * The candidate image's release date is later than the system's firmware + * service entitlement date - service warranty period has expired + */ +#define VALIDATE_OUT_OF_WRNTY 7 + +/* Validate buffer size */ +#define VALIDATE_BUF_SIZE 4096 + +/* XXX: Assume candidate image size is <= 256MB */ +#define MAX_IMAGE_SIZE 0x10000000 + +/* Flash sg list version */ +#define SG_LIST_VERSION (1UL) + +/* Image status */ +enum { + IMAGE_INVALID, + IMAGE_LOADING, + IMAGE_READY, +}; + +/* Candidate image data */ +struct image_data_t { + int status; + void *data; + uint32_t size; +}; + +/* Candidate image header */ +struct image_header_t { + uint16_t magic; + uint16_t version; + uint32_t size; +}; + +/* Scatter/gather entry */ +struct opal_sg_entry { + void *data; + long length; +}; + +/* We calculate number of entries based on PAGE_SIZE */ +#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) + +/* + * This struct is very similar but not identical to that + * needed by the opal flash update. All we need to do for + * opal is rewrite num_entries into a version/length and + * translate the pointers to absolute. 
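
As the comment above says, only two adjustments are needed before this list can be handed to firmware: the leading num_entries word of each node is rewritten into a version/length field, and the pointers are translated from kernel virtual to physical addresses. A condensed sketch of just that conversion step, with a hypothetical helper name; it mirrors the opal_flash_update() code added further down in this file and is shown out of line here only to make the transformation easier to see:

static void sg_list_to_opal_abi(struct opal_sg_list *list)
{
	struct opal_sg_list *sg, *next;

	for (sg = list; sg; sg = next) {
		next = sg->next;

		/*
		 * Version in the top byte, entry length in bytes below it;
		 * the +16 covers the num_entries and next words themselves.
		 */
		sg->num_entries = (SG_LIST_VERSION << 56) |
			(sg->num_entries * sizeof(struct opal_sg_entry) + 16);

		/* firmware follows physical, not virtual, next pointers */
		if (sg->next)
			sg->next = (struct opal_sg_list *)__pa(sg->next);
	}
}

The list head itself is then passed to opal_update_flash() as __pa(list), as the code below does.
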
+ */ +struct opal_sg_list { + unsigned long num_entries; + struct opal_sg_list *next; + struct opal_sg_entry entry[SG_ENTRIES_PER_NODE]; +}; + +struct validate_flash_t { + int status; /* Return status */ + void *buf; /* Candiate image buffer */ + uint32_t buf_size; /* Image size */ + uint32_t result; /* Update results token */ +}; + +struct manage_flash_t { + int status; /* Return status */ +}; + +struct update_flash_t { + int status; /* Return status */ +}; + +static struct image_header_t image_header; +static struct image_data_t image_data; +static struct validate_flash_t validate_flash_data; +static struct manage_flash_t manage_flash_data; +static struct update_flash_t update_flash_data; + +static DEFINE_MUTEX(image_data_mutex); + +/* + * Validate candidate image + */ +static inline void opal_flash_validate(void) +{ + struct validate_flash_t *args_buf = &validate_flash_data; + + args_buf->status = opal_validate_flash(__pa(args_buf->buf), + &(args_buf->buf_size), + &(args_buf->result)); +} + +/* + * Validate output format: + * validate result token + * current image version details + * new image version details + */ +static ssize_t validate_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct validate_flash_t *args_buf = &validate_flash_data; + int len; + + /* Candidate image is not validated */ + if (args_buf->status < VALIDATE_TMP_UPDATE) { + len = sprintf(buf, "%d\n", args_buf->status); + goto out; + } + + /* Result token */ + len = sprintf(buf, "%d\n", args_buf->result); + + /* Current and candidate image version details */ + if ((args_buf->result != VALIDATE_TMP_UPDATE) && + (args_buf->result < VALIDATE_CUR_UNKNOWN)) + goto out; + + if (args_buf->buf_size > (VALIDATE_BUF_SIZE - len)) { + memcpy(buf + len, args_buf->buf, VALIDATE_BUF_SIZE - len); + len = VALIDATE_BUF_SIZE; + } else { + memcpy(buf + len, args_buf->buf, args_buf->buf_size); + len += args_buf->buf_size; + } +out: + /* Set status to default */ + args_buf->status = FLASH_NO_OP; + return len; +} + +/* + * Validate candidate firmware image + * + * Note: + * We are only interested in first 4K bytes of the + * candidate image. 
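
The validation described here is driven from userspace through the sysfs files this patch creates under /sys/firmware/opal (image, validate_flash, manage_flash and update_flash; see the attribute definitions near the end of the file). A minimal, hypothetical userspace sketch of the intended sequence -- stage the image, validate it, then arm the flash for the next reboot; error handling and the actual image copy are elided:

#include <fcntl.h>
#include <unistd.h>

static void write_flag(const char *path, char c)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, &c, 1);
		close(fd);
	}
}

int main(void)
{
	/*
	 * 1. Stream the candidate image into /sys/firmware/opal/image
	 *    (the header parsed by this code sits in the first 4K).
	 */

	/* 2. Ask firmware to validate the staged image */
	write_flag("/sys/firmware/opal/validate_flash", '1');
	/* read validate_flash back for the result token and version info */

	/* 3. Request the update; it is applied during the next reboot */
	write_flag("/sys/firmware/opal/update_flash", '1');

	return 0;
}
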
+ */ +static ssize_t validate_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct validate_flash_t *args_buf = &validate_flash_data; + + if (buf[0] != '1') + return -EINVAL; + + mutex_lock(&image_data_mutex); + + if (image_data.status != IMAGE_READY || + image_data.size < VALIDATE_BUF_SIZE) { + args_buf->result = VALIDATE_INVALID_IMG; + args_buf->status = VALIDATE_IMG_INCOMPLETE; + goto out; + } + + /* Copy first 4k bytes of candidate image */ + memcpy(args_buf->buf, image_data.data, VALIDATE_BUF_SIZE); + + args_buf->status = VALIDATE_IMG_READY; + args_buf->buf_size = VALIDATE_BUF_SIZE; + + /* Validate candidate image */ + opal_flash_validate(); + +out: + mutex_unlock(&image_data_mutex); + return count; +} + +/* + * Manage flash routine + */ +static inline void opal_flash_manage(uint8_t op) +{ + struct manage_flash_t *const args_buf = &manage_flash_data; + + args_buf->status = opal_manage_flash(op); +} + +/* + * Show manage flash status + */ +static ssize_t manage_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct manage_flash_t *const args_buf = &manage_flash_data; + int rc; + + rc = sprintf(buf, "%d\n", args_buf->status); + /* Set status to default*/ + args_buf->status = FLASH_NO_OP; + return rc; +} + +/* + * Manage operations: + * 0 - Reject + * 1 - Commit + */ +static ssize_t manage_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + uint8_t op; + switch (buf[0]) { + case '0': + op = FLASH_REJECT_TMP_SIDE; + break; + case '1': + op = FLASH_COMMIT_TMP_SIDE; + break; + default: + return -EINVAL; + } + + /* commit/reject temporary image */ + opal_flash_manage(op); + return count; +} + +/* + * Free sg list + */ +static void free_sg_list(struct opal_sg_list *list) +{ + struct opal_sg_list *sg1; + while (list) { + sg1 = list->next; + kfree(list); + list = sg1; + } + list = NULL; +} + +/* + * Build candidate image scatter gather list + * + * list format: + * ----------------------------------- + * | VER (8) | Entry length in bytes | + * ----------------------------------- + * | Pointer to next entry | + * ----------------------------------- + * | Address of memory area 1 | + * ----------------------------------- + * | Length of memory area 1 | + * ----------------------------------- + * | ......... | + * ----------------------------------- + * | ......... 
| + * ----------------------------------- + * | Address of memory area N | + * ----------------------------------- + * | Length of memory area N | + * ----------------------------------- + */ +static struct opal_sg_list *image_data_to_sglist(void) +{ + struct opal_sg_list *sg1, *list = NULL; + void *addr; + int size; + + addr = image_data.data; + size = image_data.size; + + sg1 = kzalloc((sizeof(struct opal_sg_list)), GFP_KERNEL); + if (!sg1) + return NULL; + + list = sg1; + sg1->num_entries = 0; + while (size > 0) { + /* Translate virtual address to physical address */ + sg1->entry[sg1->num_entries].data = + (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT); + + if (size > PAGE_SIZE) + sg1->entry[sg1->num_entries].length = PAGE_SIZE; + else + sg1->entry[sg1->num_entries].length = size; + + sg1->num_entries++; + if (sg1->num_entries >= SG_ENTRIES_PER_NODE) { + sg1->next = kzalloc((sizeof(struct opal_sg_list)), + GFP_KERNEL); + if (!sg1->next) { + pr_err("%s : Failed to allocate memory\n", + __func__); + goto nomem; + } + + sg1 = sg1->next; + sg1->num_entries = 0; + } + addr += PAGE_SIZE; + size -= PAGE_SIZE; + } + return list; +nomem: + free_sg_list(list); + return NULL; +} + +/* + * OPAL update flash + */ +static int opal_flash_update(int op) +{ + struct opal_sg_list *sg, *list, *next; + unsigned long addr; + int64_t rc = OPAL_PARAMETER; + + if (op == FLASH_UPDATE_CANCEL) { + pr_alert("FLASH: Image update cancelled\n"); + addr = '\0'; + goto flash; + } + + list = image_data_to_sglist(); + if (!list) + goto invalid_img; + + /* First entry address */ + addr = __pa(list); + + /* Translate sg list address to absolute */ + for (sg = list; sg; sg = next) { + next = sg->next; + /* Don't translate NULL pointer for last entry */ + if (sg->next) + sg->next = (struct opal_sg_list *)__pa(sg->next); + else + sg->next = NULL; + + /* Make num_entries into the version/length field */ + sg->num_entries = (SG_LIST_VERSION << 56) | + (sg->num_entries * sizeof(struct opal_sg_entry) + 16); + } + + pr_alert("FLASH: Image is %u bytes\n", image_data.size); + pr_alert("FLASH: Image update requested\n"); + pr_alert("FLASH: Image will be updated during system reboot\n"); + pr_alert("FLASH: This will take several minutes. Do not power off!\n"); + +flash: + rc = opal_update_flash(addr); + +invalid_img: + return rc; +} + +/* + * Show candidate image status + */ +static ssize_t update_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct update_flash_t *const args_buf = &update_flash_data; + return sprintf(buf, "%d\n", args_buf->status); +} + +/* + * Set update image flag + * 1 - Flash new image + * 0 - Cancel flash request + */ +static ssize_t update_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct update_flash_t *const args_buf = &update_flash_data; + int rc = count; + + mutex_lock(&image_data_mutex); + + switch (buf[0]) { + case '0': + if (args_buf->status == FLASH_IMG_READY) + opal_flash_update(FLASH_UPDATE_CANCEL); + args_buf->status = FLASH_NO_OP; + break; + case '1': + /* Image is loaded? 
*/ + if (image_data.status == IMAGE_READY) + args_buf->status = + opal_flash_update(FLASH_UPDATE_INIT); + else + args_buf->status = FLASH_INVALID_IMG; + break; + default: + rc = -EINVAL; + } + + mutex_unlock(&image_data_mutex); + return rc; +} + +/* + * Free image buffer + */ +static void free_image_buf(void) +{ + void *addr; + int size; + + addr = image_data.data; + size = PAGE_ALIGN(image_data.size); + while (size > 0) { + ClearPageReserved(vmalloc_to_page(addr)); + addr += PAGE_SIZE; + size -= PAGE_SIZE; + } + vfree(image_data.data); + image_data.data = NULL; + image_data.status = IMAGE_INVALID; +} + +/* + * Allocate image buffer. + */ +static int alloc_image_buf(char *buffer, size_t count) +{ + void *addr; + int size; + + if (count < sizeof(struct image_header_t)) { + pr_warn("FLASH: Invalid candidate image\n"); + return -EINVAL; + } + + memcpy(&image_header, (void *)buffer, sizeof(struct image_header_t)); + image_data.size = be32_to_cpu(image_header.size); + pr_debug("FLASH: Candiate image size = %u\n", image_data.size); + + if (image_data.size > MAX_IMAGE_SIZE) { + pr_warn("FLASH: Too large image\n"); + return -EINVAL; + } + if (image_data.size < VALIDATE_BUF_SIZE) { + pr_warn("FLASH: Image is shorter than expected\n"); + return -EINVAL; + } + + image_data.data = vzalloc(PAGE_ALIGN(image_data.size)); + if (!image_data.data) { + pr_err("%s : Failed to allocate memory\n", __func__); + return -ENOMEM; + } + + /* Pin memory */ + addr = image_data.data; + size = PAGE_ALIGN(image_data.size); + while (size > 0) { + SetPageReserved(vmalloc_to_page(addr)); + addr += PAGE_SIZE; + size -= PAGE_SIZE; + } + + image_data.status = IMAGE_LOADING; + return 0; +} + +/* + * Copy candidate image + * + * Parse candidate image header to get total image size + * and pre-allocate required memory. + */ +static ssize_t image_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t pos, size_t count) +{ + int rc; + + mutex_lock(&image_data_mutex); + + /* New image ? */ + if (pos == 0) { + /* Free memory, if already allocated */ + if (image_data.data) + free_image_buf(); + + /* Cancel outstanding image update request */ + if (update_flash_data.status == FLASH_IMG_READY) + opal_flash_update(FLASH_UPDATE_CANCEL); + + /* Allocate memory */ + rc = alloc_image_buf(buffer, count); + if (rc) + goto out; + } + + if (image_data.status != IMAGE_LOADING) { + rc = -ENOMEM; + goto out; + } + + if ((pos + count) > image_data.size) { + rc = -EINVAL; + goto out; + } + + memcpy(image_data.data + pos, (void *)buffer, count); + rc = count; + + /* Set image status */ + if ((pos + count) == image_data.size) { + pr_debug("FLASH: Candidate image loaded....\n"); + image_data.status = IMAGE_READY; + } + +out: + mutex_unlock(&image_data_mutex); + return rc; +} + +/* + * sysfs interface : + * OPAL uses below sysfs files for code update. + * We create these files under /sys/firmware/opal. 
+ * + * image : Interface to load candidate firmware image + * validate_flash : Validate firmware image + * manage_flash : Commit/Reject firmware image + * update_flash : Flash new firmware image + * + */ +static struct bin_attribute image_data_attr = { + .attr = {.name = "image", .mode = 0200}, + .size = MAX_IMAGE_SIZE, /* Limit image size */ + .write = image_data_write, +}; + +static struct kobj_attribute validate_attribute = + __ATTR(validate_flash, 0600, validate_show, validate_store); + +static struct kobj_attribute manage_attribute = + __ATTR(manage_flash, 0600, manage_show, manage_store); + +static struct kobj_attribute update_attribute = + __ATTR(update_flash, 0600, update_show, update_store); + +static struct attribute *image_op_attrs[] = { + &validate_attribute.attr, + &manage_attribute.attr, + &update_attribute.attr, + NULL /* need to NULL terminate the list of attributes */ +}; + +static struct attribute_group image_op_attr_group = { + .attrs = image_op_attrs, +}; + +void __init opal_flash_init(void) +{ + int ret; + + /* Allocate validate image buffer */ + validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); + if (!validate_flash_data.buf) { + pr_err("%s : Failed to allocate memory\n", __func__); + return; + } + + /* Make sure /sys/firmware/opal directory is created */ + if (!opal_kobj) { + pr_warn("FLASH: opal kobject is not available\n"); + goto nokobj; + } + + /* Create the sysfs files */ + ret = sysfs_create_group(opal_kobj, &image_op_attr_group); + if (ret) { + pr_warn("FLASH: Failed to create sysfs files\n"); + goto nokobj; + } + + ret = sysfs_create_bin_file(opal_kobj, &image_data_attr); + if (ret) { + pr_warn("FLASH: Failed to create sysfs files\n"); + goto nosysfs_file; + } + + /* Set default status */ + validate_flash_data.status = FLASH_NO_OP; + manage_flash_data.status = FLASH_NO_OP; + update_flash_data.status = FLASH_NO_OP; + image_data.status = IMAGE_INVALID; + return; + +nosysfs_file: + sysfs_remove_group(opal_kobj, &image_op_attr_group); + +nokobj: + kfree(validate_flash_data.buf); + return; +} diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index a7614bb14e1..e7e59e4f989 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -17,6 +17,7 @@ #include <asm/firmware.h> #include <asm/xics.h> #include <asm/opal.h> +#include <asm/prom.h> static int opal_lpc_chip_id = -1; diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c index 3f83e1ae26a..acd9f7e9667 100644 --- a/arch/powerpc/platforms/powernv/opal-nvram.c +++ b/arch/powerpc/platforms/powernv/opal-nvram.c @@ -65,7 +65,7 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) void __init opal_nvram_init(void) { struct device_node *np; - const u32 *nbytes_p; + const __be32 *nbytes_p; np = of_find_compatible_node(NULL, NULL, "ibm,opal-nvram"); if (np == NULL) @@ -76,7 +76,7 @@ void __init opal_nvram_init(void) of_node_put(np); return; } - nvram_size = *nbytes_p; + nvram_size = be32_to_cpup(nbytes_p); printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size); of_node_put(np); diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index 2aa7641aac9..7d07c7e80ec 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c @@ -37,10 +37,12 @@ unsigned long __init opal_get_boot_time(void) struct rtc_time tm; u32 y_m_d; u64 h_m_s_ms; + __be32 __y_m_d; + 
__be64 __h_m_s_ms; long rc = OPAL_BUSY; while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { - rc = opal_rtc_read(&y_m_d, &h_m_s_ms); + rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); else @@ -48,6 +50,8 @@ unsigned long __init opal_get_boot_time(void) } if (rc != OPAL_SUCCESS) return 0; + y_m_d = be32_to_cpu(__y_m_d); + h_m_s_ms = be64_to_cpu(__h_m_s_ms); opal_to_tm(y_m_d, h_m_s_ms, &tm); return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); @@ -58,9 +62,11 @@ void opal_get_rtc_time(struct rtc_time *tm) long rc = OPAL_BUSY; u32 y_m_d; u64 h_m_s_ms; + __be32 __y_m_d; + __be64 __h_m_s_ms; while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { - rc = opal_rtc_read(&y_m_d, &h_m_s_ms); + rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); else @@ -68,6 +74,8 @@ void opal_get_rtc_time(struct rtc_time *tm) } if (rc != OPAL_SUCCESS) return; + y_m_d = be32_to_cpu(__y_m_d); + h_m_s_ms = be64_to_cpu(__h_m_s_ms); opal_to_tm(y_m_d, h_m_s_ms, tm); } diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 8f3844535fb..e7806504e97 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -24,7 +24,7 @@ mflr r0; \ mfcr r12; \ std r0,16(r1); \ - std r12,8(r1); \ + stw r12,8(r1); \ std r1,PACAR1(r13); \ li r0,0; \ mfmsr r12; \ @@ -34,7 +34,7 @@ mtmsrd r12,1; \ LOAD_REG_ADDR(r0,.opal_return); \ mtlr r0; \ - li r0,MSR_DR|MSR_IR; \ + li r0,MSR_DR|MSR_IR|MSR_LE;\ andc r12,r12,r0; \ li r0,token; \ mtspr SPRN_HSRR1,r12; \ @@ -45,8 +45,15 @@ hrfid _STATIC(opal_return) + /* + * Fixup endian on OPAL return... we should be able to simplify + * this by instead converting the below trampoline to a set of + * bytes (always BE) since MSR:LE will end up fixed up as a side + * effect of the rfid. + */ + FIXUP_ENDIAN ld r2,PACATOC(r13); - ld r4,8(r1); + lwz r4,8(r1); ld r5,16(r1); ld r6,PACASAVEDMSR(r13); mtspr SPRN_SRR0,r5; @@ -116,3 +123,6 @@ OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE); OPAL_CALL(opal_lpc_read, OPAL_LPC_READ); OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE); OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU); +OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); +OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); +OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c new file mode 100644 index 00000000000..4d99a8fd55a --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -0,0 +1,128 @@ +/* + * PowerNV LPC bus handling. + * + * Copyright 2013 IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
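
The __be32/__be64 conversions in the nvram and RTC hunks above (and in the opal.c console paths further down) are all the same little-endian enablement pattern: device-tree properties and OPAL out-parameters are big-endian, so on a little-endian kernel they must be held in __be* variables and byte-swapped explicitly, while the assembly wrapper above flips MSR[LE] around the firmware call. A minimal sketch of the C side of the idiom; the property name, opal_query() and use_value() are hypothetical placeholders:

	const __be32 *prop;
	__be64 raw;
	u32 bytes;
	s64 rc;

	/* DT property data is big-endian regardless of kernel endianness */
	prop = of_get_property(np, "example-size", NULL);
	if (prop)
		bytes = be32_to_cpup(prop);

	/* OPAL fills out-parameters in big-endian as well */
	rc = opal_query(&raw);               /* hypothetical OPAL wrapper */
	if (rc == OPAL_SUCCESS)
		use_value(be64_to_cpu(raw)); /* hypothetical consumer */

The __be* annotations also let sparse catch missed conversions statically.
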
+ */ + +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/bug.h> +#include <linux/gfp.h> +#include <linux/slab.h> + +#include <asm/machdep.h> +#include <asm/firmware.h> +#include <asm/opal.h> +#include <asm/scom.h> + +/* + * We could probably fit that inside the scom_map_t + * which is a void* after all but it's really too ugly + * so let's kmalloc it for now + */ +struct opal_scom_map { + uint32_t chip; + uint64_t addr; +}; + +static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count) +{ + struct opal_scom_map *m; + const __be32 *gcid; + + if (!of_get_property(dev, "scom-controller", NULL)) { + pr_err("%s: device %s is not a SCOM controller\n", + __func__, dev->full_name); + return SCOM_MAP_INVALID; + } + gcid = of_get_property(dev, "ibm,chip-id", NULL); + if (!gcid) { + pr_err("%s: device %s has no ibm,chip-id\n", + __func__, dev->full_name); + return SCOM_MAP_INVALID; + } + m = kmalloc(sizeof(struct opal_scom_map), GFP_KERNEL); + if (!m) + return NULL; + m->chip = be32_to_cpup(gcid); + m->addr = reg; + + return (scom_map_t)m; +} + +static void opal_scom_unmap(scom_map_t map) +{ + kfree(map); +} + +static int opal_xscom_err_xlate(int64_t rc) +{ + switch(rc) { + case 0: + return 0; + /* Add more translations if necessary */ + default: + return -EIO; + } +} + +static u64 opal_scom_unmangle(u64 reg) +{ + /* + * XSCOM indirect addresses have the top bit set. Additionally + * the reset of the top 3 nibbles is always 0. + * + * Because the debugfs interface uses signed offsets and shifts + * the address left by 3, we basically cannot use the top 4 bits + * of the 64-bit address, and thus cannot use the indirect bit. + * + * To deal with that, we support the indirect bit being in bit + * 4 (IBM notation) instead of bit 0 in this API, we do the + * conversion here. 
To leave room for further xscom address + * expansion, we only clear out the top byte + * + */ + if (reg & (1ull << 59)) + reg = (reg & ~(0xffull << 56)) | (1ull << 63); + return reg; +} + +static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) +{ + struct opal_scom_map *m = map; + int64_t rc; + + reg = opal_scom_unmangle(reg); + rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); + return opal_xscom_err_xlate(rc); +} + +static int opal_scom_write(scom_map_t map, u64 reg, u64 value) +{ + struct opal_scom_map *m = map; + int64_t rc; + + reg = opal_scom_unmangle(reg); + rc = opal_xscom_write(m->chip, m->addr + reg, value); + return opal_xscom_err_xlate(rc); +} + +static const struct scom_controller opal_scom_controller = { + .map = opal_scom_map, + .unmap = opal_scom_unmap, + .read = opal_scom_read, + .write = opal_scom_write +}; + +static int opal_xscom_init(void) +{ + if (firmware_has_feature(FW_FEATURE_OPALv3)) + scom_init(&opal_scom_controller); + return 0; +} +arch_initcall(opal_xscom_init); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 2911abe550f..1c798cd5537 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -13,15 +13,20 @@ #include <linux/types.h> #include <linux/of.h> +#include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/slab.h> +#include <linux/kobject.h> #include <asm/opal.h> #include <asm/firmware.h> #include "powernv.h" +/* /sys/firmware/opal */ +struct kobject *opal_kobj; + struct opal { u64 base; u64 entry; @@ -77,6 +82,7 @@ int __init early_init_dt_scan_opal(unsigned long node, static int __init opal_register_exception_handlers(void) { +#ifdef __BIG_ENDIAN__ u64 glue; if (!(powerpc_firmware_features & FW_FEATURE_OPAL)) @@ -94,6 +100,7 @@ static int __init opal_register_exception_handlers(void) 0, glue); glue += 128; opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue); +#endif return 0; } @@ -164,27 +171,28 @@ void opal_notifier_disable(void) int opal_get_chars(uint32_t vtermno, char *buf, int count) { - s64 len, rc; - u64 evt; + s64 rc; + __be64 evt, len; if (!opal.entry) return -ENODEV; opal_poll_events(&evt); - if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0) + if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0) return 0; - len = count; - rc = opal_console_read(vtermno, &len, buf); + len = cpu_to_be64(count); + rc = opal_console_read(vtermno, &len, buf); if (rc == OPAL_SUCCESS) - return len; + return be64_to_cpu(len); return 0; } int opal_put_chars(uint32_t vtermno, const char *data, int total_len) { int written = 0; + __be64 olen; s64 len, rc; unsigned long flags; - u64 evt; + __be64 evt; if (!opal.entry) return -ENODEV; @@ -199,13 +207,14 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) */ spin_lock_irqsave(&opal_write_lock, flags); if (firmware_has_feature(FW_FEATURE_OPALv2)) { - rc = opal_console_write_buffer_space(vtermno, &len); + rc = opal_console_write_buffer_space(vtermno, &olen); + len = be64_to_cpu(olen); if (rc || len < total_len) { spin_unlock_irqrestore(&opal_write_lock, flags); /* Closed -> drop characters */ if (rc) return total_len; - opal_poll_events(&evt); + opal_poll_events(NULL); return -EAGAIN; } } @@ -216,8 +225,9 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) rc = OPAL_BUSY; while(total_len > 0 && (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { - len = total_len; - rc = 
opal_console_write(vtermno, &len, data); + olen = cpu_to_be64(total_len); + rc = opal_console_write(vtermno, &olen, data); + len = be64_to_cpu(olen); /* Closed or other error drop */ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && @@ -237,7 +247,8 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) */ do opal_poll_events(&evt); - while(rc == OPAL_SUCCESS && (evt & OPAL_EVENT_CONSOLE_OUTPUT)); + while(rc == OPAL_SUCCESS && + (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT)); } spin_unlock_irqrestore(&opal_write_lock, flags); return written; @@ -360,7 +371,7 @@ int opal_machine_check(struct pt_regs *regs) static irqreturn_t opal_interrupt(int irq, void *data) { - uint64_t events; + __be64 events; opal_handle_interrupt(virq_to_hw(irq), &events); @@ -369,10 +380,21 @@ static irqreturn_t opal_interrupt(int irq, void *data) return IRQ_HANDLED; } +static int opal_sysfs_init(void) +{ + opal_kobj = kobject_create_and_add("opal", firmware_kobj); + if (!opal_kobj) { + pr_warn("kobject_create_and_add opal failed\n"); + return -ENOMEM; + } + + return 0; +} + static int __init opal_init(void) { struct device_node *np, *consoles; - const u32 *irqs; + const __be32 *irqs; int rc, i, irqlen; opal_node = of_find_node_by_path("/ibm,opal"); @@ -414,6 +436,14 @@ static int __init opal_init(void) " (0x%x)\n", rc, irq, hwirq); opal_irqs[i] = irq; } + + /* Create "opal" kobject under /sys/firmware */ + rc = opal_sysfs_init(); + if (rc == 0) { + /* Setup code update interface */ + opal_flash_init(); + } + return 0; } subsys_initcall(opal_init); diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 74a5a5773b1..084cdfa4068 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -70,6 +70,16 @@ define_pe_printk_level(pe_err, KERN_ERR); define_pe_printk_level(pe_warn, KERN_WARNING); define_pe_printk_level(pe_info, KERN_INFO); +/* + * stdcix is only supposed to be used in hypervisor real mode as per + * the architecture spec + */ +static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) +{ + __asm__ __volatile__("stdcix %0,0,%1" + : : "r" (val), "r" (paddr) : "memory"); +} + static int pnv_ioda_alloc_pe(struct pnv_phb *phb) { unsigned long pe; @@ -153,13 +163,23 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) rid_end = pe->rid + 1; } - /* Associate PE in PELT */ + /* + * Associate PE in PELT. We need add the PE into the + * corresponding PELT-V as well. Otherwise, the error + * originated from the PE might contribute to other + * PEs. + */ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, bcomp, dcomp, fcomp, OPAL_MAP_PE); if (rc) { pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); return -ENXIO; } + + rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, + pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); + if (rc) + pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc); opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); @@ -454,10 +474,13 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) } } -static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl, - u64 *startp, u64 *endp) +static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe, + struct iommu_table *tbl, + __be64 *startp, __be64 *endp, bool rm) { - u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; + __be64 __iomem *invalidate = rm ? 
+ (__be64 __iomem *)pe->tce_inval_reg_phys : + (__be64 __iomem *)tbl->it_index; unsigned long start, end, inc; start = __pa(startp); @@ -484,7 +507,10 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl, mb(); /* Ensure above stores are visible */ while (start <= end) { - __raw_writeq(start, invalidate); + if (rm) + __raw_rm_writeq(cpu_to_be64(start), invalidate); + else + __raw_writeq(cpu_to_be64(start), invalidate); start += inc; } @@ -496,10 +522,12 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl, static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe, struct iommu_table *tbl, - u64 *startp, u64 *endp) + __be64 *startp, __be64 *endp, bool rm) { unsigned long start, end, inc; - u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; + __be64 __iomem *invalidate = rm ? + (__be64 __iomem *)pe->tce_inval_reg_phys : + (__be64 __iomem *)tbl->it_index; /* We'll invalidate DMA address in PE scope */ start = 0x2ul << 60; @@ -515,22 +543,25 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe, mb(); while (start <= end) { - __raw_writeq(start, invalidate); + if (rm) + __raw_rm_writeq(cpu_to_be64(start), invalidate); + else + __raw_writeq(cpu_to_be64(start), invalidate); start += inc; } } void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, - u64 *startp, u64 *endp) + __be64 *startp, __be64 *endp, bool rm) { struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe, tce32_table); struct pnv_phb *phb = pe->phb; if (phb->type == PNV_PHB_IODA1) - pnv_pci_ioda1_tce_invalidate(tbl, startp, endp); + pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm); else - pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp); + pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm); } static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, @@ -603,7 +634,9 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, * bus number, print that out instead. */ tbl->it_busno = 0; - tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8); + pe->tce_inval_reg_phys = be64_to_cpup(swinvp); + tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys, + 8); tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE | TCE_PCI_SWINV_PAIR; } @@ -681,7 +714,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, * bus number, print that out instead. */ tbl->it_busno = 0; - tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8); + pe->tce_inval_reg_phys = be64_to_cpup(swinvp); + tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys, + 8); tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; } iommu_init_table(tbl, phb->hose->node); @@ -786,8 +821,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, struct irq_data *idata; struct irq_chip *ichip; unsigned int xive_num = hwirq - phb->msi_base; - uint64_t addr64; - uint32_t addr32, data; + __be32 data; int rc; /* No PE assigned ? bail out ... no MSI for you ! 
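In the pnv_pci_ioda{1,2}_tce_invalidate() hunks above, the new rm flag selects how the TCE invalidate register is reached: via its physical address with the cache-inhibited __raw_rm_writeq() when running in hypervisor real mode, or via the ioremap()ed address kept in tbl->it_index otherwise, and either way the value is byte-swapped with cpu_to_be64() before the raw store. A rough user-space sketch of that selection, with plain variables and stores standing in for the MMIO register and the kernel accessors:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t host_to_be64(uint64_t v)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return __builtin_bswap64(v);
#else
        return v;
#endif
}

/* Two plain variables stand in for the same TCE kill register seen
 * through its physical (real-mode) and ioremap()ed (virtual) mapping. */
static uint64_t reg_via_phys, reg_via_virt;

static void tce_kill(uint64_t addr, bool rm)
{
        uint64_t *invalidate = rm ? &reg_via_phys : &reg_via_virt;

        /* the hunk above swaps to big-endian before the raw store */
        *invalidate = host_to_be64(addr);
}

int main(void)
{
        tce_kill(0x1000, false);        /* normal (virtual mode) path */
        tce_kill(0x2000, true);         /* hypervisor real mode path */
        printf("virt mapping: 0x%016llx\nreal mapping: 0x%016llx\n",
               (unsigned long long)reg_via_virt,
               (unsigned long long)reg_via_phys);
        return 0;
}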
*/ @@ -811,6 +845,8 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, } if (is_64) { + __be64 addr64; + rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, &addr64, &data); if (rc) { @@ -818,9 +854,11 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, pci_name(dev), rc); return -EIO; } - msg->address_hi = addr64 >> 32; - msg->address_lo = addr64 & 0xfffffffful; + msg->address_hi = be64_to_cpu(addr64) >> 32; + msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful; } else { + __be32 addr32; + rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, &addr32, &data); if (rc) { @@ -829,9 +867,9 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, return -EIO; } msg->address_hi = 0; - msg->address_lo = addr32; + msg->address_lo = be32_to_cpu(addr32); } - msg->data = data; + msg->data = be32_to_cpu(data); /* * Change the IRQ chip for the MSI interrupts on PHB3. @@ -1106,8 +1144,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, struct pci_controller *hose; struct pnv_phb *phb; unsigned long size, m32map_off, iomap_off, pemap_off; - const u64 *prop64; - const u32 *prop32; + const __be64 *prop64; + const __be32 *prop32; int len; u64 phb_id; void *aux; @@ -1142,8 +1180,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, spin_lock_init(&phb->lock); prop32 = of_get_property(np, "bus-range", &len); if (prop32 && len == 8) { - hose->first_busno = prop32[0]; - hose->last_busno = prop32[1]; + hose->first_busno = be32_to_cpu(prop32[0]); + hose->last_busno = be32_to_cpu(prop32[1]); } else { pr_warn(" Broken <bus-range> on %s\n", np->full_name); hose->first_busno = 0; @@ -1171,12 +1209,13 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, pr_err(" Failed to map registers !\n"); /* Initialize more IODA stuff */ + phb->ioda.total_pe = 1; prop32 = of_get_property(np, "ibm,opal-num-pes", NULL); - if (!prop32) - phb->ioda.total_pe = 1; - else - phb->ioda.total_pe = *prop32; - + if (prop32) + phb->ioda.total_pe = be32_to_cpup(prop32); + prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL); + if (prop32) + phb->ioda.reserved_pe = be32_to_cpup(prop32); phb->ioda.m32_size = resource_size(&hose->mem_resources[0]); /* FW Has already off top 64k of M32 space (MSI space) */ phb->ioda.m32_size += 0x10000; @@ -1205,7 +1244,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, if (phb->type == PNV_PHB_IODA1) phb->ioda.io_segmap = aux + iomap_off; phb->ioda.pe_array = aux + pemap_off; - set_bit(0, phb->ioda.pe_alloc); + set_bit(phb->ioda.reserved_pe, phb->ioda.pe_alloc); INIT_LIST_HEAD(&phb->ioda.pe_dma_list); INIT_LIST_HEAD(&phb->ioda.pe_list); @@ -1230,8 +1269,10 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, segment_size); #endif - pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n", + pr_info(" %d (%d) PE's M32: 0x%x [segment=0x%x]" + " IO: 0x%x [segment=0x%x]\n", phb->ioda.total_pe, + phb->ioda.reserved_pe, phb->ioda.m32_size, phb->ioda.m32_segsize, phb->ioda.io_size, phb->ioda.io_segsize); @@ -1268,13 +1309,6 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); if (rc) pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc); - - /* - * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset - * has cleared the RTT which has the same effect - */ - if (ioda_type == PNV_PHB_IODA1) - opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE); } void 
__init pnv_pci_init_ioda2_phb(struct device_node *np) @@ -1285,7 +1319,7 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np) void __init pnv_pci_init_ioda_hub(struct device_node *np) { struct device_node *phbn; - const u64 *prop64; + const __be64 *prop64; u64 hub_id; pr_info("Probing IODA IO-Hub %s\n", np->full_name); diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c index b68db6325c1..f8b4bd8afb2 100644 --- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c +++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c @@ -99,7 +99,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id, void *tce_mem, u64 tce_size) { struct pnv_phb *phb; - const u64 *prop64; + const __be64 *prop64; u64 phb_id; int64_t rc; static int primary = 1; @@ -178,7 +178,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id, void __init pnv_pci_init_p5ioc2_hub(struct device_node *np) { struct device_node *phbn; - const u64 *prop64; + const __be64 *prop64; u64 hub_id; void *tce_mem; uint64_t tce_per_phb; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index a28d3b5e639..4eb33a9ed53 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -236,17 +236,21 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb, { s64 rc; u8 fstate; - u16 pcierr; + __be16 pcierr; u32 pe_no; /* * Get the PE#. During the PCI probe stage, we might not * setup that yet. So all ER errors should be mapped to - * PE#0 + * reserved PE. */ pe_no = PCI_DN(dn)->pe_number; - if (pe_no == IODA_INVALID_PE) - pe_no = 0; + if (pe_no == IODA_INVALID_PE) { + if (phb->type == PNV_PHB_P5IOC2) + pe_no = 0; + else + pe_no = phb->ioda.reserved_pe; + } /* Read freeze status */ rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr, @@ -283,16 +287,16 @@ int pnv_pci_cfg_read(struct device_node *dn, break; } case 2: { - u16 v16; + __be16 v16; rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where, &v16); - *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff; + *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff; break; } case 4: { - u32 v32; + __be32 v32; rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32); - *val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff; + *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff; break; } default: @@ -401,10 +405,10 @@ struct pci_ops pnv_pci_ops = { static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + struct dma_attrs *attrs, bool rm) { u64 proto_tce; - u64 *tcep, *tces; + __be64 *tcep, *tces; u64 rpn; proto_tce = TCE_PCI_READ; // Read allowed @@ -412,33 +416,48 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; - tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; + tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset; rpn = __pa(uaddr) >> TCE_SHIFT; while (npages--) - *(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT); + *(tcep++) = cpu_to_be64(proto_tce | (rpn++ << TCE_RPN_SHIFT)); /* Some implementations won't cache invalid TCEs and thus may not * need that flush. 
We'll probably turn it_type into a bit mask * of flags if that becomes the case */ if (tbl->it_type & TCE_PCI_SWINV_CREATE) - pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); + pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm); return 0; } -static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) +static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages, + unsigned long uaddr, + enum dma_data_direction direction, + struct dma_attrs *attrs) { - u64 *tcep, *tces; + return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, + false); +} - tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset; +static void pnv_tce_free(struct iommu_table *tbl, long index, long npages, + bool rm) +{ + __be64 *tcep, *tces; + + tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset; while (npages--) - *(tcep++) = 0; + *(tcep++) = cpu_to_be64(0); if (tbl->it_type & TCE_PCI_SWINV_FREE) - pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); + pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm); +} + +static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages) +{ + pnv_tce_free(tbl, index, npages, false); } static unsigned long pnv_tce_get(struct iommu_table *tbl, long index) @@ -446,6 +465,19 @@ static unsigned long pnv_tce_get(struct iommu_table *tbl, long index) return ((u64 *)tbl->it_base)[index - tbl->it_offset]; } +static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages, + unsigned long uaddr, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true); +} + +static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages) +{ + pnv_tce_free(tbl, index, npages, true); +} + void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset) @@ -484,8 +516,8 @@ static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose) swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info", NULL); if (swinvp) { - tbl->it_busno = swinvp[1]; - tbl->it_index = (unsigned long)ioremap(swinvp[0], 8); + tbl->it_busno = be64_to_cpu(swinvp[1]); + tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8); tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE; } return tbl; @@ -610,8 +642,10 @@ void __init pnv_pci_init(void) /* Configure IOMMU DMA hooks */ ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup; - ppc_md.tce_build = pnv_tce_build; - ppc_md.tce_free = pnv_tce_free; + ppc_md.tce_build = pnv_tce_build_vm; + ppc_md.tce_free = pnv_tce_free_vm; + ppc_md.tce_build_rm = pnv_tce_build_rm; + ppc_md.tce_free_rm = pnv_tce_free_rm; ppc_md.tce_get = pnv_tce_get; ppc_md.pci_probe_mode = pnv_pci_probe_mode; set_pci_dma_ops(&dma_iommu_ops); diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index d633c64e05a..911c24ef033 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -17,7 +17,7 @@ enum pnv_phb_model { PNV_PHB_MODEL_PHB3, }; -#define PNV_PCI_DIAG_BUF_SIZE 4096 +#define PNV_PCI_DIAG_BUF_SIZE 8192 #define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */ #define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */ #define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */ @@ -52,6 +52,7 @@ struct pnv_ioda_pe { int tce32_seg; int tce32_segcount; struct iommu_table tce32_table; + phys_addr_t tce_inval_reg_phys; /* XXX TODO: Add support for additional 64-bit iommus */ @@ -124,6 +125,7 @@ 
struct pnv_phb { struct { /* Global bridge info */ unsigned int total_pe; + unsigned int reserved_pe; unsigned int m32_size; unsigned int m32_segsize; unsigned int m32_pci_base; @@ -193,6 +195,6 @@ extern void pnv_pci_init_p5ioc2_hub(struct device_node *np); extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, - u64 *startp, u64 *endp); + __be64 *startp, __be64 *endp, bool rm); #endif /* __POWERNV_PCI_H */ diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c new file mode 100644 index 00000000000..8844628915d --- /dev/null +++ b/arch/powerpc/platforms/powernv/rng.c @@ -0,0 +1,125 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define pr_fmt(fmt) "powernv-rng: " fmt + +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <asm/archrandom.h> +#include <asm/io.h> +#include <asm/prom.h> +#include <asm/machdep.h> + + +struct powernv_rng { + void __iomem *regs; + unsigned long mask; +}; + +static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng); + + +static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val) +{ + unsigned long parity; + + /* Calculate the parity of the value */ + asm ("popcntd %0,%1" : "=r" (parity) : "r" (val)); + + /* xor our value with the previous mask */ + val ^= rng->mask; + + /* update the mask based on the parity of this value */ + rng->mask = (rng->mask << 1) | (parity & 1); + + return val; +} + +int powernv_get_random_long(unsigned long *v) +{ + struct powernv_rng *rng; + + rng = get_cpu_var(powernv_rng); + + *v = rng_whiten(rng, in_be64(rng->regs)); + + put_cpu_var(rng); + + return 1; +} +EXPORT_SYMBOL_GPL(powernv_get_random_long); + +static __init void rng_init_per_cpu(struct powernv_rng *rng, + struct device_node *dn) +{ + int chip_id, cpu; + + chip_id = of_get_ibm_chip_id(dn); + if (chip_id == -1) + pr_warn("No ibm,chip-id found for %s.\n", dn->full_name); + + for_each_possible_cpu(cpu) { + if (per_cpu(powernv_rng, cpu) == NULL || + cpu_to_chip_id(cpu) == chip_id) { + per_cpu(powernv_rng, cpu) = rng; + } + } +} + +static __init int rng_create(struct device_node *dn) +{ + struct powernv_rng *rng; + unsigned long val; + + rng = kzalloc(sizeof(*rng), GFP_KERNEL); + if (!rng) + return -ENOMEM; + + rng->regs = of_iomap(dn, 0); + if (!rng->regs) { + kfree(rng); + return -ENXIO; + } + + val = in_be64(rng->regs); + rng->mask = val; + + rng_init_per_cpu(rng, dn); + + pr_info_once("Registering arch random hook.\n"); + + ppc_md.get_random_long = powernv_get_random_long; + + return 0; +} + +static __init int rng_init(void) +{ + struct device_node *dn; + int rc; + + for_each_compatible_node(dn, NULL, "ibm,power-rng") { + rc = rng_create(dn); + if (rc) { + pr_err("Failed creating rng for %s (%d).\n", + dn->full_name, rc); + continue; + } + + /* Create devices for hwrng driver */ + of_platform_device_create(dn, NULL, NULL); + } + + return 0; +} +subsys_initcall(rng_init); diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index e239dcfa224..19884b2a51b 100644 --- 
a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -23,6 +23,7 @@ #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/of.h> +#include <linux/of_fdt.h> #include <linux/interrupt.h> #include <linux/bug.h> diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 6c61ec5ee91..fbccac9cd2d 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG obj-y := lpar.o hvCall.o nvram.o reconfig.o \ setup.o iommu.o event_sources.o ras.o \ - firmware.o power.o dlpar.o mobility.o + firmware.o power.o dlpar.o mobility.o rng.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SCANLOG) += scanlog.o obj-$(CONFIG_EEH) += eeh_pseries.o diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 7cfdaae1721..a8fe5aa3d34 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -404,46 +404,38 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) unsigned long drc_index; int rc; - cpu_hotplug_driver_lock(); rc = strict_strtoul(buf, 0, &drc_index); - if (rc) { - rc = -EINVAL; - goto out; - } + if (rc) + return -EINVAL; parent = of_find_node_by_path("/cpus"); - if (!parent) { - rc = -ENODEV; - goto out; - } + if (!parent) + return -ENODEV; dn = dlpar_configure_connector(drc_index, parent); - if (!dn) { - rc = -EINVAL; - goto out; - } + if (!dn) + return -EINVAL; of_node_put(parent); rc = dlpar_acquire_drc(drc_index); if (rc) { dlpar_free_cc_nodes(dn); - rc = -EINVAL; - goto out; + return -EINVAL; } rc = dlpar_attach_node(dn); if (rc) { dlpar_release_drc(drc_index); dlpar_free_cc_nodes(dn); - goto out; + return rc; } rc = dlpar_online_cpu(dn); -out: - cpu_hotplug_driver_unlock(); + if (rc) + return rc; - return rc ? rc : count; + return count; } static int dlpar_offline_cpu(struct device_node *dn) @@ -516,30 +508,27 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count) return -EINVAL; } - cpu_hotplug_driver_lock(); rc = dlpar_offline_cpu(dn); if (rc) { of_node_put(dn); - rc = -EINVAL; - goto out; + return -EINVAL; } rc = dlpar_release_drc(*drc_index); if (rc) { of_node_put(dn); - goto out; + return rc; } rc = dlpar_detach_node(dn); if (rc) { dlpar_acquire_drc(*drc_index); - goto out; + return rc; } of_node_put(dn); -out: - cpu_hotplug_driver_unlock(); - return rc ? 
rc : count; + + return count; } static int __init pseries_dlpar_init(void) diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c index 2605c310166..18380e8f6df 100644 --- a/arch/powerpc/platforms/pseries/event_sources.c +++ b/arch/powerpc/platforms/pseries/event_sources.c @@ -25,7 +25,7 @@ void request_event_sources_irqs(struct device_node *np, const char *name) { int i, index, count = 0; - struct of_irq oirq; + struct of_phandle_args oirq; const u32 *opicprop; unsigned int opicplen; unsigned int virqs[16]; @@ -55,13 +55,11 @@ void request_event_sources_irqs(struct device_node *np, /* Else use normal interrupt tree parsing */ else { /* First try to do a proper OF tree parsing */ - for (index = 0; of_irq_map_one(np, index, &oirq) == 0; + for (index = 0; of_irq_parse_one(np, index, &oirq) == 0; index++) { if (count > 15) break; - virqs[count] = irq_create_of_mapping(oirq.controller, - oirq.specifier, - oirq.size); + virqs[count] = irq_create_of_mapping(&oirq); if (virqs[count] == NO_IRQ) { pr_err("event-sources: Unable to allocate " "interrupt number for %s\n", diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 9a432de363b..9590dbb756f 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -10,12 +10,14 @@ */ #include <linux/of.h> +#include <linux/of_address.h> #include <linux/memblock.h> #include <linux/vmalloc.h> #include <linux/memory.h> #include <asm/firmware.h> #include <asm/machdep.h> +#include <asm/prom.h> #include <asm/sparsemem.h> static unsigned long get_memblock_size(void) diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 0307901e413..f253361552a 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -52,7 +52,7 @@ static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, - u64 *startp, u64 *endp) + __be64 *startp, __be64 *endp) { u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; unsigned long start, end, inc; @@ -86,7 +86,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, struct dma_attrs *attrs) { u64 proto_tce; - u64 *tcep, *tces; + __be64 *tcep, *tces; u64 rpn; proto_tce = TCE_PCI_READ; // Read allowed @@ -94,12 +94,12 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, if (direction != DMA_TO_DEVICE) proto_tce |= TCE_PCI_WRITE; - tces = tcep = ((u64 *)tbl->it_base) + index; + tces = tcep = ((__be64 *)tbl->it_base) + index; while (npages--) { /* can't move this out since we might cross MEMBLOCK boundary */ rpn = __pa(uaddr) >> TCE_SHIFT; - *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; + *tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT); uaddr += TCE_PAGE_SIZE; tcep++; @@ -113,9 +113,9 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index, static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) { - u64 *tcep, *tces; + __be64 *tcep, *tces; - tces = tcep = ((u64 *)tbl->it_base) + index; + tces = tcep = ((__be64 *)tbl->it_base) + index; while (npages--) *(tcep++) = 0; @@ -126,11 +126,11 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) { - u64 *tcep; + __be64 *tcep; - tcep = ((u64 *)tbl->it_base) + index; + tcep = ((__be64 *)tbl->it_base) + index; - return *tcep; + return 
be64_to_cpu(*tcep); } static void tce_free_pSeriesLP(struct iommu_table*, long, long); @@ -177,7 +177,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, return ret; } -static DEFINE_PER_CPU(u64 *, tce_page); +static DEFINE_PER_CPU(__be64 *, tce_page); static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, @@ -186,7 +186,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, { u64 rc = 0; u64 proto_tce; - u64 *tcep; + __be64 *tcep; u64 rpn; long l, limit; long tcenum_start = tcenum, npages_start = npages; @@ -206,7 +206,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, * from iommu_alloc{,_sg}() */ if (!tcep) { - tcep = (u64 *)__get_free_page(GFP_ATOMIC); + tcep = (__be64 *)__get_free_page(GFP_ATOMIC); /* If allocation fails, fall back to the loop implementation */ if (!tcep) { local_irq_restore(flags); @@ -230,7 +230,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE); for (l = 0; l < limit; l++) { - tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; + tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT); rpn++; } @@ -329,16 +329,16 @@ struct direct_window { /* Dynamic DMA Window support */ struct ddw_query_response { - u32 windows_available; - u32 largest_available_block; - u32 page_size; - u32 migration_capable; + __be32 windows_available; + __be32 largest_available_block; + __be32 page_size; + __be32 migration_capable; }; struct ddw_create_response { - u32 liobn; - u32 addr_hi; - u32 addr_lo; + __be32 liobn; + __be32 addr_hi; + __be32 addr_lo; }; static LIST_HEAD(direct_window_list); @@ -392,7 +392,8 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, unsigned long num_pfn, const void *arg) { const struct dynamic_dma_window_prop *maprange = arg; - u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn; + u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn; + __be64 *tcep; u32 tce_shift; u64 rc = 0; long l, limit; @@ -401,7 +402,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, tcep = __get_cpu_var(tce_page); if (!tcep) { - tcep = (u64 *)__get_free_page(GFP_ATOMIC); + tcep = (__be64 *)__get_free_page(GFP_ATOMIC); if (!tcep) { local_irq_enable(); return -ENOMEM; @@ -435,7 +436,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, dma_offset = next + be64_to_cpu(maprange->dma_base); for (l = 0; l < limit; l++) { - tcep[l] = proto_tce | next; + tcep[l] = cpu_to_be64(proto_tce | next); next += tce_size; } @@ -780,7 +781,7 @@ static u64 find_existing_ddw(struct device_node *pdn) list_for_each_entry(window, &direct_window_list, list) { if (window->device == pdn) { direct64 = window->prop; - dma_addr = direct64->dma_base; + dma_addr = be64_to_cpu(direct64->dma_base); break; } } @@ -1045,11 +1046,11 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) dev_dbg(&dev->dev, "no free dynamic windows"); goto out_restore_window; } - if (query.page_size & 4) { + if (be32_to_cpu(query.page_size) & 4) { page_shift = 24; /* 16MB */ - } else if (query.page_size & 2) { + } else if (be32_to_cpu(query.page_size) & 2) { page_shift = 16; /* 64kB */ - } else if (query.page_size & 1) { + } else if (be32_to_cpu(query.page_size) & 1) { page_shift = 12; /* 4kB */ } else { dev_dbg(&dev->dev, "no supported direct page size in mask %x", @@ -1059,7 +1060,7 @@ static u64 enable_ddw(struct pci_dev *dev, 
struct device_node *pdn) /* verify the window * number of ptes will map the partition */ /* check largest block * page size > max memory hotplug addr */ max_addr = memory_hotplug_max(); - if (query.largest_available_block < (max_addr >> page_shift)) { + if (be32_to_cpu(query.largest_available_block) < (max_addr >> page_shift)) { dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u " "%llu-sized pages\n", max_addr, query.largest_available_block, 1ULL << page_shift); @@ -1085,7 +1086,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) if (ret != 0) goto out_free_prop; - ddwprop->liobn = cpu_to_be32(create.liobn); + ddwprop->liobn = create.liobn; ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2)); ddwprop->tce_shift = cpu_to_be32(page_shift); ddwprop->window_shift = cpu_to_be32(len); diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index d276cd3edd8..7bfaf58d466 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -31,7 +31,7 @@ #define NVRW_CNT 0x20 /* - * Set oops header version to distingush between old and new format header. + * Set oops header version to distinguish between old and new format header. * lnx,oops-log partition max size is 4000, header version > 4000 will * help in identifying new header. */ @@ -429,9 +429,6 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition loff_t p; int size; - /* Scan nvram for partitions */ - nvram_scan_partitions(); - /* Look for ours */ p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size); @@ -795,6 +792,9 @@ static int __init pseries_nvram_init_log_partitions(void) { int rc; + /* Scan nvram for partitions */ + nvram_scan_partitions(); + rc = pseries_nvram_init_os_partition(&rtas_log_partition); nvram_init_oops_partition(rc == 0); return 0; @@ -804,7 +804,7 @@ machine_arch_initcall(pseries, pseries_nvram_init_log_partitions); int __init pSeries_nvram_init(void) { struct device_node *nvram; - const unsigned int *nbytes_p; + const __be32 *nbytes_p; unsigned int proplen; nvram = of_find_node_by_type(NULL, "nvram"); @@ -817,7 +817,7 @@ int __init pSeries_nvram_init(void) return -EIO; } - nvram_size = *nbytes_p; + nvram_size = be32_to_cpup(nbytes_p); nvram_fetch = rtas_token("nvram-fetch"); nvram_store = rtas_token("nvram-store"); diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c new file mode 100644 index 00000000000..a702f1c0824 --- /dev/null +++ b/arch/powerpc/platforms/pseries/rng.c @@ -0,0 +1,44 @@ +/* + * Copyright 2013, Michael Ellerman, IBM Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
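The enable_ddw() hunk above now byte-swaps the ibm,query-pe-dma-window results before using them; the page-size mask is then decoded largest-first (bit value 4 selecting 16MB pages, 2 selecting 64kB, 1 selecting 4kB). A small stand-alone sketch of that decode, with example mask values:

#include <stdio.h>
#include <stdint.h>

/* Decode the DDW page-size mask the way enable_ddw() does above,
 * preferring the largest supported page size; returns 0 if none match. */
static int ddw_page_shift(uint32_t page_size_mask)
{
        if (page_size_mask & 4)
                return 24;      /* 16MB */
        if (page_size_mask & 2)
                return 16;      /* 64kB */
        if (page_size_mask & 1)
                return 12;      /* 4kB */
        return 0;
}

int main(void)
{
        uint32_t masks[] = { 0x7, 0x3, 0x1, 0x0 };
        unsigned int i;

        for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
                printf("mask 0x%x: page shift %d\n",
                       masks[i], ddw_page_shift(masks[i]));
        return 0;
}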
+ */ + +#define pr_fmt(fmt) "pseries-rng: " fmt + +#include <linux/kernel.h> +#include <linux/of.h> +#include <asm/archrandom.h> +#include <asm/machdep.h> + + +static int pseries_get_random_long(unsigned long *v) +{ + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + if (plpar_hcall(H_RANDOM, retbuf) == H_SUCCESS) { + *v = retbuf[0]; + return 1; + } + + return 0; +} + +static __init int rng_init(void) +{ + struct device_node *dn; + + dn = of_find_compatible_node(NULL, NULL, "ibm,random"); + if (!dn) + return -ENODEV; + + pr_info("Registering arch random hook.\n"); + + ppc_md.get_random_long = pseries_get_random_long; + + return 0; +} +subsys_initcall(rng_init); diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index 5f997e79d57..16a255255d3 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c @@ -106,7 +106,7 @@ static int pseries_prepare_late(void) atomic_set(&suspend_data.done, 0); atomic_set(&suspend_data.error, 0); suspend_data.complete = &suspend_work; - INIT_COMPLETION(suspend_work); + reinit_completion(&suspend_work); return 0; } diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c index b56b70aeb49..268bc899c1f 100644 --- a/arch/powerpc/platforms/wsp/scom_smp.c +++ b/arch/powerpc/platforms/wsp/scom_smp.c @@ -116,7 +116,14 @@ static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask) scom_write(scom, SCOM_RAMIC, cmd); - while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) { + for (;;) { + if (scom_read(scom, SCOM_RAMC, &val) != 0) { + pr_err("SCOM error on instruction 0x%08x, thread %d\n", + insn, thread); + return -1; + } + if (val & mask) + break; pr_devel("Waiting on RAMC = 0x%llx\n", val); if (++n == 3) { pr_err("RAMC timeout on instruction 0x%08x, thread %d\n", @@ -151,9 +158,7 @@ static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt, if (rc) return rc; - *out_gpr = scom_read(scom, SCOM_RAMD); - - return 0; + return scom_read(scom, SCOM_RAMD, out_gpr); } static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr) @@ -353,7 +358,10 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np) pr_devel("Bringing up CPU%d using SCOM...\n", lcpu); - pccr0 = scom_read(scom, SCOM_PCCR0); + if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) { + printk(KERN_ERR "XSCOM failure readng PCCR0 on CPU%d\n", lcpu); + return -1; + } scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG | SCOM_PCCR0_ENABLE_RAM); diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c index 4052e2259f3..8928507affe 100644 --- a/arch/powerpc/platforms/wsp/scom_wsp.c +++ b/arch/powerpc/platforms/wsp/scom_wsp.c @@ -50,18 +50,22 @@ static void wsp_scom_unmap(scom_map_t map) iounmap((void *)map); } -static u64 wsp_scom_read(scom_map_t map, u32 reg) +static int wsp_scom_read(scom_map_t map, u64 reg, u64 *value) { u64 __iomem *addr = (u64 __iomem *)map; - return in_be64(addr + reg); + *value = in_be64(addr + reg); + + return 0; } -static void wsp_scom_write(scom_map_t map, u32 reg, u64 value) +static int wsp_scom_write(scom_map_t map, u64 reg, u64 value) { u64 __iomem *addr = (u64 __iomem *)map; - return out_be64(addr + reg, value); + out_be64(addr + reg, value); + + return 0; } static const struct scom_controller wsp_scom_controller = { diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c index d25cc96c21b..ddb6efe8891 100644 --- 
a/arch/powerpc/platforms/wsp/wsp.c +++ b/arch/powerpc/platforms/wsp/wsp.c @@ -89,6 +89,7 @@ void wsp_halt(void) struct device_node *dn; struct device_node *mine; struct device_node *me; + int rc; me = of_get_cpu_node(smp_processor_id(), NULL); mine = scom_find_parent(me); @@ -101,15 +102,15 @@ void wsp_halt(void) /* read-modify-write it so the HW probe does not get * confused */ - val = scom_read(m, 0); - val |= 1; - scom_write(m, 0, val); + rc = scom_read(m, 0, &val); + if (rc == 0) + scom_write(m, 0, val | 1); scom_unmap(m); } m = scom_map(mine, 0, 1); - val = scom_read(m, 0); - val |= 1; - scom_write(m, 0, val); + rc = scom_read(m, 0, &val); + if (rc == 0) + scom_write(m, 0, val | 1); /* should never return */ scom_unmap(m); } diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index ab4cb547647..13ec968be4c 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig @@ -28,7 +28,7 @@ config PPC_SCOM config SCOM_DEBUGFS bool "Expose SCOM controllers via debugfs" - depends on PPC_SCOM + depends on PPC_SCOM && DEBUG_FS default n config GE_FPGA diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index 4dd534194ae..4f786957129 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -22,6 +22,7 @@ #include <linux/spinlock.h> #include <linux/export.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/slab.h> #include <asm/udbg.h> diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index 9cd0e60716f..b74085cea1a 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c @@ -19,6 +19,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/of.h> +#include <linux/of_address.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c index 0eb871cc343..06ac3c61b3d 100644 --- a/arch/powerpc/sysdev/fsl_gtm.c +++ b/arch/powerpc/sysdev/fsl_gtm.c @@ -19,6 +19,8 @@ #include <linux/list.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include <linux/slab.h> @@ -401,16 +403,15 @@ static int __init fsl_gtm_init(void) gtm->clock = *clock; for (i = 0; i < ARRAY_SIZE(gtm->timers); i++) { - int ret; - struct resource irq; + unsigned int irq; - ret = of_irq_to_resource(np, i, &irq); - if (ret == NO_IRQ) { + irq = irq_of_parse_and_map(np, i); + if (irq == NO_IRQ) { pr_err("%s: not enough interrupts specified\n", np->full_name); goto err; } - gtm->timers[i].irq = irq.start; + gtm->timers[i].irq = irq; gtm->timers[i].gtm = gtm; } diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index ccfb50ddfe3..4dfd61df8ab 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -40,12 +40,12 @@ static int fsl_pcie_bus_fixup, is_mpc83xx_pci; -static void quirk_fsl_pcie_header(struct pci_dev *dev) +static void quirk_fsl_pcie_early(struct pci_dev *dev) { u8 hdr_type; /* if we aren't a PCIe don't bother */ - if (!pci_find_capability(dev, PCI_CAP_ID_EXP)) + if (!pci_is_pcie(dev)) return; /* if we aren't in host mode don't bother */ @@ -562,7 +562,8 @@ no_bridge: } #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_pcie_header); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, + quirk_fsl_pcie_early); #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) 
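The wsp conversions above (wsp_scom_read(), a2_scom_ram(), wsp_halt()) follow the reworked scom_controller interface: reads now return a status code and hand the data back through an out parameter, so callers must check the return value before trusting or writing back the register contents. A user-space sketch of that calling convention, with a made-up stub in place of the real SCOM accessor:

#include <stdio.h>
#include <stdint.h>

/* Made-up stub with the shape of the new scom_read(): returns 0 on
 * success and hands the register contents back through *value. */
static int scom_read_stub(uint64_t reg, uint64_t *value)
{
        if (reg > 0xff)
                return -1;              /* pretend the access faulted */
        *value = 0x1000 + reg;          /* arbitrary register contents */
        return 0;
}

/* Read-modify-write in the style of the wsp_halt() hunk above: only
 * act on the value if the read reported success. */
static void set_bit0_if_readable(uint64_t reg)
{
        uint64_t val;

        if (scom_read_stub(reg, &val) == 0)
                printf("reg 0x%llx: 0x%llx becomes 0x%llx\n",
                       (unsigned long long)reg,
                       (unsigned long long)val,
                       (unsigned long long)(val | 1));
        else
                printf("reg 0x%llx: read failed, write skipped\n",
                       (unsigned long long)reg);
}

int main(void)
{
        set_bit0_if_readable(0x10);
        set_bit0_if_readable(0x1234);
        return 0;
}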
struct mpc83xx_pcie_priv { diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 592a0f8d527..8cf4aa0e3a2 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c @@ -18,6 +18,7 @@ #include <linux/suspend.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/of_address.h> #include <linux/of_platform.h> struct pmc_regs { diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index e2fb3171f41..95dd892e990 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -28,6 +28,8 @@ #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/delay.h> #include <linux/slab.h> diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index 14bd5221f28..00e224a1048 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c @@ -27,6 +27,7 @@ #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <linux/slab.h> diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h index c6d00736f07..4c5a19ef4f0 100644 --- a/arch/powerpc/sysdev/fsl_soc.h +++ b/arch/powerpc/sysdev/fsl_soc.h @@ -21,8 +21,6 @@ struct device_node; extern void fsl_rstcr_restart(char *cmd); -#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) - /* The different ports that the DIU can be connected to */ enum fsl_diu_monitor_port { FSL_DIU_PORT_DVI, /* DVI */ @@ -43,7 +41,6 @@ struct platform_diu_data_ops { }; extern struct platform_diu_data_ops diu_ops; -#endif void fsl_hv_restart(char *cmd); void fsl_hv_halt(void); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 1be54faf60d..0e166ed4cd1 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -535,7 +535,7 @@ static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase, mpic->fixups[irq].data = readl(base + 4) | 0x80000000; } } - + static void __init mpic_scan_ht_pics(struct mpic *mpic) { @@ -1088,8 +1088,14 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq, * is done here. */ if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) { + int cpu; + + preempt_disable(); + cpu = mpic_processor_id(mpic); + preempt_enable(); + mpic_set_vector(virq, hw); - mpic_set_destination(virq, mpic_processor_id(mpic)); + mpic_set_destination(virq, cpu); mpic_irq_set_priority(virq, 8); } @@ -1475,7 +1481,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, * as a default instead of the value read from the HW. 
*/ last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK) - >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT; + >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT; if (isu_size) last_irq = isu_size * MPIC_MAX_ISU - 1; of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq); @@ -1625,7 +1631,7 @@ void __init mpic_init(struct mpic *mpic) /* start with vector = source number, and masked */ u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); - + /* check if protected */ if (mpic->protected && test_bit(i, mpic->protected)) continue; @@ -1634,7 +1640,7 @@ void __init mpic_init(struct mpic *mpic) mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu); } } - + /* Init spurious vector */ mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec); diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index c75325865a8..2c9b52aa266 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -237,15 +237,13 @@ static int mpic_msgr_probe(struct platform_device *dev) raw_spin_lock_init(&msgr->lock); if (receive_mask & (1 << i)) { - struct resource irq; - - if (of_irq_to_resource(np, irq_index, &irq) == NO_IRQ) { + msgr->irq = irq_of_parse_and_map(np, irq_index); + if (msgr->irq == NO_IRQ) { dev_err(&dev->dev, "Missing interrupt specifier"); kfree(msgr); return -EFAULT; } - msgr->irq = irq.start; irq_index += 1; } else { msgr->irq = NO_IRQ; diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c index bbf342c8831..7dc39f35a4c 100644 --- a/arch/powerpc/sysdev/mpic_msi.c +++ b/arch/powerpc/sysdev/mpic_msi.c @@ -35,7 +35,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) const struct irq_domain_ops *ops = mpic->irqhost->ops; struct device_node *np; int flags, index, i; - struct of_irq oirq; + struct of_phandle_args oirq; pr_debug("mpic: found U3, guessing msi allocator setup\n"); @@ -63,9 +63,9 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) pr_debug("mpic: mapping hwirqs for %s\n", np->full_name); index = 0; - while (of_irq_map_one(np, index++, &oirq) == 0) { - ops->xlate(mpic->irqhost, NULL, oirq.specifier, - oirq.size, &hwirq, &flags); + while (of_irq_parse_one(np, index++, &oirq) == 0) { + ops->xlate(mpic->irqhost, NULL, oirq.args, + oirq.args_count, &hwirq, &flags); msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq); } } diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c index c06db92a4fb..22d7d57eead 100644 --- a/arch/powerpc/sysdev/mpic_timer.c +++ b/arch/powerpc/sysdev/mpic_timer.c @@ -19,7 +19,9 @@ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/syscore_ops.h> #include <sysdev/fsl_soc.h> #include <asm/io.h> diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c index 4a25c26f0bf..a3a8fad8537 100644 --- a/arch/powerpc/sysdev/mv64x60_dev.c +++ b/arch/powerpc/sysdev/mv64x60_dev.c @@ -228,7 +228,7 @@ static struct platform_device * __init mv64x60_eth_register_shared_pdev( if (id == 0) { pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1); - if (!pdev) + if (IS_ERR(pdev)) return pdev; } diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c index c9e803f3e26..6f54b54b132 100644 --- a/arch/powerpc/sysdev/of_rtc.c +++ b/arch/powerpc/sysdev/of_rtc.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/of.h> #include <linux/init.h> +#include <linux/of_address.h> 
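Several hunks above (event_sources.c, mpic_msi.c, mpic_msgr.c, fsl_gtm.c) convert from the old of_irq_map_one()/of_irq_to_resource() helpers to of_irq_parse_one() with a struct of_phandle_args plus irq_create_of_mapping(), or to the single-call irq_of_parse_and_map(). A condensed sketch of the post-conversion idiom, using the prototypes exactly as they appear in those hunks; this is kernel code, so it is a sketch only and not buildable stand-alone:

#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Walk every interrupt specifier of @np the way the converted drivers
 * above do.  For a single entry, the one-call irq_of_parse_and_map(np,
 * index) wraps both steps.
 */
static int map_node_irqs(struct device_node *np, unsigned int *virqs, int max)
{
        struct of_phandle_args oirq;
        int index, count = 0;

        for (index = 0; of_irq_parse_one(np, index, &oirq) == 0; index++) {
                unsigned int virq = irq_create_of_mapping(&oirq);

                if (virq == NO_IRQ || count >= max)
                        break;
                virqs[count++] = virq;
        }
        return count;
}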
#include <linux/of_platform.h> #include <linux/slab.h> diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c index 1b15f93479c..b7c43453236 100644 --- a/arch/powerpc/sysdev/ppc4xx_ocm.c +++ b/arch/powerpc/sysdev/ppc4xx_ocm.c @@ -26,6 +26,7 @@ #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/of.h> +#include <linux/of_address.h> #include <asm/rheap.h> #include <asm/ppc4xx_ocm.h> #include <linux/slab.h> diff --git a/arch/powerpc/sysdev/ppc4xx_soc.c b/arch/powerpc/sysdev/ppc4xx_soc.c index 0debcc31ad7..5c77c9ba33a 100644 --- a/arch/powerpc/sysdev/ppc4xx_soc.c +++ b/arch/powerpc/sysdev/ppc4xx_soc.c @@ -19,6 +19,7 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/dcr.h> diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c index 9193e12df69..6f5a8d177c4 100644 --- a/arch/powerpc/sysdev/scom.c +++ b/arch/powerpc/sysdev/scom.c @@ -25,6 +25,7 @@ #include <asm/debug.h> #include <asm/prom.h> #include <asm/scom.h> +#include <asm/uaccess.h> const struct scom_controller *scom_controller; EXPORT_SYMBOL_GPL(scom_controller); @@ -53,7 +54,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index) { struct device_node *parent; unsigned int cells, size; - const u32 *prop; + const __be32 *prop, *sprop; u64 reg, cnt; scom_map_t ret; @@ -62,12 +63,24 @@ scom_map_t scom_map_device(struct device_node *dev, int index) if (parent == NULL) return 0; - prop = of_get_property(parent, "#scom-cells", NULL); - cells = prop ? *prop : 1; - + /* + * We support "scom-reg" properties for adding scom registers + * to a random device-tree node with an explicit scom-parent + * + * We also support the simple "reg" property if the device is + * a direct child of a scom controller. + * + * In case both exist, "scom-reg" takes precedence. + */ prop = of_get_property(dev, "scom-reg", &size); + sprop = of_get_property(parent, "#scom-cells", NULL); + if (!prop && parent == dev->parent) { + prop = of_get_property(dev, "reg", &size); + sprop = of_get_property(parent, "#address-cells", NULL); + } if (!prop) - return 0; + return NULL; + cells = sprop ? 
be32_to_cpup(sprop) : 1; size >>= 2; if (index >= (size / (2*cells))) @@ -86,62 +99,89 @@ EXPORT_SYMBOL_GPL(scom_map_device); #ifdef CONFIG_SCOM_DEBUGFS struct scom_debug_entry { struct device_node *dn; - unsigned long addr; - scom_map_t map; - spinlock_t lock; - char name[8]; - struct debugfs_blob_wrapper blob; + struct debugfs_blob_wrapper path; + char name[16]; }; -static int scom_addr_set(void *data, u64 val) +static ssize_t scom_debug_read(struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos) { - struct scom_debug_entry *ent = data; - - ent->addr = 0; - scom_unmap(ent->map); - - ent->map = scom_map(ent->dn, val, 1); - if (scom_map_ok(ent->map)) - ent->addr = val; - else - return -EFAULT; - - return 0; -} - -static int scom_addr_get(void *data, u64 *val) -{ - struct scom_debug_entry *ent = data; - *val = ent->addr; - return 0; + struct scom_debug_entry *ent = filp->private_data; + u64 __user *ubuf64 = (u64 __user *)ubuf; + loff_t off = *ppos; + ssize_t done = 0; + u64 reg, reg_cnt, val; + scom_map_t map; + int rc; + + if (off < 0 || (off & 7) || (count & 7)) + return -EINVAL; + reg = off >> 3; + reg_cnt = count >> 3; + + map = scom_map(ent->dn, reg, reg_cnt); + if (!scom_map_ok(map)) + return -ENXIO; + + for (reg = 0; reg < reg_cnt; reg++) { + rc = scom_read(map, reg, &val); + if (!rc) + rc = put_user(val, ubuf64); + if (rc) { + if (!done) + done = rc; + break; + } + ubuf64++; + *ppos += 8; + done += 8; + } + scom_unmap(map); + return done; } -DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set, - "0x%llx\n"); -static int scom_val_set(void *data, u64 val) +static ssize_t scom_debug_write(struct file* filp, const char __user *ubuf, + size_t count, loff_t *ppos) { - struct scom_debug_entry *ent = data; - - if (!scom_map_ok(ent->map)) - return -EFAULT; - - scom_write(ent->map, 0, val); - - return 0; + struct scom_debug_entry *ent = filp->private_data; + u64 __user *ubuf64 = (u64 __user *)ubuf; + loff_t off = *ppos; + ssize_t done = 0; + u64 reg, reg_cnt, val; + scom_map_t map; + int rc; + + if (off < 0 || (off & 7) || (count & 7)) + return -EINVAL; + reg = off >> 3; + reg_cnt = count >> 3; + + map = scom_map(ent->dn, reg, reg_cnt); + if (!scom_map_ok(map)) + return -ENXIO; + + for (reg = 0; reg < reg_cnt; reg++) { + rc = get_user(val, ubuf64); + if (!rc) + rc = scom_write(map, reg, val); + if (rc) { + if (!done) + done = rc; + break; + } + ubuf64++; + done += 8; + } + scom_unmap(map); + return done; } -static int scom_val_get(void *data, u64 *val) -{ - struct scom_debug_entry *ent = data; - - if (!scom_map_ok(ent->map)) - return -EFAULT; - - *val = scom_read(ent->map, 0); - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set, - "0x%llx\n"); +static const struct file_operations scom_debug_fops = { + .read = scom_debug_read, + .write = scom_debug_write, + .open = simple_open, + .llseek = default_llseek, +}; static int scom_debug_init_one(struct dentry *root, struct device_node *dn, int i) @@ -154,11 +194,9 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn, return -ENOMEM; ent->dn = of_node_get(dn); - ent->map = SCOM_MAP_INVALID; - spin_lock_init(&ent->lock); - snprintf(ent->name, 8, "scom%d", i); - ent->blob.data = (void*) dn->full_name; - ent->blob.size = strlen(dn->full_name); + snprintf(ent->name, 16, "%08x", i); + ent->path.data = (void*) dn->full_name; + ent->path.size = strlen(dn->full_name); dir = debugfs_create_dir(ent->name, root); if (!dir) { @@ -167,9 +205,8 @@ static int 
scom_debug_init_one(struct dentry *root, struct device_node *dn, return -1; } - debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops); - debugfs_create_file("value", 0600, dir, ent, &scom_val_fops); - debugfs_create_blob("path", 0400, dir, &ent->blob); + debugfs_create_blob("devspec", 0400, dir, &ent->path); + debugfs_create_file("access", 0600, dir, ent, &scom_debug_fops); return 0; } @@ -185,8 +222,13 @@ static int scom_debug_init(void) return -1; i = rc = 0; - for_each_node_with_property(dn, "scom-controller") - rc |= scom_debug_init_one(root, dn, i++); + for_each_node_with_property(dn, "scom-controller") { + int id = of_get_ibm_chip_id(dn); + if (id == -1) + id = i; + rc |= scom_debug_init_one(root, dn, id); + i++; + } return rc; } diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index 39d72212655..3c6ee1b64e5 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c @@ -112,6 +112,7 @@ static int ics_opal_set_affinity(struct irq_data *d, bool force) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); + __be16 oserver; int16_t server; int8_t priority; int64_t rc; @@ -120,13 +121,13 @@ static int ics_opal_set_affinity(struct irq_data *d, if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return -1; - rc = opal_get_xive(hw_irq, &server, &priority); + rc = opal_get_xive(hw_irq, &oserver, &priority); if (rc != OPAL_SUCCESS) { - pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)" - " error %lld\n", - __func__, d->irq, hw_irq, server, rc); + pr_err("%s: opal_get_xive(irq=%d [hw 0x%x]) error %lld\n", + __func__, d->irq, hw_irq, rc); return -1; } + server = be16_to_cpu(oserver); wanted_server = xics_get_irq_server(d->irq, cpumask, 1); if (wanted_server < 0) { @@ -181,7 +182,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq) { unsigned int hw_irq = (unsigned int)virq_to_hw(virq); int64_t rc; - int16_t server; + __be16 server; int8_t priority; if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)) @@ -201,7 +202,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq) static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec) { int64_t rc; - int16_t server; + __be16 server; int8_t priority; /* Check if HAL knows about this interrupt */ @@ -215,14 +216,14 @@ static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec) static long ics_opal_get_server(struct ics *ics, unsigned long vec) { int64_t rc; - int16_t server; + __be16 server; int8_t priority; /* Check if HAL knows about this interrupt */ rc = opal_get_xive(vec, &server, &priority); if (rc != OPAL_SUCCESS) return -1; - return ics_opal_unmangle_server(server); + return ics_opal_unmangle_server(be16_to_cpu(server)); } int __init ics_opal_init(void) diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 8d73c3c0bee..83f943a8e0d 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c @@ -23,6 +23,8 @@ #include <linux/kernel.h> #include <linux/irq.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/i8259.h> diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index f75d7e51792..314fced4fc1 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -141,7 +141,6 @@ config S390 select OLD_SIGACTION select OLD_SIGSUSPEND3 select SYSCTL_EXCEPTION_TRACE - select USE_GENERIC_SMP_HELPERS if SMP select VIRT_CPU_ACCOUNTING select VIRT_TO_BUS diff --git 
a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index e87ecaa2c56..d5bc3750616 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -38,13 +38,6 @@ struct sca_block { struct sca_entry cpu[64]; } __attribute__((packed)); -#define KVM_NR_PAGE_SIZES 2 -#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8) -#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) -#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) -#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) -#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) - #define CPUSTAT_STOPPED 0x80000000 #define CPUSTAT_WAIT 0x10000000 #define CPUSTAT_ECALL_PEND 0x08000000 @@ -220,7 +213,6 @@ struct kvm_s390_interrupt_info { /* for local_interrupt.action_flags */ #define ACTION_STORE_ON_STOP (1<<0) #define ACTION_STOP_ON_STOP (1<<1) -#define ACTION_RELOADVCPU_ON_STOP (1<<2) struct kvm_s390_local_interrupt { spinlock_t lock; diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 92494494692..c286c2e868f 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -82,4 +82,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 5a3ab5c191f..6e244297840 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -49,7 +49,7 @@ typedef struct __u32 gprs_high[NUM_GPRS]; } rt_sigframe32; -int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err; diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 59a9c35c459..bc71a7b95af 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -680,7 +680,7 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accouting + * we can also use npre/npostfault count for accounting * these specific fault cases. */ kprobes_inc_nmissed_count(p); diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 7845e15a17d..b89b59158b9 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -50,7 +50,7 @@ void *module_alloc(unsigned long size) if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL, -1, + GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); } #endif diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 3a74d8af0d6..78d967f180f 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -107,14 +107,13 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) { - int ret, idx; + int ret; /* No virtio-ccw notification? Get out quickly. 
*/ if (!vcpu->kvm->arch.css_support || (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY)) return -EOPNOTSUPP; - idx = srcu_read_lock(&vcpu->kvm->srcu); /* * The layout is as follows: * - gpr 2 contains the subchannel id (passed as addr) @@ -125,7 +124,6 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) vcpu->run->s.regs.gprs[2], 8, &vcpu->run->s.regs.gprs[3], vcpu->run->s.regs.gprs[4]); - srcu_read_unlock(&vcpu->kvm->srcu, idx); /* * Return cookie in gpr 2, but don't overwrite the register if the diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 99d789e8a01..374a439ccc6 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -18,20 +18,27 @@ #include <asm/uaccess.h> #include "kvm-s390.h" +/* Convert real to absolute address by applying the prefix of the CPU */ +static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu, + unsigned long gaddr) +{ + unsigned long prefix = vcpu->arch.sie_block->prefix; + if (gaddr < 2 * PAGE_SIZE) + gaddr += prefix; + else if (gaddr >= prefix && gaddr < prefix + 2 * PAGE_SIZE) + gaddr -= prefix; + return gaddr; +} + static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu, void __user *gptr, int prefixing) { - unsigned long prefix = vcpu->arch.sie_block->prefix; unsigned long gaddr = (unsigned long) gptr; unsigned long uaddr; - if (prefixing) { - if (gaddr < 2 * PAGE_SIZE) - gaddr += prefix; - else if ((gaddr >= prefix) && (gaddr < prefix + 2 * PAGE_SIZE)) - gaddr -= prefix; - } + if (prefixing) + gaddr = kvm_s390_real_to_abs(vcpu, gaddr); uaddr = gmap_fault(gaddr, vcpu->arch.gmap); if (IS_ERR_VALUE(uaddr)) uaddr = -EFAULT; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 5ee56e5acc2..5ddbbde6f65 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -62,12 +62,6 @@ static int handle_stop(struct kvm_vcpu *vcpu) trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); - if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) { - vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP; - rc = SIE_INTERCEPT_RERUNVCPU; - vcpu->run->exit_reason = KVM_EXIT_INTR; - } - if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 7f1f7ac5cf7..5f79d2d79ca 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -436,6 +436,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); no_timer: + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); spin_lock(&vcpu->arch.local_int.float_int->lock); spin_lock_bh(&vcpu->arch.local_int.lock); add_wait_queue(&vcpu->wq, &wait); @@ -455,6 +456,8 @@ no_timer: remove_wait_queue(&vcpu->wq, &wait); spin_unlock_bh(&vcpu->arch.local_int.lock); spin_unlock(&vcpu->arch.local_int.float_int->lock); + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); return 0; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ed8064cb5c4..569494e01ec 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -695,9 +695,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) return 0; } -static int __vcpu_run(struct kvm_vcpu *vcpu) +static int vcpu_pre_run(struct kvm_vcpu *vcpu) { - int rc; + int rc, cpuflags; 
memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); @@ -715,28 +715,24 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) return rc; vcpu->arch.sie_block->icptcode = 0; - VCPU_EVENT(vcpu, 6, "entering sie flags %x", - atomic_read(&vcpu->arch.sie_block->cpuflags)); - trace_kvm_s390_sie_enter(vcpu, - atomic_read(&vcpu->arch.sie_block->cpuflags)); + cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); + VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); + trace_kvm_s390_sie_enter(vcpu, cpuflags); - /* - * As PF_VCPU will be used in fault handler, between guest_enter - * and guest_exit should be no uaccess. - */ - preempt_disable(); - kvm_guest_enter(); - preempt_enable(); - rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); - kvm_guest_exit(); + return 0; +} + +static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) +{ + int rc; VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", vcpu->arch.sie_block->icptcode); trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); - if (rc > 0) + if (exit_reason >= 0) { rc = 0; - if (rc < 0) { + } else { if (kvm_is_ucontrol(vcpu->kvm)) { rc = SIE_INTERCEPT_UCONTROL; } else { @@ -747,6 +743,49 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) } memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); + + if (rc == 0) { + if (kvm_is_ucontrol(vcpu->kvm)) + rc = -EOPNOTSUPP; + else + rc = kvm_handle_sie_intercept(vcpu); + } + + return rc; +} + +static int __vcpu_run(struct kvm_vcpu *vcpu) +{ + int rc, exit_reason; + + /* + * We try to hold kvm->srcu during most of vcpu_run (except when run- + * ning the guest), so that memslots (and other stuff) are protected + */ + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + + do { + rc = vcpu_pre_run(vcpu); + if (rc) + break; + + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + /* + * As PF_VCPU will be used in fault handler, between + * guest_enter and guest_exit should be no uaccess. 
+ */ + preempt_disable(); + kvm_guest_enter(); + preempt_enable(); + exit_reason = sie64a(vcpu->arch.sie_block, + vcpu->run->s.regs.gprs); + kvm_guest_exit(); + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + + rc = vcpu_post_run(vcpu, exit_reason); + } while (!signal_pending(current) && !rc); + + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); return rc; } @@ -755,7 +794,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int rc; sigset_t sigsaved; -rerun_vcpu: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); @@ -788,19 +826,7 @@ rerun_vcpu: } might_fault(); - - do { - rc = __vcpu_run(vcpu); - if (rc) - break; - if (kvm_is_ucontrol(vcpu->kvm)) - rc = -EOPNOTSUPP; - else - rc = kvm_handle_sie_intercept(vcpu); - } while (!signal_pending(current) && !rc); - - if (rc == SIE_INTERCEPT_RERUNVCPU) - goto rerun_vcpu; + rc = __vcpu_run(vcpu); if (signal_pending(current) && !rc) { kvm_run->exit_reason = KVM_EXIT_INTR; @@ -958,6 +984,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; + int idx; long r; switch (ioctl) { @@ -971,7 +998,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, break; } case KVM_S390_STORE_STATUS: + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_s390_vcpu_store_status(vcpu, arg); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; case KVM_S390_SET_INITIAL_PSW: { psw_t psw; @@ -1067,12 +1096,13 @@ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { } -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) { return 0; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index dc99f1ca426..b44912a3294 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -28,8 +28,7 @@ typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); extern unsigned long *vfacilities; /* negativ values are error codes, positive values for internal conditions */ -#define SIE_INTERCEPT_RERUNVCPU (1<<0) -#define SIE_INTERCEPT_UCONTROL (1<<1) +#define SIE_INTERCEPT_UCONTROL (1<<0) int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ @@ -91,8 +90,10 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) { - *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20; - *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; + if (r1) + *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20; + if (r2) + *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; } static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 59200ee275e..2440602e6df 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -30,6 +30,38 @@ #include "kvm-s390.h" #include "trace.h" +/* Handle SCK (SET CLOCK) interception */ +static int handle_set_clock(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu *cpup; + s64 hostclk, val; + u64 op2; + int i; + + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + + op2 = kvm_s390_get_base_disp_s(vcpu); + if (op2 & 
7) /* Operand must be on a doubleword boundary */ + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + if (get_guest(vcpu, val, (u64 __user *) op2)) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + + if (store_tod_clock(&hostclk)) { + kvm_s390_set_psw_cc(vcpu, 3); + return 0; + } + val = (val - hostclk) & ~0x3fUL; + + mutex_lock(&vcpu->kvm->lock); + kvm_for_each_vcpu(i, cpup, vcpu->kvm) + cpup->arch.sie_block->epoch = val; + mutex_unlock(&vcpu->kvm->lock); + + kvm_s390_set_psw_cc(vcpu, 0); + return 0; +} + static int handle_set_prefix(struct kvm_vcpu *vcpu) { u64 operand2; @@ -128,6 +160,33 @@ static int handle_skey(struct kvm_vcpu *vcpu) return 0; } +static int handle_test_block(struct kvm_vcpu *vcpu) +{ + unsigned long hva; + gpa_t addr; + int reg2; + + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + + kvm_s390_get_regs_rre(vcpu, NULL, ®2); + addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; + addr = kvm_s390_real_to_abs(vcpu, addr); + + hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr)); + if (kvm_is_error_hva(hva)) + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + /* + * We don't expect errors on modern systems, and do not care + * about storage keys (yet), so let's just clear the page. + */ + if (clear_user((void __user *)hva, PAGE_SIZE) != 0) + return -EFAULT; + kvm_s390_set_psw_cc(vcpu, 0); + vcpu->run->s.regs.gprs[0] = 0; + return 0; +} + static int handle_tpi(struct kvm_vcpu *vcpu) { struct kvm_s390_interrupt_info *inti; @@ -438,12 +497,14 @@ out_exception: static const intercept_handler_t b2_handlers[256] = { [0x02] = handle_stidp, + [0x04] = handle_set_clock, [0x10] = handle_set_prefix, [0x11] = handle_store_prefix, [0x12] = handle_store_cpu_address, [0x29] = handle_skey, [0x2a] = handle_skey, [0x2b] = handle_skey, + [0x2c] = handle_test_block, [0x30] = handle_io_inst, [0x31] = handle_io_inst, [0x32] = handle_io_inst, diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 6bcb045d2bd..9b436c21195 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -64,6 +64,11 @@ static unsigned long mmap_rnd(void) return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; } +static unsigned long mmap_base_legacy(void) +{ + return TASK_UNMAPPED_BASE + mmap_rnd(); +} + static inline unsigned long mmap_base(void) { unsigned long gap = rlimit(RLIMIT_STACK); @@ -89,7 +94,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) * bit is set, or if the expected stack growth is unlimited: */ if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE; + mm->mmap_base = mmap_base_legacy(); mm->get_unmapped_area = arch_get_unmapped_area; } else { mm->mmap_base = mmap_base(); @@ -164,7 +169,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) * bit is set, or if the expected stack growth is unlimited: */ if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE; + mm->mmap_base = mmap_base_legacy(); mm->get_unmapped_area = s390_get_unmapped_area; } else { mm->mmap_base = mmap_base(); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 0a2e5e08674..e794c88f699 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -772,7 +772,11 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, __free_page(page); return NULL; } - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + kfree(mp); + __free_page(page); + return NULL; + } mp->vmaddr = vmaddr & PMD_MASK; INIT_LIST_HEAD(&mp->mapper); page->index = (unsigned long) mp; @@ -902,7 +906,10 @@ 
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) page = alloc_page(GFP_KERNEL|__GFP_REPEAT); if (!page) return NULL; - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } atomic_set(&page->_mapcount, 1); table = (unsigned long *) page_to_phys(page); clear_table(table, _PAGE_INVALID, PAGE_SIZE); @@ -1244,11 +1251,11 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, assert_spin_locked(&mm->page_table_lock); /* FIFO */ - if (!mm->pmd_huge_pte) + if (!pmd_huge_pte(mm, pmdp)) INIT_LIST_HEAD(lh); else - list_add(lh, (struct list_head *) mm->pmd_huge_pte); - mm->pmd_huge_pte = pgtable; + list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); + pmd_huge_pte(mm, pmdp) = pgtable; } pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) @@ -1260,12 +1267,12 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) assert_spin_locked(&mm->page_table_lock); /* FIFO */ - pgtable = mm->pmd_huge_pte; + pgtable = pmd_huge_pte(mm, pmdp); lh = (struct list_head *) pgtable; if (list_empty(lh)) - mm->pmd_huge_pte = NULL; + pmd_huge_pte(mm, pmdp) = NULL; else { - mm->pmd_huge_pte = (pgtable_t) lh->next; + pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; list_del(lh); } ptep = (pte_t *) pgtable; diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h index 716b3fd1d86..2e067657db9 100644 --- a/arch/score/include/asm/pgalloc.h +++ b/arch/score/include/asm/pgalloc.h @@ -54,9 +54,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, struct page *pte; pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); - if (pte) { - clear_highpage(pte); - pgtable_page_ctor(pte); + if (!pte) + return NULL; + clear_highpage(pte); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; } return pte; } diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 224f4bc9925..9b0979f4df7 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -1,5 +1,6 @@ config SUPERH def_bool y + select ARCH_MIGHT_HAVE_PC_PARPORT select EXPERT select CLKDEV_LOOKUP select HAVE_IDE if HAS_IOPORT @@ -711,7 +712,6 @@ config CC_STACKPROTECTOR config SMP bool "Symmetric multi-processing support" depends on SYS_SUPPORTS_SMP - select USE_GENERIC_SMP_HELPERS ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, like most personal computers, say N. 
If diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 1fa8be40977..122f737a901 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -15,6 +15,7 @@ #include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mtd/physmap.h> +#include <linux/mfd/tmio.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/io.h> diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h index 06c4281aab6..09fc2bc8a79 100644 --- a/arch/sh/include/asm/fpu.h +++ b/arch/sh/include/asm/fpu.h @@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) save_fpu(tsk); release_fpu(regs); } else - tsk->fpu_counter = 0; + tsk->thread.fpu_counter = 0; } static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs) diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h index 21c5088788d..b9d9489a501 100644 --- a/arch/sh/include/asm/mmu_context.h +++ b/arch/sh/include/asm/mmu_context.h @@ -81,7 +81,7 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu) /* * Fix version; Note that we avoid version #0 - * to distingush NO_CONTEXT. + * to distinguish NO_CONTEXT. */ if (!asid) asid = MMU_CONTEXT_FIRST_VERSION; diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 8c00785c60d..a33673b3687 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -47,7 +47,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, if (!pg) return NULL; page = virt_to_page(pg); - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + quicklist_free(QUICK_PT, NULL, pg); + return NULL; + } return page; } diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h index e699a12cdcc..18e0377f72b 100644 --- a/arch/sh/include/asm/processor_32.h +++ b/arch/sh/include/asm/processor_32.h @@ -111,6 +111,16 @@ struct thread_struct { /* Extended processor state */ union thread_xstate *xstate; + + /* + * fpu_counter contains the number of consecutive context switches + * that the FPU is used. If this is over a threshold, the lazy fpu + * saving becomes unlazy to save the trap. This is an unsigned char + * so that after 256 times the counter wraps and the behavior turns + * lazy again; this to deal with bursty apps that only use FPU for + * a short time + */ + unsigned char fpu_counter; }; #define INIT_THREAD { \ diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h index 1cc7d319714..eedd4f625d0 100644 --- a/arch/sh/include/asm/processor_64.h +++ b/arch/sh/include/asm/processor_64.h @@ -126,6 +126,16 @@ struct thread_struct { /* floating point info */ union thread_xstate *xstate; + + /* + * fpu_counter contains the number of consecutive context switches + * that the FPU is used. If this is over a threshold, the lazy fpu + * saving becomes unlazy to save the trap. 
This is an unsigned char + * so that after 256 times the counter wraps and the behavior turns + * lazy again; this to deal with bursty apps that only use FPU for + * a short time + */ + unsigned char fpu_counter; }; #define INIT_MMAP \ diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c index f8f7af51c12..4e332244ea7 100644 --- a/arch/sh/kernel/cpu/fpu.c +++ b/arch/sh/kernel/cpu/fpu.c @@ -44,7 +44,7 @@ void __fpu_state_restore(void) restore_fpu(tsk); task_thread_info(tsk)->status |= TS_USEDFPU; - tsk->fpu_counter++; + tsk->thread.fpu_counter++; } void fpu_state_restore(struct pt_regs *regs) diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index ebd3933005b..2885fc9d9dc 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, #endif ti->addr_limit = KERNEL_DS; ti->status &= ~TS_USEDFPU; - p->fpu_counter = 0; + p->thread.fpu_counter = 0; return 0; } *childregs = *current_pt_regs(); @@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next) unlazy_fpu(prev, task_pt_regs(prev)); /* we're going to use this soon, after a few expensive things */ - if (next->fpu_counter > 5) + if (next->thread.fpu_counter > 5) prefetch(next_t->xstate); #ifdef CONFIG_MMU @@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next) * restore of the math state immediately to avoid the trap; the * chances of needing FPU soon are obviously high now */ - if (next->fpu_counter > 5) + if (next->thread.fpu_counter > 5) __fpu_state_restore(); return prev; diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 174d124b419..e2062e64334 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -374,7 +374,7 @@ asmlinkage void ret_from_kernel_thread(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg, struct task_struct *p) { - struct pt_regs *childregs, *regs = current_pt_regs(); + struct pt_regs *childregs; #ifdef CONFIG_SH_FPU /* can't happen for a kernel thread */ @@ -393,7 +393,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (unlikely(p->flags & PF_KTHREAD)) { memset(childregs, 0, sizeof(struct pt_regs)); childregs->regs[2] = (unsigned long)arg; - childregs->regs[3] = (unsigned long)fn; + childregs->regs[3] = (unsigned long)usp; childregs->sr = (1 << 30); /* not user_mode */ childregs->sr |= SR_FD; /* Invalidate FPU flag */ p->thread.pc = (unsigned long) ret_from_kernel_thread; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 33890fd267c..2d089fe2cba 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -231,7 +231,7 @@ static void __init bootmem_init_one_node(unsigned int nid) if (!p->node_spanned_pages) return; - end_pfn = p->node_start_pfn + p->node_spanned_pages; + end_pfn = pgdat_end_pfn(p); total_pages = bootmem_bootmap_pages(p->node_spanned_pages); diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 78c4fdb91bc..d4f7a6a163d 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -12,6 +12,7 @@ config 64BIT config SPARC bool default y + select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI select OF select OF_PROMTREE select HAVE_IDE @@ -28,7 +29,6 @@ config SPARC select HAVE_ARCH_JUMP_LABEL select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION - select USE_GENERIC_SMP_HELPERS if SMP select GENERIC_PCI_IOMAP select HAVE_NMI_WATCHDOG if SPARC64 select HAVE_BPF_JIT @@ -64,6 +64,7 @@ config SPARC64 select 
HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_SYSCALL_TRACEPOINTS + select HAVE_CONTEXT_TRACKING select HAVE_DEBUG_KMEMLEAK select RTC_DRV_CMOS select RTC_DRV_BQ4802 diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h index 76092c4dd27..f668797ae23 100644 --- a/arch/sparc/include/asm/mmu_64.h +++ b/arch/sparc/include/asm/mmu_64.h @@ -93,7 +93,6 @@ typedef struct { spinlock_t lock; unsigned long sparc64_ctx_val; unsigned long huge_pte_count; - struct page *pgtable_page; struct tsb_config tsb_block[MM_NUM_TSBS]; struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; } mm_context_t; diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index e15538899f3..aac53fcea80 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -15,7 +15,10 @@ #define DCACHE_ALIASING_POSSIBLE #endif -#define HPAGE_SHIFT 22 +#define HPAGE_SHIFT 23 +#define REAL_HPAGE_SHIFT 22 + +#define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) @@ -53,8 +56,8 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag /* These are used to make use of C type-checking.. */ typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long iopte; } iopte_t; -typedef struct { unsigned int pmd; } pmd_t; -typedef struct { unsigned int pgd; } pgd_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgprot; } pgprot_t; #define pte_val(x) ((x).pte) @@ -73,8 +76,8 @@ typedef struct { unsigned long pgprot; } pgprot_t; /* .. while these make it easier on the compiler */ typedef unsigned long pte_t; typedef unsigned long iopte_t; -typedef unsigned int pmd_t; -typedef unsigned int pgd_t; +typedef unsigned long pmd_t; +typedef unsigned long pgd_t; typedef unsigned long pgprot_t; #define pte_val(x) (x) @@ -93,18 +96,44 @@ typedef unsigned long pgprot_t; typedef pte_t *pgtable_t; +/* These two values define the virtual address space range in which we + * must forbid 64-bit user processes from making mappings. It used to + * represent precisely the virtual address space hole present in most + * early sparc64 chips including UltraSPARC-I. But now it also is + * further constrained by the limits of our page tables, which is + * 43-bits of virtual address. + */ +#define SPARC64_VA_HOLE_TOP _AC(0xfffffc0000000000,UL) +#define SPARC64_VA_HOLE_BOTTOM _AC(0x0000040000000000,UL) + +/* The next two defines specify the actual exclusion region we + * enforce, wherein we use a 4GB red zone on each side of the VA hole. + */ +#define VA_EXCLUDE_START (SPARC64_VA_HOLE_BOTTOM - (1UL << 32UL)) +#define VA_EXCLUDE_END (SPARC64_VA_HOLE_TOP + (1UL << 32UL)) + #define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \ - (_AC(0x0000000070000000,UL)) : \ - (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) + _AC(0x0000000070000000,UL) : \ + VA_EXCLUDE_END) #include <asm-generic/memory_model.h> +#define PAGE_OFFSET_BY_BITS(X) (-(_AC(1,UL) << (X))) +extern unsigned long PAGE_OFFSET; + #endif /* !(__ASSEMBLY__) */ -/* We used to stick this into a hard-coded global register (%g4) - * but that does not make sense anymore. +/* The maximum number of physical memory address bits we support, this + * is used to size various tables used to manage kernel TLB misses and + * also the sparsemem code. 
+ */ +#define MAX_PHYS_ADDRESS_BITS 47 + +/* These two shift counts are used when indexing sparc64_valid_addr_bitmap + * and kpte_linear_bitmap. */ -#define PAGE_OFFSET _AC(0xFFFFF80000000000,UL) +#define ILOG2_4MB 22 +#define ILOG2_256MB 28 #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 36760317814..8358dc14495 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -48,18 +48,18 @@ /* PMD_SHIFT determines the size of the area a second-level page * table can map */ -#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-4)) +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) #define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) -#define PMD_BITS (PAGE_SHIFT - 2) +#define PMD_BITS (PAGE_SHIFT - 3) /* PGDIR_SHIFT determines what a third-level page table entry can map */ -#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-4) + PMD_BITS) +#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS) #define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) -#define PGDIR_BITS (PAGE_SHIFT - 2) +#define PGDIR_BITS (PAGE_SHIFT - 3) -#if (PGDIR_SHIFT + PGDIR_BITS) != 44 +#if (PGDIR_SHIFT + PGDIR_BITS) != 43 #error Page table parameters do not cover virtual address space properly. #endif @@ -67,35 +67,12 @@ #error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages. #endif -/* PMDs point to PTE tables which are 4K aligned. */ -#define PMD_PADDR _AC(0xfffffffe,UL) -#define PMD_PADDR_SHIFT _AC(11,UL) - -#define PMD_ISHUGE _AC(0x00000001,UL) - -/* This is the PMD layout when PMD_ISHUGE is set. With 4MB huge - * pages, this frees up a bunch of bits in the layout that we can - * use for the protection settings and software metadata. - */ -#define PMD_HUGE_PADDR _AC(0xfffff800,UL) -#define PMD_HUGE_PROTBITS _AC(0x000007ff,UL) -#define PMD_HUGE_PRESENT _AC(0x00000400,UL) -#define PMD_HUGE_WRITE _AC(0x00000200,UL) -#define PMD_HUGE_DIRTY _AC(0x00000100,UL) -#define PMD_HUGE_ACCESSED _AC(0x00000080,UL) -#define PMD_HUGE_EXEC _AC(0x00000040,UL) -#define PMD_HUGE_SPLITTING _AC(0x00000020,UL) - -/* PGDs point to PMD tables which are 8K aligned. */ -#define PGD_PADDR _AC(0xfffffffc,UL) -#define PGD_PADDR_SHIFT _AC(11,UL) - #ifndef __ASSEMBLY__ #include <linux/sched.h> /* Entries per page directory level. 
*/ -#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-4)) +#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) #define PTRS_PER_PMD (1UL << PMD_BITS) #define PTRS_PER_PGD (1UL << PGDIR_BITS) @@ -112,6 +89,7 @@ #define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ #define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/ #define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */ +#define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */ /* Advertise support for _PAGE_SPECIAL */ #define __HAVE_ARCH_PTE_SPECIAL @@ -125,6 +103,7 @@ #define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */ #define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ #define _PAGE_SPECIAL_4U _AC(0x0200000000000000,UL) /* Special page */ +#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page */ #define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */ #define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ #define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ @@ -155,6 +134,7 @@ #define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */ #define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */ #define _PAGE_SPECIAL_4V _AC(0x0200000000000000,UL) /* Special page */ +#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page */ #define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */ #define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */ #define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */ @@ -180,6 +160,10 @@ #define _PAGE_SZBITS_4U _PAGE_SZ8K_4U #define _PAGE_SZBITS_4V _PAGE_SZ8K_4V +#if REAL_HPAGE_SHIFT != 22 +#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up +#endif + #define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U #define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V @@ -239,16 +223,13 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot); -#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) - -extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); - -static inline pmd_t pmd_mkhuge(pmd_t pmd) +static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { - /* Do nothing, mk_pmd() does this part. */ - return pmd; + pte_t pte = pfn_pte(page_nr, pgprot); + + return __pmd(pte_val(pte)); } +#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) #endif /* This one can be done with two shifts. 
*/ @@ -309,14 +290,25 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) : "=r" (mask), "=r" (tmp) : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | - _PAGE_SPECIAL), + _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | - _PAGE_SPECIAL)); + _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_modify(pte, newprot); + + return __pmd(pte_val(pte)); +} +#endif + static inline pte_t pgoff_to_pte(unsigned long off) { off <<= PAGE_SHIFT; @@ -357,7 +349,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot) */ #define pgprot_noncached pgprot_noncached -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) static inline pte_t pte_mkhuge(pte_t pte) { unsigned long mask; @@ -375,6 +367,17 @@ static inline pte_t pte_mkhuge(pte_t pte) return __pte(pte_val(pte) | mask); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_mkhuge(pte); + pte_val(pte) |= _PAGE_PMD_HUGE; + + return __pmd(pte_val(pte)); +} +#endif #endif static inline pte_t pte_mkdirty(pte_t pte) @@ -626,91 +629,130 @@ static inline unsigned long pte_special(pte_t pte) return pte_val(pte) & _PAGE_SPECIAL; } -static inline int pmd_large(pmd_t pmd) +static inline unsigned long pmd_large(pmd_t pmd) { - return (pmd_val(pmd) & (PMD_ISHUGE | PMD_HUGE_PRESENT)) == - (PMD_ISHUGE | PMD_HUGE_PRESENT); + pte_t pte = __pte(pmd_val(pmd)); + + return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline int pmd_young(pmd_t pmd) +static inline unsigned long pmd_young(pmd_t pmd) { - return pmd_val(pmd) & PMD_HUGE_ACCESSED; + pte_t pte = __pte(pmd_val(pmd)); + + return pte_young(pte); } -static inline int pmd_write(pmd_t pmd) +static inline unsigned long pmd_write(pmd_t pmd) { - return pmd_val(pmd) & PMD_HUGE_WRITE; + pte_t pte = __pte(pmd_val(pmd)); + + return pte_write(pte); } static inline unsigned long pmd_pfn(pmd_t pmd) { - unsigned long val = pmd_val(pmd) & PMD_HUGE_PADDR; + pte_t pte = __pte(pmd_val(pmd)); - return val >> (PAGE_SHIFT - PMD_PADDR_SHIFT); + return pte_pfn(pte); } -static inline int pmd_trans_splitting(pmd_t pmd) +static inline unsigned long pmd_trans_huge(pmd_t pmd) { - return (pmd_val(pmd) & (PMD_ISHUGE|PMD_HUGE_SPLITTING)) == - (PMD_ISHUGE|PMD_HUGE_SPLITTING); + pte_t pte = __pte(pmd_val(pmd)); + + return pte_val(pte) & _PAGE_PMD_HUGE; } -static inline int pmd_trans_huge(pmd_t pmd) +static inline unsigned long pmd_trans_splitting(pmd_t pmd) { - return pmd_val(pmd) & PMD_ISHUGE; + pte_t pte = __pte(pmd_val(pmd)); + + return pmd_trans_huge(pmd) && pte_special(pte); } #define has_transparent_hugepage() 1 static inline pmd_t pmd_mkold(pmd_t pmd) { - pmd_val(pmd) &= ~PMD_HUGE_ACCESSED; - return pmd; + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_mkold(pte); + + return __pmd(pte_val(pte)); } static inline pmd_t pmd_wrprotect(pmd_t pmd) { - pmd_val(pmd) &= ~PMD_HUGE_WRITE; - return pmd; + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_wrprotect(pte); + + return __pmd(pte_val(pte)); } static inline pmd_t pmd_mkdirty(pmd_t pmd) { - pmd_val(pmd) |= 
PMD_HUGE_DIRTY; - return pmd; + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_mkdirty(pte); + + return __pmd(pte_val(pte)); } static inline pmd_t pmd_mkyoung(pmd_t pmd) { - pmd_val(pmd) |= PMD_HUGE_ACCESSED; - return pmd; + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_mkyoung(pte); + + return __pmd(pte_val(pte)); } static inline pmd_t pmd_mkwrite(pmd_t pmd) { - pmd_val(pmd) |= PMD_HUGE_WRITE; - return pmd; + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_mkwrite(pte); + + return __pmd(pte_val(pte)); } static inline pmd_t pmd_mknotpresent(pmd_t pmd) { - pmd_val(pmd) &= ~PMD_HUGE_PRESENT; + unsigned long mask; + + if (tlb_type == hypervisor) + mask = _PAGE_PRESENT_4V; + else + mask = _PAGE_PRESENT_4U; + + pmd_val(pmd) &= ~mask; + return pmd; } static inline pmd_t pmd_mksplitting(pmd_t pmd) { - pmd_val(pmd) |= PMD_HUGE_SPLITTING; - return pmd; + pte_t pte = __pte(pmd_val(pmd)); + + pte = pte_mkspecial(pte); + + return __pmd(pte_val(pte)); } -extern pgprot_t pmd_pgprot(pmd_t entry); +static inline pgprot_t pmd_pgprot(pmd_t entry) +{ + unsigned long val = pmd_val(entry); + + return __pgprot(val); +} #endif static inline int pmd_present(pmd_t pmd) { - return pmd_val(pmd) != 0U; + return pmd_val(pmd) != 0UL; } #define pmd_none(pmd) (!pmd_val(pmd)) @@ -728,33 +770,32 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) { - unsigned long val = __pa((unsigned long) (ptep)) >> PMD_PADDR_SHIFT; + unsigned long val = __pa((unsigned long) (ptep)); pmd_val(*pmdp) = val; } #define pud_set(pudp, pmdp) \ - (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> PGD_PADDR_SHIFT)) + (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)))) static inline unsigned long __pmd_page(pmd_t pmd) { - unsigned long paddr = (unsigned long) pmd_val(pmd); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (pmd_val(pmd) & PMD_ISHUGE) - paddr &= PMD_HUGE_PADDR; -#endif - paddr <<= PMD_PADDR_SHIFT; - return ((unsigned long) __va(paddr)); + pte_t pte = __pte(pmd_val(pmd)); + unsigned long pfn; + + pfn = pte_pfn(pte); + + return ((unsigned long) __va(pfn << PAGE_SHIFT)); } #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) #define pud_page_vaddr(pud) \ - ((unsigned long) __va((((unsigned long)pud_val(pud))<<PGD_PADDR_SHIFT))) + ((unsigned long) __va(pud_val(pud))) #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) #define pmd_bad(pmd) (0) -#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U) +#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (0) #define pud_present(pud) (pud_val(pud) != 0U) -#define pud_clear(pudp) (pud_val(*(pudp)) = 0U) +#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) /* Same in both SUN4V and SUN4U. 
*/ #define pte_none(pte) (!pte_val(pte)) @@ -789,7 +830,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, pmd_t *pmdp) { pmd_t pmd = *pmdp; - set_pmd_at(mm, addr, pmdp, __pmd(0U)); + set_pmd_at(mm, addr, pmdp, __pmd(0UL)); return pmd; } @@ -837,8 +878,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, }) #endif -extern pgd_t swapper_pg_dir[2048]; -extern pmd_t swapper_low_pmd_dir[2048]; +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD]; extern void paging_init(void); extern unsigned long find_ecache_flush_span(unsigned long size); diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h index 67c62578d17..11ebd659e7b 100644 --- a/arch/sparc/include/asm/prom.h +++ b/arch/sparc/include/asm/prom.h @@ -43,10 +43,6 @@ extern int of_getintprop_default(struct device_node *np, const char *name, int def); extern int of_find_in_proplist(const char *list, const char *match, int len); -#ifdef CONFIG_NUMA -extern int of_node_to_nid(struct device_node *dp); -#define of_node_to_nid of_node_to_nid -#endif extern void prom_build_devicetree(void); extern void of_populate_present_mask(void); @@ -63,13 +59,5 @@ extern char *of_console_options; extern void irq_trans_init(struct device_node *dp); extern char *build_path_component(struct device_node *dp); -/* SPARC has local implementations */ -extern int of_address_to_resource(struct device_node *dev, int index, - struct resource *r); -#define of_address_to_resource of_address_to_resource - -void __iomem *of_iomap(struct device_node *node, int index); -#define of_iomap of_iomap - #endif /* __KERNEL__ */ #endif /* _SPARC_PROM_H */ diff --git a/arch/sparc/include/asm/sparsemem.h b/arch/sparc/include/asm/sparsemem.h index b99d4e4b6d2..e5e1752d5d7 100644 --- a/arch/sparc/include/asm/sparsemem.h +++ b/arch/sparc/include/asm/sparsemem.h @@ -3,9 +3,11 @@ #ifdef __KERNEL__ +#include <asm/page.h> + #define SECTION_SIZE_BITS 30 -#define MAX_PHYSADDR_BITS 42 -#define MAX_PHYSMEM_BITS 42 +#define MAX_PHYSADDR_BITS MAX_PHYS_ADDRESS_BITS +#define MAX_PHYSMEM_BITS MAX_PHYS_ADDRESS_BITS #endif /* !(__KERNEL__) */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 2b4e17b79e9..a5f01ac6d0f 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -190,7 +190,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ /* flag bit 6 is available */ #define TIF_32BIT 7 /* 32-bit binary */ -/* flag bit 8 is available */ +#define TIF_NOHZ 8 /* in adaptive nohz mode */ #define TIF_SECCOMP 9 /* secure computing */ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ @@ -208,6 +208,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_UNALIGNED (1<<TIF_UNALIGNED) #define _TIF_32BIT (1<<TIF_32BIT) +#define _TIF_NOHZ (1<<TIF_NOHZ) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index e696432b950..2230f80d9fe 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -142,98 +142,39 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; or REG1, 
%lo(swapper_pg_dir), REG1; \ sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ - andn REG2, 0x3, REG2; \ - lduw [REG1 + REG2], REG1; \ + andn REG2, 0x7, REG2; \ + ldx [REG1 + REG2], REG1; \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ - sllx REG1, PGD_PADDR_SHIFT, REG1; \ - andn REG2, 0x3, REG2; \ - lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + andn REG2, 0x7, REG2; \ + ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - PMD_SHIFT, REG2; \ - srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \ - sllx REG1, PMD_PADDR_SHIFT, REG1; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ add REG1, REG2, REG1; - /* These macros exists only to make the PMD translator below - * easier to read. It hides the ELF section switch for the - * sun4v code patching. - */ -#define OR_PTE_BIT_1INSN(REG, NAME) \ -661: or REG, _PAGE_##NAME##_4U, REG; \ - .section .sun4v_1insn_patch, "ax"; \ - .word 661b; \ - or REG, _PAGE_##NAME##_4V, REG; \ - .previous; - -#define OR_PTE_BIT_2INSN(REG, TMP, NAME) \ -661: sethi %hi(_PAGE_##NAME##_4U), TMP; \ - or REG, TMP, REG; \ - .section .sun4v_2insn_patch, "ax"; \ - .word 661b; \ - mov -1, TMP; \ - or REG, _PAGE_##NAME##_4V, REG; \ - .previous; - - /* Load into REG the PTE value for VALID, CACHE, and SZHUGE. */ -#define BUILD_PTE_VALID_SZHUGE_CACHE(REG) \ -661: sethi %uhi(_PAGE_VALID|_PAGE_SZHUGE_4U), REG; \ - .section .sun4v_1insn_patch, "ax"; \ - .word 661b; \ - sethi %uhi(_PAGE_VALID), REG; \ - .previous; \ - sllx REG, 32, REG; \ -661: or REG, _PAGE_CP_4U|_PAGE_CV_4U, REG; \ - .section .sun4v_1insn_patch, "ax"; \ - .word 661b; \ - or REG, _PAGE_CP_4V|_PAGE_CV_4V|_PAGE_SZHUGE_4V, REG; \ - .previous; - /* PMD has been loaded into REG1, interpret the value, seeing * if it is a HUGE PMD or a normal one. If it is not valid * then jump to FAIL_LABEL. If it is a HUGE PMD, and it * translates to a valid PTE, branch to PTE_LABEL. * - * We translate the PMD by hand, one bit at a time, - * constructing the huge PTE. - * - * So we construct the PTE in REG2 as follows: - * - * 1) Extract the PMD PFN from REG1 and place it into REG2. - * - * 2) Translate PMD protection bits in REG1 into REG2, one bit - * at a time using andcc tests on REG1 and OR's into REG2. - * - * Only two bits to be concerned with here, EXEC and WRITE. - * Now REG1 is freed up and we can use it as a temporary. - * - * 3) Construct the VALID, CACHE, and page size PTE bits in - * REG1, OR with REG2 to form final PTE. + * We have to propagate the 4MB bit of the virtual address + * because we are fabricating 8MB pages using 4MB hw pages. 
*/ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ - brz,pn REG1, FAIL_LABEL; \ - andcc REG1, PMD_ISHUGE, %g0; \ - be,pt %xcc, 700f; \ - and REG1, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED, REG2; \ - cmp REG2, PMD_HUGE_PRESENT|PMD_HUGE_ACCESSED; \ - bne,pn %xcc, FAIL_LABEL; \ - andn REG1, PMD_HUGE_PROTBITS, REG2; \ - sllx REG2, PMD_PADDR_SHIFT, REG2; \ - /* REG2 now holds PFN << PAGE_SHIFT */ \ - andcc REG1, PMD_HUGE_WRITE, %g0; \ - bne,a,pt %xcc, 1f; \ - OR_PTE_BIT_1INSN(REG2, W); \ -1: andcc REG1, PMD_HUGE_EXEC, %g0; \ - be,pt %xcc, 1f; \ - nop; \ - OR_PTE_BIT_2INSN(REG2, REG1, EXEC); \ - /* REG1 can now be clobbered, build final PTE */ \ -1: BUILD_PTE_VALID_SZHUGE_CACHE(REG1); \ - ba,pt %xcc, PTE_LABEL; \ - or REG1, REG2, REG1; \ + brz,pn REG1, FAIL_LABEL; \ + sethi %uhi(_PAGE_PMD_HUGE), REG2; \ + sllx REG2, 32, REG2; \ + andcc REG1, REG2, %g0; \ + be,pt %xcc, 700f; \ + sethi %hi(4 * 1024 * 1024), REG2; \ + andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + brlz,pt REG1, PTE_LABEL; \ + or REG1, REG2, REG1; \ 700: #else #define USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ @@ -253,18 +194,16 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; #define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \ sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ - andn REG2, 0x3, REG2; \ - lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \ + andn REG2, 0x7, REG2; \ + ldxa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ - sllx REG1, PGD_PADDR_SHIFT, REG1; \ - andn REG2, 0x3, REG2; \ - lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + andn REG2, 0x7, REG2; \ + ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ USER_PGTABLE_CHECK_PMD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \ sllx VADDR, 64 - PMD_SHIFT, REG2; \ - srlx REG2, 64 - (PAGE_SHIFT - 1), REG2; \ - sllx REG1, PMD_PADDR_SHIFT, REG1; \ + srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ add REG1, REG2, REG1; \ ldxa [REG1] ASI_PHYS_USE_EC, REG1; \ diff --git a/arch/sparc/include/uapi/asm/errno.h b/arch/sparc/include/uapi/asm/errno.h index c351aba997b..20423e17285 100644 --- a/arch/sparc/include/uapi/asm/errno.h +++ b/arch/sparc/include/uapi/asm/errno.h @@ -40,7 +40,7 @@ #define EPROCLIM 67 /* SUNOS: Too many processes */ #define EUSERS 68 /* Too many users */ #define EDQUOT 69 /* Quota exceeded */ -#define ESTALE 70 /* Stale NFS file handle */ +#define ESTALE 70 /* Stale file handle */ #define EREMOTE 71 /* Object is remote */ #define ENOSTR 72 /* Device not a stream */ #define ETIME 73 /* Timer expired */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 4e1d66c3ce7..0f21e9a5ca1 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -72,6 +72,8 @@ #define SO_BUSY_POLL 0x0030 +#define SO_MAX_PACING_RATE 0x0031 + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 9c179fbfb21..140966fbd30 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -88,7 +88,6 @@ extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); extern void bad_trap_tl1(struct pt_regs *regs, long lvl); -extern void 
do_fpe_common(struct pt_regs *regs); extern void do_fpieee(struct pt_regs *regs); extern void do_fpother(struct pt_regs *regs); extern void do_tof(struct pt_regs *regs); diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 53c0a82e603..60b19f50c80 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -159,11 +159,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); unsigned long flags; if (user_mode(regs)) { bad_trap(regs, trap_level); - return; + goto out; } flushw_all(); @@ -171,6 +172,8 @@ asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs) local_irq_save(flags); kgdb_handle_exception(0x172, SIGTRAP, 0, regs); local_irq_restore(flags); +out: + exception_exit(prev_state); } int kgdb_arch_init(void) diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index e72212148d2..1b097350319 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/kdebug.h> #include <linux/slab.h> +#include <linux/context_tracking.h> #include <asm/signal.h> #include <asm/cacheflush.h> #include <asm/uaccess.h> @@ -349,7 +350,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accouting + * we can also use npre/npostfault count for accounting * these specific fault cases. */ kprobes_inc_nmissed_count(cur); @@ -418,12 +419,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + BUG_ON(trap_level != 0x170 && trap_level != 0x171); if (user_mode(regs)) { local_irq_enable(); bad_trap(regs, trap_level); - return; + goto out; } /* trap_level == 0x170 --> ta 0x70 @@ -433,6 +436,8 @@ asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, (trap_level == 0x170) ? "debug" : "debug_2", regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP) bad_trap(regs, trap_level); +out: + exception_exit(prev_state); } /* Jprobes support. */ diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index fde5a419cf2..542e96ac4d3 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -153,12 +153,19 @@ kvmap_dtlb_tsb4m_miss: /* Clear the PAGE_OFFSET top virtual bits, shift * down to get PFN, and make sure PFN is in range. */ - sllx %g4, 21, %g5 +661: sllx %g4, 0, %g5 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous /* Check to see if we know about valid memory at the 4MB * chunk this physical address will reside within. */ - srlx %g5, 21 + 41, %g2 +661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous + brnz,pn %g2, kvmap_dtlb_longpath nop @@ -176,7 +183,11 @@ valid_addr_bitmap_patch: or %g7, %lo(sparc64_valid_addr_bitmap), %g7 .previous - srlx %g5, 21 + 22, %g2 +661: srlx %g5, ILOG2_4MB, %g2 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous + srlx %g2, 6, %g5 and %g2, 63, %g2 sllx %g5, 3, %g5 @@ -189,9 +200,18 @@ valid_addr_bitmap_patch: 2: sethi %hi(kpte_linear_bitmap), %g2 /* Get the 256MB physical address index. 
*/ - sllx %g4, 21, %g5 +661: sllx %g4, 0, %g5 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous + or %g2, %lo(kpte_linear_bitmap), %g2 - srlx %g5, 21 + 28, %g5 + +661: srlx %g5, ILOG2_256MB, %g5 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous + and %g5, (32 - 1), %g7 /* Divide by 32 to get the offset into the bitmask. */ diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index 4435488ebe2..97655e0fd24 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -29,7 +29,7 @@ static void *module_map(unsigned long size) if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL, -1, + GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); } #else diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index bc4d3f5d2e5..cb021453de2 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -398,8 +398,8 @@ static void apb_fake_ranges(struct pci_dev *dev, apb_calc_first_last(map, &first, &last); res = bus->resource[1]; res->flags = IORESOURCE_MEM; - region.start = (first << 21); - region.end = (last << 21) + ((1 << 21) - 1); + region.start = (first << 29); + region.end = (last << 29) + ((1 << 29) - 1); pcibios_bus_to_resource(dev, res, ®ion); } diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index baebab21549..32a280ec38c 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -31,6 +31,7 @@ #include <linux/elfcore.h> #include <linux/sysrq.h> #include <linux/nmi.h> +#include <linux/context_tracking.h> #include <asm/uaccess.h> #include <asm/page.h> @@ -557,6 +558,7 @@ void fault_in_user_windows(void) barf: set_thread_wsaved(window + 1); + user_exit(); do_exit(SIGILL); } diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index d397d7fc5c2..6b39125eb92 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -373,6 +373,59 @@ static const char *get_mid_prop(void) return (tlb_type == spitfire ? "upa-portid" : "portid"); } +bool arch_find_n_match_cpu_physical_id(struct device_node *cpun, + int cpu, unsigned int *thread) +{ + const char *mid_prop = get_mid_prop(); + int this_cpu_id; + + /* On hypervisor based platforms we interrogate the 'reg' + * property. On everything else we look for a 'upa-portis', + * 'portid', or 'cpuid' property. + */ + + if (tlb_type == hypervisor) { + struct property *prop = of_find_property(cpun, "reg", NULL); + u32 *regs; + + if (!prop) { + pr_warn("CPU node missing reg property\n"); + return false; + } + regs = prop->value; + this_cpu_id = regs[0] & 0x0fffffff; + } else { + this_cpu_id = of_getintprop_default(cpun, mid_prop, -1); + + if (this_cpu_id < 0) { + mid_prop = "cpuid"; + this_cpu_id = of_getintprop_default(cpun, mid_prop, -1); + } + if (this_cpu_id < 0) { + pr_warn("CPU node missing cpu ID property\n"); + return false; + } + } + if (this_cpu_id == cpu) { + if (thread) { + int proc_id = cpu_data(cpu).proc_id; + + /* On sparc64, the cpu thread information is obtained + * either from OBP or the machine description. We've + * actually probed this information already long before + * this interface gets called so instead of interrogating + * both the OF node and the MDESC again, just use what + * we discovered already. 
+ */ + if (proc_id < 0) + proc_id = 0; + *thread = proc_id; + } + return true; + } + return false; +} + static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg) { struct device_node *dp; diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 773c1f2983c..c13c9f25d83 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -27,6 +27,7 @@ #include <trace/syscall.h> #include <linux/compat.h> #include <linux/elf.h> +#include <linux/context_tracking.h> #include <asm/asi.h> #include <asm/pgtable.h> @@ -1066,6 +1067,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) /* do the secure computing check first */ secure_computing_strict(regs->u_regs[UREG_G1]); + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + if (test_thread_flag(TIF_SYSCALL_TRACE)) ret = tracehook_report_syscall_entry(regs); @@ -1086,6 +1090,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) asmlinkage void syscall_trace_leave(struct pt_regs *regs) { + if (test_thread_flag(TIF_NOHZ)) + user_exit(); + audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) @@ -1093,4 +1100,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); + + if (test_thread_flag(TIF_NOHZ)) + user_enter(); } diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 76213db97a4..39f0c662f4c 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -18,10 +18,16 @@ #define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) #define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) +#ifdef CONFIG_CONTEXT_TRACKING +# define SCHEDULE_USER schedule_user +#else +# define SCHEDULE_USER schedule +#endif + .text .align 32 __handle_preemption: - call schedule + call SCHEDULE_USER wrpr %g0, RTRAP_PSTATE, %pstate ba,pt %xcc, __handle_preemption_continue wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index b524f91dd0e..ee789d2ef05 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -68,7 +68,7 @@ struct rt_signal_frame32 { /* __siginfo_rwin_t * */u32 rwin_save; } __attribute__((aligned(8))); -int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 35923e8abd8..cd91d010e6d 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -23,6 +23,7 @@ #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/bitops.h> +#include <linux/context_tracking.h> #include <asm/uaccess.h> #include <asm/ptrace.h> @@ -43,6 +44,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) { struct ucontext __user *ucp = (struct ucontext __user *) regs->u_regs[UREG_I0]; + enum ctx_state prev_state = exception_enter(); mc_gregset_t __user *grp; unsigned long pc, npc, tstate; unsigned long fp, i7; @@ -129,16 +131,19 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) } if (err) goto do_sigsegv; - +out: + exception_exit(prev_state); return; do_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void sparc64_get_context(struct pt_regs *regs) { struct ucontext __user *ucp = (struct ucontext __user *) regs->u_regs[UREG_I0]; + enum ctx_state prev_state = exception_enter(); mc_gregset_t __user 
*grp; mcontext_t __user *mcp; unsigned long fp, i7; @@ -220,10 +225,12 @@ asmlinkage void sparc64_get_context(struct pt_regs *regs) } if (err) goto do_sigsegv; - +out: + exception_exit(prev_state); return; do_sigsegv: force_sig(SIGSEGV, current); + goto out; } struct rt_signal_frame { @@ -528,11 +535,13 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { + user_exit(); if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } + user_enter(); } diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index e142545244f..b66a5338231 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1399,8 +1399,13 @@ void __init smp_cpus_done(unsigned int max_cpus) void smp_send_reschedule(int cpu) { - xcall_deliver((u64) &xcall_receive_signal, 0, 0, - cpumask_of(cpu)); + if (cpu == smp_processor_id()) { + WARN_ON_ONCE(preemptible()); + set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); + } else { + xcall_deliver((u64) &xcall_receive_signal, + 0, 0, cpumask_of(cpu)); + } } void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S index bde867fd71e..e0c09bf8561 100644 --- a/arch/sparc/kernel/sun4v_tlb_miss.S +++ b/arch/sparc/kernel/sun4v_tlb_miss.S @@ -182,7 +182,7 @@ sun4v_tsb_miss_common: cmp %g5, -1 be,pt %xcc, 80f nop - COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7) + COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7) /* That clobbered %g2, reload it. */ ldxa [%g0] ASI_SCRATCHPAD, %g2 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 51561b8b15b..beb0b5a5f21 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -24,6 +24,7 @@ #include <linux/personality.h> #include <linux/random.h> #include <linux/export.h> +#include <linux/context_tracking.h> #include <asm/uaccess.h> #include <asm/utrap.h> @@ -39,9 +40,6 @@ asmlinkage unsigned long sys_getpagesize(void) return PAGE_SIZE; } -#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) -#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) - /* Does addr --> addr+len fall within 4GB of the VA-space hole or * overflow past the end of the 64-bit address space? 
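The pattern behind the context-tracking additions in this series (the new <linux/context_tracking.h> includes, user_exit()/user_enter() in do_notify_resume() and the syscall trace hooks, and exception_enter()/exception_exit() in the trap handlers) is sketched below. The trap handler and its two helpers are made up for illustration; only the bracketing calls are the real API.

    #include <linux/context_tracking.h>
    #include <linux/ptrace.h>

    /* Assumed helpers, standing in for whatever work a real trap does. */
    extern bool handled_quickly(struct pt_regs *regs);
    extern void do_slow_path(struct pt_regs *regs);

    /* Hypothetical trap handler: tell the context-tracking code that the
     * CPU left user mode on entry, and make every return path (including
     * early ones) go back through exception_exit().
     */
    void example_trap_handler(struct pt_regs *regs)
    {
            enum ctx_state prev_state = exception_enter();

            if (handled_quickly(regs))
                    goto out;

            do_slow_path(regs);
    out:
            exception_exit(prev_state);
    }

This is why the sparc64 handlers in this series grow an out: label and turn their early return statements into goto out.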
*/ @@ -499,6 +497,7 @@ asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs) asmlinkage void sparc_breakpoint(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (test_thread_flag(TIF_32BIT)) { @@ -517,6 +516,7 @@ asmlinkage void sparc_breakpoint(struct pt_regs *regs) #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc); #endif + exception_exit(prev_state); } extern void check_pending(int signum); diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index d950197a17e..87729fff13b 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -52,7 +52,7 @@ sys32_rt_sigreturn: #endif .align 32 1: ldx [%g6 + TI_FLAGS], %l5 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 be,pt %icc, rtrap nop call syscall_trace_leave @@ -184,7 +184,7 @@ linux_sparc_syscall32: srl %i3, 0, %o3 ! IEU0 srl %i2, 0, %o2 ! IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 bne,pn %icc, linux_syscall_trace32 ! CTI mov %i0, %l5 ! IEU1 5: call %l7 ! CTI Group brk forced @@ -207,7 +207,7 @@ linux_sparc_syscall: mov %i3, %o3 ! IEU1 mov %i4, %o4 ! IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 bne,pn %icc, linux_syscall_trace ! CTI Group mov %i0, %l5 ! IEU0 2: call %l7 ! CTI Group brk forced @@ -223,7 +223,7 @@ ret_sys_call: cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! 
pc = npc 2: diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index b3f833ab90e..4ced92f0535 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -20,6 +20,7 @@ #include <linux/ftrace.h> #include <linux/reboot.h> #include <linux/gfp.h> +#include <linux/context_tracking.h> #include <asm/smp.h> #include <asm/delay.h> @@ -186,11 +187,12 @@ EXPORT_SYMBOL_GPL(unregister_dimm_printer); void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) { printk("spitfire_insn_access_exception: SFSR[%016lx] " @@ -207,6 +209,8 @@ void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, un info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); +out: + exception_exit(prev_state); } void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) @@ -260,11 +264,12 @@ void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "data access exception", regs, 0, 0x30, SIGTRAP) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ @@ -280,7 +285,7 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un #endif regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - return; + goto out; } /* Shit... 
*/ printk("spitfire_data_access_exception: SFSR[%016lx] " @@ -294,6 +299,8 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); +out: + exception_exit(prev_state); } void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) @@ -1994,6 +2001,7 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, */ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) { + enum ctx_state prev_state = exception_enter(); struct sun4v_error_entry *ent, local_copy; struct trap_per_cpu *tb; unsigned long paddr; @@ -2022,12 +2030,14 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) pr_info("Shutdown request, %u seconds...\n", local_copy.err_secs); orderly_poweroff(true); - return; + goto out; } sun4v_log_error(regs, &local_copy, cpu, KERN_ERR "RESUMABLE ERROR", &sun4v_resum_oflow_cnt); +out: + exception_exit(prev_state); } /* If we try to printk() we'll probably make matters worse, by trying @@ -2152,7 +2162,7 @@ void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) err, op); } -void do_fpe_common(struct pt_regs *regs) +static void do_fpe_common(struct pt_regs *regs) { if (regs->tstate & TSTATE_PRIV) { regs->tpc = regs->tnpc; @@ -2188,23 +2198,28 @@ void do_fpe_common(struct pt_regs *regs) void do_fpieee(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + if (notify_die(DIE_TRAP, "fpu exception ieee", regs, 0, 0x24, SIGFPE) == NOTIFY_STOP) - return; + goto out; do_fpe_common(regs); +out: + exception_exit(prev_state); } extern int do_mathemu(struct pt_regs *, struct fpustate *, bool); void do_fpother(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); struct fpustate *f = FPUSTATE; int ret = 0; if (notify_die(DIE_TRAP, "fpu exception other", regs, 0, 0x25, SIGFPE) == NOTIFY_STOP) - return; + goto out; switch ((current_thread_info()->xfsr[0] & 0x1c000)) { case (2 << 14): /* unfinished_FPop */ @@ -2213,17 +2228,20 @@ void do_fpother(struct pt_regs *regs) break; } if (ret) - return; + goto out; do_fpe_common(regs); +out: + exception_exit(prev_state); } void do_tof(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs, 0, 0x26, SIGEMT) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) die_if_kernel("Penguin overflow trap from kernel mode", regs); @@ -2237,15 +2255,18 @@ void do_tof(struct pt_regs *regs) info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGEMT, &info, current); +out: + exception_exit(prev_state); } void do_div0(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "integer division by zero", regs, 0, 0x28, SIGFPE) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) die_if_kernel("TL0: Kernel divide by zero.", regs); @@ -2259,6 +2280,8 @@ void do_div0(struct pt_regs *regs) info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGFPE, &info, current); +out: + exception_exit(prev_state); } static void instruction_dump(unsigned int *pc) @@ -2415,6 +2438,7 @@ extern int handle_ldf_stq(u32 insn, struct pt_regs *regs); void do_illegal_instruction(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = 
regs->tstate; u32 insn; @@ -2422,7 +2446,7 @@ void do_illegal_instruction(struct pt_regs *regs) if (notify_die(DIE_TRAP, "illegal instruction", regs, 0, 0x10, SIGILL) == NOTIFY_STOP) - return; + goto out; if (tstate & TSTATE_PRIV) die_if_kernel("Kernel illegal instruction", regs); @@ -2431,14 +2455,14 @@ void do_illegal_instruction(struct pt_regs *regs) if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ { if (handle_popc(insn, regs)) - return; + goto out; } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { if (handle_ldf_stq(insn, regs)) - return; + goto out; } else if (tlb_type == hypervisor) { if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) { if (!vis_emul(regs, insn)) - return; + goto out; } else { struct fpustate *f = FPUSTATE; @@ -2448,7 +2472,7 @@ void do_illegal_instruction(struct pt_regs *regs) * Trap in the %fsr to unimplemented_FPop. */ if (do_mathemu(regs, f, true)) - return; + goto out; } } } @@ -2458,21 +2482,24 @@ void do_illegal_instruction(struct pt_regs *regs) info.si_addr = (void __user *)pc; info.si_trapno = 0; force_sig_info(SIGILL, &info, current); +out: + exception_exit(prev_state); } extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) { kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); - return; + goto out; } info.si_signo = SIGBUS; info.si_errno = 0; @@ -2480,6 +2507,8 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); +out: + exception_exit(prev_state); } void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) @@ -2504,11 +2533,12 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c void do_privop(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "privileged operation", regs, 0, 0x11, SIGILL) == NOTIFY_STOP) - return; + goto out; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; @@ -2520,6 +2550,8 @@ void do_privop(struct pt_regs *regs) info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGILL, &info, current); +out: + exception_exit(prev_state); } void do_privact(struct pt_regs *regs) @@ -2530,99 +2562,116 @@ void do_privact(struct pt_regs *regs) /* Trap level 1 stuff or other traps we should never see... 
*/ void do_cee(struct pt_regs *regs) { + exception_enter(); die_if_kernel("TL0: Cache Error Exception", regs); } void do_cee_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Cache Error Exception", regs); } void do_dae_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Data Access Exception", regs); } void do_iae_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Instruction Access Exception", regs); } void do_div0_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: DIV0 Exception", regs); } void do_fpdis_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Disabled", regs); } void do_fpieee_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU IEEE Exception", regs); } void do_fpother_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Other Exception", regs); } void do_ill_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Illegal Instruction Exception", regs); } void do_irq_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: IRQ Exception", regs); } void do_lddfmna_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: LDDF Exception", regs); } void do_stdfmna_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: STDF Exception", regs); } void do_paw(struct pt_regs *regs) { + exception_enter(); die_if_kernel("TL0: Phys Watchpoint Exception", regs); } void do_paw_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Phys Watchpoint Exception", regs); } void do_vaw(struct pt_regs *regs) { + exception_enter(); die_if_kernel("TL0: Virt Watchpoint Exception", regs); } void do_vaw_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Virt Watchpoint Exception", regs); } void do_tof_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Tag Overflow Exception", regs); } diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index a313e4a9399..14158d40ba7 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -75,7 +75,7 @@ tsb_miss_page_table_walk: mov 512, %g7 andn %g5, 0x7, %g5 sllx %g7, %g6, %g7 - srlx %g4, HPAGE_SHIFT, %g6 + srlx %g4, REAL_HPAGE_SHIFT, %g6 sub %g7, 1, %g7 and %g6, %g7, %g6 sllx %g6, 4, %g6 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index 8201c25e766..3c1a7cb3157 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -21,9 +21,12 @@ #include <linux/bitops.h> #include <linux/perf_event.h> #include <linux/ratelimit.h> +#include <linux/context_tracking.h> #include <asm/fpumacro.h> #include <asm/cacheflush.h> +#include "entry.h" + enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ @@ -418,9 +421,6 @@ int 
handle_popc(u32 insn, struct pt_regs *regs) extern void do_fpother(struct pt_regs *regs); extern void do_privact(struct pt_regs *regs); -extern void spitfire_data_access_exception(struct pt_regs *regs, - unsigned long sfsr, - unsigned long sfar); extern void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx); @@ -578,6 +578,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { + enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; @@ -632,13 +633,16 @@ daex: sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); - return; + goto out; } advance(regs); +out: + exception_exit(prev_state); } void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { + enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; @@ -680,7 +684,9 @@ daex: sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); - return; + goto out; } advance(regs); +out: + exception_exit(prev_state); } diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 0bacceb1915..932ff90fd76 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -122,6 +122,11 @@ SECTIONS *(.swapper_4m_tsb_phys_patch) __swapper_4m_tsb_phys_patch_end = .; } + .page_offset_shift_patch : { + __page_offset_shift_patch = .; + *(.page_offset_shift_patch) + __page_offset_shift_patch_end = .; + } .popc_3insn_patch : { __popc_3insn_patch = .; *(.popc_3insn_patch) diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S index 77e531f6c2a..46272dfc26e 100644 --- a/arch/sparc/lib/clear_page.S +++ b/arch/sparc/lib/clear_page.S @@ -37,10 +37,10 @@ _clear_page: /* %o0=dest */ .globl clear_user_page clear_user_page: /* %o0=dest, %o1=vaddr */ lduw [%g6 + TI_PRE_COUNT], %o2 - sethi %uhi(PAGE_OFFSET), %g2 + sethi %hi(PAGE_OFFSET), %g2 sethi %hi(PAGE_SIZE), %o4 - sllx %g2, 32, %g2 + ldx [%g2 + %lo(PAGE_OFFSET)], %g2 sethi %hi(PAGE_KERNEL_LOCKED), %g3 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S index 4d2df328e51..dd16c61f326 100644 --- a/arch/sparc/lib/copy_page.S +++ b/arch/sparc/lib/copy_page.S @@ -46,10 +46,10 @@ .type copy_user_page,#function copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ lduw [%g6 + TI_PRE_COUNT], %o4 - sethi %uhi(PAGE_OFFSET), %g2 + sethi %hi(PAGE_OFFSET), %g2 sethi %hi(PAGE_SIZE), %o3 - sllx %g2, 32, %g2 + ldx [%g2 + %lo(PAGE_OFFSET)], %g2 sethi %hi(PAGE_KERNEL_LOCKED), %g3 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 2ebec263d68..69bb818fdd7 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -21,6 +21,7 @@ #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/percpu.h> +#include <linux/context_tracking.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -272,6 +273,7 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned int insn = 0; @@ -282,7 +284,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) 
fault_code = get_thread_fault_code(); if (notify_page_fault(regs)) - return; + goto exit_exception; si_code = SEGV_MAPERR; address = current_thread_info()->fault_address; @@ -313,7 +315,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) /* Valid, no problems... */ } else { bad_kernel_pc(regs, address); - return; + goto exit_exception; } } else flags |= FAULT_FLAG_USER; @@ -430,7 +432,7 @@ good_area: fault = handle_mm_fault(mm, vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) - return; + goto exit_exception; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) @@ -482,6 +484,8 @@ good_area: } #endif +exit_exception: + exception_exit(prev_state); return; /* @@ -494,7 +498,7 @@ bad_area: handle_kernel_fault: do_kernel_fault(regs, si_code, fault_code, insn, address); - return; + goto exit_exception; /* * We ran out of memory, or some other thing happened to us that made @@ -505,7 +509,7 @@ out_of_memory: up_read(&mm->mmap_sem); if (!(regs->tstate & TSTATE_PRIV)) { pagefault_out_of_memory(); - return; + goto exit_exception; } goto handle_kernel_fault; diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 01ee23dd724..c4d3da68b80 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -71,13 +71,12 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, int *nr) { struct page *head, *page, *tail; - u32 mask; int refs; - mask = PMD_HUGE_PRESENT; - if (write) - mask |= PMD_HUGE_WRITE; - if ((pmd_val(pmd) & mask) != mask) + if (!pmd_large(pmd)) + return 0; + + if (write && !pmd_write(pmd)) return 0; refs = 0; diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 96399646570..30963178d7e 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -21,8 +21,6 @@ /* Slightly simplified from the non-hugepage variant because by * definition we don't have to worry about any page coloring stuff */ -#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) -#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, unsigned long addr, diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index ed82edad1a3..6b643790e4f 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -354,7 +354,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (mm->context.huge_pte_count && is_hugetlb_pte(pte)) - __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, address, pte_val(pte)); else #endif @@ -1557,6 +1557,96 @@ unsigned long __init find_ecache_flush_span(unsigned long size) return ~0UL; } +unsigned long PAGE_OFFSET; +EXPORT_SYMBOL(PAGE_OFFSET); + +static void __init page_offset_shift_patch_one(unsigned int *insn, unsigned long phys_bits) +{ + unsigned long final_shift; + unsigned int val = *insn; + unsigned int cnt; + + /* We are patching in ilog2(max_supported_phys_address), and + * we are doing so in a manner similar to a relocation addend. + * That is, we are adding the shift value to whatever value + * is in the shift instruction count field already. + */ + cnt = (val & 0x3f); + val &= ~0x3f; + + /* If we are trying to shift >= 64 bits, clear the destination + * register. This can happen when phys_bits ends up being equal + * to MAX_PHYS_ADDRESS_BITS. 
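A stand-alone illustration of that patching arithmetic, assuming ILOG2_256MB is 28 (256MB is 2^28): the patch adds 64 - max_phys_bits to whatever shift count the instruction already carries, as computed a few lines below by final_shift = cnt + (64 - phys_bits). With max_phys_bits equal to 43 this reproduces exactly the constants that used to be hard-coded in the kpte bitmap lookup (sllx by 21, srlx by 21 + 28).

    #include <stdio.h>

    int main(void)
    {
            unsigned long phys_bits[] = { 43, 47 };  /* old fixed layout vs. a 47-bit cpu */
            int i;

            for (i = 0; i < 2; i++) {
                    unsigned long addend = 64 - phys_bits[i];

                    /* sllx starts from count 0, srlx from ILOG2_256MB (28) */
                    printf("max_phys_bits=%lu: sllx count %lu, srlx count %lu\n",
                           phys_bits[i], addend, 28 + addend);
            }
            return 0;
    }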
+ */ + final_shift = (cnt + (64 - phys_bits)); + if (final_shift >= 64) { + unsigned int rd = (val >> 25) & 0x1f; + + val = 0x80100000 | (rd << 25); + } else { + val |= final_shift; + } + *insn = val; + + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (insn)); +} + +static void __init page_offset_shift_patch(unsigned long phys_bits) +{ + extern unsigned int __page_offset_shift_patch; + extern unsigned int __page_offset_shift_patch_end; + unsigned int *p; + + p = &__page_offset_shift_patch; + while (p < &__page_offset_shift_patch_end) { + unsigned int *insn = (unsigned int *)(unsigned long)*p; + + page_offset_shift_patch_one(insn, phys_bits); + + p++; + } +} + +static void __init setup_page_offset(void) +{ + unsigned long max_phys_bits = 40; + + if (tlb_type == cheetah || tlb_type == cheetah_plus) { + max_phys_bits = 42; + } else if (tlb_type == hypervisor) { + switch (sun4v_chip_type) { + case SUN4V_CHIP_NIAGARA1: + case SUN4V_CHIP_NIAGARA2: + max_phys_bits = 39; + break; + case SUN4V_CHIP_NIAGARA3: + max_phys_bits = 43; + break; + case SUN4V_CHIP_NIAGARA4: + case SUN4V_CHIP_NIAGARA5: + case SUN4V_CHIP_SPARC64X: + default: + max_phys_bits = 47; + break; + } + } + + if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) { + prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n", + max_phys_bits); + prom_halt(); + } + + PAGE_OFFSET = PAGE_OFFSET_BY_BITS(max_phys_bits); + + pr_info("PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", + PAGE_OFFSET, max_phys_bits); + + page_offset_shift_patch(max_phys_bits); +} + static void __init tsb_phys_patch(void) { struct tsb_ldquad_phys_patch_entry *pquad; @@ -1722,7 +1812,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) #ifndef CONFIG_DEBUG_PAGEALLOC if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ - 0xfffff80000000000UL; + PAGE_OFFSET; kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); } else { @@ -1731,7 +1821,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ - 0xfffff80000000000UL; + PAGE_OFFSET; kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); } else { @@ -1740,7 +1830,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ - 0xfffff80000000000UL; + PAGE_OFFSET; kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); } else { @@ -1752,7 +1842,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) /* paging_init() sets up the page tables */ static unsigned long last_valid_pfn; -pgd_t swapper_pg_dir[2048]; +pgd_t swapper_pg_dir[PTRS_PER_PGD]; static void sun4u_pgprot_init(void); static void sun4v_pgprot_init(void); @@ -1763,6 +1853,8 @@ void __init paging_init(void) unsigned long real_end, i; int node; + setup_page_offset(); + /* These build time checkes make sure that the dcache_dirty_cpu() * page->flags usage will work. 
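The new PAGE_OFFSET variable introduced above is computed as PAGE_OFFSET_BY_BITS(max_phys_bits). That macro is not part of this hunk, but assuming it expands to -(1UL << bits), so that the linear mapping occupies the top 2^bits bytes of the address space, the arithmetic below shows why the kern_linear_pte_xor[] initializers can drop the hard-coded 0xfffff80000000000UL: that constant is simply the 43-bit case.

    #include <stdio.h>

    /* Assumption: PAGE_OFFSET_BY_BITS(x) == -(1UL << (x)). */
    #define PAGE_OFFSET_BY_BITS(x)  (-(1UL << (x)))

    int main(void)
    {
            printf("43 bits -> %#lx\n", PAGE_OFFSET_BY_BITS(43)); /* 0xfffff80000000000 */
            printf("47 bits -> %#lx\n", PAGE_OFFSET_BY_BITS(47)); /* 0xffff800000000000 */
            return 0;
    }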
* @@ -2261,10 +2353,10 @@ static void __init sun4u_pgprot_init(void) __ACCESS_BITS_4U | _PAGE_E_4U); #ifdef CONFIG_DEBUG_PAGEALLOC - kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL; + kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ - 0xfffff80000000000UL; + PAGE_OFFSET; #endif kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); @@ -2308,10 +2400,10 @@ static void __init sun4v_pgprot_init(void) _PAGE_CACHE = _PAGE_CACHE_4V; #ifdef CONFIG_DEBUG_PAGEALLOC - kern_linear_pte_xor[0] = _PAGE_VALID ^ 0xfffff80000000000UL; + kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ - 0xfffff80000000000UL; + PAGE_OFFSET; #endif kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | _PAGE_W_4V); @@ -2455,53 +2547,13 @@ void __flush_tlb_all(void) : : "r" (pstate)); } -static pte_t *get_from_cache(struct mm_struct *mm) -{ - struct page *page; - pte_t *ret; - - spin_lock(&mm->page_table_lock); - page = mm->context.pgtable_page; - ret = NULL; - if (page) { - void *p = page_address(page); - - mm->context.pgtable_page = NULL; - - ret = (pte_t *) (p + (PAGE_SIZE / 2)); - } - spin_unlock(&mm->page_table_lock); - - return ret; -} - -static struct page *__alloc_for_cache(struct mm_struct *mm) -{ - struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | - __GFP_REPEAT | __GFP_ZERO); - - if (page) { - spin_lock(&mm->page_table_lock); - if (!mm->context.pgtable_page) { - atomic_set(&page->_count, 2); - mm->context.pgtable_page = page; - } - spin_unlock(&mm->page_table_lock); - } - return page; -} - pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - struct page *page; - pte_t *pte; - - pte = get_from_cache(mm); - if (pte) - return pte; + struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | + __GFP_REPEAT | __GFP_ZERO); + pte_t *pte = NULL; - page = __alloc_for_cache(mm); if (page) pte = (pte_t *) page_address(page); @@ -2511,36 +2563,30 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) { - struct page *page; - pte_t *pte; - - pte = get_from_cache(mm); - if (pte) - return pte; + struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | + __GFP_REPEAT | __GFP_ZERO); + pte_t *pte = NULL; - page = __alloc_for_cache(mm); - if (page) { - pgtable_page_ctor(page); - pte = (pte_t *) page_address(page); + if (!page) + return NULL; + if (!pgtable_page_ctor(page)) { + free_hot_cold_page(page, 0); + return NULL; } - - return pte; + return (pte_t *) page_address(page); } void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - struct page *page = virt_to_page(pte); - if (put_page_testzero(page)) - free_hot_cold_page(page, 0); + free_page((unsigned long)pte); } static void __pte_free(pgtable_t pte) { struct page *page = virt_to_page(pte); - if (put_page_testzero(page)) { - pgtable_page_dtor(page); - free_hot_cold_page(page, 0); - } + + pgtable_page_dtor(page); + __free_page(page); } void pte_free(struct mm_struct *mm, pgtable_t pte) @@ -2557,124 +2603,27 @@ void pgtable_free(void *table, bool is_page) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot, bool for_modify) -{ - if (pgprot_val(pgprot) & _PAGE_VALID) - pmd_val(pmd) |= PMD_HUGE_PRESENT; - if (tlb_type == hypervisor) { - if (pgprot_val(pgprot) & _PAGE_WRITE_4V) - pmd_val(pmd) |= PMD_HUGE_WRITE; - if (pgprot_val(pgprot) & _PAGE_EXEC_4V) - 
pmd_val(pmd) |= PMD_HUGE_EXEC; - - if (!for_modify) { - if (pgprot_val(pgprot) & _PAGE_ACCESSED_4V) - pmd_val(pmd) |= PMD_HUGE_ACCESSED; - if (pgprot_val(pgprot) & _PAGE_MODIFIED_4V) - pmd_val(pmd) |= PMD_HUGE_DIRTY; - } - } else { - if (pgprot_val(pgprot) & _PAGE_WRITE_4U) - pmd_val(pmd) |= PMD_HUGE_WRITE; - if (pgprot_val(pgprot) & _PAGE_EXEC_4U) - pmd_val(pmd) |= PMD_HUGE_EXEC; - - if (!for_modify) { - if (pgprot_val(pgprot) & _PAGE_ACCESSED_4U) - pmd_val(pmd) |= PMD_HUGE_ACCESSED; - if (pgprot_val(pgprot) & _PAGE_MODIFIED_4U) - pmd_val(pmd) |= PMD_HUGE_DIRTY; - } - } - - return pmd; -} - -pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) -{ - pmd_t pmd; - - pmd_val(pmd) = (page_nr << ((PAGE_SHIFT - PMD_PADDR_SHIFT))); - pmd_val(pmd) |= PMD_ISHUGE; - pmd = pmd_set_protbits(pmd, pgprot, false); - return pmd; -} - -pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) -{ - pmd_val(pmd) &= ~(PMD_HUGE_PRESENT | - PMD_HUGE_WRITE | - PMD_HUGE_EXEC); - pmd = pmd_set_protbits(pmd, newprot, true); - return pmd; -} - -pgprot_t pmd_pgprot(pmd_t entry) -{ - unsigned long pte = 0; - - if (pmd_val(entry) & PMD_HUGE_PRESENT) - pte |= _PAGE_VALID; - - if (tlb_type == hypervisor) { - if (pmd_val(entry) & PMD_HUGE_PRESENT) - pte |= _PAGE_PRESENT_4V; - if (pmd_val(entry) & PMD_HUGE_EXEC) - pte |= _PAGE_EXEC_4V; - if (pmd_val(entry) & PMD_HUGE_WRITE) - pte |= _PAGE_W_4V; - if (pmd_val(entry) & PMD_HUGE_ACCESSED) - pte |= _PAGE_ACCESSED_4V; - if (pmd_val(entry) & PMD_HUGE_DIRTY) - pte |= _PAGE_MODIFIED_4V; - pte |= _PAGE_CP_4V|_PAGE_CV_4V; - } else { - if (pmd_val(entry) & PMD_HUGE_PRESENT) - pte |= _PAGE_PRESENT_4U; - if (pmd_val(entry) & PMD_HUGE_EXEC) - pte |= _PAGE_EXEC_4U; - if (pmd_val(entry) & PMD_HUGE_WRITE) - pte |= _PAGE_W_4U; - if (pmd_val(entry) & PMD_HUGE_ACCESSED) - pte |= _PAGE_ACCESSED_4U; - if (pmd_val(entry) & PMD_HUGE_DIRTY) - pte |= _PAGE_MODIFIED_4U; - pte |= _PAGE_CP_4U|_PAGE_CV_4U; - } - - return __pgprot(pte); -} - void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd) { unsigned long pte, flags; struct mm_struct *mm; pmd_t entry = *pmd; - pgprot_t prot; if (!pmd_large(entry) || !pmd_young(entry)) return; - pte = (pmd_val(entry) & ~PMD_HUGE_PROTBITS); - pte <<= PMD_PADDR_SHIFT; - pte |= _PAGE_VALID; - - prot = pmd_pgprot(entry); - - if (tlb_type == hypervisor) - pgprot_val(prot) |= _PAGE_SZHUGE_4V; - else - pgprot_val(prot) |= _PAGE_SZHUGE_4U; + pte = pmd_val(entry); - pte |= pgprot_val(prot); + /* We are fabricating 8MB pages using 4MB real hw pages. */ + pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); mm = vma->vm_mm; spin_lock_irqsave(&mm->context.lock, flags); if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) - __update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT, + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, addr, pte); spin_unlock_irqrestore(&mm->context.lock, flags); diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h index 0661aa606de..5d3782deb40 100644 --- a/arch/sparc/mm/init_64.h +++ b/arch/sparc/mm/init_64.h @@ -1,11 +1,13 @@ #ifndef _SPARC64_MM_INIT_H #define _SPARC64_MM_INIT_H +#include <asm/page.h> + /* Most of the symbols in this file are defined in init.c and * marked non-static so that assembler code can get at them. 
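The update_mmu_cache_pmd() change above relies on sparc64 building its 8MB software huge pages out of two 4MB hardware mappings. Assuming HPAGE_SHIFT is 23 and REAL_HPAGE_SHIFT is 22, bit 22 of the faulting address selects which 4MB half the TSB entry must describe, which is what pte |= (addr & (1UL << REAL_HPAGE_SHIFT)) folds in; the matching set_pmd_at() change queues TLB flushes for both addr and addr + REAL_HPAGE_SIZE for the same reason. A quick check of the selector bit:

    #include <stdio.h>

    int main(void)
    {
            unsigned long hpage = 0x100000000UL;     /* an 8MB-aligned VA (example) */
            unsigned long lo = hpage + 0x100000;     /* lands in the first 4MB half  */
            unsigned long hi = hpage + 0x500000;     /* lands in the second 4MB half */

            printf("first half selector:  %#lx\n", lo & (1UL << 22));  /* 0        */
            printf("second half selector: %#lx\n", hi & (1UL << 22));  /* 0x400000 */
            return 0;
    }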
*/ -#define MAX_PHYS_ADDRESS (1UL << 41UL) +#define MAX_PHYS_ADDRESS (1UL << MAX_PHYS_ADDRESS_BITS) #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) #define KPTE_BITMAP_BYTES \ ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 4) diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 5d721df48a7..869023abe5a 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -345,7 +345,10 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) return NULL; page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); - pgtable_page_ctor(page); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } return page; } diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 7a91f288c70..ad3bf4b4324 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -161,8 +161,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, if (mm == &init_mm) return; - if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) { - if (pmd_val(pmd) & PMD_ISHUGE) + if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { + if (pmd_val(pmd) & _PAGE_PMD_HUGE) mm->context.huge_pte_count++; else mm->context.huge_pte_count--; @@ -178,13 +178,16 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, } if (!pmd_none(orig)) { - bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0); + pte_t orig_pte = __pte(pmd_val(orig)); + bool exec = pte_exec(orig_pte); addr &= HPAGE_MASK; - if (pmd_val(orig) & PMD_ISHUGE) + if (pmd_trans_huge(orig)) { tlb_batch_add_one(mm, addr, exec); - else + tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec); + } else { tlb_batch_pmd_scan(mm, addr, orig, exec); + } } } @@ -196,11 +199,11 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, assert_spin_locked(&mm->page_table_lock); /* FIFO */ - if (!mm->pmd_huge_pte) + if (!pmd_huge_pte(mm, pmdp)) INIT_LIST_HEAD(lh); else - list_add(lh, (struct list_head *) mm->pmd_huge_pte); - mm->pmd_huge_pte = pgtable; + list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); + pmd_huge_pte(mm, pmdp) = pgtable; } pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) @@ -211,12 +214,12 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) assert_spin_locked(&mm->page_table_lock); /* FIFO */ - pgtable = mm->pmd_huge_pte; + pgtable = pmd_huge_pte(mm, pmdp); lh = (struct list_head *) pgtable; if (list_empty(lh)) - mm->pmd_huge_pte = NULL; + pmd_huge_pte(mm, pmdp) = NULL; else { - mm->pmd_huge_pte = (pgtable_t) lh->next; + pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; list_del(lh); } pte_val(pgtable[0]) = 0; diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 2cc3bce5ee9..3b3a360b429 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -87,7 +87,7 @@ void flush_tsb_user(struct tlb_batch *tb) nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries); + __flush_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries); } #endif spin_unlock_irqrestore(&mm->context.lock, flags); @@ -111,7 +111,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); - __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + __flush_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries); } #endif spin_unlock_irqrestore(&mm->context.lock, 
flags); @@ -472,8 +472,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) mm->context.huge_pte_count = 0; #endif - mm->context.pgtable_page = NULL; - /* copy_mm() copies over the parent's mm_struct before calling * us, so we need to zero out the TSB pointer or else tsb_grow() * will be confused and think there is an older TSB to free up. @@ -512,17 +510,10 @@ static void tsb_destroy_one(struct tsb_config *tp) void destroy_context(struct mm_struct *mm) { unsigned long flags, i; - struct page *page; for (i = 0; i < MM_NUM_TSBS; i++) tsb_destroy_one(&mm->context.tsb_block[i]); - page = mm->context.pgtable_page; - if (page && put_page_testzero(page)) { - pgtable_page_dtor(page); - free_hot_cold_page(page, 0); - } - spin_lock_irqsave(&ctx_alloc_lock, flags); if (CTX_VALID(mm->context)) { diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 432aa0cb1b3..b4f4733abc6 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -153,10 +153,10 @@ __spitfire_flush_tlb_mm_slow: .globl __flush_icache_page __flush_icache_page: /* %o0 = phys_page */ srlx %o0, PAGE_SHIFT, %o0 - sethi %uhi(PAGE_OFFSET), %g1 + sethi %hi(PAGE_OFFSET), %g1 sllx %o0, PAGE_SHIFT, %o0 sethi %hi(PAGE_SIZE), %g2 - sllx %g1, 32, %g1 + ldx [%g1 + %lo(PAGE_OFFSET)], %g1 add %o0, %g1, %o0 1: subcc %g2, 32, %g2 bne,pt %icc, 1b @@ -178,8 +178,8 @@ __flush_icache_page: /* %o0 = phys_page */ .align 64 .globl __flush_dcache_page __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ - sethi %uhi(PAGE_OFFSET), %g1 - sllx %g1, 32, %g1 + sethi %hi(PAGE_OFFSET), %g1 + ldx [%g1 + %lo(PAGE_OFFSET)], %g1 sub %o0, %g1, %o0 ! physical address srlx %o0, 11, %o0 ! make D-cache TAG sethi %hi(1 << 14), %o2 ! D-cache size @@ -287,8 +287,8 @@ __cheetah_flush_tlb_pending: /* 27 insns */ #ifdef DCACHE_ALIASING_POSSIBLE __cheetah_flush_dcache_page: /* 11 insns */ - sethi %uhi(PAGE_OFFSET), %g1 - sllx %g1, 32, %g1 + sethi %hi(PAGE_OFFSET), %g1 + ldx [%g1 + %lo(PAGE_OFFSET)], %g1 sub %o0, %g1, %o0 sethi %hi(PAGE_SIZE), %o4 1: subcc %o4, (1 << 5), %o4 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index d45a2c48f18..b3692ce78f9 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -8,7 +8,6 @@ config TILE select HAVE_KVM if !TILEGX select GENERIC_FIND_FIRST_BIT select SYSCTL_EXCEPTION_TRACE - select USE_GENERIC_SMP_HELPERS select CC_OPTIMIZE_FOR_SIZE select HAVE_DEBUG_KMEMLEAK select GENERIC_IRQ_PROBE diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 85e00b2f39b..19c04b5ce40 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -49,7 +49,7 @@ struct compat_rt_sigframe { struct compat_ucontext uc; }; -int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from) +int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from) { int err; diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index b7180e6e900..c45593db771 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -251,15 +251,12 @@ static void fixup_read_and_payload_sizes(void) /* Scan for the smallest maximum payload size. 
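For the tile PCI change that follows: pci_dev->pcie_mpss caches the Max_Payload_Size_Supported field from DEVCAP as an encoded exponent, so the loop can compare the cached values directly instead of re-reading the capability register. The encoding is monotonic, and the byte value is recoverable as 128 << pcie_mpss. A small illustrative helper, not part of the patch:

    #include <linux/pci.h>

    /* Illustrative only: translate the encoded MPS-supported value into bytes. */
    static inline int mpss_to_bytes(struct pci_dev *dev)
    {
            return 128 << dev->pcie_mpss;
    }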
*/ for_each_pci_dev(dev) { u32 devcap; - int max_payload; if (!pci_is_pcie(dev)) continue; - pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &devcap); - max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD; - if (max_payload < smallest_max_payload) - smallest_max_payload = max_payload; + if (dev->pcie_mpss < smallest_max_payload) + smallest_max_payload = dev->pcie_mpss; } /* Now, set the max_payload_size for all devices to that value. */ diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 4fd9ec0b58e..5e86eac4bfa 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -241,6 +241,11 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address, if (p == NULL) return NULL; + if (!pgtable_page_ctor(p)) { + __free_pages(p, L2_USER_PGTABLE_ORDER); + return NULL; + } + /* * Make every page have a page_count() of one, not just the first. * We don't use __GFP_COMP since it doesn't look like it works @@ -251,7 +256,6 @@ struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address, inc_zone_page_state(p+i, NR_PAGETABLE); } - pgtable_page_ctor(p); return p; } diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 7ddb64baf32..8636e905426 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -279,8 +279,12 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *pte; pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); - if (pte) - pgtable_page_ctor(pte); + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 82cdd8906f3..a7ba27b2752 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -1,5 +1,6 @@ config UNICORE32 def_bool y + select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_MEMBLOCK select HAVE_GENERIC_DMA_COHERENT select HAVE_DMA_ATTRS diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h index 0213e373a89..2e02d1356fd 100644 --- a/arch/unicore32/include/asm/pgalloc.h +++ b/arch/unicore32/include/asm/pgalloc.h @@ -51,12 +51,14 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) struct page *pte; pte = alloc_pages(PGALLOC_GFP, 0); - if (pte) { - if (!PageHighMem(pte)) { - void *page = page_address(pte); - clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t)); - } - pgtable_page_ctor(pte); + if (!pte) + return NULL; + if (!PageHighMem(pte)) { + void *page = page_address(pte); + clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t)); + } + if (!pgtable_page_ctor(pte)) { + __free_page(pte); } return pte; diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c index 181108b8ecc..0c6618e7189 100644 --- a/arch/unicore32/kernel/puv3-nb0916.c +++ b/arch/unicore32/kernel/puv3-nb0916.c @@ -54,6 +54,7 @@ static struct platform_pwm_backlight_data nb0916_backlight_data = { .max_brightness = 100, .dft_brightness = 100, .pwm_period_ns = 70 * 1024, + .enable_gpio = -1, }; static struct gpio_keys_button nb0916_gpio_keys[] = { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 725e1573ea8..e903c71f7e6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -22,6 +22,7 @@ config X86_64 config X86 def_bool y select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_AOUT if X86_32 select HAVE_UNSTABLE_SCHED_CLOCK select ARCH_SUPPORTS_NUMA_BALANCING @@ -90,7 +91,6 @@ config X86 select GENERIC_IRQ_SHOW select GENERIC_CLOCKEVENTS_MIN_ADJUST select IRQ_FORCED_THREADING - select 
USE_GENERIC_SMP_HELPERS if SMP select HAVE_BPF_JIT if X86_64 select HAVE_ARCH_TRANSPARENT_HUGEPAGE select CLKEVT_I8253 @@ -255,10 +255,6 @@ config ARCH_HWEIGHT_CFLAGS default "-fcall-saved-ecx -fcall-saved-edx" if X86_32 default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64 -config ARCH_CPU_PROBE_RELEASE - def_bool y - depends on HOTPLUG_CPU - config ARCH_SUPPORTS_UPROBES def_bool y @@ -639,10 +635,10 @@ config PARAVIRT_SPINLOCKS spinlock implementation with something virtualization-friendly (for example, block the virtual CPU rather than spinning). - Unfortunately the downside is an up to 5% performance hit on - native kernels, with various workloads. + It has a minimal impact on native kernels and gives a nice performance + benefit on paravirtualized KVM / Xen kernels. - If you are unsure how to answer this question, answer N. + If you are unsure how to answer this question, answer Y. source "arch/x86/xen/Kconfig" @@ -1889,6 +1885,10 @@ config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA +config ARCH_ENABLE_SPLIT_PMD_PTLOCK + def_bool y + depends on X86_64 || X86_PAE + menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index bae3aba95b1..d21ff89207c 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -25,6 +25,7 @@ #include <linux/personality.h> #include <linux/init.h> #include <linux/jiffies.h> +#include <linux/perf_event.h> #include <asm/uaccess.h> #include <asm/pgalloc.h> @@ -33,14 +34,18 @@ #include <asm/ia32.h> #undef WARN_OLD -#undef CORE_DUMP /* definitely broken */ static int load_aout_binary(struct linux_binprm *); static int load_aout_library(struct file *); -#ifdef CORE_DUMP -static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, - unsigned long limit); +#ifdef CONFIG_COREDUMP +static int aout_core_dump(struct coredump_params *); + +static unsigned long get_dr(int n) +{ + struct perf_event *bp = current->thread.ptrace_bps[n]; + return bp ? bp->hw.info.address : 0; +} /* * fill in the user structure for a core dump.. @@ -48,6 +53,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, static void dump_thread32(struct pt_regs *regs, struct user32 *dump) { u32 fs, gs; + memset(dump, 0, sizeof(*dump)); /* changed the size calculations - should hopefully work better. 
lbt */ dump->magic = CMAGIC; @@ -57,15 +63,12 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump) dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; dump->u_dsize -= dump->u_tsize; - dump->u_ssize = 0; - dump->u_debugreg[0] = current->thread.debugreg0; - dump->u_debugreg[1] = current->thread.debugreg1; - dump->u_debugreg[2] = current->thread.debugreg2; - dump->u_debugreg[3] = current->thread.debugreg3; - dump->u_debugreg[4] = 0; - dump->u_debugreg[5] = 0; + dump->u_debugreg[0] = get_dr(0); + dump->u_debugreg[1] = get_dr(1); + dump->u_debugreg[2] = get_dr(2); + dump->u_debugreg[3] = get_dr(3); dump->u_debugreg[6] = current->thread.debugreg6; - dump->u_debugreg[7] = current->thread.debugreg7; + dump->u_debugreg[7] = current->thread.ptrace_dr7; if (dump->start_stack < 0xc0000000) { unsigned long tmp; @@ -74,24 +77,24 @@ static void dump_thread32(struct pt_regs *regs, struct user32 *dump) dump->u_ssize = tmp >> PAGE_SHIFT; } - dump->regs.bx = regs->bx; - dump->regs.cx = regs->cx; - dump->regs.dx = regs->dx; - dump->regs.si = regs->si; - dump->regs.di = regs->di; - dump->regs.bp = regs->bp; - dump->regs.ax = regs->ax; + dump->regs.ebx = regs->bx; + dump->regs.ecx = regs->cx; + dump->regs.edx = regs->dx; + dump->regs.esi = regs->si; + dump->regs.edi = regs->di; + dump->regs.ebp = regs->bp; + dump->regs.eax = regs->ax; dump->regs.ds = current->thread.ds; dump->regs.es = current->thread.es; savesegment(fs, fs); dump->regs.fs = fs; savesegment(gs, gs); dump->regs.gs = gs; - dump->regs.orig_ax = regs->orig_ax; - dump->regs.ip = regs->ip; + dump->regs.orig_eax = regs->orig_ax; + dump->regs.eip = regs->ip; dump->regs.cs = regs->cs; - dump->regs.flags = regs->flags; - dump->regs.sp = regs->sp; + dump->regs.eflags = regs->flags; + dump->regs.esp = regs->sp; dump->regs.ss = regs->ss; #if 1 /* FIXME */ @@ -107,7 +110,7 @@ static struct linux_binfmt aout_format = { .module = THIS_MODULE, .load_binary = load_aout_binary, .load_shlib = load_aout_library, -#ifdef CORE_DUMP +#ifdef CONFIG_COREDUMP .core_dump = aout_core_dump, #endif .min_coredump = PAGE_SIZE @@ -122,7 +125,7 @@ static void set_brk(unsigned long start, unsigned long end) vm_brk(start, end - start); } -#ifdef CORE_DUMP +#ifdef CONFIG_COREDUMP /* * These are the only things you should do on a core-file: use only these * macros to write out all the necessary info. @@ -130,15 +133,7 @@ static void set_brk(unsigned long start, unsigned long end) #include <linux/coredump.h> -#define DUMP_WRITE(addr, nr) \ - if (!dump_write(file, (void *)(addr), (nr))) \ - goto end_coredump; - -#define DUMP_SEEK(offset) \ - if (!dump_seek(file, offset)) \ - goto end_coredump; - -#define START_DATA() (u.u_tsize << PAGE_SHIFT) +#define START_DATA(u) (u.u_tsize << PAGE_SHIFT) #define START_STACK(u) (u.start_stack) /* @@ -151,8 +146,7 @@ static void set_brk(unsigned long start, unsigned long end) * dumping of the process results in another error.. 
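The conversion that follows moves the a.out dumper from the old DUMP_WRITE()/DUMP_SEEK() macros, which wrote through an explicit file pointer and limit, to the coredump_params interface, where dump_emit() and dump_skip() carry the file, offset and rlimit state and return 0 on failure. A minimal sketch of a ->core_dump() method in that style (the header structure and magic value are made up):

    #include <linux/binfmts.h>
    #include <linux/coredump.h>
    #include <linux/mm.h>

    struct example_header {                 /* hypothetical dump header */
            unsigned long magic;
    };

    static int example_core_dump(struct coredump_params *cprm)
    {
            struct example_header hdr = { .magic = 0x4f55434c };

            if (!dump_emit(cprm, &hdr, sizeof(hdr)))
                    return 0;               /* nothing usable was dumped */
            if (!dump_skip(cprm, PAGE_SIZE - sizeof(hdr)))
                    return 0;
            return 1;                       /* has_dumped */
    }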
*/ -static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, - unsigned long limit) +static int aout_core_dump(struct coredump_params *cprm) { mm_segment_t fs; int has_dumped = 0; @@ -164,19 +158,19 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, has_dumped = 1; strncpy(dump.u_comm, current->comm, sizeof(current->comm)); dump.u_ar0 = offsetof(struct user32, regs); - dump.signal = signr; - dump_thread32(regs, &dump); + dump.signal = cprm->siginfo->si_signo; + dump_thread32(cprm->regs, &dump); /* * If the size of the dump file exceeds the rlimit, then see * what would happen if we wrote the stack, but not the data * area. */ - if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > limit) + if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) dump.u_dsize = 0; /* Make sure we have enough room to write the stack and data areas. */ - if ((dump.u_ssize + 1) * PAGE_SIZE > limit) + if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) dump.u_ssize = 0; /* make sure we actually have a data and stack area to dump */ @@ -190,22 +184,26 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, set_fs(KERNEL_DS); /* struct user */ - DUMP_WRITE(&dump, sizeof(dump)); + if (!dump_emit(cprm, &dump, sizeof(dump))) + goto end_coredump; /* Now dump all of the user data. Include malloced stuff as well */ - DUMP_SEEK(PAGE_SIZE - sizeof(dump)); + if (!dump_skip(cprm, PAGE_SIZE - sizeof(dump))) + goto end_coredump; /* now we start writing out the user space info */ set_fs(USER_DS); /* Dump the data area */ if (dump.u_dsize != 0) { dump_start = START_DATA(dump); dump_size = dump.u_dsize << PAGE_SHIFT; - DUMP_WRITE(dump_start, dump_size); + if (!dump_emit(cprm, (void *)dump_start, dump_size)) + goto end_coredump; } /* Now prepare to dump the stack area */ if (dump.u_ssize != 0) { dump_start = START_STACK(dump); dump_size = dump.u_ssize << PAGE_SHIFT; - DUMP_WRITE(dump_start, dump_size); + if (!dump_emit(cprm, (void *)dump_start, dump_size)) + goto end_coredump; } end_coredump: set_fs(fs); diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 665a730307f..220675795e0 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -34,7 +34,7 @@ #include <asm/sys_ia32.h> #include <asm/smap.h> -int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err = 0; bool ia32 = test_thread_flag(TIF_IA32); diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index b1977bad543..c8c1e700c26 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -26,6 +26,7 @@ #include <acpi/pdc_intel.h> #include <asm/numa.h> +#include <asm/fixmap.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/mpspec.h> diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index b90e5dfeee4..50d033a8947 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -327,10 +327,25 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate) { write_idt_entry(trace_idt_table, entry, gate); } + +static inline void _trace_set_gate(int gate, unsigned type, void *addr, + unsigned dpl, unsigned ist, unsigned seg) +{ + gate_desc s; + + pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg); + /* + * does not need to be atomic because it is only done once at + * setup time + */ + write_trace_idt_entry(gate, &s); +} #else static inline void 
write_trace_idt_entry(int entry, const gate_desc *gate) { } + +#define _trace_set_gate(gate, type, addr, dpl, ist, seg) #endif static inline void _set_gate(int gate, unsigned type, void *addr, @@ -353,11 +368,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr, * Pentium F0 0F bugfix can have resulted in the mapped * IDT being write-protected. */ -static inline void set_intr_gate(unsigned int n, void *addr) -{ - BUG_ON((unsigned)n > 0xFF); - _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); -} +#define set_intr_gate(n, addr) \ + do { \ + BUG_ON((unsigned)n > 0xFF); \ + _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \ + __KERNEL_CS); \ + _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\ + 0, 0, __KERNEL_CS); \ + } while (0) extern int first_system_vector; /* used_vectors is BITMAP for irq is not managed by percpu vector_irq */ @@ -374,37 +392,10 @@ static inline void alloc_system_vector(int vector) } } -#ifdef CONFIG_TRACING -static inline void trace_set_intr_gate(unsigned int gate, void *addr) -{ - gate_desc s; - - pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS); - write_idt_entry(trace_idt_table, gate, &s); -} - -static inline void __trace_alloc_intr_gate(unsigned int n, void *addr) -{ - trace_set_intr_gate(n, addr); -} -#else -static inline void trace_set_intr_gate(unsigned int gate, void *addr) -{ -} - -#define __trace_alloc_intr_gate(n, addr) -#endif - -static inline void __alloc_intr_gate(unsigned int n, void *addr) -{ - set_intr_gate(n, addr); -} - #define alloc_intr_gate(n, addr) \ do { \ alloc_system_vector(n); \ - __alloc_intr_gate(n, addr); \ - __trace_alloc_intr_gate(n, trace_##addr); \ + set_intr_gate(n, addr); \ } while (0) /* diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 4d0bda7b11e..c49a613c645 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -365,7 +365,7 @@ static inline void drop_fpu(struct task_struct *tsk) * Forget coprocessor state.. */ preempt_disable(); - tsk->fpu_counter = 0; + tsk->thread.fpu_counter = 0; __drop_fpu(tsk); clear_used_math(); preempt_enable(); @@ -424,7 +424,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta * or if the past 5 consecutive context-switches used math. */ fpu.preload = tsk_used_math(new) && (use_eager_fpu() || - new->fpu_counter > 5); + new->thread.fpu_counter > 5); if (__thread_has_fpu(old)) { if (!__save_init_fpu(old)) cpu = ~0; @@ -433,16 +433,16 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta /* Don't change CR0.TS if we just switch! 
*/ if (fpu.preload) { - new->fpu_counter++; + new->thread.fpu_counter++; __thread_set_has_fpu(new); prefetch(new->thread.fpu.state); } else if (!use_eager_fpu()) stts(); } else { - old->fpu_counter = 0; + old->thread.fpu_counter = 0; old->thread.fpu.last_cpu = ~0; if (fpu.preload) { - new->fpu_counter++; + new->thread.fpu_counter++; if (!use_eager_fpu() && fpu_lazy_restore(new, cpu)) fpu.preload = 0; else diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 92b3bae08b7..cba45d99ac1 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -187,6 +187,9 @@ extern __visible void smp_invalidate_interrupt(struct pt_regs *); #endif extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); +#ifdef CONFIG_TRACING +#define trace_interrupt interrupt +#endif typedef int vector_irq_t[NR_VECTORS]; DECLARE_PER_CPU(vector_irq_t, vector_irq); diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index 2c37aadcbc3..32ce71375b2 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h @@ -21,7 +21,7 @@ enum die_val { DIE_NMIUNKNOWN, }; -extern void printk_address(unsigned long address, int reliable); +extern void printk_address(unsigned long address); extern void die(const char *, struct pt_regs *,long); extern int __must_check __die(const char *, struct pt_regs *, long); extern void show_trace(struct task_struct *t, struct pt_regs *regs, diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 15f960c06ff..24ec1216596 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -274,13 +274,17 @@ struct x86_emulate_ctxt { bool guest_mode; /* guest running a nested guest */ bool perm_ok; /* do not check permissions if true */ - bool only_vendor_specific_insn; + bool ud; /* inject an #UD if host doesn't support insn */ bool have_exception; struct x86_exception exception; - /* decode cache */ - u8 twobyte; + /* + * decode cache + */ + + /* current opcode length in bytes */ + u8 opcode_len; u8 b; u8 intercept; u8 lock_prefix; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c76ff74a98f..ae5d7830855 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -79,6 +79,13 @@ #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) +static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) +{ + /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */ + return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - + (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); +} + #define SELECTOR_TI_MASK (1 << 2) #define SELECTOR_RPL_MASK 0x03 @@ -253,7 +260,6 @@ struct kvm_pio_request { * mode. 
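The new gfn_to_index() helper above is pure arithmetic: assuming KVM_HPAGE_GFN_SHIFT(level) is (level - 1) * 9 on x86, each step up the paging hierarchy groups 512 more gfns per index. A quick stand-alone check of the formula:

    #include <stdio.h>

    #define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)   /* assumed x86 definition */

    static unsigned long gfn_to_index(unsigned long gfn, unsigned long base_gfn,
                                      int level)
    {
            return (gfn >> HPAGE_GFN_SHIFT(level)) -
                   (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    int main(void)
    {
            /* level 2 == 2MB pages: gfn 0x1234 in a slot based at gfn 0x1000
             * falls into large-page index 1.
             */
            printf("%lu\n", gfn_to_index(0x1234, 0x1000, 2));
            return 0;
    }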
*/ struct kvm_mmu { - void (*new_cr3)(struct kvm_vcpu *vcpu); void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root); unsigned long (*get_cr3)(struct kvm_vcpu *vcpu); u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index); @@ -261,7 +267,6 @@ struct kvm_mmu { bool prefault); void (*inject_page_fault)(struct kvm_vcpu *vcpu, struct x86_exception *fault); - void (*free)(struct kvm_vcpu *vcpu); gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access, struct x86_exception *exception); gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access); @@ -389,6 +394,8 @@ struct kvm_vcpu_arch { struct fpu guest_fpu; u64 xcr0; + u64 guest_supported_xcr0; + u32 guest_xstate_size; struct kvm_pio_request pio; void *pio_data; @@ -557,7 +564,9 @@ struct kvm_arch { struct list_head assigned_dev_head; struct iommu_domain *iommu_domain; - int iommu_flags; + bool iommu_noncoherent; +#define __KVM_HAVE_ARCH_NONCOHERENT_DMA + atomic_t noncoherent_dma_count; struct kvm_pic *vpic; struct kvm_ioapic *vioapic; struct kvm_pit *vpit; @@ -780,11 +789,11 @@ void kvm_mmu_module_exit(void); void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); -int kvm_mmu_setup(struct kvm_vcpu *vcpu); +void kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, u64 dirty_mask, u64 nx_mask, u64 x_mask); -int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); +void kvm_mmu_reset_context(struct kvm_vcpu *vcpu); void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, @@ -922,13 +931,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, void *insn, int insn_len); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); +void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu); void kvm_enable_tdp(void); void kvm_disable_tdp(void); -int complete_pio(struct kvm_vcpu *vcpu); -bool kvm_check_iopl(struct kvm_vcpu *vcpu); - static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access) { return gpa; diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 626cf70082d..3142a94c7b4 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h @@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { } #define default_get_smp_config x86_init_uint_noop #endif -void generic_processor_info(int apicid, int version); +int generic_processor_info(int apicid, int version); #ifdef CONFIG_ACPI extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index cb7502852ac..e139b13f2a3 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -218,10 +218,14 @@ void msrs_free(struct msr *msrs); #ifdef CONFIG_SMP int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); +int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q); +int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q); void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); +int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 
*q); +int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q); int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); #else /* CONFIG_SMP */ @@ -235,6 +239,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) wrmsr(msr_no, l, h); return 0; } +static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +{ + rdmsrl(msr_no, *q); + return 0; +} +static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +{ + wrmsrl(msr_no, q); + return 0; +} static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no, struct msr *msrs) { @@ -254,6 +268,14 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { return wrmsr_safe(msr_no, l, h); } +static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +{ + return rdmsrl_safe(msr_no, q); +} +static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +{ + return wrmsrl_safe(msr_no, q); +} static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]) { return rdmsr_safe_regs(regs); diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index b3e18f80030..94220d14d5c 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -378,9 +378,6 @@ do { \ #define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) #define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) #define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) -#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) -#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) -#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val) #define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val) #define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val) @@ -400,9 +397,6 @@ do { \ #define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val) #define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val) #define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val) -#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val) -#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val) -#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val) #define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval) #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval) #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval) @@ -447,7 +441,6 @@ do { \ #define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) #define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) #define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) -#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) #define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) #define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) @@ -457,7 +450,6 @@ do { \ #define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val) #define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val) #define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val) -#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val) #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val) #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval) #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval) diff --git a/arch/x86/include/asm/pgalloc.h 
b/arch/x86/include/asm/pgalloc.h index b4389a468fb..c4412e972bb 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -80,12 +80,21 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, #if PAGETABLE_LEVELS > 2 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); + struct page *page; + page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); + if (!page) + return NULL; + if (!pgtable_pmd_page_ctor(page)) { + __free_pages(page, 0); + return NULL; + } + return (pmd_t *)page_address(page); } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) { BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); + pgtable_pmd_page_dtor(virt_to_page(pmd)); free_page((unsigned long)pmd); } diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 987c75ecc33..7b034a4057f 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -488,6 +488,15 @@ struct thread_struct { unsigned long iopl; /* Max allowed port in the bitmap, in bytes: */ unsigned io_bitmap_max; + /* + * fpu_counter contains the number of consecutive context switches + * that the FPU is used. If this is over a threshold, the lazy fpu + * saving becomes unlazy to save the trap. This is an unsigned char + * so that after 256 times the counter wraps and the behavior turns + * lazy again; this to deal with bursty apps that only use FPU for + * a short time + */ + unsigned char fpu_counter; }; /* diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index bade6ac3b14..fbeb06ed0ea 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h @@ -39,10 +39,5 @@ static inline void x86_dtb_init(void) { } extern char cmd_line[COMMAND_LINE_SIZE]; -#define pci_address_to_pio pci_address_to_pio -unsigned long pci_address_to_pio(phys_addr_t addr); - -#define HAVE_ARCH_DEVTREE_FIXUPS - #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index be8269b00e2..d6b078e9fa2 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -14,6 +14,8 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall, struct timespec *ts); void pvclock_resume(void); +void pvclock_touch_watchdogs(void); + /* * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, * yielding a 64-bit result. diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h index c48a95035a7..6f1c3a8a33a 100644 --- a/arch/x86/include/asm/segment.h +++ b/arch/x86/include/asm/segment.h @@ -214,6 +214,9 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; +#ifdef CONFIG_TRACING +#define trace_early_idt_handlers early_idt_handlers +#endif /* * Load a segment. 
Fall back on loading the zero diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h new file mode 100644 index 00000000000..2fbc66c7885 --- /dev/null +++ b/arch/x86/include/asm/trace/exceptions.h @@ -0,0 +1,52 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM exceptions + +#if !defined(_TRACE_PAGE_FAULT_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGE_FAULT_H + +#include <linux/tracepoint.h> + +extern void trace_irq_vector_regfunc(void); +extern void trace_irq_vector_unregfunc(void); + +DECLARE_EVENT_CLASS(x86_exceptions, + + TP_PROTO(unsigned long address, struct pt_regs *regs, + unsigned long error_code), + + TP_ARGS(address, regs, error_code), + + TP_STRUCT__entry( + __field( unsigned long, address ) + __field( unsigned long, ip ) + __field( unsigned long, error_code ) + ), + + TP_fast_assign( + __entry->address = address; + __entry->ip = regs->ip; + __entry->error_code = error_code; + ), + + TP_printk("address=%pf ip=%pf error_code=0x%lx", + (void *)__entry->address, (void *)__entry->ip, + __entry->error_code) ); + +#define DEFINE_PAGE_FAULT_EVENT(name) \ +DEFINE_EVENT_FN(x86_exceptions, name, \ + TP_PROTO(unsigned long address, struct pt_regs *regs, \ + unsigned long error_code), \ + TP_ARGS(address, regs, error_code), \ + trace_irq_vector_regfunc, \ + trace_irq_vector_unregfunc); + +DEFINE_PAGE_FAULT_EVENT(page_fault_user); +DEFINE_PAGE_FAULT_EVENT(page_fault_kernel); + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE exceptions +#endif /* _TRACE_PAGE_FAULT_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 7036cb60cd8..58d66fe06b6 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -37,6 +37,23 @@ asmlinkage void machine_check(void); #endif /* CONFIG_X86_MCE */ asmlinkage void simd_coprocessor_error(void); +#ifdef CONFIG_TRACING +asmlinkage void trace_page_fault(void); +#define trace_divide_error divide_error +#define trace_bounds bounds +#define trace_invalid_op invalid_op +#define trace_device_not_available device_not_available +#define trace_coprocessor_segment_overrun coprocessor_segment_overrun +#define trace_invalid_TSS invalid_TSS +#define trace_segment_not_present segment_not_present +#define trace_general_protection general_protection +#define trace_spurious_interrupt_bug spurious_interrupt_bug +#define trace_coprocessor_error coprocessor_error +#define trace_alignment_check alignment_check +#define trace_simd_coprocessor_error simd_coprocessor_error +#define trace_async_page_fault async_page_fault +#endif + dotraplinkage void do_divide_error(struct pt_regs *, long); dotraplinkage void do_debug(struct pt_regs *, long); dotraplinkage void do_nmi(struct pt_regs *, long); @@ -55,6 +72,9 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *); #endif dotraplinkage void do_general_protection(struct pt_regs *, long); dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); +#ifdef CONFIG_TRACING +dotraplinkage void trace_do_page_fault(struct pt_regs *, unsigned long); +#endif dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long); dotraplinkage void do_coprocessor_error(struct pt_regs *, long); dotraplinkage void do_alignment_check(struct pt_regs *, long); diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 828a1565ba5..0f1be11e43d 100644 --- a/arch/x86/include/asm/x86_init.h +++ 
b/arch/x86/include/asm/x86_init.h @@ -172,6 +172,7 @@ struct x86_platform_ops { struct pci_dev; struct msi_msg; +struct msi_desc; struct x86_msi_ops { int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); @@ -182,6 +183,8 @@ struct x86_msi_ops { void (*teardown_msi_irqs)(struct pci_dev *dev); void (*restore_msi_irqs)(struct pci_dev *dev, int irq); int (*setup_hpet_msi)(unsigned int irq, unsigned int id); + u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); + u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); }; struct IO_APIC_route_entry; diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h new file mode 100644 index 00000000000..7f02fe4e2c7 --- /dev/null +++ b/arch/x86/include/asm/xen/page-coherent.h @@ -0,0 +1,38 @@ +#ifndef _ASM_X86_XEN_PAGE_COHERENT_H +#define _ASM_X86_XEN_PAGE_COHERENT_H + +#include <asm/page.h> +#include <linux/dma-attrs.h> +#include <linux/dma-mapping.h> + +static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags, + struct dma_attrs *attrs) +{ + void *vstart = (void*)__get_free_pages(flags, get_order(size)); + *dma_handle = virt_to_phys(vstart); + return vstart; +} + +static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle, + struct dma_attrs *attrs) +{ + free_pages((unsigned long) cpu_addr, get_order(size)); +} + +static inline void xen_dma_map_page(struct device *hwdev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { } + +static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { } + +static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) { } + +static inline void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) { } + +#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */ diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 5d9a3033b3d..d3a87780c70 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -211,9 +211,9 @@ struct kvm_cpuid_entry2 { __u32 padding[3]; }; -#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX 1 -#define KVM_CPUID_FLAG_STATEFUL_FUNC 2 -#define KVM_CPUID_FLAG_STATE_READ_NEXT 4 +#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0) +#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1) +#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2) /* for KVM_SET_CPUID2 */ struct kvm_cpuid2 { diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index bb0465090ae..b93e09a0fa2 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h @@ -536,6 +536,7 @@ /* MSR_IA32_VMX_MISC bits */ #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) +#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F /* AMD-V MSRs */ #define MSR_VM_CR 0xc0010114 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 40c76604199..6c0b43bd024 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -189,24 +189,31 @@ static int __init acpi_parse_madt(struct acpi_table_header *table) return 0; } -static void acpi_register_lapic(int id, u8 enabled) +/** + * acpi_register_lapic - register a local apic and generates a logic cpu number + * @id: local apic id to 
register + * @enabled: this cpu is enabled or not + * + * Returns the logic cpu number which maps to the local apic + */ +static int acpi_register_lapic(int id, u8 enabled) { unsigned int ver = 0; if (id >= MAX_LOCAL_APIC) { printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); - return; + return -EINVAL; } if (!enabled) { ++disabled_cpus; - return; + return -EINVAL; } if (boot_cpu_physical_apicid != -1U) ver = apic_version[boot_cpu_physical_apicid]; - generic_processor_info(id, ver); + return generic_processor_info(id, ver); } static int __init @@ -614,84 +621,27 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) #endif } -static int _acpi_map_lsapic(acpi_handle handle, int *pcpu) +static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) { - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj; - struct acpi_madt_local_apic *lapic; - cpumask_var_t tmp_map, new_map; - u8 physid; int cpu; - int retval = -ENOMEM; - - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) - return -EINVAL; - - if (!buffer.length || !buffer.pointer) - return -EINVAL; - - obj = buffer.pointer; - if (obj->type != ACPI_TYPE_BUFFER || - obj->buffer.length < sizeof(*lapic)) { - kfree(buffer.pointer); - return -EINVAL; - } - lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; - - if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || - !(lapic->lapic_flags & ACPI_MADT_ENABLED)) { - kfree(buffer.pointer); - return -EINVAL; - } - - physid = lapic->id; - - kfree(buffer.pointer); - buffer.length = ACPI_ALLOCATE_BUFFER; - buffer.pointer = NULL; - lapic = NULL; - - if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL)) - goto out; - - if (!alloc_cpumask_var(&new_map, GFP_KERNEL)) - goto free_tmp_map; - - cpumask_copy(tmp_map, cpu_present_mask); - acpi_register_lapic(physid, ACPI_MADT_ENABLED); - - /* - * If acpi_register_lapic successfully generates a new logical cpu - * number, then the following will get us exactly what was mapped - */ - cpumask_andnot(new_map, cpu_present_mask, tmp_map); - if (cpumask_empty(new_map)) { - printk ("Unable to map lapic to logical cpu number\n"); - retval = -EINVAL; - goto free_new_map; + cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED); + if (cpu < 0) { + pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); + return cpu; } acpi_processor_set_pdc(handle); - - cpu = cpumask_first(new_map); acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; - retval = 0; - -free_new_map: - free_cpumask_var(new_map); -free_tmp_map: - free_cpumask_var(tmp_map); -out: - return retval; + return 0; } /* wrapper to silence section mismatch warning */ -int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu) +int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) { - return _acpi_map_lsapic(handle, pcpu); + return _acpi_map_lsapic(handle, physid, pcpu); } EXPORT_SYMBOL(acpi_map_lsapic); @@ -745,7 +695,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table) #ifdef CONFIG_HPET_TIMER #include <asm/hpet.h> -static struct __initdata resource *hpet_res; +static struct resource *hpet_res __initdata; static int __init acpi_parse_hpet(struct acpi_table_header *table) { diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 33120100ff5..3a2ae4c8894 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -26,6 +26,17 @@ static char temp_stack[4096]; #endif /** + * x86_acpi_enter_sleep_state - enter sleep state + * @state: Sleep state to enter. 
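The _acpi_map_lsapic() rewrite above works because acpi_register_lapic()/generic_processor_info() now hand back the logical CPU number directly, instead of the caller recovering it by diffing cpu_present_mask before and after registration. A small sketch of the two styles, using a plain bitmask in place of the kernel cpumask API (hypothetical names):

#include <stdint.h>
#include <stdio.h>

static uint64_t present_mask;		/* toy stand-in for cpu_present_mask */

/* New style: registration returns the index it allocated (or -1). */
static int register_cpu(void)
{
	int cpu;

	for (cpu = 0; cpu < 64; cpu++) {
		if (!(present_mask & (1ULL << cpu))) {
			present_mask |= 1ULL << cpu;
			return cpu;
		}
	}
	return -1;
}

int main(void)
{
	/* Old style: snapshot, register, then hunt for the newly set bit. */
	uint64_t before = present_mask;
	register_cpu();
	uint64_t new_bits = present_mask & ~before;
	printf("old style found cpu %d\n", __builtin_ctzll(new_bits));

	/* New style: just use the return value. */
	printf("new style got cpu %d\n", register_cpu());
	return 0;
}
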
+ * + * Wrapper around acpi_enter_sleep_state() to be called by assmebly. + */ +acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state) +{ + return acpi_enter_sleep_state(state); +} + +/** * x86_acpi_suspend_lowlevel - save kernel state * * Create an identity mapped page table and copy the wakeup routine to diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h index c9c2c982d5e..65c7b606b60 100644 --- a/arch/x86/kernel/acpi/sleep.h +++ b/arch/x86/kernel/acpi/sleep.h @@ -17,3 +17,5 @@ extern void wakeup_long64(void); extern void do_suspend_lowlevel(void); extern int x86_acpi_suspend_lowlevel(void); + +acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state); diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index d1daa66ab16..665c6b7d2ea 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel) call save_processor_state call save_registers pushl $3 - call acpi_enter_sleep_state + call x86_acpi_enter_sleep_state addl $4, %esp # In case of S3 failure, we'll emerge here. Jump diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 8ea5164cbd0..ae693b51ed8 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -73,7 +73,7 @@ ENTRY(do_suspend_lowlevel) addq $8, %rsp movl $3, %edi xorl %eax, %eax - call acpi_enter_sleep_state + call x86_acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp resume_point diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 15e8563e5c2..df94598ad05 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -402,17 +402,6 @@ void alternatives_enable_smp(void) { struct smp_alt_module *mod; -#ifdef CONFIG_LOCKDEP - /* - * Older binutils section handling bug prevented - * alternatives-replacement from working reliably. - * - * If this still occurs then you should see a hang - * or crash shortly after this line: - */ - pr_info("lockdep: fixing up alternatives\n"); -#endif - /* Why bother if there are no other CPUs? 
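The wakeup_32.S/wakeup_64.S hunks above now call x86_acpi_enter_sleep_state() rather than acpi_enter_sleep_state() directly, so the assembly only ever targets a symbol whose calling convention the architecture controls (asmlinkage). A user-space sketch of the same indirection; ARCH_ASMLINKAGE and the regparm(0) attribute are assumptions standing in for 32-bit asmlinkage, for illustration only:

#include <stdio.h>

#ifdef __i386__
#define ARCH_ASMLINKAGE __attribute__((regparm(0)))	/* assumed: all args on the stack */
#else
#define ARCH_ASMLINKAGE
#endif

/* Stand-in for the external function whose convention may change. */
static int backend_enter_sleep(unsigned char state)
{
	printf("entering sleep state S%u\n", (unsigned)state);
	return 0;
}

/* Fixed-ABI wrapper: the only symbol the assembly stub needs to know. */
ARCH_ASMLINKAGE int arch_enter_sleep_state(unsigned char state)
{
	return backend_enter_sleep(state);
}

int main(void)
{
	return arch_enter_sleep_state(3);	/* S3, as pushed by do_suspend_lowlevel */
}
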
*/ BUG_ON(num_possible_cpus() == 1); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index a7eb82d9b01..ed165d65738 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup) apic_write(APIC_LVT1, value); } -void generic_processor_info(int apicid, int version) +int generic_processor_info(int apicid, int version) { int cpu, max = nr_cpu_ids; bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, @@ -2127,7 +2127,7 @@ void generic_processor_info(int apicid, int version) " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); disabled_cpus++; - return; + return -ENODEV; } if (num_processors >= nr_cpu_ids) { @@ -2138,7 +2138,7 @@ void generic_processor_info(int apicid, int version) " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); disabled_cpus++; - return; + return -EINVAL; } num_processors++; @@ -2183,6 +2183,8 @@ void generic_processor_info(int apicid, int version) #endif set_cpu_possible(cpu, true); set_cpu_present(cpu, true); + + return cpu; } int hard_smp_processor_id(void) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3daece79a14..bca023bdd6b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -339,7 +339,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c) #endif /* - * On a AMD dual core setup the lower bits of the APIC id distingush the cores. + * On a AMD dual core setup the lower bits of the APIC id distinguish the cores. * Assumes number of cores is a power of two. */ static void amd_detect_cmp(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 1414c90feab..0641113e296 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -1,5 +1,5 @@ /* - * Routines to indentify caches on Intel CPU. + * Routines to identify caches on Intel CPU. * * Changes: * Venkatesh Pallipadi : Adding cache identification through cpuid(4) diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index f2cc63e9cf0..b6f794aa169 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -1,5 +1,5 @@ /* - * Routines to indentify additional cpu features that are scattered in + * Routines to identify additional cpu features that are scattered in * cpuid space. 
*/ #include <linux/cpu.h> diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 376dc787344..d35078ea144 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -20,22 +20,13 @@ #include <asm/hpet.h> #include <asm/apic.h> #include <asm/pci_x86.h> +#include <asm/setup.h> __initdata u64 initial_dtb; char __initdata cmd_line[COMMAND_LINE_SIZE]; int __initdata of_ioapic; -unsigned long pci_address_to_pio(phys_addr_t address) -{ - /* - * The ioport address can be directly used by inX / outX - */ - BUG_ON(address >= (1 << 16)); - return (unsigned long)address; -} -EXPORT_SYMBOL_GPL(pci_address_to_pio); - void __init early_init_dt_scan_chosen_arch(unsigned long node) { BUG(); @@ -51,15 +42,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); } -#ifdef CONFIG_BLK_DEV_INITRD -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (unsigned long)__va(start); - initrd_end = (unsigned long)__va(end); - initrd_below_start_ok = 1; -} -#endif - void __init add_dtb(u64 data) { initial_dtb = data + offsetof(struct setup_data, data); @@ -105,7 +87,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) static int x86_of_pci_irq_enable(struct pci_dev *dev) { - struct of_irq oirq; u32 virq; int ret; u8 pin; @@ -116,12 +97,7 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev) if (!pin) return 0; - ret = of_irq_map_pci(dev, &oirq); - if (ret) - return ret; - - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); + virq = of_irq_parse_and_map_pci(dev, 0, 0); if (virq == 0) return -EINVAL; dev->irq = virq; @@ -230,7 +206,7 @@ static void __init dtb_apic_setup(void) static void __init x86_flattree_get_config(void) { u32 size, map_len; - void *new_dtb; + struct boot_param_header *dt; if (!initial_dtb) return; @@ -238,24 +214,17 @@ static void __init x86_flattree_get_config(void) map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)sizeof(struct boot_param_header)); - initial_boot_params = early_memremap(initial_dtb, map_len); - size = be32_to_cpu(initial_boot_params->totalsize); + dt = early_memremap(initial_dtb, map_len); + size = be32_to_cpu(dt->totalsize); if (map_len < size) { - early_iounmap(initial_boot_params, map_len); - initial_boot_params = early_memremap(initial_dtb, size); + early_iounmap(dt, map_len); + dt = early_memremap(initial_dtb, size); map_len = size; } - new_dtb = alloc_bootmem(size); - memcpy(new_dtb, initial_boot_params, size); - early_iounmap(initial_boot_params, map_len); - - initial_boot_params = new_dtb; - - /* root level address cells */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); - - unflatten_device_tree(); + initial_boot_params = dt; + unflatten_and_copy_device_tree(); + early_iounmap(dt, map_len); } #else static inline void x86_flattree_get_config(void) { } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index deb6421c9e6..d9c12d3022a 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -25,12 +25,17 @@ unsigned int code_bytes = 64; int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE; static int die_counter; -void printk_address(unsigned long address, int reliable) +static void printk_stack_address(unsigned long address, int reliable) { pr_cont(" [<%p>] %s%pB\n", (void *)address, reliable ? "" : "? 
", (void *)address); } +void printk_address(unsigned long address) +{ + pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address); +} + #ifdef CONFIG_FUNCTION_GRAPH_TRACER static void print_ftrace_graph_addr(unsigned long addr, void *data, @@ -151,7 +156,7 @@ static void print_trace_address(void *data, unsigned long addr, int reliable) { touch_nmi_watchdog(); printk(data); - printk_address(addr, reliable); + printk_stack_address(addr, reliable); } static const struct stacktrace_ops print_trace_ops = { @@ -281,7 +286,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) #else /* Executive summary in case the oops scrolled away */ printk(KERN_ALERT "RIP "); - printk_address(regs->ip, 1); + printk_address(regs->ip); printk(" RSP <%016lx>\n", regs->sp); #endif return 0; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index b3cd3ebae07..96f958d8cd4 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -313,6 +313,16 @@ static size_t __init gen6_stolen_size(int num, int slot, int func) return gmch_ctrl << 25; /* 32 MB units */ } +static inline size_t gen8_stolen_size(int num, int slot, int func) +{ + u16 gmch_ctrl; + + gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); + gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; + gmch_ctrl &= BDW_GMCH_GMS_MASK; + return gmch_ctrl << 25; /* 32 MB units */ +} + typedef size_t (*stolen_size_fn)(int num, int slot, int func); static struct pci_device_id intel_stolen_ids[] __initdata = { @@ -336,6 +346,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = { INTEL_IVB_D_IDS(gen6_stolen_size), INTEL_HSW_D_IDS(gen6_stolen_size), INTEL_HSW_M_IDS(gen6_stolen_size), + INTEL_BDW_M_IDS(gen8_stolen_size), + INTEL_BDW_D_IDS(gen8_stolen_size) }; static void __init intel_graphics_stolen(int num, int slot, int func) diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index fd1bc1b15e6..51e2988c572 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1244,6 +1244,16 @@ return_to_handler: */ .pushsection .kprobes.text, "ax" +#ifdef CONFIG_TRACING +ENTRY(trace_page_fault) + RING0_EC_FRAME + ASM_CLAC + pushl_cfi $trace_do_page_fault + jmp error_code + CFI_ENDPROC +END(trace_page_fault) +#endif + ENTRY(page_fault) RING0_EC_FRAME ASM_CLAC diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 603be7c7067..e21b0785a85 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1278,6 +1278,17 @@ ENTRY(\sym) END(\sym) .endm +#ifdef CONFIG_TRACING +.macro trace_errorentry sym do_sym +errorentry trace(\sym) trace(\do_sym) +errorentry \sym \do_sym +.endm +#else +.macro trace_errorentry sym do_sym +errorentry \sym \do_sym +.endm +#endif + /* error code is on the stack already */ .macro paranoiderrorentry sym do_sym ENTRY(\sym) @@ -1480,7 +1491,7 @@ zeroentry xen_int3 do_int3 errorentry xen_stack_segment do_stack_segment #endif errorentry general_protection do_general_protection -errorentry page_fault do_page_fault +trace_errorentry page_fault do_page_fault #ifdef CONFIG_KVM_GUEST errorentry async_page_fault do_async_page_fault #endif diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 42a392a9fd0..d4bdd253fea 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -248,6 +248,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return ret; } +static int is_ftrace_caller(unsigned long ip) +{ + if (ip == (unsigned long)(&ftrace_call) || + ip == (unsigned long)(&ftrace_regs_call)) + 
return 1; + + return 0; +} + /* * A breakpoint was added to the code address we are about to * modify, and this is the handle that will just skip over it. @@ -257,10 +266,13 @@ int ftrace_update_ftrace_func(ftrace_func_t func) */ int ftrace_int3_handler(struct pt_regs *regs) { + unsigned long ip; + if (WARN_ON_ONCE(!regs)) return 0; - if (!ftrace_location(regs->ip - 1)) + ip = regs->ip - 1; + if (!ftrace_location(ip) && !is_ftrace_caller(ip)) return 0; regs->ip += MCOUNT_INSN_SIZE - 1; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 1be8e43b669..85126ccbdf6 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -162,7 +162,7 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data) clear_bss(); for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) - set_intr_gate(i, &early_idt_handlers[i]); + set_intr_gate(i, early_idt_handlers[i]); load_idt((const struct desc_ptr *)&idt_descr); copy_bootdata(__va(real_mode_data)); diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 5d576ab3440..e8368c6dd2a 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -100,7 +100,7 @@ void unlazy_fpu(struct task_struct *tsk) __save_init_fpu(tsk); __thread_fpu_end(tsk); } else - tsk->fpu_counter = 0; + tsk->thread.fpu_counter = 0; preempt_enable(); } EXPORT_SYMBOL(unlazy_fpu); diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index ee11b7dfbfb..26d5a55a273 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c @@ -42,15 +42,27 @@ static void __jump_label_transform(struct jump_entry *entry, int init) { union jump_code_union code; + const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP }; const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5]; if (type == JUMP_LABEL_ENABLE) { - /* - * We are enabling this jump label. If it is not a nop - * then something must have gone wrong. - */ - if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0)) - bug_at((void *)entry->code, __LINE__); + if (init) { + /* + * Jump label is enabled for the first time. + * So we expect a default_nop... + */ + if (unlikely(memcmp((void *)entry->code, default_nop, 5) + != 0)) + bug_at((void *)entry->code, __LINE__); + } else { + /* + * ...otherwise expect an ideal_nop. Otherwise + * something went horribly wrong. + */ + if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) + != 0)) + bug_at((void *)entry->code, __LINE__); + } code.jump = 0xe9; code.offset = entry->target - @@ -63,7 +75,6 @@ static void __jump_label_transform(struct jump_entry *entry, * are converting the default nop to the ideal nop. 
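__jump_label_transform() above only ever rewrites a 5-byte patch site: a NOP (the default one at init, the ideal one afterwards) or an 0xe9 relative jump. A small sketch of how that jump is encoded, using the usual x86 rel32 form where the displacement is relative to the end of the 5-byte instruction:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define JUMP_LABEL_NOP_SIZE 5

/* Build "jmp rel32" from the patch-site address to the target address. */
static void encode_jump(uint8_t insn[JUMP_LABEL_NOP_SIZE],
			uint64_t code, uint64_t target)
{
	int32_t rel = (int32_t)(target - (code + JUMP_LABEL_NOP_SIZE));

	insn[0] = 0xe9;				/* opcode: JMP rel32 */
	memcpy(&insn[1], &rel, sizeof(rel));	/* little-endian displacement */
}

int main(void)
{
	uint8_t insn[JUMP_LABEL_NOP_SIZE];
	int i;

	encode_jump(insn, 0x1000, 0x1040);	/* jump forward past the NOP */
	for (i = 0; i < JUMP_LABEL_NOP_SIZE; i++)
		printf("%02x ", insn[i]);
	printf("\n");	/* expected: e9 3b 00 00 00 */
	return 0;
}
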
*/ if (init) { - const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP }; if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0)) bug_at((void *)entry->code, __LINE__); } else { diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index b2046e4d0b5..6dd802c6d78 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -464,7 +464,7 @@ static struct notifier_block kvm_cpu_notifier = { static void __init kvm_apf_trap_init(void) { - set_intr_gate(14, &async_page_fault); + set_intr_gate(14, async_page_fault); } void __init kvm_guest_init(void) diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 1570e074134..e6041094ff2 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -139,6 +139,7 @@ bool kvm_check_and_clear_guest_paused(void) src = &hv_clock[cpu].pvti; if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { src->flags &= ~PVCLOCK_GUEST_STOPPED; + pvclock_touch_watchdogs(); ret = true; } diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index af99f71aeb7..c3d4cc972ec 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -431,7 +431,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); if (request_firmware(&fw, (const char *)fw_name, device)) { - pr_err("failed to load file %s\n", fw_name); + pr_debug("failed to load file %s\n", fw_name); goto out; } diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 216a4d754b0..18be189368b 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -49,7 +49,7 @@ void *module_alloc(unsigned long size) return NULL; return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, - -1, __builtin_return_address(0)); + NUMA_NO_NODE, __builtin_return_address(0)); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c2ec1aa6d45..6f1236c29c4 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -153,7 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, childregs->orig_ax = -1; childregs->cs = __KERNEL_CS | get_kernel_rpl(); childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; - p->fpu_counter = 0; + p->thread.fpu_counter = 0; p->thread.io_bitmap_ptr = NULL; memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); return 0; @@ -166,7 +166,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, p->thread.ip = (unsigned long) ret_from_fork; task_user_gs(p) = get_user_gs(current_pt_regs()); - p->fpu_counter = 0; + p->thread.fpu_counter = 0; p->thread.io_bitmap_ptr = NULL; tsk = current; err = -ENOMEM; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 45ab4d6fc8a..9c0280f93d0 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -63,7 +63,7 @@ void __show_regs(struct pt_regs *regs, int all) unsigned int ds, cs, es; printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); - printk_address(regs->ip, 1); + printk_address(regs->ip); printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, regs->flags); printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n", @@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, p->thread.sp = (unsigned long) childregs; p->thread.usersp = me->thread.usersp; set_tsk_thread_flag(p, TIF_FORK); - 
p->fpu_counter = 0; + p->thread.fpu_counter = 0; p->thread.io_bitmap_ptr = NULL; savesegment(gs, p->thread.gsindex); diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index a16bae3f83b..2f355d229a5 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -43,6 +43,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) return pv_tsc_khz; } +void pvclock_touch_watchdogs(void) +{ + touch_softlockup_watchdog_sync(); + clocksource_touch_watchdog(); + rcu_cpu_stall_reset(); + reset_hung_task_detector(); +} + static atomic64_t last_value = ATOMIC64_INIT(0); void pvclock_resume(void) @@ -74,6 +82,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) version = __pvclock_read_cycles(src, &ret, &flags); } while ((src->version & 1) || version != src->version); + if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) { + src->flags &= ~PVCLOCK_GUEST_STOPPED; + pvclock_touch_watchdogs(); + } + if ((valid_flags & PVCLOCK_TSC_STABLE_BIT) && (flags & PVCLOCK_TSC_STABLE_BIT)) return ret; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 918d489fa53..cb233bc9dee 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1121,8 +1121,6 @@ void __init setup_arch(char **cmdline_p) acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start); #endif - reserve_crashkernel(); - vsmp_init(); io_delay_init(); @@ -1135,6 +1133,13 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); initmem_init(); + + /* + * Reserve memory for crash kernel after SRAT is parsed so that it + * won't consume hotpluggable memory. + */ + reserve_crashkernel(); + memblock_find_dma_reserve(); #ifdef CONFIG_KVM_GUEST diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2a165580fa1..85dc05a3aa0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -81,27 +81,6 @@ /* State of each CPU */ DEFINE_PER_CPU(int, cpu_state) = { 0 }; -#ifdef CONFIG_HOTPLUG_CPU -/* - * We need this for trampoline_base protection from concurrent accesses when - * off- and onlining cores wildly. 
- */ -static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex); - -void cpu_hotplug_driver_lock(void) -{ - mutex_lock(&x86_cpu_hotplug_driver_mutex); -} - -void cpu_hotplug_driver_unlock(void) -{ - mutex_unlock(&x86_cpu_hotplug_driver_mutex); -} - -ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; } -ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; } -#endif - /* Number of siblings per CPU package */ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c index 6e60b5fe224..649b010da00 100644 --- a/arch/x86/kernel/topology.c +++ b/arch/x86/kernel/topology.c @@ -65,29 +65,32 @@ int __ref _debug_hotplug_cpu(int cpu, int action) if (!cpu_is_hotpluggable(cpu)) return -EINVAL; - cpu_hotplug_driver_lock(); + lock_device_hotplug(); switch (action) { case 0: ret = cpu_down(cpu); if (!ret) { pr_info("CPU %u is now offline\n", cpu); + dev->offline = true; kobject_uevent(&dev->kobj, KOBJ_OFFLINE); } else pr_debug("Can't offline CPU%d.\n", cpu); break; case 1: ret = cpu_up(cpu); - if (!ret) + if (!ret) { + dev->offline = false; kobject_uevent(&dev->kobj, KOBJ_ONLINE); - else + } else { pr_debug("Can't online CPU%d.\n", cpu); + } break; default: ret = -EINVAL; } - cpu_hotplug_driver_unlock(); + unlock_device_hotplug(); return ret; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 729aa779ff7..b857ed890b4 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -653,7 +653,7 @@ void math_state_restore(void) return; } - tsk->fpu_counter++; + tsk->thread.fpu_counter++; } EXPORT_SYMBOL_GPL(math_state_restore); @@ -713,7 +713,7 @@ void __init early_trap_init(void) /* int3 can be called from all */ set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); #ifdef CONFIG_X86_32 - set_intr_gate(X86_TRAP_PF, &page_fault); + set_intr_gate(X86_TRAP_PF, page_fault); #endif load_idt(&idt_descr); } @@ -721,7 +721,7 @@ void __init early_trap_init(void) void __init early_trap_pf_init(void) { #ifdef CONFIG_X86_64 - set_intr_gate(X86_TRAP_PF, &page_fault); + set_intr_gate(X86_TRAP_PF, page_fault); #endif } @@ -737,30 +737,30 @@ void __init trap_init(void) early_iounmap(p, 4); #endif - set_intr_gate(X86_TRAP_DE, ÷_error); + set_intr_gate(X86_TRAP_DE, divide_error); set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK); /* int4 can be called from all */ set_system_intr_gate(X86_TRAP_OF, &overflow); - set_intr_gate(X86_TRAP_BR, &bounds); - set_intr_gate(X86_TRAP_UD, &invalid_op); - set_intr_gate(X86_TRAP_NM, &device_not_available); + set_intr_gate(X86_TRAP_BR, bounds); + set_intr_gate(X86_TRAP_UD, invalid_op); + set_intr_gate(X86_TRAP_NM, device_not_available); #ifdef CONFIG_X86_32 set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS); #else set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK); #endif - set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun); - set_intr_gate(X86_TRAP_TS, &invalid_TSS); - set_intr_gate(X86_TRAP_NP, &segment_not_present); + set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); + set_intr_gate(X86_TRAP_TS, invalid_TSS); + set_intr_gate(X86_TRAP_NP, segment_not_present); set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); - set_intr_gate(X86_TRAP_GP, &general_protection); - set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug); - set_intr_gate(X86_TRAP_MF, &coprocessor_error); - set_intr_gate(X86_TRAP_AC, &alignment_check); + set_intr_gate(X86_TRAP_GP, general_protection); + set_intr_gate(X86_TRAP_SPURIOUS, 
spurious_interrupt_bug); + set_intr_gate(X86_TRAP_MF, coprocessor_error); + set_intr_gate(X86_TRAP_AC, alignment_check); #ifdef CONFIG_X86_MCE set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK); #endif - set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error); + set_intr_gate(X86_TRAP_XF, simd_coprocessor_error); /* Reserve all the builtin and the syscall vector: */ for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 8ce0072cd70..021783b1f46 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -116,6 +116,8 @@ struct x86_msi_ops x86_msi = { .teardown_msi_irqs = default_teardown_msi_irqs, .restore_msi_irqs = default_restore_msi_irqs, .setup_hpet_msi = default_setup_hpet_msi, + .msi_mask_irq = default_msi_mask_irq, + .msix_mask_irq = default_msix_mask_irq, }; /* MSI arch specific hooks */ @@ -138,6 +140,14 @@ void arch_restore_msi_irqs(struct pci_dev *dev, int irq) { x86_msi.restore_msi_irqs(dev, irq); } +u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +{ + return x86_msi.msi_mask_irq(desc, mask, flag); +} +u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) +{ + return x86_msi.msix_mask_irq(desc, flag); +} #endif struct x86_io_apic_ops x86_io_apic_ops = { diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index a47a3e54b96..b89c5db2b83 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -38,6 +38,7 @@ config KVM select PERF_EVENTS select HAVE_KVM_MSI select HAVE_KVM_CPU_RELAX_INTERCEPT + select KVM_VFIO ---help--- Support hosting fully virtualized guest machines using hardware virtualization extensions. You will need a fairly recent diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index bf4fb04d011..25d22b2d650 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -9,7 +9,7 @@ KVM := ../../../virt/kvm kvm-y += $(KVM)/kvm_main.o $(KVM)/ioapic.o \ $(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o \ - $(KVM)/eventfd.o $(KVM)/irqchip.o + $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += $(KVM)/assigned-dev.o $(KVM)/iommu.o kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index b110fe6c03d..c6976257eff 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -23,6 +23,26 @@ #include "mmu.h" #include "trace.h" +static u32 xstate_required_size(u64 xstate_bv) +{ + int feature_bit = 0; + u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; + + xstate_bv &= ~XSTATE_FPSSE; + while (xstate_bv) { + if (xstate_bv & 0x1) { + u32 eax, ebx, ecx, edx; + cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx); + ret = max(ret, eax + ebx); + } + + xstate_bv >>= 1; + feature_bit++; + } + + return ret; +} + void kvm_update_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; @@ -46,6 +66,18 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu) apic->lapic_timer.timer_mode_mask = 1 << 17; } + best = kvm_find_cpuid_entry(vcpu, 0xD, 0); + if (!best) { + vcpu->arch.guest_supported_xcr0 = 0; + vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; + } else { + vcpu->arch.guest_supported_xcr0 = + (best->eax | ((u64)best->edx << 32)) & + host_xcr0 & KVM_SUPPORTED_XCR0; + vcpu->arch.guest_xstate_size = + xstate_required_size(vcpu->arch.guest_supported_xcr0); + } + kvm_pmu_cpuid_update(vcpu); } @@ -182,13 +214,35 @@ static bool supported_xcr0_bit(unsigned bit) { u64 mask = ((u64)1 << bit); - return mask & (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) & host_xcr0; + return mask & 
KVM_SUPPORTED_XCR0 & host_xcr0; } #define F(x) bit(X86_FEATURE_##x) -static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, - u32 index, int *nent, int maxnent) +static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry, + u32 func, u32 index, int *nent, int maxnent) +{ + switch (func) { + case 0: + entry->eax = 1; /* only one leaf currently */ + ++*nent; + break; + case 1: + entry->ecx = F(MOVBE); + ++*nent; + break; + default: + break; + } + + entry->function = func; + entry->index = index; + + return 0; +} + +static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + u32 index, int *nent, int maxnent) { int r; unsigned f_nx = is_efer_nx() ? F(NX) : 0; @@ -383,6 +437,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, case 0xd: { int idx, i; + entry->eax &= host_xcr0 & KVM_SUPPORTED_XCR0; + entry->edx &= (host_xcr0 & KVM_SUPPORTED_XCR0) >> 32; entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; for (idx = 1, i = 1; idx < 64; ++idx) { if (*nent >= maxnent) @@ -481,6 +537,15 @@ out: return r; } +static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func, + u32 idx, int *nent, int maxnent, unsigned int type) +{ + if (type == KVM_GET_EMULATED_CPUID) + return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent); + + return __do_cpuid_ent(entry, func, idx, nent, maxnent); +} + #undef F struct kvm_cpuid_param { @@ -495,8 +560,36 @@ static bool is_centaur_cpu(const struct kvm_cpuid_param *param) return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR; } -int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries) +static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries, + __u32 num_entries, unsigned int ioctl_type) +{ + int i; + __u32 pad[3]; + + if (ioctl_type != KVM_GET_EMULATED_CPUID) + return false; + + /* + * We want to make sure that ->padding is being passed clean from + * userspace in case we want to use it for something in the future. + * + * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we + * have to give ourselves satisfied only with the emulated side. /me + * sheds a tear. 
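xstate_required_size() earlier in this cpuid.c hunk sizes the guest XSAVE area by walking CPUID leaf 0xD: for every enabled state component it takes max(size, EAX + EBX), where EAX is the component size and EBX its offset. A user-space sketch of the same walk, assuming GCC/Clang's <cpuid.h> on an x86 host; the two constants mirror the kernel's XSAVE_HDR_OFFSET and XSAVE_HDR_SIZE:

#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define XSAVE_LEGACY_SIZE	512	/* FXSAVE region (XSAVE_HDR_OFFSET) */
#define XSAVE_HDR_SIZE		64	/* XSAVE header */

static uint32_t xstate_required_size(uint64_t xstate_bv)
{
	uint32_t ret = XSAVE_LEGACY_SIZE + XSAVE_HDR_SIZE;
	int bit = 0;

	xstate_bv &= ~3ULL;		/* x87 and SSE live in the legacy area */
	while (xstate_bv) {
		if (xstate_bv & 1) {
			uint32_t eax, ebx, ecx, edx;

			__cpuid_count(0xD, bit, eax, ebx, ecx, edx);
			if (eax + ebx > ret)
				ret = eax + ebx;	/* offset + size of this component */
		}
		xstate_bv >>= 1;
		bit++;
	}
	return ret;
}

int main(void)
{
	/* 0x7 = x87 | SSE | AVX; the size grows once YMM state is included. */
	printf("%u bytes\n", xstate_required_size(0x7));
	return 0;
}
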
+ */ + for (i = 0; i < num_entries; i++) { + if (copy_from_user(pad, entries[i].padding, sizeof(pad))) + return true; + + if (pad[0] || pad[1] || pad[2]) + return true; + } + return false; +} + +int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries, + unsigned int type) { struct kvm_cpuid_entry2 *cpuid_entries; int limit, nent = 0, r = -E2BIG, i; @@ -513,8 +606,12 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, goto out; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid->nent = KVM_MAX_CPUID_ENTRIES; + + if (sanity_check_entries(entries, cpuid->nent, type)) + return -EINVAL; + r = -ENOMEM; - cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); + cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent); if (!cpuid_entries) goto out; @@ -526,7 +623,7 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, continue; r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx, - &nent, cpuid->nent); + &nent, cpuid->nent, type); if (r) goto out_free; @@ -537,7 +634,7 @@ int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, limit = cpuid_entries[nent - 1].eax; for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func) r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx, - &nent, cpuid->nent); + &nent, cpuid->nent, type); if (r) goto out_free; @@ -661,6 +758,7 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) *edx = best->edx; } else *eax = *ebx = *ecx = *edx = 0; + trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx); } EXPORT_SYMBOL_GPL(kvm_cpuid); @@ -676,6 +774,5 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) kvm_register_write(vcpu, VCPU_REGS_RCX, ecx); kvm_register_write(vcpu, VCPU_REGS_RDX, edx); kvm_x86_ops->skip_emulated_instruction(vcpu); - trace_kvm_cpuid(function, eax, ebx, ecx, edx); } EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index b7fd0798488..f1e4895174b 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -6,8 +6,9 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu); struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function, u32 index); -int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, - struct kvm_cpuid_entry2 __user *entries); +int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries, + unsigned int type); int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, struct kvm_cpuid_entry __user *entries); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index ddc3f3d2afd..07ffca0a89e 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -130,7 +130,7 @@ #define Mov (1<<20) /* Misc flags */ #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ -#define VendorSpecific (1<<22) /* Vendor specific instruction */ +#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */ #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ #define Undefined (1<<25) /* No Such Instruction */ @@ -785,9 +785,10 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, * @highbyte_regs specifies whether to decode AH,CH,DH,BH. 
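The decode_register() change above folds the REX check into the helper: for a byte-sized operand, encodings 4-7 mean AH/CH/DH/BH only when no REX prefix is present, while any REX prefix makes them select SPL/BPL/SIL/DIL instead. A tiny sketch of that rule, register names only, with no emulator state:

#include <stdbool.h>
#include <stdio.h>

static const char *byte_reg_name(unsigned reg, bool have_rex)
{
	static const char *low[]  = { "al",  "cl",  "dl",  "bl",
				      "spl", "bpl", "sil", "dil" };
	static const char *high[] = { "ah", "ch", "dh", "bh" };

	if (!have_rex && reg >= 4 && reg < 8)
		return high[reg - 4];	/* legacy high-byte encodings */
	return low[reg & 7];		/* REX present: the new low-byte registers */
}

int main(void)
{
	printf("reg 5, no REX:   %s\n", byte_reg_name(5, false));	/* ch */
	printf("reg 5, with REX: %s\n", byte_reg_name(5, true));	/* bpl */
	return 0;
}
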
*/ static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg, - int highbyte_regs) + int byteop) { void *p; + int highbyte_regs = (ctxt->rex_prefix == 0) && byteop; if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8) p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1; @@ -1024,7 +1025,6 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op) { unsigned reg = ctxt->modrm_reg; - int highbyte_regs = ctxt->rex_prefix == 0; if (!(ctxt->d & ModRM)) reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3); @@ -1045,13 +1045,9 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt, } op->type = OP_REG; - if (ctxt->d & ByteOp) { - op->addr.reg = decode_register(ctxt, reg, highbyte_regs); - op->bytes = 1; - } else { - op->addr.reg = decode_register(ctxt, reg, 0); - op->bytes = ctxt->op_bytes; - } + op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp); + fetch_register_operand(op); op->orig_val = op->val; } @@ -1082,12 +1078,10 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, ctxt->modrm_seg = VCPU_SREG_DS; if (ctxt->modrm_mod == 3) { - int highbyte_regs = ctxt->rex_prefix == 0; - op->type = OP_REG; op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, - highbyte_regs && (ctxt->d & ByteOp)); + ctxt->d & ByteOp); if (ctxt->d & Sse) { op->type = OP_XMM; op->bytes = 16; @@ -2961,6 +2955,46 @@ static int em_mov(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +#define FFL(x) bit(X86_FEATURE_##x) + +static int em_movbe(struct x86_emulate_ctxt *ctxt) +{ + u32 ebx, ecx, edx, eax = 1; + u16 tmp; + + /* + * Check MOVBE is set in the guest-visible CPUID leaf. + */ + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); + if (!(ecx & FFL(MOVBE))) + return emulate_ud(ctxt); + + switch (ctxt->op_bytes) { + case 2: + /* + * From MOVBE definition: "...When the operand size is 16 bits, + * the upper word of the destination register remains unchanged + * ..." + * + * Both casting ->valptr and ->val to u16 breaks strict aliasing + * rules so we have to do the operation almost per hand. 
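em_movbe() above has to special-case the 16-bit form: MOVBE byte-swaps the source, and a 16-bit register destination leaves the upper bits of the register untouched. A user-space sketch of the three operand sizes using the GCC byte-swap builtins (the kernel uses swab16/32/64 instead):

#include <stdint.h>
#include <stdio.h>

static uint64_t movbe_to_reg(uint64_t dst, uint64_t src, int op_bytes)
{
	switch (op_bytes) {
	case 2:	/* keep the destination's upper 48 bits */
		return (dst & ~0xffffULL) | __builtin_bswap16((uint16_t)src);
	case 4:	/* 32-bit destination writes zero-extend, as usual on x86-64 */
		return __builtin_bswap32((uint32_t)src);
	case 8:
		return __builtin_bswap64(src);
	default:
		return dst;	/* invalid operand size */
	}
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)
	       movbe_to_reg(0x1122334455667788ULL, 0xaabbULL, 2));
	/* expected: 112233445566bbaa */
	return 0;
}
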
+ */ + tmp = (u16)ctxt->src.val; + ctxt->dst.val &= ~0xffffUL; + ctxt->dst.val |= (unsigned long)swab16(tmp); + break; + case 4: + ctxt->dst.val = swab32((u32)ctxt->src.val); + break; + case 8: + ctxt->dst.val = swab64(ctxt->src.val); + break; + default: + return X86EMUL_PROPAGATE_FAULT; + } + return X86EMUL_CONTINUE; +} + static int em_cr_write(struct x86_emulate_ctxt *ctxt) { if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) @@ -3256,6 +3290,18 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_sahf(struct x86_emulate_ctxt *ctxt) +{ + u32 flags; + + flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; + flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; + + ctxt->eflags &= ~0xffUL; + ctxt->eflags |= flags | X86_EFLAGS_FIXED; + return X86EMUL_CONTINUE; +} + static int em_lahf(struct x86_emulate_ctxt *ctxt) { *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL; @@ -3502,7 +3548,7 @@ static const struct opcode group7_rm1[] = { static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), - II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall), + II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | Priv, stgi, check_svme), @@ -3587,7 +3633,7 @@ static const struct group_dual group7 = { { II(SrcMem16 | Mov | Priv, em_lmsw, lmsw), II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg), }, { - I(SrcNone | Priv | VendorSpecific, em_vmcall), + I(SrcNone | Priv | EmulateOnUD, em_vmcall), EXT(0, group7_rm1), N, EXT(0, group7_rm3), II(SrcNone | DstMem | Mov, em_smsw, smsw), N, @@ -3750,7 +3796,8 @@ static const struct opcode opcode_table[256] = { D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), I(SrcImmFAddr | No64, em_call_far), N, II(ImplicitOps | Stack, em_pushf, pushf), - II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf), + II(ImplicitOps | Stack, em_popf, popf), + I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf), /* 0xA0 - 0xA7 */ I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov), @@ -3810,7 +3857,7 @@ static const struct opcode opcode_table[256] = { static const struct opcode twobyte_table[256] = { /* 0x00 - 0x0F */ G(0, group6), GD(0, &group7), N, N, - N, I(ImplicitOps | VendorSpecific, em_syscall), + N, I(ImplicitOps | EmulateOnUD, em_syscall), II(ImplicitOps | Priv, em_clts, clts), N, DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, N, D(ImplicitOps | ModRM), N, N, @@ -3830,8 +3877,8 @@ static const struct opcode twobyte_table[256] = { IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), II(ImplicitOps | Priv, em_rdmsr, rdmsr), IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc), - I(ImplicitOps | VendorSpecific, em_sysenter), - I(ImplicitOps | Priv | VendorSpecific, em_sysexit), + I(ImplicitOps | EmulateOnUD, em_sysenter), + I(ImplicitOps | Priv | EmulateOnUD, em_sysexit), N, N, N, N, N, N, N, N, N, N, /* 0x40 - 0x4F */ @@ -3892,6 +3939,30 @@ static const struct opcode twobyte_table[256] = { N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N }; +static const struct gprefix three_byte_0f_38_f0 = { + I(DstReg | SrcMem | Mov, em_movbe), N, N, N +}; + +static const struct gprefix three_byte_0f_38_f1 = { + I(DstMem | SrcReg | Mov, em_movbe), N, N, N +}; + +/* + * Insns below are selected by the prefix which indexed by the third opcode + * byte. 
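The new opcode_map_0f_38[] above extends the decoder's dispatch chain: 0x0f selects the two-byte table, a following 0x38 selects the three-byte table, and its 0xf0/0xf1 entries are prefix groups that resolve to the em_movbe handlers added earlier in this hunk. A condensed sketch of that lookup order with hypothetical table and handler names:

#include <stdint.h>
#include <stdio.h>

enum handler { H_NONE, H_ONE_BYTE, H_TWO_BYTE, H_MOVBE };

/* Mirrors the opcode_len bookkeeping in x86_decode_insn(). */
static enum handler decode(const uint8_t *insn, int *opcode_len)
{
	*opcode_len = 1;
	if (insn[0] != 0x0f)
		return H_ONE_BYTE;		/* opcode_table[insn[0]] */

	*opcode_len = 2;
	if (insn[1] != 0x38)
		return H_TWO_BYTE;		/* twobyte_table[insn[1]] */

	*opcode_len = 3;			/* opcode_map_0f_38[insn[2]] */
	if (insn[2] == 0xf0 || insn[2] == 0xf1)
		return H_MOVBE;			/* gprefix group picks the final handler */
	return H_NONE;
}

int main(void)
{
	const uint8_t movbe_load[] = { 0x0f, 0x38, 0xf0 };	/* movbe r32, m32 */
	int len;

	printf("handler=%d opcode_len=%d\n", decode(movbe_load, &len), len);
	return 0;
}
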
+ */ +static const struct opcode opcode_map_0f_38[256] = { + /* 0x00 - 0x7f */ + X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), + /* 0x80 - 0xef */ + X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), + /* 0xf0 - 0xf1 */ + GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0), + GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1), + /* 0xf2 - 0xff */ + N, N, X4(N), X8(N) +}; + #undef D #undef N #undef G @@ -4040,7 +4111,8 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, case OpMem8: ctxt->memop.bytes = 1; if (ctxt->memop.type == OP_REG) { - ctxt->memop.addr.reg = decode_register(ctxt, ctxt->modrm_rm, 1); + ctxt->memop.addr.reg = decode_register(ctxt, + ctxt->modrm_rm, true); fetch_register_operand(&ctxt->memop); } goto mem_common; @@ -4126,6 +4198,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) ctxt->_eip = ctxt->eip; ctxt->fetch.start = ctxt->_eip; ctxt->fetch.end = ctxt->fetch.start + insn_len; + ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); @@ -4208,9 +4281,16 @@ done_prefixes: opcode = opcode_table[ctxt->b]; /* Two-byte opcode? */ if (ctxt->b == 0x0f) { - ctxt->twobyte = 1; + ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; + + /* 0F_38 opcode map */ + if (ctxt->b == 0x38) { + ctxt->opcode_len = 3; + ctxt->b = insn_fetch(u8, ctxt); + opcode = opcode_map_0f_38[ctxt->b]; + } } ctxt->d = opcode.flags; @@ -4267,7 +4347,7 @@ done_prefixes: if (ctxt->d == 0 || (ctxt->d & NotImpl)) return EMULATION_FAILED; - if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn) + if (!(ctxt->d & EmulateOnUD) && ctxt->ud) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) @@ -4540,8 +4620,10 @@ special_insn: goto writeback; } - if (ctxt->twobyte) + if (ctxt->opcode_len == 2) goto twobyte_insn; + else if (ctxt->opcode_len == 3) + goto threebyte_insn; switch (ctxt->b) { case 0x63: /* movsxd */ @@ -4726,6 +4808,8 @@ twobyte_insn: goto cannot_emulate; } +threebyte_insn: + if (rc != X86EMUL_CONTINUE) goto done; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index dce0df8150d..40772ef0f2b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2570,11 +2570,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, kvm_release_pfn_clean(pfn); } -static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) -{ - mmu_free_roots(vcpu); -} - static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) { @@ -3424,18 +3419,11 @@ out_unlock: return 0; } -static void nonpaging_free(struct kvm_vcpu *vcpu) -{ - mmu_free_roots(vcpu); -} - -static int nonpaging_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) +static void nonpaging_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) { - context->new_cr3 = nonpaging_new_cr3; context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; - context->free = nonpaging_free; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; context->update_pte = nonpaging_update_pte; @@ -3444,7 +3432,6 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu, context->root_hpa = INVALID_PAGE; context->direct_map = true; context->nx = false; - return 0; } void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) @@ -3454,9 +3441,8 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb); -static void paging_new_cr3(struct kvm_vcpu *vcpu) +void 
kvm_mmu_new_cr3(struct kvm_vcpu *vcpu) { - pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu)); mmu_free_roots(vcpu); } @@ -3471,11 +3457,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu, vcpu->arch.mmu.inject_page_fault(vcpu, fault); } -static void paging_free(struct kvm_vcpu *vcpu) -{ - nonpaging_free(vcpu); -} - static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, unsigned access, int *nr_present) { @@ -3665,9 +3646,9 @@ static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) mmu->last_pte_bitmap = map; } -static int paging64_init_context_common(struct kvm_vcpu *vcpu, - struct kvm_mmu *context, - int level) +static void paging64_init_context_common(struct kvm_vcpu *vcpu, + struct kvm_mmu *context, + int level) { context->nx = is_nx(vcpu); context->root_level = level; @@ -3677,27 +3658,24 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, update_last_pte_bitmap(vcpu, context); ASSERT(is_pae(vcpu)); - context->new_cr3 = paging_new_cr3; context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; context->sync_page = paging64_sync_page; context->invlpg = paging64_invlpg; context->update_pte = paging64_update_pte; - context->free = paging_free; context->shadow_root_level = level; context->root_hpa = INVALID_PAGE; context->direct_map = false; - return 0; } -static int paging64_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) +static void paging64_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) { - return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); + paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); } -static int paging32_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) +static void paging32_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) { context->nx = false; context->root_level = PT32_ROOT_LEVEL; @@ -3706,33 +3684,28 @@ static int paging32_init_context(struct kvm_vcpu *vcpu, update_permission_bitmask(vcpu, context, false); update_last_pte_bitmap(vcpu, context); - context->new_cr3 = paging_new_cr3; context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; - context->free = paging_free; context->sync_page = paging32_sync_page; context->invlpg = paging32_invlpg; context->update_pte = paging32_update_pte; context->shadow_root_level = PT32E_ROOT_LEVEL; context->root_hpa = INVALID_PAGE; context->direct_map = false; - return 0; } -static int paging32E_init_context(struct kvm_vcpu *vcpu, - struct kvm_mmu *context) +static void paging32E_init_context(struct kvm_vcpu *vcpu, + struct kvm_mmu *context) { - return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); + paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); } -static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) +static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *context = vcpu->arch.walk_mmu; context->base_role.word = 0; - context->new_cr3 = nonpaging_new_cr3; context->page_fault = tdp_page_fault; - context->free = nonpaging_free; context->sync_page = nonpaging_sync_page; context->invlpg = nonpaging_invlpg; context->update_pte = nonpaging_update_pte; @@ -3767,37 +3740,32 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) update_permission_bitmask(vcpu, context, false); update_last_pte_bitmap(vcpu, context); - - return 0; } -int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) { - int r; bool smep = 
kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); if (!is_paging(vcpu)) - r = nonpaging_init_context(vcpu, context); + nonpaging_init_context(vcpu, context); else if (is_long_mode(vcpu)) - r = paging64_init_context(vcpu, context); + paging64_init_context(vcpu, context); else if (is_pae(vcpu)) - r = paging32E_init_context(vcpu, context); + paging32E_init_context(vcpu, context); else - r = paging32_init_context(vcpu, context); + paging32_init_context(vcpu, context); vcpu->arch.mmu.base_role.nxe = is_nx(vcpu); vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); vcpu->arch.mmu.base_role.smep_andnot_wp = smep && !is_write_protection(vcpu); - - return r; } EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); -int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly) { ASSERT(vcpu); @@ -3806,37 +3774,30 @@ int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, context->shadow_root_level = kvm_x86_ops->get_tdp_level(); context->nx = true; - context->new_cr3 = paging_new_cr3; context->page_fault = ept_page_fault; context->gva_to_gpa = ept_gva_to_gpa; context->sync_page = ept_sync_page; context->invlpg = ept_invlpg; context->update_pte = ept_update_pte; - context->free = paging_free; context->root_level = context->shadow_root_level; context->root_hpa = INVALID_PAGE; context->direct_map = false; update_permission_bitmask(vcpu, context, true); reset_rsvds_bits_mask_ept(vcpu, context, execonly); - - return 0; } EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu); -static int init_kvm_softmmu(struct kvm_vcpu *vcpu) +static void init_kvm_softmmu(struct kvm_vcpu *vcpu) { - int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); - + kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3; vcpu->arch.walk_mmu->get_cr3 = get_cr3; vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read; vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; - - return r; } -static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) +static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu) { struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; @@ -3873,11 +3834,9 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) update_permission_bitmask(vcpu, g_context, false); update_last_pte_bitmap(vcpu, g_context); - - return 0; } -static int init_kvm_mmu(struct kvm_vcpu *vcpu) +static void init_kvm_mmu(struct kvm_vcpu *vcpu) { if (mmu_is_nested(vcpu)) return init_kvm_nested_mmu(vcpu); @@ -3887,18 +3846,12 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu) return init_kvm_softmmu(vcpu); } -static void destroy_kvm_mmu(struct kvm_vcpu *vcpu) +void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) { ASSERT(vcpu); - if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) - /* mmu.free() should set root_hpa = INVALID_PAGE */ - vcpu->arch.mmu.free(vcpu); -} -int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) -{ - destroy_kvm_mmu(vcpu); - return init_kvm_mmu(vcpu); + kvm_mmu_unload(vcpu); + init_kvm_mmu(vcpu); } EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); @@ -3923,6 +3876,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load); void kvm_mmu_unload(struct kvm_vcpu *vcpu) { mmu_free_roots(vcpu); + WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); } EXPORT_SYMBOL_GPL(kvm_mmu_unload); @@ -4281,12 +4235,12 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) return alloc_mmu_pages(vcpu); } -int kvm_mmu_setup(struct kvm_vcpu *vcpu) +void 
kvm_mmu_setup(struct kvm_vcpu *vcpu) { ASSERT(vcpu); ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); - return init_kvm_mmu(vcpu); + init_kvm_mmu(vcpu); } void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) @@ -4428,7 +4382,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) int nr_to_scan = sc->nr_to_scan; unsigned long freed = 0; - raw_spin_lock(&kvm_lock); + spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { int idx; @@ -4478,9 +4432,8 @@ unlock: break; } - raw_spin_unlock(&kvm_lock); + spin_unlock(&kvm_lock); return freed; - } static unsigned long @@ -4574,7 +4527,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) { ASSERT(vcpu); - destroy_kvm_mmu(vcpu); + kvm_mmu_unload(vcpu); free_mmu_pages(vcpu); mmu_free_memory_caches(vcpu); } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 77e044a0f5f..29261527435 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -70,8 +70,8 @@ enum { }; int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); -int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); -int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, +void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); +void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly); static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index c0bc80391e4..c7168a5cff1 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1959,11 +1959,9 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, nested_svm_vmexit(svm); } -static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) +static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) { - int r; - - r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu); + kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu); vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3; vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3; @@ -1971,8 +1969,6 @@ static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit; vcpu->arch.mmu.shadow_root_level = get_npt_level(); vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; - - return r; } static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2b2fce1b200..b2fe1c252f3 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1498,7 +1498,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, break; if (i == NR_AUTOLOAD_MSRS) { - printk_once(KERN_WARNING"Not enough mst switch entries. " + printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; } else if (i == m->nr) { @@ -1898,16 +1898,12 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) /* * KVM wants to inject page-faults which it got to the guest. This function * checks whether in a nested guest, we need to inject them to L1 or L2. - * This function assumes it is called with the exit reason in vmcs02 being - * a #PF exception (this is the only case in which KVM injects a #PF when L2 - * is running). */ -static int nested_pf_handled(struct kvm_vcpu *vcpu) +static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) { struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. 
*/ - if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR))) + if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; nested_vmx_vmexit(vcpu); @@ -1921,8 +1917,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, struct vcpu_vmx *vmx = to_vmx(vcpu); u32 intr_info = nr | INTR_INFO_VALID_MASK; - if (nr == PF_VECTOR && is_guest_mode(vcpu) && - !vmx->nested.nested_run_pending && nested_pf_handled(vcpu)) + if (!reinject && is_guest_mode(vcpu) && + nested_vmx_check_exception(vcpu, nr)) return; if (has_error_code) { @@ -2204,9 +2200,15 @@ static __init void nested_vmx_setup_ctls_msrs(void) #ifdef CONFIG_X86_64 VM_EXIT_HOST_ADDR_SPACE_SIZE | #endif - VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; + VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT | + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER; + if (!(nested_vmx_pinbased_ctls_high & PIN_BASED_VMX_PREEMPTION_TIMER) || + !(nested_vmx_exit_ctls_high & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)) { + nested_vmx_exit_ctls_high &= ~VM_EXIT_SAVE_VMX_PREEMPTION_TIMER; + nested_vmx_pinbased_ctls_high &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + } nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | - VM_EXIT_LOAD_IA32_EFER); + VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER); /* entry controls */ rdmsr(MSR_IA32_VMX_ENTRY_CTLS, @@ -2226,7 +2228,8 @@ static __init void nested_vmx_setup_ctls_msrs(void) nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); nested_vmx_procbased_ctls_low = 0; nested_vmx_procbased_ctls_high &= - CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING | + CPU_BASED_VIRTUAL_INTR_PENDING | + CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING | @@ -2252,13 +2255,15 @@ static __init void nested_vmx_setup_ctls_msrs(void) nested_vmx_secondary_ctls_low = 0; nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_UNRESTRICTED_GUEST | SECONDARY_EXEC_WBINVD_EXITING; if (enable_ept) { /* nested EPT: emulate EPT also to L1 */ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT; nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT | - VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT; + VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT | + VMX_EPT_INVEPT_BIT; nested_vmx_ept_caps &= vmx_capability.ept; /* * Since invept is completely emulated we support both global @@ -3380,8 +3385,10 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) if (enable_ept) { eptp = construct_eptp(cr3); vmcs_write64(EPT_POINTER, eptp); - guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) : - vcpu->kvm->arch.ept_identity_map_addr; + if (is_paging(vcpu) || is_guest_mode(vcpu)) + guest_cr3 = kvm_read_cr3(vcpu); + else + guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr; ept_load_pdptrs(vcpu); } @@ -4879,6 +4886,17 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) hypercall[2] = 0xc1; } +static bool nested_cr0_valid(struct vmcs12 *vmcs12, unsigned long val) +{ + unsigned long always_on = VMXON_CR0_ALWAYSON; + + if (nested_vmx_secondary_ctls_high & + SECONDARY_EXEC_UNRESTRICTED_GUEST && + nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) + always_on &= ~(X86_CR0_PE | X86_CR0_PG); + return (val & always_on) == always_on; +} + /* called to set cr0 as appropriate for a mov-to-cr0 exit. 
*/ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) { @@ -4897,9 +4915,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) val = (val & ~vmcs12->cr0_guest_host_mask) | (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); - /* TODO: will have to take unrestricted guest mode into - * account */ - if ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) + if (!nested_cr0_valid(vmcs12, val)) return 1; if (kvm_set_cr0(vcpu, val)) @@ -6627,6 +6643,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) return 0; else if (is_page_fault(intr_info)) return enable_ept; + else if (is_no_device(intr_info) && + !(nested_read_cr0(vmcs12) & X86_CR0_TS)) + return 0; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: @@ -6722,6 +6741,27 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) *info2 = vmcs_read32(VM_EXIT_INTR_INFO); } +static void nested_adjust_preemption_timer(struct kvm_vcpu *vcpu) +{ + u64 delta_tsc_l1; + u32 preempt_val_l1, preempt_val_l2, preempt_scale; + + if (!(get_vmcs12(vcpu)->pin_based_vm_exec_control & + PIN_BASED_VMX_PREEMPTION_TIMER)) + return; + preempt_scale = native_read_msr(MSR_IA32_VMX_MISC) & + MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE; + preempt_val_l2 = vmcs_read32(VMX_PREEMPTION_TIMER_VALUE); + delta_tsc_l1 = vmx_read_l1_tsc(vcpu, native_read_tsc()) + - vcpu->arch.last_guest_tsc; + preempt_val_l1 = delta_tsc_l1 >> preempt_scale; + if (preempt_val_l2 <= preempt_val_l1) + preempt_val_l2 = 0; + else + preempt_val_l2 -= preempt_val_l1; + vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, preempt_val_l2); +} + /* * The guest has exited. See if we can fix it or if we need userspace * assistance. @@ -6736,20 +6776,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) if (vmx->emulation_required) return handle_invalid_guest_state(vcpu); - /* - * the KVM_REQ_EVENT optimization bit is only on for one entry, and if - * we did not inject a still-pending event to L1 now because of - * nested_run_pending, we need to re-enable this bit. 
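The scaling done by nested_adjust_preemption_timer() above boils down to the following sketch (names are illustrative; "scale" is the TSC-to-timer shift read from MSR_IA32_VMX_MISC):

#include <stdint.h>

/* Remaining L2 preemption timer after L1 has run for delta_tsc cycles:
 * the timer counts down once per 2^scale TSC cycles and saturates at 0. */
static uint32_t l2_timer_remaining(uint32_t l2_timer, uint64_t delta_tsc,
				   unsigned int scale)
{
	uint32_t consumed_by_l1 = (uint32_t)(delta_tsc >> scale);

	return (l2_timer > consumed_by_l1) ? l2_timer - consumed_by_l1 : 0;
}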
- */ - if (vmx->nested.nested_run_pending) - kvm_make_request(KVM_REQ_EVENT, vcpu); - - if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH || - exit_reason == EXIT_REASON_VMRESUME)) - vmx->nested.nested_run_pending = 1; - else - vmx->nested.nested_run_pending = 0; - if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { nested_vmx_vmexit(vcpu); return 1; @@ -7061,9 +7087,9 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); - kvm_queue_exception_e(vcpu, vector, err); + kvm_requeue_exception_e(vcpu, vector, err); } else - kvm_queue_exception(vcpu, vector); + kvm_requeue_exception(vcpu, vector); break; case INTR_TYPE_SOFT_INTR: vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); @@ -7146,6 +7172,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) atomic_switch_perf_msrs(vmx); debugctlmsr = get_debugctlmsr(); + if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) + nested_adjust_preemption_timer(vcpu); vmx->__launched = vmx->loaded_vmcs->launched; asm( /* Store host registers */ @@ -7284,6 +7312,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX); + /* + * the KVM_REQ_EVENT optimization bit is only on for one entry, and if + * we did not inject a still-pending event to L1 now because of + * nested_run_pending, we need to re-enable this bit. + */ + if (vmx->nested.nested_run_pending) + kvm_make_request(KVM_REQ_EVENT, vcpu); + + vmx->nested.nested_run_pending = 0; + vmx_complete_atomic_exit(vmx); vmx_recover_nmi_blocking(vmx); vmx_complete_interrupts(vmx); @@ -7410,8 +7448,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) */ if (is_mmio) ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; - else if (vcpu->kvm->arch.iommu_domain && - !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)) + else if (kvm_arch_has_noncoherent_dma(vcpu->kvm)) ret = kvm_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT; else @@ -7501,9 +7538,9 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu) return get_vmcs12(vcpu)->ept_pointer; } -static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) +static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) { - int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu, + kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu, nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT); vcpu->arch.mmu.set_cr3 = vmx_set_cr3; @@ -7511,8 +7548,6 @@ static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault; vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu; - - return r; } static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) @@ -7520,6 +7555,20 @@ static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu) vcpu->arch.walk_mmu = &vcpu->arch.mmu; } +static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu, + struct x86_exception *fault) +{ + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); + + WARN_ON(!is_guest_mode(vcpu)); + + /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */ + if (vmcs12->exception_bitmap & (1u << PF_VECTOR)) + nested_vmx_vmexit(vcpu); + else + kvm_inject_page_fault(vcpu, fault); +} + /* * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested * L2 guest. 
L1 has a vmcs for L2 (vmcs12), and this function "merges" it @@ -7533,6 +7582,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; + u32 exit_control; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); @@ -7706,7 +7756,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER * bits are further modified by vmx_set_efer() below. */ - vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); + exit_control = vmcs_config.vmexit_ctrl; + if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER) + exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER; + vmcs_write32(VM_EXIT_CONTROLS, exit_control); /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are * emulated by vmx_set_efer(), below. @@ -7773,6 +7826,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) kvm_set_cr3(vcpu, vmcs12->guest_cr3); kvm_mmu_reset_context(vcpu); + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; + /* * L1 may access the L2's PDPTR, so save them to construct vmcs12 */ @@ -7876,7 +7932,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) return 1; } - if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) || + if (!nested_cr0_valid(vmcs12, vmcs12->guest_cr0) || ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) { nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT); @@ -7938,6 +7994,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) enter_guest_mode(vcpu); + vmx->nested.nested_run_pending = 1; + vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); cpu = get_cpu(); @@ -8005,7 +8063,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, u32 idt_vectoring; unsigned int nr; - if (vcpu->arch.exception.pending) { + if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) { nr = vcpu->arch.exception.nr; idt_vectoring = nr | VECTORING_INFO_VALID_MASK; @@ -8023,7 +8081,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, } vmcs12->idt_vectoring_info_field = idt_vectoring; - } else if (vcpu->arch.nmi_pending) { + } else if (vcpu->arch.nmi_injected) { vmcs12->idt_vectoring_info_field = INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; } else if (vcpu->arch.interrupt.pending) { @@ -8105,6 +8163,11 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmcs12->guest_pending_dbg_exceptions = vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); + if ((vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER) && + (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)) + vmcs12->vmx_preemption_timer_value = + vmcs_read32(VMX_PREEMPTION_TIMER_VALUE); + /* * In some cases (usually, nested EPT), L2 is allowed to change its * own CR3 without exiting. If it has changed it, we must keep it. 
@@ -8130,6 +8193,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); + if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) + vmcs12->guest_ia32_efer = vcpu->arch.efer; vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); @@ -8201,7 +8266,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, * fpu_active (which may have changed). * Note that vmx_set_cr0 refers to efer set above. */ - kvm_set_cr0(vcpu, vmcs12->host_cr0); + vmx_set_cr0(vcpu, vmcs12->host_cr0); /* * If we did fpu_activate()/fpu_deactivate() during L2's run, we need * to apply the same changes to L1's vmcs. We just set cr0 correctly, @@ -8224,6 +8289,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, kvm_set_cr3(vcpu, vmcs12->host_cr3); kvm_mmu_reset_context(vcpu); + if (!enable_ept) + vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; + if (enable_vpid) { /* * Trivially support vpid by letting L2s share their parent diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e5ca72a5cdb..21ef1ba184a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -577,6 +577,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0; + u64 valid_bits; /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */ if (index != XCR_XFEATURE_ENABLED_MASK) @@ -586,8 +587,16 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) return 1; - if (xcr0 & ~host_xcr0) + + /* + * Do not allow the guest to set bits that we do not support + * saving. However, xcr0 bit 0 is always set, even if the + * emulated CPU does not support XSAVE (see fx_init). 
+ */ + valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP; + if (xcr0 & ~valid_bits) return 1; + kvm_put_guest_xcr0(vcpu); vcpu->arch.xcr0 = xcr0; return 0; @@ -684,7 +693,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) vcpu->arch.cr3 = cr3; __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); - vcpu->arch.mmu.new_cr3(vcpu); + kvm_mmu_new_cr3(vcpu); return 0; } EXPORT_SYMBOL_GPL(kvm_set_cr3); @@ -2564,6 +2573,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: + case KVM_CAP_EXT_EMUL_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: @@ -2673,15 +2683,17 @@ long kvm_arch_dev_ioctl(struct file *filp, r = 0; break; } - case KVM_GET_SUPPORTED_CPUID: { + case KVM_GET_SUPPORTED_CPUID: + case KVM_GET_EMULATED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; - r = kvm_dev_ioctl_get_supported_cpuid(&cpuid, - cpuid_arg->entries); + + r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, + ioctl); if (r) goto out; @@ -2715,8 +2727,7 @@ static void wbinvd_ipi(void *garbage) static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { - return vcpu->kvm->arch.iommu_domain && - !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY); + return kvm_arch_has_noncoherent_dma(vcpu->kvm); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) @@ -2984,11 +2995,13 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { - if (cpu_has_xsave) + if (cpu_has_xsave) { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, - xstate_size); - else { + vcpu->arch.guest_xstate_size); + *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= + vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; + } else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); @@ -3003,10 +3016,19 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; - if (cpu_has_xsave) + if (cpu_has_xsave) { + /* + * Here we allow setting states that are not present in + * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility + * with old userspace. 
+ */ + if (xstate_bv & ~KVM_SUPPORTED_XCR0) + return -EINVAL; + if (xstate_bv & ~host_xcr0) + return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->xsave, - guest_xsave->region, xstate_size); - else { + guest_xsave->region, vcpu->arch.guest_xstate_size); + } else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, @@ -3042,9 +3064,9 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ - if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) { + if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, - guest_xcrs->xcrs[0].value); + guest_xcrs->xcrs[i].value); break; } if (r) @@ -4775,8 +4797,8 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu) static void init_decode_cache(struct x86_emulate_ctxt *ctxt) { - memset(&ctxt->twobyte, 0, - (void *)&ctxt->_regs - (void *)&ctxt->twobyte); + memset(&ctxt->opcode_len, 0, + (void *)&ctxt->_regs - (void *)&ctxt->opcode_len); ctxt->fetch.start = 0; ctxt->fetch.end = 0; @@ -5094,8 +5116,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, ctxt->have_exception = false; ctxt->perm_ok = false; - ctxt->only_vendor_specific_insn - = emulation_type & EMULTYPE_TRAP_UD; + ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); @@ -5263,7 +5284,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); - raw_spin_lock(&kvm_lock); + spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) @@ -5273,7 +5294,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va send_ipi = 1; } } - raw_spin_unlock(&kvm_lock); + spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* @@ -5426,12 +5447,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work) struct kvm_vcpu *vcpu; int i; - raw_spin_lock(&kvm_lock); + spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); atomic_set(&kvm_guest_has_master_clock, 0); - raw_spin_unlock(&kvm_lock); + spin_unlock(&kvm_lock); } static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn); @@ -5945,10 +5966,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) vcpu->mode = IN_GUEST_MODE; + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); + /* We should set ->mode before check ->requests, * see the comment in make_all_cpus_request. 
*/ - smp_mb(); + smp_mb__after_srcu_read_unlock(); local_irq_disable(); @@ -5958,12 +5981,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) smp_wmb(); local_irq_enable(); preempt_enable(); + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = 1; goto cancel_injection; } - srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); - if (req_immediate_exit) smp_send_reschedule(vcpu->cpu); @@ -6688,7 +6710,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) if (r) return r; kvm_vcpu_reset(vcpu); - r = kvm_mmu_setup(vcpu); + kvm_mmu_setup(vcpu); vcpu_put(vcpu); return r; @@ -6940,6 +6962,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) vcpu->arch.ia32_tsc_adjust_msr = 0x0; vcpu->arch.pv_time_enabled = false; + + vcpu->arch.guest_supported_xcr0 = 0; + vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; + kvm_async_pf_hash_reset(vcpu); kvm_pmu_init(vcpu); @@ -6981,6 +7007,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); + atomic_set(&kvm->arch.noncoherent_dma_count, 0); /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); @@ -7065,7 +7092,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kfree(rcu_dereference_check(kvm->arch.apic_map, 1)); } -void kvm_arch_free_memslot(struct kvm_memory_slot *free, +void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { int i; @@ -7086,7 +7113,8 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free, } } -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) { int i; @@ -7283,7 +7311,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) int r; if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || - is_error_page(work->page)) + work->wakeup_all) return; r = kvm_mmu_reload(vcpu); @@ -7393,7 +7421,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, struct x86_exception fault; trace_kvm_async_pf_ready(work->arch.token, work->gva); - if (is_error_page(work->page)) + if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); @@ -7420,6 +7448,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) kvm_x86_ops->interrupt_allowed(vcpu); } +void kvm_arch_register_noncoherent_dma(struct kvm *kvm) +{ + atomic_inc(&kvm->arch.noncoherent_dma_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma); + +void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) +{ + atomic_dec(&kvm->arch.noncoherent_dma_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma); + +bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) +{ + return atomic_read(&kvm->arch.noncoherent_dma_count); +} +EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index e224f7a671b..587fb9ede43 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -122,6 +122,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception); +#define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM) 
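Putting the __kvm_set_xcr() change and the KVM_SUPPORTED_XCR0 mask above together, the guest-visible XCR0 validation amounts to roughly the following sketch (simplified; the XSTATE_* values mirror the architectural bit positions and the helper name is invented for illustration):

#include <stdbool.h>
#include <stdint.h>

#define XSTATE_FP	(1ULL << 0)	/* x87 state, must always stay set */
#define XSTATE_SSE	(1ULL << 1)	/* SSE state */
#define XSTATE_YMM	(1ULL << 2)	/* AVX state, requires SSE */

/* A guest may only enable XSAVE states that its CPUID advertises
 * (guest_supported_xcr0), plus x87, which is always permitted. */
static bool guest_xcr0_valid(uint64_t xcr0, uint64_t guest_supported_xcr0)
{
	uint64_t valid_bits = guest_supported_xcr0 | XSTATE_FP;

	if (!(xcr0 & XSTATE_FP))
		return false;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return false;
	return (xcr0 & ~valid_bits) == 0;
}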
extern u64 host_xcr0; extern struct static_key kvm_no_apic_vcpu; diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c index a6b1b86d225..518532e6a3f 100644 --- a/arch/x86/lib/msr-smp.c +++ b/arch/x86/lib/msr-smp.c @@ -47,6 +47,21 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) } EXPORT_SYMBOL(rdmsr_on_cpu); +int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +{ + int err; + struct msr_info rv; + + memset(&rv, 0, sizeof(rv)); + + rv.msr_no = msr_no; + err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); + *q = rv.reg.q; + + return err; +} +EXPORT_SYMBOL(rdmsrl_on_cpu); + int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) { int err; @@ -63,6 +78,22 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) } EXPORT_SYMBOL(wrmsr_on_cpu); +int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +{ + int err; + struct msr_info rv; + + memset(&rv, 0, sizeof(rv)); + + rv.msr_no = msr_no; + rv.reg.q = q; + + err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); + + return err; +} +EXPORT_SYMBOL(wrmsrl_on_cpu); + static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs, void (*msr_func) (void *info)) @@ -159,6 +190,37 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) } EXPORT_SYMBOL(wrmsr_safe_on_cpu); +int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) +{ + int err; + struct msr_info rv; + + memset(&rv, 0, sizeof(rv)); + + rv.msr_no = msr_no; + rv.reg.q = q; + + err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); + + return err ? err : rv.err; +} +EXPORT_SYMBOL(wrmsrl_safe_on_cpu); + +int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) +{ + int err; + struct msr_info rv; + + memset(&rv, 0, sizeof(rv)); + + rv.msr_no = msr_no; + err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); + *q = rv.reg.q; + + return err ? err : rv.err; +} +EXPORT_SYMBOL(rdmsrl_safe_on_cpu); + /* * These variants are significantly slower, but allows control over * the entire 32-bit GPR set. diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 23d8e5fecf7..6a19ad9f370 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -6,6 +6,8 @@ nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_physaddr.o := $(nostackp) CFLAGS_setup_nx.o := $(nostackp) +CFLAGS_fault.o := -I$(src)/../include/asm/trace + obj-$(CONFIG_X86_PAT) += pat_rbtree.o obj-$(CONFIG_SMP) += tlb.o diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7a517bb4106..9ff85bb8dd6 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -20,6 +20,9 @@ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... 
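The 64-bit MSR helpers added to arch/x86/lib/msr-smp.c above are meant to be used like the existing 32-bit pair; a hypothetical caller (example register and bit layout chosen for illustration only) might do:

#include <linux/types.h>
#include <asm/msr.h>

/* Hypothetical read-modify-write of a 64-bit MSR on one specific CPU,
 * using the rdmsrl_on_cpu()/wrmsrl_safe_on_cpu() helpers added above. */
static int example_update_epb(unsigned int cpu, u64 new_bias)
{
	u64 val;
	int err;

	err = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &val);
	if (err)
		return err;

	val = (val & ~0xfULL) | (new_bias & 0xf);
	return wrmsrl_safe_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, val);
}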
*/ #include <asm/fixmap.h> /* VSYSCALL_START */ +#define CREATE_TRACE_POINTS +#include <asm/trace/exceptions.h> + /* * Page fault error code bits: * @@ -596,7 +599,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, printk(KERN_CONT " at %p\n", (void *) address); printk(KERN_ALERT "IP:"); - printk_address(regs->ip, 1); + printk_address(regs->ip); dump_pagetable(address); } @@ -1232,3 +1235,23 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) __do_page_fault(regs, error_code); exception_exit(prev_state); } + +static void trace_page_fault_entries(struct pt_regs *regs, + unsigned long error_code) +{ + if (user_mode(regs)) + trace_page_fault_user(read_cr2(), regs, error_code); + else + trace_page_fault_kernel(read_cr2(), regs, error_code); +} + +dotraplinkage void __kprobes +trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) +{ + enum ctx_state prev_state; + + prev_state = exception_enter(); + trace_page_fault_entries(regs, error_code); + __do_page_fault(regs, error_code); + exception_exit(prev_state); +} diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index ce32017c5e3..f9713061811 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -53,12 +53,12 @@ __ref void *alloc_low_pages(unsigned int num) if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) { unsigned long ret; if (min_pfn_mapped >= max_pfn_mapped) - panic("alloc_low_page: ran out of memory"); + panic("alloc_low_pages: ran out of memory"); ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT, max_pfn_mapped << PAGE_SHIFT, PAGE_SIZE * num , PAGE_SIZE); if (!ret) - panic("alloc_low_page: can not alloc memory"); + panic("alloc_low_pages: can not alloc memory"); memblock_reserve(ret, PAGE_SIZE * num); pfn = ret >> PAGE_SHIFT; } else { @@ -418,27 +418,27 @@ static unsigned long __init get_new_step_size(unsigned long step_size) return step_size << 5; } -void __init init_mem_mapping(void) +/** + * memory_map_top_down - Map [map_start, map_end) top down + * @map_start: start address of the target memory range + * @map_end: end address of the target memory range + * + * This function will setup direct mapping for memory range + * [map_start, map_end) in top-down. That said, the page tables + * will be allocated at the end of the memory, and we map the + * memory in top-down. + */ +static void __init memory_map_top_down(unsigned long map_start, + unsigned long map_end) { - unsigned long end, real_end, start, last_start; + unsigned long real_end, start, last_start; unsigned long step_size; unsigned long addr; unsigned long mapped_ram_size = 0; unsigned long new_mapped_ram_size; - probe_page_size_mask(); - -#ifdef CONFIG_X86_64 - end = max_pfn << PAGE_SHIFT; -#else - end = max_low_pfn << PAGE_SHIFT; -#endif - - /* the ISA range is always mapped regardless of memory holes */ - init_memory_mapping(0, ISA_END_ADDRESS); - /* xen has big range in reserved near end of ram, skip it at first.*/ - addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE); + addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); real_end = addr + PMD_SIZE; /* step_size need to be small so pgt_buf from BRK could cover it */ @@ -453,13 +453,13 @@ void __init init_mem_mapping(void) * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages * for page table. 
*/ - while (last_start > ISA_END_ADDRESS) { + while (last_start > map_start) { if (last_start > step_size) { start = round_down(last_start - 1, step_size); - if (start < ISA_END_ADDRESS) - start = ISA_END_ADDRESS; + if (start < map_start) + start = map_start; } else - start = ISA_END_ADDRESS; + start = map_start; new_mapped_ram_size = init_range_memory_mapping(start, last_start); last_start = start; @@ -470,8 +470,89 @@ void __init init_mem_mapping(void) mapped_ram_size += new_mapped_ram_size; } - if (real_end < end) - init_range_memory_mapping(real_end, end); + if (real_end < map_end) + init_range_memory_mapping(real_end, map_end); +} + +/** + * memory_map_bottom_up - Map [map_start, map_end) bottom up + * @map_start: start address of the target memory range + * @map_end: end address of the target memory range + * + * This function will setup direct mapping for memory range + * [map_start, map_end) in bottom-up. Since we have limited the + * bottom-up allocation above the kernel, the page tables will + * be allocated just above the kernel and we map the memory + * in [map_start, map_end) in bottom-up. + */ +static void __init memory_map_bottom_up(unsigned long map_start, + unsigned long map_end) +{ + unsigned long next, new_mapped_ram_size, start; + unsigned long mapped_ram_size = 0; + /* step_size need to be small so pgt_buf from BRK could cover it */ + unsigned long step_size = PMD_SIZE; + + start = map_start; + min_pfn_mapped = start >> PAGE_SHIFT; + + /* + * We start from the bottom (@map_start) and go to the top (@map_end). + * The memblock_find_in_range() gets us a block of RAM from the + * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages + * for page table. + */ + while (start < map_end) { + if (map_end - start > step_size) { + next = round_up(start + 1, step_size); + if (next > map_end) + next = map_end; + } else + next = map_end; + + new_mapped_ram_size = init_range_memory_mapping(start, next); + start = next; + + if (new_mapped_ram_size > mapped_ram_size) + step_size = get_new_step_size(step_size); + mapped_ram_size += new_mapped_ram_size; + } +} + +void __init init_mem_mapping(void) +{ + unsigned long end; + + probe_page_size_mask(); + +#ifdef CONFIG_X86_64 + end = max_pfn << PAGE_SHIFT; +#else + end = max_low_pfn << PAGE_SHIFT; +#endif + + /* the ISA range is always mapped regardless of memory holes */ + init_memory_mapping(0, ISA_END_ADDRESS); + + /* + * If the allocation is in bottom-up direction, we setup direct mapping + * in bottom-up, otherwise we setup direct mapping in top-down. + */ + if (memblock_bottom_up()) { + unsigned long kernel_end = __pa_symbol(_end); + + /* + * we need two separate calls here. This is because we want to + * allocate page tables above the kernel. So we first map + * [kernel_end, end) to make memory above the kernel be mapped + * as soon as possible. And then use page tables allocated above + * the kernel to map [ISA_END_ADDRESS, kernel_end). 
+ */ + memory_map_bottom_up(kernel_end, end); + memory_map_bottom_up(ISA_END_ADDRESS, kernel_end); + } else { + memory_map_top_down(ISA_END_ADDRESS, end); + } #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 8bf93bae1f1..24aec58d6af 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -567,6 +567,17 @@ static int __init numa_init(int (*init_func)(void)) ret = init_func(); if (ret < 0) return ret; + + /* + * We reset memblock back to the top-down direction + * here because if we configured ACPI_NUMA, we have + * parsed SRAT in init_func(). It is ok to have the + * reset here even if we did't configure ACPI_NUMA + * or acpi numa init fails and fallbacks to dummy + * numa init. + */ + memblock_set_bottom_up(false); + ret = numa_cleanup_meminfo(&numa_meminfo); if (ret < 0) return ret; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index dfa537a03be..a7cccb6d7fe 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -25,8 +25,12 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *pte; pte = alloc_pages(__userpte_alloc_gfp, 0); - if (pte) - pgtable_page_ctor(pte); + if (!pte) + return NULL; + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } @@ -189,8 +193,10 @@ static void free_pmds(pmd_t *pmds[]) int i; for(i = 0; i < PREALLOCATED_PMDS; i++) - if (pmds[i]) + if (pmds[i]) { + pgtable_pmd_page_dtor(virt_to_page(pmds[i])); free_page((unsigned long)pmds[i]); + } } static int preallocate_pmds(pmd_t *pmds[]) @@ -200,8 +206,13 @@ static int preallocate_pmds(pmd_t *pmds[]) for(i = 0; i < PREALLOCATED_PMDS; i++) { pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); - if (pmd == NULL) + if (!pmd) failed = true; + if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { + free_page((unsigned long)pmds[i]); + pmd = NULL; + failed = true; + } pmds[i] = pmd; } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 516593e1ce3..26328e80086 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -788,5 +788,7 @@ void bpf_jit_free(struct sk_filter *fp) if (fp->bpf_func != sk_run_filter) { INIT_WORK(&fp->work, bpf_jit_free_deferred); schedule_work(&fp->work); + } else { + kfree(fp); } } diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index b30e937689d..7fb24e53d4c 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -354,12 +354,12 @@ static void coalesce_windows(struct pci_root_info *info, unsigned long type) * the kernel resource tree doesn't allow overlaps. 
*/ if (resource_overlaps(res1, res2)) { - res1->start = min(res1->start, res2->start); - res1->end = max(res1->end, res2->end); + res2->start = min(res1->start, res2->start); + res2->end = max(res1->end, res2->end); dev_info(&info->bridge->dev, "host bridge window expanded to %pR; %pR ignored\n", - res1, res2); - res2->flags = 0; + res2, res1); + res1->flags = 0; } } } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index f5809fa2753..b046e070e08 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -231,7 +231,7 @@ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int wh offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)]; if ((offset) && (where == offset)) - value = value & 0xfffffffc; + value = value & ~PCI_EXP_LNKCTL_ASPMC; return raw_pci_write(pci_domain_nr(bus), bus->number, devfn, where, size, value); @@ -252,7 +252,7 @@ static struct pci_ops quirk_pcie_aspm_ops = { */ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev) { - int cap_base, i; + int i; struct pci_bus *pbus; struct pci_dev *dev; @@ -278,7 +278,7 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev) for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i) quirk_aspm_offset[i] = 0; - pbus->ops = pbus->parent->ops; + pci_bus_set_ops(pbus, pbus->parent->ops); } else { /* * If devices are attached to the root port at power-up or @@ -286,13 +286,15 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev) * each root port to save the register offsets and replace the * bus ops. */ - list_for_each_entry(dev, &pbus->devices, bus_list) { + list_for_each_entry(dev, &pbus->devices, bus_list) /* There are 0 to 8 devices attached to this bus */ - cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP); - quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10; - } - pbus->ops = &quirk_pcie_aspm_ops; + quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = + dev->pcie_cap + PCI_EXP_LNKCTL; + + pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops); + dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n"); } + } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk); diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 48e8461057b..5eee4959785 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -382,7 +382,14 @@ static void xen_teardown_msi_irq(unsigned int irq) { xen_destroy_irq(irq); } - +static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) +{ + return 0; +} +static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag) +{ + return 0; +} #endif int __init pci_xen_init(void) @@ -406,6 +413,8 @@ int __init pci_xen_init(void) x86_msi.setup_msi_irqs = xen_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; + x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; + x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; #endif return 0; } @@ -485,6 +494,8 @@ int __init pci_xen_initial_domain(void) x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; x86_msi.teardown_msi_irq = xen_teardown_msi_irq; x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; + x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; + x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; #endif xen_setup_acpi_sci(); __acpi_register_gsi = acpi_register_gsi_xen; diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c 
b/arch/x86/platform/olpc/olpc-xo15-sci.c index fef7d0ba7e3..649a12befba 100644 --- a/arch/x86/platform/olpc/olpc-xo15-sci.c +++ b/arch/x86/platform/olpc/olpc-xo15-sci.c @@ -40,16 +40,9 @@ static bool lid_wake_on_close; */ static int set_lid_wake_behavior(bool wake_on_close) { - struct acpi_object_list arg_list; - union acpi_object arg; acpi_status status; - arg_list.count = 1; - arg_list.pointer = &arg; - arg.type = ACPI_TYPE_INTEGER; - arg.integer.value = wake_on_close; - - status = acpi_evaluate_object(NULL, "\\_SB.PCI0.LID.LIDW", &arg_list, NULL); + status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close); if (ACPI_FAILURE(status)) { pr_warning(PFX "failed to set lid behavior\n"); return 1; diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index 2e863ad4a77..8eeccba7313 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -399,7 +399,7 @@ static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs) printk(KERN_DEFAULT "UV: %4d %6d %-32.32s ", cpu, current->pid, current->comm); - printk_address(regs->ip, 1); + printk_address(regs->ip); } /* Dump this cpu's state */ diff --git a/arch/x86/um/elfcore.c b/arch/x86/um/elfcore.c index 6bb49b687c9..7bb89a27a5e 100644 --- a/arch/x86/um/elfcore.c +++ b/arch/x86/um/elfcore.c @@ -11,8 +11,7 @@ Elf32_Half elf_core_extra_phdrs(void) return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0; } -int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, - unsigned long limit) +int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = @@ -32,17 +31,14 @@ int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size, phdr.p_offset += ofs; } phdr.p_paddr = 0; /* match other core phdrs */ - *size += sizeof(phdr); - if (*size > limit - || !dump_write(file, &phdr, sizeof(phdr))) + if (!dump_emit(cprm, &phdr, sizeof(phdr))) return 0; } } return 1; } -int elf_core_write_extra_data(struct file *file, size_t *size, - unsigned long limit) +int elf_core_write_extra_data(struct coredump_params *cprm) { if ( vsyscall_ehdr ) { const struct elfhdr *const ehdrp = @@ -55,10 +51,7 @@ int elf_core_write_extra_data(struct file *file, size_t *size, if (phdrp[i].p_type == PT_LOAD) { void *addr = (void *) phdrp[i].p_vaddr; size_t filesz = phdrp[i].p_filesz; - - *size += filesz; - if (*size > limit - || !dump_write(file, addr, filesz)) + if (!dump_emit(cprm, addr, filesz)) return 0; } } diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 72074d52840..2ada505067c 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin(&gtod->seq); + seq = read_seqcount_begin_no_lockdep(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->wall_time_sec; ns = gtod->wall_time_snsec; @@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts) ts->tv_nsec = 0; do { - seq = read_seqcount_begin(&gtod->seq); + seq = read_seqcount_begin_no_lockdep(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->monotonic_time_sec; ns = gtod->monotonic_time_snsec; @@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin(&gtod->seq); + seq = read_seqcount_begin_no_lockdep(&gtod->seq); ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); @@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts) { unsigned long seq; do { - seq = read_seqcount_begin(&gtod->seq); + seq = read_seqcount_begin_no_lockdep(&gtod->seq); ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index fdc3ba28ca3..ce563be09cc 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -468,8 +468,8 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); * 3 PCD PWT UC UC UC * 4 PAT WB WC WB * 5 PAT PWT WC WP WT - * 6 PAT PCD UC- UC UC- - * 7 PAT PCD PWT UC UC UC + * 6 PAT PCD UC- rsv UC- + * 7 PAT PCD PWT UC rsv UC */ void xen_set_pat(u64 pat) @@ -796,8 +796,8 @@ static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) { spinlock_t *ptl = NULL; -#if USE_SPLIT_PTLOCKS - ptl = __pte_lockptr(page); +#if USE_SPLIT_PTE_PTLOCKS + ptl = ptlock_ptr(page); spin_lock_nest_lock(ptl, &mm->page_table_lock); #endif @@ -1637,7 +1637,7 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, __set_pfn_prot(pfn, PAGE_KERNEL_RO); - if (level == PT_PTE && USE_SPLIT_PTLOCKS) + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); xen_mc_issue(PARAVIRT_LAZY_MMU); @@ -1671,7 +1671,7 @@ static inline void xen_release_ptpage(unsigned long pfn, unsigned level) if (!PageHighMem(page)) { xen_mc_batch(); - if (level == PT_PTE && USE_SPLIT_PTLOCKS) + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); __set_pfn_prot(pfn, PAGE_KERNEL); @@ -2328,12 +2328,14 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in, return success; } -int xen_create_contiguous_region(unsigned long vstart, unsigned int order, - unsigned int address_bits) +int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, + unsigned int address_bits, + dma_addr_t *dma_handle) { unsigned long *in_frames = discontig_frames, out_frame; unsigned long flags; int success; + unsigned long vstart = (unsigned long)phys_to_virt(pstart); /* * Currently an auto-translated guest will not perform I/O, nor will @@ -2368,15 +2370,17 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order, spin_unlock_irqrestore(&xen_reservation_lock, flags); + *dma_handle = virt_to_machine(vstart).maddr; return success ?
0 : -ENOMEM; } EXPORT_SYMBOL_GPL(xen_create_contiguous_region); -void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order) +void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) { unsigned long *out_frames = discontig_frames, in_frame; unsigned long flags; int success; + unsigned long vstart; if (xen_feature(XENFEAT_auto_translated_physmap)) return; @@ -2384,6 +2388,7 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order) if (unlikely(order > MAX_CONTIG_ORDER)) return; + vstart = (unsigned long)phys_to_virt(pstart); memset((void *) vstart, 0, PAGE_SIZE << order); spin_lock_irqsave(&xen_reservation_lock, flags); diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index a61c7d5811b..2ae8699e876 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -799,10 +799,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) { unsigned topidx, mididx, idx; - if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { - BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); + /* don't track P2M changes in autotranslate guests */ + if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) return true; - } + if (unlikely(pfn >= MAX_P2M_PFN)) { BUG_ON(mfn != INVALID_P2M_ENTRY); return true; diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c index 969570491c3..0e98e5d241d 100644 --- a/arch/x86/xen/pci-swiotlb-xen.c +++ b/arch/x86/xen/pci-swiotlb-xen.c @@ -75,8 +75,10 @@ void __init pci_xen_swiotlb_init(void) xen_swiotlb_init(1, true /* early */); dma_ops = &xen_swiotlb_dma_ops; +#ifdef CONFIG_PCI /* Make sure ACS will be enabled */ pci_request_acs(); +#endif } } @@ -92,8 +94,10 @@ int pci_xen_swiotlb_init_late(void) return rc; dma_ops = &xen_swiotlb_dma_ops; +#ifdef CONFIG_PCI /* Make sure ACS will be enabled */ pci_request_acs(); +#endif return 0; } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 09f3059cb00..68c054f59de 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -556,7 +556,7 @@ void xen_enable_syscall(void) } #endif /* CONFIG_X86_64 */ } -void __cpuinit xen_enable_nmi(void) +void xen_enable_nmi(void) { #ifdef CONFIG_X86_64 if (register_callback(CALLBACKTYPE_nmi, nmi)) diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 31d04758b76..c36b325abd8 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -149,7 +149,7 @@ static int xen_smp_intr_init(unsigned int cpu) rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, xen_reschedule_interrupt, - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, + IRQF_PERCPU|IRQF_NOBALANCING, resched_name, NULL); if (rc < 0) @@ -161,7 +161,7 @@ static int xen_smp_intr_init(unsigned int cpu) rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu, xen_call_function_interrupt, - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, + IRQF_PERCPU|IRQF_NOBALANCING, callfunc_name, NULL); if (rc < 0) @@ -171,7 +171,7 @@ static int xen_smp_intr_init(unsigned int cpu) debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, - IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING, + IRQF_PERCPU | IRQF_NOBALANCING, debug_name, NULL); if (rc < 0) goto fail; @@ -182,7 +182,7 @@ static int xen_smp_intr_init(unsigned int cpu) rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu, xen_call_function_single_interrupt, - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, + IRQF_PERCPU|IRQF_NOBALANCING, callfunc_name, NULL); if (rc < 0) @@ -201,7 +201,7 @@ static int 
xen_smp_intr_init(unsigned int cpu) rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu, xen_irq_work_interrupt, - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, + IRQF_PERCPU|IRQF_NOBALANCING, callfunc_name, NULL); if (rc < 0) diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index be6b8607895..0e36cde12f7 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -234,7 +234,7 @@ void xen_init_lock_cpu(int cpu) irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, cpu, dummy_handler, - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, + IRQF_PERCPU|IRQF_NOBALANCING, name, NULL); diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index ee365895b06..12a1ca707b9 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -443,8 +443,7 @@ void xen_setup_timer(int cpu) name = "<timer kasprintf failed>"; irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, - IRQF_DISABLED|IRQF_PERCPU| - IRQF_NOBALANCING|IRQF_TIMER| + IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| IRQF_FORCE_RESUME, name, NULL); diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index cf914c8c249..d38eb9237e6 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h @@ -38,35 +38,46 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) free_page((unsigned long)pgd); } -/* Use a slab cache for the pte pages (see also sparc64 implementation) */ - -extern struct kmem_cache *pgtable_cache; - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); + pte_t *ptep; + int i; + + ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); + if (!ptep) + return NULL; + for (i = 0; i < 1024; i++) + pte_clear(NULL, 0, ptep + i); + return ptep; } static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) { + pte_t *pte; struct page *page; - page = virt_to_page(pte_alloc_one_kernel(mm, addr)); - pgtable_page_ctor(page); + pte = pte_alloc_one_kernel(mm, addr); + if (!pte) + return NULL; + page = virt_to_page(pte); + if (!pgtable_page_ctor(page)) { + __free_page(page); + return NULL; + } return page; } static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { - kmem_cache_free(pgtable_cache, pte); + free_page((unsigned long)pte); } static inline void pte_free(struct mm_struct *mm, pgtable_t pte) { pgtable_page_dtor(pte); - kmem_cache_free(pgtable_cache, page_address(pte)); + __free_page(pte); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 0fdf5d043f8..216446295ad 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -220,12 +220,11 @@ extern unsigned long empty_zero_page[1024]; #ifdef CONFIG_MMU extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)]; extern void paging_init(void); -extern void pgtable_cache_init(void); #else # define swapper_pg_dir NULL static inline void paging_init(void) { } -static inline void pgtable_cache_init(void) { } #endif +static inline void pgtable_cache_init(void) { } /* * The pmd contains the kernel virtual address of the pte page. 
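The pgalloc conversions in this series (alpha and arc earlier, xtensa here) all converge on one shape: pgtable_page_ctor() may now fail, because split PTE locks can require a separate lock allocation, so pte_alloc_one() must check its return value and free the page it just allocated. Below is a minimal sketch of that shape for a hypothetical architecture; the function name, GFP flags and zeroing strategy are illustrative, and only the pgtable_page_ctor()/__free_page() handling mirrors the hunks above.

/*
 * Sketch only: the error-handling pattern shared by the pte_alloc_one()
 * conversions in this diff.  Not taken verbatim from any one architecture.
 */
#include <linux/mm.h>
#include <linux/gfp.h>

static inline pgtable_t example_pte_alloc_one(struct mm_struct *mm,
                                              unsigned long address)
{
        struct page *page;
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!pte)
                return NULL;

        page = virt_to_page(pte);
        if (!pgtable_page_ctor(page)) {
                /* ctor can fail once PTE locks are allocated separately */
                __free_page(page);
                return NULL;
        }
        return page;
}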
diff --git a/arch/xtensa/include/asm/prom.h b/arch/xtensa/include/asm/prom.h deleted file mode 100644 index f3d7cd2c0de..00000000000 --- a/arch/xtensa/include/asm/prom.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _XTENSA_ASM_PROM_H -#define _XTENSA_ASM_PROM_H - -#define HAVE_ARCH_DEVTREE_FIXUPS - -#endif /* _XTENSA_ASM_PROM_H */ diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index c114483010c..7db5c22faa6 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -87,4 +87,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _XTENSA_SOCKET_H */ diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 946fb8d06c8..6e2b6638122 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -21,11 +21,8 @@ #include <linux/screen_info.h> #include <linux/bootmem.h> #include <linux/kernel.h> - -#ifdef CONFIG_OF #include <linux/of_fdt.h> #include <linux/of_platform.h> -#endif #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) # include <linux/console.h> @@ -64,8 +61,8 @@ extern struct rtc_ops no_rtc_ops; struct rtc_ops *rtc_ops; #ifdef CONFIG_BLK_DEV_INITRD -extern void *initrd_start; -extern void *initrd_end; +extern unsigned long initrd_start; +extern unsigned long initrd_end; int initrd_is_mapped = 0; extern int initrd_below_start_ok; #endif @@ -152,8 +149,8 @@ static int __init parse_tag_initrd(const bp_tag_t* tag) { meminfo_t* mi; mi = (meminfo_t*)(tag->data); - initrd_start = __va(mi->start); - initrd_end = __va(mi->end); + initrd_start = (unsigned long)__va(mi->start); + initrd_end = (unsigned long)__va(mi->end); return 0; } @@ -170,13 +167,6 @@ static int __init parse_tag_fdt(const bp_tag_t *tag) __tagtable(BP_TAG_FDT, parse_tag_fdt); -void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) -{ - initrd_start = (void *)__va(start); - initrd_end = (void *)__va(end); - initrd_below_start_ok = 1; -} - #endif /* CONFIG_OF */ #endif /* CONFIG_BLK_DEV_INITRD */ @@ -222,9 +212,13 @@ static int __init parse_bootparam(const bp_tag_t* tag) } #ifdef CONFIG_OF +bool __initdata dt_memory_scan = false; void __init early_init_dt_add_memory_arch(u64 base, u64 size) { + if (!dt_memory_scan) + return; + size &= PAGE_MASK; add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size); } @@ -236,31 +230,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) void __init early_init_devtree(void *params) { - /* Setup flat device-tree pointer */ - initial_boot_params = params; - - /* Retrieve various informations from the /chosen node of the - * device-tree, including the platform type, initrd location and - * size, TCE reserve, and more ... 
- */ - if (!command_line[0]) - of_scan_flat_dt(early_init_dt_scan_chosen, command_line); - - /* Scan memory nodes and rebuild MEMBLOCKs */ - of_scan_flat_dt(early_init_dt_scan_root, NULL); if (sysmem.nr_banks == 0) - of_scan_flat_dt(early_init_dt_scan_memory, NULL); -} + dt_memory_scan = true; -static void __init copy_devtree(void) -{ - void *alloc = early_init_dt_alloc_memory_arch( - be32_to_cpu(initial_boot_params->totalsize), 8); - if (alloc) { - memcpy(alloc, initial_boot_params, - be32_to_cpu(initial_boot_params->totalsize)); - initial_boot_params = alloc; - } + early_init_dt_scan(params); + + if (!command_line[0]) + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); } static int __init xtensa_device_probe(void) @@ -525,10 +501,7 @@ void __init setup_arch(char **cmdline_p) bootmem_init(); -#ifdef CONFIG_OF - copy_devtree(); - unflatten_device_tree(); -#endif + unflatten_and_copy_device_tree(); platform_setup(cmdline_p); diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index a1077570e38..c43771c974b 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -50,23 +50,3 @@ void __init init_mmu(void) */ set_ptevaddr_register(PGTABLE_START); } - -struct kmem_cache *pgtable_cache __read_mostly; - -static void pgd_ctor(void *addr) -{ - pte_t *ptep = (pte_t *)addr; - int i; - - for (i = 0; i < 1024; i++, ptep++) - pte_clear(NULL, 0, ptep); - -} - -void __init pgtable_cache_init(void) -{ - pgtable_cache = kmem_cache_create("pgd", - PAGE_SIZE, PAGE_SIZE, - SLAB_HWCACHE_ALIGN, - pgd_ctor); -}
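The xtensa devtree rework above is part of the same consolidation: the per-arch early_init_devtree() shrinks to a call to the common early_init_dt_scan(), and the copy_devtree() + unflatten_device_tree() pair in setup_arch() becomes a single unflatten_and_copy_device_tree() call. The sketch below shows the resulting boot path for a hypothetical port; the example_* names and the local command-line buffer are illustrative, and only the two of_fdt helpers are the shared interface used in the hunks above.

/*
 * Sketch: device-tree boot flow after the consolidation in this series.
 */
#include <linux/init.h>     /* __init, boot_command_line */
#include <linux/of_fdt.h>   /* early_init_dt_scan(), unflatten_and_copy_device_tree() */
#include <linux/string.h>
#include <asm/setup.h>      /* COMMAND_LINE_SIZE */

static char example_cmdline[COMMAND_LINE_SIZE] __initdata;

void __init example_early_init_devtree(void *params)
{
        /* Scans /chosen, /memory and the root node of the flat blob. */
        early_init_dt_scan(params);

        if (!example_cmdline[0])
                strlcpy(example_cmdline, boot_command_line, COMMAND_LINE_SIZE);
}

void __init example_setup_arch(void)
{
        /*
         * Copies the blob out of early memory and expands it into
         * struct device_node form in one step, replacing the old
         * per-arch copy_devtree() + unflatten_device_tree() pair.
         */
        unflatten_and_copy_device_tree();
}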