Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/assembler.h              |   8
-rw-r--r--  arch/arm/include/asm/dma-mapping.h            |   7
-rw-r--r--  arch/arm/include/asm/hardware/linkup-l1110.h  |  48
-rw-r--r--  arch/arm/include/asm/memory.h                 |   3
-rw-r--r--  arch/arm/include/asm/opcodes-virt.h           |  29
-rw-r--r--  arch/arm/include/asm/opcodes.h                | 181
-rw-r--r--  arch/arm/include/asm/pgtable.h                |  40
-rw-r--r--  arch/arm/include/asm/sched_clock.h            |   2
-rw-r--r--  arch/arm/include/asm/syscall.h                |   4
-rw-r--r--  arch/arm/include/asm/thread_info.h            |   6
-rw-r--r--  arch/arm/include/asm/tlb.h                    |   4
-rw-r--r--  arch/arm/include/asm/uaccess.h                |  58
-rw-r--r--  arch/arm/include/asm/unistd.h                 |  10
13 files changed, 302 insertions(+), 98 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0..5c8b3bf4d82 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
 	.size \name , . - \name
 	.endm
 
+	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcccs	\tmp, \tmp, \limit
+	bcs	\bad
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df455..5c44dcb0987 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 }
 
 /*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
  * memory allocator is initialised, i.e. before any core_initcall.
diff --git a/arch/arm/include/asm/hardware/linkup-l1110.h b/arch/arm/include/asm/hardware/linkup-l1110.h
deleted file mode 100644
index 7ec91168a57..00000000000
--- a/arch/arm/include/asm/hardware/linkup-l1110.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-*
-* Definitions for H3600 Handheld Computer
-*
-* Copyright 2001 Compaq Computer Corporation.
-*
-* Use consistent with the GNU GPL is permitted,
-* provided that this copyright notice is
-* preserved in its entirety in all copies and derived works.
-*
-* COMPAQ COMPUTER CORPORATION MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
-* AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
-* FITNESS FOR ANY PARTICULAR PURPOSE.
-*
-* Author: Jamey Hicks.
-*
-*/
-
-/* LinkUp Systems PCCard/CompactFlash Interface for SA-1100 */
-
-/* PC Card Status Register */
-#define LINKUP_PRS_S1		(1 << 0) /* voltage control bits S1-S4 */
-#define LINKUP_PRS_S2		(1 << 1)
-#define LINKUP_PRS_S3		(1 << 2)
-#define LINKUP_PRS_S4		(1 << 3)
-#define LINKUP_PRS_BVD1		(1 << 4)
-#define LINKUP_PRS_BVD2		(1 << 5)
-#define LINKUP_PRS_VS1		(1 << 6)
-#define LINKUP_PRS_VS2		(1 << 7)
-#define LINKUP_PRS_RDY		(1 << 8)
-#define LINKUP_PRS_CD1		(1 << 9)
-#define LINKUP_PRS_CD2		(1 << 10)
-
-/* PC Card Command Register */
-#define LINKUP_PRC_S1		(1 << 0)
-#define LINKUP_PRC_S2		(1 << 1)
-#define LINKUP_PRC_S3		(1 << 2)
-#define LINKUP_PRC_S4		(1 << 3)
-#define LINKUP_PRC_RESET	(1 << 4)
-#define LINKUP_PRC_APOE		(1 << 5) /* Auto Power Off Enable: clears S1-S4 when either nCD goes high */
-#define LINKUP_PRC_CFE		(1 << 6) /* CompactFlash mode Enable: addresses A[10:0] only, A[25:11] high */
-#define LINKUP_PRC_SOE		(1 << 7) /* signal output driver enable */
-#define LINKUP_PRC_SSP		(1 << 8) /* sock select polarity: 0 for socket 0, 1 for socket 1 */
-#define LINKUP_PRC_MBZ		(1 << 15) /* must be zero */
-
-struct linkup_l1110 {
-	volatile short prc;
-};
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f..5f6ddcc5645 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
new file mode 100644
index 00000000000..b85665a96f8
--- /dev/null
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -0,0 +1,29 @@
+/*
+ * opcodes-virt.h: Opcode definitions for the ARM virtualization extensions
+ * Copyright (C) 2012  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __ASM_ARM_OPCODES_VIRT_H
+#define __ASM_ARM_OPCODES_VIRT_H
+
+#include <asm/opcodes.h>
+
+#define __HVC(imm16) __inst_arm_thumb32(				\
+	0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F),	\
+	0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF)	\
+)
+
+#endif /* ! __ASM_ARM_OPCODES_VIRT_H */
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 19c48deda70..74e211a6fb2 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -19,6 +19,33 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
 
 
 /*
+ * Assembler opcode byteswap helpers.
+ * These are only intended for use by this header: don't use them directly,
+ * because they will be suboptimal in most cases.
+ */
+#define ___asm_opcode_swab32(x) (	\
+	  (((x) << 24) & 0xFF000000)	\
+	| (((x) <<  8) & 0x00FF0000)	\
+	| (((x) >>  8) & 0x0000FF00)	\
+	| (((x) >> 24) & 0x000000FF)	\
+)
+#define ___asm_opcode_swab16(x) (	\
+	  (((x) << 8) & 0xFF00)		\
+	| (((x) >> 8) & 0x00FF)		\
+)
+#define ___asm_opcode_swahb32(x) (	\
+	  (((x) << 8) & 0xFF00FF00)	\
+	| (((x) >> 8) & 0x00FF00FF)	\
+)
+#define ___asm_opcode_swahw32(x) (	\
+	  (((x) << 16) & 0xFFFF0000)	\
+	| (((x) >> 16) & 0x0000FFFF)	\
+)
+#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF)
+#define ___asm_opcode_identity16(x) ((x) & 0xFFFF)
+
+
+/*
  * Opcode byteswap helpers
  *
  * These macros help with converting instructions between a canonical integer
@@ -41,39 +68,163 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
  * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not
  * represent any valid Thumb-2 instruction.  For this range,
  * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false.
+ *
+ * The ___asm variants are intended only for use by this header, in situations
+ * involving inline assembler.  For .S files, the normal __opcode_*() macros
+ * should do the right thing.
  */
 
-#ifndef __ASSEMBLY__
+#ifdef __ASSEMBLY__
+
+#define ___opcode_swab32(x) ___asm_opcode_swab32(x)
+#define ___opcode_swab16(x) ___asm_opcode_swab16(x)
+#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x)
+#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x)
+#define ___opcode_identity32(x) ___asm_opcode_identity32(x)
+#define ___opcode_identity16(x) ___asm_opcode_identity16(x)
+
+#else /* ! __ASSEMBLY__ */
 
 #include <linux/types.h>
 #include <linux/swab.h>
 
+#define ___opcode_swab32(x) swab32(x)
+#define ___opcode_swab16(x) swab16(x)
+#define ___opcode_swahb32(x) swahb32(x)
+#define ___opcode_swahw32(x) swahw32(x)
+#define ___opcode_identity32(x) ((u32)(x))
+#define ___opcode_identity16(x) ((u16)(x))
+
+#endif /* ! __ASSEMBLY__ */
+
+
 #ifdef CONFIG_CPU_ENDIAN_BE8
-#define __opcode_to_mem_arm(x) swab32(x)
-#define __opcode_to_mem_thumb16(x) swab16(x)
-#define __opcode_to_mem_thumb32(x) swahb32(x)
-#else
-#define __opcode_to_mem_arm(x) ((u32)(x))
-#define __opcode_to_mem_thumb16(x) ((u16)(x))
-#define __opcode_to_mem_thumb32(x) swahw32(x)
+
+#define __opcode_to_mem_arm(x) ___opcode_swab32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x)
+#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x)
+
+#else /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+#define __opcode_to_mem_arm(x) ___opcode_identity32(x)
+#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x)
+#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x)
+#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
+/*
+ * On BE32 systems, using 32-bit accesses to store Thumb instructions will not
+ * work in all cases, due to alignment constraints.  For now, a correct
+ * version is not provided for BE32.
+ */
+#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x)
+#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x)
 #endif
 
+#endif /* ! CONFIG_CPU_ENDIAN_BE8 */
+
+
 #define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x)
 #define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x)
+#ifndef CONFIG_CPU_ENDIAN_BE32
 #define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x)
+#endif
 
 /* Operations specific to Thumb opcodes */
 
 /* Instruction size checks: */
-#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL)
-#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL)
+#define __opcode_is_thumb32(x) (		\
+	   ((x) & 0xF8000000) == 0xE8000000	\
+	|| ((x) & 0xF0000000) == 0xF0000000	\
+)
+#define __opcode_is_thumb16(x) (					\
+	   ((x) & 0xFFFF0000) == 0					\
+	&& !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000)	\
+)
 
 /* Operations to construct or split 32-bit Thumb instructions: */
-#define __opcode_thumb32_first(x) ((u16)((x) >> 16))
-#define __opcode_thumb32_second(x) ((u16)(x))
-#define __opcode_thumb32_compose(first, second) \
-	(((u32)(u16)(first) << 16) | (u32)(u16)(second))
+#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16))
+#define __opcode_thumb32_second(x) (___opcode_identity16(x))
+#define __opcode_thumb32_compose(first, second) (			\
+	  (___opcode_identity32(___opcode_identity16(first)) << 16)	\
+	| ___opcode_identity32(___opcode_identity16(second))		\
+)
+#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16))
+#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x))
+#define ___asm_opcode_thumb32_compose(first, second) (			     \
+	  (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \
+	| ___asm_opcode_identity32(___asm_opcode_identity16(second))	     \
+)
 
-#endif /* __ASSEMBLY__ */
+/*
+ * Opcode injection helpers
+ *
+ * In rare cases it is necessary to assemble an opcode which the
+ * assembler does not support directly, or which would normally be
+ * rejected because of the CFLAGS or AFLAGS used to build the affected
+ * file.
+ *
+ * Before using these macros, consider carefully whether it is feasible
+ * instead to change the build flags for your file, or whether it really
+ * makes sense to support old assembler versions when building that
+ * particular kernel feature.
+ *
+ * The macros defined here should only be used where there is no viable
+ * alternative.
+ *
+ *
+ * __inst_arm(x): emit the specified ARM opcode
+ * __inst_thumb16(x): emit the specified 16-bit Thumb opcode
+ * __inst_thumb32(x): emit the specified 32-bit Thumb opcode
+ *
+ * __inst_arm_thumb16(arm, thumb): emit either the specified arm or
+ *	16-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ * __inst_arm_thumb32(arm, thumb): emit either the specified arm or
+ *	32-bit Thumb opcode, depending on whether an ARM or Thumb-2
+ *	kernel is being built
+ *
+ *
+ * Note that using these macros directly is poor practice.  Instead, you
+ * should use them to define human-readable wrapper macros to encode the
+ * instructions that you care about.  In code which might run on ARMv7 or
+ * above, you can usually use the __inst_arm_thumb{16,32} macros to
+ * specify the ARM and Thumb alternatives at the same time.  This ensures
+ * that the correct opcode gets emitted depending on the instruction set
+ * used for the kernel build.
+ *
+ * Look at opcodes-virt.h for an example of how to use these macros.
+ */
+#include <linux/stringify.h>
+
+#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x))
+#define __inst_thumb32(x) ___inst_thumb32(				\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)),	\
+	___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x))	\
+)
+#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x))
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \
+	__inst_thumb16(thumb_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \
+	__inst_thumb32(thumb_opcode)
+#else
+#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode)
+#endif
+
+/* Helpers for the helpers.  Don't use these directly. */
+#ifdef __ASSEMBLY__
+#define ___inst_arm(x) .long x
+#define ___inst_thumb16(x) .short x
+#define ___inst_thumb32(first, second) .short first, second
+#else
+#define ___inst_arm(x) ".long " __stringify(x) "\n\t"
+#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t"
+#define ___inst_thumb32(first, second) \
+	".short " __stringify(first) ", " __stringify(second) "\n\t"
+#endif
 
 #endif /* __ASM_ARM_OPCODES_H */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d71e7..41dc31f834c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
+#define pte_none(pte)		(!pte_val(pte))
+#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte)	(0)
+
+#define pte_present_user(pte) \
+	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+	 (L_PTE_PRESENT | L_PTE_USER))
+
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
 {
@@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pteval)
 {
-	if (addr >= TASK_SIZE)
-		set_pte_ext(ptep, pteval, 0);
-	else {
+	unsigned long ext = 0;
+
+	if (addr < TASK_SIZE && pte_present_user(pteval)) {
 		__sync_icache_dcache(pteval);
-		set_pte_ext(ptep, pteval, PTE_EXT_NG);
+		ext |= PTE_EXT_NG;
 	}
-}
 
-#define pte_none(pte)		(!pte_val(pte))
-#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte)	(0)
-
-#define pte_present_user(pte) \
-	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
-	 (L_PTE_PRESENT | L_PTE_USER))
+	set_pte_ext(ptep, pteval, ext);
+}
 
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  *
  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- *   <--------------- offset --------------------> <- type --> 0 0 0
+ *   <--------------- offset ----------------------> < type -> 0 0 0
  *
- * This gives us up to 63 swap files and 32GB per swap file.  Note that
+ * This gives us up to 31 swap files and 64GB per swap file.  Note that
  * the offset field is always non-zero.
  */
 #define __SWP_TYPE_SHIFT	3
-#define __SWP_TYPE_BITS		6
+#define __SWP_TYPE_BITS		5
 #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
 #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index e3f75726343..05b8e82ec9f 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
 
 extern void sched_clock_postinit(void);
 extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+		unsigned long rate);
 
 #endif
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index c334a23ddf7..47486a41c56 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -9,6 +9,10 @@
 
 #include <linux/err.h>
 
+#include <asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
+
 extern const unsigned long sys_call_table[];
 
 static inline int syscall_get_nr(struct task_struct *task,
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index af7b0bda335..f71cdab18b8 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -59,7 +59,9 @@ struct thread_info {
 	__u32			syscall;	/* syscall number */
 	__u8			used_cp[16];	/* thread used copro */
 	unsigned long		tp_value;
+#ifdef CONFIG_CRUNCH
 	struct crunch_state	crunchstate;
+#endif
 	union fp_state		fpstate __attribute__((aligned(8)));
 	union vfp_state		vfpstate;
 #ifdef CONFIG_ARM_THUMBEE
@@ -148,6 +150,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
+#define TIF_SYSCALL_TRACEPOINT	10
 #define TIF_POLLING_NRFLAG	16
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -160,12 +163,13 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 
 /* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
 
 /*
  * Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae..99a19512ee2 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+#else
 	/*
 	 * With the classic ARM MMU, a pte page has two corresponding pmd
 	 * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	addr &= PMD_MASK;
 	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
 	tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
 	tlb_remove_page(tlb, pte);
 }
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b..77bd79f2ffd 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)				\
+#define __GUP_CLOBBER_1	"lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4	"lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__get_user_" #__s				\
 		: "=&r" (__e), "=r" (__r2)				\
-		: "0" (__p)						\
-		: __i, "cc")
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)							\
+#define __get_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
 		register unsigned long __r2 asm("r2");			\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, 1, "lr");		\
-	       		break;						\
+			__get_user_x(__r2, __p, __e, __l, 1);		\
+			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
+			__get_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__get_user_x(__r2, __p, __e, 4, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
 		__e;							\
 	})
 
+#define get_user(x,p)							\
+	({								\
+		might_fault();						\
+		__get_user_check(x,p);					\
+	 })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)					\
+#define __put_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__put_user_" #__s				\
 		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2)					\
+		: "0" (__p), "r" (__r2), "r" (__l)			\
 		: "ip", "lr", "cc")
 
-#define put_user(x,p)							\
+#define __put_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__put_user_x(__r2, __p, __e, 1);		\
+			__put_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__put_user_x(__r2, __p, __e, 2);		\
+			__put_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__put_user_x(__r2, __p, __e, 4);		\
+			__put_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		case 8:							\
-			__put_user_x(__r2, __p, __e, 8);		\
+			__put_user_x(__r2, __p, __e, __l, 8);		\
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
 		__e;							\
 	})
 
+#define put_user(x,p)							\
+	({								\
+		might_fault();						\
+		__put_user_check(x,p);					\
+	 })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
@@ -300,6 +327,7 @@ do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
 	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 0cab47d4a83..d9ff5cc3a50 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -404,6 +404,15 @@
 #define __NR_setns			(__NR_SYSCALL_BASE+375)
 #define __NR_process_vm_readv		(__NR_SYSCALL_BASE+376)
 #define __NR_process_vm_writev		(__NR_SYSCALL_BASE+377)
+					/* 378 for kcmp */
+
+/*
+ * This may need to be greater than __NR_last_syscall+1 in order to
+ * account for the padding in the syscall table
+ */
+#ifdef __KERNEL__
+#define __NR_syscalls  (380)
+#endif /* __KERNEL__ */
 
 /*
  * The following SWIs are ARM private.
@@ -483,6 +492,7 @@
  */
 #define __IGNORE_fadvise64_64
 #define __IGNORE_migrate_pages
+#define __IGNORE_kcmp
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_UNISTD_H */