author | Nicolas Pitre <nicolas.pitre@linaro.org> | 2011-08-12 00:14:29 +0100
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-08-13 11:26:40 +0100
commit | daece59689e76ed55d8863cae04993679a8e844e (patch) |
tree | 4dadad4d46ce72aef69a7573ba8e3a518addc9ad /arch/arm |
parent | 9e775ad19f52d70a53797b4d0eb740c52b0a9567 (diff) |
ARM: 7013/1: P2V: Remove ARM_PATCH_PHYS_VIRT_16BIT
This code can be removed now that MSM targets no longer need the 16-bit
offsets for P2V.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
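With the 16-bit variant gone, the only remaining scheme patches the rotated 8-bit immediate of an ARM add/sub instruction, so the offset between virtual and physical addresses must be a multiple of 16MiB (only bits 31-24 are encoded). The following minimal user-space sketch illustrates that constraint; the base addresses are illustrative assumptions, not values taken from this commit.

```c
/* Sketch of the P2V arithmetic kept by this patch: the virtual-to-physical
 * offset must fit in the top 8 bits (__PV_BITS_31_24), i.e. be a multiple
 * of 16MiB.  Addresses below are examples only.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t virt_to_phys(uint32_t virt, uint32_t pv_offset)
{
	/* the kernel patches an "add rd, rn, #imm" stub at boot;
	 * here we simply add the offset */
	return virt + pv_offset;
}

int main(void)
{
	uint32_t page_offset = 0xc0000000;	/* typical kernel virtual base */
	uint32_t phys_offset = 0x80000000;	/* example physical RAM base */
	uint32_t pv_offset = phys_offset - page_offset;

	/* 16MiB alignment check, mirroring head.S: teq r3, r6, lsl #24 */
	assert((pv_offset & 0x00ffffff) == 0);

	assert(virt_to_phys(0xc0001000, pv_offset) == 0x80001000);
	return 0;
}
```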
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/Kconfig | 10
-rw-r--r-- | arch/arm/include/asm/memory.h | 7
-rw-r--r-- | arch/arm/include/asm/module.h | 4
-rw-r--r-- | arch/arm/kernel/head.S | 61
4 files changed, 14 insertions, 68 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 8882a535cf4..272eadc7a12 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -205,20 +205,12 @@ config ARM_PATCH_PHYS_VIRT
 	  kernel in system memory.
 
 	  This can only be used with non-XIP MMU kernels where the base
-	  of physical memory is at a 16MB boundary, or theoretically 64K
-	  for the MSM machine class.
+	  of physical memory is at a 16MB boundary.
 
 	  Only disable this option if you know that you do not require
 	  this feature (eg, building a kernel for a single machine) and
 	  you need to shrink the kernel to the minimal size.
 
-config ARM_PATCH_PHYS_VIRT_16BIT
-	def_bool y
-	depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
-	help
-	  This option extends the physical to virtual translation patching
-	  to allow physical memory down to a theoretical minimum of 64K
-	  boundaries.
 
 source "init/Kconfig"
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index b8de516e600..441fc4fe826 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -160,7 +160,6 @@
  * so that all we need to do is modify the 8-bit constant field.
  */
 #define __PV_BITS_31_24	0x81000000
-#define __PV_BITS_23_16	0x00810000
 
 extern unsigned long __pv_phys_offset;
 #define PHYS_OFFSET	__pv_phys_offset
@@ -178,9 +177,6 @@ static inline unsigned long __virt_to_phys(unsigned long x)
 {
 	unsigned long t;
 	__pv_stub(x, t, "add", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	__pv_stub(t, t, "add", __PV_BITS_23_16);
-#endif
 	return t;
 }
 
@@ -188,9 +184,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 {
 	unsigned long t;
 	__pv_stub(x, t, "sub", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	__pv_stub(t, t, "sub", __PV_BITS_23_16);
-#endif
 	return t;
 }
 #else
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 543b44916d2..6c6809f982f 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -31,11 +31,7 @@ struct mod_arch_specific {
 
 /* Add __virt_to_phys patching state as well */
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
-#else
 #define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
-#endif
 #else
 #define MODULE_ARCH_VERMAGIC_P2V ""
 #endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 742b6108a00..136abb61094 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -488,13 +488,8 @@ __fixup_pv_table:
 	add	r5, r5, r3	@ adjust table end address
 	add	r7, r7, r3	@ adjust __pv_phys_offset address
 	str	r8, [r7]	@ save computed PHYS_OFFSET to __pv_phys_offset
-#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
 	mov	r6, r3, lsr #24	@ constant for add/sub instructions
 	teq	r3, r6, lsl #24	@ must be 16MiB aligned
-#else
-	mov	r6, r3, lsr #16	@ constant for add/sub instructions
-	teq	r3, r6, lsl #16	@ must be 64kiB aligned
-#endif
 THUMB(	it	ne		@ cross section branch )
 	bne	__error
 	str	r6, [r7, #4]	@ save to __pv_offset
@@ -510,20 +505,8 @@ ENDPROC(__fixup_pv_table)
 	.text
 __fixup_a_pv_table:
 #ifdef CONFIG_THUMB2_KERNEL
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	lsls	r0, r6, #24
-	lsr	r6, #8
-	beq	1f
-	clz	r7, r0
-	lsr	r0, #24
-	lsl	r0, r7
-	bic	r0, 0x0080
-	lsrs	r7, #1
-	orrcs	r0, #0x0080
-	orr	r0, r0, r7, lsl #12
-#endif
-1:	lsls	r6, #24
-	beq	4f
+	lsls	r6, #24
+	beq	2f
 	clz	r7, r6
 	lsr	r6, #24
 	lsl	r6, r7
@@ -532,43 +515,25 @@ __fixup_a_pv_table:
 	orrcs	r6, #0x0080
 	orr	r6, r6, r7, lsl #12
 	orr	r6, #0x4000
-	b	4f
-2:	@ at this point the C flag is always clear
-	add	r7, r3
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	ldrh	ip, [r7]
-	tst	ip, 0x0400	@ the i bit tells us LS or MS byte
-	beq	3f
-	cmp	r0, #0		@ set C flag, and ...
-	biceq	ip, 0x0400	@ immediate zero value has a special encoding
-	streqh	ip, [r7]	@ that requires the i bit cleared
-#endif
-3:	ldrh	ip, [r7, #2]
+	b	2f
+1:	add	r7, r3
+	ldrh	ip, [r7, #2]
 	and	ip, 0x8f00
-	orrcc	ip, r6	@ mask in offset bits 31-24
-	orrcs	ip, r0	@ mask in offset bits 23-16
+	orr	ip, r6	@ mask in offset bits 31-24
 	strh	ip, [r7, #2]
-4:	cmp	r4, r5
+2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
-	bcc	2b
+	bcc	1b
 	bx	lr
 #else
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	and	r0, r6, #255	@ offset bits 23-16
-	mov	r6, r6, lsr #8	@ offset bits 31-24
-#else
-	mov	r0, #0		@ just in case...
-#endif
-	b	3f
-2:	ldr	ip, [r7, r3]
+	b	2f
+1:	ldr	ip, [r7, r3]
 	bic	ip, ip, #0x000000ff
-	tst	ip, #0x400	@ rotate shift tells us LS or MS byte
-	orrne	ip, ip, r6	@ mask in offset bits 31-24
-	orreq	ip, ip, r0	@ mask in offset bits 23-16
+	orr	ip, ip, r6	@ mask in offset bits 31-24
 	str	ip, [r7, r3]
-3:	cmp	r4, r5
+2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
-	bcc	2b
+	bcc	1b
 	mov	pc, lr
 #endif
 ENDPROC(__fixup_a_pv_table)
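With the bits 23-16 patching removed, the ARM (non-Thumb) loop in __fixup_a_pv_table reduces to clearing each stub's 8-bit immediate field and OR-ing in bits 31-24 of the offset (bic ip, ip, #0x000000ff; orr ip, ip, r6). Below is a rough user-space sketch of that per-instruction rewrite; the example encoding is an assumption chosen for illustration, not taken from a real kernel image.

```c
/* Sketch (not kernel code) of the simplified ARM-mode fixup: replace the
 * 8-bit immediate of an add/sub stub with offset bits 31-24.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t patch_stub(uint32_t insn, uint32_t pv_offset)
{
	uint32_t imm = pv_offset >> 24;	/* constant for add/sub instructions */

	insn &= ~(uint32_t)0x000000ff;	/* bic ip, ip, #0x000000ff */
	insn |= imm;			/* orr ip, ip, r6 */
	return insn;
}

int main(void)
{
	/* example stub: "add r0, r0, #0x81000000", i.e. the __PV_BITS_31_24
	 * placeholder immediate from memory.h */
	uint32_t stub = 0xe2800481;

	/* patching for a 0xc0000000 offset yields "add r0, r0, #0xc0000000" */
	assert(patch_stub(stub, 0xc0000000) == 0xe28004c0);
	return 0;
}
```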