summary | refs | log | tree | commit | diff | stats
path: root/arch/x86/boot
diff options
context:
space:
mode:
author: H. Peter Anvin <hpa@zytor.com> 2009-05-08 16:45:15 -0700
committer: H. Peter Anvin <hpa@zytor.com> 2009-05-08 17:34:52 -0700
commit: 36d3793c947f1ef7ba3d24eeeddc1be41adc5ab4 (patch)
tree: bf5987e4caacb7aab5a78ca683a3179bd8eba717 /arch/x86/boot
parent: 97541912785369925723b6255438ad9fce2ddf04 (diff)
x86, boot: use appropriate rep string for move and clear
In the pre-decompression code, use the appropriate largest possible rep movs and rep stos to move code and clear bss, respectively. For reverse copy, do note that the initial values are supposed to be the address of the first (highest) copy datum, not one byte beyond the end of the buffer. rep strings are not necessarily the fastest way to perform these operations on all current processors, but are likely to be in the future, and perhaps more importantly, we want to encourage the architecturally right thing to do here. This also fixes a couple of trivial inefficiencies on 64 bits. [ Impact: trivial performance enhancement, increase code similarity ] Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/boot')
-rw-r--r-- arch/x86/boot/compressed/head_32.S | 11
-rw-r--r-- arch/x86/boot/compressed/head_64.S | 26
2 files changed, 19 insertions, 18 deletions
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index d02a4f02be1..6710dc78ac5 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -100,11 +100,12 @@ ENTRY(startup_32)
* where decompression in place becomes safe.
*/
pushl %esi
- leal _bss(%ebp), %esi
- leal _bss(%ebx), %edi
+ leal (_bss-4)(%ebp), %esi
+ leal (_bss-4)(%ebx), %edi
movl $(_bss - startup_32), %ecx
+ shrl $2, %ecx
std
- rep movsb
+ rep movsl
cld
popl %esi
@@ -135,8 +136,8 @@ relocated:
leal _bss(%ebx), %edi
leal _ebss(%ebx), %ecx
subl %edi, %ecx
- cld
- rep stosb
+ shrl $2, %ecx
+ rep stosl
/*
* Do the decompression, and jump to the new kernel..
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index a0b18426069..723c72dfd7b 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -260,15 +260,15 @@ ENTRY(startup_64)
* Copy the compressed kernel to the end of our buffer
* where decompression in place becomes safe.
*/
- leaq _bss(%rip), %r8
- leaq _bss(%rbx), %r9
+ pushq %rsi
+ leaq (_bss-8)(%rip), %rsi
+ leaq (_bss-8)(%rbx), %rdi
movq $_bss /* - $startup_32 */, %rcx
-1: subq $8, %r8
- subq $8, %r9
- movq 0(%r8), %rax
- movq %rax, 0(%r9)
- subq $8, %rcx
- jnz 1b
+ shrq $3, %rcx
+ std
+ rep movsq
+ cld
+ popq %rsi
/*
* Jump to the relocated address.
@@ -282,12 +282,12 @@ relocated:
/*
* Clear BSS (stack is currently empty)
*/
- xorq %rax, %rax
- leaq _bss(%rbx), %rdi
- leaq _ebss(%rbx), %rcx
+ xorl %eax, %eax
+ leaq _bss(%rip), %rdi
+ leaq _ebss(%rip), %rcx
subq %rdi, %rcx
- cld
- rep stosb
+ shrq $3, %rcx
+ rep stosq
/*
* Do the decompression, and jump to the new kernel..