author     H. J. Lu <hjl@lucon.org>                   2005-05-01 08:58:48 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org> 2005-05-01 08:58:48 -0700
commit     fd51f666fa591294bd7462447512666e61c56ea0
tree       0addf0006900152975c38bd75fdfd238c9179013 /arch
parent     d5b63d78f1e75f6c6f04862dfb2f2a4aeffafd4c
[PATCH] i386/x86_64 segment register access update
The new i386/x86_64 assemblers no longer accept instructions for moving
between a segment register and a 32-bit memory location, i.e.,

	movl (%eax),%ds
	movl %ds,(%eax)
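
This rejected 32-bit form is exactly what the old kernel code asked for:
the pre-patch inline asm (removed by this patch, see the diff below)
forced a 32-bit memory destination through a cast:

	/* Old form: the cast makes %0 a 32-bit memory operand, so the
	 * assembler sees "movl %fs,mem32", which new binutils rejects. */
	asm volatile("movl %%fs,%0" : "=m" (*(int *)&prev->fs));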
To generate instructions for moving between a segment register and a
16-bit memory location without the 16-bit operand-size prefix, 0x66,

	mov (%eax),%ds
	mov %ds,(%eax)

should be used instead; this form works with both new and old
assemblers. Assemblers from 2.16.90.0.1 onward will also support

	movw (%eax),%ds
	movw %ds,(%eax)

without emitting the 0x66 prefix. I am enclosing patches for the 2.4
and 2.6 kernels
here. The resulting kernel binaries should remain unchanged, with both
old and new assemblers, provided gcc never generates a memory access for

	unsigned gsindex;
	asm volatile("movl %%gs,%0" : "=g" (gsindex));

If gcc does generate a memory access for the code above, the upper bits
of gsindex are undefined and the new assembler will reject it.
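
(An aside, not part of this patch: forcing a register output with "=r"
avoids the problematic memory form altogether, since a segment-to-32-bit-
register move is accepted by both old and new assemblers. Note the upper
16 bits of the destination register are zeroed on P6-family and later
processors but undefined on earlier CPUs:

	unsigned gsindex;
	/* "=r" guarantees a register destination, so the assembler
	 * never sees a segment-to-memory "movl". */
	asm volatile("movl %%gs,%0" : "=r" (gsindex));

)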
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/process.c    |  4
-rw-r--r--  arch/i386/kernel/vm86.c       |  4
-rw-r--r--  arch/x86_64/kernel/process.c  | 12
3 files changed, 10 insertions, 10 deletions
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index b2203e21acb..85bd56d4431 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -611,8 +611,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 2f3d52dacff..d16cd3738a4 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	 */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 9922d2ba24a..761b6d35e33 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -402,10 +402,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -468,11 +468,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);