author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-02 16:19:24 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-07-02 16:19:24 -0700
commit    | e13053f50664d3d614bbc9b8c83abdad979ac7c9 (patch)
tree      | 07ee41cd50ba26bd7ec92255184f80aff70a2e9a /arch/m32r/include/asm
parent    | 2d722f6d5671794c0de0e29e3da75006ac086718 (diff)
parent    | 662bbcb2747c2422cf98d3d97619509379eee466 (diff)
Merge branch 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull voluntary preemption fixes from Ingo Molnar:
"This tree contains a speedup which is achieved through better
might_sleep()/might_fault() preemption point annotations for uaccess
functions, by Michael S Tsirkin:
1. The only reason uaccess routines might sleep is if they fault.
Make this explicit for all architectures.
2. A voluntary preemption point in uaccess functions means the compiler
cannot inline them efficiently; this breaks the assumption, which
e.g. the net code seems to make, that they are very fast and small.
Remove this preemption point so the behaviour matches what callers
assume.
3. Accesses (e.g. through socket ops) to kernel memory with KERNEL_DS,
as net/sunrpc does, will never sleep. Remove the unconditional
might_sleep() in the might_fault() inline in kernel.h (used when
PROVE_LOCKING is not set).
4. Accesses under pagefault_disable() return -EFAULT but won't cause the
caller to sleep. Check for that and thus avoid might_sleep() when
PROVE_LOCKING is set.
These changes offer a nice speedup for CONFIG_PREEMPT_VOLUNTARY=y
kernels, here's a network bandwidth measurement between a virtual
machine and the host:
before:
incoming: 7122.77 Mb/s
outgoing: 8480.37 Mb/s
after:
incoming: 8619.24 Mb/s [ +21.0% ]
outgoing: 9455.42 Mb/s [ +11.5% ]
I kept these changes in a separate tree, separate from scheduler
changes, because it's a mixed MM and scheduler topic"
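To make points 3 and 4 of the pull message concrete, here is a minimal
sketch of the logic the series converges on for the CONFIG_PROVE_LOCKING
variant of might_fault() — a paraphrase, not the verbatim mm/memory.c
source; segment_eq(), get_fs(), in_atomic() and might_lock_read() are the
era-appropriate kernel helpers:

/*
 * Minimal sketch (paraphrase, not the verbatim mm/memory.c source)
 * of the CONFIG_PROVE_LOCKING might_fault() logic that points 1-4
 * of the pull message describe.
 */
void might_fault(void)
{
	/*
	 * Point 3: KERNEL_DS accesses (e.g. net/sunrpc via socket
	 * ops) hit kernel memory, never fault, hence never sleep.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	/*
	 * Point 4: under pagefault_disable() a faulting access
	 * returns -EFAULT instead of sleeping, so don't warn.
	 */
	if (in_atomic())
		return;

	might_sleep();

	/* Taking a page fault may take mmap_sem for reading. */
	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
}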
* 'sched-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
mm, sched: Allow uaccess in atomic with pagefault_disable()
mm, sched: Drop voluntary schedule from might_fault()
x86: uaccess s/might_sleep/might_fault/
tile: uaccess s/might_sleep/might_fault/
powerpc: uaccess s/might_sleep/might_fault/
mn10300: uaccess s/might_sleep/might_fault/
microblaze: uaccess s/might_sleep/might_fault/
m32r: uaccess s/might_sleep/might_fault/
frv: uaccess s/might_sleep/might_fault/
arm64: uaccess s/might_sleep/might_fault/
asm-generic: uaccess s/might_sleep/might_fault/
Diffstat (limited to 'arch/m32r/include/asm')
-rw-r--r-- | arch/m32r/include/asm/uaccess.h | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 1c7047bea20..84fe7ba5303 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
 ({									\
 	long __gu_err = 0;						\
 	unsigned long __gu_val;						\
-	might_sleep();							\
+	might_fault();							\
 	__get_user_size(__gu_val,(ptr),(size),__gu_err);		\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 	__gu_err;							\
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
 	long __gu_err = -EFAULT;					\
 	unsigned long __gu_val = 0;					\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
-	might_sleep();							\
+	might_fault();							\
 	if (access_ok(VERIFY_READ,__gu_addr,size))			\
 		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
@@ -295,7 +295,7 @@ do {									\
 #define __put_user_nocheck(x,ptr,size)					\
 ({									\
 	long __pu_err;							\
-	might_sleep();							\
+	might_fault();							\
 	__put_user_size((x),(ptr),(size),__pu_err);			\
 	__pu_err;							\
 })
@@ -305,7 +305,7 @@ do {									\
 ({									\
 	long __pu_err = -EFAULT;					\
 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
-	might_sleep();							\
+	might_fault();							\
 	if (access_ok(VERIFY_WRITE,__pu_addr,size))			\
 		__put_user_size((x),__pu_addr,(size),__pu_err);		\
 	__pu_err;							\
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  */
 #define copy_to_user(to,from,n)						\
 ({									\
-	might_sleep();							\
+	might_fault();							\
 	__generic_copy_to_user((to),(from),(n));			\
 })
@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  */
 #define copy_from_user(to,from,n)					\
 ({									\
-	might_sleep();							\
+	might_fault();							\
 	__generic_copy_from_user((to),(from),(n));			\
 })
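The substitution above is purely mechanical; the semantic payoff is point 4
of the pull message. As an illustration — not code from this commit; the
helper and the kbuf/ubuf/len names are hypothetical — the atomic-access
pattern that must not trigger a might_sleep() warning looks roughly like
this:

#include <linux/uaccess.h>

/* Hypothetical helper -- not from this commit. */
static int fetch_user_buf(void *kbuf, const void __user *ubuf, size_t len)
{
	unsigned long left;

	/*
	 * Fast path: with page faults disabled the copy can fail
	 * (nonzero 'left', reported to callers as -EFAULT) but can
	 * never sleep, so might_fault() must not unconditionally
	 * call might_sleep() here.
	 */
	pagefault_disable();
	left = __copy_from_user_inatomic(kbuf, ubuf, len);
	pagefault_enable();
	if (!left)
		return 0;

	/* Slow path: faults allowed again, may sleep. */
	return copy_from_user(kbuf, ubuf, len) ? -EFAULT : 0;
}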